column                                      dtype           range
hexsha                                      stringlengths   40 .. 40
size                                        int64           1 .. 1.03M
ext                                         stringclasses   10 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3 .. 239
max_stars_repo_name                         stringlengths   5 .. 130
max_stars_repo_head_hexsha                  stringlengths   40 .. 78
max_stars_repo_licenses                     listlengths     1 .. 10
max_stars_count                             int64           1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths   24 .. 24
max_issues_repo_path                        stringlengths   3 .. 239
max_issues_repo_name                        stringlengths   5 .. 130
max_issues_repo_head_hexsha                 stringlengths   40 .. 78
max_issues_repo_licenses                    listlengths     1 .. 10
max_issues_count                            int64           1 .. 67k
max_issues_repo_issues_event_min_datetime   stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths   24 .. 24
max_forks_repo_path                         stringlengths   3 .. 239
max_forks_repo_name                         stringlengths   5 .. 130
max_forks_repo_head_hexsha                  stringlengths   40 .. 78
max_forks_repo_licenses                     listlengths     1 .. 10
max_forks_count                             int64           1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths   24 .. 24
content                                     stringlengths   1 .. 1.03M
avg_line_length                             float64         1 .. 958k
max_line_length                             int64           1 .. 1.03M
alphanum_fraction                           float64         0 .. 1
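
The listing above matches the feature summary the Hugging Face `datasets` library prints for a code corpus. A minimal loading sketch follows; the dataset identifier is a placeholder (assumption), since the dump does not name the dataset.

from datasets import load_dataset

ds = load_dataset("some-org/python-code-corpus", split="train")  # hypothetical identifier
print(ds.features)   # should mirror the columns listed above
row = ds[0]
print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])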
hexsha: 4a076eacb579e6f24f57706c9a29188eacb31618
size: 2,028
ext: py
lang: Python
max_stars_repo_path: aliyun-python-sdk-oos/aliyunsdkoos/request/v20190601/ListInstanceStateReportsRequest.py
max_stars_repo_name: ankitdobhal/aliyun-openapi-python-sdk
max_stars_repo_head_hexsha: 991b1c2d91adc468480defc23ba790d4369cce7b
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: aliyun-python-sdk-oos/aliyunsdkoos/request/v20190601/ListInstanceStateReportsRequest.py
max_issues_repo_name: ankitdobhal/aliyun-openapi-python-sdk
max_issues_repo_head_hexsha: 991b1c2d91adc468480defc23ba790d4369cce7b
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: aliyun-python-sdk-oos/aliyunsdkoos/request/v20190601/ListInstanceStateReportsRequest.py
max_forks_repo_name: ankitdobhal/aliyun-openapi-python-sdk
max_forks_repo_head_hexsha: 991b1c2d91adc468480defc23ba790d4369cce7b
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkoos.endpoint import endpoint_data


class ListInstanceStateReportsRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'oos', '2019-06-01', 'ListInstanceStateReports', 'oos')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self, InstanceId):
        self.add_query_param('InstanceId', InstanceId)

    def get_NextToken(self):
        return self.get_query_params().get('NextToken')

    def set_NextToken(self, NextToken):
        self.add_query_param('NextToken', NextToken)

    def get_MaxResults(self):
        return self.get_query_params().get('MaxResults')

    def set_MaxResults(self, MaxResults):
        self.add_query_param('MaxResults', MaxResults)

    def get_StateConfigurationId(self):
        return self.get_query_params().get('StateConfigurationId')

    def set_StateConfigurationId(self, StateConfigurationId):
        self.add_query_param('StateConfigurationId', StateConfigurationId)
avg_line_length: 36.214286
max_line_length: 83
alphanum_fraction: 0.773176
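
A minimal usage sketch for the request class above, following the standard aliyun-python-sdk-core client pattern; the credentials and region are placeholders.

from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')  # placeholder credentials
request = ListInstanceStateReportsRequest()
request.set_MaxResults(10)
response = client.do_action_with_exception(request)  # raw JSON response body
print(response)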
hexsha: 4a076f72fecc430b2c78fcb990b9f901fce49ddb
size: 5,517
ext: py
lang: Python
max_stars_repo_path: conans/util/progress_bar.py
max_stars_repo_name: matthiasng/conan
max_stars_repo_head_hexsha: 634eadc319da928084633a344d42785edccb8d6c
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 6,205
max_stars_repo_stars_event_min_datetime: 2015-12-01T13:40:05.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-31T07:30:25.000Z
max_issues_repo_path: conans/util/progress_bar.py
max_issues_repo_name: matthiasng/conan
max_issues_repo_head_hexsha: 634eadc319da928084633a344d42785edccb8d6c
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 8,747
max_issues_repo_issues_event_min_datetime: 2015-12-01T16:28:48.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T23:34:53.000Z
max_forks_repo_path: conans/util/progress_bar.py
max_forks_repo_name: Mattlk13/conan
max_forks_repo_head_hexsha: 005fc53485557b0a570bb71670f2ca9c66082165
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 961
max_forks_repo_forks_event_min_datetime: 2015-12-01T16:56:43.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T13:50:52.000Z
content:
import os
from contextlib import contextmanager
import time

from tqdm import tqdm

from conans.client.output import ConanOutput

TIMEOUT_BEAT_SECONDS = 30
TIMEOUT_BEAT_CHARACTER = '.'
LEFT_JUSTIFY_DESC = 28
LEFT_JUSTIFY_MESSAGE = 90


def left_justify_message(msg):
    return msg.ljust(LEFT_JUSTIFY_MESSAGE)


def left_justify_description(msg):
    return msg.ljust(LEFT_JUSTIFY_DESC)


class ProgressOutput(ConanOutput):
    def __init__(self, output):
        super(ProgressOutput, self).__init__(output._stream, output._stream_err, output._color)

    def _write(self, data, newline=False):
        end = "\n" if newline else ""
        tqdm.write(str(data), file=self._stream, end=end)

    def _write_err(self, data, newline=False):
        end = "\n" if newline else ""
        tqdm.write(str(data), file=self._stream_err, end=end)


class Progress(object):
    def __init__(self, length, output, description, post_description=None):
        self._tqdm_bar = None
        self._total_length = length
        self._output = output
        self._processed_size = 0
        self._description = description
        self._post_description = "{} completed".format(
            self._description) if not post_description else post_description
        self._last_time = time.time()
        if self._output and self._output.is_terminal and self._description:
            self._tqdm_bar = tqdm(total=self._total_length,
                                  desc=left_justify_description(self._description),
                                  file=self._output, unit="B", leave=False, dynamic_ncols=False,
                                  ascii=True, unit_scale=True, unit_divisor=1024)

    def initial_value(self, value):
        self._processed_size = value
        self._pb_update(value)

    def _pb_update(self, chunk_size):
        if self._tqdm_bar is not None:
            self._tqdm_bar.update(chunk_size)
        elif self._output and time.time() - self._last_time > TIMEOUT_BEAT_SECONDS:
            self._last_time = time.time()
            self._output.write(TIMEOUT_BEAT_CHARACTER)

    def update(self, chunks):
        for chunk in chunks:
            yield chunk
            data_size = len(chunk)
            self._processed_size += data_size
            self._pb_update(data_size)
        if self._total_length > self._processed_size:
            self._pb_update(self._total_length - self._processed_size)
        self.pb_close()

    def pb_close(self):
        if self._tqdm_bar is not None:
            self._tqdm_bar.close()
            msg = "\r{} [{:1.2f}k]".format(self._post_description, self._processed_size / 1024.0)
            tqdm.write(left_justify_message(msg), file=self._output, end="\n")


class FileWrapper(Progress):
    def __init__(self, fileobj, output, description, post_description=None):
        self._fileobj = fileobj
        self.seek(0, os.SEEK_END)
        super(FileWrapper, self).__init__(self.tell(), output, description, post_description)
        self.seek(0)

    def seekable(self):
        return self._fileobj.seekable()

    def seek(self, *args, **kwargs):
        return self._fileobj.seek(*args, **kwargs)

    def tell(self):
        return self._fileobj.tell()

    def read(self, size):
        prev = self.tell()
        ret = self._fileobj.read(size)
        self._pb_update(self.tell() - prev)
        return ret


class ListWrapper(object):
    def __init__(self, files_list, output, description, post_description=None):
        self._files_list = files_list
        self._total_length = len(self._files_list)
        self._iterator = iter(self._files_list)
        self._last_progress = None
        self._i_file = 0
        self._output = output
        self._description = description
        self._post_description = "{} completed".format(
            self._description) if not post_description else post_description
        self._last_time = time.time()
        if self._output and self._output.is_terminal:
            self._tqdm_bar = tqdm(total=len(files_list),
                                  desc=left_justify_description(self._description),
                                  file=self._output, unit="files ", leave=False,
                                  dynamic_ncols=False, ascii=True)

    def update(self):
        self._i_file = self._i_file + 1
        if self._output and self._output.is_terminal:
            self._tqdm_bar.update()
        elif self._output and time.time() - self._last_time > TIMEOUT_BEAT_SECONDS:
            self._last_time = time.time()
            self._output.write(TIMEOUT_BEAT_CHARACTER)

    def pb_close(self):
        if self._output and self._output.is_terminal:
            self._tqdm_bar.close()
            msg = "\r{} [{} files]".format(self._post_description, self._total_length)
            tqdm.write(left_justify_message(msg), file=self._output, end="\n")

    def __iter__(self):
        return self

    def __next__(self):
        val = next(self._iterator)
        self.update()
        return val

    def next(self):
        return self.__next__()


@contextmanager
def open_binary(path, output, description):
    with open(path, mode='rb') as file_handler:
        file_wrapped = FileWrapper(file_handler, output, description)
        yield file_wrapped
        file_wrapped.pb_close()


@contextmanager
def iterate_list_with_progress(files_list, output, description):
    list_wrapped = ListWrapper(files_list, output, description)
    yield list_wrapped
    list_wrapped.pb_close()
avg_line_length: 34.055556
max_line_length: 101
alphanum_fraction: 0.646185
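
A minimal sketch of exercising `open_binary` above. `ConanOutput` normally wraps Conan's terminal streams; here a tiny stand-in providing only the attributes `Progress` actually touches (`is_terminal`, `write`) is assumed, and the file path is a placeholder.

class _FakeOutput(object):
    is_terminal = False  # forces the "." heartbeat path instead of a tqdm bar

    def write(self, data):
        print(data, end="")

# Placeholder path; each read() advances the progress state.
with open_binary("/tmp/some_file.bin", _FakeOutput(), "Uploading some_file.bin") as f:
    while f.read(8192):
        pass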
hexsha: 4a0770b2d1cd1a1a587f80e26d9c659bc4a20bad
size: 5,022
ext: py
lang: Python
max_stars_repo_path: srv6_sdn_control_plane/southbound/netconf/sb_netconf_client.py
max_stars_repo_name: everywan-io/srv6-sdn-control-plane
max_stars_repo_head_hexsha: afb7ce82571c852f784b763b8dec766b75f350fd
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: srv6_sdn_control_plane/southbound/netconf/sb_netconf_client.py
max_issues_repo_name: everywan-io/srv6-sdn-control-plane
max_issues_repo_head_hexsha: afb7ce82571c852f784b763b8dec766b75f350fd
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: srv6_sdn_control_plane/southbound/netconf/sb_netconf_client.py
max_forks_repo_name: everywan-io/srv6-sdn-control-plane
max_forks_repo_head_hexsha: afb7ce82571c852f784b763b8dec766b75f350fd
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python

import time

from netconf.client import NetconfSSHSession

try:
    from lxml import etree
except ImportError:
    # Note: the pretty_print argument used below is lxml-specific; under this
    # stdlib fallback, etree.tostring(..., pretty_print=True) raises TypeError.
    from xml.etree import ElementTree as etree


# Utility to close Netconf sessions
def close_netconf_session(session):
    # Let's take the reference of the transport
    transport = session.pkt_stream.stream
    # Let's close the Netconf session
    session.close()
    # This is a workaround for RST_ACK
    time.sleep(0.05)
    # Close the transport
    transport.close()
    # Flush the cache
    transport.cache.flush()


# Let's create a NetConf session
session = NetconfSSHSession("127.0.0.1", 830, "srv6", "srv6")

# From the hello, we got the capabilities
for capability in session.capabilities:
    print(capability)

config = """
<edit-config>
    <target>
        <running/>
    </target>
    <default-operation>none</default-operation>
    <test-option>test-then-set</test-option>
    <error-option>rollback-on-error</error-option>
    <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
        <srv6-explicit-path operation="create"
                xmlns="urn:ietf:params:xml:ns:yang:srv6-explicit-path">
            <path>
                <destination>1111:4::2/128</destination>
                <sr-path>
                    <srv6-segment>1111:3::2</srv6-segment>
                </sr-path>
                <encapmode>inline</encapmode>
                <device>eth0</device>
            </path>
        </srv6-explicit-path>
    </config>
</edit-config>
"""
# Single add
result = session.send_rpc(config)
print(format(etree.tostring(result[0], pretty_print=True)))

config = """
<edit-config>
    <target>
        <running/>
    </target>
    <default-operation>none</default-operation>
    <test-option>test-then-set</test-option>
    <error-option>rollback-on-error</error-option>
    <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
        <srv6-explicit-path operation="create"
                xmlns="urn:ietf:params:xml:ns:yang:srv6-explicit-path">
            <path>
                <destination>2222:4::2/128</destination>
                <sr-path>
                    <srv6-segment>2222:3::2</srv6-segment>
                </sr-path>
                <encapmode>inline</encapmode>
                <device>eth0</device>
            </path>
            <path>
                <destination>3333:4::2/128</destination>
                <sr-path>
                    <srv6-segment>3333:3::2</srv6-segment>
                    <srv6-segment>3333:2::2</srv6-segment>
                    <srv6-segment>3333:1::2</srv6-segment>
                </sr-path>
                <encapmode>encap</encapmode>
                <device>eth0</device>
            </path>
        </srv6-explicit-path>
    </config>
</edit-config>
"""
# Bulk add
result = session.send_rpc(config)
print(format(etree.tostring(result[0], pretty_print=True)))

# Close the session
close_netconf_session(session)

# Delete all the routes created before
configs = [
    """
    <edit-config>
        <target>
            <running/>
        </target>
        <default-operation>none</default-operation>
        <test-option>test-then-set</test-option>
        <error-option>rollback-on-error</error-option>
        <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
            <srv6-explicit-path operation="remove"
                    xmlns="urn:ietf:params:xml:ns:yang:srv6-explicit-path">
                <path>
                    <destination>1111:4::2/128</destination>
                    <sr-path>
                        <srv6-segment>1111:3::2</srv6-segment>
                    </sr-path>
                    <encapmode>inline</encapmode>
                    <device>eth0</device>
                </path>
            </srv6-explicit-path>
        </config>
    </edit-config>
    """,
    """
    <edit-config>
        <target>
            <running/>
        </target>
        <default-operation>none</default-operation>
        <test-option>test-then-set</test-option>
        <error-option>rollback-on-error</error-option>
        <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
            <srv6-explicit-path operation="remove"
                    xmlns="urn:ietf:params:xml:ns:yang:srv6-explicit-path">
                <path>
                    <destination>2222:4::2/128</destination>
                    <sr-path>
                        <srv6-segment>2222:3::2</srv6-segment>
                    </sr-path>
                    <encapmode>inline</encapmode>
                    <device>eth0</device>
                </path>
            </srv6-explicit-path>
        </config>
    </edit-config>
    """,
    """
    <edit-config>
        <target>
            <running/>
        </target>
        <default-operation>none</default-operation>
        <test-option>test-then-set</test-option>
        <error-option>rollback-on-error</error-option>
        <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
            <srv6-explicit-path operation="remove"
                    xmlns="urn:ietf:params:xml:ns:yang:srv6-explicit-path">
                <path>
                    <destination>3333:4::2/128</destination>
                    <sr-path>
                        <srv6-segment>3333:3::2</srv6-segment>
                        <srv6-segment>3333:2::2</srv6-segment>
                        <srv6-segment>3333:1::2</srv6-segment>
                    </sr-path>
                    <encapmode>encap</encapmode>
                    <device>eth0</device>
                </path>
            </srv6-explicit-path>
        </config>
    </edit-config>
    """,
]

# Iterate over the array and delete one by one all the paths
for config in configs:
    # Each time we create a new session
    session = NetconfSSHSession("127.0.0.1", 830, "srv6", "srv6")
    result = session.send_rpc(config)
    print(format(etree.tostring(result[0], pretty_print=True)))
    close_netconf_session(session)
avg_line_length: 28.697143
max_line_length: 96
alphanum_fraction: 0.651334
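
Every RPC in this script repeats the same open/send/print/close sequence; a small helper sketch (not part of the original file) built only from names the script already defines:

def apply_config(config, host="127.0.0.1", port=830, user="srv6", password="srv6"):
    # Hypothetical convenience wrapper around the pattern used above.
    session = NetconfSSHSession(host, port, user, password)
    try:
        result = session.send_rpc(config)
        print(format(etree.tostring(result[0], pretty_print=True)))
    finally:
        close_netconf_session(session)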
hexsha: 4a07716323b1294a7acf972df50274b9ae162e07
size: 4,449
ext: py
lang: Python
max_stars_repo_path: src/spider/spider/plugins/ouest_france_immo.py
max_stars_repo_name: asteroide/immo_spider
max_stars_repo_head_hexsha: 864828c389173f6d6417392983bc8d39b5fd4ea2
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/spider/spider/plugins/ouest_france_immo.py
max_issues_repo_name: asteroide/immo_spider
max_issues_repo_head_hexsha: 864828c389173f6d6417392983bc8d39b5fd4ea2
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/spider/spider/plugins/ouest_france_immo.py
max_forks_repo_name: asteroide/immo_spider
max_forks_repo_head_hexsha: 864828c389173f6d6417392983bc8d39b5fd4ea2
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from lxml import html  # nosec
from io import StringIO
import requests
import logging
import hashlib

logger = logging.getLogger("spider.ofi")

__url__ = "https://www.ouestfrance-immo.com/"
__urls__ = [
    "https://www.ouestfrance-immo.com/acheter/maison/?lieux=24303&rayon=30&prix=0_200000",
    "https://www.ouestfrance-immo.com/acheter/maison/?lieux=24163&rayon=30&prix=0_200000",
    "https://www.ouestfrance-immo.com/acheter/maison/dinard-35-35800/?prix=0_200000",
    "https://www.ouestfrance-immo.com/acheter/maison/lamballe-22-22400/?prix=0_200000",
    "https://www.ouestfrance-immo.com/acheter/maison/guerande-44-44350/?prix=0_200000"
]
# https://www.ouestfrance-immo.com/acheter/maison/vannes-56-56000/?prix=50000_80000&surface=60_0&chambres=3_0

# return_exemple = [
#     {
#         'address': "",
#         "description": "",
#         "price": "",
#         "date": "",
#         "size": "",
#         "groundsurface": "",
#         "extra": {}
#     }
# ]


class ofi(object):
    # data_template = {
    #     "address": "",
    #     "description": "",
    #     "price": "",
    #     "date": "",
    #     "surface": "",
    #     "groundsurface": "",
    #     "url": [],
    #     "photos": [],
    #     "extra": {},
    # }

    def compute_ad(self, url):
        url = "https://www.ouestfrance-immo.com" + url
        xml_str = StringIO(requests.get(url, verify=True).text)
        tree = html.parse(xml_str)
        description = " ".join(tree.xpath('/html/body/div/section/div/div/div[@class=\'txtAnn\']/text()'))
        _id = hashlib.sha1(description.encode('utf-8')).hexdigest()
        price = " ".join(tree.xpath('/html/body/div/section/div/div/strong[@itemprop="price"]/text()'))
        price = price.replace('€', "").strip().replace(" ", "")
        price = int(price)
        address = "".join(tree.xpath('/html/body/div/section/div/div/h2[@id="caractDetail"]/text()')).replace("Vente maison", "").strip()
        ground_surface = " ".join(tree.xpath('/html/body/div/section/div/div/div/ul/li[text()="Surf. terrain : "]/strong/text()')).replace(" ", "")
        try:
            ground_surface = int(ground_surface.replace("m²", ""))
        except ValueError:
            ground_surface = 0
        options = " ".join(tree.xpath('/html/body/div/section/div/div/div/ul/li[@class="options"]/text()'))
        surface = " ".join(tree.xpath('/html/body/div/section/div/div/div/ul/li[text()="Surf. habitable : "]/strong/text()')).replace(" ", "")
        try:
            surface = int(surface.replace("m²", ""))
        except ValueError:
            surface = 0
        date = " ".join(tree.xpath('/html/body/div/section/div/h2/em/text()')).replace(" ", "").split("-")[-1].strip()
        img_urls = map(lambda x: x.get("src"), tree.xpath('//ul/li/img'))
        img_urls = list(filter(lambda x: "photo" in x, img_urls))
        return {
            'id': _id,
            'address': address,
            "description": description,
            "price": price,
            "date": date,
            "surface": surface,
            "groundsurface": ground_surface,
            "url": url,
            "img_urls": img_urls,
            "show": True,
            "extra": {
                "options": options
            },
        }

    def compute(self):
        ads = []
        for url in __urls__:
            xml_str = StringIO(requests.get(url, verify=True).text)
            tree = html.parse(xml_str)
            for _a in tree.xpath('/html/body/div/section/div/div/ul/li/div/a[@class="txt lienDetail"]'):
                # logger.debug(_a.get("href"))
                _ad = self.compute_ad(_a.get("href"))
                ads.append(_ad)
                # logger.debug("ad = {}".format(_ad))
        # addresses = tree.xpath('/html/body//h2/a/span/text()')
        #
        # for _address in addresses:
        #     _dict = {
        #         'address': _address.encode("utf-8"),
        #         "description": "",
        #         "price": "",
        #         "date": "",
        #         "surface": "",
        #         "groundsurface": "",
        #         "url": url,
        #         "extra": {},
        #     }
        #     logger.info("addresses={}".format(_address.encode("utf-8")))
        #     ads.append(_dict)
        return ads


__driver__ = ofi()
avg_line_length: 37.70339
max_line_length: 147
alphanum_fraction: 0.5118
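
A minimal driver sketch: the spider framework presumably discovers `__driver__` and calls its `compute()` method, but the plugin can also be exercised by hand (note this performs live HTTP requests against the site):

import json

ads = __driver__.compute()   # scrapes every listing page in __urls__ (live HTTP)
for ad in ads[:3]:
    print(json.dumps(ad, ensure_ascii=False, indent=2))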
hexsha: 4a07717ec896a0d570840cab9c21ff6a53ef7923
size: 2,508
ext: py
lang: Python
max_stars_repo_path: tests/performance/runs/taurus/__init__.py
max_stars_repo_name: dhanainme/multi-model-server
max_stars_repo_head_hexsha: cd5a693032b1bec4c46b0f7a9844df496a62c1a8
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 527
max_stars_repo_stars_event_min_datetime: 2017-12-04T20:58:19.000Z
max_stars_repo_stars_event_max_datetime: 2019-11-14T03:15:39.000Z
max_issues_repo_path: tests/performance/runs/taurus/__init__.py
max_issues_repo_name: dhanainme/multi-model-server
max_issues_repo_head_hexsha: cd5a693032b1bec4c46b0f7a9844df496a62c1a8
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 303
max_issues_repo_issues_event_min_datetime: 2017-12-05T06:14:08.000Z
max_issues_repo_issues_event_max_datetime: 2019-11-16T01:35:15.000Z
max_forks_repo_path: tests/performance/runs/taurus/__init__.py
max_forks_repo_name: dhanainme/multi-model-server
max_forks_repo_head_hexsha: cd5a693032b1bec4c46b0f7a9844df496a62c1a8
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 144
max_forks_repo_forks_event_min_datetime: 2017-12-05T19:27:39.000Z
max_forks_repo_forks_event_max_datetime: 2019-11-15T22:15:50.000Z
content:
#!/usr/bin/env python

# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#     http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

"""
Convert the Taurus Test suite XML to Junit XML
"""
# pylint: disable=redefined-builtin

import glob
import shutil
import os

from .reader import get_mon_metrics_list


def get_taurus_options(artifacts_dir, jmeter_path=None):
    """The options for Taurus BZT command"""
    options = []
    if jmeter_path:
        options.append('-o modules.jmeter.path={}'.format(jmeter_path))
    options.append('-o settings.artifacts-dir={}'.format(artifacts_dir))
    options.append('-o modules.console.disable=true')
    options.append('-o settings.env.BASEDIR={}'.format(artifacts_dir))
    options_str = ' '.join(options)
    return options_str


def update_taurus_metric_files(suite_artifacts_dir, test_file):
    """
    Renames the server and local metric monitoring log files to metrics.csv.
    The column order in the header of the server metric monitoring SAlogs file
    generated by Taurus does not match the data, so as a workaround this
    function rewrites the header using the order defined in the test yaml.
    """
    metrics_new_file = os.path.join(suite_artifacts_dir, "metrics.csv")
    server_metric_file_pattern = os.path.join(suite_artifacts_dir, "SAlogs_*")
    metrics_log_file = glob.glob(server_metric_file_pattern)
    if metrics_log_file:
        metrics = get_mon_metrics_list(test_file)
        if metrics:
            with open(metrics_log_file[0]) as from_file:
                line = from_file.readline()
                with open(metrics_log_file[0], mode="w") as to_file:
                    to_file.write(','.join(line.split(',')[0:1] + metrics) + "\n")
                    shutil.copyfileobj(from_file, to_file)
        os.rename(metrics_log_file[0], metrics_new_file)
    else:
        metrics_log_file = os.path.join(suite_artifacts_dir, "local_monitoring_logs.csv")
        if os.path.exists(metrics_log_file):
            os.rename(metrics_log_file, metrics_new_file)
avg_line_length: 39.1875
max_line_length: 98
alphanum_fraction: 0.710128
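
A sketch of how these helpers might wrap a bzt run; the artifacts directory, jmeter path, and test-suite yaml name below are placeholders:

import subprocess

artifacts_dir = "/tmp/taurus_artifacts"                      # placeholder
options = get_taurus_options(artifacts_dir, "/opt/jmeter")   # placeholder jmeter path
subprocess.run("bzt {} my_suite.yaml".format(options), shell=True, check=True)  # hypothetical yaml
update_taurus_metric_files(artifacts_dir, "my_suite.yaml")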
hexsha: 4a0774885da3e013efa2d9bf2e2c55be546c741a
size: 40,353
ext: py
lang: Python
max_stars_repo_path: DCLS/construct/modules/Dcls.py
max_stars_repo_name: K-H-Ismail/Dilated-Convolution-with-Learnable-Spacings-PyTorch
max_stars_repo_head_hexsha: 4714eddbd007f36930938ee53a172abcd46febfb
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 13
max_stars_repo_stars_event_min_datetime: 2021-12-09T01:24:56.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-21T10:31:33.000Z
max_issues_repo_path: DCLS/construct/modules/Dcls.py
max_issues_repo_name: K-H-Ismail/Dilated-Convolution-with-Learnable-Spacings-PyTorch
max_issues_repo_head_hexsha: 4714eddbd007f36930938ee53a172abcd46febfb
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: DCLS/construct/modules/Dcls.py
max_forks_repo_name: K-H-Ismail/Dilated-Convolution-with-Learnable-Spacings-PyTorch
max_forks_repo_head_hexsha: 4714eddbd007f36930938ee53a172abcd46febfb
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-02-12T06:26:57.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-12T06:26:57.000Z
content:
# coding=utf-8
import math
import warnings

import torch
from torch import Tensor
from torch.nn.parameter import Parameter
import torch.nn.functional as F

import DCLS
import DCLS.construct.functions.dcls_functionnal as SD
# import DCLS.construct.functions.swc_functionnal as SW

from torch.nn import init
from torch.nn.modules import Module
from torch.nn.modules.utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple

import operator
import functools

try:
    from depthwise_conv2d_implicit_gemm import _DepthWiseConv2dImplicitGEMMFP32, _DepthWiseConv2dImplicitGEMMFP16
except ImportError as error:
    # Output expected ImportErrors.
    # (Fix: the original called an undefined `Logging` helper here and below;
    # `warnings` is already imported, so it is used instead.)
    warnings.warn('could not import depthwise_conv2d_implicit_gemm '
                  '(error.name: {}, error.path: {}); switching to native conv2d'.format(error.name, error.path))
except Exception as exception:
    # Output unexpected Exceptions.
    warnings.warn('unexpected exception while importing depthwise_conv2d_implicit_gemm: {}; '
                  'switching to native conv2d'.format(exception))

convolution_notes = \
    {"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",

     "depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
      where `K` is a positive integer, this operation is also known as a "depthwise convolution".

      In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
      a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
      :math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""}  # noqa: B950


class _DclsNd(Module):

    __constants__ = ['stride', 'padding', 'dilated_kernel_size', 'groups',
                     'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_count', 'scaling']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        ...

    _in_channels: int
    out_channels: int
    kernel_count: int
    stride: Tuple[int, ...]
    padding: Tuple[int, ...]
    dilated_kernel_size: Tuple[int, ...]
    transposed: bool
    output_padding: Tuple[int, ...]
    groups: int
    padding_mode: str
    weight: Tensor
    bias: Optional[Tensor]
    scaling: float

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_count: int,
                 stride: Tuple[int, ...],
                 padding: Tuple[int, ...],
                 dilated_kernel_size: Tuple[int, ...],
                 transposed: bool,
                 output_padding: Tuple[int, ...],
                 groups: int,
                 bias: bool,
                 padding_mode: str,
                 scaling: float) -> None:
        super(_DclsNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
                valid_padding_modes, padding_mode))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_count = kernel_count
        self.stride = stride
        self.padding = padding
        self.dilated_kernel_size = dilated_kernel_size
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.scaling = scaling
        self.padding_mode = padding_mode
        # `_reversed_padding_repeated_twice` is the padding to be passed to
        # `F.pad` if needed (e.g., for non-zero padding types that are
        # implemented as two ops: padding + conv). `F.pad` accepts paddings in
        # reverse order than the dimension.
        self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, kernel_count))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, kernel_count))
        if bias:
            self.bias = Parameter(torch.empty(out_channels))
        else:
            self.register_parameter('bias', None)

        self.P = Parameter(torch.Tensor(len(dilated_kernel_size),
                                        out_channels,
                                        in_channels // groups,
                                        kernel_count))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
        for i in range(len(self.dilated_kernel_size)):
            lim = self.dilated_kernel_size[i] // 2
            with torch.no_grad():
                init.normal_(self.P.select(0, i), 0, 0.5).clamp(-lim, lim).div_(self.scaling)

    def clamp_parameters(self) -> None:
        for i in range(len(self.dilated_kernel_size)):
            with torch.no_grad():
                lim = self.dilated_kernel_size[i] // 2
                self.P.select(0, i).clamp_(-lim, lim)

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_count={kernel_count} (previous kernel_size)'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilated_kernel_size != (1,) * len(self.dilated_kernel_size):
            s += ', dilated_kernel_size={dilated_kernel_size} (learnable)'
        if self.scaling != 1.0:
            s += ', scaling={scaling} (applied scaling)'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        if (self.in_channels == self.out_channels == self.groups
                and self.padding[0] == self.dilated_kernel_size[0] // 2):
            s += ', (using DepthWiseConv2dImplicitGEMMFP32)'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super(_DclsNd, self).__setstate__(state)
        if not hasattr(self, 'padding_mode'):
            self.padding_mode = 'zeros'


class _DclsN_Md(Module):

    __constants__ = ['dim_dilation', 'stride', 'padding', 'dilated_kernel_size',
                     'groups', 'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size', 'scaling']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        ...

    _in_channels: int
    out_channels: int
    kernel_size: Tuple[int, ...]
    stride: Tuple[int, ...]
    padding: Tuple[int, ...]
    dilated_kernel_size: Tuple[int, ...]
    dim_dilation: Tuple[int, ...]
    transposed: bool
    output_padding: Tuple[int, ...]
    groups: int
    padding_mode: str
    weight: Tensor
    bias: Optional[Tensor]
    scaling: float

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 # Fix: the subclasses pass a kernel-size tuple here, but the
                 # original declared `kernel_count: int` and then referenced an
                 # undefined `kernel_size` below; the parameter is renamed to match.
                 kernel_size: Tuple[int, ...],
                 stride: Tuple[int, ...],
                 padding: Tuple[int, ...],
                 dilated_kernel_size: Tuple[int, ...],
                 dim_dilation: Tuple[int, ...],
                 transposed: bool,
                 output_padding: Tuple[int, ...],
                 groups: int,
                 bias: bool,
                 padding_mode: str,
                 scaling: float = 1.0) -> None:  # fix: default added; Dcls2_1d/Dcls3_2d omit it
        super(_DclsN_Md, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
                valid_padding_modes, padding_mode))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilated_kernel_size = dilated_kernel_size
        self.dim_dilation = dim_dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.scaling = scaling
        self.padding_mode = padding_mode
        # `_reversed_padding_repeated_twice` is the padding to be passed to
        # `F.pad` if needed (e.g., for non-zero padding types that are
        # implemented as two ops: padding + conv). `F.pad` accepts paddings in
        # reverse order than the dimension.
        self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.empty(out_channels))
        else:
            self.register_parameter('bias', None)

        self.P = Parameter(torch.Tensor(len(dim_dilation),
                                        out_channels,
                                        in_channels // groups,
                                        *kernel_size))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
        for i in range(len(self.dim_dilation)):
            lim = self.kernel_size[i] // 2
            with torch.no_grad():
                # fix: the original divided by an undefined bare `scaling`
                init.normal_(self.P.select(0, i), 0, 0.5).clamp(-lim, lim).div_(self.scaling)

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        # fix: the original tested undefined `self.dilation` / `self.gain` here;
        # the stored attributes are `dilated_kernel_size` and `scaling`
        if self.dilated_kernel_size != (1,) * len(self.dilated_kernel_size):
            s += ', dilation_max={dilated_kernel_size} (learnable along {dim_dilation})'
        if self.scaling != 1.0:
            s += ', scaling={scaling} (an extra multiplicative factor is applied to scaling)'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super(_DclsN_Md, self).__setstate__(state)
        if not hasattr(self, 'padding_mode'):
            self.padding_mode = 'zeros'


class Dcls1d(_DclsNd):
    __doc__ = r"""Applies a 1D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
    precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
        \star \text{input}(N_i, k)

    where :math:`\star` is the valid `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`L` is a length of signal sequence.
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a one-element tuple.

    * :attr:`padding` controls the amount of implicit padding on both sides
      for :attr:`padding` number of points.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """ + r"""

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                        \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels},
            \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``, then the values of
            these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`

    Examples::

        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_count: int,
        stride: _size_1_t = 1,
        padding: _size_1_t = 0,
        dilated_kernel_size: _size_1_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',  # TODO: refine this type
        scaling: float = 1.0
    ):
        # we create new variables below to make mypy happy since kernel_size has
        # type Union[int, Tuple[int]] and kernel_size_ has type Tuple[int]
        stride_ = _single(stride)
        padding_ = _single(padding)
        dilated_kernel_size_ = _single(dilated_kernel_size)
        super(Dcls1d, self).__init__(
            in_channels, out_channels, kernel_count, stride_, padding_, dilated_kernel_size_,
            False, _single(0), groups, bias, padding_mode, scaling)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor], P: Tensor):
        if self.padding_mode != 'zeros':
            return F.conv1d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            SD.ConstructKernel1d.apply(weight, P, self.dilated_kernel_size, self.scaling),
                            bias, self.stride, _single(0), _single(1), self.groups)
        return F.conv1d(input,
                        SD.ConstructKernel1d.apply(weight, P, self.dilated_kernel_size, self.scaling),
                        bias, self.stride, self.padding, _single(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias, self.P.select(0, 0))


class Dcls2d(_DclsNd):
    __doc__ = r"""Applies a 2D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}},
    W_{\text{out}})` can be precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k)
        \star \text{input}(N_i, k)

    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`H` is a height of input planes in pixels, and :math:`W` is
    width in pixels.
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a tuple.

    * :attr:`padding` controls the amount of implicit padding on both sides
      for :attr:`padding` number of points for each dimension.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_count`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_count (int): Number of elements in the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilated_kernel_size (int or tuple, optional): Size of dilated kernel. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """ + r"""

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples:

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_count: int,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilated_kernel_size: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',  # TODO: refine this type
        scaling: float = 1.0
    ):
        stride_ = _pair(stride)
        padding_ = _pair(padding)
        dilated_kernel_size_ = _pair(dilated_kernel_size)
        super(Dcls2d, self).__init__(
            in_channels, out_channels, kernel_count, stride_, padding_, dilated_kernel_size_,
            False, _pair(0), groups, bias, padding_mode, scaling)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor],
                      P1: Tensor, P2: Tensor):
        if (self.in_channels == self.out_channels == self.groups
                and self.padding[0] == self.dilated_kernel_size[0] // 2):
            if input.dtype == torch.float32:
                x = _DepthWiseConv2dImplicitGEMMFP32.apply(
                    input,
                    SD.ConstructKernel2d.apply(weight, P1, P2, self.dilated_kernel_size, self.scaling))
            # fix: the original tested `x.dtype` before `x` was assigned
            elif input.dtype == torch.float16:
                x = _DepthWiseConv2dImplicitGEMMFP16.apply(
                    input,
                    SD.ConstructKernel2d.apply(weight, P1, P2, self.dilated_kernel_size, self.scaling))
            else:
                raise TypeError("Only support fp32 and fp16, get {}".format(input.dtype))
            if self.bias is not None:
                x = x + self.bias.to(x).view(1, -1, 1, 1)
            return x
        else:
            if self.padding_mode != 'zeros':
                return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                                SD.ConstructKernel2d.apply(weight, P1, P2, self.dilated_kernel_size, self.scaling),
                                bias, self.stride, _pair(0), _pair(1), self.groups)
            return F.conv2d(input,
                            SD.ConstructKernel2d.apply(weight, P1, P2, self.dilated_kernel_size, self.scaling),
                            bias, self.stride, self.padding, _pair(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias,
                                  self.P.select(0, 0), self.P.select(0, 1))


class Dcls3d(_DclsNd):
    __doc__ = r"""Applies a 3D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{in}, D, H, W)` and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
    can be precisely described as:

    .. math::
        out(N_i, C_{out_j}) = bias(C_{out_j}) +
        \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)

    where :math:`\star` is the valid 3D `cross-correlation`_ operator
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit padding on both sides
      for :attr:`padding` number of points for each dimension.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to all three sides
            of the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """ + r"""

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
                        \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_count: int,
        stride: _size_3_t = 1,
        padding: _size_3_t = 0,
        dilated_kernel_size: _size_3_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        scaling: float = 1.0
    ):
        stride_ = _triple(stride)
        padding_ = _triple(padding)
        dilated_kernel_size_ = _triple(dilated_kernel_size)
        super(Dcls3d, self).__init__(
            in_channels, out_channels, kernel_count, stride_, padding_, dilated_kernel_size_,
            False, _triple(0), groups, bias, padding_mode, scaling)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor],
                      P1: Tensor, P2: Tensor, P3: Tensor):
        if self.padding_mode != 'zeros':
            return F.conv3d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            SD.ConstructKernel3d.apply(weight, P1, P2, P3, self.dilated_kernel_size, self.scaling),
                            bias, self.stride, _triple(0), _triple(1), self.groups)
        return F.conv3d(input,
                        SD.ConstructKernel3d.apply(weight, P1, P2, P3, self.dilated_kernel_size, self.scaling),
                        bias, self.stride, self.padding, _triple(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias,
                                  self.P.select(0, 0), self.P.select(0, 1), self.P.select(0, 2))


class Dcls2_1d(_DclsN_Md):
    __doc__ = r"""Applies a 2D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}},
    W_{\text{out}})` can be precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k)
        \star \text{input}(N_i, k)

    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`H` is a height of input planes in pixels, and :math:`W` is
    width in pixels.
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a tuple.

    * :attr:`padding` controls the amount of implicit padding on both sides
      for :attr:`padding` number of points for each dimension.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """ + r"""

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples:

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        dim_dilation: _size_1_t = 0,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'  # TODO: refine this type
    ):
        def _adjust_padding(padding, dilation):
            if (type(padding) == tuple):
                return (padding[0] + dilation[0] // 2, padding[1])
            else:
                return _pair(padding + dilation // 2)

        kernel_size_ = _pair(kernel_size)
        stride_ = _pair(stride)
        padding_ = _adjust_padding(padding, dilation)
        dilation_ = _pair(dilation)
        super(Dcls2_1d, self).__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            # fix: the original passed the raw `dim_dilation` (an int by default),
            # which breaks len() in the base class
            _single(dim_dilation),
            False, _pair(0), groups, bias, padding_mode)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor], P1: Tensor):
        # fix: the original referenced `self.dilation`, which the base class does
        # not define; the dilation tuple is stored as `dilated_kernel_size`
        if self.padding_mode != 'zeros':
            return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            SD.ConstructKernel2_1d.apply(weight, P1, self.dilated_kernel_size),
                            bias, self.stride, _pair(0), _pair(1), self.groups)
        return F.conv2d(input,
                        SD.ConstructKernel2_1d.apply(weight, P1, self.dilated_kernel_size),
                        bias, self.stride, self.padding, _pair(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias, self.P.select(0, 0))


class Dcls3_1d(_DclsN_Md):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_3_t,
        stride: _size_3_t = 1,
        padding: _size_3_t = 0,
        dilation: _size_3_t = 1,
        dim_dilation: _size_1_t = 0,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        gain: float = 1.0
    ):
        kernel_size_ = _triple(kernel_size)
        stride_ = _triple(stride)
        padding_ = _triple(padding)
        dilation_ = _triple(dilation)
        dim_dilation_ = _triple(dim_dilation)
        super(Dcls3_1d, self).__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            dim_dilation_, False, _triple(0), groups, bias, padding_mode, gain)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor], P: Tensor):
        if self.padding_mode != 'zeros':
            return F.conv3d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            SD.ConstructKernel3_1d.apply(weight, P, self.dilated_kernel_size),
                            bias, self.stride, _triple(0), _triple(1), self.groups)
        return F.conv3d(input,
                        SD.ConstructKernel3_1d.apply(weight, P, self.dilated_kernel_size),
                        bias, self.stride, self.padding, _triple(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias, self.P.select(0, 0))


class Dcls3_2d(_DclsN_Md):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_3_t,
        stride: _size_3_t = 1,
        padding: _size_3_t = 0,
        dilation: _size_3_t = 1,
        dim_dilation: _size_2_t = (0, 1),
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros'
    ):
        def _adjust_padding(padding, dilation):
            if (type(padding) == tuple):
                return (padding[0] + dilation[0] // 2,
                        padding[1] + dilation[1] // 2,
                        padding[2])
            else:
                return _triple(padding + dilation // 2)

        kernel_size_ = _triple(kernel_size)
        stride_ = _triple(stride)
        # fix: the original called _adjust_padding() with no arguments
        padding_ = _adjust_padding(padding, dilation)
        dilation_ = _triple(dilation)
        # fix: the original called super(Dcls3_1d, self) here
        super(Dcls3_2d, self).__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            dim_dilation, False, _triple(0), groups, bias, padding_mode)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor],
                      P1: Tensor, P2: Tensor):
        if self.padding_mode != 'zeros':
            return F.conv3d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            SD.ConstructKernel3_2d.apply(weight, P1, P2, self.dilated_kernel_size),
                            bias, self.stride, _triple(0), _triple(1), self.groups)
        return F.conv3d(input,
                        SD.ConstructKernel3_2d.apply(weight, P1, P2, self.dilated_kernel_size),
                        bias, self.stride, self.padding, _triple(1), self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias,
                                  self.P.select(0, 0), self.P.select(0, 1))
avg_line_length: 43.719393
max_line_length: 134
alphanum_fraction: 0.603152
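
A quick smoke-test sketch for the modules above, assuming the DCLS package and its compiled extensions are installed (the kernel-construction ops typically expect a CUDA device); the shapes follow the constructor signature, with kernel_count elements scattered inside a dilated_kernel_size window:

# Hypothetical smoke test: 9 learnable kernel elements in a 7x7 window.
m = Dcls2d(16, 32, kernel_count=9, dilated_kernel_size=7, padding=3)
x = torch.randn(2, 16, 32, 32)
y = m(x)                 # kernel positions m.P are learned jointly with m.weight
print(y.shape)           # expected: torch.Size([2, 32, 32, 32])
m.clamp_parameters()     # keeps learned positions inside the dilated window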
hexsha: 4a0774c0389b128a618b1cebc893c38a906c38d5
size: 6,052
ext: py
lang: Python
max_stars_repo_path: nlptoolkit/classification/models/BERT/train_funcs.py
max_stars_repo_name: jackashore/NLP_Toolkit
max_stars_repo_head_hexsha: e5bd8bcfad87f4906c45e66351adf93bd5c2727f
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: nlptoolkit/classification/models/BERT/train_funcs.py
max_issues_repo_name: jackashore/NLP_Toolkit
max_issues_repo_head_hexsha: e5bd8bcfad87f4906c45e66351adf93bd5c2727f
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: nlptoolkit/classification/models/BERT/train_funcs.py
max_forks_repo_name: jackashore/NLP_Toolkit
max_forks_repo_head_hexsha: e5bd8bcfad87f4906c45e66351adf93bd5c2727f
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 17:44:38 2019

@author: WT
"""
import os
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from .preprocessing_funcs import preprocess, load_pickle
import logging
from tqdm import tqdm

logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)


def load_dataloaders(args):
    train_path = "./data/train_processed.pkl"
    test_path = "./data/infer_processed.pkl"
    if os.path.isfile(train_path) and os.path.isfile(test_path):
        df_train = pd.read_pickle(train_path)
        df_test = pd.read_pickle(test_path)
        logger.info("Loaded preprocessed data.")
    else:
        logger.info("Preprocessing...")
        preprocess(args)
        df_train = pd.read_pickle(train_path)
        df_test = pd.read_pickle(test_path)
    train_set = sentiments(df_train, tokens_length=args.tokens_length, labels=True)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                              num_workers=0, pin_memory=False)
    if args.train_test_split == 1:
        test_set = sentiments(df_test, tokens_length=args.tokens_length, labels=True)
    else:
        test_set = sentiments(df_test, tokens_length=args.tokens_length, labels=False)
    test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False,
                             num_workers=0, pin_memory=False)
    del df_train, df_test
    return train_loader, test_loader, len(train_set)


class sentiments(Dataset):
    def __init__(self, df, tokens_length=300, labels=True):
        self.X = torch.tensor(df["text"], requires_grad=False)
        self.labels = labels
        if self.labels == True:
            self.y = torch.tensor(df["label"], requires_grad=False)
        self.type = torch.zeros([len(df["text"]), tokens_length], requires_grad=False).long()
        s = torch.ones([len(df["text"]), tokens_length], requires_grad=False).long()
        for i in range(len(s)):
            if df["fills"].loc[i] != 0:
                s[i, -df["fills"].loc[i]:] = 0
        self.mask = s

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        if self.labels == True:
            return self.X[idx], self.type[idx], self.mask[idx], self.y[idx]
        else:
            return self.X[idx], self.type[idx], self.mask[idx], 0


def load_state(net, optimizer, scheduler, args, load_best=False):
    """ Loads saved model and optimizer states if exists """
    base_path = "./data/"
    checkpoint_path = os.path.join(base_path, "test_checkpoint_%d.pth.tar" % args.model_no)
    best_path = os.path.join(base_path, "test_model_best_%d.pth.tar" % args.model_no)
    start_epoch, best_pred, checkpoint = 0, 0, None
    if (load_best == True) and os.path.isfile(best_path):
        checkpoint = torch.load(best_path)
        logger.info("Loaded best model.")
    elif os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        logger.info("Loaded checkpoint model.")
    if checkpoint != None:
        start_epoch = checkpoint['epoch']
        best_pred = checkpoint['best_acc']
        net.load_state_dict(checkpoint['state_dict'])
        if optimizer is not None:
            optimizer.load_state_dict(checkpoint['optimizer'])
        if scheduler is not None:
            scheduler.load_state_dict(checkpoint['scheduler'])
        logger.info("Loaded model and optimizer.")
    return start_epoch, best_pred


def load_results(args):
    """ Loads saved results if exists """
    losses_path = "./data/test_losses_per_epoch_%d.pkl" % args.model_no
    accuracy_path = "./data/test_accuracy_per_epoch_%d.pkl" % args.model_no
    if os.path.isfile(losses_path) and os.path.isfile(accuracy_path):
        losses_per_epoch = load_pickle("test_losses_per_epoch_%d.pkl" % args.model_no)
        accuracy_per_epoch = load_pickle("test_accuracy_per_epoch_%d.pkl" % args.model_no)
        logger.info("Loaded results buffer")
    else:
        losses_per_epoch, accuracy_per_epoch = [], []
    return losses_per_epoch, accuracy_per_epoch


def model_eval(net, test_loader, cuda=None):
    correct = 0
    total = 0
    print("Evaluating...")
    with torch.no_grad():
        net.eval()
        for data in tqdm(test_loader):
            images, token_type, mask, labels = data
            if cuda:
                images, token_type, mask, labels = images.cuda(), token_type.cuda(), mask.cuda(), labels.cuda()
            images = images.long()
            labels = labels.long()
            outputs = net(images, token_type_ids=token_type, attention_mask=mask)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print("Accuracy of the network on the %d test data points: %d %%" % (total,
          100 * correct / total))
    return 100 * correct / total


def infer(infer_loader, net):
    logger.info("Evaluating on inference data...")
    cuda = next(net.parameters()).is_cuda
    net.eval()
    preds = []
    with torch.no_grad():
        for i, data in tqdm(enumerate(infer_loader, 0), total=len(infer_loader)):
            inputs, token_type, mask, _ = data
            if cuda:
                inputs, token_type, mask = inputs.cuda(), token_type.cuda(), mask.cuda()
            inputs = inputs.long()
            outputs = net(inputs, token_type_ids=token_type, attention_mask=mask)
            _, predicted = torch.max(outputs.data, 1)
            predicted = list(predicted.cpu().numpy()) if cuda else list(predicted.numpy())
            preds.extend(predicted)
    df_results = pd.DataFrame(columns=["index", "predicted_label"])
    df_results.loc[:, "index"] = [i for i in range(len(preds))]
    df_results.loc[:, "predicted_label"] = preds
    df_results.to_csv("./data/results.csv", columns=df_results.columns, index=False)
    return df_results
43.855072
115
0.645406
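The record above defines the full train/infer pipeline helpers. A minimal sketch of driving them, assuming the preprocessed pickles under ./data/ already exist (so preprocess() is not triggered); the args field values are hypothetical, with names inferred from the attribute accesses in the code:

from argparse import Namespace

# Hypothetical argument values; field names inferred from the code above.
args = Namespace(tokens_length=300, batch_size=32, train_test_split=1, model_no=0)
train_loader, test_loader, train_size = load_dataloaders(args)
for X, token_type, mask, y in train_loader:
    print(X.shape, mask.shape, y.shape)  # one batch of token ids, attention masks, labels
    break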
4a07752c2cb82a06c340f465bc5c580896aa370a
4,545
py
Python
strings/strip_url_params.py
n33t1/Algorithms
80a59c2fd3860a35a20919f59160d1408c69b29a
[ "MIT" ]
null
null
null
strings/strip_url_params.py
n33t1/Algorithms
80a59c2fd3860a35a20919f59160d1408c69b29a
[ "MIT" ]
null
null
null
strings/strip_url_params.py
n33t1/Algorithms
80a59c2fd3860a35a20919f59160d1408c69b29a
[ "MIT" ]
null
null
null
""" Write a function that does the following: Removes any duplicate query string parameters from the url Removes any query string parameters specified within the 2nd argument (optional array) An example: www.saadbenn.com?a=1&b=2&a=2') // returns 'www.saadbenn.com?a=1&b=2' """ import unittest from collections import defaultdict import urllib import urllib.parse # Here is a very non-pythonic grotesque solution def strip_url_params1(url, params_to_strip=None): if not params_to_strip: params_to_strip = [] if url: result = '' # final result to be returned tokens = url.split('?') domain = tokens[0] query_string = tokens[-1] result += domain # add the '?' to our result if it is in the url if len(tokens) > 1: result += '?' if not query_string: return url else: # logic for removing duplicate query strings # build up the list by splitting the query_string using digits key_value_string = [] string = '' for char in query_string: if char.isdigit(): key_value_string.append(string + char) string = '' else: string += char dict = defaultdict(int) # logic for checking whether we should add the string to our result for i in key_value_string: _token = i.split('=') if _token[0]: length = len(_token[0]) if length == 1: if _token and (not(_token[0] in dict)): if params_to_strip: if _token[0] != params_to_strip[0]: dict[_token[0]] = _token[1] result = result + _token[0] + '=' + _token[1] else: if not _token[0] in dict: dict[_token[0]] = _token[1] result = result + _token[0] + '=' + _token[1] else: check = _token[0] letter = check[1] if _token and (not(letter in dict)): if params_to_strip: if letter != params_to_strip[0]: dict[letter] = _token[1] result = result + _token[0] + '=' + _token[1] else: if not letter in dict: dict[letter] = _token[1] result = result + _token[0] + '=' + _token[1] return result # A very friendly pythonic solution (easy to follow) def strip_url_params2(url, param_to_strip=[]): if '?' not in url: return url queries = (url.split('?')[1]).split('&') queries_obj = [query[0] for query in queries] for i in range(len(queries_obj) - 1, 0, -1): if queries_obj[i] in param_to_strip or queries_obj[i] in queries_obj[0:i]: queries.pop(i) return url.split('?')[0] + '?' + '&'.join(queries) # Here is my friend's solution using python's builtin libraries def strip_url_params3(url, strip=None): if not strip: strip = [] parse = urllib.parse.urlparse(url) query = urllib.parse.parse_qs(parse.query) query = {k: v[0] for k, v in query.items() if k not in strip} query = urllib.parse.urlencode(query) new = parse._replace(query=query) return new.geturl() class TestSuite(unittest.TestCase): def test_strip_url_params1(self): self.assertEqual(strip_url_params1("www.saadbenn.com?a=1&b=2&a=2"), "www.saadbenn.com?a=1&b=2") self.assertEqual(strip_url_params1("www.saadbenn.com?a=1&b=2", ['b']), "www.saadbenn.com?a=1") def test_strip_url_params2(self): self.assertEqual(strip_url_params2("www.saadbenn.com?a=1&b=2&a=2"), "www.saadbenn.com?a=1&b=2") self.assertEqual(strip_url_params2("www.saadbenn.com?a=1&b=2", ['b']), "www.saadbenn.com?a=1") def test_strip_url_params3(self): self.assertEqual(strip_url_params3("www.saadbenn.com?a=1&b=2&a=2"), "www.saadbenn.com?a=1&b=2") self.assertEqual(strip_url_params3("www.saadbenn.com?a=1&b=2", ['b']), "www.saadbenn.com?a=1") if __name__ == "__main__": unittest.main()
38.193277
103
0.528933
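A quick usage sketch of the solutions in the record above, using the URL from its docstring (expected outputs in comments):

url = "www.saadbenn.com?a=1&b=2&a=2"
print(strip_url_params2(url))           # www.saadbenn.com?a=1&b=2
print(strip_url_params2(url, ['b']))    # www.saadbenn.com?a=1
print(strip_url_params3(url, ['a']))    # www.saadbenn.com?b=2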
4a0775fbd16977b09df48441431cec3a4721a28f
707
py
Python
Python-Unit-Testing/employee.py
HenkICT/Python_Unit_Testing_3
13b05a0a1b3928ad3235d7d5ed5971cf124aafee
[ "MIT" ]
null
null
null
Python-Unit-Testing/employee.py
HenkICT/Python_Unit_Testing_3
13b05a0a1b3928ad3235d7d5ed5971cf124aafee
[ "MIT" ]
null
null
null
Python-Unit-Testing/employee.py
HenkICT/Python_Unit_Testing_3
13b05a0a1b3928ad3235d7d5ed5971cf124aafee
[ "MIT" ]
null
null
null
import requests


class Employee3:
    """A sample Employee class"""

    raise_amt = 1.05

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay

    @property
    def email(self):
        return "{}.{}@email.com".format(self.first, self.last)

    @property
    def fullname(self):
        return "{} {}".format(self.first, self.last)

    def apply_raise(self):
        self.pay = int(self.pay * self.raise_amt)

    def monthly_schedule(self, month):
        response = requests.get(f"http://company.com/{self.last}/{month}")
        if response.ok:
            return response.text
        else:
            return "Bad Response!"
22.806452
74
0.582744
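Since this record comes from a unit-testing exercise, a sketch of testing monthly_schedule without real network access, using the standard library's unittest.mock (the asserted URL follows the f-string in the class above):

from unittest.mock import patch

emp = Employee3('John', 'Doe', 50000)
with patch('requests.get') as mocked_get:   # intercept the real HTTP call
    mocked_get.return_value.ok = True
    mocked_get.return_value.text = 'Success'
    assert emp.monthly_schedule('May') == 'Success'
    mocked_get.assert_called_with('http://company.com/Doe/May')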
4a0777b4c989b328d258c91f97eeb5ae59991d4c
715
py
Python
ssvos/utils/dist_utils.py
wuyongfa-genius/SSVOS_mindspore
9f5f6bb29d9fc78d5dbb4e4b163b597887b03c47
[ "MIT" ]
1
2021-12-30T08:54:43.000Z
2021-12-30T08:54:43.000Z
ssvos/utils/dist_utils.py
wuyongfa-genius/SSVOS_mindspore
9f5f6bb29d9fc78d5dbb4e4b163b597887b03c47
[ "MIT" ]
null
null
null
ssvos/utils/dist_utils.py
wuyongfa-genius/SSVOS_mindspore
9f5f6bb29d9fc78d5dbb4e4b163b597887b03c47
[ "MIT" ]
null
null
null
"""Some distribute training utils.""" import os from mindspore import context from mindspore.context import ParallelMode from mindspore import communication as dist def init_dist(parallel_mode=ParallelMode.DATA_PARALLEL): device_id = int(os.getenv('DEVICE_ID')) rank_size = int(os.getenv('RANK_SIZE')) context.set_context(device_id=device_id) context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=rank_size) dist.init() return dist.get_rank(), dist.get_group_size() # if __name__=="__main__": # rank, group_size = init_dist() # print(rank) # print(group_size)
28.6
66
0.678322
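A hedged usage sketch for the record above: DEVICE_ID and RANK_SIZE are assumed to be exported by the distributed launcher before the script starts, so the call is guarded here rather than crashing on int(None):

import os

# Hypothetical entry point; the launcher is assumed to export these variables.
if os.getenv('DEVICE_ID') is not None and os.getenv('RANK_SIZE') is not None:
    rank, group_size = init_dist()
    print('rank {} of {}'.format(rank, group_size))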
4a0777d8eda7d56b5e81ec3e8d961fe557a92c4e
251
py
Python
read.py
dezounet/google_hash_code
48aa82b8b07eb257c91beeb4201d5c39d103e338
[ "MIT" ]
null
null
null
read.py
dezounet/google_hash_code
48aa82b8b07eb257c91beeb4201d5c39d103e338
[ "MIT" ]
null
null
null
read.py
dezounet/google_hash_code
48aa82b8b07eb257c91beeb4201d5c39d103e338
[ "MIT" ]
null
null
null
def read(filename):
    header = None
    lines = []

    with open(filename) as f:
        for line in f:
            if header is None:
                header = line
            else:
                lines.append(line)

    # TODO
    return None
15.6875
34
0.462151
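The read() above parses the file but still returns None (its TODO). A minimal completion sketch, assuming the caller wants the header and body lines back; read_completed is a hypothetical name, not part of the original repo:

def read_completed(filename):
    """Sketch of a finished variant: returns the header line and the remaining lines."""
    header = None
    lines = []
    with open(filename) as f:
        for line in f:
            if header is None:
                header = line.rstrip('\n')
            else:
                lines.append(line.rstrip('\n'))
    return header, lines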
4a07783bcae2184410744c1f1fba3afc943e4f21
29,418
py
Python
pypower/mesh.py
adematti/pypower
e037ccc96b9c8d917e2918ddeba2fc30c6d65067
[ "BSD-3-Clause" ]
10
2021-11-09T01:59:36.000Z
2022-01-20T08:47:17.000Z
pypower/mesh.py
adematti/pypower
e037ccc96b9c8d917e2918ddeba2fc30c6d65067
[ "BSD-3-Clause" ]
1
2021-12-09T08:51:59.000Z
2022-01-11T22:20:08.000Z
pypower/mesh.py
adematti/pypower
e037ccc96b9c8d917e2918ddeba2fc30c6d65067
[ "BSD-3-Clause" ]
null
null
null
"""Implementation of methods to paint a catalog on mesh; workhorse is :class:`CatalogMesh`.""" import numpy as np from mpi4py import MPI from pmesh.pm import ParticleMesh from pmesh.window import FindResampler, ResampleWindow from .utils import BaseClass, _make_array, _get_box from .direct_power import _format_positions, _format_weights from . import mpi def _get_real_dtype(dtype): # Return real-dtype equivalent return np.empty(0, dtype=dtype).real.dtype def _get_resampler(resampler): # Return :class:`ResampleWindow` from string or :class:`ResampleWindow` instance if isinstance(resampler, ResampleWindow): return resampler conversions = {'ngp': 'nnb', 'cic': 'cic', 'tsc': 'tsc', 'pcs': 'pcs'} if resampler not in conversions: raise ValueError('Unknown resampler {}, choices are {}'.format(resampler, list(conversions.keys()))) resampler = conversions[resampler] return FindResampler(resampler) def _get_resampler_name(resampler): # Translate input :class:`ResampleWindow` instance to string conversions = {'nearest': 'ngp', 'tunednnb': 'ngp', 'tunedcic': 'cic', 'tunedtsc': 'tsc', 'tunedpcs': 'pcs'} return conversions[resampler.kind] def _get_compensation_window(resampler='cic', shotnoise=False): r""" Return the compensation function, which corrects for the particle-mesh assignment (resampler) kernel. Taken from https://github.com/bccp/nbodykit/blob/master/nbodykit/source/mesh/catalog.py, following https://arxiv.org/abs/astro-ph/0409240. ("shotnoise" formula for pcs has been checked with WolframAlpha). Parameters ---------- resampler : string, default='cic' Resampler used to assign particles to the mesh. Choices are ['ngp', 'cic', 'tcs', 'pcs']. shotnoise : bool, default=False If ``False``, return expression for eq. 18 in https://arxiv.org/abs/astro-ph/0409240. This the correct choice when applying interlacing, as aliased images (:math:`\mathbf{n} \neq (0,0,0)`) are suppressed in eq. 17. If ``True``, return expression for eq. 19. Returns ------- window : callable Window function, taking as input :math:`\pi k_{i} / k_{N} = k / c` where :math:`k_{N}` is the Nyquist wavenumber and :math:`c` is the cell size, for each :math:`x`, :math:`y`, :math:`z`, axis. """ resampler = resampler.lower() if shotnoise: if resampler == 'ngp': def window(*x): return 1. elif resampler == 'cic': def window(*x): toret = 1. for xi in x: toret = toret * (1 - 2. / 3 * np.sin(0.5 * xi) ** 2) ** 0.5 return toret elif resampler == 'tsc': def window(*x): toret = 1. for xi in x: s = np.sin(0.5 * xi)**2 toret = toret * (1 - s + 2. / 15 * s**2) ** 0.5 return toret elif resampler == 'pcs': def window(*x): toret = 1. for xi in x: s = np.sin(0.5 * xi)**2 toret = toret * (1 - 4. / 3. * s + 2. / 5. * s**2 - 4. / 315. * s**3) ** 0.5 return toret else: p = {'ngp': 1, 'cic': 2, 'tsc': 3, 'pcs': 4}[resampler] def window(*x): toret = 1. for xi in x: toret = toret * np.sinc(0.5 / np.pi * xi) ** p return toret return window def _wrap_positions(array, boxsize, offset=0.): return (array - offset) % boxsize + offset def _get_mesh_attrs(nmesh=None, boxsize=None, boxcenter=None, cellsize=None, positions=None, boxpad=2., check=True, mpicomm=mpi.COMM_WORLD): """ Compute enclosing box. Parameters ---------- nmesh : array, int, default=None Mesh size, i.e. number of mesh nodes along each axis. If not provided, see ``value``. boxsize : float, default=None Physical size of the box. If not provided, see ``positions``. boxcenter : array, float, default=None Box center. If not provided, see ``positions``. 
cellsize : array, float, default=None Physical size of mesh cells. If not ``None``, ``boxsize`` is ``None`` and mesh size ``nmesh`` is not ``None``, used to set ``boxsize`` to ``nmesh * cellsize``. If ``nmesh`` is ``None``, it is set to (the nearest integer(s) to) ``boxsize / cellsize`` if ``boxsize`` is provided, else to the nearest even integer to ``boxsize / cellsize``, and ``boxsize`` is then reset to ``nmesh * cellsize``. positions : (list of) (N, 3) arrays, default=None If ``boxsize`` and / or ``boxcenter`` is ``None``, use this (list of) position arrays to determine ``boxsize`` and / or ``boxcenter``. boxpad : float, default=2. When ``boxsize`` is determined from ``positions``, take ``boxpad`` times the smallest box enclosing ``positions`` as ``boxsize``. check : bool, default=True If ``True``, and input ``positions`` (if provided) are not contained in the box, raise a :class:`ValueError`. mpicomm : MPI communicator, default=MPI.COMM_WORLD The MPI communicator. Returns ------- nmesh : array of shape (3,) Mesh size, i.e. number of mesh nodes along each axis. boxsize : array Physical size of the box. boxcenter : array Box center. """ provided_boxsize = boxsize is not None if not provided_boxsize or boxcenter is None or check: if positions is None: raise ValueError('positions must be provided if boxsize and boxcenter are not specified, or check is True') if not isinstance(positions, (tuple, list)): positions = [positions] # Find bounding coordinates pos_min, pos_max = _get_box(*positions) pos_min, pos_max = np.min(mpicomm.allgather(pos_min), axis=0), np.max(mpicomm.allgather(pos_max), axis=0) delta = np.abs(pos_max - pos_min) if boxcenter is None: boxcenter = 0.5 * (pos_min + pos_max) if boxsize is None: if cellsize is not None and nmesh is not None: boxsize = nmesh * cellsize else: boxsize = delta.max() * boxpad if check and (boxsize < delta).any(): raise ValueError('boxsize {} too small to contain all data (max {})'.format(boxsize, delta)) if nmesh is None: if cellsize is not None: nmesh = boxsize / cellsize if provided_boxsize: nmesh = np.rint(nmesh).astype('i8') else: nmesh = np.ceil(nmesh).astype('i8') nmesh += nmesh % 2 # to make it even boxsize = nmesh * cellsize # enforce exact cellsize else: raise ValueError('nmesh (or cellsize) must be specified') nmesh = _make_array(nmesh, 3, dtype='i4') boxsize = _make_array(boxsize, 3, dtype='f8') boxcenter = _make_array(boxcenter, 3, dtype='f8') return nmesh, boxsize, boxcenter def ArrayMesh(array, boxsize, nmesh=None, mpiroot=0, mpicomm=MPI.COMM_WORLD): """ Turn numpy array into :class:`pmesh.pm.RealField`. Parameters ---------- array : array Mesh numpy array gathered on ``mpiroot``. boxsize : array, float, default=None Physical size of the box along each axis. nmesh : array, int, default=None If ``mpiroot`` is ``None``, mesh size, i.e. number of mesh nodes along each axis. mpiroot : int, default=0 MPI rank where input array is gathered. If input array is scattered accross all ranks in C ordering, pass ``mpiroot = None`` and specify ``nmesh``. mpicomm : MPI communicator, default=MPI.COMM_WORLD The MPI communicator. 
Returns ------- mesh : pmesh.pm.RealField """ if mpiroot is None: dtype = array.dtype if nmesh is None: raise ValueError('In case input mesh is scattered accross all ranks, provide its shape (nmesh)') shape = _make_array(nmesh, 3, dtype='i8') else: if mpicomm.rank == mpiroot: dtype, shape = array.dtype, array.shape else: dtype, shape, array = None, None, None dtype = mpicomm.bcast(dtype, root=mpiroot) shape = mpicomm.bcast(shape, root=mpiroot) boxsize = _make_array(boxsize, 3, dtype='f8') pm = ParticleMesh(BoxSize=boxsize, Nmesh=shape, dtype=dtype, comm=mpicomm) mesh = pm.create(type='real') if mpiroot is None or mpicomm.rank == mpiroot: array = np.ravel(array) # ignore data from other ranks else: array = np.empty((0,), dtype=dtype) mesh.unravel(array) return mesh class CatalogMesh(BaseClass): """Class to paint catalog of positions and weights to mesh.""" _slab_npoints_max = int(1024 * 1024 * 4) def __init__(self, data_positions, data_weights=None, randoms_positions=None, randoms_weights=None, shifted_positions=None, shifted_weights=None, nmesh=None, boxsize=None, boxcenter=None, cellsize=None, boxpad=2., wrap=False, dtype='f8', resampler='tsc', interlacing=2, position_type='xyz', copy=False, mpiroot=None, mpicomm=MPI.COMM_WORLD): """ Initialize :class:`CatalogMesh`. Note ---- When running with MPI, input positions and weights are assumed to be scatted on all MPI ranks of ``mpicomm``. If this is not the case, use :func:`mpi.scatter_array`. Parameters ---------- data_positions : list, array Positions in the data catalog. Typically of shape (3, N) or (N, 3). data_weights : array of shape (N,), default=None Optionally, data weights. randoms_positions : list, array Positions in the randoms catalog. Typically of shape (3, N) or (N, 3). randoms_weights : array of shape (N,), default=None Randoms weights. shifted_positions : array, default=None Optionally, in case of BAO reconstruction, positions of the shifted catalog. shifted_weights : array, default=None Optionally, in case of BAO reconstruction, weigths of the shifted catalog. nmesh : array, int, default=None Mesh size, i.e. number of mesh nodes along each axis. boxsize : array, float, default=None Physical size of the box along each axis, defaults to maximum extent taken by all input positions, times ``boxpad``. boxcenter : array, float, default=None Box center, defaults to center of the Cartesian box enclosing all input positions. cellsize : array, float, default=None Physical size of mesh cells. If not ``None``, and mesh size ``nmesh`` is not ``None``, used to set ``boxsize`` as ``nmesh * cellsize``. If ``nmesh`` is ``None``, it is set as (the nearest integer(s) to) ``boxsize / cellsize``. wrap : bool, default=False Whether to wrap input positions? If ``False`` and input positions do not fit in the the box size, raise a :class:`ValueError`. boxpad : float, default=2. When ``boxsize`` is determined from ``positions``, take ``boxpad`` times the smallest box enclosing ``positions`` as ``boxsize``. dtype : string, dtype, default='f8' The data type to use for the mesh. Input ``positions`` and ``weights`` are cast to the corresponding (real) precision. resampler : string, ResampleWindow, default='tsc' Resampler used to assign particles to the mesh. Choices are ['ngp', 'cic', 'tcs', 'pcs']. interlacing : bool, int, default=2 Whether to use interlacing to reduce aliasing when painting the particles on the mesh. If positive int, the interlacing order (minimum: 2). 
position_type : string, default='xyz' Type of input positions, one of: - "pos": Cartesian positions of shape (N, 3) - "xyz": Cartesian positions of shape (3, N) - "rdd": RA/Dec in degree, distance of shape (3, N) copy : bool, default=False If ``False``, avoids copy of positions and weights if they are of (real) type ``dtype``, ``mpiroot`` is ``None``, and ``position_type`` is "pos" (for positions). Setting to ``True`` is only useful if one wants to modify positions or weights that have been passed as input while keeping those attached to the current mesh instance the same. mpiroot : int, default=None If ``None``, input positions and weights are assumed to be scatted across all ranks. Else the MPI rank where input positions and weights are gathered. mpicomm : MPI communicator, default=MPI.COMM_WORLD The MPI communicator. """ self.mpicomm = mpicomm self.dtype = np.dtype(dtype) self.rdtype = _get_real_dtype(self.dtype) self._set_positions(data_positions=data_positions, randoms_positions=randoms_positions, shifted_positions=shifted_positions, position_type=position_type, copy=copy, mpiroot=mpiroot) self._set_weights(data_weights=data_weights, randoms_weights=randoms_weights, shifted_weights=shifted_weights, copy=copy, mpiroot=mpiroot) self._set_box(boxsize=boxsize, cellsize=cellsize, nmesh=nmesh, boxcenter=boxcenter, boxpad=boxpad, wrap=wrap) self._set_resampler(resampler) self._set_interlacing(interlacing) def __repr__(self): """String representation of current mesh.""" info = ['{}={}'.format(name, getattr(self, name)) for name in ['nmesh', 'boxsize', 'boxcenter', 'dtype']] return '{}({})'.format(self.__class__.__name__, ', '.join(info)) @property def compensation(self): """Return dictionary specifying compensation scheme for particle-mesh resampling.""" return {'resampler': _get_resampler_name(self.resampler), 'shotnoise': not bool(self.interlacing)} def clone(self, data_positions=None, data_weights=None, randoms_positions=None, randoms_weights=None, shifted_positions=None, shifted_weights=None, boxsize=None, cellsize=None, nmesh=None, boxcenter=None, dtype=None, resampler=None, interlacing=None, position_type='xyz', mpicomm=None): """ Clone current instance, i.e. copy and set new positions and weights. Arguments 'boxsize', 'nmesh', 'boxcenter', 'dtype', 'resampler', 'interlacing', 'mpicomm', if ``None``, are overriden by those of the current instance. """ new = self.__class__.__new__(self.__class__) kwargs = {} loc = locals() for name in ['boxsize', 'nmesh', 'boxcenter', 'dtype', 'resampler', 'interlacing', 'mpicomm']: kwargs[name] = loc[name] if loc[name] is not None else getattr(self, name) if cellsize is not None: # if cellsize is provided, remove default nmesh or boxsize value from current instance. 
kwargs['cellsize'] = cellsize if nmesh is None: kwargs.pop('nmesh') elif boxsize is None: kwargs.pop('boxsize') new.__init__(data_positions=data_positions, data_weights=data_weights, randoms_positions=randoms_positions, randoms_weights=randoms_weights, shifted_positions=shifted_positions, shifted_weights=shifted_weights, position_type=position_type, **kwargs) return new def _set_interlacing(self, interlacing): self.interlacing = int(interlacing) if self.interlacing != interlacing: raise ValueError('Interlacing must be either bool (False, 0) or an integer >= 2') if self.interlacing == 1: if self.mpicomm.rank == 0: self.log_warning('Provided interlacing is {}; setting it to 2.'.format(interlacing)) self.interlacing = 2 def _set_box(self, nmesh=None, boxsize=None, cellsize=None, boxcenter=None, boxpad=2., wrap=False): # Set :attr:`nmesh`, :attr:`boxsize` and :attr:`boxcenter` positions = [self.data_positions] if self.with_randoms: positions += [self.randoms_positions] if self.with_shifted: positions += [self.shifted_positions] self.nmesh, self.boxsize, self.boxcenter = _get_mesh_attrs(nmesh=nmesh, boxsize=boxsize, cellsize=cellsize, boxcenter=boxcenter, positions=positions, boxpad=boxpad, check=not wrap, mpicomm=self.mpicomm) if wrap: for position in positions: _wrap_positions(position, self.boxsize, self.boxcenter - self.boxsize / 2.) def _set_positions(self, data_positions, randoms_positions=None, shifted_positions=None, position_type='xyz', copy=False, mpiroot=None): # Set data and optionally shifted and randoms positions, scattering on all ranks if not already if position_type is not None: position_type = position_type.lower() self.position_type = position_type for name in ['data', 'randoms', 'shifted']: positions_name = '{}_positions'.format(name) positions = locals()[positions_name] positions = _format_positions(positions, position_type=self.position_type, dtype=self.rdtype, copy=copy, mpicomm=self.mpicomm, mpiroot=mpiroot) setattr(self, positions_name, positions) if name == 'data' and positions is None: raise ValueError('Provide at least an array of data positions') size = 0 if positions is None else self.mpicomm.allreduce(len(positions)) setattr(self, '{}_size'.format(name), size) def _set_weights(self, data_weights, randoms_weights=None, shifted_weights=None, copy=False, mpiroot=None): # Set data and optionally shifted and randoms weights and their sum, scattering on all ranks if not already for name in ['data', 'randoms', 'shifted']: positions_name = '{}_positions'.format(name) positions = getattr(self, positions_name, None) weights_name = '{}_weights'.format(name) weights = locals()[weights_name] size = len(positions) if positions is not None else None weights = _format_weights(weights, weight_type='product_individual', dtype=self.rdtype, size=size, copy=copy, mpicomm=self.mpicomm, mpiroot=mpiroot)[0] weights = weights[0] if weights else None if size is None and weights is not None: raise ValueError('{} are provided, but not {}'.format(weights_name, positions_name)) setattr(self, weights_name, weights) if weights is None: if size is None: sum_weights = 0. else: sum_weights = self.mpicomm.allreduce(size) else: sum_weights = self.mpicomm.allreduce(sum(weights)) setattr(self, 'sum_{}'.format(weights_name), sum_weights) @property def with_randoms(self): """Whether randoms positions have been provided.""" return self.randoms_positions is not None @property def with_shifted(self): """Whether "shifted" positions have been provided (e.g. 
for reconstruction).""" return self.shifted_positions is not None def _set_resampler(self, resampler='cic'): # Set :attr:`resampler` self.resampler = _get_resampler(resampler=resampler) def to_mesh(self, field=None, dtype=None, compensate=False): """ Paint positions/weights to mesh. Parameters ---------- field : string, default=None Field to paint to mesh, one of: - "data": data positions and weights - "shifted": shifted positions and weights (available only if shifted positions are provided) - "randoms": randoms positions and weights - "data-normalized_shifted": shifted positions and weights, renormalized (by alpha) such that their sum is same as data weights - "data-normalized_randoms": randoms positions and weights, renormalized (by alpha) such that their sum is same as data weights - "fkp": FKP field, i.e. data - alpha * (shifted if provided else randoms) - ``None``: defaults to "data" if no shifted/randoms, else "fkp" dtype : string, dtype, default='f8' The data type of the mesh when painting, to override current :attr:`dtype`. compensate : bool, default=False Wether to apply compensation for particle-mesh assignment scheme. Returns ------- out : RealField Mesh, with values in "weights" units (not *normalized* as density). """ if dtype is None: dtype = self.dtype if field is None: field = 'fkp' if (self.with_randoms or self.with_shifted) else 'data' field = field.lower() allowed_fields = set(['data', 'normalized_data']) if self.with_shifted: allowed_fields |= set(['shifted', 'data-normalized_shifted', 'fkp']) if self.with_randoms: allowed_fields |= set(['randoms', 'data-normalized_randoms', 'fkp']) if field not in allowed_fields: raise ValueError('Unknown field {}. Choices are {}'.format(field, allowed_fields)) positions, weights = [], [] if field in ['data', 'fkp']: positions += [self.data_positions] weights += [(self.data_weights, None)] if field in ['normalized_data']: positions += [self.data_positions] weights += [(self.data_weights, self.nmesh.prod(dtype='f8') / self.sum_data_weights)] # mean mesh is 1 if field in ['fkp']: if self.with_shifted: positions += [self.shifted_positions] weights += [(self.shifted_weights, -self.sum_data_weights / self.sum_shifted_weights)] else: positions += [self.randoms_positions] weights += [(self.randoms_weights, -self.sum_data_weights / self.sum_randoms_weights)] if field in ['shifted', 'data-normalized_shifted']: positions += [self.shifted_positions] if field == 'data-normalized_shifted': weights += [(self.shifted_weights, self.sum_data_weights / self.sum_shifted_weights)] else: weights += [(self.shifted_weights, None)] if field in ['randoms', 'data-normalized_randoms']: positions += [self.randoms_positions] if field == 'data-normalized_randoms': weights += [(self.randoms_weights, self.sum_data_weights / self.sum_randoms_weights)] else: weights += [(self.randoms_weights, None)] pm = ParticleMesh(BoxSize=self.boxsize, Nmesh=self.nmesh, dtype=dtype, comm=self.mpicomm) offset = self.boxcenter - self.boxsize / 2. # offset = self.boxcenter # offset = 0. 
def paint(positions, weights, scaling, out, transform=None): positions = positions - offset factor = bool(self.interlacing) + 0.5 scalar_weights = weights is None if scaling is not None: if scalar_weights: weights = scaling else: weights = weights * scaling # We work by slab to limit memory footprint # Merely copy-pasted from https://github.com/bccp/nbodykit/blob/4aec168f176939be43f5f751c90363b39ec6cf3a/nbodykit/source/mesh/catalog.py#L300 def paint_slab(sl): # Decompose positions such that they live in the same region as the mesh in the current process p = positions[sl] size = len(p) layout = pm.decompose(p, smoothing=factor * self.resampler.support) # If we are receiving too many particles, abort and retry with a smaller chunksize recvlengths = pm.comm.allgather(layout.recvlength) if any(recvlength > 2 * self._slab_npoints_max for recvlength in recvlengths): if pm.comm.rank == 0: self.log_info('Throttling slab size as some ranks will receive too many particles. ({:d} > {:d})'.format(max(recvlengths), self._slab_npoints_max * 2)) raise StopIteration p = layout.exchange(p) w = weights if scalar_weights else layout.exchange(weights[sl]) # hold = True means no zeroing of out pm.paint(p, mass=w, resampler=self.resampler, transform=transform, hold=True, out=out) return size islab = 0 slab_npoints = self._slab_npoints_max sizes = pm.comm.allgather(len(positions)) csize = sum(sizes) local_size_max = max(sizes) painted_size = 0 import gc while islab < local_size_max: sl = slice(islab, islab + slab_npoints) if pm.comm.rank == 0: self.log_info('Slab {:d} ~ {:d} / {:d}.'.format(islab, islab + slab_npoints, local_size_max)) try: painted_size_slab = paint_slab(sl) except StopIteration: slab_npoints = slab_npoints // 2 if slab_npoints < 1: raise RuntimeError('Cannot find a slab size that fits into memory.') continue finally: # collect unfreed items gc.collect() painted_size += pm.comm.allreduce(painted_size_slab) if pm.comm.rank == 0: self.log_info('Painted {:d} out of {:d} objects to mesh.'.format(painted_size, csize)) islab += slab_npoints slab_npoints = min(self._slab_npoints_max, int(slab_npoints * 1.2)) out = pm.create(type='real', value=0.) for p, w in zip(positions, weights): paint(p, *w, out) if self.interlacing: if self.mpicomm.rank == 0: self.log_info('Running interlacing at order {:d}.'.format(self.interlacing)) cellsize = self.boxsize / self.nmesh shifts = np.arange(self.interlacing) * 1. / self.interlacing # remove 0 shift, already computed shifts = shifts[1:] out = out.r2c() for shift in shifts: transform = pm.affine.shift(shift) # this shifts particle positions by ``shift`` before painting to mesh # paint to two shifted meshes mesh_shifted = pm.create(type='real', value=0.) for p, w in zip(positions, weights): paint(p, *w, mesh_shifted, transform=transform) mesh_shifted = mesh_shifted.r2c() for k, s1, s2 in zip(out.slabs.x, out.slabs, mesh_shifted.slabs): kc = sum(k[i] * cellsize[i] for i in range(3)) # pmesh convention is F(k) = 1/N^3 \sum_{r} e^{-ikr} F(r) # shifting by "shift * cellsize" we compute F(k) = 1/N^3 \sum_{r} e^{-ikr} F(r - shift * cellsize) # i.e. F(k) = e^{- i shift * kc} 1/N^3 e^{-ikr} F(r) # Hence compensation below s1[...] = s1[...] + s2[...] 
* np.exp(shift * 1j * kc) if compensate: self._compensate(out) out = out.c2r() out[:] /= self.interlacing elif compensate: out = out.r2c() self._compensate(out) out = out.c2r() return out def _compensate(self, cfield): if self.mpicomm.rank == 0: self.log_info('Applying compensation {}.'.format(self.compensation)) # Apply compensation window for particle-assignment scheme window = _get_compensation_window(**self.compensation) cellsize = self.boxsize / self.nmesh for k, slab in zip(cfield.slabs.x, cfield.slabs): kc = tuple(ki * ci for ki, ci in zip(k, cellsize)) slab[...] /= window(*kc) def unnormalized_shotnoise(self): r""" Return unnormalized shotnoise, as: .. math:: \sum_{i=1}^{N_{g}} w_{i,g}^{2} + \alpha^{2} \sum_{i=1}^{N_{r}} w_{i,r}^{2} Where the sum runs over data (and optionally) shifted/randoms weights. """ def sum_weights2(positions, weights=None): if weights is None: return self.mpicomm.allreduce(len(positions)) return self.mpicomm.allreduce(sum(weights**2)) shotnoise = sum_weights2(self.data_positions, self.data_weights) if self.with_shifted: alpha = self.sum_data_weights / self.sum_shifted_weights shotnoise += alpha**2 * sum_weights2(self.shifted_positions, self.shifted_weights) elif self.with_randoms: alpha = self.sum_data_weights / self.sum_randoms_weights shotnoise += alpha**2 * sum_weights2(self.randoms_positions, self.randoms_weights) return shotnoise
45.18894
189
0.607077
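A minimal single-process sketch of driving CatalogMesh from the record above, with random positions. Argument names follow its docstring; the surrounding pypower-style package (.utils, .direct_power, pmesh, mpi4py) is assumed importable:

import numpy as np

rng = np.random.RandomState(seed=42)
data = rng.uniform(0., 1000., size=(3, 10000))     # position_type='xyz' -> shape (3, N)
randoms = rng.uniform(0., 1000., size=(3, 50000))

mesh = CatalogMesh(data_positions=data, randoms_positions=randoms,
                   nmesh=64, boxsize=1000., boxcenter=500.,
                   resampler='tsc', interlacing=2, position_type='xyz')
fkp = mesh.to_mesh(field='fkp', compensate=True)   # data - alpha * randoms, compensated
print(mesh.unnormalized_shotnoise())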
4a0778efdbc5ecbeb28e7c9b562c922612ce607a
3,730
py
Python
examples/mcmc/gibbs_linreg.py
Bhumbra/probayes
e5ac193076e4188b9b38c0e18466223ab4d041f7
[ "BSD-3-Clause" ]
null
null
null
examples/mcmc/gibbs_linreg.py
Bhumbra/probayes
e5ac193076e4188b9b38c0e18466223ab4d041f7
[ "BSD-3-Clause" ]
null
null
null
examples/mcmc/gibbs_linreg.py
Bhumbra/probayes
e5ac193076e4188b9b38c0e18466223ab4d041f7
[ "BSD-3-Clause" ]
null
null
null
""" Example of linear regression using Gibbs taken from Radford Neil's slides at: http://www.cs.toronto.edu/~radford/csc2541.S11/week3.pdf p(y|x, beta_0, beta_1, y_sigma) = N(beta_1*x + beta_0, y_sigma) p(beta_0) = N(beta_0_mu, beta_0_sigma) p(beta_1) = N(beta_1_mu, beta_1_sigma) p(1/y_sigma^2) = Gamma(y_sigma_alpha, 1/y_sigma_beta) """ import numpy as np import scipy.stats import probayes as pb from pylab import *; ion() from mpl_toolkits.mplot3d import Axes3D # import needed for 3D projection n_steps = 1000 # Simulate data rand_size = 60 x_range = [-3, 3] slope = 1.5 intercept = -1. y_noise = 0.5 x_obs = np.random.normal(0, 1, size=rand_size) y_obs = np.random.normal(slope*x_obs + intercept, y_noise) # Set up RVs, RFs, and SP x = pb.RV('x', vtype=float, vset=x_range) y = pb.RV('y', vtype=float, vset=[-np.inf, np.inf]) beta_0 = pb.RV('beta_0', vtype=float, vset=[-6., 6.]) beta_1 = pb.RV('beta_1', vtype=float, vset=[-6., 6.]) y_sigma = pb.RV('y_sigma', vtype=float, vset=[(0.001), 10.]) # Define likelihood and conditional functions def norm_reg(x, y, beta_0, beta_1, y_sigma): return scipy.stats.norm.logpdf(y, loc=beta_0 + beta_1*x, scale=y_sigma) def cond_reg(x, y, beta_0, beta_1, y_sigma, unknown, beta_0_mu=0, beta_0_sigma=1, beta_1_mu=0, beta_1_sigma=1., y_sigma_alpha=1., y_sigma_beta=1.): if unknown == 'y_sigma': cond_alpha = y_sigma_alpha + 0.5*rand_size cond_beta = y_sigma_beta + 0.5*np.sum((y - beta_0 - beta_1*x)**2) y_sigma = 1 / np.sqrt(np.random.gamma(cond_alpha, 1/cond_beta)) return y_sigma y_prec = 1 / (y_sigma**2) if unknown == 'beta_0': beta_0_prec = 1/(beta_0_sigma**2) cond_var = 1 / (beta_0_prec + rand_size*y_prec) cond_mu = (beta_0_prec*beta_0_mu + y_prec*np.sum(y - beta_1*x)) * cond_var cond_sigma = np.sqrt(cond_var) beta_0 = np.random.normal(cond_mu, cond_sigma) return beta_0 if unknown == 'beta_1': beta_1_prec = 1/(beta_1_sigma**2) cond_var = 1 / (beta_1_prec + y_prec*np.sum(x**2)) cond_mu = (beta_1_prec*beta_1_mu + y_prec*np.sum(x*(y - beta_0))) * cond_var cond_sigma = np.sqrt(cond_var) beta_1 = np.random.normal(cond_mu, cond_sigma) return beta_1 raise ValueError("Unknown unknown: {}".format(unknown)) # Setup up RFs and SP stats = x & y paras = beta_0 & beta_1 & y_sigma paras.set_tfun(cond_reg, tsteps=1, x=x_obs, y=y_obs) process = pb.SP(stats, paras) process.set_tfun(paras) process.set_prob(norm_reg, pscale='log') process.set_scores('gibbs') lr = scipy.stats.linregress(x_obs, y_obs) init_state = {'beta_0': lr.intercept, 'beta_1': lr.slope, 'y_sigma': np.sqrt(lr.stderr)} sampler = process.sampler(init_state, {'x,y': [x_obs,y_obs]}, stop=n_steps, iid=True, joint=True) samples = [sample for sample in sampler] summary = process(samples) n_accept = summary.u.count(True) inference = summary.v.rescaled() b0, b1, ys, post = inference['beta_0'], inference['beta_1'], \ inference['y_sigma'], inference.prob hat_beta_0 = np.median(b0) hat_beta_1 = np.median(b1) hat_y_sigma = np.median(ys) hat_beta_0_str = '{:.2f}'.format(hat_beta_0) hat_beta_1_str = '{:.2f}'.format(hat_beta_1) hat_y_sigma_str = '{:.2f}'.format(hat_y_sigma) # PLOT DATA fig = figure() ax = fig.add_subplot(111, projection='3d') c_norm = Normalize(vmin=np.min(post), vmax=np.max(post)) c_map = cm.jet(c_norm(post)) ax.plot(b0, b1, ys, '-', color=(0.7, 0.7, 0.7, 0.3)) ax.scatter(b0, b1, ys, color=c_map, marker='.', alpha=1.) 
ax.set_xlabel(r'$\beta_0$') ax.set_ylabel(r'$\beta_1$') ax.set_zlabel(r'$\sigma_y$') ax.set_title(r'$\hat{\beta_0}=' + hat_beta_0_str + r',\hat{\beta_1}=' + hat_beta_1_str + \ r',\hat{\sigma_y}=' + hat_y_sigma_str + r'$')
36.213592
97
0.681233
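The closed-form draws in cond_reg follow from standard normal/gamma conjugacy. As a check, with prior precision \tau_0 = 1/\sigma_{\beta_0}^2, noise precision \tau_y = 1/\sigma_y^2 and n = rand_size, the intercept's full conditional is

\beta_0 \mid x, y, \beta_1, \sigma_y \;\sim\; \mathcal{N}\!\left( \frac{\tau_0 \mu_{\beta_0} + \tau_y \sum_i (y_i - \beta_1 x_i)}{\tau_0 + n \tau_y},\; \frac{1}{\tau_0 + n \tau_y} \right)

which is exactly cond_mu and cond_var in the 'beta_0' branch above; the 'y_sigma' branch is the matching Gamma update for the precision 1/\sigma_y^2.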
4a077912972a3dc8a863c463135f31a27a36677d
1,332
py
Python
codeFilesPackage/nameDayViewer.py
karadalex/PythonOrganizerAppProject
37c7d10b240e9d883d4a8a50c4e94cf5315275d9
[ "MIT" ]
3
2015-12-16T01:54:09.000Z
2016-01-31T00:55:37.000Z
codeFilesPackage/nameDayViewer.py
karadalex/PythonOrganizerAppProject
37c7d10b240e9d883d4a8a50c4e94cf5315275d9
[ "MIT" ]
null
null
null
codeFilesPackage/nameDayViewer.py
karadalex/PythonOrganizerAppProject
37c7d10b240e9d883d4a8a50c4e94cf5315275d9
[ "MIT" ]
null
null
null
import gotoMainFolderDirectory
import textFileOperations
import greeklish


def nameDayDictionaryCreation():
    gotoMainFolderDirectory.go()
    dataString = textFileOperations.textFileToString("mediaFilesPackage/eortes.dat")
    dataList = dataString.split('\n\n')
    nameDayDictionary = {}
    for day in dataList:
        dayList = day.split("\n")
        date = dayList[0]
        date = date.split(" ")
        date = date[0]
        dayList.pop(0)
        dayString = ""
        for name in dayList:
            name = name.strip()
            name = name.replace("(", "")
            name = name.replace(")", "")
            namesList = name.split(",")
            name = ""
            if len(namesList) > 1:
                name += "("
            for i in range(len(namesList)):
                namesList[i] = greeklish.greekStringToGreeklishString(namesList[i])
                name += namesList[i]+","
            if len(namesList) > 1:
                name += ")"
            dayString += name+"\n"
        # put date and names in dictionary
        # dictionary key: date, format: "day/month"
        # dictionary value: names, type: String
        nameDayDictionary.update({date: dayString})
    return nameDayDictionary

# Uncomment to check function nameDayDictionaryCreation()
# print nameDayDictionaryCreation()
32.487805
84
0.57958
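A hedged lookup sketch for the record above, assuming mediaFilesPackage/eortes.dat and the helper modules are present; the date key "7/1" is a hypothetical example of the "day/month" format documented in the code:

name_days = nameDayDictionaryCreation()
print(name_days.get("7/1", "no entry"))  # names celebrated on January 7th, in greeklish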
4a077a1fe02b056d0eeafd1a46bb6cfadef4746b
8,727
py
Python
setup.py
AlanDecode/detectron2
94af461322feba0c3cadde886367445d62bc45a7
[ "Apache-2.0" ]
1
2021-09-27T17:14:13.000Z
2021-09-27T17:14:13.000Z
setup.py
AlanDecode/detectron2
94af461322feba0c3cadde886367445d62bc45a7
[ "Apache-2.0" ]
null
null
null
setup.py
AlanDecode/detectron2
94af461322feba0c3cadde886367445d62bc45a7
[ "Apache-2.0" ]
2
2020-12-10T12:58:12.000Z
2022-03-25T02:27:46.000Z
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.

import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
from torch.utils.hipify import hipify_python

torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"


def get_version():
    init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
    init_py = open(init_py_path, "r").readlines()
    version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
    version = version_line.split("=")[-1].strip().strip("'\"")

    # The following is used to build release packages.
    # Users should never use it.
    suffix = os.getenv("D2_VERSION_SUFFIX", "")
    version = version + suffix
    if os.getenv("BUILD_NIGHTLY", "0") == "1":
        from datetime import datetime

        date_str = datetime.today().strftime("%y%m%d")
        version = version + ".dev" + date_str

        new_init_py = [l for l in init_py if not l.startswith("__version__")]
        new_init_py.append('__version__ = "{}"\n'.format(version))
        with open(init_py_path, "w") as f:
            f.write("".join(new_init_py))
    return version


def get_extensions():
    this_dir = path.dirname(path.abspath(__file__))
    extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")

    main_source = path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))

    from torch.utils.cpp_extension import ROCM_HOME

    is_rocm_pytorch = (
        True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
    )

    hipify_ver = (
        [int(x) for x in torch.utils.hipify.__version__.split(".")]
        if hasattr(torch.utils.hipify, "__version__")
        else [0, 0, 0]
    )

    if is_rocm_pytorch and hipify_ver < [1, 0, 0]:  # TODO not needed since pt1.8

        # Earlier versions of hipification and extension modules were not
        # transparent, i.e. would require an explicit call to hipify, and the
        # hipification would introduce "hip" subdirectories, possibly changing
        # the relationship between source and header files.
        # This path is maintained for backwards compatibility.

        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="/detectron2/layers/csrc/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )

        source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
            path.join(extensions_dir, "hip", "*.hip")
        )

        shutil.copy(
            "detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
            "detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
        )
        shutil.copy(
            "detectron2/layers/csrc/deformable/deform_conv.h",
            "detectron2/layers/csrc/deformable/hip/deform_conv.h",
        )

        sources = [main_source] + sources
        sources = [
            s
            for s in sources
            if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
        ]

    else:

        # common code between cuda and rocm platforms,
        # for hipify version [1,0,0] and later.

        source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
            path.join(extensions_dir, "*.cu")
        )

        sources = [main_source] + sources

    extension = CppExtension

    extra_compile_args = {"cxx": []}
    define_macros = []

    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
        "FORCE_CUDA", "0"
    ) == "1":
        extension = CUDAExtension
        sources += source_cuda

        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            extra_compile_args["nvcc"] = [
                "-O3",
                "-DCUDA_HAS_FP16=1",
                "-D__CUDA_NO_HALF_OPERATORS__",
                "-D__CUDA_NO_HALF_CONVERSIONS__",
                "-D__CUDA_NO_HALF2_OPERATORS__",
            ]
        else:
            define_macros += [("WITH_HIP", None)]
            extra_compile_args["nvcc"] = []

        if torch_ver < [1, 7]:
            # supported by https://github.com/pytorch/pytorch/pull/43931
            CC = os.environ.get("CC", None)
            if CC is not None:
                extra_compile_args["nvcc"].append("-ccbin={}".format(CC))

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "detectron2._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    return ext_modules


def get_model_zoo_configs() -> List[str]:
    """
    Return a list of configs to include in package for model zoo. Copy over these configs inside
    detectron2/model_zoo.
    """

    # Use absolute paths while symlinking.
    source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
    destination = path.join(
        path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
    )
    # Symlink the config directory inside package to have a cleaner pip install.

    # Remove stale symlink/directory from a previous build.
    if path.exists(source_configs_dir):
        if path.islink(destination):
            os.unlink(destination)
        elif path.isdir(destination):
            shutil.rmtree(destination)

    if not path.exists(destination):
        try:
            os.symlink(source_configs_dir, destination)
        except OSError:
            # Fall back to copying if symlink fails: ex. on Windows.
            shutil.copytree(source_configs_dir, destination)

    config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob(
        "configs/**/*.py", recursive=True
    )
    return config_paths


# For projects that are relatively small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
    "detectron2.projects.point_rend": "projects/PointRend/point_rend",
    "detectron2.projects.deeplab": "projects/DeepLab/deeplab",
    "detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
}

setup(
    name="detectron2",
    version=get_version(),
    author="FAIR",
    url="https://github.com/facebookresearch/detectron2",
    description="Detectron2 is FAIR's next-generation research "
    "platform for object detection and segmentation.",
    packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
    package_dir=PROJECTS,
    package_data={"detectron2.model_zoo": get_model_zoo_configs()},
    python_requires=">=3.6",
    install_requires=[
        # Do not add opencv here. Just like pytorch, user should install
        # opencv themselves, preferably by OS's package manager, or by
        # choosing the proper pypi package name at https://github.com/skvark/opencv-python
        "termcolor>=1.1",
        "Pillow>=7.1",  # or use pillow-simd for better performance
        "yacs>=0.1.6",
        "tabulate",
        "cloudpickle",
        "matplotlib",
        "tqdm>4.29.0",
        "tensorboard",
        # Lock version of fvcore/iopath because they may have breaking changes
        # NOTE: when updating fvcore/iopath version, make sure fvcore depends
        # on the same version of iopath.
        "fvcore>=0.1.5,<0.1.6",  # required like this to make it pip installable
        "iopath>=0.1.7,<0.1.9",
        "pycocotools>=2.0.2",  # corresponds to https://github.com/ppwwyyxx/cocoapi
        "future",  # used by caffe2
        "pydot",  # used to save caffe2 SVGs
        "dataclasses; python_version<'3.7'",
        "omegaconf==2.1.0.dev22",
        # When adding to the list, may need to update docs/requirements.txt
        # or add mock in docs/conf.py
    ],
    extras_require={
        "all": [
            "shapely",
            "pygments>=2.2",
            "psutil",
            "hydra-core",
            "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
        ],
        "dev": [
            "flake8==3.8.1",
            "isort==4.3.21",
            "black==20.8b1",
            "flake8-bugbear",
            "flake8-comprehensions",
        ],
    },
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
35.189516
97
0.623009
4a077a90153d5babe5212b0cac4c615c93028192
1,526
py
Python
bindings/python/debug_script.py
Keithcat1/synthizer
242a06855a36b9a9049d5fb00630800cda4a2984
[ "Unlicense" ]
2
2022-01-02T14:41:45.000Z
2022-01-12T16:38:59.000Z
bindings/python/debug_script.py
Keithcat1/synthizer
242a06855a36b9a9049d5fb00630800cda4a2984
[ "Unlicense" ]
9
2021-11-04T00:26:52.000Z
2022-03-23T02:12:16.000Z
bindings/python/debug_script.py
Keithcat1/synthizer
242a06855a36b9a9049d5fb00630800cda4a2984
[ "Unlicense" ]
2
2022-03-02T21:34:57.000Z
2022-03-14T12:44:43.000Z
# Used so that I can get a quick python -i up for today's debugging session. You don't want to learn from this code; it may not even work.
import synthizer
from synthizer import EchoTapConfig
import time
import random
import sys
import math
import atexit

# Normally you want to use the synthizer.initialized context manager, but I'm using this example
# as a script that sets up a Python shell for debugging, and I
# forgot to shut this down and had to kill via task manager one too many times.
#
# You always need to shut Synthizer down, but I'll be improving things so that failing to do so
# doesn't freeze things so badly that you have to kill it via task manager.
atexit.register(synthizer.shutdown)
synthizer.initialize(
    log_level=synthizer.LogLevel.DEBUG,
    logging_backend=synthizer.LoggingBackend.STDERR
)

ctx = synthizer.Context(enable_events=True)
buffer = synthizer.Buffer.from_stream_params("file", sys.argv[1])
gen = synthizer.BufferGenerator(ctx)
gen2 = synthizer.BufferGenerator(ctx)
#gen = synthizer.StreamingGenerator.from_file(ctx, sys.argv[1])
#gen2 = synthizer.StreamingGenerator.from_file(ctx, sys.argv[1])
gen.buffer = buffer
gen2.buffer = buffer
# ctx.panner_strategy = synthizer.PannerStrategy.HRTF
src = synthizer.PannedSource(ctx)
src.add_generator(gen)
src.add_generator(gen2)
gen.config_delete_behavior(linger=True)
gen2.config_delete_behavior(linger=True)
src.config_delete_behavior(linger=True)
gen.dec_ref()
gen2.dec_ref()
src.dec_ref()
#src.panner_strategy = synthizer.PannerStrategy.HRTF
36.333333
138
0.79882
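The script's own comment recommends the synthizer.initialized context manager instead of manual initialize/shutdown; a sketch of that tidier pattern (exact keyword support may vary across synthizer versions):

import synthizer

with synthizer.initialized():
    ctx = synthizer.Context()
    # ... create buffers / generators / sources here ...
# shutdown happens automatically on exit from the with-block,
# so nothing is left running if the script raises or ends early.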
4a077af565d2a2e05c68c0693ce65f77b375e392
5,544
py
Python
datastore/ndb/transactions/main.py
xiaopeng163/python-docs-samples
b2bbfe15c27798d012f4a6e1fde33ae292a1e62a
[ "Apache-2.0" ]
null
null
null
datastore/ndb/transactions/main.py
xiaopeng163/python-docs-samples
b2bbfe15c27798d012f4a6e1fde33ae292a1e62a
[ "Apache-2.0" ]
null
null
null
datastore/ndb/transactions/main.py
xiaopeng163/python-docs-samples
b2bbfe15c27798d012f4a6e1fde33ae292a1e62a
[ "Apache-2.0" ]
1
2018-05-13T05:31:10.000Z
2018-05-13T05:31:10.000Z
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cgi
import random
import urllib

import flask

# [START taskq-imp]
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
# [END taskq-imp]


class Note(ndb.Model):
    """Models an individual Note entry with content."""
    content = ndb.StringProperty()


def parent_key(page_name):
    return ndb.Key("Parent", page_name)


app = flask.Flask(__name__)


@app.route('/')
def main_page():
    page_name = flask.request.args.get('page_name', 'default')
    response = """
        <html><body>
        <h2>Permanent note page: %s</h2>""" % cgi.escape(page_name)
    parent = parent_key(page_name)
    notes = Note.query(ancestor=parent).fetch(20)

    for note in notes:
        response += '<h3>%s</h3>' % cgi.escape(note.key.id())
        response += '<blockquote>%s</blockquote>' % cgi.escape(note.content)

    response += (
        """<hr>
        <form action="/add?%s" method="post">
        Submit Note: <input value="Title" name="note_title"><br>
        <textarea value="Note" name="note_text" rows="4" cols="60">
        </textarea>
        <input type="submit" value="Etch in stone"></form>""" %
        urllib.urlencode({'page_name': page_name}))

    response += """
        <hr>
        <form>Switch page: <input value="%s" name="page_name">
        <input type="submit" value="Switch"></form>
        </body>
        </html>""" % cgi.escape(page_name, quote=True)

    return response


# [START standard]
@ndb.transactional
def insert_if_absent(note_key, note):
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False
# [END standard]


# [START two-tries]
@ndb.transactional(retries=1)
def insert_if_absent_2_retries(note_key, note):
    # do insert
    # [END two-tries]
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False


# [START cross-group]
@ndb.transactional(xg=True)
def insert_if_absent_xg(note_key, note):
    # do insert
    # [END cross-group]
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False


# [START sometimes]
def insert_if_absent_sometimes(note_key, note):
    # do insert
    # [END sometimes]
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False


# [START indep]
@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT)
def insert_if_absent_indep(note_key, note):
    # do insert
    # [END indep]
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False


# [START taskq]
@ndb.transactional
def insert_if_absent_taskq(note_key, note):
    taskqueue.add(url=flask.url_for('taskq_worker'), transactional=True)
    # do insert
    # [END taskq]
    fetch = note_key.get()
    if fetch is None:
        note.put()
        return True
    return False


@app.route('/worker')
def taskq_worker():
    pass


def pick_random_insert(note_key, note):
    choice = random.randint(0, 5)
    if choice == 0:
        # [START calling2]
        inserted = insert_if_absent(note_key, note)
        # [END calling2]
    elif choice == 1:
        inserted = insert_if_absent_2_retries(note_key, note)
    elif choice == 2:
        inserted = insert_if_absent_xg(note_key, note)
    elif choice == 3:
        # [START sometimes-call]
        inserted = ndb.transaction(lambda: insert_if_absent_sometimes(note_key, note))
        # [END sometimes-call]
    elif choice == 4:
        inserted = insert_if_absent_indep(note_key, note)
    elif choice == 5:
        inserted = insert_if_absent_taskq(note_key, note)
    return inserted


@app.route('/add', methods=['POST'])
def add_note():
    page_name = flask.request.args.get('page_name', 'default')
    note_title = flask.request.form['note_title']
    note_text = flask.request.form['note_text']
    parent = parent_key(page_name)
    choice = random.randint(0, 1)
    if choice == 0:
        # Use transactional function
        # [START calling]
        note_key = ndb.Key(Note, note_title, parent=parent)
        note = Note(key=note_key, content=note_text)
        # [END calling]
        if pick_random_insert(note_key, note) is False:
            return ('Already there<br><a href="%s">Return</a>' %
                    flask.url_for('main_page', page_name=page_name))
        return flask.redirect(flask.url_for('main_page', page_name=page_name))
    elif choice == 1:
        # Use get_or_insert, which is transactional
        note = Note.get_or_insert(note_title, parent=parent, content=note_text)
        if note.content != note_text:
            return ('Already there<br><a href="%s">Return</a>' %
                    flask.url_for('main_page', page_name=page_name))
        return flask.redirect(flask.url_for('main_page', page_name=page_name))


if __name__ == '__main__':
    app.run()
28
79
0.637626
4a077b04986156dc74cbb7220f0ae0fa98fae8f0
598
py
Python
management/commands/generate_backup_key.py
audacious-software/Simple-Backup-Django
bdacadc916da93e68f19696b2167fc71ee4dd919
[ "Apache-2.0" ]
null
null
null
management/commands/generate_backup_key.py
audacious-software/Simple-Backup-Django
bdacadc916da93e68f19696b2167fc71ee4dd919
[ "Apache-2.0" ]
null
null
null
management/commands/generate_backup_key.py
audacious-software/Simple-Backup-Django
bdacadc916da93e68f19696b2167fc71ee4dd919
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# pylint: disable=no-member,line-too-long

from __future__ import print_function

import base64

import nacl.secret
import nacl.utils

from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = 'Generates a SecretBox key to use for backups.'

    def add_arguments(self, parser):
        pass

    def handle(self, *args, **options):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)

        print('BACKUP KEY: ' + base64.b64encode(key).decode('utf-8'))
26
111
0.714047
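A round-trip sketch of consuming the key the command above prints; the key here is generated inline as a stand-in for the command's output:

import base64
import nacl.secret
import nacl.utils

key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)   # stand-in for the generated key
encoded = base64.b64encode(key).decode('utf-8')           # what the command prints

box = nacl.secret.SecretBox(base64.b64decode(encoded))
ciphertext = box.encrypt(b'backup payload')               # nonce is prepended automatically
assert box.decrypt(ciphertext) == b'backup payload'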
4a077b44639308bf77a35575fa964eebb5224c3c
5,293
py
Python
kubernetes/client/models/v1beta1_subject_access_review.py
SEJeff/client-python
baba523c28a684b3f537502977d600dedd1f17c5
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/v1beta1_subject_access_review.py
SEJeff/client-python
baba523c28a684b3f537502977d600dedd1f17c5
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/v1beta1_subject_access_review.py
SEJeff/client-python
baba523c28a684b3f537502977d600dedd1f17c5
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.5.0-beta.1 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class V1beta1SubjectAccessReview(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, metadata=None, spec=None, status=None): """ V1beta1SubjectAccessReview - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'metadata': 'V1ObjectMeta', 'spec': 'V1beta1SubjectAccessReviewSpec', 'status': 'V1beta1SubjectAccessReviewStatus' } self.attribute_map = { 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } self._metadata = metadata self._spec = spec self._status = status @property def metadata(self): """ Gets the metadata of this V1beta1SubjectAccessReview. :return: The metadata of this V1beta1SubjectAccessReview. :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """ Sets the metadata of this V1beta1SubjectAccessReview. :param metadata: The metadata of this V1beta1SubjectAccessReview. :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """ Gets the spec of this V1beta1SubjectAccessReview. Spec holds information about the request being evaluated :return: The spec of this V1beta1SubjectAccessReview. :rtype: V1beta1SubjectAccessReviewSpec """ return self._spec @spec.setter def spec(self, spec): """ Sets the spec of this V1beta1SubjectAccessReview. Spec holds information about the request being evaluated :param spec: The spec of this V1beta1SubjectAccessReview. :type: V1beta1SubjectAccessReviewSpec """ if spec is None: raise ValueError("Invalid value for `spec`, must not be `None`") self._spec = spec @property def status(self): """ Gets the status of this V1beta1SubjectAccessReview. Status is filled in by the server and indicates whether the request is allowed or not :return: The status of this V1beta1SubjectAccessReview. :rtype: V1beta1SubjectAccessReviewStatus """ return self._status @status.setter def status(self, status): """ Sets the status of this V1beta1SubjectAccessReview. Status is filled in by the server and indicates whether the request is allowed or not :param status: The status of this V1beta1SubjectAccessReview. 
:type: V1beta1SubjectAccessReviewStatus """ self._status = status def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
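# A minimal offline usage sketch (not part of the generated model): the class
# above is a plain value object, so its round-trip helpers and the None-check
# in the `spec` setter can be exercised without any Kubernetes cluster or client.
review = V1beta1SubjectAccessReview()
print(review.to_dict())  # {'metadata': None, 'spec': None, 'status': None}
try:
    review.spec = None  # the setter rejects None, even though __init__ accepts it
except ValueError as exc:
    print(exc)  # Invalid value for `spec`, must not be `None`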
29.243094
105
0.598148
4a077ba99a12727fbc0145b3a2f7a79f103d0b4d
2,395
py
Python
watertap3/watertap3/utils/cost_curves.py
NREL/WaterTAP3
74b83dbd189784ccfddac4bc5d27002190473619
[ "BSD-3-Clause" ]
null
null
null
watertap3/watertap3/utils/cost_curves.py
NREL/WaterTAP3
74b83dbd189784ccfddac4bc5d27002190473619
[ "BSD-3-Clause" ]
34
2021-06-25T17:54:12.000Z
2021-06-25T17:54:27.000Z
watertap3/watertap3/utils/cost_curves.py
NREL/WaterTAP3
74b83dbd189784ccfddac4bc5d27002190473619
[ "BSD-3-Clause" ]
4
2021-06-25T18:32:31.000Z
2022-03-24T20:24:18.000Z
import numpy as np import pandas as pd from scipy.optimize import curve_fit __all__ = ['epa_cost_curve', 'basic_unit'] def epa_cost_curve(unit_process, **kwargs): df = pd.read_csv('data/epa_cost_curves.csv', index_col='unit_process') df = df.loc[unit_process] params = ['flow_in', 'cap_total', 'electricity_intensity', 'tds_in', 'num_stage', 'radon_rem', 'ebct'] def power(x, a, b): return a * x ** b if kwargs: # only the first keyword argument is used k, v = list(kwargs.items())[0] if k == 'tds_in': if unit_process == 'cation_exchange': if v >= 1000: df = df[df.tds_in == 1000] elif 600 <= v < 1000: df = df[df.tds_in == 600] else: df = df[df.tds_in == 200] elif unit_process == 'anion_exchange': if v >= 150: df = df[df.tds_in == 150] elif 100 <= v < 150: df = df[df.tds_in == 100] else: df = df[df.tds_in == 50] if k == 'radon_rem': if v >= 0.9: df = df[df.radon_rem == 0.99] else: df = df[df.radon_rem == 0.9] if k == 'ebct': if v > 30: df = df[df.ebct == 60] else: df = df[df.ebct == 30] df.dropna(axis=1, inplace=True) cols = df.columns mats_name = [c for c in cols if c not in params] mats_cost = {} for mat in mats_name: mats_cost[mat] = np.mean(df[mat]) x = df.flow_in.to_list() y_cost = df.cap_total.to_list() y_elect = df.electricity_intensity.to_list() cost, _ = curve_fit(power, x, y_cost) elect, _ = curve_fit(power, x, y_elect) return cost, elect, mats_name, mats_cost, df def basic_unit(unit_process, case_specific=None): if case_specific == 'solaire': df = pd.read_csv('data/basic_units_solaire.csv', index_col='unit_process') else: df = pd.read_csv('data/basic_unit.csv', index_col='unit_process') df = df.loc[unit_process] flow_basis = df.flow_basis cap_basis = df.cap_basis cap_exp = df.cap_exp elect = df.electricity_intensity year = df.year kind = df.kind return flow_basis, cap_basis, cap_exp, elect, year, kind
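# A self-contained sketch of the power-law fit that epa_cost_curve performs,
# using synthetic flow/cost points in place of data/epa_cost_curves.csv
# (the numbers below are illustrative only; np and curve_fit are imported above):
def _power_fit_sketch():
    flow = np.array([1.0, 5.0, 10.0, 50.0])  # hypothetical flow values
    cap = 12.0 * flow ** 0.7  # synthetic capital costs following a power law
    (a, b), _ = curve_fit(lambda x, a, b: a * x ** b, flow, cap)
    return a, b  # recovers approximately (12.0, 0.7)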
29.567901
106
0.533612
4a077bb1b0d9c182cc5237880e95d98cac0dce3b
9,920
py
Python
sensor-2.py
kinivi/end_to_end_example
71df6fa847155f4c42dc091f2c20f9a2cf001483
[ "MIT" ]
null
null
null
sensor-2.py
kinivi/end_to_end_example
71df6fa847155f4c42dc091f2c20f9a2cf001483
[ "MIT" ]
null
null
null
sensor-2.py
kinivi/end_to_end_example
71df6fa847155f4c42dc091f2c20f9a2cf001483
[ "MIT" ]
null
null
null
# Copyright 2017 Google Inc. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Sample device that consumes configuration from Google Cloud IoT. This example represents a simple device with a temperature sensor and a fan (simulated with software). When the device's fan is turned on, its temperature decreases by one degree per second, and when the device's fan is turned off, its temperature increases by one degree per second. (This variant of the sample reports a random humidity value instead of a simulated temperature, but the configuration flow is the same.) Every second, the device publishes its temperature reading to Google Cloud IoT Core. The server meanwhile receives these temperature readings, and decides whether to re-configure the device to turn its fan on or off. The server will instruct the device to turn the fan on when the device's temperature exceeds 10 degrees, and to turn it off when the device's temperature is less than 0 degrees. In a real system, one could use the cloud to compute the optimal thresholds for turning on and off the fan, but for illustrative purposes we use a simple threshold model. To connect the device you must have downloaded Google's CA root certificates, and a copy of your private key file. See cloud.google.com/iot for instructions on how to do this. Run this script with the corresponding algorithm flag. $ python cloudiot_pubsub_example_mqtt_device.py \ --project_id=my-project-id \ --registry_id=example-my-registry-id \ --device_id=my-device-id \ --private_key_file=rsa_private.pem \ --algorithm=RS256 With a single server, you can run multiple instances of the device with different device ids, and the server will distinguish them. Try creating a few devices and running them all at the same time. """ import argparse import datetime import json import os import random import ssl import time import jwt import paho.mqtt.client as mqtt def create_jwt(project_id, private_key_file, algorithm): """Create a JWT (https://jwt.io) to establish an MQTT connection.""" token = { 'iat': datetime.datetime.utcnow(), 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60), 'aud': project_id } with open(private_key_file, 'r') as f: private_key = f.read() print('Creating JWT using {} from private key file {}'.format( algorithm, private_key_file)) return jwt.encode(token, private_key, algorithm=algorithm) def error_str(rc): """Convert a Paho error to a human readable string.""" return '{}: {}'.format(rc, mqtt.error_string(rc)) class Device(object): """Represents the state of a single device.""" def __init__(self): self.humidity = 0 self.attributes = {"id": "sensor-2", "location": [41.80555, 20.10730], "time": str(datetime.datetime.utcnow())} self.fan_on = False self.connected = False def update_sensor_data(self): """Pretend to read the device's sensor data. This simulated device reports a random humidity value; the fan state does not affect the reading.
""" if self.fan_on: self.humidity = random.randint(0, 100) else: self.humidity = random.randint(0, 100) def wait_for_connection(self, timeout): """Wait for the device to become connected.""" total_time = 0 while not self.connected and total_time < timeout: time.sleep(1) total_time += 1 if not self.connected: raise RuntimeError('Could not connect to MQTT bridge.') def on_connect(self, unused_client, unused_userdata, unused_flags, rc): """Callback for when a device connects.""" print('Connection Result:', error_str(rc)) self.connected = True def on_disconnect(self, unused_client, unused_userdata, rc): """Callback for when a device disconnects.""" print('Disconnected:', error_str(rc)) self.connected = False def on_publish(self, unused_client, unused_userdata, unused_mid): """Callback when the device receives a PUBACK from the MQTT bridge.""" print('Published message acked.') def on_subscribe(self, unused_client, unused_userdata, unused_mid, granted_qos): """Callback when the device receives a SUBACK from the MQTT bridge.""" print('Subscribed: ', granted_qos) if granted_qos[0] == 128: print('Subscription failed.') def on_message(self, unused_client, unused_userdata, message): """Callback when the device receives a message on a subscription.""" payload = message.payload.decode('utf-8') print('Received message \'{}\' on topic \'{}\' with Qos {}'.format( payload, message.topic, str(message.qos))) # The device will receive its latest config when it subscribes to the # config topic. If there is no configuration for the device, the device # will receive a config with an empty payload. if not payload: return # The config is passed in the payload of the message. In this example, # the server sends a serialized JSON string. data = json.loads(payload) if data['fan_on'] != self.fan_on: # If changing the state of the fan, print a message and # update the internal state. self.fan_on = data['fan_on'] if self.fan_on: print('Fan turned on.') else: print('Fan turned off.') def parse_command_line_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser( description='Example Google Cloud IoT MQTT device connection code.') parser.add_argument( '--project_id', default=os.environ.get("GOOGLE_CLOUD_PROJECT"), required=True, help='GCP cloud project name.') parser.add_argument( '--registry_id', required=True, help='Cloud IoT registry id') parser.add_argument( '--device_id', required=True, help='Cloud IoT device id') parser.add_argument( '--private_key_file', required=True, help='Path to private key file.') parser.add_argument( '--algorithm', choices=('RS256', 'ES256'), required=True, help='Which encryption algorithm to use to generate the JWT.') parser.add_argument( '--cloud_region', default='us-central1', help='GCP cloud region') parser.add_argument( '--ca_certs', default='roots.pem', help='CA root certificate. Get from https://pki.google.com/roots.pem') parser.add_argument( '--num_messages', type=int, default=100, help='Number of messages to publish.') parser.add_argument( '--mqtt_bridge_hostname', default='mqtt.googleapis.com', help='MQTT bridge hostname.') parser.add_argument( '--mqtt_bridge_port', type=int, default=8883, help='MQTT bridge port.') parser.add_argument( '--message_type', choices=('event', 'state'), default='event', help=('Indicates whether the message to be published is a ' 'telemetry event or a device state message.')) return parser.parse_args() def main(): args = parse_command_line_args() # Create the MQTT client and connect to Cloud IoT. 
client = mqtt.Client( client_id='projects/{}/locations/{}/registries/{}/devices/{}'.format( args.project_id, args.cloud_region, args.registry_id, args.device_id)) client.username_pw_set( username='unused', password=create_jwt( args.project_id, args.private_key_file, args.algorithm)) client.tls_set(ca_certs=args.ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2) device = Device() client.on_connect = device.on_connect client.on_publish = device.on_publish client.on_disconnect = device.on_disconnect client.on_subscribe = device.on_subscribe client.on_message = device.on_message client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port) client.loop_start() # This is the topic that the device will publish telemetry events # (humidity data) to. mqtt_telemetry_topic = '/devices/{}/events'.format(args.device_id) # This is the topic that the device will receive configuration updates on. mqtt_config_topic = '/devices/{}/config'.format(args.device_id) # Wait up to 5 seconds for the device to connect. device.wait_for_connection(5) # Subscribe to the config topic. client.subscribe(mqtt_config_topic, qos=1) # Update and publish humidity readings at a rate of two per second. for _ in range(args.num_messages): # In an actual device, this would read the device's sensors. Here, # you generate a random humidity reading. device.update_sensor_data() # Report the device's humidity to the server by serializing it # as a JSON string. payload = json.dumps({'humidity': device.humidity, 'attributes': device.attributes}) print('Publishing payload', payload) client.publish(mqtt_telemetry_topic, payload, qos=1) # Send events every half second. time.sleep(0.5) client.disconnect() client.loop_stop() print('Finished loop successfully. Goodbye!') if __name__ == '__main__': main()
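# A quick sketch of the identifiers main() derives, with placeholder values
# (none of the four names below refer to real Cloud IoT resources):
def _topic_sketch():
    project, region, registry, device_id = (
        'my-project', 'us-central1', 'my-registry', 'sensor-2')
    client_id = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(
        project, region, registry, device_id)
    telemetry_topic = '/devices/{}/events'.format(device_id)
    config_topic = '/devices/{}/config'.format(device_id)
    return client_id, telemetry_topic, config_topic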
37.718631
119
0.671573
4a077cc4ca11038ad0bd7f7b78446c7f1efa3b69
51,067
py
Python
Lib/test/test_tempfile.py
pelotoncycle/cpython-fork
1ab99a0e912aac9c3f16555f23284d7e381f2f69
[ "PSF-2.0" ]
332
2015-08-22T12:43:56.000Z
2022-03-17T01:05:43.000Z
Lib/test/test_tempfile.py
sky-skynet/Python3
b816507f56ee14b730b7ab52a61eb17f9eb9d815
[ "PSF-2.0" ]
36
2015-05-30T08:39:19.000Z
2022-03-04T20:42:33.000Z
Lib/test/test_tempfile.py
sky-skynet/Python3
b816507f56ee14b730b7ab52a61eb17f9eb9d815
[ "PSF-2.0" ]
74
2015-05-29T17:18:53.000Z
2022-01-15T14:06:44.000Z
# tempfile.py unit tests. import tempfile import errno import io import os import signal import sys import re import warnings import contextlib import weakref from unittest import mock import unittest from test import support from test.support import script_helper if hasattr(os, 'stat'): import stat has_stat = 1 else: has_stat = 0 has_textmode = (tempfile._text_openflags != tempfile._bin_openflags) has_spawnl = hasattr(os, 'spawnl') # TEST_FILES may need to be tweaked for systems depending on the maximum # number of files that can be opened at one time (see ulimit -n) if sys.platform.startswith('openbsd'): TEST_FILES = 48 else: TEST_FILES = 100 # This is organized as one test for each chunk of code in tempfile.py, # in order of their appearance in the file. Testing which requires # threads is not done here. class TestLowLevelInternals(unittest.TestCase): def test_infer_return_type_singles(self): self.assertIs(str, tempfile._infer_return_type('')) self.assertIs(bytes, tempfile._infer_return_type(b'')) self.assertIs(str, tempfile._infer_return_type(None)) def test_infer_return_type_multiples(self): self.assertIs(str, tempfile._infer_return_type('', '')) self.assertIs(bytes, tempfile._infer_return_type(b'', b'')) with self.assertRaises(TypeError): tempfile._infer_return_type('', b'') with self.assertRaises(TypeError): tempfile._infer_return_type(b'', '') def test_infer_return_type_multiples_and_none(self): self.assertIs(str, tempfile._infer_return_type(None, '')) self.assertIs(str, tempfile._infer_return_type('', None)) self.assertIs(str, tempfile._infer_return_type(None, None)) self.assertIs(bytes, tempfile._infer_return_type(b'', None)) self.assertIs(bytes, tempfile._infer_return_type(None, b'')) with self.assertRaises(TypeError): tempfile._infer_return_type('', None, b'') with self.assertRaises(TypeError): tempfile._infer_return_type(b'', None, '') # Common functionality. class BaseTestCase(unittest.TestCase): str_check = re.compile(r"^[a-z0-9_-]{8}$") b_check = re.compile(br"^[a-z0-9_-]{8}$") def setUp(self): self._warnings_manager = support.check_warnings() self._warnings_manager.__enter__() warnings.filterwarnings("ignore", category=RuntimeWarning, message="mktemp", module=__name__) def tearDown(self): self._warnings_manager.__exit__(None, None, None) def nameCheck(self, name, dir, pre, suf): (ndir, nbase) = os.path.split(name) npre = nbase[:len(pre)] nsuf = nbase[len(nbase)-len(suf):] if dir is not None: self.assertIs(type(name), str if type(dir) is str else bytes, "unexpected return type") if pre is not None: self.assertIs(type(name), str if type(pre) is str else bytes, "unexpected return type") if suf is not None: self.assertIs(type(name), str if type(suf) is str else bytes, "unexpected return type") if (dir, pre, suf) == (None, None, None): self.assertIs(type(name), str, "default return type must be str") # check for equality of the absolute paths! 
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir), "file %r not in directory %r" % (name, dir)) self.assertEqual(npre, pre, "file %r does not begin with %r" % (nbase, pre)) self.assertEqual(nsuf, suf, "file %r does not end with %r" % (nbase, suf)) nbase = nbase[len(pre):len(nbase)-len(suf)] check = self.str_check if isinstance(nbase, str) else self.b_check self.assertTrue(check.match(nbase), "random characters %r do not match %r" % (nbase, check.pattern)) class TestExports(BaseTestCase): def test_exports(self): # There are no surprising symbols in the tempfile module dict = tempfile.__dict__ expected = { "NamedTemporaryFile" : 1, "TemporaryFile" : 1, "mkstemp" : 1, "mkdtemp" : 1, "mktemp" : 1, "TMP_MAX" : 1, "gettempprefix" : 1, "gettempprefixb" : 1, "gettempdir" : 1, "gettempdirb" : 1, "tempdir" : 1, "template" : 1, "SpooledTemporaryFile" : 1, "TemporaryDirectory" : 1, } unexp = [] for key in dict: if key[0] != '_' and key not in expected: unexp.append(key) self.assertTrue(len(unexp) == 0, "unexpected keys: %s" % unexp) class TestRandomNameSequence(BaseTestCase): """Test the internal iterator object _RandomNameSequence.""" def setUp(self): self.r = tempfile._RandomNameSequence() super().setUp() def test_get_eight_char_str(self): # _RandomNameSequence returns an eight-character string s = next(self.r) self.nameCheck(s, '', '', '') def test_many(self): # _RandomNameSequence returns no duplicate strings (stochastic) dict = {} r = self.r for i in range(TEST_FILES): s = next(r) self.nameCheck(s, '', '', '') self.assertNotIn(s, dict) dict[s] = 1 def supports_iter(self): # _RandomNameSequence supports the iterator protocol i = 0 r = self.r for s in r: i += 1 if i == 20: break @unittest.skipUnless(hasattr(os, 'fork'), "os.fork is required for this test") def test_process_awareness(self): # ensure that the random source differs between # child and parent. read_fd, write_fd = os.pipe() pid = None try: pid = os.fork() if not pid: os.close(read_fd) os.write(write_fd, next(self.r).encode("ascii")) os.close(write_fd) # bypass the normal exit handlers - leave those to # the parent. os._exit(0) parent_value = next(self.r) child_value = os.read(read_fd, len(parent_value)).decode("ascii") finally: if pid: # best effort to ensure the process can't bleed out # via any bugs above try: os.kill(pid, signal.SIGKILL) except OSError: pass os.close(read_fd) os.close(write_fd) self.assertNotEqual(child_value, parent_value) class TestCandidateTempdirList(BaseTestCase): """Test the internal function _candidate_tempdir_list.""" def test_nonempty_list(self): # _candidate_tempdir_list returns a nonempty list of strings cand = tempfile._candidate_tempdir_list() self.assertFalse(len(cand) == 0) for c in cand: self.assertIsInstance(c, str) def test_wanted_dirs(self): # _candidate_tempdir_list contains the expected directories # Make sure the interesting environment variables are all set. with support.EnvironmentVarGuard() as env: for envname in 'TMPDIR', 'TEMP', 'TMP': dirname = os.getenv(envname) if not dirname: env[envname] = os.path.abspath(envname) cand = tempfile._candidate_tempdir_list() for envname in 'TMPDIR', 'TEMP', 'TMP': dirname = os.getenv(envname) if not dirname: raise ValueError self.assertIn(dirname, cand) try: dirname = os.getcwd() except (AttributeError, OSError): dirname = os.curdir self.assertIn(dirname, cand) # Not practical to try to verify the presence of OS-specific # paths in this list. # We test _get_default_tempdir some more by testing gettempdir.
class TestGetDefaultTempdir(BaseTestCase): """Test _get_default_tempdir().""" def test_no_files_left_behind(self): # use a private empty directory with tempfile.TemporaryDirectory() as our_temp_directory: # force _get_default_tempdir() to consider our empty directory def our_candidate_list(): return [our_temp_directory] with support.swap_attr(tempfile, "_candidate_tempdir_list", our_candidate_list): # verify our directory is empty after _get_default_tempdir() tempfile._get_default_tempdir() self.assertEqual(os.listdir(our_temp_directory), []) def raise_OSError(*args, **kwargs): raise OSError() with support.swap_attr(io, "open", raise_OSError): # test again with failing io.open() with self.assertRaises(FileNotFoundError): tempfile._get_default_tempdir() self.assertEqual(os.listdir(our_temp_directory), []) open = io.open def bad_writer(*args, **kwargs): fp = open(*args, **kwargs) fp.write = raise_OSError return fp with support.swap_attr(io, "open", bad_writer): # test again with failing write() with self.assertRaises(FileNotFoundError): tempfile._get_default_tempdir() self.assertEqual(os.listdir(our_temp_directory), []) class TestGetCandidateNames(BaseTestCase): """Test the internal function _get_candidate_names.""" def test_retval(self): # _get_candidate_names returns a _RandomNameSequence object obj = tempfile._get_candidate_names() self.assertIsInstance(obj, tempfile._RandomNameSequence) def test_same_thing(self): # _get_candidate_names always returns the same object a = tempfile._get_candidate_names() b = tempfile._get_candidate_names() self.assertTrue(a is b) @contextlib.contextmanager def _inside_empty_temp_dir(): dir = tempfile.mkdtemp() try: with support.swap_attr(tempfile, 'tempdir', dir): yield finally: support.rmtree(dir) def _mock_candidate_names(*names): return support.swap_attr(tempfile, '_get_candidate_names', lambda: iter(names)) class TestBadTempdir: def test_read_only_directory(self): with _inside_empty_temp_dir(): oldmode = mode = os.stat(tempfile.tempdir).st_mode mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) os.chmod(tempfile.tempdir, mode) try: if os.access(tempfile.tempdir, os.W_OK): self.skipTest("can't set the directory read-only") with self.assertRaises(PermissionError): self.make_temp() self.assertEqual(os.listdir(tempfile.tempdir), []) finally: os.chmod(tempfile.tempdir, oldmode) def test_nonexisting_directory(self): with _inside_empty_temp_dir(): tempdir = os.path.join(tempfile.tempdir, 'nonexistent') with support.swap_attr(tempfile, 'tempdir', tempdir): with self.assertRaises(FileNotFoundError): self.make_temp() def test_non_directory(self): with _inside_empty_temp_dir(): tempdir = os.path.join(tempfile.tempdir, 'file') open(tempdir, 'wb').close() with support.swap_attr(tempfile, 'tempdir', tempdir): with self.assertRaises((NotADirectoryError, FileNotFoundError)): self.make_temp() class TestMkstempInner(TestBadTempdir, BaseTestCase): """Test the internal function _mkstemp_inner.""" class mkstemped: _bflags = tempfile._bin_openflags _tflags = tempfile._text_openflags _close = os.close _unlink = os.unlink def __init__(self, dir, pre, suf, bin): if bin: flags = self._bflags else: flags = self._tflags output_type = tempfile._infer_return_type(dir, pre, suf) (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags, output_type) def write(self, str): os.write(self.fd, str) def __del__(self): self._close(self.fd) self._unlink(self.name) def do_create(self, dir=None, pre=None, suf=None, bin=1): output_type = tempfile._infer_return_type(dir, pre, suf) 
if dir is None: if output_type is str: dir = tempfile.gettempdir() else: dir = tempfile.gettempdirb() if pre is None: pre = output_type() if suf is None: suf = output_type() file = self.mkstemped(dir, pre, suf, bin) self.nameCheck(file.name, dir, pre, suf) return file def test_basic(self): # _mkstemp_inner can create files self.do_create().write(b"blat") self.do_create(pre="a").write(b"blat") self.do_create(suf="b").write(b"blat") self.do_create(pre="a", suf="b").write(b"blat") self.do_create(pre="aa", suf=".txt").write(b"blat") def test_basic_with_bytes_names(self): # _mkstemp_inner can create files when given name parts all # specified as bytes. dir_b = tempfile.gettempdirb() self.do_create(dir=dir_b, suf=b"").write(b"blat") self.do_create(dir=dir_b, pre=b"a").write(b"blat") self.do_create(dir=dir_b, suf=b"b").write(b"blat") self.do_create(dir=dir_b, pre=b"a", suf=b"b").write(b"blat") self.do_create(dir=dir_b, pre=b"aa", suf=b".txt").write(b"blat") # Can't mix str & binary types in the args. with self.assertRaises(TypeError): self.do_create(dir="", suf=b"").write(b"blat") with self.assertRaises(TypeError): self.do_create(dir=dir_b, pre="").write(b"blat") with self.assertRaises(TypeError): self.do_create(dir=dir_b, pre=b"", suf="").write(b"blat") def test_basic_many(self): # _mkstemp_inner can create many files (stochastic) extant = list(range(TEST_FILES)) for i in extant: extant[i] = self.do_create(pre="aa") def test_choose_directory(self): # _mkstemp_inner can create files in a user-selected directory dir = tempfile.mkdtemp() try: self.do_create(dir=dir).write(b"blat") finally: os.rmdir(dir) @unittest.skipUnless(has_stat, 'os.stat not available') def test_file_mode(self): # _mkstemp_inner creates files with the proper mode file = self.do_create() mode = stat.S_IMODE(os.stat(file.name).st_mode) expected = 0o600 if sys.platform == 'win32': # There's no distinction among 'user', 'group' and 'world'; # replicate the 'user' bits. user = expected >> 6 expected = user * (1 + 8 + 64) self.assertEqual(mode, expected) @unittest.skipUnless(has_spawnl, 'os.spawnl not available') def test_noinherit(self): # _mkstemp_inner file handles are not inherited by child processes if support.verbose: v="v" else: v="q" file = self.do_create() self.assertEqual(os.get_inheritable(file.fd), False) fd = "%d" % file.fd try: me = __file__ except NameError: me = sys.argv[0] # We have to exec something, so that FD_CLOEXEC will take # effect. The core of this test is therefore in # tf_inherit_check.py, which see. 
tester = os.path.join(os.path.dirname(os.path.abspath(me)), "tf_inherit_check.py") # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted, # but an arg with embedded spaces should be decorated with double # quotes on each end if sys.platform == 'win32': decorated = '"%s"' % sys.executable tester = '"%s"' % tester else: decorated = sys.executable retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd) self.assertFalse(retval < 0, "child process caught fatal signal %d" % -retval) self.assertFalse(retval > 0, "child process reports failure %d"%retval) @unittest.skipUnless(has_textmode, "text mode not available") def test_textmode(self): # _mkstemp_inner can create files in text mode # A text file is truncated at the first Ctrl+Z byte f = self.do_create(bin=0) f.write(b"blat\x1a") f.write(b"extra\n") os.lseek(f.fd, 0, os.SEEK_SET) self.assertEqual(os.read(f.fd, 20), b"blat") def make_temp(self): return tempfile._mkstemp_inner(tempfile.gettempdir(), tempfile.gettempprefix(), '', tempfile._bin_openflags, str) def test_collision_with_existing_file(self): # _mkstemp_inner tries another name when a file with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): (fd1, name1) = self.make_temp() os.close(fd1) self.assertTrue(name1.endswith('aaa')) (fd2, name2) = self.make_temp() os.close(fd2) self.assertTrue(name2.endswith('bbb')) def test_collision_with_existing_directory(self): # _mkstemp_inner tries another name when a directory with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): dir = tempfile.mkdtemp() self.assertTrue(dir.endswith('aaa')) (fd, name) = self.make_temp() os.close(fd) self.assertTrue(name.endswith('bbb')) class TestGetTempPrefix(BaseTestCase): """Test gettempprefix().""" def test_sane_template(self): # gettempprefix returns a nonempty prefix string p = tempfile.gettempprefix() self.assertIsInstance(p, str) self.assertGreater(len(p), 0) pb = tempfile.gettempprefixb() self.assertIsInstance(pb, bytes) self.assertGreater(len(pb), 0) def test_usable_template(self): # gettempprefix returns a usable prefix string # Create a temp directory, avoiding use of the prefix. # Then attempt to create a file whose name is # prefix + 'xxxxxx.xxx' in that directory. p = tempfile.gettempprefix() + "xxxxxx.xxx" d = tempfile.mkdtemp(prefix="") try: p = os.path.join(d, p) fd = os.open(p, os.O_RDWR | os.O_CREAT) os.close(fd) os.unlink(p) finally: os.rmdir(d) class TestGetTempDir(BaseTestCase): """Test gettempdir().""" def test_directory_exists(self): # gettempdir returns a directory which exists for d in (tempfile.gettempdir(), tempfile.gettempdirb()): self.assertTrue(os.path.isabs(d) or d == os.curdir, "%r is not an absolute path" % d) self.assertTrue(os.path.isdir(d), "%r is not a directory" % d) def test_directory_writable(self): # gettempdir returns a directory writable by the user # sneaky: just instantiate a NamedTemporaryFile, which # defaults to writing into the directory returned by # gettempdir. 
file = tempfile.NamedTemporaryFile() file.write(b"blat") file.close() def test_same_thing(self): # gettempdir always returns the same object a = tempfile.gettempdir() b = tempfile.gettempdir() c = tempfile.gettempdirb() self.assertTrue(a is b) self.assertNotEqual(type(a), type(c)) self.assertEqual(a, os.fsdecode(c)) def test_case_sensitive(self): # gettempdir should not flatten its case # even on a case-insensitive file system case_sensitive_tempdir = tempfile.mkdtemp("-Temp") _tempdir, tempfile.tempdir = tempfile.tempdir, None try: with support.EnvironmentVarGuard() as env: # Fake the first env var which is checked as a candidate env["TMPDIR"] = case_sensitive_tempdir self.assertEqual(tempfile.gettempdir(), case_sensitive_tempdir) finally: tempfile.tempdir = _tempdir support.rmdir(case_sensitive_tempdir) class TestMkstemp(BaseTestCase): """Test mkstemp().""" def do_create(self, dir=None, pre=None, suf=None): output_type = tempfile._infer_return_type(dir, pre, suf) if dir is None: if output_type is str: dir = tempfile.gettempdir() else: dir = tempfile.gettempdirb() if pre is None: pre = output_type() if suf is None: suf = output_type() (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf) (ndir, nbase) = os.path.split(name) adir = os.path.abspath(dir) self.assertEqual(adir, ndir, "Directory '%s' incorrectly returned as '%s'" % (adir, ndir)) try: self.nameCheck(name, dir, pre, suf) finally: os.close(fd) os.unlink(name) def test_basic(self): # mkstemp can create files self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") self.do_create(dir=".") def test_basic_with_bytes_names(self): # mkstemp can create files when given name parts all # specified as bytes. d = tempfile.gettempdirb() self.do_create(dir=d, suf=b"") self.do_create(dir=d, pre=b"a") self.do_create(dir=d, suf=b"b") self.do_create(dir=d, pre=b"a", suf=b"b") self.do_create(dir=d, pre=b"aa", suf=b".txt") self.do_create(dir=b".") with self.assertRaises(TypeError): self.do_create(dir=".", pre=b"aa", suf=b".txt") with self.assertRaises(TypeError): self.do_create(dir=b".", pre="aa", suf=b".txt") with self.assertRaises(TypeError): self.do_create(dir=b".", pre=b"aa", suf=".txt") def test_choose_directory(self): # mkstemp can create files in a user-selected directory dir = tempfile.mkdtemp() try: self.do_create(dir=dir) finally: os.rmdir(dir) class TestMkdtemp(TestBadTempdir, BaseTestCase): """Test mkdtemp().""" def make_temp(self): return tempfile.mkdtemp() def do_create(self, dir=None, pre=None, suf=None): output_type = tempfile._infer_return_type(dir, pre, suf) if dir is None: if output_type is str: dir = tempfile.gettempdir() else: dir = tempfile.gettempdirb() if pre is None: pre = output_type() if suf is None: suf = output_type() name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf) try: self.nameCheck(name, dir, pre, suf) return name except: os.rmdir(name) raise def test_basic(self): # mkdtemp can create directories os.rmdir(self.do_create()) os.rmdir(self.do_create(pre="a")) os.rmdir(self.do_create(suf="b")) os.rmdir(self.do_create(pre="a", suf="b")) os.rmdir(self.do_create(pre="aa", suf=".txt")) def test_basic_with_bytes_names(self): # mkdtemp can create directories when given all binary parts d = tempfile.gettempdirb() os.rmdir(self.do_create(dir=d)) os.rmdir(self.do_create(dir=d, pre=b"a")) os.rmdir(self.do_create(dir=d, suf=b"b")) os.rmdir(self.do_create(dir=d, pre=b"a", suf=b"b")) os.rmdir(self.do_create(dir=d, pre=b"aa",
suf=b".txt")) with self.assertRaises(TypeError): os.rmdir(self.do_create(dir=d, pre="aa", suf=b".txt")) with self.assertRaises(TypeError): os.rmdir(self.do_create(dir=d, pre=b"aa", suf=".txt")) with self.assertRaises(TypeError): os.rmdir(self.do_create(dir="", pre=b"aa", suf=b".txt")) def test_basic_many(self): # mkdtemp can create many directories (stochastic) extant = list(range(TEST_FILES)) try: for i in extant: extant[i] = self.do_create(pre="aa") finally: for i in extant: if(isinstance(i, str)): os.rmdir(i) def test_choose_directory(self): # mkdtemp can create directories in a user-selected directory dir = tempfile.mkdtemp() try: os.rmdir(self.do_create(dir=dir)) finally: os.rmdir(dir) @unittest.skipUnless(has_stat, 'os.stat not available') def test_mode(self): # mkdtemp creates directories with the proper mode dir = self.do_create() try: mode = stat.S_IMODE(os.stat(dir).st_mode) mode &= 0o777 # Mask off sticky bits inherited from /tmp expected = 0o700 if sys.platform == 'win32': # There's no distinction among 'user', 'group' and 'world'; # replicate the 'user' bits. user = expected >> 6 expected = user * (1 + 8 + 64) self.assertEqual(mode, expected) finally: os.rmdir(dir) def test_collision_with_existing_file(self): # mkdtemp tries another name when a file with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): file = tempfile.NamedTemporaryFile(delete=False) file.close() self.assertTrue(file.name.endswith('aaa')) dir = tempfile.mkdtemp() self.assertTrue(dir.endswith('bbb')) def test_collision_with_existing_directory(self): # mkdtemp tries another name when a directory with # the chosen name already exists with _inside_empty_temp_dir(), \ _mock_candidate_names('aaa', 'aaa', 'bbb'): dir1 = tempfile.mkdtemp() self.assertTrue(dir1.endswith('aaa')) dir2 = tempfile.mkdtemp() self.assertTrue(dir2.endswith('bbb')) class TestMktemp(BaseTestCase): """Test mktemp().""" # For safety, all use of mktemp must occur in a private directory. # We must also suppress the RuntimeWarning it generates. def setUp(self): self.dir = tempfile.mkdtemp() super().setUp() def tearDown(self): if self.dir: os.rmdir(self.dir) self.dir = None super().tearDown() class mktemped: _unlink = os.unlink _bflags = tempfile._bin_openflags def __init__(self, dir, pre, suf): self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf) # Create the file. This will raise an exception if it's # mysteriously appeared in the meanwhile. os.close(os.open(self.name, self._bflags, 0o600)) def __del__(self): self._unlink(self.name) def do_create(self, pre="", suf=""): file = self.mktemped(self.dir, pre, suf) self.nameCheck(file.name, self.dir, pre, suf) return file def test_basic(self): # mktemp can choose usable file names self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") def test_many(self): # mktemp can choose many usable file names (stochastic) extant = list(range(TEST_FILES)) for i in extant: extant[i] = self.do_create(pre="aa") ## def test_warning(self): ## # mktemp issues a warning when used ## warnings.filterwarnings("error", ## category=RuntimeWarning, ## message="mktemp") ## self.assertRaises(RuntimeWarning, ## tempfile.mktemp, dir=self.dir) # We test _TemporaryFileWrapper by testing NamedTemporaryFile. 
class TestNamedTemporaryFile(BaseTestCase): """Test NamedTemporaryFile().""" def do_create(self, dir=None, pre="", suf="", delete=True): if dir is None: dir = tempfile.gettempdir() file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf, delete=delete) self.nameCheck(file.name, dir, pre, suf) return file def test_basic(self): # NamedTemporaryFile can create files self.do_create() self.do_create(pre="a") self.do_create(suf="b") self.do_create(pre="a", suf="b") self.do_create(pre="aa", suf=".txt") def test_method_lookup(self): # Issue #18879: Looking up a temporary file method should keep it # alive long enough. f = self.do_create() wr = weakref.ref(f) write = f.write write2 = f.write del f write(b'foo') del write write2(b'bar') del write2 if support.check_impl_detail(cpython=True): # No reference cycle was created. self.assertIsNone(wr()) def test_iter(self): # Issue #23700: getting iterator from a temporary file should keep # it alive as long as it's being iterated over lines = [b'spam\n', b'eggs\n', b'beans\n'] def make_file(): f = tempfile.NamedTemporaryFile(mode='w+b') f.write(b''.join(lines)) f.seek(0) return f for i, l in enumerate(make_file()): self.assertEqual(l, lines[i]) self.assertEqual(i, len(lines) - 1) def test_creates_named(self): # NamedTemporaryFile creates files with names f = tempfile.NamedTemporaryFile() self.assertTrue(os.path.exists(f.name), "NamedTemporaryFile %s does not exist" % f.name) def test_del_on_close(self): # A NamedTemporaryFile is deleted when closed dir = tempfile.mkdtemp() try: f = tempfile.NamedTemporaryFile(dir=dir) f.write(b'blat') f.close() self.assertFalse(os.path.exists(f.name), "NamedTemporaryFile %s exists after close" % f.name) finally: os.rmdir(dir) def test_dis_del_on_close(self): # Tests that delete-on-close can be disabled dir = tempfile.mkdtemp() tmp = None try: f = tempfile.NamedTemporaryFile(dir=dir, delete=False) tmp = f.name f.write(b'blat') f.close() self.assertTrue(os.path.exists(f.name), "NamedTemporaryFile %s missing after close" % f.name) finally: if tmp is not None: os.unlink(tmp) os.rmdir(dir) def test_multiple_close(self): # A NamedTemporaryFile can be closed many times without error f = tempfile.NamedTemporaryFile() f.write(b'abc\n') f.close() f.close() f.close() def test_context_manager(self): # A NamedTemporaryFile can be used as a context manager with tempfile.NamedTemporaryFile() as f: self.assertTrue(os.path.exists(f.name)) self.assertFalse(os.path.exists(f.name)) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_no_leak_fd(self): # Issue #21058: don't leak file descriptor when io.open() fails closed = [] os_close = os.close def close(fd): closed.append(fd) os_close(fd) with mock.patch('os.close', side_effect=close): with mock.patch('io.open', side_effect=ValueError): self.assertRaises(ValueError, tempfile.NamedTemporaryFile) self.assertEqual(len(closed), 1) # How to test the mode and bufsize parameters? 
class TestSpooledTemporaryFile(BaseTestCase): """Test SpooledTemporaryFile().""" def do_create(self, max_size=0, dir=None, pre="", suf=""): if dir is None: dir = tempfile.gettempdir() file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf) return file def test_basic(self): # SpooledTemporaryFile can create files f = self.do_create() self.assertFalse(f._rolled) f = self.do_create(max_size=100, pre="a", suf=".txt") self.assertFalse(f._rolled) def test_del_on_close(self): # A SpooledTemporaryFile is deleted when closed dir = tempfile.mkdtemp() try: f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir) self.assertFalse(f._rolled) f.write(b'blat ' * 5) self.assertTrue(f._rolled) filename = f.name f.close() self.assertFalse(isinstance(filename, str) and os.path.exists(filename), "SpooledTemporaryFile %s exists after close" % filename) finally: os.rmdir(dir) def test_rewrite_small(self): # A SpooledTemporaryFile can be written to multiple times within the max_size f = self.do_create(max_size=30) self.assertFalse(f._rolled) for i in range(5): f.seek(0, 0) f.write(b'x' * 20) self.assertFalse(f._rolled) def test_write_sequential(self): # A SpooledTemporaryFile should hold exactly max_size bytes, and roll # over afterward f = self.do_create(max_size=30) self.assertFalse(f._rolled) f.write(b'x' * 20) self.assertFalse(f._rolled) f.write(b'x' * 10) self.assertFalse(f._rolled) f.write(b'x') self.assertTrue(f._rolled) def test_writelines(self): # Verify writelines with a SpooledTemporaryFile f = self.do_create() f.writelines((b'x', b'y', b'z')) f.seek(0) buf = f.read() self.assertEqual(buf, b'xyz') def test_writelines_sequential(self): # A SpooledTemporaryFile should hold exactly max_size bytes, and roll # over afterward f = self.do_create(max_size=35) f.writelines((b'x' * 20, b'x' * 10, b'x' * 5)) self.assertFalse(f._rolled) f.write(b'x') self.assertTrue(f._rolled) def test_sparse(self): # A SpooledTemporaryFile that is written to past max_size (e.g. after # a sparse seek) rolls over when that write occurs f = self.do_create(max_size=30) self.assertFalse(f._rolled) f.seek(100, 0) self.assertFalse(f._rolled) f.write(b'x') self.assertTrue(f._rolled) def test_fileno(self): # A SpooledTemporaryFile should roll over to a real file on fileno() f = self.do_create(max_size=30) self.assertFalse(f._rolled) self.assertTrue(f.fileno() > 0) self.assertTrue(f._rolled) def test_multiple_close_before_rollover(self): # A SpooledTemporaryFile can be closed many times without error f = tempfile.SpooledTemporaryFile() f.write(b'abc\n') self.assertFalse(f._rolled) f.close() f.close() f.close() def test_multiple_close_after_rollover(self): # A SpooledTemporaryFile can be closed many times without error f = tempfile.SpooledTemporaryFile(max_size=1) f.write(b'abc\n') self.assertTrue(f._rolled) f.close() f.close() f.close() def test_bound_methods(self): # It should be OK to steal a bound method from a SpooledTemporaryFile # and use it independently; when the file rolls over, those bound # methods should continue to function f = self.do_create(max_size=30) read = f.read write = f.write seek = f.seek write(b"a" * 35) write(b"b" * 35) seek(0, 0) self.assertEqual(read(70), b'a'*35 + b'b'*35) def test_properties(self): f = tempfile.SpooledTemporaryFile(max_size=10) f.write(b'x' * 10) self.assertFalse(f._rolled) self.assertEqual(f.mode, 'w+b') self.assertIsNone(f.name) with self.assertRaises(AttributeError): f.newlines with self.assertRaises(AttributeError): f.encoding f.write(b'x') self.assertTrue(f._rolled) self.assertEqual(f.mode,
'rb+') self.assertIsNotNone(f.name) with self.assertRaises(AttributeError): f.newlines with self.assertRaises(AttributeError): f.encoding def test_text_mode(self): # Creating a SpooledTemporaryFile with a text mode should produce # a file object reading and writing (Unicode) text strings. f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10) f.write("abc\n") f.seek(0) self.assertEqual(f.read(), "abc\n") f.write("def\n") f.seek(0) self.assertEqual(f.read(), "abc\ndef\n") self.assertFalse(f._rolled) self.assertEqual(f.mode, 'w+') self.assertIsNone(f.name) self.assertIsNone(f.newlines) self.assertIsNone(f.encoding) f.write("xyzzy\n") f.seek(0) self.assertEqual(f.read(), "abc\ndef\nxyzzy\n") # Check that Ctrl+Z doesn't truncate the file f.write("foo\x1abar\n") f.seek(0) self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n") self.assertTrue(f._rolled) self.assertEqual(f.mode, 'w+') self.assertIsNotNone(f.name) self.assertEqual(f.newlines, os.linesep) self.assertIsNotNone(f.encoding) def test_text_newline_and_encoding(self): f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10, newline='', encoding='utf-8') f.write("\u039B\r\n") f.seek(0) self.assertEqual(f.read(), "\u039B\r\n") self.assertFalse(f._rolled) self.assertEqual(f.mode, 'w+') self.assertIsNone(f.name) self.assertIsNone(f.newlines) self.assertIsNone(f.encoding) f.write("\u039B" * 20 + "\r\n") f.seek(0) self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n") self.assertTrue(f._rolled) self.assertEqual(f.mode, 'w+') self.assertIsNotNone(f.name) self.assertIsNotNone(f.newlines) self.assertEqual(f.encoding, 'utf-8') def test_context_manager_before_rollover(self): # A SpooledTemporaryFile can be used as a context manager with tempfile.SpooledTemporaryFile(max_size=1) as f: self.assertFalse(f._rolled) self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_context_manager_during_rollover(self): # A SpooledTemporaryFile can be used as a context manager with tempfile.SpooledTemporaryFile(max_size=1) as f: self.assertFalse(f._rolled) f.write(b'abc\n') f.flush() self.assertTrue(f._rolled) self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_context_manager_after_rollover(self): # A SpooledTemporaryFile can be used as a context manager f = tempfile.SpooledTemporaryFile(max_size=1) f.write(b'abc\n') f.flush() self.assertTrue(f._rolled) with f: self.assertFalse(f.closed) self.assertTrue(f.closed) def use_closed(): with f: pass self.assertRaises(ValueError, use_closed) def test_truncate_with_size_parameter(self): # A SpooledTemporaryFile can be truncated to zero size f = tempfile.SpooledTemporaryFile(max_size=10) f.write(b'abcdefg\n') f.seek(0) f.truncate() self.assertFalse(f._rolled) self.assertEqual(f._file.getvalue(), b'') # A SpooledTemporaryFile can be truncated to a specific size f = tempfile.SpooledTemporaryFile(max_size=10) f.write(b'abcdefg\n') f.truncate(4) self.assertFalse(f._rolled) self.assertEqual(f._file.getvalue(), b'abcd') # A SpooledTemporaryFile rolls over if truncated to large size f = tempfile.SpooledTemporaryFile(max_size=10) f.write(b'abcdefg\n') f.truncate(20) self.assertTrue(f._rolled) if has_stat: self.assertEqual(os.fstat(f.fileno()).st_size, 20) if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile: class TestTemporaryFile(BaseTestCase): """Test TemporaryFile().""" def test_basic(self): # TemporaryFile can create files # No point in 
testing the name params - the file has no name. tempfile.TemporaryFile() def test_has_no_name(self): # TemporaryFile creates files with no names (on this system) dir = tempfile.mkdtemp() f = tempfile.TemporaryFile(dir=dir) f.write(b'blat') # Sneaky: because this file has no name, it should not prevent # us from removing the directory it was created in. try: os.rmdir(dir) except: # cleanup f.close() os.rmdir(dir) raise def test_multiple_close(self): # A TemporaryFile can be closed many times without error f = tempfile.TemporaryFile() f.write(b'abc\n') f.close() f.close() f.close() # How to test the mode and bufsize parameters? def test_mode_and_encoding(self): def roundtrip(input, *args, **kwargs): with tempfile.TemporaryFile(*args, **kwargs) as fileobj: fileobj.write(input) fileobj.seek(0) self.assertEqual(input, fileobj.read()) roundtrip(b"1234", "w+b") roundtrip("abdc\n", "w+") roundtrip("\u039B", "w+", encoding="utf-16") roundtrip("foo\r\n", "w+", newline="") def test_no_leak_fd(self): # Issue #21058: don't leak file descriptor when io.open() fails closed = [] os_close = os.close def close(fd): closed.append(fd) os_close(fd) with mock.patch('os.close', side_effect=close): with mock.patch('io.open', side_effect=ValueError): self.assertRaises(ValueError, tempfile.TemporaryFile) self.assertEqual(len(closed), 1) # Helper for test_del_on_shutdown class NulledModules: def __init__(self, *modules): self.refs = [mod.__dict__ for mod in modules] self.contents = [ref.copy() for ref in self.refs] def __enter__(self): for d in self.refs: for key in d: d[key] = None def __exit__(self, *exc_info): for d, c in zip(self.refs, self.contents): d.clear() d.update(c) class TestTemporaryDirectory(BaseTestCase): """Test TemporaryDirectory().""" def do_create(self, dir=None, pre="", suf="", recurse=1): if dir is None: dir = tempfile.gettempdir() tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf) self.nameCheck(tmp.name, dir, pre, suf) # Create a subdirectory and some files if recurse: d1 = self.do_create(tmp.name, pre, suf, recurse-1) d1.name = None with open(os.path.join(tmp.name, "test.txt"), "wb") as f: f.write(b"Hello world!") return tmp def test_mkdtemp_failure(self): # Check no additional exception if mkdtemp fails # Previously would raise AttributeError instead # (noted as part of Issue #10188) with tempfile.TemporaryDirectory() as nonexistent: pass with self.assertRaises(FileNotFoundError) as cm: tempfile.TemporaryDirectory(dir=nonexistent) self.assertEqual(cm.exception.errno, errno.ENOENT) def test_explicit_cleanup(self): # A TemporaryDirectory is deleted when cleaned up dir = tempfile.mkdtemp() try: d = self.do_create(dir=dir) self.assertTrue(os.path.exists(d.name), "TemporaryDirectory %s does not exist" % d.name) d.cleanup() self.assertFalse(os.path.exists(d.name), "TemporaryDirectory %s exists after cleanup" % d.name) finally: os.rmdir(dir) @support.skip_unless_symlink def test_cleanup_with_symlink_to_a_directory(self): # cleanup() should not follow symlinks to directories (issue #12464) d1 = self.do_create() d2 = self.do_create(recurse=0) # Symlink d1/foo -> d2 os.symlink(d2.name, os.path.join(d1.name, "foo")) # This call to cleanup() should not follow the "foo" symlink d1.cleanup() self.assertFalse(os.path.exists(d1.name), "TemporaryDirectory %s exists after cleanup" % d1.name) self.assertTrue(os.path.exists(d2.name), "Directory pointed to by a symlink was deleted") self.assertEqual(os.listdir(d2.name), ['test.txt'], "Contents of the directory pointed to by a symlink " "were 
deleted") d2.cleanup() @support.cpython_only def test_del_on_collection(self): # A TemporaryDirectory is deleted when garbage collected dir = tempfile.mkdtemp() try: d = self.do_create(dir=dir) name = d.name del d # Rely on refcounting to invoke __del__ self.assertFalse(os.path.exists(name), "TemporaryDirectory %s exists after __del__" % name) finally: os.rmdir(dir) def test_del_on_shutdown(self): # A TemporaryDirectory may be cleaned up during shutdown with self.do_create() as dir: for mod in ('builtins', 'os', 'shutil', 'sys', 'tempfile', 'warnings'): code = """if True: import builtins import os import shutil import sys import tempfile import warnings tmp = tempfile.TemporaryDirectory(dir={dir!r}) sys.stdout.buffer.write(tmp.name.encode()) tmp2 = os.path.join(tmp.name, 'test_dir') os.mkdir(tmp2) with open(os.path.join(tmp2, "test.txt"), "w") as f: f.write("Hello world!") {mod}.tmp = tmp warnings.filterwarnings("always", category=ResourceWarning) """.format(dir=dir, mod=mod) rc, out, err = script_helper.assert_python_ok("-c", code) tmp_name = out.decode().strip() self.assertFalse(os.path.exists(tmp_name), "TemporaryDirectory %s exists after cleanup" % tmp_name) err = err.decode('utf-8', 'backslashreplace') self.assertNotIn("Exception ", err) self.assertIn("ResourceWarning: Implicitly cleaning up", err) def test_exit_on_shutdown(self): # Issue #22427 with self.do_create() as dir: code = """if True: import sys import tempfile import warnings def generator(): with tempfile.TemporaryDirectory(dir={dir!r}) as tmp: yield tmp g = generator() sys.stdout.buffer.write(next(g).encode()) warnings.filterwarnings("always", category=ResourceWarning) """.format(dir=dir) rc, out, err = script_helper.assert_python_ok("-c", code) tmp_name = out.decode().strip() self.assertFalse(os.path.exists(tmp_name), "TemporaryDirectory %s exists after cleanup" % tmp_name) err = err.decode('utf-8', 'backslashreplace') self.assertNotIn("Exception ", err) self.assertIn("ResourceWarning: Implicitly cleaning up", err) def test_warnings_on_cleanup(self): # ResourceWarning will be triggered by __del__ with self.do_create() as dir: d = self.do_create(dir=dir, recurse=3) name = d.name # Check for the resource warning with support.check_warnings(('Implicitly', ResourceWarning), quiet=False): warnings.filterwarnings("always", category=ResourceWarning) del d support.gc_collect() self.assertFalse(os.path.exists(name), "TemporaryDirectory %s exists after __del__" % name) def test_multiple_close(self): # Can be cleaned-up many times without error d = self.do_create() d.cleanup() d.cleanup() d.cleanup() def test_context_manager(self): # Can be used as a context manager d = self.do_create() with d as name: self.assertTrue(os.path.exists(name)) self.assertEqual(name, d.name) self.assertFalse(os.path.exists(name)) if __name__ == "__main__": unittest.main()
35.316044
96
0.579024
4a077d23d9e9d462b2c63a4c8ba4b7ecd387391d
3,556
py
Python
consensus/poet/cli/sawtooth_poet_cli/main.py
suparnadhar/SuparnaGit
bec2704d8b6bc1802523ec26dcb902f59a747a4d
[ "Apache-2.0" ]
1
2017-08-04T10:31:00.000Z
2017-08-04T10:31:00.000Z
consensus/poet/cli/sawtooth_poet_cli/main.py
suparnadhar/SuparnaGit
bec2704d8b6bc1802523ec26dcb902f59a747a4d
[ "Apache-2.0" ]
null
null
null
consensus/poet/cli/sawtooth_poet_cli/main.py
suparnadhar/SuparnaGit
bec2704d8b6bc1802523ec26dcb902f59a747a4d
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ import argparse import logging import os import sys import traceback from colorlog import ColoredFormatter from sawtooth_poet_cli.exceptions import CliException from sawtooth_poet_cli.genesis import add_genesis_parser from sawtooth_poet_cli.genesis import do_genesis from sawtooth_poet_cli.enclave import add_enclave_parser from sawtooth_poet_cli.enclave import do_enclave def create_console_handler(verbose_level): clog = logging.StreamHandler() formatter = ColoredFormatter( "%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s " "%(white)s%(message)s", datefmt="%H:%M:%S", reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', }) clog.setFormatter(formatter) if verbose_level == 0: clog.setLevel(logging.WARN) elif verbose_level == 1: clog.setLevel(logging.INFO) else: clog.setLevel(logging.DEBUG) return clog def setup_loggers(verbose_level): logger = logging.getLogger() logger.setLevel(logging.DEBUG) logger.addHandler(create_console_handler(verbose_level)) def create_parent_parser(prog_name): parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False) parent_parser.add_argument( '-v', '--verbose', action='count', help='enable more verbose output') return parent_parser def create_parser(prog_name): parent_parser = create_parent_parser(prog_name) parser = argparse.ArgumentParser( parents=[parent_parser], formatter_class=argparse.RawDescriptionHelpFormatter) subparsers = parser.add_subparsers(title='subcommand', dest='command') subparsers.required = True add_genesis_parser(subparsers, parent_parser) add_enclave_parser(subparsers, parent_parser) return parser def main(prog_name=os.path.basename(sys.argv[0]), args=None, with_loggers=True): if args is None: args = sys.argv[1:] parser = create_parser(prog_name) args = parser.parse_args(args) if with_loggers is True: if args.verbose is None: verbose_level = 0 else: verbose_level = args.verbose setup_loggers(verbose_level=verbose_level) if args.command == 'genesis': do_genesis(args) elif args.command == 'enclave': do_enclave(args) else: raise AssertionError('invalid command: {}'.format(args.command)) def main_wrapper(): # pylint: disable=bare-except try: main() except CliException as e: print("Error: {}".format(e), file=sys.stderr) sys.exit(1) except KeyboardInterrupt: pass except SystemExit as e: raise e except: traceback.print_exc(file=sys.stderr) sys.exit(1)
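# A minimal sketch of how the -v count maps to console log levels in
# create_console_handler() above (exercises only this module, no subcommands):
def _verbosity_sketch():
    for count, expected in ((0, logging.WARN), (1, logging.INFO), (2, logging.DEBUG)):
        assert create_console_handler(count).level == expected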
28.222222
80
0.667885
4a077d610bff944659b940eb306946fcfc8768f5
72,433
py
Python
sqlova/utils/utils_wikisql.py
ds-keshev/sqlova
8523af748520cfa78025c6ba28f6b3ed5df8de62
[ "Apache-2.0" ]
null
null
null
sqlova/utils/utils_wikisql.py
ds-keshev/sqlova
8523af748520cfa78025c6ba28f6b3ed5df8de62
[ "Apache-2.0" ]
null
null
null
sqlova/utils/utils_wikisql.py
ds-keshev/sqlova
8523af748520cfa78025c6ba28f6b3ed5df8de62
[ "Apache-2.0" ]
null
null
null
# Copyright 2019-present NAVER Corp. # Apache License v2.0 # Wonseok Hwang import os, json import random as rd from copy import deepcopy from matplotlib.pylab import * import torch import torchvision.datasets as dsets import torch.nn as nn import torch.nn.functional as F from .utils import generate_perm_inv from .utils import json_default_type_checker from .wikisql_formatter import get_squad_style_ans device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Load data ----------------------------------------------------------------------------------------------- def load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False): # Get data train_data, train_table = load_wikisql_data(path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok, aug=aug) dev_data, dev_table = load_wikisql_data(path_wikisql, mode='dev', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok) # Get word vector if no_w2i: w2i, wemb = None, None else: w2i, wemb = load_w2i_wemb(path_wikisql, bert) return train_data, train_table, dev_data, dev_table, w2i, wemb def load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False): """ Load training sets """ if aug: mode = f"aug.{mode}" print('Augmented data is loaded!') path_sql = os.path.join(path_wikisql, mode+'_tok.jsonl') if no_hs_tok: path_table = os.path.join(path_wikisql, mode + '.tables.jsonl') else: path_table = os.path.join(path_wikisql, mode+'_tok.tables.jsonl') data = [] table = {} with open(path_sql) as f: for idx, line in enumerate(f): if toy_model and idx >= toy_size: break t1 = json.loads(line.strip()) data.append(t1) with open(path_table) as f: for idx, line in enumerate(f): if toy_model and idx > toy_size: break t1 = json.loads(line.strip()) table[t1['id']] = t1 return data, table def load_w2i_wemb(path_wikisql, bert=False): """ Load pre-made subset of TAPI. """ if bert: with open(os.path.join(path_wikisql, 'w2i_bert.json'), 'r') as f_w2i: w2i = json.load(f_w2i) wemb = load(os.path.join(path_wikisql, 'wemb_bert.npy'), ) else: with open(os.path.join(path_wikisql, 'w2i.json'), 'r') as f_w2i: w2i = json.load(f_w2i) wemb = load(os.path.join(path_wikisql, 'wemb.npy'), ) return w2i, wemb def get_loader_wikisql(data_train, data_dev, bS, shuffle_train=True, shuffle_dev=False): train_loader = torch.utils.data.DataLoader( batch_size=bS, dataset=data_train, shuffle=shuffle_train, num_workers=4, collate_fn=lambda x: x # now dictionary values are not merged! ) dev_loader = torch.utils.data.DataLoader( batch_size=bS, dataset=data_dev, shuffle=shuffle_dev, num_workers=4, collate_fn=lambda x: x # now dictionary values are not merged! 
    )

    return train_loader, dev_loader


def get_fields_1(t1, tables, no_hs_t=False, no_sql_t=False):
    nlu1 = t1['question']
    nlu_t1 = t1['question_tok']
    tid1 = t1['table_id']
    sql_i1 = t1['sql']
    sql_q1 = t1['query']
    if no_sql_t:
        sql_t1 = None
    else:
        sql_t1 = t1['query_tok']

    tb1 = tables[tid1]
    if not no_hs_t:
        hs_t1 = tb1['header_tok']
    else:
        hs_t1 = []
    hs1 = tb1['header']

    return nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1


def get_fields(t1s, tables, no_hs_t=False, no_sql_t=False):

    nlu, nlu_t, tid, sql_i, sql_q, sql_t, tb, hs_t, hs = [], [], [], [], [], [], [], [], []
    for t1 in t1s:
        # get_fields_1 already handles both the no_hs_t and hs_t cases.
        nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1 = get_fields_1(t1, tables, no_hs_t, no_sql_t)

        nlu.append(nlu1)
        nlu_t.append(nlu_t1)
        tid.append(tid1)
        sql_i.append(sql_i1)
        sql_q.append(sql_q1)
        sql_t.append(sql_t1)
        tb.append(tb1)
        hs_t.append(hs_t1)
        hs.append(hs1)

    # NOTE: tid is collected above but left out of the return tuple;
    # existing callers unpack exactly these eight lists.
    return nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs


# Embedding -------------------------------------------------------------------------

def word_to_idx1(words1, w2i, no_BE):
    w2i_l1 = []
    l1 = len(words1)  # grows by 2 below when <BEG>/<END> are added

    for w in words1:
        idx = w2i.get(w, 0)
        w2i_l1.append(idx)

    if not no_BE:
        l1 = l1 + 2
        w2i_l1 = [1] + w2i_l1 + [2]

    return w2i_l1, l1


def words_to_idx(words, w2i, no_BE=False):
    """
    Input: [ ['I', 'am', 'hero'],
             ['You', 'are', 'genius'] ]
    output:

    w2i =  [ B x max_seq_len, 1]
    wemb = [B x max_seq_len, dim]

    - Zero-padded when word is not available (treated as <UNK>)
    """
    bS = len(words)
    l = torch.zeros(bS, dtype=torch.long).to(device)  # length of the seq. of words.
    w2i_l_list = []  # shall be replaced to arr

    #     wemb_NLq_batch = []

    for i, words1 in enumerate(words):
        w2i_l1, l1 = word_to_idx1(words1, w2i, no_BE)
        w2i_l_list.append(w2i_l1)
        l[i] = l1

    # Prepare tensor of wemb
    # overwrite w2i_l
    w2i_l = torch.zeros([bS, int(max(l))], dtype=torch.long).to(device)
    for b in range(bS):
        w2i_l[b, :l[b]] = torch.LongTensor(w2i_l_list[b]).to(device)

    return w2i_l, l


def hs_to_idx(hs_t, w2i, no_BE=False):
    """ Zero-padded when word is not available (treated as <UNK>)
    Treat each "header tokens" as if they are NL-utterance tokens.
    """

    bS = len(hs_t)  # now, B = B_NLq
    hpu_t = []  # header pseudo-utterance
    l_hs = []
    for hs_t1 in hs_t:
        hpu_t += hs_t1
        l_hs1 = len(hs_t1)
        l_hs.append(l_hs1)

    w2i_hpu, l_hpu = words_to_idx(hpu_t, w2i, no_BE=no_BE)
    return w2i_hpu, l_hpu, l_hs


# Encoding ---------------------------------------------------------------------

def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False):
    """ [batch_size, max token length, dim_emb]
    """
    bS, mL, eS = wemb_l.shape

    # sort before packing
    l = array(l)
    perm_idx = argsort(-l)
    perm_idx_inv = generate_perm_inv(perm_idx)

    # pack sequence
    packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],
                                                      l[perm_idx],
                                                      batch_first=True)

    # Time to encode
    if hc0 is not None:
        hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])

    # ipdb.set_trace()
    packed_wemb_l = packed_wemb_l.float()  # make sure the packed data is float32 before the LSTM
    packed_wenc, hc_out = lstm(packed_wemb_l, hc0)
    hout, cout = hc_out

    # unpack
    wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)

    if last_only:
        # Take only the final output for each column.
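        # Advanced indexing: for each sequence b (still in length-sorted
        # order), pick the LSTM output at its last valid time step
        # (l[perm_idx][b] - 1), collapsing [bS, mL, dim] to [bS, dim].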
wenc = wenc[tuple(range(bS)), l[perm_idx] - 1] # [batch_size, dim_emb] wenc.unsqueeze_(1) # [batch_size, 1, dim_emb] wenc = wenc[perm_idx_inv] if return_hidden: # hout.shape = [number_of_directoin * num_of_layer, seq_len(=batch size), dim * number_of_direction ] w/ batch_first.. w/o batch_first? I need to see. hout = hout[:, perm_idx_inv].to(device) cout = cout[:, perm_idx_inv].to(device) # Is this correct operation? return wenc, hout, cout else: return wenc def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs): wenc_hpu, hout, cout = encode( lstm, wemb_hpu, l_hpu, return_hidden=True, hc0=None, last_only=True ) wenc_hpu = wenc_hpu.squeeze(1) bS_hpu, mL_hpu, eS = wemb_hpu.shape hS = wenc_hpu.size(-1) wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS) wenc_hs = wenc_hs.to(device) # Re-pack according to batch. # ret = [B_NLq, max_len_headers_all, dim_lstm] st = 0 for i, l_hs1 in enumerate(l_hs): wenc_hs[i, :l_hs1] = wenc_hpu[st:(st + l_hs1)] st += l_hs1 return wenc_hs # Statistics ------------------------------------------------------------------------------------------------------------------- def get_wc1(conds): """ [ [wc, wo, wv], [wc, wo, wv], ... ] """ wc1 = [] for cond in conds: wc1.append(cond[0]) return wc1 def get_wo1(conds): """ [ [wc, wo, wv], [wc, wo, wv], ... ] """ wo1 = [] for cond in conds: wo1.append(cond[1]) return wo1 def get_wv1(conds): """ [ [wc, wo, wv], [wc, wo, wv], ... ] """ wv1 = [] for cond in conds: wv1.append(cond[2]) return wv1 def get_g(sql_i): """ for backward compatibility, separated with get_g""" g_sc = [] g_sa = [] g_wn = [] g_wc = [] g_wo = [] g_wv = [] for b, psql_i1 in enumerate(sql_i): g_sc.append( psql_i1["sel"] ) g_sa.append( psql_i1["agg"]) conds = psql_i1['conds'] if not psql_i1["agg"] < 0: g_wn.append( len( conds ) ) g_wc.append( get_wc1(conds) ) g_wo.append( get_wo1(conds) ) g_wv.append( get_wv1(conds) ) else: raise EnvironmentError return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv def get_g_wvi_corenlp(t): g_wvi_corenlp = [] for t1 in t: g_wvi_corenlp.append( t1['wvi_corenlp'] ) return g_wvi_corenlp def update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb): """ Follow same approach from SQLNet author's code. Used inside of generaet_w2i_wemb. """ # global idx_w2i, w2i, wemb # idx, word2vec, word to idx dictionary, list of embedding vec, n_total: total number of words if (word in wv) and (word not in w2i): idx_w2i += 1 w2i[word] = idx_w2i wemb.append(wv[word]) n_total += 1 return idx_w2i, n_total def make_w2i_wemb(args, path_save_w2i_wemb, wv, data_train, data_dev, data_test, table_train, table_dev, table_test): w2i = {'<UNK>': 0, '<BEG>': 1, '<END>': 2} # to use it when embeds NL query. idx_w2i = 2 n_total = 3 wemb = [np.zeros(300, dtype=np.float32) for _ in range(3)] # 128 is of TAPI vector. 
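    # The three zero rows above are placeholders for <UNK>/<BEG>/<END>; their
    # width must match the dimension of the pretrained vectors in wv (300 for
    # the GloVe vectors assumed here).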
idx_w2i, n_total = generate_w2i_wemb(data_train, wv, idx_w2i, n_total, w2i, wemb) idx_w2i, n_total = generate_w2i_wemb_table(table_train, wv, idx_w2i, n_total, w2i, wemb) idx_w2i, n_total = generate_w2i_wemb(data_dev, wv, idx_w2i, n_total, w2i, wemb) idx_w2i, n_total = generate_w2i_wemb_table(table_dev, wv, idx_w2i, n_total, w2i, wemb) idx_w2i, n_total = generate_w2i_wemb(data_test, wv, idx_w2i, n_total, w2i, wemb) idx_w2i, n_total = generate_w2i_wemb_table(table_test, wv, idx_w2i, n_total, w2i, wemb) path_w2i = os.path.join(path_save_w2i_wemb, 'w2i.json') path_wemb = os.path.join(path_save_w2i_wemb, 'wemb.npy') wemb = np.stack(wemb, axis=0) with open(path_w2i, 'w') as f_w2i: json.dump(w2i, f_w2i) np.save(path_wemb, wemb) return w2i, wemb def generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb): """ Generate subset of GloVe update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables. To do 1. What should we do with the numeric? """ # word_set from NL query for table_id, table_contents in tables.items(): # NLq = t1['question'] # word_tokens = NLq.rstrip().replace('?', '').split(' ') headers = table_contents['header_tok'] # [ ['state/terriotry'], ['current', 'slogan'], [], for header_tokens in headers: for token in header_tokens: idx_w2i, n_total = update_w2i_wemb(token, wv, idx_w2i, n_total, w2i, wemb) # WikiSQL generaets unbelivable query... using state/territory in the NLq. Unnatural.. but as is # when there is slash, unlike original SQLNet which treats them as single token, we use # both tokens. e.g. 'state/terriotry' -> 'state' # token_spl = token.split('/') # for token_spl1 in token_spl: # idx_w2i, n_total = update_w2i_wemb(token_spl1, wv, idx_w2i, n_total, w2i, wemb) return idx_w2i, n_total def generate_w2i_wemb(train_data, wv, idx_w2i, n_total, w2i, wemb): """ Generate subset of GloVe update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables. To do 1. What should we do with the numeric? """ # word_set from NL query for i, t1 in enumerate(train_data): # NLq = t1['question'] # word_tokens = NLq.rstrip().replace('?', '').split(' ') word_tokens = t1['question_tok'] # Currently, TAPI does not use "?". So, it is removed. for word in word_tokens: idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb) n_total += 1 return idx_w2i, n_total def generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb): """ Generate subset of TAPI from english-to-korean dict of table headers etc.. update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables. To do 1. What should we do with the numeric? Current version do not treat them specially. But this would be modified later so that we can use tags. """ # word_set from NL query for table_name, e2k_dict in e2k_dicts.items(): word_tokens_list = list(e2k_dict.values()) # Currently, TAPI does not use "?". So, it is removed. 
for word_tokens in word_tokens_list: for word in word_tokens: idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb) n_total += 1 return idx_w2i, n_total # BERT ================================================================================================================= def tokenize_nlu1(tokenizer, nlu1): nlu1_tok = tokenizer.tokenize(nlu1) return nlu1_tok def tokenize_hds1(tokenizer, hds1): hds_all_tok = [] for hds11 in hds1: sub_tok = tokenizer.tokenize(hds11) hds_all_tok.append(sub_tok) def generate_inputs(tokenizer, nlu1_tok, hds1): tokens = [] segment_ids = [] tokens.append("[CLS]") i_st_nlu = len(tokens) # to use it later segment_ids.append(0) for token in nlu1_tok: tokens.append(token) segment_ids.append(0) i_ed_nlu = len(tokens) tokens.append("[SEP]") segment_ids.append(0) i_hds = [] # for doc for i, hds11 in enumerate(hds1): i_st_hd = len(tokens) sub_tok = tokenizer.tokenize(hds11) tokens += sub_tok i_ed_hd = len(tokens) i_hds.append((i_st_hd, i_ed_hd)) segment_ids += [1] * len(sub_tok) if i < len(hds1)-1: tokens.append("[SEP]") segment_ids.append(0) elif i == len(hds1)-1: tokens.append("[SEP]") segment_ids.append(1) else: raise EnvironmentError i_nlu = (i_st_nlu, i_ed_nlu) return tokens, segment_ids, i_nlu, i_hds def gen_l_hpu(i_hds): """ # Treat columns as if it is a batch of natural language utterance with batch-size = # of columns * # of batch_size i_hds = [(17, 18), (19, 21), (22, 23), (24, 25), (26, 29), (30, 34)]) """ l_hpu = [] for i_hds1 in i_hds: for i_hds11 in i_hds1: l_hpu.append(i_hds11[1] - i_hds11[0]) return l_hpu def get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length): """ s2s version. Treat SQL-tokens as pseudo-headers sql_vocab = ("sql select", "sql where", "sql and", "sql equal", "sql greater than", "sql less than") e.g.) Q: What is the name of the player with score greater than 15? H: Name of the player, score Input: [CLS], what, is, ..., [SEP], name, of, the, player, [SEP], score, [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ... Here, input is tokenized further by WordPiece (WP) tokenizer and fed into BERT. INPUT :param model_bert: :param tokenizer: WordPiece toknizer :param nlu: Question :param nlu_t: CoreNLP tokenized nlu. :param hds: Headers :param hs_t: None or 1st-level tokenized headers :param max_seq_length: max input token length OUTPUT tokens: BERT input tokens nlu_tt: WP-tokenized input natural language questions orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token tok_to_orig_index: inverse map. """ l_n = [] l_hs = [] # The length of columns for each batch l_input = [] input_ids = [] tokens = [] segment_ids = [] input_mask = [] i_nlu = [] # index to retreive the position of contextual vector later. i_hds = [] i_sql_vocab = [] doc_tokens = [] nlu_tt = [] t_to_tt_idx = [] tt_to_t_idx = [] for b, nlu_t1 in enumerate(nlu_t): hds1 = hds[b] l_hs.append(len(hds1)) # 1. 2nd tokenization using WordPiece tt_to_t_idx1 = [] # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP). t_to_tt_idx1 = [] # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens. nlu_tt1 = [] # all_doc_tokens[ orig_to_tok_idx[i] ] returns first sub-token segement of i-th-1st-level-token for (i, token) in enumerate(nlu_t1): t_to_tt_idx1.append( len(nlu_tt1)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens. 
sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tt_to_t_idx1.append(i) nlu_tt1.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer nlu_tt.append(nlu_tt1) tt_to_t_idx.append(tt_to_t_idx1) t_to_tt_idx.append(t_to_tt_idx1) l_n.append(len(nlu_tt1)) # hds1_all_tok = tokenize_hds1(tokenizer, hds1) # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP] # 2. Generate BERT inputs & indices. # Combine hds1 and sql_vocab tokens1, segment_ids1, i_sql_vocab1, i_nlu1, i_hds1 = generate_inputs_s2s(tokenizer, nlu_tt1, hds1, sql_vocab) # i_hds1 input_ids1 = tokenizer.convert_tokens_to_ids(tokens1) # Input masks # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask1 = [1] * len(input_ids1) # 3. Zero-pad up to the sequence length. l_input.append( len(input_ids1) ) while len(input_ids1) < max_seq_length: input_ids1.append(0) input_mask1.append(0) segment_ids1.append(0) assert len(input_ids1) == max_seq_length assert len(input_mask1) == max_seq_length assert len(segment_ids1) == max_seq_length input_ids.append(input_ids1) tokens.append(tokens1) segment_ids.append(segment_ids1) input_mask.append(input_mask1) i_nlu.append(i_nlu1) i_hds.append(i_hds1) i_sql_vocab.append(i_sql_vocab1) # Convert to tensor all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device) all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device) all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device) # 4. Generate BERT output. all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask) # 5. generate l_hpu from i_hds l_hpu = gen_l_hpu(i_hds) return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, i_sql_vocab, \ l_n, l_hpu, l_hs, l_input, \ nlu_tt, t_to_tt_idx, tt_to_t_idx def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length): """ Here, input is toknized further by WordPiece (WP) tokenizer and fed into BERT. INPUT :param model_bert: :param tokenizer: WordPiece toknizer :param nlu: Question :param nlu_t: CoreNLP tokenized nlu. :param hds: Headers :param hs_t: None or 1st-level tokenized headers :param max_seq_length: max input token length OUTPUT tokens: BERT input tokens nlu_tt: WP-tokenized input natural language questions orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token tok_to_orig_index: inverse map. """ l_n = [] l_hs = [] # The length of columns for each batch input_ids = [] tokens = [] segment_ids = [] input_mask = [] i_nlu = [] # index to retreive the position of contextual vector later. i_hds = [] doc_tokens = [] nlu_tt = [] t_to_tt_idx = [] tt_to_t_idx = [] for b, nlu_t1 in enumerate(nlu_t): hds1 = hds[b] l_hs.append(len(hds1)) # 1. 2nd tokenization using WordPiece tt_to_t_idx1 = [] # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP). t_to_tt_idx1 = [] # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens. nlu_tt1 = [] # all_doc_tokens[ orig_to_tok_idx[i] ] returns first sub-token segement of i-th-1st-level-token for (i, token) in enumerate(nlu_t1): t_to_tt_idx1.append( len(nlu_tt1)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens. 
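            # A single whitespace token may expand into several WordPiece
            # pieces; t_to_tt_idx1 records where token i starts in the piece
            # sequence, and tt_to_t_idx1 maps every piece back to token i.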
sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tt_to_t_idx1.append(i) nlu_tt1.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer nlu_tt.append(nlu_tt1) tt_to_t_idx.append(tt_to_t_idx1) t_to_tt_idx.append(t_to_tt_idx1) l_n.append(len(nlu_tt1)) # hds1_all_tok = tokenize_hds1(tokenizer, hds1) # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP] # 2. Generate BERT inputs & indices. tokens1, segment_ids1, i_nlu1, i_hds1 = generate_inputs(tokenizer, nlu_tt1, hds1) input_ids1 = tokenizer.convert_tokens_to_ids(tokens1) # Input masks # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask1 = [1] * len(input_ids1) # 3. Zero-pad up to the sequence length. while len(input_ids1) < max_seq_length: input_ids1.append(0) input_mask1.append(0) segment_ids1.append(0) assert len(input_ids1) == max_seq_length assert len(input_mask1) == max_seq_length assert len(segment_ids1) == max_seq_length input_ids.append(input_ids1) tokens.append(tokens1) segment_ids.append(segment_ids1) input_mask.append(input_mask1) i_nlu.append(i_nlu1) i_hds.append(i_hds1) # Convert to tensor all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device) all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device) all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device) # 4. Generate BERT output. all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask) # 5. generate l_hpu from i_hds l_hpu = gen_l_hpu(i_hds) return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \ l_n, l_hpu, l_hs, \ nlu_tt, t_to_tt_idx, tt_to_t_idx def get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n): """ Get the representation of each tokens. """ bS = len(l_n) l_n_max = max(l_n) wemb_n = torch.zeros([bS, l_n_max, hS * num_out_layers_n]).to(device) #print(all_encoder_layer) #print(wemb_n.shape) for b in range(bS): # [B, max_len, dim] # Fill zero for non-exist part. l_n1 = l_n[b] i_nlu1 = i_nlu[b] for i_noln in range(num_out_layers_n): i_layer = num_hidden_layers - 1 - i_noln st = i_noln * hS ed = (i_noln + 1) * hS wemb_n[b, 0:(i_nlu1[1] - i_nlu1[0]), st:ed] = all_encoder_layer[i_layer][b, i_nlu1[0]:i_nlu1[1], :] return wemb_n # def get_wemb_h(i_hds, l_hpu, l_hs, hS, num_hidden_layers, all_encoder_layer, num_out_layers_h): """ As if [ [table-1-col-1-tok1, t1-c1-t2, ...], [t1-c2-t1, t1-c2-t2, ...]. ... [t2-c1-t1, ...,] ] """ bS = len(l_hs) l_hpu_max = max(l_hpu) num_of_all_hds = sum(l_hs) wemb_h = torch.zeros([num_of_all_hds, l_hpu_max, hS * num_out_layers_h]).to(device) b_pu = -1 for b, i_hds1 in enumerate(i_hds): for b1, i_hds11 in enumerate(i_hds1): b_pu += 1 for i_nolh in range(num_out_layers_h): i_layer = num_hidden_layers - 1 - i_nolh st = i_nolh * hS ed = (i_nolh + 1) * hS wemb_h[b_pu, 0:(i_hds11[1] - i_hds11[0]), st:ed] \ = all_encoder_layer[i_layer][b, i_hds11[0]:i_hds11[1],:] return wemb_h def get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1): # get contextual output of all tokens from bert all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,\ l_n, l_hpu, l_hs, \ nlu_tt, t_to_tt_idx, tt_to_t_idx = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length) # all_encoder_layer: BERT outputs from all layers. # pooled_output: output of [CLS] vec. 
# tokens: BERT intput tokens # i_nlu: start and end indices of question in tokens # i_hds: start and end indices of headers # get the wemb wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer, num_out_layers_n) wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer, num_out_layers_h) return wemb_n, wemb_h, l_n, l_hpu, l_hs, \ nlu_tt, t_to_tt_idx, tt_to_t_idx def gen_pnt_n(g_wvi, mL_w, mL_nt): """ Generate one-hot idx indicating vectors with their lenghts. :param g_wvi: e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] where_val idx in nlu_t. 0 = <BEG>, -1 = <END>. :param mL_w: 4 :param mL_nt: 200 :return: """ bS = len(g_wvi) for g_wvi1 in g_wvi: for g_wvi11 in g_wvi1: l11 = len(g_wvi11) mL_g_wvi = max([max([0] + [len(tok) for tok in gwsi]) for gwsi in g_wvi]) - 1 # zero because of '' case. # -1 because we already have <BEG> if mL_g_wvi < 1: mL_g_wvi = 1 # NLq_token_pos = torch.zeros(bS, 5 - 1, mL_g_wvi, self.max_NLq_token_num) # l_g_wvi = torch.zeros(bS, 5 - 1) pnt_n = torch.zeros(bS, mL_w, mL_g_wvi, mL_nt).to(device) # one hot l_g_wvi = torch.zeros(bS, mL_w).to(device) for b, g_wvi1 in enumerate(g_wvi): i_wn = 0 # To prevent error from zero number of condition. for i_wn, g_wvi11 in enumerate(g_wvi1): # g_wvi11: [0, where_conds pos in NLq, end] g_wvi11_n1 = g_wvi11[:-1] # doesn't count <END> idx. l_g_wvi[b, i_wn] = len(g_wvi11_n1) for t, idx in enumerate(g_wvi11_n1): pnt_n[b, i_wn, t, idx] = 1 # Pad if i_wn < (mL_w - 1): # maximum number of conidtions is 4 pnt_n[b, i_wn + 1:, 0, 1] = 1 # # cannot understand... [<BEG>, <END>]?? l_g_wvi[b, i_wn + 1:] = 1 # it means there is only <BEG>. return pnt_n, l_g_wvi def pred_sc(s_sc): """ return: [ pr_wc1_i, pr_wc2_i, ...] """ # get g_num pr_sc = [] for s_sc1 in s_sc: pr_sc.append(s_sc1.argmax().item()) return pr_sc def pred_sc_beam(s_sc, beam_size): """ return: [ pr_wc1_i, pr_wc2_i, ...] """ # get g_num pr_sc_beam = [] for s_sc1 in s_sc: val, idxes = s_sc1.topk(k=beam_size) pr_sc_beam.append(idxes.tolist()) return pr_sc_beam def pred_sa(s_sa): """ return: [ pr_wc1_i, pr_wc2_i, ...] """ # get g_num pr_sa = [] for s_sa1 in s_sa: pr_sa.append(s_sa1.argmax().item()) return pr_sa def pred_wn(s_wn): """ return: [ pr_wc1_i, pr_wc2_i, ...] """ # get g_num pr_wn = [] for s_wn1 in s_wn: pr_wn.append(s_wn1.argmax().item()) # print(pr_wn, s_wn1) # if s_wn1.argmax().item() == 3: # input('') return pr_wn def pred_wc_old(sql_i, s_wc): """ return: [ pr_wc1_i, pr_wc2_i, ...] """ # get g_num pr_wc = [] for b, sql_i1 in enumerate(sql_i): wn = len(sql_i1['conds']) s_wc1 = s_wc[b] pr_wc1 = argsort(-s_wc1.data.cpu().numpy())[:wn] pr_wc1.sort() pr_wc.append(list(pr_wc1)) return pr_wc def pred_wc(wn, s_wc): """ return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted! """ # get g_num pr_wc = [] for b, wn1 in enumerate(wn): s_wc1 = s_wc[b] pr_wc1 = argsort(-s_wc1.data.cpu().numpy())[:wn1] pr_wc1.sort() pr_wc.append(list(pr_wc1)) return pr_wc def pred_wc_sorted_by_prob(s_wc): """ return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted by prob. All colume-indexes are returned here. """ # get g_num bS = len(s_wc) pr_wc = [] for b in range(bS): s_wc1 = s_wc[b] pr_wc1 = argsort(-s_wc1.data.cpu().numpy()) pr_wc.append(list(pr_wc1)) return pr_wc def pred_wo(wn, s_wo): """ return: [ pr_wc1_i, pr_wc2_i, ...] 
""" # s_wo = [B, 4, n_op] pr_wo_a = s_wo.argmax(dim=2) # [B, 4] # get g_num pr_wo = [] for b, pr_wo_a1 in enumerate(pr_wo_a): wn1 = wn[b] pr_wo.append(list(pr_wo_a1.data.cpu().numpy()[:wn1])) return pr_wo def pred_wvi_se(wn, s_wv): """ s_wv: [B, 4, mL, 2] - predict best st-idx & ed-idx """ s_wv_st, s_wv_ed = s_wv.split(1, dim=3) # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1] s_wv_st = s_wv_st.squeeze(3) # [B, 4, mL, 1] -> [B, 4, mL] s_wv_ed = s_wv_ed.squeeze(3) pr_wvi_st_idx = s_wv_st.argmax(dim=2) # [B, 4, mL] -> [B, 4, 1] pr_wvi_ed_idx = s_wv_ed.argmax(dim=2) pr_wvi = [] for b, wn1 in enumerate(wn): pr_wvi1 = [] for i_wn in range(wn1): pr_wvi_st_idx11 = pr_wvi_st_idx[b][i_wn] pr_wvi_ed_idx11 = pr_wvi_ed_idx[b][i_wn] pr_wvi1.append([pr_wvi_st_idx11.item(), pr_wvi_ed_idx11.item()]) pr_wvi.append(pr_wvi1) return pr_wvi def pred_wvi_se_beam(max_wn, s_wv, beam_size): """ s_wv: [B, 4, mL, 2] - predict best st-idx & ed-idx output: pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed]. prob_wvi_beam = [B, max_wn, n_pairs] """ bS = s_wv.shape[0] s_wv_st, s_wv_ed = s_wv.split(1, dim=3) # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1] s_wv_st = s_wv_st.squeeze(3) # [B, 4, mL, 1] -> [B, 4, mL] s_wv_ed = s_wv_ed.squeeze(3) prob_wv_st = F.softmax(s_wv_st, dim=-1).detach().to('cpu').numpy() prob_wv_ed = F.softmax(s_wv_ed, dim=-1).detach().to('cpu').numpy() k_logit = int(ceil(sqrt(beam_size))) n_pairs = k_logit**2 assert n_pairs >= beam_size values_st, idxs_st = s_wv_st.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit] values_ed, idxs_ed = s_wv_ed.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit] # idxs = [B, k_logit, 2] # Generate all possible combination of st, ed indices & prob pr_wvi_beam = [] # [B, max_wn, k_logit**2 [st, ed] paris] prob_wvi_beam = zeros([bS, max_wn, n_pairs]) for b in range(bS): pr_wvi_beam1 = [] idxs_st1 = idxs_st[b] idxs_ed1 = idxs_ed[b] for i_wn in range(max_wn): idxs_st11 = idxs_st1[i_wn] idxs_ed11 = idxs_ed1[i_wn] pr_wvi_beam11 = [] pair_idx = -1 for i_k in range(k_logit): for j_k in range(k_logit): pair_idx += 1 st = idxs_st11[i_k].item() ed = idxs_ed11[j_k].item() pr_wvi_beam11.append([st, ed]) p1 = prob_wv_st[b, i_wn, st] p2 = prob_wv_ed[b, i_wn, ed] prob_wvi_beam[b, i_wn, pair_idx] = p1*p2 pr_wvi_beam1.append(pr_wvi_beam11) pr_wvi_beam.append(pr_wvi_beam1) # prob return pr_wvi_beam, prob_wvi_beam def is_whitespace_g_wvi(c): # if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: if c == " ": return True return False def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu): """ - Convert to the string in whilte-space-separated tokens - Add-hoc addition. """ pr_wv_str_wp = [] # word-piece version pr_wv_str = [] for b, pr_wvi1 in enumerate(pr_wvi): pr_wv_str_wp1 = [] pr_wv_str1 = [] wp_to_wh_index1 = wp_to_wh_index[b] nlu_wp_t1 = nlu_wp_t[b] nlu_t1 = nlu_t[b] for i_wn, pr_wvi11 in enumerate(pr_wvi1): st_idx, ed_idx = pr_wvi11 # Ad-hoc modification of ed_idx to deal with wp-tokenization effect. # e.g.) to convert "butler cc (" ->"butler cc (ks)" (dev set 1st question). 
pr_wv_str_wp11 = nlu_wp_t1[st_idx:ed_idx+1] pr_wv_str_wp1.append(pr_wv_str_wp11) st_wh_idx = wp_to_wh_index1[st_idx] ed_wh_idx = wp_to_wh_index1[ed_idx] pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx+1] pr_wv_str1.append(pr_wv_str11) pr_wv_str_wp.append(pr_wv_str_wp1) pr_wv_str.append(pr_wv_str1) return pr_wv_str, pr_wv_str_wp def pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv): pr_sc = pred_sc(s_sc) pr_sa = pred_sa(s_sa) pr_wn = pred_wn(s_wn) pr_wc = pred_wc(pr_wn, s_wc) pr_wo = pred_wo(pr_wn, s_wo) pr_wvi = pred_wvi_se(pr_wn, s_wv) return pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi def merge_wv_t1_eng(where_str_tokens, NLq): """ Almost copied of SQLNet. The main purpose is pad blank line while combining tokens. """ nlq = NLq.lower() where_str_tokens = [tok.lower() for tok in where_str_tokens] alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789$' special = {'-LRB-': '(', '-RRB-': ')', '-LSB-': '[', '-RSB-': ']', '``': '"', '\'\'': '"', } # '--': '\u2013'} # this generate error for test 5661 case. ret = '' double_quote_appear = 0 for raw_w_token in where_str_tokens: # if '' (empty string) of None, continue if not raw_w_token: continue # Change the special characters w_token = special.get(raw_w_token, raw_w_token) # maybe necessary for some case? # check the double quote if w_token == '"': double_quote_appear = 1 - double_quote_appear # Check whether ret is empty. ret is selected where condition. if len(ret) == 0: pass # Check blank character. elif len(ret) > 0 and ret + ' ' + w_token in nlq: # Pad ' ' if ret + ' ' is part of nlq. ret = ret + ' ' elif len(ret) > 0 and ret + w_token in nlq: pass # already in good form. Later, ret + w_token will performed. # Below for unnatural question I guess. Is it likely to appear? elif w_token == '"': if double_quote_appear: ret = ret + ' ' # pad blank line between next token when " because in this case, it is of closing apperas # for the case of opening, no blank line. elif w_token[0] not in alphabet: pass # non alphabet one does not pad blank line. # when previous character is the special case. elif (ret[-1] not in ['(', '/', '\u2013', '#', '$', '&']) and (ret[-1] != '"' or not double_quote_appear): ret = ret + ' ' ret = ret + w_token return ret.strip() def find_sql_where_op(gt_sql_tokens_part): """ gt_sql_tokens_part: Between 'WHERE' and 'AND'(if exists). """ # sql_where_op = ['=', 'EQL', '<', 'LT', '>', 'GT'] sql_where_op = ['EQL','LT','GT'] # wv sometimes contains =, < or >. for sql_where_op in sql_where_op: if sql_where_op in gt_sql_tokens_part: found_sql_where_op = sql_where_op break return found_sql_where_op def find_sub_list(sl, l): # from stack overflow. results = [] sll = len(sl) for ind in (i for i, e in enumerate(l) if e == sl[0]): if l[ind:ind + sll] == sl: results.append((ind, ind + sll - 1)) return results def get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t): """ Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu. 
""" g_wvi = [] for b, sql_i1 in enumerate(sql_i): nlu1 = nlu[b] nlu_t1 = nlu_t[b] nlu_wp_t1 = nlu_wp_t[b] sql_t1 = sql_t[b] wh_to_wp_index1 = wh_to_wp_index[b] st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1) g_wvi1 = [] while st < len(sql_t1): if 'AND' not in sql_t1[st:]: ed = len(sql_t1) else: ed = sql_t1[st:].index('AND') + st sql_wop = find_sql_where_op(sql_t1[st:ed]) # sql where operator st_wop = st + sql_t1[st:ed].index(sql_wop) wv_str11_t = sql_t1[st_wop + 1:ed] results = find_sub_list(wv_str11_t, nlu_t1) st_idx, ed_idx = results[0] st_wp_idx = wh_to_wp_index1[st_idx] ed_wp_idx = wh_to_wp_index1[ed_idx] g_wvi11 = [st_wp_idx, ed_wp_idx] g_wvi1.append(g_wvi11) st = ed + 1 g_wvi.append(g_wvi1) return g_wvi def get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp): """ Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu. """ g_wvi = [] for b, g_wvi_corenlp1 in enumerate(g_wvi_corenlp): wh_to_wp_index1 = wh_to_wp_index[b] g_wvi1 = [] for i_wn, g_wvi_corenlp11 in enumerate(g_wvi_corenlp1): st_idx, ed_idx = g_wvi_corenlp11 st_wp_idx = wh_to_wp_index1[st_idx] ed_wp_idx = wh_to_wp_index1[ed_idx] g_wvi11 = [st_wp_idx, ed_wp_idx] g_wvi1.append(g_wvi11) g_wvi.append(g_wvi1) return g_wvi def get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t): """ Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu. """ g_wvi = [] for b, sql_i1 in enumerate(sql_i): nlu1 = nlu[b] nlu_t1 = nlu_t[b] nlu_wp_t1 = nlu_wp_t[b] sql_t1 = sql_t[b] wh_to_wp_index1 = wh_to_wp_index[b] st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1) g_wvi1 = [] while st < len(sql_t1): if 'AND' not in sql_t1[st:]: ed = len(sql_t1) else: ed = sql_t1[st:].index('AND') + st sql_wop = find_sql_where_op(sql_t1[st:ed]) # sql where operator st_wop = st + sql_t1[st:ed].index(sql_wop) wv_str11_t = sql_t1[st_wop + 1:ed] results = find_sub_list(wv_str11_t, nlu_t1) st_idx, ed_idx = results[0] st_wp_idx = wh_to_wp_index1[st_idx] ed_wp_idx = wh_to_wp_index1[ed_idx] g_wvi11 = [st_wp_idx, ed_wp_idx] g_wvi1.append(g_wvi11) st = ed + 1 g_wvi.append(g_wvi1) return g_wvi def get_cnt_sc(g_sc, pr_sc): cnt = 0 for b, g_sc1 in enumerate(g_sc): pr_sc1 = pr_sc[b] if pr_sc1 == g_sc1: cnt += 1 return cnt def get_cnt_sc_list(g_sc, pr_sc): cnt_list = [] for b, g_sc1 in enumerate(g_sc): pr_sc1 = pr_sc[b] if pr_sc1 == g_sc1: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_sa(g_sa, pr_sa): cnt = 0 for b, g_sa1 in enumerate(g_sa): pr_sa1 = pr_sa[b] if pr_sa1 == g_sa1: cnt += 1 return cnt def get_cnt_wn(g_wn, pr_wn): cnt = 0 for b, g_wn1 in enumerate(g_wn): pr_wn1 = pr_wn[b] if pr_wn1 == g_wn1: cnt += 1 return cnt def get_cnt_wc(g_wc, pr_wc): cnt = 0 for b, g_wc1 in enumerate(g_wc): pr_wc1 = pr_wc[b] pr_wn1 = len(pr_wc1) g_wn1 = len(g_wc1) if pr_wn1 != g_wn1: continue else: wc1 = array(g_wc1) wc1.sort() if array_equal(pr_wc1, wc1): cnt += 1 return cnt def get_cnt_wc_list(g_wc, pr_wc): cnt_list= [] for b, g_wc1 in enumerate(g_wc): pr_wc1 = pr_wc[b] pr_wn1 = len(pr_wc1) g_wn1 = len(g_wc1) if pr_wn1 != g_wn1: cnt_list.append(0) continue else: wc1 = array(g_wc1) wc1.sort() if array_equal(pr_wc1, wc1): cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode): """ pr's are all sorted as pr_wc are sorted in increasing order 
(in column idx) However, g's are not sorted. Sort g's in increasing order (in column idx) """ cnt = 0 for b, g_wo1 in enumerate(g_wo): g_wc1 = g_wc[b] pr_wc1 = pr_wc[b] pr_wo1 = pr_wo[b] pr_wn1 = len(pr_wo1) g_wn1 = g_wn[b] if g_wn1 != pr_wn1: continue else: # Sort based on wc sequence. if mode == 'test': idx = argsort(array(g_wc1)) g_wo1_s = array(g_wo1)[idx] g_wo1_s = list(g_wo1_s) elif mode == 'train': # due to teacher forcing, no need to sort. g_wo1_s = g_wo1 else: raise ValueError if type(pr_wo1) != list: raise TypeError if g_wo1_s == pr_wo1: cnt += 1 return cnt def get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode): """ pr's are all sorted as pr_wc are sorted in increasing order (in column idx) However, g's are not sorted. Sort g's in increasing order (in column idx) """ cnt_list=[] for b, g_wo1 in enumerate(g_wo): g_wc1 = g_wc[b] pr_wc1 = pr_wc[b] pr_wo1 = pr_wo[b] pr_wn1 = len(pr_wo1) g_wn1 = g_wn[b] if g_wn1 != pr_wn1: cnt_list.append(0) continue else: # Sort based wc sequence. if mode == 'test': idx = argsort(array(g_wc1)) g_wo1_s = array(g_wo1)[idx] g_wo1_s = list(g_wo1_s) elif mode == 'train': # due to tearch forcing, no need to sort. g_wo1_s = g_wo1 else: raise ValueError if type(pr_wo1) != list: raise TypeError if g_wo1_s == pr_wo1: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode): """ usalbe only when g_wc was used to find pr_wv g_wvi """ cnt = 0 for b, g_wvi1 in enumerate(g_wvi): pr_wvi1 = pr_wvi[b] g_wc1 = g_wc[b] pr_wn1 = len(pr_wvi1) g_wn1 = g_wn[b] # Now sorting. # Sort based wc sequence. if mode == 'test': idx1 = argsort(array(g_wc1)) elif mode == 'train': idx1 = list( range( g_wn1) ) else: raise ValueError if g_wn1 != pr_wn1: continue else: flag = True for i_wn, idx11 in enumerate(idx1): g_wvi11 = g_wvi1[idx11] pr_wvi11 = pr_wvi1[i_wn] if g_wvi11 != pr_wvi11: flag = False # print(g_wv1, g_wv11) # print(pr_wv1, pr_wv11) # input('') break if flag: cnt += 1 return cnt def get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode): """ usalbe only when g_wc was used to find pr_wv """ cnt_list =[] for b, g_wvi1 in enumerate(g_wvi): g_wc1 = g_wc[b] pr_wvi1 = pr_wvi[b] pr_wn1 = len(pr_wvi1) g_wn1 = g_wn[b] # Now sorting. # Sort based wc sequence. if mode == 'test': idx1 = argsort(array(g_wc1)) elif mode == 'train': idx1 = list( range( g_wn1) ) else: raise ValueError if g_wn1 != pr_wn1: cnt_list.append(0) continue else: flag = True for i_wn, idx11 in enumerate(idx1): g_wvi11 = g_wvi1[idx11] pr_wvi11 = pr_wvi1[i_wn] if g_wvi11 != pr_wvi11: flag = False # print(g_wv1, g_wv11) # print(pr_wv1, pr_wv11) # input('') break if flag: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode): """ usalbe only when g_wc was used to find pr_wv """ cnt_list =[] for b, g_wc1 in enumerate(g_wc): pr_wn1 = len(pr_sql_i[b]["conds"]) g_wn1 = g_wn[b] # Now sorting. # Sort based wc sequence. 
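        # At evaluation time the gold conditions are re-ordered by column
        # index so they line up with predictions (pred_wc returns columns
        # sorted ascending); under teacher forcing the original order is kept.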
if mode == 'test': idx1 = argsort(array(g_wc1)) elif mode == 'train': idx1 = list( range( g_wn1) ) else: raise ValueError if g_wn1 != pr_wn1: cnt_list.append(0) continue else: flag = True for i_wn, idx11 in enumerate(idx1): g_wvi_str11 = str(g_sql_i[b]["conds"][idx11][2]).lower() pr_wvi_str11 = str(pr_sql_i[b]["conds"][i_wn][2]).lower() # print(g_wvi_str11) # print(pr_wvi_str11) # print(g_wvi_str11==pr_wvi_str11) if g_wvi_str11 != pr_wvi_str11: flag = False # print(g_wv1, g_wv11) # print(pr_wv1, pr_wv11) # input('') break if flag: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode): """ usalbe only when g_wc was used to find pr_wv """ cnt_sc = get_cnt_sc(g_sc, pr_sc) cnt_sa = get_cnt_sa(g_sa, pr_sa) cnt_wn = get_cnt_wn(g_wn, pr_wn) cnt_wc = get_cnt_wc(g_wc, pr_wc) cnt_wo = get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode) cnt_wv = get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode) return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv def get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, g_sql_i, pr_sql_i, mode): """ usalbe only when g_wc was used to find pr_wv """ cnt_sc = get_cnt_sc_list(g_sc, pr_sc) cnt_sa = get_cnt_sc_list(g_sa, pr_sa) cnt_wn = get_cnt_sc_list(g_wn, pr_wn) cnt_wc = get_cnt_wc_list(g_wc, pr_wc) cnt_wo = get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode) if pr_wvi: cnt_wvi = get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode) else: cnt_wvi = [0]*len(cnt_sc) cnt_wv = get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode) # compare using wv-str which presented in original data. return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv def get_cnt_lx_list(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1): # all cnt are list here. cnt_list = [] cnt_lx = 0 for csc, csa, cwn, cwc, cwo, cwv in zip(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1): if csc and csa and cwn and cwc and cwo and cwv: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_cnt_x_list(engine, tb, g_sc, g_sa, g_sql_i, pr_sc, pr_sa, pr_sql_i): cnt_x1_list = [] g_ans = [] pr_ans = [] for b in range(len(g_sc)): g_ans1 = engine.execute(tb[b]['id'], g_sc[b], g_sa[b], g_sql_i[b]['conds']) # print(f'cnt: {cnt}') # print(f"pr_sql_i: {pr_sql_i[b]['conds']}") try: pr_ans1 = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], pr_sql_i[b]['conds']) if bool(pr_ans1): # not empty due to lack of the data from incorretly generated sql if g_ans1 == pr_ans1: cnt_x1 = 1 else: cnt_x1 = 0 else: cnt_x1 = 0 except: # type error etc... Execution-guided decoding may be used here. pr_ans1 = None cnt_x1 = 0 cnt_x1_list.append(cnt_x1) g_ans.append(g_ans1) pr_ans.append(pr_ans1) return cnt_x1_list, g_ans, pr_ans def get_mean_grad(named_parameters): """ Get list of mean, std of grad of each parameters Code based on web searched result.. """ mu_list = [] sig_list = [] for name, param in named_parameters: if param.requires_grad: # and ("bias" not in name) : # bias makes std = nan as it is of single parameters magnitude = param.grad.abs() mu_list.append(magnitude.mean()) if len(magnitude) == 1: # why nan for single param? Anyway to avoid that.. 
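                # torch.std() of a single element is NaN (the unbiased
                # estimator divides by n - 1), so substitute zero for
                # one-element gradients.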
sig_list.append(torch.tensor(0)) else: sig_list.append(magnitude.std()) # if "svp_se" return mu_list, sig_list def generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu): pr_sql_i = [] for b, nlu1 in enumerate(nlu): conds = [] for i_wn in range(pr_wn[b]): conds1 = [] conds1.append(pr_wc[b][i_wn]) conds1.append(pr_wo[b][i_wn]) merged_wv11 = merge_wv_t1_eng(pr_wv_str[b][i_wn], nlu[b]) conds1.append(merged_wv11) conds.append(conds1) pr_sql_i1 = {'agg': pr_sa[b], 'sel': pr_sc[b], 'conds': conds} pr_sql_i.append(pr_sql_i1) return pr_sql_i def save_for_evaluation(path_save, results, dset_name, ): path_save_file = os.path.join(path_save, f'results_{dset_name}.jsonl') with open(path_save_file, 'w', encoding='utf-8') as f: for i, r1 in enumerate(results): json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker) json_str += '\n' f.writelines(json_str) def save_for_evaluation_aux(path_save, results, dset_name, ): path_save_file = os.path.join(path_save, f'results_aux_{dset_name}.jsonl') with open(path_save_file, 'w', encoding='utf-8') as f: for i, r1 in enumerate(results): json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker) json_str += '\n' f.writelines(json_str) def check_sc_sa_pairs(tb, pr_sc, pr_sa, ): """ Check whether pr_sc, pr_sa are allowed pairs or not. agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG'] """ bS = len(pr_sc) check = [False] * bS for b, pr_sc1 in enumerate(pr_sc): pr_sa1 = pr_sa[b] hd_types1 = tb[b]['types'] hd_types11 = hd_types1[pr_sc1] if hd_types11 == 'text': if pr_sa1 == 0 or pr_sa1 == 3: # '' check[b] = True else: check[b] = False elif hd_types11 == 'real': check[b] = True else: raise Exception("New TYPE!!") return check def remap_sc_idx(idxs, pr_sc_beam): for b, idxs1 in enumerate(idxs): for i_beam, idxs11 in enumerate(idxs1): sc_beam_idx = idxs[b][i_beam][0] sc_idx = pr_sc_beam[b][sc_beam_idx] idxs[b][i_beam][0] = sc_idx return idxs def sort_and_generate_pr_w(pr_sql_i): pr_wc = [] pr_wo = [] pr_wv = [] for b, pr_sql_i1 in enumerate(pr_sql_i): conds1 = pr_sql_i1["conds"] pr_wc1 = [] pr_wo1 = [] pr_wv1 = [] # Generate for i_wn, conds11 in enumerate(conds1): pr_wc1.append( conds11[0]) pr_wo1.append( conds11[1]) pr_wv1.append( conds11[2]) # sort based on pr_wc1 idx = argsort(pr_wc1) pr_wc1 = array(pr_wc1)[idx].tolist() pr_wo1 = array(pr_wo1)[idx].tolist() pr_wv1 = array(pr_wv1)[idx].tolist() conds1_sorted = [] for i, idx1 in enumerate(idx): conds1_sorted.append( conds1[idx1] ) pr_wc.append(pr_wc1) pr_wo.append(pr_wo1) pr_wv.append(pr_wv1) pr_sql_i1['conds'] = conds1_sorted return pr_wc, pr_wo, pr_wv, pr_sql_i def generate_sql_q(sql_i, tb): sql_q = [] for b, sql_i1 in enumerate(sql_i): tb1 = tb[b] sql_q1 = generate_sql_q1(sql_i1, tb1) sql_q.append(sql_q1) return sql_q def generate_sql_q1(sql_i1, tb1): """ sql = {'sel': 5, 'agg': 4, 'conds': [[3, 0, '59']]} agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg'] cond_ops = ['=', '>', '<', 'OP'] Temporal as it can show only one-time conditioned case. 
sql_query: real sql_query sql_plus_query: More redable sql_query "PLUS" indicates, it deals with the some of db specific facts like PCODE <-> NAME """ agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg'] cond_ops = ['=', '>', '<', 'OP'] headers = tb1["header"] # select_header = headers[sql['sel']].lower() # try: # select_table = tb1["name"] # except: # print(f"No table name while headers are {headers}") select_table = tb1["id"] select_agg = agg_ops[sql_i1['agg']] select_header = headers[sql_i1['sel']] sql_query_part1 = f'SELECT {select_agg}({select_header}) ' where_num = len(sql_i1['conds']) if where_num == 0: sql_query_part2 = f'FROM {select_table}' # sql_plus_query_part2 = f'FROM {select_table}' else: sql_query_part2 = f'FROM {select_table} WHERE' # sql_plus_query_part2 = f'FROM {select_table_refined} WHERE' # ---------------------------------------------------------------------------------------------------------- for i in range(where_num): # check 'OR' # number_of_sub_conds = len(sql['conds'][i]) where_header_idx, where_op_idx, where_str = sql_i1['conds'][i] where_header = headers[where_header_idx] where_op = cond_ops[where_op_idx] if i > 0: sql_query_part2 += ' AND' # sql_plus_query_part2 += ' AND' sql_query_part2 += f" {where_header} {where_op} {where_str}" sql_query = sql_query_part1 + sql_query_part2 # sql_plus_query = sql_plus_query_part1 + sql_plus_query_part2 return sql_query def get_pnt_idx1(col_pool_type, st_ed): st, ed = st_ed if col_pool_type == 'start_tok': pnt_idx1 = st elif col_pool_type == 'end_tok': pnt_idx1 = ed elif col_pool_type == 'avg': pnt_idx1 = arange(st, ed, 1) return pnt_idx1 def gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type): """ sql_vocab = ( 0.. "sql none", "sql max", "sql min", "sql count", "sql sum", "sql average", ..5 6.. "sql select", "sql where", "sql and", .. 8 9.. "sql equal", "sql greater than", "sql less than", .. 11 12.. "sql start", "sql end" .. 
13 ) """ g_pnt_idxs = [] for b, sql_i1 in enumerate(sql_i): i_sql_vocab1 = i_sql_vocab[b] i_hds1 = i_hds[b] g_pnt_idxs1 = [] # start token pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-2]) g_pnt_idxs1.append(pnt_idx1) # select token pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[6]) g_pnt_idxs1.append(pnt_idx1) # select agg idx_agg = sql_i1["agg"] pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_agg]) g_pnt_idxs1.append(pnt_idx1) # select column idx_sc = sql_i1["sel"] pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_sc]) g_pnt_idxs1.append(pnt_idx1) conds = sql_i1["conds"] wn = len(conds) if wn <= 0: pass else: # select where pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[7]) g_pnt_idxs1.append(pnt_idx1) for i_wn, conds1 in enumerate(conds): # where column idx_wc = conds1[0] pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_wc]) g_pnt_idxs1.append(pnt_idx1) # where op idx_wo = conds1[1] pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_wo + 9]) g_pnt_idxs1.append(pnt_idx1) # where val st, ed = g_wvi[b][i_wn] end_pos_of_sql_vocab = i_sql_vocab1[-1][-1] g_pnt_idxs1.append(st + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector g_pnt_idxs1.append(ed + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector # and token if i_wn < wn - 1: pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[8]) g_pnt_idxs1.append(pnt_idx1) # end token pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-1]) g_pnt_idxs1.append(pnt_idx1) g_pnt_idxs.append(g_pnt_idxs1) return g_pnt_idxs def pred_pnt_idxs(score, pnt_start_tok, pnt_end_tok): pr_pnt_idxs = [] for b, score1 in enumerate(score): # score1 = [T, max_seq_length] pr_pnt_idxs1 = [pnt_start_tok] for t, score11 in enumerate(score1): pnt = score11.argmax().item() pr_pnt_idxs1.append(pnt) if pnt == pnt_end_tok: break pr_pnt_idxs.append(pr_pnt_idxs1) return pr_pnt_idxs def generate_sql_q_s2s(pnt_idxs, tokens, tb): sql_q = [] for b, pnt_idxs1 in enumerate(pnt_idxs): tb1 = tb[b] sql_q1 = generate_sql_q1_s2s(pnt_idxs1, tokens[b], tb1) sql_q.append(sql_q1) return sql_q def generate_sql_q1_s2s(pnt_idxs1, tokens1, tb1): """ agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg'] cond_ops = ['=', '>', '<', 'OP'] Temporal as it can show only one-time conditioned case. 
sql_query: real sql_query sql_plus_query: More redable sql_query "PLUS" indicates, it deals with the some of db specific facts like PCODE <-> NAME """ sql_query = "" for t, pnt_idxs11 in enumerate(pnt_idxs1): tok = tokens1[pnt_idxs11] sql_query += tok if t < len(pnt_idxs1)-1: sql_query += " " return sql_query # Generate sql_i from pnt_idxs def find_where_pnt_belong(pnt, vg): idx_sub = -1 for i, st_ed in enumerate(vg): st, ed = st_ed if pnt < ed and pnt >= st: idx_sub = i return idx_sub def gen_pnt_i_from_pnt(pnt, i_sql_vocab1, i_nlu1, i_hds1): # Find where it belong vg_list = [i_sql_vocab1, [i_nlu1], i_hds1] # as i_nlu has only single st and ed i_vg = -1 i_vg_sub = -1 for i, vg in enumerate(vg_list): idx_sub = find_where_pnt_belong(pnt, vg) if idx_sub > -1: i_vg = i i_vg_sub = idx_sub break return i_vg, i_vg_sub def gen_i_vg_from_pnt_idxs(pnt_idxs, i_sql_vocab, i_nlu, i_hds): i_vg_list = [] i_vg_sub_list = [] for b, pnt_idxs1 in enumerate(pnt_idxs): # if properly generated, sql_q1_list = [] i_vg_list1 = [] # index of (sql_vocab, nlu, hds) i_vg_sub_list1 = [] # index inside of each vocab group for t, pnt in enumerate(pnt_idxs1): i_vg, i_vg_sub = gen_pnt_i_from_pnt(pnt, i_sql_vocab[b], i_nlu[b], i_hds[b]) i_vg_list1.append(i_vg) i_vg_sub_list1.append(i_vg_sub) # sql_q1 = sql_q1.join(' ') # sql_q.append(sql_q1) i_vg_list.append(i_vg_list1) i_vg_sub_list.append(i_vg_sub_list1) return i_vg_list, i_vg_sub_list def gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list, i_vg_sub_list): """ ( "none", "max", "min", "count", "sum", "average", "select", "where", "and", "equal", "greater than", "less than", "start", "end" ), """ sql_q = [] sql_i = [] for b, nlu_t1 in enumerate(nlu_t): sql_q1_list = [] sql_i1 = {} tt_to_t_idx1 = tt_to_t_idx[b] nlu_st_observed = False agg_observed = False wc_obs = False wo_obs = False conds = [] for t, i_vg in enumerate(i_vg_list[b]): i_vg_sub = i_vg_sub_list[b][t] pnt = pnt_idxs[b][t] if i_vg == 0: # sql_vocab if pnt == pnt_start_tok or pnt == pnt_end_tok: pass else: tok = tokens[b][pnt] if tok in ["none", "max", "min", "count", "sum", "average"]: agg_observed = True if tok == "none": pass sql_i1["agg"] = ["none", "max", "min", "count", "sum", "average"].index(tok) else: if tok in ["greater", "less", "equal"]: if tok == 'greater': tok = '>' elif tok == 'less': tok = '<' elif tok == 'equal': tok = '=' # gen conds1 if wc_obs: conds1.append( ['=','>','<'].index(tok) ) wo_obs = True sql_q1_list.append(tok) elif i_vg == 1: # nlu case if not nlu_st_observed: idx_nlu_st = pnt nlu_st_observed = True else: # now to wrap up idx_nlu_ed = pnt st_wh_idx = tt_to_t_idx1[idx_nlu_st - pnt_end_tok - 2] ed_wh_idx = tt_to_t_idx1[idx_nlu_ed - pnt_end_tok - 2] pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx + 1] merged_wv11 = merge_wv_t1_eng(pr_wv_str11, nlu[b]) sql_q1_list.append(merged_wv11) nlu_st_observed = False if wc_obs and wo_obs: conds1.append(merged_wv11) conds.append(conds1) wc_obs = False wo_obs = False elif i_vg == 2: # headers tok = hds[b][i_vg_sub] if agg_observed: sql_q1_list.append(f"({tok})") sql_i1["sel"] = i_vg_sub agg_observed = False else: wc_obs = True conds1 = [i_vg_sub] sql_q1_list.append(tok) # insert table name between. sql_i1["conds"] = conds sql_i.append(sql_i1) sql_q1 = ' '.join(sql_q1_list) sql_q.append(sql_q1) return sql_q, sql_i def get_cnt_lx_list_s2s(g_pnt_idxs, pr_pnt_idxs): # all cnt are list here. 
cnt_list = [] for b, g_pnt_idxs1 in enumerate(g_pnt_idxs): pr_pnt_idxs1 = pr_pnt_idxs[b] if g_pnt_idxs1 == pr_pnt_idxs1: cnt_list.append(1) else: cnt_list.append(0) return cnt_list def get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok'): """ As if [ [table-1-col-1-tok1, t1-c1-t2, ...], [t1-c2-t1, t1-c2-t2, ...]. ... [t2-c1-t1, ...,] ] # i_hds = [ [ Batch 1 ] [ Batch 2 ] ] # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...] # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)], # [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]] pool_type = 'start_tok', 'end_tok', 'avg' """ bS = len(l_hs) l_hs_max = max(l_hs) wemb_h = torch.zeros([bS, l_hs_max, hS]).to(device) for b, i_hds1 in enumerate(i_hds): for i_hd, st_ed_pair in enumerate(i_hds1): st, ed = st_ed_pair if col_pool_type == 'start_tok': vec = all_encoder_layer[-1][b, st,:] elif col_pool_type == 'end_tok': vec = all_encoder_layer[-1][b, ed, :] elif col_pool_type == 'avg': vecs = all_encoder_layer[-1][b, st:ed,:] vec = vecs.mean(dim=1, keepdim=True) else: raise ValueError wemb_h[b, i_hd, :] = vec return wemb_h def cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi): """ :param s_sc: [B, l_h] :param s_sa: [B, l_a] # 16 :param s_wn: [B, 5] :param s_wc: [B, l_h] :param s_wo: [B, 4, l_o] # :param s_wv: [B, 4, 22] :return: """ # First get selected index # # Predict prob p_sc = cal_prob_sc(s_sc, pr_sc) p_sa = cal_prob_sa(s_sa, pr_sa) p_wn = cal_prob_wn(s_wn, pr_wn) p_wc = cal_prob_wc(s_wc, pr_wc) p_wo = cal_prob_wo(s_wo, pr_wo) p_wvi = cal_prob_wvi_se(s_wv, pr_wvi) # calculate select-clause probability p_select = cal_prob_select(p_sc, p_sa) # calculate where-clause probability p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi) # calculate total probability p_tot = cal_prob_tot(p_select, p_where) return p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi def cal_prob_tot(p_select, p_where): p_tot = [] for b, p_select1 in enumerate(p_select): p_where1 = p_where[b] p_tot.append( p_select1 * p_where1 ) return p_tot def cal_prob_select(p_sc, p_sa): p_select = [] for b, p_sc1 in enumerate(p_sc): p1 = 1.0 p1 *= p_sc1 p1 *= p_sa[b] p_select.append(p1) return p_select def cal_prob_where(p_wn, p_wc, p_wo, p_wvi): p_where = [] for b, p_wn1 in enumerate(p_wn): p1 = 1.0 p1 *= p_wn1 p_wc1 = p_wc[b] for i_wn, p_wc11 in enumerate(p_wc1): p_wo11 = p_wo[b][i_wn] p_wv11_st, p_wv11_ed = p_wvi[b][i_wn] p1 *= p_wc11 p1 *= p_wo11 p1 *= p_wv11_st p1 *= p_wv11_ed p_where.append(p1) return p_where def cal_prob_sc(s_sc, pr_sc): ps = F.softmax(s_sc, dim=1) p = [] for b, ps1 in enumerate(ps): pr_sc1 = pr_sc[b] p1 = ps1[pr_sc1] p.append(p1.item()) return p def cal_prob_sa(s_sa, pr_sa): ps = F.softmax(s_sa, dim=1) p = [] for b, ps1 in enumerate(ps): pr_sa1 = pr_sa[b] p1 = ps1[pr_sa1] p.append(p1.item()) return p def cal_prob_wn(s_wn, pr_wn): ps = F.softmax(s_wn, dim=1) p = [] for b, ps1 in enumerate(ps): pr_wn1 = pr_wn[b] p1 = ps1[pr_wn1] p.append(p1.item()) return p def cal_prob_wc(s_wc, pr_wc): ps = torch.sigmoid(s_wc) ps_out = [] for b, pr_wc1 in enumerate(pr_wc): ps1 = array(ps[b].cpu()) ps_out1 = ps1[pr_wc1] ps_out.append(list(ps_out1)) return ps_out def cal_prob_wo(s_wo, pr_wo): # assume there is always at least single condition. 
ps = F.softmax(s_wo, dim=2) ps_out = [] for b, pr_wo1 in enumerate(pr_wo): ps_out1 = [] for n, pr_wo11 in enumerate(pr_wo1): ps11 = ps[b][n] ps_out1.append( ps11[pr_wo11].item() ) ps_out.append(ps_out1) return ps_out def cal_prob_wvi_se(s_wv, pr_wvi): prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy() p_wv = [] for b, pr_wvi1 in enumerate(pr_wvi): p_wv1 = [] for i_wn, pr_wvi11 in enumerate(pr_wvi1): st, ed = pr_wvi11 p_st = prob_wv[b, i_wn, st, 0] p_ed = prob_wv[b, i_wn, ed, 1] p_wv1.append([p_st, p_ed]) p_wv.append(p_wv1) return p_wv def generate_inputs_s2s(tokenizer, nlu1_tt, hds1, sql_vocab1): """ [CLS] sql_vocab [SEP] question [SEP] headers To make sql_vocab in a fixed position. """ tokens = [] segment_ids = [] tokens.append("[CLS]") # sql_vocab i_sql_vocab = [] # for doc for i, sql_vocab11 in enumerate(sql_vocab1): i_st_sql = len(tokens) sub_tok = tokenizer.tokenize(sql_vocab11) tokens += sub_tok i_ed_sql = len(tokens) i_sql_vocab.append((i_st_sql, i_ed_sql)) segment_ids += [1] * len(sub_tok) if i < len(sql_vocab1) - 1: tokens.append("[SEP]") segment_ids.append(0) elif i == len(sql_vocab1) - 1: tokens.append("[SEP]") segment_ids.append(1) else: raise EnvironmentError # question i_st_nlu = len(tokens) # to use it later segment_ids.append(0) for token in nlu1_tt: tokens.append(token) segment_ids.append(0) i_ed_nlu = len(tokens) tokens.append("[SEP]") segment_ids.append(0) i_nlu = (i_st_nlu, i_ed_nlu) # headers i_hds = [] # for doc for i, hds11 in enumerate(hds1): i_st_hd = len(tokens) sub_tok = tokenizer.tokenize(hds11) tokens += sub_tok i_ed_hd = len(tokens) i_hds.append((i_st_hd, i_ed_hd)) segment_ids += [1] * len(sub_tok) if i < len(hds1)-1: tokens.append("[SEP]") segment_ids.append(0) elif i == len(hds1)-1: tokens.append("[SEP]") segment_ids.append(1) else: raise EnvironmentError return tokens, segment_ids, i_sql_vocab, i_nlu, i_hds def sort_pr_wc(pr_wc, g_wc): """ Input: list pr_wc = [B, n_conds] g_wc = [B, n_conds] Return: list pr_wc_sorted = [B, n_conds] """ pr_wc_sorted = [] for b, pr_wc1 in enumerate(pr_wc): g_wc1 = g_wc[b] pr_wc1_sorted = [] if set(g_wc1) == set(pr_wc1): pr_wc1_sorted = deepcopy(g_wc1) else: # no sorting when g_wc1 and pr_wc1 are different. pr_wc1_sorted = deepcopy(pr_wc1) pr_wc_sorted.append(pr_wc1_sorted) return pr_wc_sorted
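# A small, self-contained sanity check for the SQL stringifier above
# (all values are made up for illustration):
#
#     sql_i1 = {'sel': 5, 'agg': 4, 'conds': [[3, 0, '59']]}
#     tb1 = {'id': 'table_1', 'header': ['a', 'b', 'c', 'col3', 'd', 'col5']}
#     generate_sql_q1(sql_i1, tb1)
#     # -> 'SELECT sum(col5) FROM table_1 WHERE col3 = 59'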
29.697827
158
0.565999
4a077d7d6c688a6504f86da37ba4242c33b3b17b
3,065
py
Python
tools/perf/benchmarks/rasterize_and_record_micro.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
tools/perf/benchmarks/rasterize_and_record_micro.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
tools/perf/benchmarks/rasterize_and_record_micro.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from core import perf_benchmark from measurements import rasterize_and_record_micro import page_sets from telemetry import benchmark class _RasterizeAndRecordMicro(perf_benchmark.PerfBenchmark): @classmethod def AddBenchmarkCommandLineArgs(cls, parser): parser.add_option('--start-wait-time', type='float', default=2, help='Wait time before the benchmark is started ' '(must be long enough to load all content)') parser.add_option('--rasterize-repeat', type='int', default=100, help='Repeat each raster this many times. Increase ' 'this value to reduce variance.') parser.add_option('--record-repeat', type='int', default=100, help='Repeat each record this many times. Increase ' 'this value to reduce variance.') parser.add_option('--timeout', type='int', default=120, help='The length of time to wait for the micro ' 'benchmark to finish, expressed in seconds.') parser.add_option('--report-detailed-results', action='store_true', help='Whether to report additional detailed results.') @classmethod def Name(cls): return 'rasterize_and_record_micro' def CreatePageTest(self, options): return rasterize_and_record_micro.RasterizeAndRecordMicro( options.start_wait_time, options.rasterize_repeat, options.record_repeat, options.timeout, options.report_detailed_results) # RasterizeAndRecord disabled on mac because of crbug.com/350684. # RasterizeAndRecord disabled on windows because of crbug.com/338057. @benchmark.Disabled('mac', 'win', 'android') # http://crbug.com/610018 class RasterizeAndRecordMicroTop25(_RasterizeAndRecordMicro): """Measures rasterize and record performance on the top 25 web pages. http://www.chromium.org/developers/design-documents/rendering-benchmarks""" page_set = page_sets.Top25PageSet @classmethod def Name(cls): return 'rasterize_and_record_micro.top_25' def GetExpectations(self): return page_sets.Top25StoryExpectations() # New benchmark only enabled on Linux until we've observed behavior for a # reasonable period of time. @benchmark.Disabled('mac', 'win', 'android') class RasterizeAndRecordMicroPartialInvalidation(_RasterizeAndRecordMicro): """Measures rasterize and record performance for partial inval. on big pages. http://www.chromium.org/developers/design-documents/rendering-benchmarks""" page_set = page_sets.PartialInvalidationCasesPageSet @classmethod def Name(cls): return 'rasterize_and_record_micro.partial_invalidation' def GetExpectations(self): return page_sets.PartialInvalidationCasesStoryExpectations()
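# These classes are normally discovered and driven by Telemetry; an
# illustrative invocation from a Chromium checkout (exact flags may vary by
# revision) is:
#
#   tools/perf/run_benchmark rasterize_and_record_micro.top_25 \
#       --rasterize-repeat=200 --record-repeat=200
#
# Raising the repeat counts trades runtime for lower variance, per the
# option help text above.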
38.797468
80
0.695269
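The benchmark class above only declares command-line options and forwards the parsed values into the measurement. A rough standalone sketch of that plumbing, using plain optparse in place of Telemetry's real argument parser; the harness and the option subset shown are assumptions for illustration only:

import optparse

# Mirror two of the options declared in AddBenchmarkCommandLineArgs above.
parser = optparse.OptionParser()
parser.add_option('--rasterize-repeat', type='int', default=100)
parser.add_option('--record-repeat', type='int', default=100)

options, _ = parser.parse_args(['--rasterize-repeat', '10'])

# CreatePageTest would hand these parsed values to the
# RasterizeAndRecordMicro measurement; here we just show what it receives.
print(options.rasterize_repeat, options.record_repeat)  # -> 10 100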
4a077d7d6d23ce7bc45caf87d0dbd3d8ef5551e9
135
py
Python
falcon/base/file.py
lorne-luo/falcon
a56ee8e121d70086721292ae33f1070a1f7e1f7b
[ "BSD-3-Clause" ]
null
null
null
falcon/base/file.py
lorne-luo/falcon
a56ee8e121d70086721292ae33f1070a1f7e1f7b
[ "BSD-3-Clause" ]
7
2020-02-11T23:56:08.000Z
2022-02-10T07:35:07.000Z
falcon/base/file.py
lorne-luo/falcon
a56ee8e121d70086721292ae33f1070a1f7e1f7b
[ "BSD-3-Clause" ]
1
2021-05-11T09:57:38.000Z
2021-05-11T09:57:38.000Z
import os


def create_folder(path):
    folder = os.path.dirname(path)
    if not os.path.exists(folder):
        os.makedirs(folder)
16.875
34
0.666667
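A usage sketch for create_folder from the record above; the function is copied inline so the snippet runs standalone. Note that it takes a file path and creates the parent directory, so a bare filename would yield an empty dirname, which os.makedirs rejects:

import os
import tempfile

def create_folder(path):  # copied from the record above
    folder = os.path.dirname(path)
    if not os.path.exists(folder):
        os.makedirs(folder)

target = os.path.join(tempfile.mkdtemp(), 'nested', 'dir', 'data.csv')
create_folder(target)  # creates .../nested/dir
assert os.path.isdir(os.path.dirname(target))

On Python 3 the exists-then-create pair can be collapsed into os.makedirs(folder, exist_ok=True), which also avoids the race between the existence check and the creation.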
4a077e46b35c57120a8245209f74be572227f840
2,634
py
Python
test_proj/apps/app1/tests/test_overloads.py
andrewbird2/django-data-validation
8c3a8e14440f0299a5f4b188dfaa7864b53b5123
[ "MIT" ]
1
2020-08-05T16:42:48.000Z
2020-08-05T16:42:48.000Z
test_proj/apps/app1/tests/test_overloads.py
andrewbird2/django-data-validation
8c3a8e14440f0299a5f4b188dfaa7864b53b5123
[ "MIT" ]
1
2020-11-04T07:06:37.000Z
2020-11-04T07:06:37.000Z
test_proj/apps/app1/tests/test_overloads.py
andrewbird2/django-data-validation
8c3a8e14440f0299a5f4b188dfaa7864b53b5123
[ "MIT" ]
1
2020-11-04T02:16:05.000Z
2020-11-04T02:16:05.000Z
import pytest

from datavalidation import data_validator
from datavalidation.runners import ModelValidationRunner, ObjectValidationRunner

from app1.models import Overloaded


def test_bad_instancemethod_overloading():
    """ test that overloading an instance method with an instance method fails """
    try:
        class _Test:
            @data_validator
            def foo(self):
                pass

            @foo.overload
            def foo(self):
                pass
        assert False, "expected exception"
    except RuntimeError:
        pass


def test_bad_classmethod_overloading():
    """ test that overloading a class method with a class method fails """
    try:
        class _Test:
            @data_validator
            @classmethod
            def foo(cls):
                pass

            @foo.overload
            @classmethod
            def foo(cls):
                pass
        assert False, "expected exception"
    except RuntimeError:
        pass


def test_bad_naming():
    """ test that overloading a method with a different name fails """
    try:
        class _Test:
            @data_validator
            def foo(self):
                pass

            @foo.overload
            @classmethod
            def bar(cls):
                pass
        assert False, "expected exception"
    except ValueError:
        pass


@pytest.mark.django_db
def test_model_runner(caplog):
    """ test that the model validation runner uses the class methods from
        overloaded validators
    """
    summaries = ModelValidationRunner(Overloaded).run()
    assert len(summaries) == 3  # 2 overloaded + 1 from BaseModel
    messages = [
        message for name, level, message in caplog.record_tuples
        if name == "app1.models.overloads"
    ]
    assert len(messages) == 2  # the two overloaded functions
    # the model validation runner should use the class methods where available
    assert all("class method" in msg for msg in messages)


@pytest.mark.django_db
def test_object_runner(caplog):
    """ test that the object validation runner uses the instance methods from
        overloaded validators
    """
    obj = Overloaded.objects.first()
    result = ObjectValidationRunner(obj).run(class_methods=True)
    assert result == (3, 0, 0)
    messages = [
        message for name, level, message in caplog.record_tuples
        if name == "app1.models.overloads"
    ]
    assert len(messages) == 2  # the two overloaded functions
    # the object validation runner should use the instance methods where available
    assert all("instance method" in msg for msg in messages)
28.322581
82
0.630979
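The tests above pin down three rules of the overload contract: an instance-method validator may only be overloaded by a classmethod (and vice versa), and the overload must keep the same name. The following toy decorator is NOT the django-data-validation implementation, just a minimal sketch of the checks those tests assert:

class toy_validator:
    """Pairs an instance-method validator with a classmethod overload."""

    def __init__(self, func):
        self.instance_method = None
        self.class_method = None
        self._register(func)
        self.name = self._name(func)

    @staticmethod
    def _name(func):
        return func.__func__.__name__ if isinstance(func, classmethod) else func.__name__

    def _register(self, func):
        if isinstance(func, classmethod):
            if self.class_method is not None:
                raise RuntimeError("already has a classmethod validator")
            self.class_method = func
        else:
            if self.instance_method is not None:
                raise RuntimeError("already has an instance-method validator")
            self.instance_method = func

    def overload(self, func):
        if self._name(func) != self.name:
            raise ValueError("overload must keep the validator's name")
        self._register(func)
        return self


class Demo:
    @toy_validator
    def check(self):
        return "instance"

    @check.overload
    @classmethod
    def check(cls):
        return "class"


try:
    class _Bad:
        @toy_validator
        def foo(self):
            pass

        @foo.overload
        def foo(self):  # second instance method -> rejected
            pass
except RuntimeError as err:
    print("rejected:", err)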
4a077eadf68999876ef3b23fe81822ee0cb0862e
7,533
bzl
Python
intellij_platform_sdk/build_defs.bzl
d-haxton/intellij
3acafac0566ed0b314afb4d0873b289790e9a37d
[ "Apache-2.0" ]
null
null
null
intellij_platform_sdk/build_defs.bzl
d-haxton/intellij
3acafac0566ed0b314afb4d0873b289790e9a37d
[ "Apache-2.0" ]
null
null
null
intellij_platform_sdk/build_defs.bzl
d-haxton/intellij
3acafac0566ed0b314afb4d0873b289790e9a37d
[ "Apache-2.0" ]
null
null
null
"""Convenience methods for plugin_api.""" # The current indirect ij_product mapping (eg. "intellij-latest") INDIRECT_IJ_PRODUCTS = { "intellij-latest": "intellij-2019.2", "intellij-latest-mac": "intellij-2019.2-mac", "intellij-beta": "intellij-2019.3", "intellij-canary": "intellij-2020.1", "intellij-ue-latest": "intellij-ue-2019.2", "intellij-ue-latest-mac": "intellij-ue-2019.2-mac", "intellij-ue-beta": "intellij-ue-2019.3", "intellij-ue-canary": "intellij-ue-2020.1", "android-studio-latest": "android-studio-3.6", "android-studio-beta": "android-studio-3.6", "android-studio-beta-mac": "android-studio-3.6-mac", "android-studio-canary": "android-studio-4.0", "clion-latest": "clion-2019.2", "clion-beta": "clion-2019.3", } DIRECT_IJ_PRODUCTS = { "intellij-2019.2": struct( ide = "intellij", directory = "intellij_ce_2019_2", ), "intellij-2019.2-mac": struct( ide = "intellij", directory = "intellij_ce_2019_2", ), "intellij-ue-2019.2": struct( ide = "intellij-ue", directory = "intellij_ue_2019_2", ), "intellij-ue-2019.2-mac": struct( ide = "intellij-ue", directory = "intellij_ue_2019_2", ), "intellij-2019.3": struct( ide = "intellij", directory = "intellij_ce_2019_3", ), "intellij-2019.3-mac": struct( ide = "intellij", directory = "intellij_ce_2019_3", ), "intellij-ue-2019.3": struct( ide = "intellij-ue", directory = "intellij_ue_2019_3", ), "intellij-ue-2019.3-mac": struct( ide = "intellij-ue", directory = "intellij_ue_2019_3", ), "intellij-2020.1": struct( ide = "intellij", directory = "intellij_ce_2020_1", ), "intellij-2020.1-mac": struct( ide = "intellij", directory = "intellij_ce_2020_1", ), "intellij-ue-2020.1": struct( ide = "intellij-ue", directory = "intellij_ue_2020_1", ), "intellij-ue-2020.1-mac": struct( ide = "intellij-ue", directory = "intellij_ue_2020_1", ), "android-studio-3.6": struct( ide = "android-studio", directory = "android_studio_3_6", ), "android-studio-3.6-mac": struct( ide = "android-studio", directory = "android_studio_3_6", ), "android-studio-4.0": struct( ide = "android-studio", directory = "android_studio_4_0", ), "clion-2019.2": struct( ide = "clion", directory = "clion_2019_2", ), "clion-2019.3": struct( ide = "clion", directory = "clion_2019_3", ), } def select_for_plugin_api(params): """Selects for a plugin_api. Args: params: A dict with ij_product -> value. You may only include direct ij_products here, not indirects (eg. intellij-latest). Returns: A select statement on all plugin_apis. Unless you include a "default", a non-matched plugin_api will result in an error. Example: java_library( name = "foo", srcs = select_for_plugin_api({ "intellij-2016.3.1": [...my intellij 2016.3 sources ....], "intellij-2012.2.4": [...my intellij 2016.2 sources ...], }), ) """ for indirect_ij_product in INDIRECT_IJ_PRODUCTS: if indirect_ij_product in params: error_message = "".join([ "Do not select on indirect ij_product %s. " % indirect_ij_product, "Instead, select on an exact ij_product.", ]) fail(error_message) return _do_select_for_plugin_api(params) def _do_select_for_plugin_api(params): """A version of select_for_plugin_api which accepts indirect products.""" if not params: fail("Empty select_for_plugin_api") expanded_params = dict(**params) # Expand all indirect plugin_apis to point to their # corresponding direct plugin_api. 
# # {"intellij-2016.3.1": "foo"} -> # {"intellij-2016.3.1": "foo", "intellij-latest": "foo"} fallback_value = None for indirect_ij_product, resolved_plugin_api in INDIRECT_IJ_PRODUCTS.items(): if resolved_plugin_api in params: expanded_params[indirect_ij_product] = params[resolved_plugin_api] if not fallback_value: fallback_value = params[resolved_plugin_api] if indirect_ij_product in params: expanded_params[resolved_plugin_api] = params[indirect_ij_product] # Map the shorthand ij_products to full config_setting targets. # This makes it more convenient so the user doesn't have to # fully specify the path to the plugin_apis select_params = dict() for ij_product, value in expanded_params.items(): if ij_product == "default": select_params["//conditions:default"] = value else: select_params["//intellij_platform_sdk:" + ij_product] = value return select( select_params, no_match_error = "define an intellij product version, e.g. --define=ij_product=intellij-latest", ) def select_for_ide(intellij = None, intellij_ue = None, android_studio = None, clion = None, default = []): """Selects for the supported IDEs. Args: intellij: Files to use for IntelliJ. If None, will use default. intellij_ue: Files to use for IntelliJ UE. If None, will use value chosen for 'intellij'. android_studio: Files to use for Android Studio. If None will use default. clion: Files to use for CLion. If None will use default. default: Files to use for any IDEs not passed. Returns: A select statement on all plugin_apis to lists of files, sorted into IDEs. Example: java_library( name = "foo", srcs = select_for_ide( clion = [":cpp_only_sources"], default = [":java_only_sources"], ), ) """ intellij = intellij if intellij != None else default intellij_ue = intellij_ue if intellij_ue != None else intellij android_studio = android_studio if android_studio != None else default clion = clion if clion != None else default ide_to_value = { "intellij": intellij, "intellij-ue": intellij_ue, "android-studio": android_studio, "clion": clion, } # Map (direct ij_product) -> corresponding ide value params = dict() for ij_product, value in DIRECT_IJ_PRODUCTS.items(): params[ij_product] = ide_to_value[value.ide] params["default"] = default return select_for_plugin_api(params) def _plugin_api_directory(value): return "@" + value.directory + "//" def select_from_plugin_api_directory(intellij, android_studio, clion, intellij_ue = None): """Internal convenience method to generate select statement from the IDE's plugin_api directories.""" ide_to_value = { "intellij": intellij, "intellij-ue": intellij_ue if intellij_ue else intellij, "android-studio": android_studio, "clion": clion, } # Map (direct ij_product) -> corresponding product directory params = dict() for ij_product, value in DIRECT_IJ_PRODUCTS.items(): params[ij_product] = [_plugin_api_directory(value) + item for item in ide_to_value[value.ide]] # No ij_product == intellij-latest params["default"] = params[INDIRECT_IJ_PRODUCTS["intellij-latest"]] return select_for_plugin_api(params)
34.240909
107
0.628169
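The core of _do_select_for_plugin_api in the record above is the alias expansion between indirect products (e.g. "intellij-latest") and the direct products they resolve to. A plain-Python sketch of just that step, with a made-up one-entry mapping standing in for INDIRECT_IJ_PRODUCTS:

# Toy alias table: indirect name -> direct product it currently resolves to.
INDIRECT = {"intellij-latest": "intellij-2019.2"}

params = {"intellij-2019.2": ["srcs_2019_2"]}
expanded = dict(params)
for alias, direct in INDIRECT.items():
    if direct in params:
        expanded[alias] = params[direct]    # mirror direct entry under the alias
    if alias in params:
        expanded[direct] = params[alias]    # and vice versa

print(expanded)
# {'intellij-2019.2': ['srcs_2019_2'], 'intellij-latest': ['srcs_2019_2']}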
4a077f3fbedc8a5776aa5a74591d5e2d5034d470
7,469
py
Python
deploy.py
varun-raghavendra/fl_faas_fabric
36310d24805c5bd7258f2e432997ac9b91aee61a
[ "MIT" ]
6
2021-05-19T20:36:55.000Z
2022-03-20T05:56:21.000Z
deploy.py
varun-raghavendra/fl_faas_fabric
36310d24805c5bd7258f2e432997ac9b91aee61a
[ "MIT" ]
null
null
null
deploy.py
varun-raghavendra/fl_faas_fabric
36310d24805c5bd7258f2e432997ac9b91aee61a
[ "MIT" ]
2
2022-03-16T08:59:15.000Z
2022-03-20T11:47:55.000Z
#!/usr/bin/env python

import yaml
import sys, getopt
from typing import List
import traceback
import asyncio
import json
import logging

from Clusters import BaseDeployment
from Clusters import OpenWhiskDeployment
from Clusters import GoogleDeployment

functions_meta = []

from commons.Logger import ScriptLogger

logging.basicConfig(level=logging.DEBUG)
logger = ScriptLogger(__name__, 'SWI.log')
logger.setLevel(logging.DEBUG)
logging.captureWarnings(True)


async def deploy_to_clusters(configfile: str,
                             provider: str,
                             scenario_name: str,
                             functions_list: list,
                             cluster_obj: BaseDeployment = None,
                             providers_list: list = None,
                             all_clusters: bool = False):
    with open(configfile, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
            if all_clusters:
                for cluster in data['providers'][provider]:
                    curr_cluster = data['providers'][provider][cluster]
                    scenario = data['scenarios'][scenario_name]
                    for function in functions_list:
                        function_object = scenario['functions'][function][provider]
                        await cluster_obj.deploy(curr_cluster, function, function_object)
            else:
                for cluster_name in providers_list:
                    for cluster in data['providers'][provider]:
                        curr_cluster = data['providers'][provider][cluster]
                        scenario = data['scenarios'][scenario_name]
                        if cluster_name == cluster:
                            for function in functions_list:
                                function_object = scenario['functions'][function][provider]
                                await cluster_obj.deploy(curr_cluster, function, function_object)
                            break
        except yaml.YAMLError as exc:
            print(exc)


async def remove_from_clusters(configfile: str,
                               provider: str,
                               scenario_name: str,
                               functions_list: list,
                               cluster_obj: BaseDeployment = None,
                               providers_list: list = None,
                               all_clusters: bool = False):
    with open(configfile, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
            if all_clusters:
                for cluster in data['providers'][provider]:
                    curr_cluster = data['providers'][provider][cluster]
                    scenario = data['scenarios'][scenario_name]
                    for function in functions_list:
                        function_object = scenario['functions'][function][provider]
                        await cluster_obj.delete(curr_cluster, function, function_object)
            else:
                for cluster_name in providers_list:
                    for cluster in data['providers'][provider]:
                        curr_cluster = data['providers'][provider][cluster]
                        scenario = data['scenarios'][scenario_name]
                        if cluster_name == cluster:
                            for function in functions_list:
                                function_object = scenario['functions'][function][provider]
                                await cluster_obj.delete(curr_cluster, function, function_object)
                            break
        except yaml.YAMLError as exc:
            print(exc)


async def main(argv):
    openwhisk_obj = OpenWhiskDeployment()
    google_obj = GoogleDeployment()

    configfile = ''
    all_providers = False
    ow_providers_list = []
    gcf_providers_list = []
    functions_list = []
    scenario_name = ""
    deployment = False
    remove = False
    meta = False

    try:
        arguments, values = getopt.getopt(argv, "hc:ao:g:s:f:drm",
                                          ["help", "configfile=", "all_providers",
                                           "ow_providers_list=", "gcf_providers_list=",
                                           "scenario_name=", "functions_list=",
                                           "deploy", "remove", "get_meta_data"])
    except getopt.GetoptError:
        print('deploy.py -c <configfile path> -a <for all providers> '
              '-o <OW provider_list separated by comma> -g <GCF provider_list separated by comma> '
              '-s <scenario_name> -f <functions list separated by comma> '
              '-m <for saving functions meta data in a file>'
              '-d <for deploying> -r <for removing>')
        sys.exit(2)

    for current_argument, current_value in arguments:
        if current_argument in ("-h", "--help"):
            print('python3 deploy.py \n -c <configfile path> \n -a <for all providers> '
                  '\n -o <OW provider_list separated by comma> \n -g <GCF provider_list separated by comma>'
                  '\n -s <scenario_name> '
                  '\n -f <functions separated by comma> \n -m <for saving functions meta data in a file> '
                  '\n -d <for deploying> \n -r <for removing>')
        elif current_argument in ("-c", "--configfile"):
            configfile = current_value
        elif current_argument in ("-a", "--all_providers"):
            all_providers = True
        elif current_argument in ("-d", "--deploy"):
            deployment = True
        elif current_argument in ("-r", "--remove"):
            remove = True
        elif current_argument in ("-o", "--ow_providers_list"):
            all_arguments = current_value.split(',')
            ow_providers_list = all_arguments
        elif current_argument in ("-g", "--gcf_providers_list"):
            all_arguments = current_value.split(',')
            gcf_providers_list = all_arguments
        elif current_argument in ("-s", "--scenario_name"):
            scenario_name = current_value
        elif current_argument in ("-f", "--functions_list"):
            all_arguments = current_value.split(',')
            functions_list = all_arguments

    tasks: List[asyncio.Task] = []

    if deployment:
        tasks.append(
            asyncio.create_task(
                deploy_to_clusters(configfile, 'openwhisk', scenario_name, functions_list,
                                   openwhisk_obj, ow_providers_list, all_providers)
            )
        )
        tasks.append(
            asyncio.create_task(
                deploy_to_clusters(configfile, 'google', scenario_name, functions_list,
                                   google_obj, gcf_providers_list, all_providers)
            )
        )
    elif remove:
        tasks.append(
            asyncio.create_task(
                remove_from_clusters(configfile, 'openwhisk', scenario_name, functions_list,
                                     openwhisk_obj, ow_providers_list, all_providers)
            )
        )
        tasks.append(
            asyncio.create_task(
                remove_from_clusters(configfile, 'google', scenario_name, functions_list,
                                     google_obj, gcf_providers_list, all_providers)
            )
        )

    # wait for all workers
    if len(tasks):
        try:
            await asyncio.wait(tasks)
        except Exception as e:
            print("Exception in main worker loop")
            print(e)
            traceback.print_exc()

    print("All deployment/removal finished")


if __name__ == "__main__":
    asyncio.run(main(sys.argv[1:]))
41.960674
109
0.560985
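main() in the record above fans the work out as one asyncio task per provider type and then blocks on asyncio.wait. A stripped-down, runnable sketch of that fan-out pattern, with a stub coroutine standing in for deploy_to_clusters:

import asyncio

async def deploy_stub(provider, functions):
    await asyncio.sleep(0)  # stands in for the real deployment I/O
    print(f"deployed {functions} to {provider}")

async def main():
    # one task per provider type, mirroring the openwhisk/google split above
    tasks = [
        asyncio.create_task(deploy_stub("openwhisk", ["f1", "f2"])),
        asyncio.create_task(deploy_stub("google", ["f1", "f2"])),
    ]
    if tasks:
        await asyncio.wait(tasks)

asyncio.run(main())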
4a077fe5a0441d17f0620091c48549194d1a0098
1,544
py
Python
openfe/tests/setup/test_lomap_atommapper.py
mikemhenry/openfe
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
[ "MIT" ]
null
null
null
openfe/tests/setup/test_lomap_atommapper.py
mikemhenry/openfe
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
[ "MIT" ]
null
null
null
openfe/tests/setup/test_lomap_atommapper.py
mikemhenry/openfe
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
[ "MIT" ]
null
null
null
# This code is part of OpenFE and is licensed under the MIT license.
# For details, see https://github.com/OpenFreeEnergy/openfe
import pytest
from rdkit import Chem

import openfe
from openfe.setup import LomapAtomMapper, LigandMolecule


def test_simple(lomap_basic_test_files):
    # basic sanity check on the LigandAtomMapper
    mol1 = lomap_basic_test_files['methylcyclohexane']
    mol2 = lomap_basic_test_files['toluene']

    mapper = LomapAtomMapper()

    mapping_gen = mapper.suggest_mappings(mol1, mol2)
    mapping = next(mapping_gen)

    assert isinstance(mapping, openfe.setup.LigandAtomMapping)
    # methylcyclohexane to toluene is a 1:1 mapping between all atoms
    # so 7 values should be present
    assert len(mapping.mol1_to_mol2) == 7


def test_generator_length(lomap_basic_test_files):
    # check that we get one mapping back from Lomap LigandAtomMapper then the
    # generator stops correctly
    mol1 = lomap_basic_test_files['methylcyclohexane']
    mol2 = lomap_basic_test_files['toluene']

    mapper = LomapAtomMapper()

    mapping_gen = mapper.suggest_mappings(mol1, mol2)

    _ = next(mapping_gen)
    with pytest.raises(StopIteration):
        next(mapping_gen)


def test_bad_mapping(lomap_basic_test_files):
    toluene = lomap_basic_test_files['toluene']
    NigelTheNitrogen = LigandMolecule(Chem.MolFromSmiles('N'), name='Nigel')

    mapper = LomapAtomMapper()

    mapping_gen = mapper.suggest_mappings(toluene, NigelTheNitrogen)
    with pytest.raises(StopIteration):
        next(mapping_gen)
30.27451
77
0.754534
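test_generator_length and test_bad_mapping in the record above pin down the generator contract of suggest_mappings: it yields at most one mapping, after which next() raises StopIteration. A toy mapper — not the openfe API, just an illustration of that contract — makes the shape explicit:

class ToyMapper:
    def suggest_mappings(self, mol1, mol2):
        # yield a single mapping if one exists, then let the generator end
        mapping = self._best_mapping(mol1, mol2)
        if mapping is not None:
            yield mapping

    def _best_mapping(self, mol1, mol2):
        # placeholder scoring: only "compatible" molecules map at all
        return {"atoms": {0: 0}} if mol1 == mol2 else None

gen = ToyMapper().suggest_mappings("toluene", "toluene")
print(next(gen))    # the single suggested mapping
try:
    next(gen)       # generator is exhausted
except StopIteration:
    print("no more mappings")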
4a07815e84acb8e0e44ebecaecc3982f1555eca9
168,831
py
Python
numpy/lib/function_base.py
chatcannon/numpy
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
[ "BSD-3-Clause" ]
1
2022-02-16T05:32:38.000Z
2022-02-16T05:32:38.000Z
numpy/lib/function_base.py
chatcannon/numpy
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
[ "BSD-3-Clause" ]
null
null
null
numpy/lib/function_base.py
chatcannon/numpy
f1b3f00f7abdd97d59dc5b1c0bb922a692452736
[ "BSD-3-Clause" ]
1
2018-11-15T19:41:09.000Z
2018-11-15T19:41:09.000Z
from __future__ import division, absolute_import, print_function import collections import operator import re import sys import warnings import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d, transpose from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar, absolute, AxisError ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10 ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import ( _insert, add_docstring, digitize, bincount, normalize_axis_index, interp as compiled_interp, interp_complex as compiled_interp_complex ) from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring if sys.version_info[0] < 3: # Force range to be a generator, for np.delete's usage. range = xrange import __builtin__ as builtins else: import builtins __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def rot90(m, k=1, axes=(0,1)): """ Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes: (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. Returns ------- y : ndarray A rotated view of `m`. See Also -------- flip : Reverse the order of elements in an array along the given axis. fliplr : Flip an array horizontally. flipud : Flip an array vertically. Notes ----- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) Examples -------- >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]) """ axes = tuple(axes) if len(axes) != 2: raise ValueError("len(axes) must be 2.") m = asanyarray(m) if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: raise ValueError("Axes must be different.") if (axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim): raise ValueError("Axes={} out of range for array of ndim={}." 
.format(axes, m.ndim)) k %= 4 if k == 0: return m[:] if k == 2: return flip(flip(m, axes[0]), axes[1]) axes_list = arange(0, m.ndim) (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]]) if k == 1: return transpose(flip(m,axes[1]), axes_list) else: # k == 3 return flip(transpose(m, axes_list), axes[1]) def flip(m, axis): """ Reverse the order of elements in an array along the given axis. The shape of the array is preserved, but the elements are reordered. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Input array. axis : integer Axis in array, which entries are reversed. Returns ------- out : array_like A view of `m` with the entries of axis reversed. Since a view is returned, this operation is done in constant time. See Also -------- flipud : Flip an array vertically (axis=0). fliplr : Flip an array horizontally (axis=1). Notes ----- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. Examples -------- >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) >>> flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) >>> A = np.random.randn(3,4,5) >>> np.all(flip(A,2) == A[:,:,::-1,...]) True """ if not hasattr(m, 'ndim'): m = asarray(m) indexer = [slice(None)] * m.ndim try: indexer[axis] = slice(None, None, -1) except IndexError: raise ValueError("axis=%i is invalid for the %i-dimensional input array" % (axis, m.ndim)) return m[tuple(indexer)] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : bool Return ``True`` if the object has an iterator method or is a sequence and ``False`` otherwise. Examples -------- >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) False """ try: iter(y) except TypeError: return False return True def _hist_bin_sqrt(x): """ Square root histogram bin estimator. Bin width is inversely proportional to the data size. Used by many programs for its simplicity. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / np.sqrt(x.size) def _hist_bin_sturges(x): """ Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (np.log2(x.size) + 1.0) def _hist_bin_rice(x): """ Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x): """ Scott histogram bin estimator. 
The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) def _hist_bin_doane(x): """ Doane's histogram bin estimator. Improved version of Sturges' formula which works better for non-normal data. See stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ if x.size > 2: sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) sigma = np.std(x) if sigma > 0.0: # These three operations add up to # g1 = np.mean(((x - np.mean(x)) / sigma)**3) # but use only one temp array instead of three temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) return x.ptp() / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 def _hist_bin_fd(x): """ The Freedman-Diaconis histogram bin estimator. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate binwidth. It is considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions. If the IQR is 0, this function returns 1 for the number of bins. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ iqr = np.subtract(*np.percentile(x, [75, 25])) return 2.0 * iqr * x.size ** (-1.0 / 3.0) def _hist_bin_auto(x): """ Histogram bin estimator that uses the minimum width of the Freedman-Diaconis and Sturges estimators. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small `x`. The Sturges estimator is quite good for small (<1000) datasets and is the default in the R language. This method gives good off the shelf behaviour. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. See Also -------- _hist_bin_fd, _hist_bin_sturges """ # There is no need to check for zero here. If ptp is, so is IQR and # vice versa. Either both are zero or neither one is. return min(_hist_bin_fd(x), _hist_bin_sturges(x)) # Private dict initialized at module load time _hist_bin_selectors = {'auto': _hist_bin_auto, 'doane': _hist_bin_doane, 'fd': _hist_bin_fd, 'rice': _hist_bin_rice, 'scott': _hist_bin_scott, 'sqrt': _hist_bin_sqrt, 'sturges': _hist_bin_sturges} def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): r""" Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). 
If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string from the list below, `histogram` will use the method chosen to calculate the optimal bin width and consequently the number of bins (see `Notes` for more detail on the estimators) from the data that falls within the requested range. While the bin width will be optimal for the actual data in the range, the number of bins will be computed to fill the entire range, including the empty portions. For visualisation, using the 'auto' option is suggested. Weighted data is not supported for automated bin size selection. 'auto' Maximum of the 'sturges' and 'fd' estimators. Provides good all around performance. 'fd' (Freedman Diaconis Estimator) Robust (resilient to outliers) estimator that takes into account data variability and data size. 'doane' An improved version of Sturges' estimator that works better with non-normal datasets. 'scott' Less robust estimator that that takes into account data variability and data size. 'rice' Estimator does not take variability into account, only data size. Commonly overestimates number of bins required. 'sturges' R's default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets. 'sqrt' Square root (of data size) estimator, used by Excel and other programs for its speed and simplicity. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy behavior. It will be removed in NumPy 2.0.0. Use the ``density`` keyword instead. If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that this latter behavior is known to be buggy with unequal bin widths; use ``density`` instead. weights : array_like, optional An array of weights, of the same shape as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd, bincount, searchsorted, digitize Notes ----- All but the last (righthand-most) bin is half-open. 
In other words, if `bins` is:: [1, 2, 3, 4] then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 1.11.0 The methods to estimate the optimal number of bins are well founded in literature, and are inspired by the choices R provides for histogram visualisation. Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal, which is why it appears in most estimators. These are simply plug-in methods that give good starting points for number of bins. In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins. All estimators that compute bin counts are recast to bin width using the `ptp` of the data. The final bin count is obtained from ``np.round(np.ceil(range / h))`. 'Auto' (maximum of the 'Sturges' and 'FD' estimators) A compromise to get a good value. For small datasets the Sturges value will usually be chosen, while larger datasets will usually default to FD. Avoids the overly conservative behaviour of FD and Sturges for small and large datasets respectively. Switchover point is usually :math:`a.size \approx 1000`. 'FD' (Freedman Diaconis Estimator) .. math:: h = 2 \frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} The binwidth is proportional to the standard deviation of the data and inversely proportional to cube root of ``x.size``. Can be too conservative for small datasets, but is quite good for large datasets. The standard deviation is not very robust to outliers. Values are very similar to the Freedman-Diaconis estimator in the absence of outliers. 'Rice' .. math:: n_h = 2n^{1/3} The number of bins is only proportional to cube root of ``a.size``. It tends to overestimate the number of bins and it does not take into account data variability. 'Sturges' .. math:: n_h = \log _{2}n+1 The number of bins is the base 2 log of ``a.size``. This estimator assumes normality of data and is too conservative for larger, non-normal datasets. This is the default method in R's ``hist`` method. 'Doane' .. math:: n_h = 1 + \log_{2}(n) + \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) g_1 = mean[(\frac{x - \mu}{\sigma})^3] \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} An improved version of Sturges' formula that produces better estimates for non-normal datasets. This estimator attempts to account for the skew of the data. 'Sqrt' .. math:: n_h = \sqrt n The simplest and fastest estimator. Only takes into account the data size. Examples -------- >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist*np.diff(bin_edges)) 1.0 .. 
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() # Do not modify the original value of range so we can check for `None` if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. mn, mx = 0.0, 1.0 else: mn, mx = a.min() + 0.0, a.max() + 0.0 else: mn, mx = [mi + 0.0 for mi in range] if mn > mx: raise ValueError( 'max must be larger than min in range parameter.') if not np.all(np.isfinite([mn, mx])): raise ValueError( 'range parameter must be finite.') if mn == mx: mn -= 0.5 mx += 0.5 if isinstance(bins, basestring): # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated if bins not in _hist_bin_selectors: raise ValueError("{0} not a valid estimator for bins".format(bins)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") # Make a reference to `a` b = a # Update the reference if the range needs truncation if range is not None: keep = (a >= mn) keep &= (a <= mx) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: bins = 1 else: # Do not call selectors on empty arrays width = _hist_bin_selectors[bins](b) if width: bins = int(np.ceil((mx - mn) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. bins = 1 # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, np.complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram n = np.zeros(bins, ntype) # Pre-compute histogram scaling factor norm = bins / (mx - mn) # Compute the bin edges for potential correction. bin_edges = linspace(mn, mx, bins + 1, endpoint=True) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= mn) keep &= (tmp_a <= mx) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) tmp_a = tmp_a_data - mn tmp_a *= norm # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) indices[indices == bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) else: n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) # Rename the bin edges for return. bins = bin_edges else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise ValueError( 'bins must increase monotonically.') # Initialize empty histogram n = np.zeros(bins.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for NumPy 2.0.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. 
edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: if not np.all(np.isfinite(range)): raise ValueError( 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. 
if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which to average `a`. The default, axis=None, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
""" a = np.asanyarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: wgt = np.asanyarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') else: result_dtype = np.result_type(a.dtype, wgt.dtype) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) wgt = wgt.swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=result_dtype) if np.any(scl == 0.0): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl if returned: if scl.shape != avg.shape: scl = np.broadcast_to(scl, avg.shape).copy() return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print('ValueError') ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray or scalar The input domain. condlist : list of bool arrays or bool scalars Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. 
If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is called as ``f(x, alpha=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. >>> y = -2 >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) array(2) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): if not isscalar(condlist) and x.size == 1 and x.ndim == 0: condlist = [[c] for c in condlist] else: condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) # Only able to stack vertically if the array is 1d or less if x.ndim <= 1: condlist = np.vstack([condlist, ~totlist]) else: condlist = [asarray(c, dtype=bool) for c in condlist] totlist = condlist[0] for k in range(1, n): totlist |= condlist[k] condlist.append(~totlist) n += 1 y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. 
choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False. Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning, stacklevel=2) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning, stacklevel=2) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. 
Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences in the interior points and either first or second order accurate one-sides (forward or backwards) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: 1. single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. Default: 1. edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.11.0 Returns ------- gradient : ndarray or list of ndarray A set of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. For instance a uniform spacing: >>> x = np.arange(f.size) >>> np.gradient(f, x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2. 
, 1.7, 0.5]])]

    It is possible to specify how boundaries are treated using `edge_order`

    >>> x = np.array([0, 1, 2, 3, 4])
    >>> f = x**2
    >>> np.gradient(f, edge_order=1)
    array([ 1.,  2.,  4.,  6.,  7.])
    >>> np.gradient(f, edge_order=2)
    array([-0.,  2.,  4.,  6.,  8.])

    The `axis` keyword can be used to specify a subset of axes along which
    the gradient is calculated

    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
    array([[ 2.,  2., -1.],
           [ 2.,  2., -1.]])

    Notes
    -----
    Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3
    continuous derivatives) and letting :math:`h_{*}` be a non-homogeneous
    stepsize, the finite difference coefficients are computed by minimising
    the consistency error :math:`\\eta_{i}`:

    .. math::

        \\eta_{i} = f_{i}^{\\left(1\\right)} -
                    \\left[ \\alpha f\\left(x_{i}\\right) +
                            \\beta f\\left(x_{i} + h_{d}\\right) +
                            \\gamma f\\left(x_{i}-h_{s}\\right)
                    \\right]

    By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
    with their Taylor series expansion, this translates into solving
    the following linear system:

    .. math::

        \\left\\{
            \\begin{array}{r}
                \\alpha+\\beta+\\gamma=0 \\\\
                -\\beta h_{d}+\\gamma h_{s}=1 \\\\
                \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
            \\end{array}
        \\right.

    The resulting approximation of :math:`f_{i}^{(1)}` is the following:

    .. math::

        \\hat f_{i}^{(1)} =
            \\frac{
                h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
                + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
                - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
                {h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
            + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
                                + h_{s}h_{d}^{2}}{h_{d}
                                + h_{s}}\\right)

    It is worth noting that if :math:`h_{s}=h_{d}`
    (i.e., data are evenly spaced)
    we find the standard second order approximation:

    .. math::

        \\hat f_{i}^{(1)}=
            \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
            + \\mathcal{O}\\left(h^{2}\\right)

    With a similar procedure the forward/backward approximations used for
    boundaries can be derived.

    References
    ----------
    .. [1]  Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
            (Texts in Applied Mathematics). New York: Springer.
    .. [2]  Durran D. R. (1999) Numerical Methods for Wave Equations in
            Geophysical Fluid Dynamics. New York: Springer.
    .. [3]  Fornberg B. (1988) Generation of Finite Difference Formulas on
            Arbitrarily Spaced Grids, Mathematics of Computation 51,
            no. 184 : 699-706.
            `PDF <http://www.ams.org/journals/mcom/1988-51-184/
            S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
""" f = np.asanyarray(f) N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) else: axes = _nx.normalize_axis_tuple(axes, N) len_axes = len(axes) n = len(varargs) if n == 0: dx = [1.0] * len_axes elif n == len_axes or (n == 1 and np.isscalar(varargs[0])): dx = list(varargs) for i, distances in enumerate(dx): if np.isscalar(distances): continue if len(distances) != f.shape[axes[i]]: raise ValueError("distances must be either scalars or match " "the length of the corresponding dimension") diffx = np.diff(dx[i]) # if distances are constant reduce to the scalar case # since it brings a consistent speedup if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx if len(dx) == 1: dx *= len_axes else: raise TypeError("invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: otype = 'd' # Difference of datetime64 elements results in timedelta64 if otype == 'M': # Need to use the full dtype name because it contains unit information otype = f.dtype.name.replace('datetime', 'timedelta') elif otype == 'm': # Needs to keep the specific units, can't be a general unit otype = f.dtype # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise # just set y equal to the array `f`. if f.dtype.char in ["M", "m"]: y = f.view('int64') else: y = f for i, axis in enumerate(axes): if y.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation out = np.empty_like(y, dtype=otype) uniform_spacing = np.isscalar(dx[i]) # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) if uniform_spacing: out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) else: dx1 = dx[i][0:-1] dx2 = dx[i][1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 dx_0 = dx[i] if uniform_spacing else dx[i][0] # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) out[slice1] = (y[slice2] - y[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = dx[i] if uniform_spacing else dx[i][-1] # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) out[slice1] = (y[slice2] - y[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: a = -1.5 / dx[i] b = 2. 
/ dx[i] c = -0.5 / dx[i] else: dx1 = dx[i][0] dx2 = dx[i][1] a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: a = 0.5 / dx[i] b = -2. / dx[i] c = 1.5 / dx[i] else: dx1 = dx[i][-2] dx2 = dx[i][-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len_axes == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The type of the output is the same as that of the input. See Also -------- gradient, ediff1d, cumsum Notes ----- For boolean arrays, the preservation of type means that the result will contain `False` when consecutive elements are the same and `True` when they differ. Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = a.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) if n > 1: return diff(a[slice1]-a[slice2], n-1, axis=axis) else: return a[slice1]-a[slice2] def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of float or complex The y-coordinates of the data points, same length as `xp`. left : optional float or complex corresponding to fp Value to return for `x < xp[0]`, default is `fp[0]`. right : optional float or complex corresponding to fp Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. 
versionadded:: 1.10.0 Returns ------- y : float or complex (corresponding to fp) or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) Complex interpolation >>> x = [1.5, 4.0] >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) array([ 0.+1.j , 1.+1.5j]) """ fp = np.asarray(fp) if np.iscomplexobj(fp): interp_func = compiled_interp_complex input_dtype = np.complex128 else: interp_func = compiled_interp input_dtype = np.float64 if period is None: if isinstance(x, (float, int, number)): return interp_func([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return interp_func([x], xp, fp, left, right).item() else: return interp_func(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=input_dtype) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return interp_func(x, xp, fp, left, right) else: return interp_func(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
, 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. 
""" try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N, it will be repeated, and if elements of `a` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ if not isinstance(arr, np.ndarray): raise TypeError("argument 1 must be numpy.ndarray, " "not {name}".format(name=type(arr).__name__)) return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. 
Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return # See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) _ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) _ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) _SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) def _parse_gufunc_signature(signature): """ Parse string signatures for a generalized universal function. Arguments --------- signature : string Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` for ``np.matmul``. Returns ------- Tuple of input and output core dimensions parsed from the signature, each of the form List[Tuple[str, ...]]. """ if not re.match(_SIGNATURE, signature): raise ValueError( 'not a valid gufunc signature: {}'.format(signature)) return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) def _update_dim_sizes(dim_sizes, arg, core_dims): """ Incrementally check and update core dimension sizes for a single argument. Arguments --------- dim_sizes : Dict[str, int] Sizes of existing core dimensions. Will be updated in-place. arg : ndarray Argument to examine. core_dims : Tuple[str, ...] Core dimensions for this argument. """ if not core_dims: return num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( '%d-dimensional argument does not have enough ' 'dimensions for all core dimensions %r' % (arg.ndim, core_dims)) core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( 'inconsistent size for core dimension %r: %r vs %r' % (dim, size, dim_sizes[dim])) else: dim_sizes[dim] = size def _parse_input_dimensions(args, input_core_dims): """ Parse broadcast and core dimensions for vectorize with a signature. Arguments --------- args : Tuple[ndarray, ...] Tuple of input arguments to examine. input_core_dims : List[Tuple[str, ...]] List of core dimensions corresponding to each input. Returns ------- broadcast_shape : Tuple[int, ...] Common shape to broadcast all non-core dimensions to. dim_sizes : Dict[str, int] Common sizes for named core dimensions. 
""" broadcast_args = [] dim_sizes = {} for arg, core_dims in zip(args, input_core_dims): _update_dim_sizes(dim_sizes, arg, core_dims) ndim = arg.ndim - len(core_dims) dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) broadcast_args.append(dummy_array) broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) return broadcast_shape, dim_sizes def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): """Helper for calculating broadcast shapes with core dimensions.""" return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) for core_dims in list_of_core_dims] def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): """Helper for creating output arrays in vectorize.""" shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) arrays = tuple(np.empty(shape, dtype=dtype) for shape, dtype in zip(shapes, dtypes)) return arrays class vectorize(object): """ vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns an single or tuple of numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for vectorized matrix-vector multiplication. If provided, ``pyfunc`` will be called with (and expected to return) arrays with shapes given by the size of corresponding core dimensions. By default, ``pyfunc`` is assumed to take scalars as input and output. .. versionadded:: 1.12.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... 
return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified: >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified: >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a vectorized calculation of Pearson correlation coefficient and its p-value: >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, ... signature='(n),(n)->(),()') >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) array([[ 1., 2., 1., 0., 0., 0.], [ 0., 1., 2., 1., 0., 0.], [ 0., 0., 1., 2., 1., 0.], [ 0., 0., 0., 1., 2., 1.]]) See Also -------- frompyfunc : Takes an arbitrary Python function and returns a ufunc Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. References ---------- .. [1] NumPy Reference, section `Generalized Universal Function API <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_. 
""" def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None): self.pyfunc = pyfunc self.cache = cache self.signature = signature self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): for char in otypes: if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): otypes = ''.join([_nx.dtype(x).char for x in otypes]) elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) if signature is not None: self._in_and_out_core_dims = _parse_gufunc_signature(signature) else: self._in_and_out_core_dims = None def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes is not None: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) args = [asarray(arg) for arg in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') inputs = [arg.flat[0] for arg in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if self.signature is not None: res = self._vectorize_call_with_signature(func, args) elif not args: res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(a, copy=False, subok=True, dtype=object) for a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: res = tuple([array(x, copy=False, subok=True, dtype=t) for x, t in zip(outputs, otypes)]) return res def _vectorize_call_with_signature(self, func, args): """Vectorized call over positional arguments with a signature.""" input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): raise TypeError('wrong number of positional arguments: ' 'expected %r, got %r' % (len(input_core_dims), len(args))) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( args, input_core_dims) input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, input_core_dims) args = [np.broadcast_to(arg, shape, subok=True) for arg, shape in zip(args, input_shapes)] outputs = None otypes = self.otypes nout = len(output_core_dims) for index in np.ndindex(*broadcast_shape): results = func(*(arg[index] for arg in args)) n_results = len(results) if isinstance(results, tuple) else 1 if nout != n_results: raise ValueError( 'wrong number of outputs from pyfunc: expected %r, got %r' % (nout, n_results)) if nout == 1: results = (results,) if outputs is None: for result, core_dims in zip(results, output_core_dims): _update_dim_sizes(dim_sizes, result, core_dims) if otypes is None: otypes = [asarray(result).dtype for result in results] outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) for output, result in zip(outputs, results): output[index] = result if outputs is None: # did not call the function even once if otypes is None: raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') if builtins.any(dim not in dim_sizes for dims in output_core_dims for dim in dims): raise ValueError('cannot call `vectorize` with a signature ' 'including new output dimensions on size 0 ' 'inputs') outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) return outputs[0] if nout == 1 else outputs def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. 
Otherwise, the relationship is transposed: each column represents
        a variable, while the rows contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. These values can be overridden by
        using the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        If not ``None`` the default value implied by `bias` is overridden.
        Note that ``ddof=1`` will return the unbiased estimate, even if both
        `fweights` and `aweights` are specified, and ``ddof=0`` will return
        the simple average. See the notes for the details. The default value
        is ``None``.

        .. versionadded:: 1.5
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.

        .. versionadded:: 1.10
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller
        for observations considered less "important". If ``ddof=0`` the array
        of weights can be used to assign probabilities to observation vectors.

        .. versionadded:: 1.10

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Notes
    -----
    Assume that the observations are in the columns of the observation
    array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
    steps to compute the weighted covariance are as follows::

        >>> w = f * a
        >>> v1 = np.sum(w)
        >>> v2 = np.sum(w * a)
        >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
        >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)

    Note that when ``a == 1``, the normalization factor
    ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
    as it should.

    Examples
    --------
    Consider two variables, :math:`x_0` and :math:`x_1`, which
    correlate perfectly, but in opposite directions:

    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    array([[0, 1, 2],
           [2, 1, 0]])

    Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
    matrix shows this clearly:

    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])

    Note that element :math:`C_{0,1}`, which shows the correlation between
    :math:`x_0` and :math:`x_1`, is negative.
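    Frequency weights are equivalent to repeating observations; for example,
    counting the last observation twice gives the same result as passing the
    repeated sample explicitly (a quick check of the ``fweights`` keyword
    described above):

    >>> np.cov(x, fweights=[1, 1, 2])
    array([[ 0.91666667, -0.91666667],
           [-0.91666667,  0.91666667]])
    >>> np.cov([[0, 1, 2, 2], [2, 1, 0, 0]])
    array([[ 0.91666667, -0.91666667],
           [-0.91666667,  0.91666667]])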
Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x)) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if m.ndim > 2: raise ValueError("m has more than 2 dimensions") if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) if y.ndim > 2: raise ValueError("y has more than 2 dimensions") dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if not rowvar and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T X = np.vstack((X, y)) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=np.float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=np.float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. 
deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- Due to floating point rounding the resulting array may not be Hermitian, the diagonal elements may not be 1, and the elements may not satisfy the inequality abs(a) <= 1. The real and imaginary parts are clipped to the interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c stddev = sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] # Clip real and imaginary parts to [-1, 1]. This does not guarantee # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. 
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function capable of receiving a single axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = _nx.normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 else: keepdim = [1] * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. 
It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact) and sz > 0: # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) return np.lib.utils._median_nancheck(part, rout, axis, out) else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array. 
        return mean(part[indexer], axis=axis, out=out)


def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array. A sequence of axes is supported since
        version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
        `percentile`. This will save memory when you do not need to
        preserve the contents of the input array. In this case you
        should not make any assumptions about the contents of the input
        `a` after this function completes -- treat it as undefined.
        Default is False. If `a` is not already an array, this parameter
        will have no effect as `a` will be converted to an array
        internally regardless of the value of this parameter.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired quantile lies between two data points
        ``i < j``:
            * linear: ``i + (j - i) * fraction``, where ``fraction``
              is the fractional part of the index surrounded by ``i``
              and ``j``.
            * lower: ``i``.
            * higher: ``j``.
            * nearest: ``i`` or ``j``, whichever is nearest.
            * midpoint: ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean, median, nanpercentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    will determine the percentile if the normalized ranking does not
    match the location of ``q`` exactly. This function is the same as
    the median if ``q=50``, the same as the minimum if ``q=0`` and the
    same as the maximum if ``q=100``.
Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: if q.ndim == 0: return r.reshape(k) else: return r.reshape([len(q)] + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) weights_below = np.rollaxis(weights_below, axis, 0) weights_above = np.rollaxis(weights_above, axis, 0) axis = 0 # Check if 
the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.rollaxis(x1, axis, 0) x2 = np.rollaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in percentile", RuntimeWarning, stacklevel=3) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional The sample points corresponding to the `y` values. If `x` is None, the sample points are assumed to be evenly spaced `dx` apart. The default is None. dx : scalar, optional The spacing between sample points when `x` is None. The default is 1. axis : int, optional The axis along which to integrate. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = y.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """ Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
""" try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. 
>>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) for i, x in enumerate(xi)] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + s0[2:] output[1].shape = (-1, 1) + s0[2:] if not sparse: # Return the full N-D matrix (not only the 1-D vector) output = np.broadcast_arrays(*output, subok=True) if copy_: output = [x.copy() for x in output] return output def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. 
Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = -1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning, stacklevel=2) if wrap: return wrap(arr) else: return arr.copy(order=arrorder) axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy(order=arrorder)) else: return arr.copy(order=arrorder) # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn("in the future insert will treat boolean arrays and " "array-likes as boolean index instead of casting it " "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning, stacklevel=2) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning, stacklevel=2) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning, stacklevel=2) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 elif ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning, stacklevel=2) arr = arr.copy(order=arrorder) arr[...] 
= values if wrap: return wrap(arr) else: return arr else: axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning, stacklevel=2) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning, stacklevel=2) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
    Traceback (most recent call last):
    ...
    ValueError: arrays must have same number of dimensions

    """
    arr = asanyarray(arr)
    if axis is None:
        if arr.ndim != 1:
            arr = arr.ravel()
        values = ravel(values)
        axis = arr.ndim-1
    return concatenate((arr, values), axis=axis)
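The 'linear' interpolation rule documented in `percentile` above, ``i + (j - i) * fraction``, can be illustrated with a minimal 1-D sketch. This is illustrative only, not part of the module: `percentile_linear_1d` is a hypothetical helper name, and the check against `np.percentile` assumes the default ``interpolation='linear'``.

import numpy as np

def percentile_linear_1d(a, q):
    # Sort the data; the target rank is q/100 of the way along the
    # index range [0, N-1], and the two neighbouring values are
    # blended by the fractional part of that rank.
    s = np.sort(np.asarray(a, dtype=float))
    idx = (q / 100.0) * (s.size - 1)
    lo = int(np.floor(idx))
    hi = min(lo + 1, s.size - 1)
    frac = idx - lo
    return s[lo] * (1.0 - frac) + s[hi] * frac

data = [10, 7, 4, 3, 2, 1]
assert np.isclose(percentile_linear_1d(data, 50), np.percentile(data, 50))  # both 3.5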
32.904112
88
0.573793
4a07817c41c6d5349f6381a0ae823dd662de84e9
28,681
py
Python
utils_hausdorff.py
EIU-GIScience-Center/Polyline_Hausdorff
af8c8f7f138cd4201d60ad6067feebefee74711c
[ "MIT" ]
null
null
null
utils_hausdorff.py
EIU-GIScience-Center/Polyline_Hausdorff
af8c8f7f138cd4201d60ad6067feebefee74711c
[ "MIT" ]
null
null
null
utils_hausdorff.py
EIU-GIScience-Center/Polyline_Hausdorff
af8c8f7f138cd4201d60ad6067feebefee74711c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed May 26 13:29:02 2021 @author: bjkronenfeld """ import math as m import utils_geom as g def componentDistance (dr,k,len_a): """ Calculates the distance from a location on segment a on polyline A to a component of polyline B. Parameters ---------- A : list of (x,y) tuples First polyline. dr : distance representation distance representation of component of polyline B. k : float k-value of location along segment a. a : int ID of segment on polyline A. Returns ------- Float Distance from location k on segment a to dr component. # LJ and MT """ if dr[0] == True: answer = segDistance(dr,k,len_a) else: answer = vertDistance(dr,k,len_a) return answer def component_label(comp): """for debugging""" if comp == None: return "None" else: if comp[0] == True: return"seg {}".format(comp[1]) else: return "vert {}".format(comp[1]) def compValid(n_vert,comp): """ Determines if the input component is valid on the input polyline. Args: polyline : list of tuple The input polyline. comp : (bool, float) The input component Returns: Bool # BK """ if comp[1] < 0: return False if comp[0]: # component is a segment if comp[1] > n_vert-2: return False else: # component is a vertex if comp[1] > n_vert-1: return False return True def distanceRepresentation (A,B,a,bcomp): """ Computes the distance representation of a component of B with respect to segment a Args: A,B - the two polylines a (int) - segment of A bcomp - componet of B Returns: distance representation # LJ and TJ """ # if bcomp is a vertex if bcomp[0] == False: answer = vertDistRep(A, B, a, bcomp[1]) return answer #if bcomp is a segment else: answer = segDistRep(A, B, a, bcomp[1]) return answer def effectiveInterval(A,B,a,bcomp): """ Determines the range along segment a that is closer to the component bcomp than to either adjacent component. Parameters ---------- A : [(x,y),(x,y)...] The main polyline. B : [(x,y),(x,y)...] The other polyline a : Integer Segment on A bcomp : Component component of B Returns ------- tuple of two k-values @author: Folaitan & TJones """ # if statement to determine if bcomp is a segment or vertex if bcomp[0] == True: sint = segEffectiveInterval(A, B, a, bcomp[1]) return sint elif bcomp[0] == False: vint = vertEffectiveInterval(A, B, a, bcomp[1]) return vint def segDistance(dr,k,len_a): """ Computes the distance from dr to k on a segment a. Parameters ---------- dr : (is_seg,sk or None,sin_theta or q-value) dist rep of segament B. k: k-value along segment a. len_a: length of segment a. Returns ---------- float The distance from dr to k on segment a. @author: megshithakur and Farouk """ # check if segments are parallel if dr[1] == None: return dr[2] * len_a else: # normal case # segDistance formula segDist=abs((k-dr[1])*len_a*dr[2]) return segDist def segDistRep (A,B,a,b): """ Constructs the component distance representation for segment b with respect to segment a. Parameters ---------- A : list of (x,y) tuples The first polyline. B : list of (x,y) tuples The second polyline. a : int Index of segment on A b : int Index of segment on B Returns ---------- tuple (distance representation): isSeg : boolean True (always True by definition) k : float k-value of intersection between two segments, or None if segments are parallel sin_theta : float sine of angle between lines through segments, or q-distance between lines if they are parallel. 
# LJ and TJ """ # initiate list for results dist_rep = [] # Create a boolean statement saying that this is indeed a segment dist_rep.append(True) # run intersection tool to find the point of intersection between two infinite lines x = g.intersection(A[a], A[a+1], B[b], B[b+1]) # if we've found an intersection, determine sin theta if x[0] != None: # If there is an intersection, where is it on the x-axis, and then append it K = g.kvalue(x, A[a], A[a+1]) # convert to a k-value dist_rep.append(K) # Find the angle created by the two lines s_rad = g.angle([A[a],A[a+1]],[B[b],B[b+1]]) # Calculate the sine of the angle s = m.sin(s_rad) #Append the sine of the angle dist_rep.append(s) # Handle case of no intersection # If sin theta is zero, there really isn't an intersection (but this # might not be caught above due to floating point precision errors) if x[0] == None or dist_rep[2] == 0: # reset dist_rep=[] dist_rep.append(True) # Append a no result (no intersection) into the final list dist_rep.append(None) # Find the distance between the two lines q = g.distance_to_line(B[b],A[a],A[a+1]) # normalize by length of a q=q/g.distance(A[a], A[a+1]) # Append the distance dist_rep.append(q) dist_rept = tuple(dist_rep) return dist_rept def withinUnitInterval(a,b): """ Computes the portion of the input interval that is within the interval [0,1] Parameters ---------- interval : (float,float) An effective interval, not necessarily in sequence. Returns ------- The portion of the input interval within [0,1], in sequence from low to high, or (-inf,-inf) if the input interval does not overlap the unit interval """ kmin = min(min(a,b),1) kmax = max(max(a,b),0) if kmin==1 or kmax==0: return (float('-inf'),float('-inf')) else: return (kmin,kmax) def segEffectiveInterval(A,B,a,b, tolerance=0.000001): """ Computes the effective interval of segment b on segment a, that is the interval on a for which the interior of segment b is closer than either endpoint Parameters ---------- A : list of (x,y) tuples The first polyline. B : list of (x,y) tuples The second polyline. a : int Index of segment on A. b : int Index of segment on B. tolerance : float If seg b's k-values on seg a are within this tolerance, the segments will be treated as perpendicular Returns: tuple k1 : float First k-value of effective interval. k2 : float Second k-value of effective interval. @author: megshithakur and tannerjones """ a1 = A[a] a2 = A[a+1] b1 = B[b] b2 = B[b+1] # check for perpendicular segments prjout1 = g.project_pt_to_line(b1, a1, a2) prjout2 = g.project_pt_to_line(b2, a1, a2) k1 = g.kvalue(prjout1,a1,a2) k2 = g.kvalue(prjout2,a1,a2) if abs(k1-k2) <= tolerance: # segment is perpendicular, so effective interval is entire segment return [0,1] else: # project out from each b vertex to segment a k1 = g.project_out(a1,a2,b1,b2) k2 = g.project_out(a1,a2,b2,b1) # return them in sequence, bound to range [0,1] return withinUnitInterval(k1,k2) def switchPoint (dr1, dr2): """ Determines point along a where the nearest point on B switches from the component represented by dr1 to the component represented by dr2. 
Parameters ---------- dr1 : dr1 Distance representation 1 dr2 : dr2 Distance representation 2 Returns ------- [float] list of k-values """ # input: segment, segment - return Seg Seg switch point if dr1[0] == True and dr2[0] == True: segseg = segSegSwitchPoint(dr1, dr2) return segseg # input: vertex, segment - return vert seg switch point elif dr1[0] == False and dr2[0] == True: vertseg = vertSegSwitchPoint(dr1, dr2) return vertseg # input: segment, vertex - return vert seg switch point elif dr1[0] == True and dr2[0] == False: vertseg = vertSegSwitchPoint(dr2, dr1) return vertseg # input: vertex, vertex - return vert vert switch point elif dr1[0] == False and dr2[0] == False: vertvert = vertVertSwitchPoint(dr1, dr2) return vertvert def vertDistance(dr, k, len_a): """ Computes the distance from the vertex represented by dr to the location k on the segment a that the distance representation was constructed from. Parameters ---------- dr : (is_seg, vk,q) distance representation of a vertex of B, being a tuple of three values k : the k-value along segment a len_a : the length of segment a Returns ------- float the distance from point k on segment a to the vertex of B represented by dr @author: Folaitan & @Ljansen """ d = ((k-dr[1])**2) + (dr[2]**2) d = m.sqrt(d) d = len_a * d return d def vertDistRep(A,B,a,b): """ Constructs the Distance Representation for vertex b with respect to segment a. Parameters ---------- A : list of (x,y) tuples The first polyline. B : list of (x,y) tuples The second polyline. a : int Index of segment on A b : int Index of vertex on B Returns ---------- tuple (distance representation): is_seg : bool False(always False by definition). k : float k-value of the location of the perpendicular projection of b onto the line through a. q : float q-distance from b to the line through a. @author: MT and TJ """ # initiate list for results FinalList = [] #Create a boolean statement saying that this is not a segment FinalList.append(False) #run project to line tool to find perpendicular intersection point between point b and segment a p = g.project_pt_to_line(B[b], A[a], A[a+1]) k = g.kvalue(p, A[a], A[a+1]) #append result to results list FinalList.append(k) #run distance tool to find distance between the new point (k) and b q = g.distance(p, B[b]) q = q/g.distance(A[a],A[a+1]) #append the result FinalList.append(q) FinalList = tuple(FinalList) return FinalList def vertEffectiveInterval(A,B,a,b,tolerance=0.000001): """ Computes the effective interval of vertex b on segment a, that is the interval on a for which vertex b is closer than either adjacent segment Parameters ---------- A : list of (x,y) tuples The first polyline. B : list of (x,y) tuples The second polyline. a : int Index of segment on A b : int Index of vertex on B tolerance : float If b segs' k-values on seg a are within this tolerance, the segments will be treated as perpendicular Returns: ---------- tuple of two floats, with (-inf,-inf) representing no effective interval. 
    @author: megshithakur and tannerJones
    """
    # get coordinates
    a1, a2 = A[a],A[a+1]
    # handle cases where b is an end of the polyline
    if b==0 or b==len(B)-1:
        # get neighbor segment id
        if b==0:
            nb = 1
        else:
            nb = len(B)-2
        # get coordinates of b vertex and neighbor
        bc,bnb = B[b],B[nb]
        # determine projections onto segment a
        prjc = g.project_pt_to_line(bc,a1,a2)
        prjnb = g.project_pt_to_line(bnb,a1,a2)
        # determine k-values
        kc = g.kvalue(prjc, a1, a2)
        knb = g.kvalue(prjnb, a1, a2)
        # check for perpendicularity
        if abs(kc-knb) <= tolerance:
            # calculate distances from each B vertex to segment a
            distc = g.distance(bc,prjc)
            distnb = g.distance(bnb,prjnb)
            # is vertex b on segment a?
            if distc < tolerance:
                return (0,1)
            # is the neighboring vertex on segment a?
            elif distnb < tolerance:
                return (float('-inf'),float('-inf'))
            else:
                # calculate areas
                areac = g.area([a1,a2,bc,a1])
                areanb = g.area([a1,a2,bnb,a1])
                # are the two vertices on opposite sides of segment a?
                if (areac > 0) != (areanb > 0):
                    return (float('-inf'),float('-inf'))
                # is the center vertex closer to segment a than the neighbor?
                elif distc < distnb:
                    return [0,1]
                else:
                    return (float('-inf'),float('-inf'))
        else:
            # project points out from b to segment a
            kbout = g.project_out(a1, a2, bc, bnb)
            # return interval from kbout to end of segment a opposite neighbor
            if knb < kc:
                return withinUnitInterval(kbout,1)
            else:
                return withinUnitInterval(0,kbout)
    else: # vertex b has two neighbors
        # get coordinates of B vertices
        bp,bc,bn = B[b-1],B[b],B[b+1] # prev, current, next vertices on B
        # project each vertex of B onto segment A
        prjp = g.project_pt_to_line(bp,a1,a2)
        prjc = g.project_pt_to_line(bc,a1,a2)
        prjn = g.project_pt_to_line(bn,a1,a2)
        # get k-values of projections of B vertices onto a
        kp = g.kvalue(prjp, a1, a2)
        kc = g.kvalue(prjc, a1, a2)
        kn = g.kvalue(prjn, a1, a2)
        # check for perpendicular segments
        prev_perp = abs(kc-kp) <= tolerance
        next_perp = abs(kn-kc) <= tolerance
        if prev_perp and next_perp: # both perpendicular
            # get distances and areas to all vertices
            dcur = g.distance(bc,prjc)
            dprev = g.distance(bp,prjp)
            dnext = g.distance(bn,prjn)
            areacur = g.area([a1,a2,bc,a1])
            areaprev = g.area([a1,a2,bp,a1])
            areanext = g.area([a1,a2,bn,a1])
            # vertex has effective interval only if it is closest to segment a
            # and all vertices are on same side of segment a
            if (areaprev > 0) == (areanext > 0) and (areacur > 0) == (areanext > 0) and dcur < dprev and dcur < dnext:
                return (0,1)
            else:
                return (float('-inf'),float('-inf'))
        elif prev_perp or next_perp: # one perpendicular
            # get coordinates and k-values of vertex on perpendicular segment, other vertex
            if prev_perp:
                bperp = bp
                bother = bn
                kother = kn
                prjperp = prjp
            else:
                bperp = bn
                bother = bp
                kother = kp
                prjperp = prjn
            # get distances and areas of each vertex on perpendicular segment
            dperp = g.distance(bperp,prjperp)
            dcur = g.distance(bc,prjc)
            areaperp = g.area([a1,a2,bperp,a1])
            areacur = g.area([a1,a2,bc,a1])
            # Is perpendicular segment on one side of A and current vertex is closer?
if (areaperp > 0) == (areacur > 0) and dcur < dperp: # project out from other segment otherprjoutk = g.project_out(a1,a2,bc,bother) # interval is from end of line opposite other to k-value of other if kother > kc: return (0,otherprjoutk) else: return (otherprjoutk,1) else: return (float('-inf'),float('-inf')) else: # neither perpendicular # get k-values of projections of vertex out from each B segment onto segment a kcpout = g.project_out(a1, a2, bc, bp) kcnout = g.project_out(a1, a2, bc, bn) # check sides of neighboring vertices with respect to b if kp < kc and kn < kc: # both neighbors left of b maxk = max(kcpout,kcnout) return withinUnitInterval(maxk,1) elif kp > kc and kn > kc: # both neighbors right of b mink = min(kcpout,kcnout) return withinUnitInterval(0,mink) else: # nieghbors on either side of b # determine min and max k-values of b based on positions of # previous and next vertices if kp < kc: # previous neighbor left mink = kcpout maxk = kcnout else: # previous neighbor right mink = kcnout maxk = kcpout if mink <= maxk: return withinUnitInterval(mink,maxk) else: return (float('-inf'),float('-inf')) def segSegSwitchPoint(dr1, dr2): """ Determines the k-values of the two locations along segment a that are equidistant to the two segments b1 and b2 represented by dr1 and dr2, i.e. the location at which the nearest component of a “switches” from b1 to b2. Parameters ---------- dr1 : seg_p1 distance representation of a segment on b dr2 : seg_p2 distance representation of another segment on b Returns ------- List of 1 or 2 floats representing k-value(s) of the switch points @author: Folaitan & Ljansen """ k = [] if dr1[1] == None and dr2[1] == None: # both segments parallel to a pass elif dr1[1] == None: # first segment parallel to a k_out_1 = dr2[1] + (dr1[2]/dr2[2]) k_out_2 = dr2[1] - (dr1[2]/dr2[2]) k.append(k_out_1) k.append(k_out_2) elif dr2[1] == None: # second segment parallel to a k_out_1 = dr1[1] + ((dr2[2])/dr1[2]) k_out_2 = dr1[1] - ((dr2[2])/dr1[2]) k.append(k_out_1) k.append(k_out_2) else: # neither segment parallel to a # create more readable variables k1 = dr1[1] k2 = dr2[1] # calculate alpha parameter a = dr2[2]/dr1[2] if a == 1: # first solution is not valid; return second solution k_out_2 = (k1+a*k2)/(1+a) k.append(k_out_2) elif a == -1: # second solution is not valid; return first solution k_out_1 = (k1-a*k2)/(1-a) k.append(k_out_1) else: # return both solutions k_out_1 = (k1-a*k2)/(1-a) k.append(k_out_1) k_out_2 = (k1+a*k2)/(1+a) k.append(k_out_2) # return values in ascending order, for consistency return sorted(k) def vertSegSwitchPoint(vdr, sdr): """ Determines the k-value of the location along segment a that is equidistant to the input vertex and input segment, i.e. the location at which the nearest component “switches” from the first component to the second. 
Parameters ---------- vdr : ver_rep : (False, k, q) distance representation of a vertex on B sdr : seg_rep : (True, k, sin_theta) or (True, none, q) distance representation of a segment on B Returns ------- [float,float] List containing two floats representing the k-values of the switch points @author: Folaitan & Ljansen """ r_list = [] # get distance representation values into more readable variables k_vert = vdr[1] q_vert = vdr[2] k_seg = sdr[1] sin_theta = sdr[2] # three cases if sin_theta== 1: # b segment is perpendicular to A numerator = ((k_seg**2)-(k_vert**2)-(q_vert**2)) denominator = (2*k_seg) - (2*k_vert) if denominator == 0: # point and line have same k-value, so this is the switch point return [k_seg] vertSegPoint = numerator/denominator r_list.append(vertSegPoint) else: if sdr[1] == None: # b segment is parallel to a q_seg = sin_theta # distance representation value is q not sin_theta a = 1 b = -2*k_vert c = k_vert**2 + q_vert**2 - q_seg**2 else: # normal case a = (sin_theta**2)-1 b = (2*k_vert)-((2*k_seg)*(sin_theta**2)) c = ((k_seg**2)*(sin_theta**2))-(k_vert**2)-(q_vert**2) inside_root = (b**2)-(4*a*c) # catch floating point precision issues: # better to show a switch point when there is none than to miss one if -0.0000000000001 < inside_root < 0: inside_root = 0 # if inside_root is less than zero, quadratic formula has no # solution and there is no switch point to return # if it equals zero, there is one switch point if inside_root >= 0: r_list.append((-b + (m.sqrt(inside_root)))/(2*a)) if inside_root > 0: r_list.append((-b - (m.sqrt(inside_root)))/(2*a)) # let's put these in ascending order, just to be safe return sorted(r_list) def vertVertSwitchPoint (dr1,dr2): """ Determines the k-value of the location along segment a that is equidistant to the two vertices b1 and b2 represented by dr1 and dr2, i.e. the location at which the nearest component of a “switches” from b1 to b2. Parameters ---------- dr1 : tuple, distance representation of a vertex of B dr2 : tuple, distance representation of a different vertex of B Returns : [k] - list containing one float representing the k-value of the switch point # MT and LJ """ #checks to see if k values for dr1 and dr2 are not equal if dr1[1] != dr2[1]: #calculates k value for switch point k = [((dr2[2]**2 - dr1[2]**2) + (dr2[1]**2 - dr1[1]**2)) / (2 * (dr2[1]-dr1[1]))] else: #k is an empty list k = [] return k def candidateComponents(A,B,a): """ Identifies all components of B that could be the target of the Hausdorff distance from segment a on A. Parameters ---------- A : [(x,y),...] list of tuples The coordinates of the main polyline. B : [(x,y),...] list of tuples The coordinates of the other polyline. a : int The index of a segment on polyline A. Returns ---------- [(bool, int)] A list of candidate components on B. """ # for now, simply return a list of all components of B result = [] # get vertex components for i in range(len(B)): result.append((False,i)) for i in range(len(B)-1): result.append((True,i)) return result def nearSegment(A,B,a): """ Identifies the segment of of B nearest to vertex a on A. Parameters ---------- A : [(x,y),...] list of tuples The coordinates of the main polyline. B : [(x,y),...] list of tuples The coordinates of the other polyline. a : int The index of a vertex on polyline A. Returns ---------- int The index of the nearest segment on B. 
""" # for now, this will be coded with a "brute force" method, checking the # distance from every segment of B to vertex a of A # later this should be updated to use an indexing structure such as # an r-tree for computational efficiency # initialize to first segment min_index = 0 min_d = g.distance_to_segment(A[a], B[0], B[1]) # check other segments for b in range(1,len(B)-1): d = g.distance_to_segment(A[a],B[b],B[b+1]) if d < min_d: min_d = d min_index = b return min_index def nearComponent(A,B,a,b): """ Among segment b, vertex b and vertex b+1, determines which component is nearest to vertex a. Parameters ---------- A : [(x,y),...] list of tuples The coordinates of the main polyline. B : [(x,y),...] list of tuples The coordinates of the other polyline. a : int The index of a vertex on polyline A. b : int The index of a segment on polyline B. Returns ---------- (component, float) The nearest component of B along with its distance from vertex a. """ # # get distances from each component # d_seg = g.distance_to_segment(A[a],B[b],B[b+1]) # d_vert1 = g.distance(A[a],B[b]) # d_vert2 = g.distance(A[a], B[b+1]) # # if distance to d_seg is strictly the minimum, return the segment # if d_seg == min(d_seg,d_vert1,d_vert2): # return ((True,b),d_seg) # elif d_vert1 < d_vert2: # return((False,b),d_vert1) # else: # return ((False,b+1),d_vert2) # calculate k-value of projection of vertex a onto segment b prj = g.project_pt_to_line(A[a], B[b], B[b+1]) k = g.kvalue(prj, B[b], B[b+1]) if k <= 0: # return vertex b d = g.distance(A[a],B[b]) return ((False,b),d) elif k >= 1: # return vertex b+1 d = g.distance(A[a],B[b+1]) return ((False,b+1),d) else: # return segment b d = g.distance(A[a],prj) return ((True,b),d) def nearLoc(srcloc,trgline,trgcomp): """ Finds the location on the target component nearest to the source location. Parameters ---------- srcloc : (float,float) The coordinates of the source location. trgline : [(x,y),...] list of tuples The coordinates of the target polyline. trgcomp : (bool,int) The target component. Returns ---------- (float,float) The nearest location on the target component to the source location. """ if trgcomp[0] == False: # target is a vertex return trgline[trgcomp[0]] else: # target is a segment trgstart = trgline[trgcomp[1]] trgend = trgline[trgcomp[1]+1] srcprj = g.project_pt_to_line(srcloc, trgstart,trgend) k = g.kvalue(srcprj,trgstart,trgend) if k <= 0: return trgstart elif k >=1: return trgend else: x = trgstart[0] + k * (trgend[0]-trgstart[0]) y = trgstart[1] + k * (trgend[1]-trgstart[1]) return (x,y) def checkSegment(c1,c2): """ Determines whether or not it is necessary to further process a segment given that its endpoints have been processed already. Parameters ---------- c1 : component The component of B closest to the first vertex of a segment of A. c2 : component The component of B closest to the second vertex of a segment of A. Returns ---------- (component, float) The nearest component of B along with its distance from vertex a. """ # No need to check a segment if either: # c1 and c2 are the same component, or # c1 and c2 are consective vertices if c1==c2: return False elif c1[0] == False and c2[0] == False and abs(c1[1]-c2[1])==1: return False else: return True
33.782097
119
0.547331
4a0781e6a425de72554cf6eb161a0c6a29d86ad1
840
py
Python
DeeProtein/sense.py
juzb/DeeProtein
487694a24abdb4656499111c8a8904dfcb1d98ab
[ "MIT" ]
12
2019-02-21T14:09:13.000Z
2021-03-05T02:02:21.000Z
DeeProtein/sense.py
juzb/DeeProtein
487694a24abdb4656499111c8a8904dfcb1d98ab
[ "MIT" ]
null
null
null
DeeProtein/sense.py
juzb/DeeProtein
487694a24abdb4656499111c8a8904dfcb1d98ab
[ "MIT" ]
5
2019-05-15T05:37:41.000Z
2021-09-29T12:20:00.000Z
import subprocess while True: #name = input('Please enter four letter name for this run: ') name = "AAAA" sequence = input('Please enter the sequence to analyze: ') gos = input('Please enter the GO terms to analyze separated by commas: ') with open('/results/tmp/masked_dataset.txt', 'w') as ofile: ofile.write('{};{};{};{};{};{}'.format(name, 'A', gos, sequence, '.' * len(sequence), '_' * len(sequence))) subprocess.call(['bash', '/code/analyze_sensitivity.sh', gos]) print('Performed sensitivity analysis. ' 'Please find the results in /results\n\n')
40
78
0.458333
4a0782340d892fe8904921f79672f6a1effbb022
8,050
py
Python
frappe/core/doctype/doctype/test_doctype.py
ramen123/frappe
ede92ef61ad640036bdd98bffdf2ea593de0a5ef
[ "MIT" ]
null
null
null
frappe/core/doctype/doctype/test_doctype.py
ramen123/frappe
ede92ef61ad640036bdd98bffdf2ea593de0a5ef
[ "MIT" ]
null
null
null
frappe/core/doctype/doctype/test_doctype.py
ramen123/frappe
ede92ef61ad640036bdd98bffdf2ea593de0a5ef
[ "MIT" ]
1
2021-11-19T18:46:53.000Z
2021-11-19T18:46:53.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest # test_records = frappe.get_test_records('DocType') class TestDocType(unittest.TestCase): def new_doctype(self, name, unique=0, depends_on=''): return frappe.get_doc({ "doctype": "DocType", "module": "Core", "custom": 1, "fields": [{ "label": "Some Field", "fieldname": "some_fieldname", "fieldtype": "Data", "unique": unique, "depends_on": depends_on, }], "permissions": [{ "role": "System Manager", "read": 1 }], "name": name }) def test_validate_name(self): self.assertRaises(frappe.NameError, self.new_doctype("_Some DocType").insert) self.assertRaises(frappe.NameError, self.new_doctype("8Some DocType").insert) self.assertRaises(frappe.NameError, self.new_doctype("Some (DocType)").insert) for name in ("Some DocType", "Some_DocType"): if frappe.db.exists("DocType", name): frappe.delete_doc("DocType", name) doc = self.new_doctype(name).insert() doc.delete() def test_doctype_unique_constraint_dropped(self): if frappe.db.exists("DocType", "With_Unique"): frappe.delete_doc("DocType", "With_Unique") dt = self.new_doctype("With_Unique", unique=1) dt.insert() doc1 = frappe.new_doc("With_Unique") doc2 = frappe.new_doc("With_Unique") doc1.some_fieldname = "Something" doc1.name = "one" doc2.some_fieldname = "Something" doc2.name = "two" doc1.insert() self.assertRaises(frappe.UniqueValidationError, doc2.insert) dt.fields[0].unique = 0 dt.save() doc2.insert() doc1.delete() doc2.delete() def test_validate_search_fields(self): doc = self.new_doctype("Test Search Fields") doc.search_fields = "some_fieldname" doc.insert() self.assertEqual(doc.name, "Test Search Fields") # check if invalid fieldname is allowed or not doc.search_fields = "some_fieldname_1" self.assertRaises(frappe.ValidationError, doc.save) # check if no value fields are allowed in search fields field = doc.append("fields", {}) field.fieldname = "some_html_field" field.fieldtype = "HTML" field.label = "Some HTML Field" doc.search_fields = "some_fieldname,some_html_field" self.assertRaises(frappe.ValidationError, doc.save) def test_depends_on_fields(self): doc = self.new_doctype("Test Depends On", depends_on="eval:doc.__islocal == 0") doc.insert() # check if the assignment operation is allowed in depends_on field = doc.fields[0] field.depends_on = "eval:doc.__islocal = 0" self.assertRaises(frappe.ValidationError, doc.save) def test_all_depends_on_fields_conditions(self): import re docfields = frappe.get_all("DocField", or_filters={ "ifnull(depends_on, '')": ("!=", ''), "ifnull(collapsible_depends_on, '')": ("!=", '') }, fields=["parent", "depends_on", "collapsible_depends_on", "fieldname", "fieldtype"]) pattern = """[\w\.:_]+\s*={1}\s*[\w\.@'"]+""" for field in docfields: for depends_on in ["depends_on", "collapsible_depends_on"]: condition = field.get(depends_on) if condition: self.assertFalse(re.match(pattern, condition)) def test_sync_field_order(self): from frappe.modules.import_file import get_file_path import os # create test doctype test_doctype = frappe.get_doc({ "doctype": "DocType", "module": "Core", "fields": [ { "label": "Field 1", "fieldname": "field_1", "fieldtype": "Data" }, { "label": "Field 2", "fieldname": "field_2", "fieldtype": "Data" }, { "label": "Field 3", "fieldname": "field_3", "fieldtype": "Data" }, { "label": "Field 4", "fieldname": "field_4", "fieldtype": "Data" } ], "permissions": [{ "role": "System Manager", "read": 1 }], "name": "Test 
Field Order DocType", "__islocal": 1 }) path = get_file_path(test_doctype.module, test_doctype.doctype, test_doctype.name) initial_fields_order = ['field_1', 'field_2', 'field_3', 'field_4'] frappe.delete_doc_if_exists("DocType", "Test Field Order DocType") if os.path.isfile(path): os.remove(path) try: frappe.flags.allow_doctype_export = 1 test_doctype.save() # assert that field_order list is being created with the default order test_doctype_json = frappe.get_file_json(path) self.assertTrue(test_doctype_json.get("field_order")) self.assertEqual(len(test_doctype_json['fields']), len(test_doctype_json['field_order'])) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], test_doctype_json['field_order']) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], initial_fields_order) self.assertListEqual(test_doctype_json['field_order'], initial_fields_order) # remove field_order to test reload_doc/sync/migrate is backwards compatible without field_order del test_doctype_json['field_order'] with open(path, 'w+') as txtfile: txtfile.write(frappe.as_json(test_doctype_json)) # assert that field_order is actually removed from the json file test_doctype_json = frappe.get_file_json(path) self.assertFalse(test_doctype_json.get("field_order")) # make sure that migrate/sync is backwards compatible without field_order frappe.reload_doctype(test_doctype.name, force=True) test_doctype.reload() # assert that field_order list is being created with the default order again test_doctype.save() test_doctype_json = frappe.get_file_json(path) self.assertTrue(test_doctype_json.get("field_order")) self.assertEqual(len(test_doctype_json['fields']), len(test_doctype_json['field_order'])) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], test_doctype_json['field_order']) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], initial_fields_order) self.assertListEqual(test_doctype_json['field_order'], initial_fields_order) # reorder fields: swap row 1 and 3 test_doctype.fields[0], test_doctype.fields[2] = test_doctype.fields[2], test_doctype.fields[0] for i, f in enumerate(test_doctype.fields): f.idx = i + 1 # assert that reordering fields only affects `field_order` rather than `fields` attr test_doctype.save() test_doctype_json = frappe.get_file_json(path) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], initial_fields_order) self.assertListEqual(test_doctype_json['field_order'], ['field_3', 'field_2', 'field_1', 'field_4']) # reorder `field_order` in the json file: swap row 2 and 4 test_doctype_json['field_order'][1], test_doctype_json['field_order'][3] = test_doctype_json['field_order'][3], test_doctype_json['field_order'][1] with open(path, 'w+') as txtfile: txtfile.write(frappe.as_json(test_doctype_json)) # assert that reordering `field_order` from json file is reflected in DocType upon migrate/sync frappe.reload_doctype(test_doctype.name, force=True) test_doctype.reload() self.assertListEqual([f.fieldname for f in test_doctype.fields], ['field_3', 'field_4', 'field_1', 'field_2']) # insert row in the middle and remove first row (field 3) test_doctype.append("fields", { "label": "Field 5", "fieldname": "field_5", "fieldtype": "Data" }) test_doctype.fields[4], test_doctype.fields[3] = test_doctype.fields[3], test_doctype.fields[4] test_doctype.fields[3], test_doctype.fields[2] = test_doctype.fields[2], test_doctype.fields[3] test_doctype.remove(test_doctype.fields[0]) for i, f in 
enumerate(test_doctype.fields): f.idx = i + 1 test_doctype.save() test_doctype_json = frappe.get_file_json(path) self.assertListEqual([f['fieldname'] for f in test_doctype_json['fields']], ['field_1', 'field_2', 'field_4', 'field_5']) self.assertListEqual(test_doctype_json['field_order'], ['field_4', 'field_5', 'field_1', 'field_2']) except: raise finally: frappe.flags.allow_doctype_export = 0
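The depends_on validation exercised by test_all_depends_on_fields_conditions hinges on a single regular expression that flags a lone assignment operator inside a condition string. A standalone sketch using the same pattern string as the test shows which expressions it rejects:

import re

# a bare "=" between an identifier-ish token and a value means someone
# wrote an assignment where a comparison was intended
pattern = r"""[\w\.:_]+\s*={1}\s*[\w\.@'"]+"""

print(bool(re.match(pattern, "eval:doc.__islocal = 0")))   # True  -> invalid
print(bool(re.match(pattern, "eval:doc.__islocal == 0")))  # False -> valid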
35.152838
150
0.70795
4a0782e15f60970af79e4a97e02ab5d733d2ade5
3,434
py
Python
utils/preprocessing.py
HUFS-VLab/tf-SSS-AE
f693f1df2199a15623fb4c717c87dcd39461a6d5
[ "MIT" ]
null
null
null
utils/preprocessing.py
HUFS-VLab/tf-SSS-AE
f693f1df2199a15623fb4c717c87dcd39461a6d5
[ "MIT" ]
null
null
null
utils/preprocessing.py
HUFS-VLab/tf-SSS-AE
f693f1df2199a15623fb4c717c87dcd39461a6d5
[ "MIT" ]
null
null
null
import os import sys import glob import json import librosa import argparse import numpy as np def min_max_scaling(x): """ Args: S: np.array, Spectrogram. Shape=(f, t) Returns: S: np.array, scaled Spectrogram. Shape=(f, t) """ _max = np.max(x) _min = np.min(x) x = (x - _min + 1e-7) / (_max - _min) return x def time_average(S): """ Average over time Args: S : np.array, Spectrogram. Shape=(n_mfcc, time) or (frame_bins, time) Returns: spectrum : np.array, spectrum. Shape=(n_mfcc) or (frame_bins) """ spectrum = np.mean(S, axis=-1) return spectrum def preprocess(data_list, args): example = data_list[0] item_name = example['item'] item_type = example['type'] print(f">> target : {item_name}_{item_type}") save_dir_path = os.path.join(args.main_dir, f'seqlen_{args.seq_len}_mels_{args.n_mels}', args.dataset_name) save_path = os.path.join(save_dir_path, item_name) os.makedirs(save_path, exist_ok=True) for data in data_list: # Original wav_name = os.path.basename(data['wav']) wav_path = os.path.join(args.dataset_path, data['wav']+'.wav') sr = data['sr'] wav = librosa.load(wav_path, sr=sr)[0] n_fft = args.n_fft hop_length = args.hop_length S = librosa.feature.melspectrogram(y=wav, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=args.n_mels) S_len = S.shape[1] """ Temporal Adaptive Average pooling """ q = int(S_len / args.seq_len) r = S_len % args.seq_len if r != 0: # zero-pad only when the length is not a multiple of seq_len margin = (q + 1) * args.seq_len - S_len padded_S = np.zeros((S.shape[0], S.shape[1]+margin)).astype(np.float32) padded_S[:,:S_len] = S S = padded_S S_len += margin kernel_size = int(S_len / args.seq_len) spectrum_list = [] for i in range(args.seq_len): kernel_start = i * kernel_size kernel_end = kernel_start + kernel_size local_S = S[:,kernel_start:kernel_end] spectrum = time_average(local_S) spectrum_list.append(spectrum) sequence = np.stack(spectrum_list, 0) # Shape = (sequence_length, n_dims) sequence = min_max_scaling(sequence) np.save(f"{save_path}/{wav_name}.npy", sequence) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--main-dir', type=str, default='', help='-') parser.add_argument('--dataset-name', type=str, default='', help='-') parser.add_argument('--dataset-path', type=str, default='', help='-') parser.add_argument('--target-manifest', type=str, default='', help='-') parser.add_argument('--seq-len', type=int, default=32, help='-') parser.add_argument('--n-mels', type=int, default=80, help='-') parser.add_argument('--n-fft', type=int, default=2048, help='-') args, unknown = parser.parse_known_args() args.win_length = args.n_fft args.hop_length = int(args.n_fft / 4) with open(args.target_manifest, 'r') as f: data_list = json.load(f) print(">> Preprocessing..") preprocess(data_list, args) print(">> Done")
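The "temporal adaptive average pooling" step above just pads the time axis to a multiple of seq_len and averages each equal-width window. A toy, self-contained sketch of the same idea (the array shapes here are arbitrary, chosen for illustration):

import numpy as np

S = np.arange(40, dtype=np.float32).reshape(4, 10)  # 4 mel bins x 10 frames
seq_len = 4

r = S.shape[1] % seq_len
if r != 0:  # zero-pad the time axis up to the next multiple of seq_len
    S = np.pad(S, ((0, 0), (0, seq_len - r)))

kernel = S.shape[1] // seq_len  # frames averaged per output step (here 3)
pooled = S.reshape(S.shape[0], seq_len, kernel).mean(axis=-1)
print(pooled.shape)  # (4, 4) = (n_mels, seq_len); the script stores the transpose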
30.660714
112
0.576878
4a07836f56c7e56b27499cec911a172c6ee79b50
947
py
Python
tests/filters/test_value_blocklist_check.py
cuspymd/CredSweeper
376e7faff41d8b58f0d9e2a82955ad0929ee8290
[ "MIT" ]
1
2022-03-03T18:11:59.000Z
2022-03-03T18:11:59.000Z
tests/filters/test_value_blocklist_check.py
shadowscatcher/CredSweeper
0387ed76aca4a12154e15c49db8dc0901a014275
[ "MIT" ]
null
null
null
tests/filters/test_value_blocklist_check.py
shadowscatcher/CredSweeper
0387ed76aca4a12154e15c49db8dc0901a014275
[ "MIT" ]
null
null
null
import pytest from credsweeper.filters import ValueBlocklistCheck from tests.test_utils.dummy_line_data import get_line_data class TestValueBlocklistCheck: def test_value_blocklist_p(self, file_path: pytest.fixture, success_line: pytest.fixture) -> None: line_data = get_line_data(file_path, line=success_line, pattern=r"(?P<value>.*$)") assert ValueBlocklistCheck().run(line_data) is False @pytest.mark.parametrize("line", [ "string12", ]) def test_value_blocklist_n(self, file_path: pytest.fixture, line: str) -> None: line_data = get_line_data(file_path, line=line, pattern=r"(?P<value>.*$)") assert ValueBlocklistCheck().run(line_data) is True def test_value_blocklist_none_value_n(self, file_path: pytest.fixture, success_line: pytest.fixture) -> None: line_data = get_line_data(file_path, line=success_line) assert ValueBlocklistCheck().run(line_data) is True
43.045455
113
0.736008
4a0784f207726a72ff8b7cc9d99774694c2b4222
5,303
py
Python
app.py
Saket-Upadhyay/FlagCheckDiscordChal
a73f15bee5bcaa36b610253a06646e955c50b420
[ "MIT" ]
null
null
null
app.py
Saket-Upadhyay/FlagCheckDiscordChal
a73f15bee5bcaa36b610253a06646e955c50b420
[ "MIT" ]
null
null
null
app.py
Saket-Upadhyay/FlagCheckDiscordChal
a73f15bee5bcaa36b610253a06646e955c50b420
[ "MIT" ]
null
null
null
from flask import Flask from flask import request import hashlib as hl app = Flask(__name__) @app.route('/') def ma(): return """ <html> <head> <title> FrigidSec DPC Flag Check</title> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="keywords" content="FrigidSec"> <meta name="description" content="FrigidSec Discord Challenge Check"> <style> body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;} h1{text-align:center;font-family:Impact, sans-serif;color:#000000;background-color:#ffffff;} p {text-align:center;font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:bold;color:#000000;background-color:#ffffff;} </style> </head> <h1>FrigidSec DPC Flag Checker API</h1> <center><h2>Enter SHA256 hash of your flag below and click submit.</h2> <br> <form action="/whatisthisbehaviourmona" method="POST"> <input name="check"> <input type="submit"> </form></center> """ @app.route('/whatisthisbehaviourmona',methods=['POST','GET']) def hello_world(): FlagList=[] with open("flaglist.dat",'r') as ff: FlagList=ff.readlines() if request.method == "POST": REQ_DAT=request.values.get("check") print(REQ_DAT) print(FlagList) if REQ_DAT is None or len(str(REQ_DAT)) < 10: # the old check compared str(REQ_DAT) == None (always False) and len(None) would raise return """ <!DOCTYPE html> <html> <head> <title> FrigidSec DPC Flag Check</title> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="keywords" content="FrigidSec"> <meta name="description" content="FrigidSec Discord Challenge Check"> <style> body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;} h1{text-align:center;font-family:Impact, sans-serif;color:#000000;background-color:#ffffff;} p {text-align:center;font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:bold;color:#000000;background-color:#ffffff;} </style> </head> <body> <h1>FrigidSec DPC Flag Checker API</h1> <br> <h3>Are you sure you provided a <a style=\"color:red;\">SHA-256</a> hash ?? Check again mate, it doesn't look like one.</h3> </body> </html> """ elif str(REQ_DAT) in FlagList or str(str(REQ_DAT)+"\n") in FlagList: return """ <!DOCTYPE html> <html> <head> <title> FrigidSec DPC Flag Check</title> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="keywords" content="FrigidSec"> <meta name="description" content="FrigidSec Discord Challenge Check"> <style> body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;} h1{text-align:center;font-family:Impact, sans-serif;color:#000000;background-color:#ffffff;} p {text-align:center;font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:bold;color:#000000;background-color:#ffffff;} </style> </head> <h1>FrigidSec DPC Flag Checker API</h1> <center><h2>You got a <br> <a style=\"color:green;\">VALID</a> <br> flag! <br>Nice Job. 
Just don't get rusty over time!</h2></center> """ else: return """ <html> <head> <title> FrigidSec DPC Flag Check</title> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="keywords" content="FrigidSec"> <meta name="description" content="FrigidSec Discord Challenge Check"> <style> body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;} h1{text-align:center;font-family:Impact, sans-serif;color:#000000;background-color:#ffffff;} p {text-align:center;font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:bold;color:#000000;background-color:#ffffff;} </style> </head> <h1>FrigidSec DPC Flag Checker API</h1> <center><h2>You got an <br><a style=\"color:red;\">INVALID :(</a> <br>flag, but don't give up mate!</h2></center> """ else: return """ <html> <head> <title> FrigidSec DPC Flag Check</title> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="keywords" content="FrigidSec"> <meta name="description" content="FrigidSec Discord Challenge Check"> <style> body {background-color:#ffffff;background-repeat:no-repeat;background-position:top left;background-attachment:fixed;} h1{text-align:center;font-family:Impact, sans-serif;color:#000000;background-color:#ffffff;} p {text-align:center;font-family:Georgia, serif;font-size:14px;font-style:normal;font-weight:bold;color:#000000;background-color:#ffffff;} </style> </head> <h1>FrigidSec DPC Flag Checker API</h1> <center> <p>This API checks a flag when you give the SHA256 digest of your flag in the ?check= parameter via POST</p> <p></p> <p>For example: </p> <p>https://frigidsec-dpc-flagcheck.herokuapp.com/whatisthisbehaviourmona?check=e525dd0a29c3b8e9b223d7cc79d1393dd2b8c92ca9761968233d944242939605</p> <br> <h3>But what are you doing here when we have provided a SIMPLE input field <a href="/">AT THIS PLACE</a>? Not everything is a CTF, sometimes it's just simple software. Is that too much to ask?</h3> </center> """ if __name__ == '__main__': app.run("0.0.0.0",8080)
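Since the checker compares the submitted hex digest line-for-line against flaglist.dat, a submitter only needs to hash the plaintext flag. A minimal client-side sketch (the flag value is a made-up placeholder, not a real flag):

import hashlib

flag = "FLAG{example}"  # hypothetical placeholder flag
digest = hashlib.sha256(flag.encode("utf-8")).hexdigest()
print(digest)  # 64 hex chars, so it clears the length sanity check above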
37.878571
197
0.708844
4a07851f0b4b4ea80e7f0e2749d728a55b1131fd
1,248
py
Python
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/services/test_execution_service/transports/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
7
2021-02-21T10:39:41.000Z
2021-12-07T07:31:28.000Z
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/services/test_execution_service/transports/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
6
2021-02-02T23:46:11.000Z
2021-11-15T01:46:02.000Z
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/services/test_execution_service/transports/__init__.py
googleapis/googleapis-gen
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
[ "Apache-2.0" ]
4
2021-01-28T23:25:45.000Z
2021-08-30T01:55:16.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from typing import Dict, Type from .base import TestExecutionServiceTransport from .grpc import TestExecutionServiceGrpcTransport from .grpc_asyncio import TestExecutionServiceGrpcAsyncIOTransport # Compile a registry of transports. _transport_registry = OrderedDict() # type: Dict[str, Type[TestExecutionServiceTransport]] _transport_registry['grpc'] = TestExecutionServiceGrpcTransport _transport_registry['grpc_asyncio'] = TestExecutionServiceGrpcAsyncIOTransport __all__ = ( 'TestExecutionServiceTransport', 'TestExecutionServiceGrpcTransport', 'TestExecutionServiceGrpcAsyncIOTransport', )
36.705882
91
0.796474
4a07857eadc6db1db6a058fb9d14e9943a9dd7e6
5,873
py
Python
models/ssl.py
martinmanuel9/extreme_verification_latency
16f5ba2b1a37f6d60ed2089d6cab7331e688b0cc
[ "MIT" ]
null
null
null
models/ssl.py
martinmanuel9/extreme_verification_latency
16f5ba2b1a37f6d60ed2089d6cab7331e688b0cc
[ "MIT" ]
null
null
null
models/ssl.py
martinmanuel9/extreme_verification_latency
16f5ba2b1a37f6d60ed2089d6cab7331e688b0cc
[ "MIT" ]
1
2022-02-25T20:37:09.000Z
2022-02-25T20:37:09.000Z
#!/usr/bin/env python """ Application: COMPOSE Framework File name: ssl.py Author: Martin Manuel Lopez Advisor: Dr. Gregory Ditzler Creation: 08/05/2021 COMPOSE Origin: Muhammad Umer and Robi Polikar The University of Arizona Department of Electrical and Computer Engineering College of Engineering """ # MIT License # # Copyright (c) 2021 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import pandas as pd class ssl(): """ ssl is a class of semi-supervised learning classifiers that may be used in stationary and non-stationary environments. Depending on the classifier chosen, a variety of class balancing techniques are available to reduce the SSL problem of assigning all data to one class. """ _verbose = 2 # controls output to screen, which plots when possible and renders command line operations # 0 : Suppress all output # 1 : Give text updates to command window # 2 : Plot data when dimensionality allows and give text updates to command window _data =[] # N instances x D dimensions : Features of data with labeled data grouped at top of matrix _labels = [] _classifier = [] # Type of SSL classifier to use _classifierOpts = [] # Options that correspond with SSL Classifier selected - see individual methods for options _balance = [] # Type of class balancing to use _balanceOpts = [] # Options that correspond with Balance Function selected - see individual methods for options n_features=[] # Number of features in data (i.e. dimensionality of data) n_classes=[] # Number of different class labels n_instances=[] # Number of instances in data n_labeled=[] # Number of labeled instances in data n_unlabeled=[] # Number of unlabeled instances in data input_label_format=[] # Format of labels passed by user - 'integer' OR 'vector' input_label_ids=[] # Records the class identifiers of the labels passed by user label_format=[] # Current format of label # The cells below contain text strings that match the SSL # classifiers and class balance methods available in this object # If other classifiers or balancing methods are added to this # class these cells must be modified to include those methods valid_classifier = ['s3vm', 'label_prop','label_spread', 'cluster_n_label', 'cluster_n_label_v2', 'label_prop_bal'] valid_balance = ['none','mass','bid'] # 'reg' may be added back once its meaning is confirmed def set_ssl(self, verbose, *args): """ Sets COMPOSE dataset and information processing options Check if the input parameters are not empty for compose This checks if the dataset is empty and checks what option of feedback you want Gets dataset and verbose (the command to display options as COMPOSE processes) Verbose: 0 : no info is displayed 1 : Command Line progress updates 2 : Plots when possible and Command Line progress updates """ self._verbose = verbose # need to limit arguments to 2 for dataset and verbose max_args = 2 if len(args) > max_args: print("Too many input parameters. Input a valid dataset and a valid option to display information") # set object displayed info setting if self._verbose >= 0 and self._verbose <= 2: self._verbose = verbose else: print("Only 3 options to display information: 0 - No Info ; 1 - Command Line Progress Updates; 2 - Plots when possible and Command Line Progress") return verbose def set_data(self, data, labels, *args): """ Load data and labels in ssl """ # check to see if the number of instances in the data matches the number of labels # (the old getsizeof() comparison checked memory footprints, which is not what we want) if len(data) == len(labels): self._data = data self._labels = labels # Obtain size information of data self.n_instances = len(data) # assumes an unlabeled instance is a row whose label vector sums to zero, per the original comments label_sums = pd.DataFrame(labels).sum(axis=1) # sum across each row self.n_unlabeled = int((label_sums == 0).sum()) # count the instances that have zero, which are the unlabeled self.n_labeled = self.n_instances - self.n_unlabeled # The remaining instances must be labeled
45.527132
158
0.657245
4a0785dcfdfdce586850930ef07c97a418a12d63
9,552
py
Python
nanotune/model/utils.py
microsoft/nanotune
68be8f5b74a52d57b74ccac228e120d9ab48e3e4
[ "MIT" ]
5
2021-02-24T14:32:37.000Z
2022-01-05T16:37:26.000Z
nanotune/model/utils.py
microsoft/nanotune
68be8f5b74a52d57b74ccac228e120d9ab48e3e4
[ "MIT" ]
149
2021-03-23T14:44:39.000Z
2022-03-31T06:09:07.000Z
nanotune/model/utils.py
LaudateCorpus1/nanotune
0ada354597b16f6dbb17ca7be01ab7668b6d5049
[ "MIT" ]
10
2021-03-29T13:36:38.000Z
2022-02-16T23:06:35.000Z
import os from typing import List, Optional import numpy.typing as npt import matplotlib.pyplot as plt import numpy as np import scipy.fftpack as fp import scipy.signal as sg from scipy.ndimage import gaussian_filter, generic_gradient_magnitude, sobel from skimage.transform import resize import nanotune as nt from nanotune.data.dataset import default_coord_names N_2D = nt.config["core"]["standard_shapes"]["2"] def generate_one_f_noise( how_many: int = 20000, save_to_file: bool = True, filename: Optional[str] = None, ) -> npt.NDArray[np.float64]: """ """ fx_1d = fp.fftshift(fp.fftfreq(1000, d=0.02)) condensed_data_all = np.empty( [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)] ) for niter in range(how_many): condensed_data = np.empty( [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)] ) fx, fy = np.meshgrid(fx_1d, fx_1d, indexing="ij") f = np.sqrt(fx ** 2 + fy ** 2) f[f > 0] = np.divide(1, f[f > 0]) # if low_pass_cutoff is not None: # f[f > low_pass_cutoff] = 0 # if high_pass_cutoff is not None: # f[f < high_pass_cutoff] = 0 exponents = np.random.uniform(low=0, high=2 * np.pi, size=f.shape) power_spect = np.multiply(f, np.exp(1j * exponents)) noise = np.abs(fp.ifft2(power_spect)) noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise)) grad = generic_gradient_magnitude(noise, sobel) noise = resize(noise, N_2D, anti_aliasing=True, mode="constant").flatten() grad = resize(grad, N_2D, anti_aliasing=True, mode="constant").flatten() power_spect = resize( np.abs(power_spect), N_2D, anti_aliasing=True, mode="constant" ).flatten() index = nt.config["core"]["data_types"]["signal"] condensed_data[index, 0, :] = noise index = nt.config["core"]["data_types"]["frequencies"] condensed_data[index, 0, :] = power_spect index = nt.config["core"]["data_types"]["gradient"] condensed_data[index, 0, :] = grad condensed_data_all = np.concatenate( (condensed_data_all, condensed_data), axis=1 ) if save_to_file: if filename is None: filename = "one_over_f_noise.npy" path = os.path.join(nt.config["db_folder"], filename) np.save(path, condensed_data_all) return condensed_data_all def generate_white_noise( how_many: int = 20000, save_to_file: bool = True, filename: Optional[str] = None, ) -> npt.NDArray[np.float64]: """ """ condensed_data_all = np.empty( [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)] ) for niter in range(how_many): condensed_data = np.empty( [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)] ) coeff = np.random.normal(0, 1, N_2D) noise = np.abs(fp.ifft2(coeff)) grad = generic_gradient_magnitude(noise, sobel) index = nt.config["core"]["data_types"]["signal"] condensed_data[index, 0, :] = noise.flatten() index = nt.config["core"]["data_types"]["frequencies"] condensed_data[index, 0, :] = coeff.flatten() index = nt.config["core"]["data_types"]["gradient"] condensed_data[index, 0, :] = grad.flatten() condensed_data_all = np.concatenate( (condensed_data_all, condensed_data), axis=1 ) if save_to_file: if filename is None: filename = "white_noise.npy" path = os.path.join(nt.config["db_folder"], filename) np.save(path, condensed_data_all) return condensed_data_all def generate_current_drop( how_many: int = 20000, save_to_file: bool = True, filename: Optional[str] = None, ) -> npt.NDArray[np.float64]: """ """ condensed_data_all = np.empty( [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)] ) for niter in range(how_many): condensed_data = np.empty( [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)] ) xm, ym = np.meshgrid(np.linspace(0, 50, 50), np.linspace(0, 50, 50)) drop = np.sqrt((xm + ym) ** 2) drop = (drop - np.min(drop)) / (np.max(drop) - np.min(drop)) amp = np.random.uniform(0, 10, 1) offset = np.random.uniform(-5, 5, 1) drop = np.tanh(amp * drop + offset) drop = (drop - np.min(drop)) / (np.max(drop) - np.min(drop)) drop_freq = fp.fft2(drop) drop_freq = fp.fftshift(drop_freq) drop_freq = np.abs(drop_freq) grad = generic_gradient_magnitude(drop, sobel) index = nt.config["core"]["data_types"]["signal"] condensed_data[index, 0, :] = drop.flatten() index = nt.config["core"]["data_types"]["frequencies"] condensed_data[index, 0, :] = drop_freq.flatten() index = nt.config["core"]["data_types"]["gradient"] condensed_data[index, 0, :] = grad.flatten() condensed_data_all = np.concatenate( (condensed_data_all, condensed_data), axis=1 ) if save_to_file: if filename is None: filename = "current_drop.npy" path = os.path.join(nt.config["db_folder"], filename) np.save(path, condensed_data_all) return condensed_data_all def generate_random_telegraph_noise( how_many: int = 20000, save_to_file: bool = True, filename: Optional[str] = None, ) -> npt.NDArray[np.float64]: """ """ condensed_data_all = np.empty( [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)] ) for niter in range(how_many): condensed_data = np.empty( [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)] ) x = np.ones(N_2D) s = 1 # for n_switches in range(0, 1): lam = np.random.uniform(0, 0.2, 1) trnsp = np.random.randint(2, size=1) poisson = np.random.poisson(lam=lam, size=N_2D) poisson[poisson > 1] = 1 for ix in range(N_2D[0]): for iy in range(N_2D[0]): if poisson[ix, iy] == 1: s *= -1 x[ix, iy] *= s if trnsp: x = x.T x = (x + 1) / 2 noise_spect = fp.fft2(x) noise_spect = fp.fftshift(noise_spect) noise_spect = np.abs(noise_spect) grad = generic_gradient_magnitude(x, sobel) index = nt.config["core"]["data_types"]["signal"] condensed_data[index, 0, :] = x.flatten() index = nt.config["core"]["data_types"]["frequencies"] condensed_data[index, 0, :] = noise_spect.flatten() index = nt.config["core"]["data_types"]["gradient"] condensed_data[index, 0, :] = grad.flatten() condensed_data_all = np.concatenate( (condensed_data_all, condensed_data), axis=1 ) if save_to_file: if filename is None: filename = "random_telegraph_noise.npy" path = os.path.join(nt.config["db_folder"], filename) np.save(path, condensed_data_all) return condensed_data_all # define normalized 2D gaussian def gauss2d(x=0, y=0, mx=0, my=0, sx=1, sy=1): norm = 1.0 / (2.0 * np.pi * sx * sy) norm = norm * np.exp( -((x - mx) ** 2.0 / (2.0 * sx ** 2.0) + (y - my) ** 2.0 / (2.0 * sy ** 2.0)) ) return norm def generate_random_blobs( how_many: int = 20000, save_to_file: bool = True, filename: Optional[str] = None, n_blobs: int = 15, stdx: Optional[List[float]] = None, stdy: Optional[List[float]] = None, ) -> npt.NDArray[np.float64]: """ """ if stdx is None: stdx = [0.3, 0.8] if stdy is None: stdy = [0.3, 0.8] condensed_data_all = np.empty( [len(nt.config["core"]["data_types"]) - 1, 0, np.prod(N_2D)] ) for niter in range(how_many): condensed_data = np.empty( [len(nt.config["core"]["data_types"]) - 1, 1, np.prod(N_2D)] ) x = np.linspace(-1, 1) y = np.linspace(-1, 1) x, y = np.meshgrid(x, y) z = np.zeros(N_2D) for n_blob in range(n_blobs): z += gauss2d( x, y, mx=np.random.uniform(-1, 1, 1), my=np.random.uniform(-1, 1, 1), sx=np.random.uniform(*stdx, 1), # type: ignore sy=np.random.uniform(*stdy, 1), # type: ignore ) z = (z - np.min(z)) / (np.max(z) - np.min(z)) noise_spect = fp.fft2(z) noise_spect = fp.fftshift(noise_spect) noise_spect = np.abs(noise_spect) grad = generic_gradient_magnitude(z, sobel) index = nt.config["core"]["data_types"]["signal"] condensed_data[index, 0, :] = z.flatten() index = nt.config["core"]["data_types"]["frequencies"] condensed_data[index, 0, :] = noise_spect.flatten() index = nt.config["core"]["data_types"]["gradient"] condensed_data[index, 0, :] = grad.flatten() condensed_data_all = np.concatenate( (condensed_data_all, condensed_data), axis=1 ) if save_to_file: if filename is None: filename = "random_blobs.npy" path = os.path.join(nt.config["db_folder"], filename) np.save(path, condensed_data_all) return condensed_data_all
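gauss2d above is the standard normalized 2D Gaussian density, so integrating it over a wide enough grid should give roughly 1. A quick numerical sanity check (grid bounds and resolution chosen arbitrarily; assumes gauss2d from the module above is in scope):

import numpy as np

x = np.linspace(-6, 6, 601)
xx, yy = np.meshgrid(x, x)
z = gauss2d(xx, yy, mx=0, my=0, sx=1, sy=1)
print(z.sum() * (x[1] - x[0]) ** 2)  # ~1.0: the density integrates to one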
30.912621
84
0.57925
4a0785e2e68cf21e6d83218b634e0c8958251912
853
py
Python
forms.py
amesamoyers/PlanetaryGeologicMappers
a2d0afc1539790462119ea66bb670514fe3b7da5
[ "Unlicense" ]
null
null
null
forms.py
amesamoyers/PlanetaryGeologicMappers
a2d0afc1539790462119ea66bb670514fe3b7da5
[ "Unlicense" ]
null
null
null
forms.py
amesamoyers/PlanetaryGeologicMappers
a2d0afc1539790462119ea66bb670514fe3b7da5
[ "Unlicense" ]
null
null
null
from wtforms import TextField, StringField, Form, PasswordField from wtforms.validators import AnyOf, DataRequired, required from wtforms.widgets import TextArea class PageForm(Form): page_title = StringField(u"Title", [DataRequired(message = "No webpage title given.")], widget = TextArea()) page_name = StringField(u"Name", [DataRequired(message = "No webpage name given.")], widget = TextArea()) page_content = StringField(u"Content", [DataRequired(message = "No webpage content given.")], widget = TextArea()) class LoginForm(Form): admin_name = TextField(u'Admin Name', [required()]) admin_password = PasswordField(u'Admin Password', [required()])
40.619048
85
0.584994
4a07868fed30b57fe608bb5b36db75e2d1a29744
4,526
py
Python
backyard_flyer_solution.py
SagarmathaTech/jad-fcnd-term1-p2-motion-planning
281bfb87ee671094caa5f22861ab41f9884b7ca1
[ "MIT" ]
22
2018-05-31T22:54:15.000Z
2022-03-03T12:57:48.000Z
backyard_flyer_solution.py
SagarmathaTech/jad-fcnd-term1-p2-motion-planning
281bfb87ee671094caa5f22861ab41f9884b7ca1
[ "MIT" ]
3
2018-08-07T10:43:04.000Z
2022-03-10T06:52:27.000Z
backyard_flyer_solution.py
SagarmathaTech/jad-fcnd-term1-p2-motion-planning
281bfb87ee671094caa5f22861ab41f9884b7ca1
[ "MIT" ]
28
2018-03-26T17:19:57.000Z
2022-02-28T04:29:01.000Z
# -*- coding: utf-8 -*- """ Solution to the Backyard Flyer Project. """ import time from enum import Enum import numpy as np from udacidrone import Drone from udacidrone.connection import MavlinkConnection, WebSocketConnection # noqa: F401 from udacidrone.messaging import MsgID class States(Enum): MANUAL = 0 ARMING = 1 TAKEOFF = 2 WAYPOINT = 3 LANDING = 4 DISARMING = 5 class BackyardFlyer(Drone): def __init__(self, connection): super().__init__(connection) self.target_position = np.array([0.0, 0.0, 0.0]) self.all_waypoints = [] self.in_mission = True self.check_state = {} # initial state self.flight_state = States.MANUAL # register all your callbacks here self.register_callback(MsgID.LOCAL_POSITION, self.local_position_callback) self.register_callback(MsgID.LOCAL_VELOCITY, self.velocity_callback) self.register_callback(MsgID.STATE, self.state_callback) def local_position_callback(self): if self.flight_state == States.TAKEOFF: if -1.0 * self.local_position[2] > 0.95 * self.target_position[2]: self.all_waypoints = self.calculate_box() self.waypoint_transition() elif self.flight_state == States.WAYPOINT: if np.linalg.norm(self.target_position[0:2] - self.local_position[0:2]) < 1.0: if len(self.all_waypoints) > 0: self.waypoint_transition() else: if np.linalg.norm(self.local_velocity[0:2]) < 1.0: self.landing_transition() def velocity_callback(self): if self.flight_state == States.LANDING: if self.global_position[2] - self.global_home[2] < 0.1: if abs(self.local_position[2]) < 0.01: self.disarming_transition() def state_callback(self): if self.in_mission: if self.flight_state == States.MANUAL: self.arming_transition() elif self.flight_state == States.ARMING: if self.armed: self.takeoff_transition() elif self.flight_state == States.DISARMING: if not self.armed and not self.guided: # logical not/and: bitwise ~/& on Python bools is always truthy self.manual_transition() def calculate_box(self): print("calculating box waypoints") local_waypoints = [[10.0, 0.0, 3.0], [10.0, 10.0, 3.0], [0.0, 10.0, 3.0], [0.0, 0.0, 3.0]] return local_waypoints def arming_transition(self): print("arming transition") self.take_control() self.arm() self.set_home_position(self.global_position[0], self.global_position[1], self.global_position[2]) # set the current location to be the home position self.flight_state = States.ARMING def takeoff_transition(self): print("takeoff transition") # self.global_home = np.copy(self.global_position) # can't write to this variable! 
target_altitude = 3.0 self.target_position[2] = target_altitude self.takeoff(target_altitude) self.flight_state = States.TAKEOFF def waypoint_transition(self): print("waypoint transition") self.target_position = self.all_waypoints.pop(0) print('target position', self.target_position) self.cmd_position(self.target_position[0], self.target_position[1], self.target_position[2], 0.0) self.flight_state = States.WAYPOINT def landing_transition(self): print("landing transition") self.land() self.flight_state = States.LANDING def disarming_transition(self): print("disarm transition") self.disarm() self.release_control() self.flight_state = States.DISARMING def manual_transition(self): print("manual transition") self.stop() self.in_mission = False self.flight_state = States.MANUAL def start(self): self.start_log("Logs", "NavLog.txt") # self.connect() print("starting connection") # self.connection.start() super().start() # Only required if they do threaded # while self.in_mission: # pass self.stop_log() if __name__ == "__main__": conn = MavlinkConnection('tcp:127.0.0.1:5760', threaded=False, PX4=False) #conn = WebSocketConnection('ws://127.0.0.1:5760') drone = BackyardFlyer(conn) time.sleep(2) drone.start()
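The waypoint hand-off in local_position_callback is just a 1 m horizontal deadband test in the local NED frame. A standalone sketch of that check, with made-up positions for illustration:

import numpy as np

target = np.array([10.0, 0.0, 3.0])          # north, east, target altitude
local_position = np.array([9.4, 0.3, -2.9])  # NED: down is positive, so altitude is ~2.9 m

# the same check local_position_callback uses before popping the next waypoint
if np.linalg.norm(target[0:2] - local_position[0:2]) < 1.0:
    print("within 1 m of the waypoint: transition to the next one")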
32.328571
107
0.617543
4a078699f572a5189ecac85685b523705cccf793
3,133
py
Python
_2michaeltaylor/settings.py
mjt145/2michaeltaylor
ab2d4fde1d614842ab367f95bad262d1c7ea2878
[ "MIT" ]
null
null
null
_2michaeltaylor/settings.py
mjt145/2michaeltaylor
ab2d4fde1d614842ab367f95bad262d1c7ea2878
[ "MIT" ]
null
null
null
_2michaeltaylor/settings.py
mjt145/2michaeltaylor
ab2d4fde1d614842ab367f95bad262d1c7ea2878
[ "MIT" ]
null
null
null
""" Django settings for _2michaeltaylor project. Generated by 'django-admin startproject' using Django 1.8.3. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) MAIN_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'ie06ntlaluelb7lh5@4-qyksf6+_3pkle^jh0kco!5slnlabm0' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'core', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = '_2michaeltaylor.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(MAIN_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = '_2michaeltaylor.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Parse database configuration from $DATABASE_URL import dj_database_url DATABASES['default'] = dj_database_url.config() # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(MAIN_DIR, 'static'), ) STATIC_ROOT = 'staticfiles'
26.108333
71
0.713693
4a07876666a6d2387a3098c36153378ee88e83a9
123
py
Python
Lab_Dash/Route.py
SimonSchubotz/Electronic-Laboratory-Notebook
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
[ "Apache-2.0" ]
null
null
null
Lab_Dash/Route.py
SimonSchubotz/Electronic-Laboratory-Notebook
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
[ "Apache-2.0" ]
null
null
null
Lab_Dash/Route.py
SimonSchubotz/Electronic-Laboratory-Notebook
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
[ "Apache-2.0" ]
null
null
null
from channels.routing import ProtocolTypeRouter application = ProtocolTypeRouter({ }) """ Routing the dash application """
20.5
47
0.788618
4a078941483f4012cc20c4e55bcd43625789dd15
1,786
py
Python
tests/test_tasks/test_task_methods.py
hp2500/openml-python
62cc534cd18e6e011a88a83816fec95a90399a9b
[ "BSD-3-Clause" ]
1
2019-09-02T00:28:26.000Z
2019-09-02T00:28:26.000Z
tests/test_tasks/test_task_methods.py
hp2500/openml-python
62cc534cd18e6e011a88a83816fec95a90399a9b
[ "BSD-3-Clause" ]
null
null
null
tests/test_tasks/test_task_methods.py
hp2500/openml-python
62cc534cd18e6e011a88a83816fec95a90399a9b
[ "BSD-3-Clause" ]
1
2019-09-02T00:29:32.000Z
2019-09-02T00:29:32.000Z
from time import time import openml from openml.testing import TestBase # Common methods between tasks class OpenMLTaskMethodsTest(TestBase): def setUp(self): super(OpenMLTaskMethodsTest, self).setUp() def tearDown(self): super(OpenMLTaskMethodsTest, self).tearDown() def test_tagging(self): task = openml.tasks.get_task(1) tag = "testing_tag_{}_{}".format(self.id(), time()) task_list = openml.tasks.list_tasks(tag=tag) self.assertEqual(len(task_list), 0) task.push_tag(tag) task_list = openml.tasks.list_tasks(tag=tag) self.assertEqual(len(task_list), 1) self.assertIn(1, task_list) task.remove_tag(tag) task_list = openml.tasks.list_tasks(tag=tag) self.assertEqual(len(task_list), 0) def test_get_train_and_test_split_indices(self): openml.config.cache_directory = self.static_cache_dir task = openml.tasks.get_task(1882) train_indices, test_indices = task.get_train_test_split_indices(0, 0) self.assertEqual(16, train_indices[0]) self.assertEqual(395, train_indices[-1]) self.assertEqual(412, test_indices[0]) self.assertEqual(364, test_indices[-1]) train_indices, test_indices = task.get_train_test_split_indices(2, 2) self.assertEqual(237, train_indices[0]) self.assertEqual(681, train_indices[-1]) self.assertEqual(583, test_indices[0]) self.assertEqual(24, test_indices[-1]) self.assertRaisesRegexp(ValueError, "Fold 10 not known", task.get_train_test_split_indices, 10, 0) self.assertRaisesRegexp(ValueError, "Repeat 10 not known", task.get_train_test_split_indices, 0, 10)
38.826087
77
0.666853
4a078a6a2555fea3fbe7de9c61245a148170bce8
3,060
py
Python
mobilenet.py
xingmimfl/pytorch_Mobilenet
aaeacd2b21d1cf1c70f3e9f4a080aad5b06f3345
[ "MIT" ]
null
null
null
mobilenet.py
xingmimfl/pytorch_Mobilenet
aaeacd2b21d1cf1c70f3e9f4a080aad5b06f3345
[ "MIT" ]
1
2019-12-19T03:28:32.000Z
2019-12-19T03:28:32.000Z
mobilenet.py
xingmimfl/pytorch_Mobilenet
aaeacd2b21d1cf1c70f3e9f4a080aad5b06f3345
[ "MIT" ]
null
null
null
import torch import torch.nn as nn class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, bn=True, same_padding=False, bias=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) self.bn = nn.BatchNorm2d(out_channels) if bn else None self.relu = nn.ReLU(inplace=True) if relu else None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.relu is not None: x = self.relu(x) return x class DepthwiseSepConv2d(nn.Module): def __init__(self, in_channels, out_channels, strides): super(DepthwiseSepConv2d, self).__init__() self.depthwise_conv = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides, groups=in_channels, padding=1, bias=False) self.pointwise_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False) self.depthwise_bn = nn.BatchNorm2d(in_channels) self.pointwise_bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.depthwise_conv(x) x = self.depthwise_bn(x) x = self.relu(x) x = self.pointwise_conv(x) x = self.pointwise_bn(x) x = self.relu(x) return x class MobileNet(nn.Module): def __init__(self, num_classes=1000, width_multiplier=1, Training=False): """ num_classes: number of predicted classes. Training: whether or not the model is being trained. """ super(MobileNet, self).__init__() self.features = nn.Sequential( Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2, same_padding=True), DepthwiseSepConv2d(32, 64, 1), DepthwiseSepConv2d(64, 128, 2), DepthwiseSepConv2d(128, 128, 1), DepthwiseSepConv2d(128, 256, 2), DepthwiseSepConv2d(256, 256, 1), DepthwiseSepConv2d(256, 512, 2), DepthwiseSepConv2d(512, 512, 1), DepthwiseSepConv2d(512, 512, 1), DepthwiseSepConv2d(512, 512, 1), DepthwiseSepConv2d(512, 512, 1), DepthwiseSepConv2d(512, 512, 1), DepthwiseSepConv2d(512, 1024, 2), DepthwiseSepConv2d(1024, 1024, 1), nn.AvgPool2d(kernel_size=7) ) self.fc = nn.Linear(in_features=1024, out_features=num_classes) def forward(self, x): x = self.features(x) # print("x.size():\t", x.size()) x = x.view(-1, 1024) x = self.fc(x) return x if __name__ == "__main__": net = MobileNet() print(net)
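The point of the depthwise separable block is the parameter saving over a standard 3x3 convolution. A quick check, assuming it is run in the same module so the DepthwiseSepConv2d class above is in scope (counts are for in=128, out=256; the second number includes the BatchNorm affine parameters):

import torch.nn as nn

def n_params(module):
    # total number of trainable weights in a module
    return sum(p.numel() for p in module.parameters())

standard = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=False)
separable = DepthwiseSepConv2d(128, 256, strides=1)

print(n_params(standard))   # 294912 = 128*256*3*3
print(n_params(separable))  # 34688 = 128*3*3 + 128*256 + BatchNorm weights/biases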
37.317073
105
0.589542
4a078bac82355d56bd81fbf1d57909ebdca9ecbb
10,191
py
Python
components/micropython/modules/sha2017_backup/woezel.py
badgeteam/Firmware
6192b2902c70beb7a298a256d9087274d045fbc0
[ "Apache-2.0" ]
7
2019-02-11T10:02:14.000Z
2019-08-02T00:08:45.000Z
components/micropython/modules/sha2017_backup/woezel.py
badgeteam/Firmware
6192b2902c70beb7a298a256d9087274d045fbc0
[ "Apache-2.0" ]
17
2019-01-05T18:02:11.000Z
2019-03-09T21:46:43.000Z
components/micropython/modules/sha2017_backup/woezel.py
badgeteam/Firmware
6192b2902c70beb7a298a256d9087274d045fbc0
[ "Apache-2.0" ]
4
2019-02-15T16:03:20.000Z
2019-06-27T22:23:24.000Z
import sys import gc import uos as os import uerrno as errno import ujson as json import uzlib import upip_utarfile as tarfile gc.collect() debug = False install_path = None cleanup_files = [] gzdict_sz = 16 + 15 file_buf = bytearray(512) class NotFoundError(Exception): pass class LatestInstalledError(Exception): pass def op_split(path): if path == "": return ("", "") r = path.rsplit("/", 1) if len(r) == 1: return ("", path) head = r[0] if not head: head = "/" return (head, r[1]) def op_basename(path): return op_split(path)[1] # Expects *file* name def _makedirs(name, mode=0o777): ret = False s = "" comps = name.rstrip("/").split("/")[:-1] if comps[0] == "": s = "/" for c in comps: if s and s[-1] != "/": s += "/" s += c try: os.mkdir(s) ret = True except OSError as e: if e.args[0] != errno.EEXIST and e.args[0] != errno.EISDIR: raise ret = False return ret def save_file(fname, subf): global file_buf with open(fname, "wb") as outf: while True: sz = subf.readinto(file_buf) if not sz: break outf.write(file_buf, sz) def install_tar(f, prefix): meta = {} for info in f: #print(info) fname = info.name try: fname = fname[fname.index("/") + 1:] except ValueError: fname = "" save = True for p in ("setup.", "PKG-INFO", "README"): #print(fname, p) if fname.startswith(p) or ".egg-info" in fname: if fname.endswith("/requires.txt"): meta["deps"] = f.extractfile(info).read() save = False if debug: print("Skipping", fname) break if save: outfname = prefix + fname if info.type != tarfile.DIRTYPE: if debug: print("Extracting " + outfname) _makedirs(outfname) subf = f.extractfile(info) save_file(outfname, subf) return meta def expandhome(s): if "~/" in s: h = os.getenv("HOME") s = s.replace("~/", h + "/") return s import ussl import usocket def url_open(url): if debug: print(url) proto, _, host, urlpath = url.split('/', 3) try: ai = usocket.getaddrinfo(host, 443) except OSError as e: fatal("Unable to resolve %s (no Internet?)" % host, e) #print("Address infos:", ai) if len(ai) == 0: fatal("Unable to resolve %s (no Internet?)" % host, errno.EHOSTUNREACH) addr = ai[0][4] s = usocket.socket(ai[0][0]) try: #print("Connect address:", addr) s.connect(addr) if proto == "https:": s = ussl.wrap_socket(s, server_hostname=host) # MicroPython rawsocket module supports file interface directly s.write("GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n" % (urlpath, host)) l = s.readline() protover, status, msg = l.split(None, 2) if status != b"200": if status == b"404" or status == b"301": raise NotFoundError("Package not found") raise ValueError(status) while 1: l = s.readline() if not l: raise ValueError("Unexpected EOF in HTTP headers") if l == b'\r\n': break except Exception as e: s.close() raise e return s def get_pkg_metadata(name): f = url_open("https://badge.team/eggs/get/%s/json" % name) try: return json.load(f) finally: f.close() def get_pkg_list(): f = url_open("https://badge.team/eggs/list/json") try: return json.load(f) finally: f.close() def search_pkg_list(query): f = url_open("https://badge.team/eggs/search/%s/json" % query) try: return json.load(f) finally: f.close() def fatal(msg, exc=None): print("Error:", msg) if exc and debug: raise exc sys.exit(1) def install_pkg(pkg_spec, install_path, force_reinstall): data = get_pkg_metadata(pkg_spec) already_installed = False try: os.stat("%s%s/" % (install_path, pkg_spec)) except OSError as e: if e.args[0] == errno.EINVAL: print("Package %s already installed" % (pkg_spec)) already_installed = True else: print("Package %s not yet installed" % (pkg_spec)) else: # fallback for 
unix version print("Package %s already installed" % (pkg_spec)) already_installed = True latest_ver = data["info"]["version"] verf = "%s%s/version" % (install_path, pkg_spec) if already_installed: try: with open(verf, "r") as fver: old_ver = fver.read() except OSError: print("No version file found") else: if old_ver == latest_ver: if not force_reinstall: raise LatestInstalledError("Latest version installed") else: print("Removing previous rev. %s" % old_ver) for rm_file in os.listdir("%s%s" % (install_path, pkg_spec)): os.remove("%s%s/%s" % (install_path, pkg_spec, rm_file)) packages = data["releases"][latest_ver] del data gc.collect() assert len(packages) == 1 package_url = packages[0]["url"] print("Installing %s rev. %s from %s" % (pkg_spec, latest_ver, package_url)) package_fname = op_basename(package_url) f1 = url_open(package_url) try: f2 = uzlib.DecompIO(f1, gzdict_sz) f3 = tarfile.TarFile(fileobj=f2) meta = install_tar(f3, "%s%s/" % (install_path, pkg_spec)) finally: f1.close() del f3 del f2 with open(verf, "w") as fver: fver.write(latest_ver) del fver gc.collect() return meta def install(to_install, install_path=None, force_reinstall=False): # Calculate gzip dictionary size to use global gzdict_sz sz = gc.mem_free() + gc.mem_alloc() if sz <= 65536: # this will probably give errors with some packages, but we # just don't have enough memory. gzdict_sz = 16 + 13 if install_path is None: install_path = get_install_path() if install_path[-1] != "/": install_path += "/" if not isinstance(to_install, list): to_install = [to_install] print("Installing to: " + install_path) # sets would be perfect here, but don't depend on them installed = [] try: while to_install: if debug: print("Queue:", to_install) pkg_spec = to_install.pop(0) if pkg_spec in installed: continue meta = install_pkg(pkg_spec, install_path, force_reinstall) installed.append(pkg_spec) if debug: print(meta) deps = meta.get("deps", b"").rstrip(b" \t\n\r\v\f\x00") # deps is stored as bytes by install_tar if deps: deps = deps.decode("utf-8").split("\n") to_install.extend(deps) except Exception as e: print("Error installing '{}': {}, packages may be partially installed".format( pkg_spec, e), file=sys.stderr) raise e def display_pkg(packages): for package in packages: print(package["name"]) print(" Slug: " + package["slug"]) print(" Version: " + package["revision"]) print(" Description: " + package["description"]) def search(query="*"): if query == "*": packages = get_pkg_list() else: packages = search_pkg_list(query) display_pkg(packages) def get_install_path(): global install_path if install_path is None: # sys.path[0] is current module's path install_path = sys.path[1] install_path = expandhome(install_path) return install_path def cleanup(): for fname in cleanup_files: try: os.unlink(fname) except OSError: print("Warning: Cannot delete " + fname) def help(): print("""\ woezel - Clone of the Simple PyPI package manager for MicroPython Usage: micropython -m woezel install [-p <path>] <package>... | -r <requirements.txt> import woezel woezel.install(package_or_list, [<path>]) woezel.search([query]) If <path> is not given, packages will be installed into sys.path[1] (can be set from MICROPYPATH environment variable, if current system supports that).""") print("Current value of sys.path[1]:", sys.path[1]) print("""\ Note: only MicroPython packages are supported for installation, woezel, like upip, does not support arbitrary code in setup.py. 
""") def main(): global debug global install_path install_path = None if len(sys.argv) < 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help": help() return if sys.argv[1] != "install": fatal("Only 'install' command supported") to_install = [] i = 2 while i < len(sys.argv) and sys.argv[i][0] == "-": opt = sys.argv[i] i += 1 if opt == "-h" or opt == "--help": help() return elif opt == "-p": install_path = sys.argv[i] i += 1 elif opt == "-r": list_file = sys.argv[i] i += 1 with open(list_file) as f: while True: l = f.readline() if not l: break if l[0] == "#": continue to_install.append(l.rstrip(" \t\n\r\v\f\x00")) elif opt == "--debug": debug = True else: fatal("Unknown/unsupported option: " + opt) to_install.extend(sys.argv[i:]) if not to_install: help() return install(to_install) if not debug: cleanup() if __name__ == "__main__": main()
27.469003
86
0.540281
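The record above is woezel, badge.team's upip-derived package manager for MicroPython badges. A minimal usage sketch, assuming a board with network access and woezel importable; the egg slug "game_of_life" is an invented example, not a package guaranteed to exist on the index:

# Hypothetical REPL session on a badge.team device.
import woezel

# Query the badge.team egg index (the endpoints hard-coded in the module).
woezel.search("life")

# Install one egg plus its declared dependencies into sys.path[1];
# install_pkg raises LatestInstalledError when the egg is already current.
try:
    woezel.install("game_of_life")
except woezel.LatestInstalledError:
    print("already up to date")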
4a078c75b2e45c2072b55df47666e43db044972b
348
py
Python
lazythumbs/tests/__init__.py
caktus/lazythumbs
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
[ "MIT" ]
1
2017-07-24T22:06:25.000Z
2017-07-24T22:06:25.000Z
lazythumbs/tests/__init__.py
caktus/lazythumbs
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
[ "MIT" ]
null
null
null
lazythumbs/tests/__init__.py
caktus/lazythumbs
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
[ "MIT" ]
null
null
null
from lazythumbs.tests.test_server import RenderTest, GetViewTest
from lazythumbs.tests.test_templatetag import LazythumbSyntaxTest, LazythumbGeometryCompileTest, LazythumbRenderTest
from lazythumbs.tests.test_templatetag import ImgAttrsRenderTest
from lazythumbs.tests.test_util import TestGeometry, TestComputeIMG, TestGetImgAttrs, TestGetFormat
69.6
116
0.893678
4a078d17125636f32bdc3c0268b56986ecfb7ef8
3,505
py
Python
setup.py
FelixdenBreejen/PySCIPOpt
a6dbfdfd565d29da705d147fddfc732c8bc5ca93
[ "MIT" ]
null
null
null
setup.py
FelixdenBreejen/PySCIPOpt
a6dbfdfd565d29da705d147fddfc732c8bc5ca93
[ "MIT" ]
null
null
null
setup.py
FelixdenBreejen/PySCIPOpt
a6dbfdfd565d29da705d147fddfc732c8bc5ca93
[ "MIT" ]
null
null
null
from setuptools import setup, Extension
import os, platform, sys, re
import numpy as np

# look for environment variable that specifies path to SCIP
scipoptdir = os.environ.get('SCIPOPTDIR', '').strip('"')

extra_compile_args = []
extra_link_args = []

# determine include directory
if os.path.exists(os.path.join(scipoptdir, 'src')):
    # SCIP seems to be installed in place
    includedir = os.path.abspath(os.path.join(scipoptdir, 'src'))
else:
    # assume that SCIP is installed on the system
    includedir = os.path.abspath(os.path.join(scipoptdir, 'include'))

print('Using include path <%s>.' % includedir)

# determine library
if os.path.exists(os.path.join(scipoptdir, 'lib/shared/libscipsolver.so')):
    # SCIP seems to be created with make
    libdir = os.path.abspath(os.path.join(scipoptdir, 'lib/shared'))
    libname = 'scipsolver'
    extra_compile_args.append('-DNO_CONFIG_HEADER')
else:
    # assume that SCIP is installed on the system
    libdir = os.path.abspath(os.path.join(scipoptdir, 'lib'))
    libname = 'scip'
    if platform.system() in ['Windows']:
        libname = 'libscip'

print('Using SCIP library <%s> at <%s>.' % (libname,libdir))

# set runtime libraries
if platform.system() in ['Linux', 'Darwin']:
    extra_link_args.append('-Wl,-rpath,'+libdir)

# enable debug mode if requested
if "--debug" in sys.argv:
    extra_compile_args.append('-UNDEBUG')
    sys.argv.remove("--debug")

use_cython = True

packagedir = os.path.join('src', 'pyscipopt')

with open(os.path.join(packagedir, '__init__.py'), 'r') as initfile:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', initfile.read(), re.MULTILINE).group(1)

try:
    from Cython.Build import cythonize
except ImportError:
    if not os.path.exists(os.path.join(packagedir, 'scip.c')):
        print('Cython is required')
        quit(1)
    use_cython = False

if not os.path.exists(os.path.join(packagedir, 'scip.pyx')):
    use_cython = False

ext = '.pyx' if use_cython else '.c'

extensions = [Extension('pyscipopt.scip', [os.path.join(packagedir, 'scip'+ext)],
                        include_dirs=[includedir],
                        library_dirs=[libdir],
                        libraries=[libname],
                        extra_compile_args = extra_compile_args,
                        extra_link_args=extra_link_args
                        )]

if use_cython:
    extensions = cythonize(extensions, compiler_directives={'language_level': 3})

with open('README.md') as f:
    long_description = f.read()

setup(
    name='PySCIPOpt',
    version=version,
    description='Python interface and modeling environment for SCIP',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/SCIP-Interfaces/PySCIPOpt',
    author='Zuse Institute Berlin',
    author_email='scip@zib.de',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Cython',
        'Topic :: Scientific/Engineering :: Mathematics'],
    ext_modules=extensions,
    install_requires=['wheel'],
    packages=['pyscipopt'],
    package_dir={'pyscipopt': packagedir},
    package_data={'pyscipopt': ['scip.pyx', 'scip.pxd', '*.pxi']},
    include_dirs = [np.get_include()]
)
33.380952
81
0.649358
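The setup script above reads __version__ out of the package's __init__.py with a regex instead of importing the package, since importing would require the compiled Cython extension to already exist. A self-contained sketch of that trick; the sample source string is invented:

# Standalone sketch of the version-extraction pattern used in the setup
# script: pull __version__ from source text without executing the module.
import re

source = '__version__ = "4.2.0"\n__author__ = "ZIB"\n'  # invented sample

match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                  source, re.MULTILINE)
assert match is not None
print(match.group(1))   # prints: 4.2.0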
4a078d4fe3e7eeba7438846e65e635e630f94abb
755
py
Python
src/ZPublisher/Publish.py
Mattlk13/Zope
b26ba322565f640f1c62b4a8d6b407cf5df5fdcd
[ "ZPL-2.1" ]
null
null
null
src/ZPublisher/Publish.py
Mattlk13/Zope
b26ba322565f640f1c62b4a8d6b407cf5df5fdcd
[ "ZPL-2.1" ]
1
2020-11-11T07:11:31.000Z
2020-11-11T07:11:31.000Z
src/ZPublisher/Publish.py
Mattlk13/Zope
b26ba322565f640f1c62b4a8d6b407cf5df5fdcd
[ "ZPL-2.1" ]
null
null
null
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################

from zope.deferredimport import deprecated


# BBB Zope 5.0
deprecated(
    'Please import from ZPublisher.',
    Retry='ZPublisher:Retry',
)
34.318182
78
0.6
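The record above is a backward-compatibility shim: zope.deferredimport.deprecated installs a lazy module attribute that warns when first accessed and then resolves to the real object. A sketch of the expected behaviour, assuming a Zope installation where ZPublisher.Retry exists:

# Sketch: accessing the deferred name should emit a DeprecationWarning
# with the 'Please import from ZPublisher.' message, then hand back the
# real ZPublisher.Retry object.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from ZPublisher.Publish import Retry  # deferred attribute access fires here

print(caught[0].category)  # expected: <class 'DeprecationWarning'>
print(Retry)               # expected: the ZPublisher.Retry exception class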
4a078da5a233ca5c6490ecb64daa4f88a1b5b136
19,947
py
Python
author_tests.py
PhysicsUofRAUI/lifeLongLearning
36e098d4319d3500509861454fa3e27a67416802
[ "MIT" ]
null
null
null
author_tests.py
PhysicsUofRAUI/lifeLongLearning
36e098d4319d3500509861454fa3e27a67416802
[ "MIT" ]
38
2020-06-09T00:07:09.000Z
2021-02-06T17:18:20.000Z
author_tests.py
PhysicsUofRAUI/lifeLongLearning
36e098d4319d3500509861454fa3e27a67416802
[ "MIT" ]
null
null
null
import unittest from flask_testing import TestCase from config import TestConfiguration from app import create_app as c_app import os from flask import session, url_for, template_rendered from app.models import Author, Worksheet, WorksheetCategory from app.database import db from contextlib import contextmanager from werkzeug.security import generate_password_hash, check_password_hash def login_author(client, email, password): return client.post('/author_login', data=dict( email=email, password=password ), follow_redirects=True) def logout_author(client): return client.get('/author_logout', follow_redirects=True) def login(client, username, password): return client.post('/login', data=dict( username=username, password=password ), follow_redirects=True) def logout(client): return client.get('/logout', follow_redirects=True) @contextmanager def captured_templates(app): recorded = [] def record(sender, template, context, **extra): recorded.append((template, context)) template_rendered.connect(record, app) try: yield recorded finally: template_rendered.disconnect(record, app) class BasicTests(TestCase): ############################ #### setup and teardown #### ############################ def create_app(self): app = c_app(TestConfiguration) return app # executed prior to each test def setUp(self): pass # executed after each test def tearDown(self): pass ######################################### ####### Tests For Pages Admin Uses ###### ######################################### def test_add_author_page(self): response = self.client.get('/add_author', follow_redirects=True) self.assertEqual(response.status_code, 200) def test_edit_author_page(self): response = self.client.get('/edit_author/1', follow_redirects=True) self.assertEqual(response.status_code, 200) def test_delete_author_page(self): response = self.client.get('/delete_author/1', follow_redirects=True) self.assertEqual(response.status_code, 200) # checking if the user gets kicked out of protected views when not logged in # will check this to see if the request results in a redirect def test_add_author_page_r(self): response = self.client.get('/add_author', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_edit_author_page_r(self): response = self.client.get('/edit_author/1', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_delete_author_page_r(self): response = self.client.get('/delete_author/1', follow_redirects=False) self.assertEqual(response.status_code, 302) ######################################### ###### Tests For Pages Author Uses ###### ######################################### def test_author_change_email_nl(self): response = self.client.get('/author_change_email/1', follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client.get('/author_change_email/1', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_author_change_about_nl(self): response = self.client.get('/author_change_about/1', follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client.get('/author_change_about/1', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_author_change_password_nl(self): response = self.client.get('/author_change_password/1', follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client.get('/author_change_password/1', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_author_change_screenname_nl(self): response = 
self.client.get('/author_change_screenname/1', follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client.get('/author_change_screenname/1', follow_redirects=False) self.assertEqual(response.status_code, 302) def test_author_dashboard_nl(self): response = self.client.get('/author_dashboard', follow_redirects=True) self.assertEqual(response.status_code, 200) response = self.client.get('/author_dashboard', follow_redirects=False) self.assertEqual(response.status_code, 302) class DatabaseEditTests(TestCase): def create_app(self): app = c_app(TestConfiguration) return app # executed prior to each test def setUp(self): self.app_context = self.app.app_context() self.app_context.push() db.create_all() login(self.client, os.getenv('LOGIN_USERNAME'), os.getenv('LOGIN_PASSWORD')) # executed after each test def tearDown(self): logout(self.client) db.session.remove() db.drop_all() self.app_context.pop() ######################################### ####### Tests For Pages Admin Uses ###### ######################################### def test_add_author_page_li(self): response = self.client.get('/add_author', follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/add_author', follow_redirects=True, data=dict(name='Kody', email='kodyrogers21@gmail.com', about='I am a hacker', screenname='blah', password='password')) auth = Author.query.filter_by(name='Kody').first() self.assertEqual(response_1.status_code, 200) self.assertNotEqual(auth, None) self.assertEqual(auth.name, 'Kody') self.assertEqual(auth.screenname, None) self.assertEqual(auth.about, None) self.assertEqual(auth.email, 'kodyrogers21@gmail.com') self.assertEqual(check_password_hash(auth.password, 'password'), True) response_1 = self.client.post('/add_author', follow_redirects=True, data=dict(name='Kody1', email='kodyrogers@gmail.com', password='honkog')) auth_1 = Author.query.filter_by(name='Kody1').first() self.assertEqual(response_1.status_code, 200) self.assertNotEqual(auth_1, None) self.assertEqual(auth_1.name, 'Kody1') self.assertEqual(auth_1.screenname, None) self.assertEqual(auth_1.about, None) self.assertEqual(check_password_hash(auth_1.password, 'honkog'), True) def test_edit_author_page_li(self): author = Author(name='KJsa', password='password', email='kodya@hotmail.com') db.session.add(author) db.session.commit() response = self.client.get('/edit_author/1', follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/edit_author/1', follow_redirects=True, data=dict(password='RockOn')) self.assertEqual(response_1.status_code, 200) edited_author = Author.query.filter_by(name='KJsa').first() self.assertNotEqual(edited_author, None) self.assertNotEqual(edited_author.password, generate_password_hash('RockOn')) self.assertEqual(edited_author.email, 'kodya@hotmail.com') response_2 = self.client.post('/edit_author/1', follow_redirects=True, data=dict(password='RockOn', about='hey hey', screenname='yoh', name='Kody', email='kody15@nhl.com')) self.assertEqual(response_2.status_code, 200) edited_author_1 = Author.query.filter_by(name='Kody').first() self.assertNotEqual(edited_author_1, None) self.assertNotEqual(edited_author_1.password, generate_password_hash('RockOn')) self.assertEqual(edited_author_1.email, 'kody15@nhl.com') self.assertEqual(edited_author_1.screenname, None) self.assertEqual(edited_author_1.about, None) def test_delete_author_page_li(self): auth_1 = Author(name='Kidkaid', email='kodyrogers21@gmail.com', 
password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() response = self.client.get('/delete_author/1', follow_redirects=False) self.assertEqual(response.status_code, 302) auth_1 = Author.query.filter_by(name='kidkaid').first() self.assertEqual(auth_1, None) ######################################### ###### Tests For Pages Author Uses ###### ######################################### def test_author_change_about(self) : auth_1 = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() login_author(self.client, email='kodyrogers21@gmail.com', password='RockOn') response = self.client.get(url_for('author.author_change_about', id=auth_1.id), follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/author_change_about/1', data=dict(about='I love rock music'), follow_redirects=True) auth = Author.query.filter_by(name='KJsa').first() self.assertEqual(response_1.status_code, 200) self.assertEqual(response_1.status_code, 200) self.assertEqual(auth.email, 'kodyrogers21@gmail.com') self.assertEqual(auth.about, 'I love rock music') logout_author(self.client) def test_author_change_password(self) : auth_1 = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() login_author(self.client, email='kodyrogers21@gmail.com', password='RockOn') response = self.client.get(url_for('author.author_change_password', id=auth_1.id), follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/author_change_password/1', data=dict(password='weeeehooo'), follow_redirects=True) auth = Author.query.filter_by(name='KJsa').first() self.assertEqual(response_1.status_code, 200) self.assertEqual(response_1.status_code, 200) self.assertEqual(auth.email, 'kodyrogers21@gmail.com') self.assertEqual(check_password_hash(auth.password, 'weeeehooo'), True) logout_author(self.client) def test_author_change_screenname(self) : auth_1 = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() login_author(self.client, email='kodyrogers21@gmail.com', password='RockOn') response = self.client.get(url_for('author.author_change_screenname', id=auth_1.id), follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/author_change_screenname/1', data=dict(screenname='logical'), follow_redirects=True) auth = Author.query.filter_by(name='KJsa').first() self.assertEqual(response_1.status_code, 200) self.assertEqual(response_1.status_code, 200) self.assertEqual(auth.email, 'kodyrogers21@gmail.com') self.assertEqual(auth.password, 'pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') self.assertEqual(auth.screenname, 'logical') logout_author(self.client) def test_author_change_email(self) : auth_1 = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', 
password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() login_author(self.client, email='kodyrogers21@gmail.com', password='RockOn') response = self.client.get(url_for('author.author_change_email', id=auth_1.id), follow_redirects=False) self.assertEqual(response.status_code, 200) response_1 = self.client.post('/author_change_email/1', data=dict(email='kody15@hotmail.com'), follow_redirects=True) auth = Author.query.filter_by(name='KJsa').first() self.assertEqual(response_1.status_code, 200) self.assertEqual(response_1.status_code, 200) self.assertEqual(auth.email, 'kody15@hotmail.com') self.assertEqual(auth.password, 'pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') self.assertEqual(auth.screenname, 'kod') logout_author(self.client) def test_author_dashboard(self): w_cat = WorksheetCategory(name='dundk') db.session.add(w_cat) auth_1 = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(auth_1) db.session.commit() worksheet = Worksheet(pdf_url='tudolsoos.pdf', name='tudoloods', author_id=1, author=auth_1, category_id=1, category=w_cat) db.session.add(worksheet) db.session.commit() worksheet = Worksheet.query.filter_by(name='tudoloods').first() w_cat = WorksheetCategory.query.filter_by(name='dundk').first() w_cat_1 = WorksheetCategory(name='dund32k') db.session.add(w_cat_1) w_cat_2 = WorksheetCategory(name='dundfsdk') db.session.add(w_cat_2) db.session.commit() auth_2 = Author(name='Kidkafdidf', email='kodyrogers29@gmail.com', password='pbkdf2:sha256:150000$JbvZOh4x$40097777eeefb55bc6987f4e6983d3401dca4d863a9a8971b36548d41af927dd') db.session.add(auth_2) auth_3 = Author(name='Kif', email='kodyrogers22@gmail.com', password='pbkdf2:sha256:150000$JbvZOh4x$40097777eeefb55bc6987f4e6983d3401dca4d863a9a8971b36548d41af927dd') db.session.add(auth_3) db.session.commit() worksheet_1 = Worksheet(pdf_url='tudolsoo.pdf', name='tloods', author_id=1, author=auth_1, category_id=1, category=w_cat) worksheet_2 = Worksheet(pdf_url='tudolsos.pdf', name='tudoldaghoods', author_id=2, author=auth_2, category_id=2, category=w_cat_1) worksheet_3 = Worksheet(pdf_url='tudolos.pdf', name='tudol', author_id=3, author=auth_3, category_id=3, category=w_cat_2) worksheet_4 = Worksheet(pdf_url='tudsoos.pdf', name='tudolsagdgsshjoods', author_id=2, author=auth_2, category_id=2, category=w_cat_1) worksheet_5 = Worksheet(pdf_url='tolsoos.pdf', name='tudoldfag', author_id=1, author=auth_1, category_id=1, category=w_cat) worksheet_6 = Worksheet(pdf_url='lsoos.pdf', name='tudosdag', author_id=2, author=auth_2, category_id=2, category=w_cat_1) worksheet_7 = Worksheet(pdf_url='tch.pdf', name='tudosgsggs', author_id=3, author=auth_3, category_id=3, category=w_cat_2) worksheet_8 = Worksheet(pdf_url='tudsfgos.pdf', name='montreal', author_id=2, author=auth_2, category_id=2, category=w_cat_1) worksheet_9 = Worksheet(pdf_url='tersoos.pdf', name='toronto', author_id=3, author=auth_3, category_id=3, category=w_cat_2) worksheet_10 = Worksheet(pdf_url='tudosgagos.pdf', name='ottowa', author_id=2, author=auth_2, category_id=2, category=w_cat_1) worksheet_11 = Worksheet(pdf_url='tusgsgos.pdf', name='saskatoon', author_id=1, author=auth_1, category_id=1, category=w_cat) worksheet_12 = Worksheet(pdf_url='tusgsssoos.pdf', name='winnipeg', author_id=2, 
author=auth_2, category_id=2, category=w_cat_1) db.session.add(worksheet_1) db.session.add(worksheet_2) db.session.add(worksheet_3) db.session.add(worksheet_4) db.session.add(worksheet_5) db.session.add(worksheet_6) db.session.add(worksheet_7) db.session.add(worksheet_8) db.session.add(worksheet_9) db.session.add(worksheet_10) db.session.add(worksheet_11) db.session.add(worksheet_12) db.session.commit() with self.app.test_client() as c: with captured_templates(self.app) as templates: c.post('/author_login', data=dict( email='kodyrogers21@gmail.com', password='RockOn' ), follow_redirects=True) r = c.get(url_for('author.author_dashboard', id=1)) self.assertEqual(r.status_code, 200) template, context = templates[0] self.assertEqual(context['worksheets'], [worksheet_11, worksheet_5, worksheet_1, worksheet]) c.get('/author_logout', follow_redirects=True) class UserLoginLogout(TestCase): ############################ #### setup and teardown #### ############################ def create_app(self): app = c_app(TestConfiguration) return app # executed prior to each test def setUp(self): self.app_context = self.app.app_context() self.app_context.push() db.create_all() # executed after each test def tearDown(self): db.session.remove() db.drop_all() self.app_context.pop() def test_author_login(self): author = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$73fMtgAp$1a1d8be4973cb2676c5f17275c43dc08583c8e450c94a282f9c443d34f72464c') db.session.add(author) db.session.commit() with self.app.test_client() as c: response = c.post('/author_login', data=dict( email='kodyrogers21@gmail.com', password='RockOn' ), follow_redirects=True) self.assertEqual(response.status_code, 200) self.assertEqual(session['author_logged_in'], True) self.assertEqual(session['author_name'], 'KJsa') response_1 = c.get('/author_logout', follow_redirects=True) self.assertEqual(response_1.status_code, 200) self.assertEqual(session['author_logged_in'], False) class DatabaseModelsTests(TestCase): ############################ #### setup and teardown #### ############################ def create_app(self): app = c_app(TestConfiguration) return app # executed prior to each test def setUp(self): self.app_context = self.app.app_context() self.app_context.push() db.create_all() # executed after each test def tearDown(self): db.session.remove() db.drop_all() self.app_context.pop() def test_author_model(self) : author = Author(name='KJsa', email='kodyrogers21@gmail.com', screenname='kod', about='What up?', password='pbkdf2:sha256:150000$CgCWVBC6$4090facdcd3e093c7b458362daddbaa7b53387c6042ad46b5970dc7b6d00183c') db.session.add(author) db.session.commit() assert author in db.session if __name__ == "__main__": unittest.main()
37.706994
181
0.658244
4a078e0da1e1ec8cced001df1c5d6e294240e586
177
py
Python
docs/source/_filters/names.py
t-elisee/sepal-doc
6ef93090e18584037f1663bc36d9d1736aceb64b
[ "MIT" ]
2
2021-06-15T19:48:14.000Z
2022-03-19T03:24:55.000Z
docs/source/_filters/names.py
apuzzi/sepal-doc
ed76e626a544ce62034b734873e646396ed766a2
[ "MIT" ]
91
2021-03-11T10:41:43.000Z
2022-03-30T15:58:07.000Z
docs/source/_filters/names.py
apuzzi/sepal-doc
ed76e626a544ce62034b734873e646396ed766a2
[ "MIT" ]
15
2021-03-12T11:58:58.000Z
2022-03-01T10:24:41.000Z
from enchant.tokenize import Filter


class Names(Filter):
    """If a word starts with a capital letter, ignore it."""

    def _skip(self, word):
        return word[0].isupper()
22.125
57
0.672316
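The Names filter above is meant to be plugged into a pyenchant tokenizer, which drops every token a filter's _skip method matches. A sketch of typical wiring, assuming the module is importable as names and an "en_US" dictionary is available; the sample sentence is invented:

# Sketch: skip capitalized words (proper nouns) while spell checking.
import enchant
from enchant.tokenize import get_tokenizer

from names import Names  # assumes the file above is saved as names.py

tokenizer = get_tokenizer("en_US", filters=[Names])
checker = enchant.Dict("en_US")

text = "Sepal uses Google Earth Engine for analysys"
for word, pos in tokenizer(text):          # "Sepal", "Google", ... are skipped
    if not checker.check(word):
        print("misspelled:", word, "at offset", pos)   # flags "analysys" only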
4a078f0d7e524b7bc696dea421f664cbfdac01bd
1,053
py
Python
configs/_base_/models/upernet_van.py
MenghaoGuo/VAN-Segmentation
e0053db0ca88a164bc868c08cb9d2e27d614ee2a
[ "Apache-2.0" ]
2
2022-02-25T03:05:35.000Z
2022-02-26T08:31:59.000Z
configs/_base_/models/upernet_van.py
MenghaoGuo/VAN-Segmentation
e0053db0ca88a164bc868c08cb9d2e27d614ee2a
[ "Apache-2.0" ]
null
null
null
configs/_base_/models/upernet_van.py
MenghaoGuo/VAN-Segmentation
e0053db0ca88a164bc868c08cb9d2e27d614ee2a
[ "Apache-2.0" ]
null
null
null
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='van_tiny',
        style='pytorch'),
    decode_head=dict(
        type='UPerHead',
        in_channels=[32, 64, 160, 256],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=160,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
29.25
74
0.592593
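Base configs like the one above live under configs/_base_/ in mmsegmentation-style projects and are consumed through `_base_` inheritance (mmcv's Config.fromfile merges the dicts) rather than being imported directly. A sketch of a hypothetical derived config; the file names are invented:

# configs/van/upernet_van_tiny_512x512_ade20k.py (hypothetical child config)
_base_ = [
    '../_base_/models/upernet_van.py',
    '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py',
]

# Override only the leaves that change; everything else (backbone,
# norm_cfg, losses, ...) is inherited from the base model config.
model = dict(
    decode_head=dict(num_classes=19),
    auxiliary_head=dict(num_classes=19))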
4a078f7fdc9a4db6e6d57059517bb05f81d6153f
3,846
py
Python
services/api-server/tests/unit/fakes/solvers_faker.py
colinRawlings/osparc-simcore
bf2f18d5bc1e574d5f4c238d08ad15156184c310
[ "MIT" ]
25
2018-04-13T12:44:12.000Z
2022-03-12T15:01:17.000Z
services/api-server/tests/unit/fakes/solvers_faker.py
colinRawlings/osparc-simcore
bf2f18d5bc1e574d5f4c238d08ad15156184c310
[ "MIT" ]
2,553
2018-01-18T17:11:55.000Z
2022-03-31T16:26:40.000Z
services/api-server/tests/unit/fakes/solvers_faker.py
mrnicegyu11/osparc-simcore
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
[ "MIT" ]
20
2018-01-18T19:45:33.000Z
2022-03-29T07:08:47.000Z
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Dict, Iterator, Tuple

import packaging.version
import yaml
from fastapi import HTTPException, status
from importlib_resources import files
from models_library.services import ServiceDockerData
from simcore_service_api_server.models.schemas.solvers import (
    LATEST_VERSION,
    Solver,
    SolverKeyId,
    VersionStr,
)

SKey = Tuple[SolverKeyId, VersionStr]


@dataclass
class SolversFaker:
    solvers: Dict[SKey, Solver]

    def get(self, key, *, url=None) -> Solver:
        return self.solvers[key].copy(update={"url": url})

    def values(self, url_resolver: Callable) -> Iterator[Solver]:
        for s in self.solvers.values():
            yield s.copy(update={"url": url_resolver(s)})

    def get_by_name_and_version(
        self, name: str, version: str, url_resolver: Callable
    ) -> Solver:
        try:
            return next(
                s.copy(update={"url": url_resolver(s.id)})
                for s in self.solvers.values()
                if s.id.endswith(name) and s.version == version
            )
        except StopIteration as err:
            raise KeyError() from err

    def get_latest(self, name: str, url_resolver: Callable) -> Solver:
        _all = list(s for s in self.solvers.values() if s.id.endswith(name))
        latest = sorted(_all, key=lambda s: packaging.version.parse(s.version))[-1]
        return latest.copy(update={"url": url_resolver(latest.id)})

    @classmethod
    def load_images(cls) -> Iterator[ServiceDockerData]:
        mocks_dir: Path = files("simcore_service_api_server").joinpath("mocks")
        for filepath in mocks_dir.glob("*.y*ml"):
            image = yaml.safe_load(filepath.read_text())
            yield ServiceDockerData.parse_obj(image)

    @classmethod
    def solver_items(cls) -> Iterator[Tuple[SKey, Solver]]:
        for image in cls.load_images():
            solver = Solver.create_from_image(image)
            yield (solver.id, solver.version), solver

    @classmethod
    def create_from_mocks(cls) -> "SolversFaker":
        return cls(solvers=dict(cls.solver_items()))


the_fake_impl = SolversFaker.create_from_mocks()


# /files API fake implementations

# GET /solvers
async def list_solvers(
    url_for: Callable,
):
    def _url_resolver(solver: Solver):
        return url_for(
            "get_solver_release", solver_key=solver.id, version=solver.version
        )

    return list(the_fake_impl.values(_url_resolver))


async def get_solver_by_name_and_version(
    solver_name: SolverKeyId,
    version: VersionStr,
    url_for: Callable,
):
    try:
        print(f"/{solver_name}/{version}", flush=True)

        def _url_resolver(solver: Solver):
            return url_for(
                "get_solver_release", solver_key=solver.id, version=solver.version
            )

        if version == LATEST_VERSION:
            solver = the_fake_impl.get_latest(solver_name, _url_resolver)
        else:
            solver = the_fake_impl.get_by_name_and_version(
                solver_name, version, _url_resolver
            )
        return solver

    except KeyError as err:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Solver {solver_name}:{version} not found",
        ) from err


async def get_solver(
    solver_name: SolverKeyId,
    version: VersionStr,
    url_for: Callable,
):
    try:
        solver = the_fake_impl.get(
            (solver_name, version),
            url=url_for("get_solver_release", solver_key=solver_name, version=version),
        )
        return solver

    except KeyError as err:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Solver {solver_name}:{version} not found",
        ) from err
29.358779
87
0.650026
4a078fc71d9c5eebf3dc38055b15e4bc3e5bde21
38,255
py
Python
src/sage/interfaces/phc.py
sheerluck/sage
b5e572b7d231f70c139d9978d68add80c4ef353d
[ "BSL-1.0" ]
1,742
2015-01-04T07:06:13.000Z
2022-03-30T11:32:52.000Z
src/sage/interfaces/phc.py
sheerluck/sage
b5e572b7d231f70c139d9978d68add80c4ef353d
[ "BSL-1.0" ]
66
2015-03-19T19:17:24.000Z
2022-03-16T11:59:30.000Z
src/sage/interfaces/phc.py
sheerluck/sage
b5e572b7d231f70c139d9978d68add80c4ef353d
[ "BSL-1.0" ]
495
2015-01-10T10:23:18.000Z
2022-03-24T22:06:11.000Z
r""" Interface to PHC. PHC computes numerical information about systems of polynomials over the complex numbers. PHC implements polynomial homotopy methods to exploit structure in order to better approximate all isolated solutions. The package also includes extra tools to handle positive dimensional solution components. AUTHORS: - PHC was written by J. Verschelde, R. Cools, and many others (?) - William Stein and Kelly ?? -- first version of interface to PHC - Marshall Hampton -- second version of interface to PHC - Marshall Hampton and Alex Jokela -- third version, path tracking """ # **************************************************************************** # Copyright (C) 2006 William Stein <wstein@gmail.com> # Copyright (C) 2008 Marshall Hampton <hamptonio@gmail.com> # # Distributed under the terms of the GNU General Public License (GPL) # as published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # https://www.gnu.org/licenses/ # **************************************************************************** import os import re import pexpect import random from sage.misc.all import tmp_filename from sage.rings.real_mpfr import RR from sage.rings.cc import CC from sage.rings.integer import Integer from sage.plot.line import line from sage.plot.point import point def get_solution_dicts(output_file_contents, input_ring, get_failures = True): """ Return a list of dictionaries of variable:value (key:value) pairs. Only used internally; see the solution_dict function in the PHC_Object class definition for details. INPUT: - output_file_contents -- phc solution output as a string - input_ring -- a PolynomialRing that variable names can be coerced into OUTPUT: a list of dictionaries of solutions EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x1,x2> = PolynomialRing(QQ,2) sage: test_sys = [(x1-1)^5-x2, (x2-1)^5-1] sage: sol = phc.blackbox(test_sys, R2) # optional -- phc sage: test = get_solution_dicts(sol.output_file_contents,R2) # optional -- phc sage: str(sum([q[x1].real() for q in test]))[0:4] # optional -- phc '25.0' """ output_list = output_file_contents.splitlines() solution_dicts = [] for solution_line in range(len(output_list)-1,-1,-1): if output_list[solution_line].find('THE SOLUTIONS') == 0: break try: var_number = int(output_list[solution_line+2].split(' ')[1]) # sol_number = int(output_list[solution_line+2].split(' ')[0]) except IndexError: var_number = int(output_list[solution_line+1].split(' ')[1]) # sol_number = int(output_list[solution_line+1].split(' ')[0]) for i in range(solution_line + 1,len(output_list)): if output_list[i].count('the solution for t') == 1: if output_list[i-3].count('success') > 0 or get_failures: temp_dict = {} for j in range(1,var_number+1): rawsplit = output_list[i+j].split(': ')[1].split(' ') for extras in range(rawsplit.count('')): rawsplit.remove('') temp_var = output_list[i+j].split(': ')[0].replace(' ','') temp_dict[input_ring(temp_var)] = CC(rawsplit[0],rawsplit[1]) solution_dicts.append(temp_dict) return solution_dicts def get_classified_solution_dicts(output_file_contents, input_ring, get_failures = True): """ Return a dictionary of lists of dictionaries of variable:value (key:value) pairs. Only used internally; see the classified_solution_dict function in the PHC_Object class definition for details. 
INPUT: - output_file_contents -- phc solution output as a string - input_ring -- a PolynomialRing that variable names can be coerced into OUTPUT: - a dictionary of lists if dictionaries of solutions, classifies by type EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x1,x2> = PolynomialRing(QQ,2) sage: test_sys = [(x1-2)^5-x2, (x2-1)^5-1] sage: sol = phc.blackbox(test_sys, R2) # optional -- phc sage: sol_classes = get_classified_solution_dicts(sol.output_file_contents,R2) # optional -- phc sage: len(sol_classes['real']) # optional -- phc 1 """ output_list = output_file_contents.splitlines() solution_dicts = {} solution_types = ['complex', 'real','failure'] for sol_type in solution_types: solution_dicts[sol_type] = [] for solution_line in range(len(output_list)-1,-1,-1): if output_list[solution_line].find('THE SOLUTIONS') == 0: break var_number = int(output_list[solution_line+2].split(' ')[1]) # sol_number = int(output_list[solution_line+2].split(' ')[0]) for i in range(solution_line + 1,len(output_list)): if output_list[i].count('the solution for t') == 1: phc_type = output_list[i+var_number+1].split(' = ')[-1] if phc_type.find('complex') != -1: phc_type = 'complex' elif phc_type.find('real') != -1: phc_type = 'real' else: phc_type = 'failure' temp_dict = {} for j in range(1,var_number+1): rawsplit = output_list[i+j].split(': ')[1].split(' ') for extras in range(rawsplit.count('')): rawsplit.remove('') temp_var = output_list[i+j].split(': ')[0].replace(' ','') if phc_type == 'real': temp_dict[input_ring(temp_var)] = RR(rawsplit[0]) else: temp_dict[input_ring(temp_var)] = CC(rawsplit[0],rawsplit[1]) solution_dicts[phc_type].append(temp_dict) return solution_dicts def get_variable_list(output_file_contents): """ Return the variables, as strings, in the order in which PHCpack has processed them. EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x1,x2> = PolynomialRing(QQ,2) sage: test_sys = [(x1-2)^5-x2, (x2-1)^5-1] sage: sol = phc.blackbox(test_sys, R2) # optional -- phc sage: get_variable_list(sol.output_file_contents) # optional -- phc ['x1', 'x2'] """ output_list = output_file_contents.splitlines() for solution_line in range(len(output_list)-1,-1,-1): if output_list[solution_line].find('THE SOLUTIONS') == 0: break var_number = int(output_list[solution_line+2].split(' ')[1]) varlist = [] for var_ind in range(var_number): var = output_list[solution_line + 8 + var_ind].split(' ')[1] varlist.append(var) return varlist class PHC_Object: def __init__(self, output_file_contents, input_ring): """ A container for data from the PHCpack program - lists of float solutions, etc. Currently the file contents are kept as a string; for really large outputs this would be bad. INPUT: - output_file_contents: the string output of PHCpack - input_ring: for coercion of the variables into the desired ring. EXAMPLES:: sage: from sage.interfaces.phc import phc sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [(x-1)^2+(y-1)-1, x^2+y^2-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: str(sum([x[0] for x in sol.solutions()]).real())[0:3] # optional -- phc '2.0' """ self.output_file_contents = output_file_contents self.input_ring = input_ring def save_as_start(self, start_filename = None, sol_filter = ''): """ Saves a solution as a phcpack start file. The usual output is just as a string, but it can be saved to a file as well. Even if saved to a file, it still returns the output string. 
EXAMPLES:: sage: from sage.interfaces.phc import phc sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^3-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: start_save = sol.save_as_start() # optional -- phc sage: end_sys = [x^7-2,y^5-x^2] # optional -- phc sage: sol = phc.start_from(start_save, end_sys, R2) # optional -- phc sage: len(sol.solutions()) # optional -- phc 15 """ start_data = '' output_list = self.output_file_contents.splitlines() for a_line in output_list: if a_line.find('ROOT COUNTS') != -1 or a_line.find('START SOLUTIONS') != -1: break else: start_data += a_line + '\n' for index in range(len(output_list)-1,0,-1): a_line = output_list[index] if a_line.find('THE SOLUTIONS') != -1: found_solutions = index break start_data += output_list[found_solutions] + '\n\n' try: var_number = int(output_list[found_solutions+1].split(' ')[1]) except Exception: # bad error handling var_number = int(output_list[found_solutions+2].split(' ')[1]) sol_count = 0 sol_data = '' for i in range(found_solutions + 2, len(output_list)): if output_list[i].count('the solution for t') == 1 and output_list[i+1+var_number].find(sol_filter) != -1: phc_type = output_list[i+var_number+1].split(' = ')[-1] if phc_type.find('no solution') == -1: sol_count += 1 for ind2 in range(i-3,i+var_number+2): sol_data += output_list[ind2] + '\n' jan_bar = '===========================================================================\n' sol_data += jan_bar start_data += str(sol_count) + ' ' + str(var_number) + '\n' start_data += jan_bar + sol_data if start_filename is not None: with open(start_filename, 'w') as start_file: start_file.write(start_data) return start_data def classified_solution_dicts(self): """ Return a dictionary of lists of dictionaries of solutions. Its not as crazy as it sounds; the keys are the types of solutions as classified by phcpack: regular vs. singular, complex vs. real INPUT: - None OUTPUT: - A dictionary of lists of dictionaries of solutions EXAMPLES:: sage: from sage.interfaces.phc import phc sage: R.<x,y> = PolynomialRing(CC,2) sage: p_sys = [x^10-y,y^2-1] sage: sol = phc.blackbox(p_sys,R) # optional -- phc sage: classifieds = sol.classified_solution_dicts() # optional -- phc sage: str(sum([q[y] for q in classifieds['real']]))[0:3] # optional -- phc '2.0' """ try: return self.__classified_sols except AttributeError: pass classified_sols = get_classified_solution_dicts(self.output_file_contents, self.input_ring) self.__classified_sols = classified_sols return classified_sols def solution_dicts(self, get_failures = False): """ Return a list of solutions in dictionary form: variable:value. INPUT: - self -- for access to self_out_file_contents, the string of raw PHCpack output. - get_failures (optional) -- a boolean. The default (False) is to not process failed homotopies. These either lie on positive-dimensional components or at infinity. OUTPUT: - solution_dicts: a list of dictionaries. Each dictionary element is of the form variable:value, where the variable is an element of the input_ring, and the value is in ComplexField. 
EXAMPLES:: sage: from sage.interfaces.phc import * sage: R.<x,y,z> = PolynomialRing(QQ,3) sage: fs = [x^2-1,y^2-x,z^2-y] sage: sol = phc.blackbox(fs,R) # optional -- phc sage: s_list = sol.solution_dicts() # optional -- phc sage: s_list.sort() # optional -- phc sage: s_list[0] # optional -- phc {y: 1.00000000000000, z: -1.00000000000000, x: 1.00000000000000} """ try: return self.__solution_dicts except AttributeError: pass solution_dicts = get_solution_dicts(self.output_file_contents, self.input_ring, get_failures = get_failures) self.__solution_dicts = solution_dicts return solution_dicts def solutions(self, get_failures = False): """ Return a list of solutions in the ComplexField. Use the variable_list function to get the order of variables used by PHCpack, which is usually different than the term order of the input_ring. INPUT: - self -- for access to self_out_file_contents, the string of raw PHCpack output. - get_failures (optional) -- a boolean. The default (False) is to not process failed homotopies. These either lie on positive-dimensional components or at infinity. OUTPUT: - solutions: a list of lists of ComplexField-valued solutions. EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x1,x2> = PolynomialRing(QQ,2) sage: test_sys = [x1^5-x1*x2^2-1, x2^5-x1*x2-1] sage: sol = phc.blackbox(test_sys, R2) # optional -- phc sage: len(sol.solutions()) # optional -- phc 25 """ try: return self.__solutions except AttributeError: pass solution_dicts = get_solution_dicts(self.output_file_contents, self.input_ring, get_failures = get_failures) self.__solution_dicts = solution_dicts solutions = [sol_dict.values() for sol_dict in solution_dicts] self.__solutions = solutions return solutions def variable_list(self): """ Return the variables, as strings, in the order in which PHCpack has processed them. EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x1,x2> = PolynomialRing(QQ,2) sage: test_sys = [x1^5-x1*x2^2-1, x2^5-x1*x2-1] sage: sol = phc.blackbox(test_sys, R2) # optional -- phc sage: sol.variable_list() # optional -- phc ['x1', 'x2'] """ try: return self.__var_list except AttributeError: pass var_list = get_variable_list(self.output_file_contents) self.__var_list = var_list return var_list class PHC: """ A class to interface with PHCpack, for computing numerical homotopies and root counts. EXAMPLES:: sage: from sage.interfaces.phc import phc sage: R.<x,y> = PolynomialRing(CDF,2) sage: testsys = [x^2 + 1, x*y - 1] sage: phc.mixed_volume(testsys) # optional -- phc 2 sage: v = phc.blackbox(testsys, R) # optional -- phc sage: sols = v.solutions() # optional -- phc sage: sols.sort() # optional -- phc sage: sols # optional -- phc [[-1.00000000000000*I, 1.00000000000000*I], [1.00000000000000*I, -1.00000000000000*I]] sage: sol_dict = v.solution_dicts() # optional -- phc sage: x_sols_from_dict = [d[x] for d in sol_dict] # optional -- phc sage: x_sols_from_dict.sort(); x_sols_from_dict # optional -- phc [-1.00000000000000*I, 1.00000000000000*I] sage: residuals = [[test_equation.change_ring(CDF).subs(sol) for test_equation in testsys] for sol in v.solution_dicts()] # optional -- phc sage: residuals # optional -- phc [[0, 0], [0, 0]] """ def _output_from_command_list(self, command_list, polys, verbose = False): """ A pexpect interface to phcpack, given a command list for interactive dialogs. The input file is supplied from the polynomial list, output file is also supplied. This is only used as a building block for the interface. 
INPUT: - command_list -- a list of commands to phc - polys -- a polynomial system as a list of polynomials OUTPUT: - an output string from phc EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [(x-1)^2+(y-1)-1, x^2+y^2-1] # optional -- phc sage: a = phc._output_from_command_list(['phc -m','4','n','n','n'], start_sys) # optional -- phc """ # Get temporary file names (these will be in SAGE_HOME/.sage/tmp/pid) input_filename = tmp_filename() output_filename = tmp_filename() # Get the input polynomial text input = self._input_file(polys) if verbose: print("Writing the input file to %s" % input_filename) with open(input_filename, 'w') as file: file.write(input) if verbose: print("The following file will be the input polynomial file to phc.") print(input) # Create a phc process child_phc = pexpect.spawn(command_list[0]) # feed it the commands child_phc.sendline('y') child_phc.sendline(input_filename) child_phc.sendline(output_filename) for command_string in command_list[1:]: if verbose: print(command_string) child_phc.sendline(command_string) child_phc.expect('results') read_stuff = child_phc.read() if verbose: print(read_stuff) child_phc.close() if not os.path.exists(output_filename): raise RuntimeError("The output file does not exist; something went wrong running phc.") # Delete the input file os.unlink(input_filename) # Return the output filename return output_filename def _input_file(self, polys): """ This is used internally to implement the PHC interface. INPUT: - polys -- a list of polynomials in a Sage polynomial ring over a field that embeds into the complex numbers. OUTPUT: - a PHC input file (as a text string) that describes these - polynomials. EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [(x-1)^2+(y-1)-1, x^2+y^2-1] sage: phc._input_file(start_sys) # optional -- phc '2\nx^2 - 2*x + y - 1;\nx^2 + y^2 - 1;\n' """ if not isinstance(polys, (list, tuple)): raise TypeError('polys must be a list or tuple') s = '%s\n'%len(polys) for f in polys: s += f._repr_() + ';\n' # note the semicolon *terminators* return s def _parse_path_file(self, input_filename, verbose = False): """ Takes a phpack output file containing path tracking information and parses it into a list of lists of dictionaries - i.e. a list of solutions paths, where each solution path is a list of dictionaries of variable and homotopy parameter values. 
INPUT: - input_filename -- file must have path-tracking information OUTPUT: - a list of lists of dictionaries, described above EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^5-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: start_save = sol.save_as_start() # optional -- phc sage: end_sys = [x^5-2,y^5-x^2] # optional -- phc sage: path_track_filename = phc._path_track_file(start_save, end_sys, R2, c_skew = .001) # optional -- phc sage: sol_paths = phc._parse_path_file(path_track_filename) # optional -- phc sage: len(sol_paths) # optional -- phc 25 """ if not os.path.exists(input_filename): raise RuntimeError("The file containing output from phc (" + input_filename + ") cannot be found") fh = open(input_filename) line_idx = 0 begin = 0 count = 0 solutions_dicts = [] steps_dicts = [] # regular expressions for matching certain output types var_cnt_regex = re.compile('^ +([0-9]+)') output_regex = re.compile('^OUTPUT INFORMATION DURING') t_regex = re.compile(r'(^t +: +(-{0,1}[0-9]+\.[0-9]+E[-+][0-9]+) +(-{0,1}[0-9]+\.[0-9]+E[-+][0-9]+)$)', re.IGNORECASE) sols_regex = re.compile(r'(^ *(([a-z]|[0-9])+) +: +(-?[0-9]+\.[0-9]+E[-+][0-9]+) +(-?[0-9]+\.[0-9]+E[-+][0-9]+)$)', re.IGNORECASE) complete_regex= re.compile('^TIMING INFORMATION') breakfast = False a_line = fh.readline() end_test = '' while a_line: # processing.... a_line = a_line.replace("\n", '') if line_idx == 0: m = var_cnt_regex.match(a_line) if m: count = Integer(m.group(1)) if count > 0: m = output_regex.match(a_line) if m: begin = 1 if begin: m = t_regex.match(a_line) if m: # put the t-values into a dict # m.group(2) contains the real val # m.group(3) contains the imaginary val # fh_w.write( "T=> G1(" + m.group(2) + '),G2(' + m.group(3) + ")\n") # read off two lines - this should be 'm' and 'the solution for t :' a_line = fh.readline() end_test = a_line # store this to check for end of solution a_line = fh.readline() t_val = CC(m.group(2), m.group(3)) temp_dict = {} temp_dict["t"] = t_val for i in range(0, count): a_line = fh.readline() m = sols_regex.match(a_line) if m: # m.group(2) contains our var name # m.group(4) contains our real val # m.group(5) contains our imaginary val temp_dict[m.group(2)] = CC(m.group(4),m.group(5)) steps_dicts.append(temp_dict) # check if its the end of a solution if end_test.find('Length of path') != -1: if verbose: print("recording sol") if steps_dicts != []: solutions_dicts.append(steps_dicts) steps_dicts = [] m = complete_regex.match(a_line) if m: breakfast = True if breakfast: break line_idx += 1 a_line = fh.readline() fh.close() return solutions_dicts def _path_track_file(self, start_filename_or_string, polys, input_ring, c_skew = 0.001, verbose = False): """ Return the filename which contains path tracking output. 
EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^6-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: start_save = sol.save_as_start() # optional -- phc sage: end_sys = [x^7-2,y^5-x^2] # optional -- phc sage: path_track_filename = phc._path_track_file(start_save, end_sys, R2, c_skew = .001) # optional -- phc sage: sol_paths = phc._parse_path_file(path_track_filename) # optional -- phc sage: len(sol_paths) # optional -- phc 30 """ # Probably unnecessarily redundant from the start_from function if start_filename_or_string.find('THE SOLUTIONS') != -1: start_filename = tmp_filename() with open(start_filename, 'w') as start_file: start_file.write(start_filename_or_string) elif os.path.exists(start_filename_or_string): start_filename = start_filename_or_string else: raise RuntimeError("There is something wrong with your start string or filename") return self._output_from_command_list(['phc','0','0','A',start_filename, 'y','1','0','n','k','2','a','1',str(c_skew),'0','0','2'], polys, verbose = verbose) def path_track(self, start_sys, end_sys, input_ring, c_skew = .001, saved_start = None): """ This function computes homotopy paths between the solutions of start_sys and end_sys. INPUT: - start_sys -- a square polynomial system, given as a list of polynomials - end_sys -- same type as start_sys - input_ring -- for coercion of the variables into the desired ring. - c_skew -- optional. the imaginary part of homotopy multiplier; nonzero values are often necessary to avoid intermediate path collisions - saved_start -- optional. A phc output file. If not given, start system solutions are computed via the phc.blackbox function. OUTPUT: - a list of paths as dictionaries, with the keys variables and t-values on the path. EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^6-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: start_save = sol.save_as_start() # optional -- phc sage: end_sys = [x^7-2,y^5-x^2] # optional -- phc sage: sol_paths = phc.path_track(start_sys, end_sys, R2, saved_start = start_save) # optional -- phc sage: len(sol_paths) # optional -- phc 30 """ if not saved_start: sol = phc.blackbox(start_sys, input_ring) saved_start = sol.save_as_start() path_track_filename = phc._path_track_file(saved_start, end_sys, input_ring = input_ring, c_skew = c_skew) sol_paths = phc._parse_path_file(path_track_filename) os.unlink(path_track_filename) return sol_paths def plot_paths_2d(self, start_sys, end_sys, input_ring, c_skew = .001, endpoints = True, saved_start = None, rand_colors = False): """ This returns a graphics object of solution paths in the complex plane. INPUT: - start_sys -- a square polynomial system, given as a list of polynomials - end_sys -- same type as start_sys - input_ring -- for coercion of the variables into the desired ring. - c_skew -- optional. the imaginary part of homotopy multiplier; nonzero values are often necessary to avoid intermediate path collisions - endpoints -- optional. Whether to draw in the ends of paths as points. - saved_start -- optional. A phc output file. If not given, start system solutions are computed via the phc.blackbox function. 
        OUTPUT:

        - lines and points of solution paths

        EXAMPLES::

            sage: from sage.interfaces.phc import *
            sage: from sage.structure.sage_object import SageObject
            sage: R2.<x,y> = PolynomialRing(QQ,2)
            sage: start_sys = [x^5-y^2,y^5-1]
            sage: sol = phc.blackbox(start_sys, R2)    # optional -- phc
            sage: start_save = sol.save_as_start()     # optional -- phc
            sage: end_sys = [x^5-25,y^5-x^2]           # optional -- phc
            sage: testing = phc.plot_paths_2d(start_sys, end_sys, R2)  # optional -- phc
            sage: type(testing)                        # optional -- phc (normally use plot here)
            <class 'sage.plot.graphics.Graphics'>
        """
        paths = phc.path_track(start_sys, end_sys, input_ring, c_skew = c_skew, saved_start = saved_start)
        path_lines = []
        sol_pts = []
        if rand_colors:
            r_color = {}
            for a_var in input_ring.gens():
                var_name = str(a_var)
                # ``random`` is the module imported at the top of this file;
                # calling it directly would fail, so use random.random()
                r_color[var_name] = (random.random(), random.random(), random.random())
        for a_sol in paths:
            for a_var in input_ring.gens():
                var_name = str(a_var)
                temp_line = []
                for data in a_sol:
                    temp_line.append([data[var_name].real(), data[var_name].imag()])
                if rand_colors:
                    path_lines.append(line(temp_line, rgbcolor = r_color[var_name]))
                else:
                    path_lines.append(line(temp_line))
        if endpoints:
            sol_pts = []
            for a_sol in paths:
                for a_var in input_ring.gens():
                    var_name = str(a_var)
                    sol_pts.append(point([a_sol[0][var_name].real(), a_sol[0][var_name].imag()]))
                    sol_pts.append(point([a_sol[-1][var_name].real(), a_sol[-1][var_name].imag()]))
            return sum(sol_pts) + sum(path_lines)
        else:
            return sum(path_lines)

    def mixed_volume(self, polys, verbose=False):
        """
        Computes the mixed volume of the polynomial system given by the input polys.

        INPUT:

        - polys -- a list of multivariate polynomials (elements of a
          multivariate polynomial ring).
        - verbose -- print lots of verbose information about what this function does.

        OUTPUT:

        - The mixed volume.

        EXAMPLES::

            sage: from sage.interfaces.phc import *
            sage: R2.<x,y,z> = PolynomialRing(QQ,3)
            sage: test_sys = [(x+y+z)^2-1,x^2-x,y^2-1]
            sage: phc.mixed_volume(test_sys)           # optional -- phc
            4
        """
        output_filename = self._output_from_command_list(['phc -m','4','n','n','n'], polys, verbose = verbose)

        # read the whole output file; the original discarded the result of
        # ``out.read()`` and then called ``split`` on the file object itself
        with open(output_filename) as f:
            out = f.read()

        # All done
        out_lines = out.split('\n')
        for a_line in out_lines:
            # the two conditions below are necessary because of changes in output format
            if a_line.find('The mixed volume equals :') == 0 or a_line.find('common mixed volume :') == 0:
                if verbose:
                    print('found line: ' + a_line)
                mixed_vol = Integer(a_line.split(':')[1])
                break

        try:
            return mixed_vol
        except NameError:
            raise RuntimeError("Mixed volume not found in output; something went wrong running phc.")

    def start_from(self, start_filename_or_string, polys, input_ring, path_track_file = None, verbose = False):
        """
        This computes solutions starting from a phcpack solution file.

        INPUT:

        - start_filename_or_string -- the filename for a phcpack start system,
          or the contents of such a file as a string. Variable names must match
          the input_ring variables. The value of the homotopy variable t should
          be 1, not 0.
        - polys -- a list of multivariate polynomials (elements of a
          multivariate polynomial ring).
        - input_ring: for coercion of the variables into the desired ring.
        - path_track_file: whether to save path-tracking information
        - verbose -- print lots of verbose information about what this function does.

        OUTPUT:

        - A solution in the form of a PHCObject.
EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^6-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: start_save = sol.save_as_start() # optional -- phc sage: end_sys = [x^7-2,y^5-x^2] # optional -- phc sage: sol = phc.start_from(start_save, end_sys, R2) # optional -- phc sage: len(sol.solutions()) # optional -- phc 30 """ input_filename = tmp_filename() output_filename = tmp_filename() if start_filename_or_string.find('THE SOLUTIONS') != -1: start_filename = tmp_filename() with open(start_filename, 'w') as start_file: start_file.write(start_filename_or_string) elif os.path.exists(start_filename_or_string): start_filename = start_filename_or_string else: raise RuntimeError("There is something wrong with your start string or filename") # Get the input polynomial text input = self._input_file(polys) if verbose: print("Writing the input file to %s" % input_filename) with open(input_filename, 'w') as f: f.write(input) if verbose: print("The following file will be the input polynomial file to phc.") print(input) # Create a phc process child_phc = pexpect.spawn('phc') child_phc.sendline('y') child_phc.sendline(input_filename) child_phc.sendline(output_filename) child_phc.sendline('0') child_phc.sendline('0') child_phc.expect('Nonlinear Reduction') child_phc.sendline('A') child_phc.sendline(start_filename) child_phc.sendline('y') child_phc.sendline('1') child_phc.sendline('0') if verbose: phc_dialog = child_phc.read(size = 40) print(phc_dialog) child_phc.sendline('n') child_phc.sendline('0') if verbose: child_phc.expect('CURRENT CONTINUATION') phc_dialog = child_phc.read(size = 40) print(phc_dialog) child_phc.sendline('0') if path_track_file is None: child_phc.sendline('0') else: child_phc.sendline('2') child_phc.expect('results') dots = child_phc.read() if verbose: print("should be . : " + dots) #close down the process: child_phc.close() if not os.path.exists(output_filename): raise RuntimeError("The output file does not exist; something went wrong running phc.") # Read the output produced by PHC with open(output_filename) as f: out = f.read() # Delete the temporary files os.unlink(output_filename) os.unlink(input_filename) # All done return PHC_Object(out, input_ring) def blackbox(self, polys, input_ring, verbose = False): """ Return as a string the result of running PHC with the given polynomials under blackbox mode (the '-b' option). INPUT: - polys -- a list of multivariate polynomials (elements of a multivariate polynomial ring). - input_ring -- for coercion of the variables into the desired ring. - verbose -- print lots of verbose information about what this function does. OUTPUT: - a PHC_Object object containing the phcpack output string. 
EXAMPLES:: sage: from sage.interfaces.phc import * sage: R2.<x,y> = PolynomialRing(QQ,2) sage: start_sys = [x^6-y^2,y^5-1] sage: sol = phc.blackbox(start_sys, R2) # optional -- phc sage: len(sol.solutions()) # optional -- phc 30 """ # Get three temporary file names (these will be in SAGE_HOME/.sage/tmp/pid) input_filename = tmp_filename() output_filename = input_filename + ".phc" log_filename = tmp_filename() # Get the input polynomial text input = self._input_file(polys) if verbose: print("Writing the input file to %s" % input_filename) with open(input_filename, 'w') as f: f.write(input) if verbose: print("The following file will be the input polynomial file to phc.") print(input) # Create the phc command line> cmd = 'phc -b %s %s'%(input_filename, output_filename) if verbose: print("The phc command line is:") print(cmd) # Do it -- make the system call. e = os.system(cmd) # Was there an error? if e: from sage.misc.sage_ostools import have_program if not have_program('phc'): print(str(os.system('which phc')) + ' PHC needs to be installed and in your path') raise RuntimeError # todo -- why? etc. with open(log_filename) as f: msg = f.read() raise RuntimeError(msg + "\nError running phc.") if not os.path.exists(output_filename): raise RuntimeError("The output file does not exist; something went wrong running phc.") # Read the output produced by PHC with open(output_filename) as f: out = f.read() # All done return PHC_Object(out, input_ring) ################################ # The unique phc interface instance. phc = PHC()
39.848958
164
0.570174
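A minimal sketch of driving the PHC interface above from a Sage session, mirroring the docstring examples rather than adding anything new; it assumes the optional phc binary is installed and on the PATH, and the ring and systems are illustrative.

# Solve a start system, serialize it, then track its solutions to a target
# system and check the mixed-volume root bound (all per the docstrings above).
from sage.interfaces.phc import phc
R2.<x,y> = PolynomialRing(QQ, 2)
start_sys = [x^5 - y^2, y^5 - 1]                    # start system
sol = phc.blackbox(start_sys, R2)                   # blackbox solve
start_save = sol.save_as_start()                    # serialize as a start file
end_sys = [x^5 - 25, y^5 - x^2]                     # target system
tracked = phc.start_from(start_save, end_sys, R2)   # continue solutions over
print(len(tracked.solutions()))
print(phc.mixed_volume(end_sys))                    # Bernstein bound on root count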
4a079032a1da226c2dd508a2e2094461ee70a958
282
py
Python
backend/models/constants.py
DanielAguirre/metrics-mvp
a438fb6f1765fd40a61bf6bc2f8f147936c42d75
[ "MIT" ]
null
null
null
backend/models/constants.py
DanielAguirre/metrics-mvp
a438fb6f1765fd40a61bf6bc2f8f147936c42d75
[ "MIT" ]
null
null
null
backend/models/constants.py
DanielAguirre/metrics-mvp
a438fb6f1765fd40a61bf6bc2f8f147936c42d75
[ "MIT" ]
null
null
null
import pytz

DEFAULT_TIME_STR_INTERVALS = [
    ('03:00','07:00'),
    ('07:00','10:00'),
    ('10:00','16:00'),
    ('16:00','19:00'),
    ('19:00','03:00+1'),
]

PACIFIC_TIMEZONE = pytz.timezone('US/Pacific')

AGENCY = 'sf-muni'

DEFAULT_STAT_KEYS = ['count', 'avg', 'min', 'median', 'max']
18.8
60
0.578014
4a07905668b54fa624cbc29413390b7e067ef08c
2,145
py
Python
salt/sdb/etcd_db.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
1
2020-03-31T22:51:16.000Z
2020-03-31T22:51:16.000Z
salt/sdb/etcd_db.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
null
null
null
salt/sdb/etcd_db.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
1
2021-09-30T07:00:01.000Z
2021-09-30T07:00:01.000Z
# -*- coding: utf-8 -*-
'''
etcd Database Module

:maintainer:    SaltStack
:maturity:      New
:depends:       python-etcd
:platform:      all

.. versionadded:: 2015.5.0

This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.

Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:

.. code-block:: yaml

    myetcd:
      driver: etcd
      etcd.host: 127.0.0.1
      etcd.port: 2379

The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host
that is hosting the etcd database and ``etcd.port`` refers to the port on
that host.

.. code-block:: yaml

    password: sdb://myetcd/mypassword
'''

# import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging

try:
    import salt.utils.etcd_util
    HAS_LIBS = True
except ImportError:
    HAS_LIBS = False

log = logging.getLogger(__name__)

__func_alias__ = {
    'set_': 'set'
}

__virtualname__ = 'etcd'


def __virtual__():
    '''
    Only load the module if python-etcd is installed
    '''
    if HAS_LIBS:
        return __virtualname__
    return False


def set_(key, value, service=None, profile=None):  # pylint: disable=W0613
    '''
    Set a key/value pair in the etcd service
    '''
    client = _get_conn(profile)
    client.set(key, value)
    return get(key, service, profile)


def get(key, service=None, profile=None):  # pylint: disable=W0613
    '''
    Get a value from the etcd service
    '''
    client = _get_conn(profile)
    result = client.get(key)
    return result.value


def delete(key, service=None, profile=None):  # pylint: disable=W0613
    '''
    Delete a key from the etcd service
    '''
    client = _get_conn(profile)
    try:
        client.delete(key)
        return True
    except Exception:  # pylint: disable=broad-except
        return False


def _get_conn(profile):
    '''
    Get a connection
    '''
    return salt.utils.etcd_util.get_conn(profile)
21.887755
79
0.66993
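A rough sketch of exercising the sdb functions above directly, outside the Salt loader; it assumes python-etcd is installed and an etcd instance is listening locally, and the profile dict simply mirrors the module docstring.

# Set, get, and delete a key through the sdb module's own functions.
import salt.sdb.etcd_db as etcd_db

profile = {
    'driver': 'etcd',
    'etcd.host': '127.0.0.1',
    'etcd.port': 2379,
}

etcd_db.set_('mypassword', 's3cr3t', profile=profile)   # aliased to "set"
print(etcd_db.get('mypassword', profile=profile))       # -> 's3cr3t'
print(etcd_db.delete('mypassword', profile=profile))    # -> True on success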
4a0792bdb2be056d05608db13489b45edaf14f5a
8,336
py
Python
tests/test_dgilib_interface_communication.py
martinabr/pydgilib
9e27b11e74518375ae78959a71f896e92a51cdb1
[ "BSD-3-Clause" ]
2
2019-04-05T13:27:54.000Z
2020-10-09T22:56:22.000Z
tests/test_dgilib_interface_communication.py
martinabr/pydgilib
9e27b11e74518375ae78959a71f896e92a51cdb1
[ "BSD-3-Clause" ]
null
null
null
tests/test_dgilib_interface_communication.py
martinabr/pydgilib
9e27b11e74518375ae78959a71f896e92a51cdb1
[ "BSD-3-Clause" ]
1
2019-09-11T07:48:45.000Z
2019-09-11T07:48:45.000Z
"""This module holds the automated tests for DGILib Interface Communication.""" from pydgilib.dgilib import DGILib from pydgilib.dgilib_config import ( NUM_INTERFACES, INTERFACE_TIMESTAMP, INTERFACE_SPI, INTERFACE_USART, INTERFACE_I2C, INTERFACE_GPIO, INTERFACE_POWER_DATA, INTERFACE_POWER_SYNC, INTERFACE_RESERVED) from time import sleep import pytest verbosity = (0, 99) # Number of seconds to log data for in read and clear tests polling_duration = 1 INTERFACES = [INTERFACE_TIMESTAMP, INTERFACE_SPI, INTERFACE_USART, INTERFACE_I2C, INTERFACE_GPIO, INTERFACE_POWER_DATA, INTERFACE_POWER_SYNC, 80, # Not in documentation INTERFACE_RESERVED] INTERFACES_ENABLE = [INTERFACE_SPI, INTERFACE_USART, INTERFACE_I2C, INTERFACE_GPIO, INTERFACE_POWER_SYNC, 80, # Not in documentation INTERFACE_RESERVED] INTERFACES_SET_CONFIG = [INTERFACE_TIMESTAMP, INTERFACE_SPI, INTERFACE_USART, INTERFACE_I2C, INTERFACE_GPIO, INTERFACE_POWER_SYNC, 80, # Not in documentation INTERFACE_RESERVED] INTERFACES_WRITE = [INTERFACE_USART, INTERFACE_I2C, INTERFACE_GPIO, INTERFACE_RESERVED] @pytest.mark.parametrize("verbose", verbosity) def test_interface_list(verbose): """test_interface_list. DGILibInterfaceCommunication.interface_list """ with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() assert isinstance(interfaces, list) assert len(interfaces) < NUM_INTERFACES for interface in interfaces: assert interface in INTERFACES @pytest.mark.parametrize("verbose", verbosity) def test_interface_enable(verbose): """test_interface_enable. DGILibInterfaceCommunication.interface_enable """ with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: assert dgilib.interface_enable(interface_id) is None @pytest.mark.parametrize("verbose", verbosity) def test_interface_disable(verbose): """test_interface_disable. DGILibInterfaceCommunication.interface_disable """ with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES: if interface_id in interfaces: assert dgilib.interface_disable(interface_id) is None @pytest.mark.parametrize("verbose", verbosity) def test_interface_get_configuration(verbose): """test_interface_get_configuration. DGILibInterfaceCommunication.interface_get_configuration """ with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES: if interface_id in interfaces: config = dgilib.interface_get_configuration(interface_id) assert isinstance(config, tuple) assert len(config) == 2 assert isinstance(config[0], list) assert isinstance(config[1], list) @pytest.mark.parametrize("verbose", verbosity) def test_interface_set_configuration(verbose): """test_interface_set_configuration. DGILibInterfaceCommunication.interface_set_configuration Gets the configuration and sets it to the same values. """ with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_SET_CONFIG: if interface_id in interfaces: config = dgilib.interface_get_configuration(interface_id) assert dgilib.interface_set_configuration( interface_id, *config) is None @pytest.mark.parametrize("verbose", verbosity) def test_interface_clear_buffer(verbose): """test_interface_clear_buffer. 
DGILibInterfaceCommunication.interface_clear_buffer """ # When not enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES: if interface_id in interfaces: assert dgilib.interface_clear_buffer(interface_id) is None # When enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: dgilib.interface_enable(interface_id) assert dgilib.interface_clear_buffer(interface_id) is None dgilib.interface_disable(interface_id) # When enabled and polling with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: dgilib.interface_enable(interface_id) dgilib.start_polling() sleep(polling_duration) assert dgilib.interface_clear_buffer(interface_id) is None dgilib.stop_polling() dgilib.interface_disable(interface_id) @pytest.mark.parametrize("verbose", verbosity) def test_interface_read_data(verbose): """test_interface_read_data. DGILibInterfaceCommunication.interface_read_data """ # When not enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: data = dgilib.interface_read_data(interface_id) assert isinstance(data, tuple) assert len(data) == 2 assert isinstance(data[0], list) assert isinstance(data[1], list) assert len(data[0]) == len(data[1]) # When enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: dgilib.interface_enable(interface_id) data = dgilib.interface_read_data(interface_id) assert isinstance(data, tuple) assert len(data) == 2 assert isinstance(data[0], list) assert isinstance(data[1], list) assert len(data[0]) == len(data[1]) dgilib.interface_disable(interface_id) # When enabled and polling with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_ENABLE: if interface_id in interfaces: dgilib.interface_enable(interface_id) dgilib.start_polling() sleep(polling_duration) data = dgilib.interface_read_data(interface_id) assert isinstance(data, tuple) assert len(data) == 2 assert isinstance(data[0], list) assert isinstance(data[1], list) assert len(data[0]) == len(data[1]) dgilib.stop_polling() dgilib.interface_disable(interface_id) @pytest.mark.parametrize("verbose", verbosity) def test_interface_write_data(verbose): """test_interface_write_data. DGILibInterfaceCommunication.interface_write_data """ # When not enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_WRITE: if interface_id in interfaces: assert dgilib.interface_write_data(interface_id, [0]) is None # When enabled with DGILib(verbose=verbose) as dgilib: interfaces = dgilib.interface_list() for interface_id in INTERFACES_WRITE: if interface_id in interfaces: dgilib.interface_enable(interface_id) assert dgilib.interface_write_data(interface_id, [0]) is None dgilib.interface_disable(interface_id)
36.884956
79
0.644794
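The pattern the tests above exercise, condensed into one hypothetical flow (enable an interface, poll briefly, read, clean up); it assumes an attached debugger that pydgilib can open, and GPIO as the example interface.

# Enable GPIO, poll for one second, then read the (ticks, values) lists.
from time import sleep

from pydgilib.dgilib import DGILib
from pydgilib.dgilib_config import INTERFACE_GPIO

with DGILib(verbose=0) as dgilib:
    if INTERFACE_GPIO in dgilib.interface_list():
        dgilib.interface_enable(INTERFACE_GPIO)
        dgilib.start_polling()
        sleep(1)  # let samples accumulate
        ticks, values = dgilib.interface_read_data(INTERFACE_GPIO)
        print(len(ticks), 'samples')  # two parallel lists, per the tests
        dgilib.stop_polling()
        dgilib.interface_disable(INTERFACE_GPIO)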
4a079322b499512cd37f392ca760ca6d5a26fa79
10,217
py
Python
tezpool.py
vogelito/tezpool
db480340f2f6d7d2dbe76406a3864fd09e61784a
[ "MIT" ]
null
null
null
tezpool.py
vogelito/tezpool
db480340f2f6d7d2dbe76406a3864fd09e61784a
[ "MIT" ]
null
null
null
tezpool.py
vogelito/tezpool
db480340f2f6d7d2dbe76406a3864fd09e61784a
[ "MIT" ]
null
null
null
#!/usr/bin/python3
#http://doc.tzalpha.net/api/rpc_proposal.html?highlight=june
#http://doc.tzalpha.net/api/rpc.html#usage
import json
import requests
import time
import argparse
import math
import sys

# Constants
PRESERVED_CYCLES = 5
BLOCK_REWARD = 16 * 1000000.
ENDORSMENT_REWARD = 2 * 1000000.
BLOCKS_PER_CYCLE = 4096

# Force python3
if sys.version_info[0] < 3:
    print ('python2 not supported, please use python3')
    sys.exit (0)

# Parse command line args
parser = argparse.ArgumentParser(description='Tezos delegate redistribution script')
parser.add_argument('-c', '--config', metavar='config.json', dest='cfile', action='store',
                    default='config.json', help='set a config file (default: config.json)')
parser.add_argument('action', metavar='action', action='store', type=str,
                    choices=['updatependings', 'paypendings', 'updatedocs'],
                    help='action to perform (updatependings, paypendings, updatedocs)')
args = parser.parse_args ()

# Load the config file
try:
    conf = json.load (open (args.cfile, 'r'))
except:
    print ('Unable to load config file.')
    sys.exit ()


def try_get(uri, try_n=5):
    try:
        return requests.get (conf['host'] + uri)
    except:
        if try_n > 0:
            print ('Get failed, retrying %d' % try_n)
            return try_get(uri, try_n - 1)
        else:
            raise Exception('Reached max retries for get request: ' + uri)


def formatBalance (bal):
    return str (int (bal) / 1000000)


def getCurrentCycle ():
    return try_get ('/chains/main/blocks/head/helpers/current_level').json()['cycle']


def getBlockHashByIndex (idx):
    head = try_get ('/chains/main/blocks/head/header').json()
    head_level = head['level']
    head_hash = head['hash']
    return try_get ('/chains/main/blocks/' + head_hash + '~' + str (head_level - idx) + '/header').json()['hash']


def getFrozenBalance (cycle = None):
    if cycle == None:
        block = 'head'
    else:
        ccycle = getCurrentCycle ()
        clevel = try_get ('/chains/main/blocks/head/helpers/levels_in_current_cycle?offset=-'+str(ccycle - cycle)).json()
        block = getBlockHashByIndex (clevel['last'])

    r = try_get ('/chains/main/blocks/' + block + '/context/delegates/' + conf['pkh'] + '/frozen_balance_by_cycle').json()

    if cycle != None:
        return list (filter (lambda y: y['cycle'] == cycle, r))[0]
    else:
        return r


def getCycleSnapshot (cycle):
    #snapshot_block_offset = try_get ('/chains/main/blocks/head/context/raw/json/rolls/owner/snapshot/' + str(cycle)).json()[0]
    # Then multiply the result with 256 and sum the cycle index, we get the block of the snapshot
    #snapshot_block_index = ((cycle-PRESERVED_CYCLES-2)*4096)+((snapshot_block_offset+1)*256)
    snapshot_block_index = ((cycle-PRESERVED_CYCLES-2)*4096)+4095

    # Get the delegate information for the given snapshot
    block_hash = getBlockHashByIndex (snapshot_block_index)
    delegate_info = try_get ("/chains/main/blocks/" + block_hash + "/context/delegates/" + conf['pkh']).json()

    delegated = []

    # Get the delegated balance of each contract
    for x in delegate_info['delegated_contracts']:
        contract_info = try_get ("/chains/main/blocks/" + block_hash + "/context/contracts/" + x).json()
        contract_info2 = {
            "balance": contract_info['balance'],
            "manager": contract_info['manager'],
            "address": x,
            "alias": conf['deleguees'][x] if (x in conf['deleguees']) else None,
            "percentage": (int (10000. * 100. * float (contract_info['balance']) / float (delegate_info['staking_balance']))) / 10000.
        }
        delegated.append(contract_info2)

    # Append the delegate as contractor
    delegated.append({
        "balance": delegate_info['balance'],
        "manager": conf['pkh'],
        "address": conf['pkh'],
        "alias": conf['name'],
        "percentage": (int (10000. * 100. * float (delegate_info['balance']) / float (delegate_info['staking_balance']))) / 10000.
    })

    return {
        "cycle": cycle,
        "staking_balance": delegate_info['staking_balance'],
        "delegated": delegated
    }


def getBakingAndEndorsmentRights (cycle, curcycle):
    nhead = curcycle * 4096 - cycle * 4096
    if nhead < 0:
        nhead = ""
    else:
        nhead = "~" + str(nhead)

    bak = try_get ("/chains/main/blocks/head" + nhead + "/helpers/baking_rights?delegate=" + conf['pkh'] + '&cycle=' + str(cycle)).json()
    endors = try_get ("/chains/main/blocks/head" + nhead + "/helpers/endorsing_rights?delegate=" + conf['pkh'] + '&cycle=' + str(cycle)).json()

    b = list(filter(lambda x: x['priority'] == 0, bak))
    e = endors

    return {
        'blocks': b,
        'endorsment': e,
        'estimated_reward': len(b) * BLOCK_REWARD + len(e) * ENDORSMENT_REWARD
    }


def getRewardForPastCycle (cycle):
    return getFrozenBalance (cycle)


if args.action == 'updatedocs':
    curcycle = getCurrentCycle()

    # Load the old docs if any
    try:
        f = open ('docs/data.json', 'r')
        data = json.loads (f.read())
        f.close ()
        lastcycle = max(list(map(lambda y: y['cycle'], data['cycles']))) + 1
        data['cycles'] = list (filter (lambda y: y['cycle'] <= lastcycle, data['cycles']))
    except:
        data = { "cycles": [] }
        lastcycle = int (conf['startcycle'])

    print ('Starting from cycle', lastcycle)

    for cycle in range (lastcycle, getCurrentCycle() + PRESERVED_CYCLES + 1):
        print ('Updating docs data for cycle', cycle)
        snap = getCycleSnapshot(cycle)
        brights = getBakingAndEndorsmentRights(cycle, curcycle)
        data['cycles'].append ({ "cycle": cycle, "snapshot": snap, "rights": brights })

    data['pkh'] = conf['pkh']
    data['name'] = conf['name']
    data['deleguees'] = conf['deleguees']
    data['percentage'] = conf['percentage']
    data['currentcycle'] = curcycle

    f = open ('docs/data.json', 'w')
    f.write (json.dumps(data, separators=(',',':'), indent=4))
    f.close ()
    print ('Up to date')

elif args.action == 'updatependings':
    try:
        f = open ('paylog.json', 'r')
        data = json.loads (f.read())
        f.close ()
    except:
        data = {
            'cycle': int (conf['startcycle']) - 1,
            'frozen': 0,
            'frozenminusfee': 0,
            'pendingminusfee': 0,
            'pending': 0,
            'paid': 0,
            'deleguees': {},
            'cycles': {}
        }

    curcycle = getCurrentCycle()

    data['frozen'] = 0
    data['frozenminusfee'] = 0
    for x in data['deleguees']:
        data['deleguees'][x]['frozen'] = 0

    for cycle in range (data['cycle'] + 1, curcycle):
        print ('Updating for cycle', cycle)
        frozen = (curcycle - cycle - 1) < PRESERVED_CYCLES

        try:
            rew = getRewardForPastCycle (cycle)
        except:
            print ('Cant get reward for cycle', cycle)
            continue

        rewsubfee = int (int (rew['rewards']) - int (rew['rewards']) * (100 - conf['percentage']) / 100.)

        if not frozen:
            data['cycle'] = cycle
            data['pending'] += int (rew['rewards'])
            data['pendingminusfee'] += int (rewsubfee)
        else:
            data['frozen'] += int (rew['rewards'])
            data['frozenminusfee'] += int (rewsubfee)

        data['cycles'][str(cycle)] = {
            'frozenminusfee': rewsubfee if frozen else 0,
            'frozen': int (rew['rewards']) if frozen else 0,
            'rewardminusfee': rewsubfee if not frozen else 0,
            'reward': int (rew['rewards']) if not frozen else 0,
        }

        snap = getCycleSnapshot (cycle)
        for d in snap['delegated']:
            drew = int (rewsubfee * d['percentage'] / 100.)

            if not (d['address'] in data['deleguees']) and ((conf['private'] and d['alias'] != None) or (not conf['private'])):
                data['deleguees'][d['address']] = {
                    'address': d['address'],
                    'frozen': drew if frozen else 0,
                    'pending': drew if not frozen else 0,
                    'paid': 0,
                    'alias': d['alias'],
                    'cycles': { }
                }
                data['deleguees'][d['address']]['cycles'][str(cycle)] = {
                    'cycle': cycle,
                    'percentage': d['percentage'],
                    'balance': d['balance'],
                    'frozen': drew if frozen else 0,
                    'reward': drew if not frozen else 0
                }
            elif (d['address'] in data['deleguees']) and ((conf['private'] and d['alias'] != None) or (not conf['private'])):
                data['deleguees'][d['address']]['frozen'] += drew if frozen else 0
                data['deleguees'][d['address']]['pending'] += drew if not frozen else 0
                data['deleguees'][d['address']]['cycles'][str(cycle)] = {
                    'cycle': cycle,
                    'percentage': d['percentage'],
                    'balance': d['balance'],
                    'frozen': drew if frozen else 0,
                    'reward': drew if not frozen else 0
                }

    # Save the paylog
    f = open ('paylog.json', 'w')
    f.write (json.dumps (data, separators=(',',':'), indent=4))
    f.close ()

    f = open ('docs/paylog.json', 'w')
    f.write (json.dumps (data, separators=(',',':'), indent=4))
    f.close ()

elif args.action == 'paypendings':
    f = open ('paylog.json', 'r')
    data = json.loads (f.read())
    f.close ()

    if data['pendingminusfee'] == 0:
        print ('No pending payments available')
        sys.exit(0)

    print ('There are', formatBalance(data['pendingminusfee']), 'XTZ pending in the pool')

    paydata = ""
    paiddeleguees = 0

    for x in data['deleguees']:
        v = data['deleguees'][x]

        if float (formatBalance(v['pending'])) < float(conf['payout']['minpayout']):
            continue

        if conf['payout']['method'] == 'tezos-client':
            if x != conf['pkh']:
                print ('Sending', formatBalance(v['pending']), 'XTZ to', x)
                paydata += 'echo Sending ' + str (formatBalance(v['pending'])) + ' XTZ to ' + x + '\n'
                paydata += conf['payout']['tezos_client'] + ' transfer ' + str (formatBalance(v['pending'])) + ' from "' + conf['payout']['from_account'] + '" to "' + x + '"\n'
                paydata += 'sleep 1\n\n'
            else:
                print ('Not sending', formatBalance(v['pending']), 'XTZ to', x, 'because it\'s the pool address')

            data['deleguees'][x]['paid'] += data['deleguees'][x]['pending']
            data['paid'] += data['deleguees'][x]['pending']
            data['pendingminusfee'] -= data['deleguees'][x]['pending']
            data['pending'] -= data['deleguees'][x]['pending']
            data['deleguees'][x]['pending'] = 0
            paiddeleguees += 1
        else:
            print('Payout method', conf['payout']['method'], 'is not available')
            sys.exit (0)

    if paiddeleguees == 0:
        print ('No payments to do, exiting')
        sys.exit (0)

    if conf['payout']['method'] == 'tezos-client':
        f = open ('payouts.sh', 'w')
        f.write (paydata)
        f.close ()
        print ('payouts.sh written; exec the bash command inside to send the transactions.')

    f = open ('paylog.json', 'w')
    f.write (json.dumps (data, separators=(',',':'), indent=4))
    f.close ()

    f = open ('docs/paylog.json', 'w')
    f.write (json.dumps (data, separators=(',',':'), indent=4))
    f.close ()

    print ('paylog.json updated')
31.436923
204
0.645982
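A worked example of the fee arithmetic in the 'updatependings' branch above, with made-up numbers (all amounts in mutez, i.e. millionths of an XTZ):

# conf['percentage'] is the share of rewards passed on to delegators.
rewards = 40_000_000                                            # 40 XTZ earned in a cycle
percentage = 90
rewsubfee = int(rewards - rewards * (100 - percentage) / 100.)
print(rewsubfee / 1000000)                                      # 36.0 XTZ distributed, 4 kept as fee

share = 12.5                                                    # a delegator's staking-balance percentage
drew = int(rewsubfee * share / 100.)
print(drew / 1000000)                                           # 4.5 XTZ credited to that delegator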
4a0793933df876a4631d39a2dd3d969543e39ab1
22,320
py
Python
nippy/nippy.py
UEF-BBC/nippy
05d0eb44e40b6c8f0c7cbabdc828410c2fad8b0c
[ "MIT" ]
38
2018-11-13T06:46:11.000Z
2022-03-15T08:26:43.000Z
nippy/nippy.py
UEF-BBC/nippy
05d0eb44e40b6c8f0c7cbabdc828410c2fad8b0c
[ "MIT" ]
1
2019-11-24T08:19:36.000Z
2019-11-24T08:19:36.000Z
nippy/nippy.py
UEF-BBC/nippy
05d0eb44e40b6c8f0c7cbabdc828410c2fad8b0c
[ "MIT" ]
16
2019-11-03T22:36:28.000Z
2022-03-06T10:46:32.000Z
# Semi-automatic preprocessing script for NIR data. This script contains the preprocessing functions and some utility
# functions (like data export).
#
# jtorniainen, ioafara // Department of Applied Physics, University of Eastern Finland
# 2020, MIT License

import scipy.signal
import scipy.io as io
import scipy.ndimage as nd
import numpy as np
from sklearn.preprocessing import normalize, scale
from . import handler
import pickle
import os
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils.validation import FLOAT_DTYPES


class SavitzkyGolay(TransformerMixin, BaseEstimator):
    def __init__(self, *, filter_win=11, poly_order=3, deriv_order=0, delta=1.0, copy=True):
        self.copy = copy
        self.filter_win = filter_win
        self.poly_order = poly_order
        self.deriv_order = deriv_order
        self.delta = delta

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')

        # Make sure filter window length is odd
        filter_win = self.filter_win
        if self.filter_win % 2 == 0:
            filter_win += 1

        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = savgol(X.T, filter_win=filter_win, poly_order=self.poly_order,
                   deriv_order=self.deriv_order, delta=self.delta).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class LocalStandardNormalVariate(TransformerMixin, BaseEstimator):
    def __init__(self, *, num_windows=3, copy=True):
        self.copy = copy
        self.num_windows = num_windows

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = lsnv(X.T, num_windows=self.num_windows).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class Normalize(TransformerMixin, BaseEstimator):
    def __init__(self, *, imin=0, imax=1, copy=True):
        self.copy = copy
        self.imin = imin
        self.imax = imax

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = norml(X.T, imin=self.imin, imax=self.imax).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class NoPreprocessing(TransformerMixin, BaseEstimator):
    def __init__(self, *, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        return X


class Detrend(TransformerMixin, BaseEstimator):
    def __init__(self, *, bp=0, copy=True):
        self.copy = copy
        self.bp = bp

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = detrend(X.T, bp=self.bp).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class MultipleScatterCorrection(TransformerMixin, BaseEstimator):
    def __init__(self, *, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = msc(X.T).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class RobustNormalVariate(TransformerMixin, BaseEstimator):
    def __init__(self, *, iqr1=75, iqr2=25, copy=True):
        self.copy = copy
        self.iqr1 = iqr1
        self.iqr2 = iqr2

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = rnv(X.T, iqr=[self.iqr1, self.iqr2]).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class Baseline(TransformerMixin, BaseEstimator):
    def __init__(self, *, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = baseline(X.T).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class StandardNormalVariate(TransformerMixin, BaseEstimator):
    def __init__(self, *, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        if sparse.issparse(X):
            raise ValueError('Sparse matrices not supported!')
        return self

    def transform(self, X, copy=None):
        copy = copy if copy is not None else self.copy
        X = self._validate_data(X, reset=True, accept_sparse='csr', copy=copy,
                                estimator=self, dtype=FLOAT_DTYPES,
                                force_all_finite='allow-nan')
        X = snv(X.T).T
        return X

    def _more_tags(self):
        return {'allow_nan': True}


class Preprocessor(object):
    """ Preprocessor object can be used to run nippy as an iterator (see documentation for examples). """

    def __init__(self, wavelength, spectra, configuration_file):
        """
        Args:
            wavelength <numpy.ndarray>: Vector of wavelengths.
            spectra <numpy.ndarray>: NIRS data matrix.
            configuration_file <str>: A path to the configuration file.
        """
        self.wavelength = wavelength
        self.spectra = spectra
        self.configuration = handler.read_configuration(configuration_file)
        self.current_pipe_idx = 0

    def __iter__(self):
        return self

    def __next__(self):
        """ Returns the next preprocessed dataset and a summary of preprocessing operations. """
        if self.current_pipe_idx >= len(self.configuration):
            raise StopIteration
        else:
            this_idx = self.current_pipe_idx
            wavelength_, spectra_ = run_pipeline(self.wavelength.copy(), self.spectra.copy(),
                                                 self.configuration[this_idx])
            self.current_pipe_idx += 1
            return wavelength_, spectra_, self.configuration[this_idx]


# PREPROCESSING FUNCTIONS

def baseline(spectra):
    """ Removes baseline (mean) from each spectrum.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.

    Returns:
        spectra <numpy.ndarray>: Mean-centered NIRS data matrix
    """
    return spectra - np.mean(spectra, axis=0)


def snv(spectra):
    """ Perform scatter correction using the standard normal variate.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.

    Returns:
        spectra <numpy.ndarray>: NIRS data with (S/R)NV applied.
    """
    return (spectra - np.mean(spectra, axis=0)) / np.std(spectra, axis=0)


def rnv(spectra, iqr=[75, 25]):
    """ Perform scatter correction using robust normal variate.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        iqr <list>: IQR ranges [lower, upper] for robust normal variate.

    Returns:
        spectra <numpy.ndarray>: NIRS data with (S/R)NV applied.
    """
    return (spectra - np.median(spectra, axis=0)) / np.subtract(*np.percentile(spectra, iqr, axis=0))


def lsnv(spectra, num_windows=10):
    """ Perform local scatter correction using the standard normal variate.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        num_windows <int>: number of equispaced windows to use (window size (in points) is length / num_windows)

    Returns:
        spectra <numpy.ndarray>: NIRS data with local SNV applied.
    """
    parts = np.array_split(spectra, num_windows, axis=0)
    for idx, part in enumerate(parts):
        parts[idx] = snv(part)

    return np.concatenate(parts, axis=0)


def savgol(spectra, filter_win=11, poly_order=3, deriv_order=0, delta=1.0):
    """ Perform Savitzky–Golay filtering on the data (also calculates derivatives). This function is a wrapper for
        scipy.signal.savgol_filter.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        filter_win <int>: Size of the filter window in samples (default 11).
        poly_order <int>: Order of the polynomial estimation (default 3).
        deriv_order <int>: Order of the derivation (default 0).

    Returns:
        spectra <numpy.ndarray>: NIRS data smoothed with Savitzky-Golay filtering
    """
    return scipy.signal.savgol_filter(spectra, filter_win, poly_order, deriv_order, delta=delta, axis=0)


def trim(wavelength, spectra, bins):
    """ Trim spectra to a specified wavelength bin (or bins).

    Args:
        wavelength <numpy.ndarray>: Vector of wavelengths.
        spectra <numpy.ndarray>: NIRS data matrix.
        bins <list>: A bin or a list of bins defining the trim operation.

    Returns:
        wavelength <numpy.ndarray>: Trimmed wavelengths.
        spectra <numpy.ndarray>: Trimmed NIR spectra.
    """
    if type(bins[0]) != list:
        bins = [bins]

    spectra_trim = np.array([]).reshape(0, spectra.shape[1])
    wavelength_trim = np.array([])
    for wave_range in bins:
        mask = np.bitwise_and(wavelength >= wave_range[0], wavelength <= wave_range[1])
        spectra_trim = np.vstack((spectra_trim, spectra[mask, :]))
        wavelength_trim = np.hstack((wavelength_trim, wavelength[mask]))
    return wavelength_trim, spectra_trim


def resample(wavelength, spectra, resampling_ratio):
    """ Resample spectra according to the resampling ratio.

    Args:
        wavelength <numpy.ndarray>: Vector of wavelengths.
        spectra <numpy.ndarray>: NIRS data matrix.
        resampling_ratio <float>: new length with respect to original length

    Returns:
        wavelength_ <numpy.ndarray>: Resampled wavelengths.
        spectra_ <numpy.ndarray>: Resampled NIR spectra
    """
    new_length = int(np.round(wavelength.size * resampling_ratio))
    spectra_, wavelength_ = scipy.signal.resample(spectra, new_length, wavelength)
    return wavelength_, spectra_


def norml(spectra, udefined=True, imin=0, imax=1):
    """ Perform spectral normalisation with user-defined limits.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        udefined <bool>: use user defined limits
        imin <float>: user defined minimum
        imax <float>: user defined maximum

    Returns:
        spectra <numpy.ndarray>: Normalized NIR spectra
    """
    if udefined:
        f = (imax - imin) / (np.max(spectra) - np.min(spectra))
        n = spectra.shape
        arr = np.empty((0, n[0]), dtype=float)  # create empty array for spectra
        for i in range(0, n[1]):
            d = spectra[:, i]
            dnorm = imin + f * d
            arr = np.append(arr, [dnorm], axis=0)
        return np.transpose(arr)
    else:
        return spectra / np.linalg.norm(spectra, axis=0)


def detrend(spectra, bp=0):
    """ Perform spectral detrending to remove linear trend from data.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        bp <list>: A sequence of break points. If given, an individual linear fit is performed for each part of data
        between two break points. Break points are specified as indices into data.

    Returns:
        spectra <numpy.ndarray>: Detrended NIR spectra
    """
    return scipy.signal.detrend(spectra, bp=bp)


def msc(spectra):
    """ Performs multiplicative scatter correction to the mean.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.

    Returns:
        spectra <numpy.ndarray>: Scatter corrected NIR spectra.
    """
    spectra = scale(spectra, with_std=False, axis=0)  # Demean
    reference = np.mean(spectra, axis=1)

    for col in range(spectra.shape[1]):
        a, b = np.polyfit(reference, spectra[:, col], deg=1)
        spectra[:, col] = (spectra[:, col] - b) / a

    return spectra


def emsc(wave, spectra, remove_mean=False):
    """ Performs (basic) extended multiplicative scatter correction to the mean.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.

    Returns:
        spectra <numpy.ndarray>: Scatter corrected NIR spectra.
    """
    if remove_mean:
        spectra = scale(spectra, with_std=False, axis=0)

    p1 = .5 * (wave[0] + wave[-1])
    p2 = 2 / (wave[0] - wave[-1])

    # Compute model terms
    model = np.ones((wave.size, 4))
    model[:, 1] = p2 * (wave[0] - wave) - 1
    model[:, 2] = (p2 ** 2) * ((wave - p1) ** 2)
    model[:, 3] = np.mean(spectra, axis=1)

    # Solve correction parameters
    params = np.linalg.lstsq(model, spectra)[0].T

    # Apply correction
    spectra = spectra - np.dot(params[:, :-1], model[:, :-1].T).T
    spectra = np.multiply(spectra, 1 / np.repeat(params[:, -1].reshape(1, -1), spectra.shape[0], axis=0))

    return spectra


def clip(wavelength, spectra, threshold, substitute=None):
    """ Removes or substitutes values above the given threshold.

    Args:
        wavelength <numpy.ndarray>: Vector of wavelengths.
        spectra <numpy.ndarray>: NIRS data matrix.
        threshold <float>: threshold value for rejection
        substitute <float>: substitute value for rejected values (None removes values from the spectra)

    Returns:
        wavelength <numpy.ndarray>: Vector of wavelengths.
        spectra <numpy.ndarray>: NIR spectra with threshold exceeding values removed.
    """
    if substitute == None:
        # remove threshold violations
        mask = np.any(spectra > threshold, axis=1)
        spectra = spectra[~mask, :]
        wavelength = wavelength[~mask]
    else:
        # substitute threshold violations with a value
        spectra[spectra > threshold] = substitute
        return wavelength, spectra

    return wavelength, spectra


def smooth(spectra, filter_win, window_type='flat', mode='reflect'):
    """ Smooths the spectra using convolution.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        filter_win <float>: length of the filter window in samples.
        window_type <str>: filtering window to use for convolution (see scipy.signal.windows)
        mode <str>: convolution mode

    Returns:
        spectra <numpy.ndarray>: Smoothed NIR spectra.
    """
    if window_type == 'flat':
        window = np.ones(filter_win)
    else:
        window = scipy.signal.windows.get_window(window_type, filter_win)
    window = window / np.sum(window)

    for column in range(spectra.shape[1]):
        spectra[:, column] = nd.convolve(spectra[:, column], window, mode=mode)

    return spectra


def derivate(spectra, order=1, delta=1):
    """ Computes Nth order derivates with the desired spacing using numpy.gradient.

    Args:
        spectra <numpy.ndarray>: NIRS data matrix.
        order <float>: Order of the derivation.
        delta <int>: Delta of the derivate (in samples).

    Returns:
        spectra <numpy.ndarray>: Derivated NIR spectra.
    """
    for n in range(order):
        spectra = np.gradient(spectra, delta, axis=0)
    return spectra


# UTILITY FUNCTIONS

def export_pipelines_to_csv(output_path, datasets, pipelines, mkdir=False):
    """ Exports all datasets and the related pipelines to csv files.

    Args:
        filename <str> output directory.
        datasets <list> list of datasets processed by nippy.
        pipelines <list> list of nippy pipelines.
        mkdir <bool> create output directory if it does not exist.
    """
    if mkdir and not os.path.isdir(output_path):
        os.mkdir(output_path)

    for idx, dataset in enumerate(datasets):
        filename = os.path.join(output_path, '{}.csv'.format(idx + 1))
        np.savetxt(filename, np.hstack((dataset[0].reshape(-1, 1), dataset[1])), delimiter=',')

    with open(os.path.join(output_path, 'pipelines.log'), 'w') as f:
        for idx, pipe in enumerate(pipelines):
            f.write('{};{}\n'.format(idx + 1, str(pipe)))


def export_pipelines_to_mat(output_path, datasets, pipelines, mkdir=False):
    """ Exports all datasets and the related pipelines to mat files.

    Args:
        filename <str> output directory.
        datasets <list> list of datasets processed by nippy.
        pipelines <list> list of nippy pipelines.
        mkdir <bool> create output directory if it does not exist.
    """
    if mkdir and not os.path.isdir(output_path):
        os.mkdir(output_path)

    new_datasets = []
    for idx, data, pipe in zip(range(len(datasets)), datasets, pipelines):
        dataset = {'data': data[1], 'wave': data[0], 'params': str(pipe)}
        io.savemat(os.path.join(output_path, '{}.mat'.format(idx + 1)), dataset)

    with open(os.path.join(output_path, 'pipelines.log'), 'w') as f:
        for idx, pipe in enumerate(pipelines):
            f.write('{};{}\n'.format(idx + 1, str(pipe)))


def export_pipelines_to_pickle(filename, datasets, pipelines):
    """ Exports all datasets and the related pipelines to a pickle file.

    Args:
        filename <str> output filepath.
        datasets <list> list of datasets processed by nippy.
        pipelines <list> list of nippy pipelines.
    """
    data = {'datasets': datasets, 'pipelines': pipelines}
    pickle.dump(data, open(filename, 'wb'))


def run_pipeline(wavelength_, spectra_, pipeline):
    if 'CLIP' in pipeline.keys() and pipeline['CLIP'] != None:
        wavelength_, spectra_ = clip(wavelength_, spectra_, **pipeline['CLIP'])

    if 'BASELINE' in pipeline.keys() and pipeline['BASELINE'] != None:
        spectra_ = baseline(spectra_, **pipeline['BASELINE'])

    if 'SNV' in pipeline.keys() and pipeline['SNV'] != None:
        spectra_ = snv(spectra_, **pipeline['SNV'])

    if 'RNV' in pipeline.keys() and pipeline['RNV'] != None:
        spectra_ = rnv(spectra_, **pipeline['RNV'])

    if 'LSNV' in pipeline.keys() and pipeline['LSNV'] != None:
        spectra_ = lsnv(spectra_, **pipeline['LSNV'])

    if 'MSC' in pipeline.keys() and pipeline['MSC'] != None:
        spectra_ = msc(spectra_)

    if 'EMSC' in pipeline.keys() and pipeline['EMSC'] != None:
        spectra_ = emsc(wavelength_, spectra_)

    if 'NORML' in pipeline.keys() and pipeline['NORML'] != None:
        spectra_ = norml(spectra_, **pipeline['NORML'])

    if 'SAVGOL' in pipeline.keys() and pipeline['SAVGOL'] != None:
        spectra_ = savgol(spectra_, **pipeline['SAVGOL'])

    if 'SMOOTH' in pipeline.keys() and pipeline['SMOOTH'] != None:
        spectra_ = smooth(spectra_, **pipeline['SMOOTH'])

    if 'DERIVATE' in pipeline.keys() and pipeline['DERIVATE'] != None:
        spectra_ = derivate(spectra_, **pipeline['DERIVATE'])

    if 'DETREND' in pipeline.keys() and pipeline['DETREND'] != None:
        spectra_ = detrend(spectra_, **pipeline['DETREND'])

    if 'RESAMPLE' in pipeline.keys() and pipeline['RESAMPLE'] != None:
        wavelength_, spectra_ = resample(wavelength_, spectra_, **pipeline['RESAMPLE'])

    if 'TRIM' in pipeline.keys() and pipeline['TRIM'] != None:
        wavelength_, spectra_ = trim(wavelength_, spectra_, **pipeline['TRIM'])

    return wavelength_, spectra_


def nippy(wavelength, spectra, pipelines):
    """ Main processing script of nippy. Applies operations specified in the 'pipelines' parameter to the given
        spectra.

    Args:
        wavelength <numpy.ndarray>: Vector of wavelengths.
        spectra <numpy.ndarray>: NIRS data matrix.
        pipelines <list>: list of nippy pipelines.

    Returns:
        datasets <list>: a list containing different preprocessed versions of the original spectra and wavelength.
    """
    datasets = []
    for idx, pipeline in enumerate(pipelines):
        wavelength_, spectra_ = run_pipeline(wavelength.copy(), spectra.copy(), pipeline)
        print('Running pipe {}:\n{}\n'.format(idx + 1, pipeline))
        datasets.append((wavelength_, spectra_))

    return datasets
33.972603
144
0.643772
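A minimal sketch of the functional API above on synthetic data; columns are spectra and rows are wavelength points (the sklearn transformers handle the transposes for you). The import path assumes the package layout shown here, and all values are illustrative.

# Scatter-correct, smooth, then trim random "spectra" to a wavelength window.
import numpy as np
from nippy.nippy import snv, savgol, trim

rng = np.random.default_rng(0)
wavelength = np.linspace(1100, 2500, 200)   # nm
spectra = rng.random((200, 5))              # 5 spectra, 200 points each

corrected = snv(spectra)                    # scatter correction per spectrum
smoothed = savgol(corrected, filter_win=11, poly_order=3)
wl_trim, sp_trim = trim(wavelength, smoothed, [1200, 2300])
print(sp_trim.shape)                        # fewer wavelength rows after trimming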
4a079467bead3863947a5088abba36624d3e5616
186
py
Python
examples/article/Code4.py
UnixJunkie/mordred
d65d3fa451aca3f32adf4124a83532978ae57e46
[ "BSD-3-Clause" ]
199
2017-04-26T07:40:32.000Z
2022-03-29T10:52:19.000Z
examples/article/Code4.py
UnixJunkie/mordred
d65d3fa451aca3f32adf4124a83532978ae57e46
[ "BSD-3-Clause" ]
87
2016-01-15T09:02:20.000Z
2022-03-21T23:18:08.000Z
examples/article/Code4.py
UnixJunkie/mordred
d65d3fa451aca3f32adf4124a83532978ae57e46
[ "BSD-3-Clause" ]
64
2018-03-07T13:21:47.000Z
2022-03-16T00:56:11.000Z
from distutils.version import StrictVersion

from mordred.RingCount import RingCount

# Start Code 4
presets = list(RingCount.preset(version=StrictVersion("1.0.0")))
print(len(presets))
23.25
64
0.795699
4a0794a6468fc0c8a4ce2d50bc189152025a84d4
2,133
py
Python
tests/display_module/test_conductor.py
MetaGenScope/metagenscope-server
609cd57c626c857c8efde8237a1f22f4d1e6065d
[ "MIT" ]
null
null
null
tests/display_module/test_conductor.py
MetaGenScope/metagenscope-server
609cd57c626c857c8efde8237a1f22f4d1e6065d
[ "MIT" ]
null
null
null
tests/display_module/test_conductor.py
MetaGenScope/metagenscope-server
609cd57c626c857c8efde8237a1f22f4d1e6065d
[ "MIT" ]
null
null
null
"""Test suite for DisplayModuleConductors.""" from uuid import uuid4 from app.display_modules.conductor import DisplayModuleConductor, SampleConductor from app.display_modules.sample_similarity import SampleSimilarityDisplayModule from app.tool_results.kraken import KrakenResultModule from app.tool_results.krakenhll import KrakenHLLResultModule from app.tool_results.metaphlan2 import Metaphlan2ResultModule from tests.base import BaseTestCase KRAKEN_NAME = KrakenResultModule.name() KRAKENHLL_NAME = KrakenHLLResultModule.name() METAPHLAN2_NAME = Metaphlan2ResultModule.name() class TestDisplayModuleConductor(BaseTestCase): """Test suite for display module Conductor.""" def test_downstream_modules(self): """Ensure downstream_modules is computed correctly.""" downstream_modules = DisplayModuleConductor.downstream_modules(KrakenResultModule) self.assertIn(SampleSimilarityDisplayModule, downstream_modules) class TestSampleConductor(BaseTestCase): """Test suite for display module Conductor.""" def test_get_valid_modules(self): """Ensure valid_modules is computed correctly.""" tools_present = set([KRAKEN_NAME, KRAKENHLL_NAME, METAPHLAN2_NAME]) downstream_modules = SampleConductor.downstream_modules(KrakenResultModule) sample_id = str(uuid4()) conductor = SampleConductor(sample_id, downstream_modules) valid_modules = conductor.get_valid_modules(tools_present) self.assertIn(SampleSimilarityDisplayModule, valid_modules) def test_partial_valid_modules(self): """Ensure valid_modules is computed correctly if tools are missing.""" tools_present = set([KRAKEN_NAME]) downstream_modules = SampleConductor.downstream_modules(KrakenResultModule) sample_id = str(uuid4()) conductor = SampleConductor(sample_id, downstream_modules) valid_modules = conductor.get_valid_modules(tools_present) self.assertTrue(SampleSimilarityDisplayModule not in valid_modules) class TestGroupConductor(BaseTestCase): """Test suite for display module Conductor.""" pass
40.245283
90
0.78106
4a0795da12bea7fcb0a4656aaafe8d424d3d63b5
3,382
py
Python
server/djangobackend/settings.py
valencialejo/agfzb-CloudAppDevelopment_Capstone
247a29cc28de5abb6f903b36396f6315a9ae4f7e
[ "Apache-2.0" ]
null
null
null
server/djangobackend/settings.py
valencialejo/agfzb-CloudAppDevelopment_Capstone
247a29cc28de5abb6f903b36396f6315a9ae4f7e
[ "Apache-2.0" ]
null
null
null
server/djangobackend/settings.py
valencialejo/agfzb-CloudAppDevelopment_Capstone
247a29cc28de5abb6f903b36396f6315a9ae4f7e
[ "Apache-2.0" ]
null
null
null
""" Django settings for djangobackend project. Generated by 'django-admin startproject' using Django 3.1.3. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'ao5z(o(z@cvzodm99d32jkxa5e8a1!q_4sqss5-a%n6tg$#h$+' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True APPEND_SLASH = True ALLOWED_HOSTS = ['localhost','valencialejo.us-south.cf.appdomain.cloud'] # Application definition INSTALLED_APPS = [ 'djangoapp.apps.DjangoappConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'djangobackend.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.media', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'djangobackend.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media') MEDIA_URL = '/media/'
26.421875
91
0.701656
4a0796563c70f9c21e5d266cbd94a3fcc30777af
1,630
py
Python
contest_manager/emails.py
zapme/contest-manager
462c1022faf869295d55c3676fdaf2c6917b0c4c
[ "BSD-4-Clause" ]
1
2021-05-05T05:31:32.000Z
2021-05-05T05:31:32.000Z
contest_manager/emails.py
zapme/contest-manager
462c1022faf869295d55c3676fdaf2c6917b0c4c
[ "BSD-4-Clause" ]
1
2021-03-23T16:22:50.000Z
2021-03-23T16:22:50.000Z
contest_manager/emails.py
zapme/contest-manager
462c1022faf869295d55c3676fdaf2c6917b0c4c
[ "BSD-4-Clause" ]
null
null
null
"""Contains e-mail templates for log submission receipts.""" OK_TEMPLATE = """We're happy to confirm that we have received your logs for {name}. Your receipt number for this particular submission is {receipt}. Please save this e-mail, which contains your receipt number, at least until the scores have been released for this contest. It contains important confirmation that you have submitted logs through our system in the unlikely event of a sudden loss of data. If you have made a mistake in submitting your log, please feel free to resubmit your logs using the same submission form. The prior entry will be replaced and only the last submission will be candidate for scoring. As a security consideration, we will send a notification to the email address left in the prior submission to let them know that their submission has been replaced. On behalf of the contest organizers, the Contest Manager thanks you for participating in the contest and hopes that you had great fun. 73, The WY4RC Contest Manager {time} """ DUP_TEMPLATE = """ We are letting you know that your log submission for {name} has been replaced by a new one. If this replacement is made by you, there is no need to take further action, and we thank you for providing corrected information. If you did not resubmit logs, someone else might have submitted logs on behalf of you without authorization. We encourage you submit your log immediately before the log due date, and provide the following receipt number to the site administrator *immediately* in order for them to take corrective action: {receipt}. 73 The WY4RC Contest Manager {time} """
36.222222
75
0.790798
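The templates above are plain str.format targets; a rendering sketch with hypothetical values:

# Fill the {name}, {receipt}, and {time} placeholders of OK_TEMPLATE.
from datetime import datetime, timezone
from contest_manager.emails import OK_TEMPLATE

body = OK_TEMPLATE.format(
    name='WY4RC Winter Sprint',       # illustrative contest name
    receipt='A1B2-C3D4',              # illustrative receipt number
    time=datetime.now(timezone.utc).isoformat(),
)
print(body)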
4a079706ec90b71698a6b567c4484d02b6289f15
4,900
py
Python
kylinpy/sqla_dialect.py
liuyonghengheng/kylinpy
61b92b96619f5c6d9f0f92ec08cb4d5cfa272d10
[ "MIT" ]
null
null
null
kylinpy/sqla_dialect.py
liuyonghengheng/kylinpy
61b92b96619f5c6d9f0f92ec08cb4d5cfa272d10
[ "MIT" ]
null
null
null
kylinpy/sqla_dialect.py
liuyonghengheng/kylinpy
61b92b96619f5c6d9f0f92ec08cb4d5cfa272d10
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import itertools

import sqlalchemy.exc
from sqlalchemy import pool
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler

from kylinpy.exceptions import NoSuchTableError
from kylinpy.kylindb import Connection
from kylinpy.utils.keywords import CALCITE_KEYWORDS
from kylinpy.utils.sqla_types import kylin_to_sqla

SUPERSET_KEYWORDS = set([
    '__timestamp',
])


class KylinIdentifierPreparer(compiler.IdentifierPreparer):
    compiler.IdentifierPreparer.reserved_words = \
        set(itertools.chain(*[[e.lower(), e] for e in CALCITE_KEYWORDS]))
    compiler.IdentifierPreparer.reserved_words.update(SUPERSET_KEYWORDS)

    def __init__(self, dialect, initial_quote='"',
                 final_quote=None, escape_quote='"', omit_schema=True):
        super(KylinIdentifierPreparer, self).__init__(
            dialect, initial_quote, final_quote, escape_quote, omit_schema,
        )

    def format_label(self, label, name=None):
        return self.quote(name or label.name)


class KylinSQLCompiler(compiler.SQLCompiler):
    _cached_metadata = set()

    def __init__(self, *args, **kwargs):
        super(KylinSQLCompiler, self).__init__(*args, **kwargs)

    def _compose_select_body(self, text, select, inner_columns, froms, byfrom, kwargs):
        text = super(KylinSQLCompiler, self)._compose_select_body(
            text, select, inner_columns, froms, byfrom, kwargs)
        return text

    def visit_column(self, *args, **kwargs):
        result = super(KylinSQLCompiler, self).visit_column(*args, **kwargs)
        return result

    def visit_label(self, *args, **kwargs):
        self.__class__._cached_metadata.add([c.name for c in args][0])
        result = super(KylinSQLCompiler, self).visit_label(*args, **kwargs)
        return result


class KylinDialect(default.DefaultDialect):
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        pass

    name = 'kylin'
    driver = 'kylin'

    statement_compiler = KylinSQLCompiler
    preparer = KylinIdentifierPreparer

    preexecute_pk_sequences = True
    supports_pk_autoincrement = True
    supports_sequences = True
    sequences_optional = True
    supports_native_decimal = True
    supports_default_values = True
    supports_native_boolean = True
    poolclass = pool.SingletonThreadPool
    supports_unicode_statements = True
    default_paramstyle = 'pyformat'

    def __init__(self, *args, **kwargs):
        super(KylinDialect, self).__init__(*args, **kwargs)

    @classmethod
    def dbapi(cls):
        return Connection

    def initialize(self, connection):
        self.server_version_info = None
        self.default_schema_name = None
        self.default_isolation_level = None
        self.returns_unicode_strings = True

    def create_connect_args(self, url):
        kwargs = {
            'host': url.host,
            'port': url.port or 7070,
            'username': url.username,
            'password': url.password,
            'project': url.database or 'default',
        }
        kwargs.update(url.query)
        return [[], kwargs]

    def do_execute(self, cursor, statement, parameters, context=None):
        super(KylinDialect, self).do_execute(cursor, statement, parameters, context)

    def get_table_names(self, connection, schema=None, **kw):
        conn = connection.connect()
        tables = conn.connection.connection.get_all_tables(schema)
        return tables

    def get_schema_names(self, connection, schema=None, **kw):
        conn = connection.connect()
        schemas = conn.connection.connection.get_all_schemas()
        return schemas

    def has_table(self, connection, table_name, schema=None):
        # disable check table exists
        return False

    def has_sequence(self, connection, sequence_name, schema=None):
        return False

    def get_columns(self, connection, table_name, schema=None, **kw):
        conn = connection.connect()
        try:
            columns = conn.connection.connection.get_table_source(table_name, schema).columns
            return [{
                'name': col.name,
                'type': kylin_to_sqla(col.datatype),
            } for col in columns]
        except NoSuchTableError:
            raise sqlalchemy.exc.NoSuchTableError

    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        return []

    def get_indexes(self, connection, table_name, schema=None, **kw):
        return []

    def get_view_names(self, connection, schema=None, **kw):
        return []

    def get_pk_constraint(self, conn, table_name, schema=None, **kw):
        return {}

    def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        return []
32.450331
93
0.682449
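A sketch of wiring the dialect above into SQLAlchemy; registry.register is standard SQLAlchemy, the URL shape follows create_connect_args (port defaults to 7070, the database part becomes the Kylin project), and the credentials are illustrative.

# Register the dialect under the 'kylin' scheme, then build an engine.
from sqlalchemy import create_engine
from sqlalchemy.dialects import registry

registry.register('kylin', 'kylinpy.sqla_dialect', 'KylinDialect')

engine = create_engine('kylin://ADMIN:KYLIN@localhost:7070/learn_kylin')
print(engine.dialect.name)  # 'kylin'; no connection is made until first use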
4a0797e578b5e4bbe6553cb65f685755a28a99a6
9,768
py
Python
src/tucuxi/sqs.py
unj-inovacao/tucuxi
104b1f178b38dcc625bd64643c0986a1cfee8f53
[ "MIT" ]
null
null
null
src/tucuxi/sqs.py
unj-inovacao/tucuxi
104b1f178b38dcc625bd64643c0986a1cfee8f53
[ "MIT" ]
7
2020-05-28T19:10:01.000Z
2020-08-14T17:34:13.000Z
src/tucuxi/sqs.py
unj-inovacao/tucuxi
104b1f178b38dcc625bd64643c0986a1cfee8f53
[ "MIT" ]
null
null
null
"""Some useful high-level methods to interact with AWS S3.""" import json import logging import re import time from functools import reduce from typing import Any from typing import Callable from typing import Dict from typing import Generator from typing import List from typing import Optional from typing import Tuple from boltons.iterutils import chunked_iter from .session import Session logger = logging.getLogger(__name__) class Sqs: """SQS Client.""" def __init__( self, queue_url: str, region: str = "us-east-1", session: Optional[Session] = None, ) -> None: """[summary] Args: queue_url (str): [description] region (str): [description]. Defaults to "us-east-1". session (Optional[Session]): [description]. Defaults to None. """ if not region: region = re.search(r"https://sqs\.(.*)\.a", queue_url).group( # type: ignore 1 ) if not session: session = Session() sess = session.get_session() self.client = sess.client("sqs", region_name=region) self.queue_url = queue_url def _batch( self, entries: Any, key: str, operation: Callable[..., Dict[str, str]], raise_on_error: bool = False, apply: Callable[..., Any] = lambda x: x, ) -> Dict[str, List[bool]]: """[summary] Args: entries (Any): [description] key (str): [description] operation (Callable[..., Dict[str, str]]): [description] raise_on_error (bool): [description]. Defaults to False. apply (Callable[..., Any]): [description]. Defaults to lambdax:x. Returns: Dict[str, List[bool]]: [description] Raises: Exception """ res_list = [] for i_chunk, chunk in enumerate(chunked_iter(entries, 10)): payload = [ {"Id": str(i_chunk * 10 + i), key: apply(m)} for i, m in enumerate(chunk) ] res = operation(QueueUrl=self.queue_url, Entries=payload) print(res) if raise_on_error and res.get("Failed"): raise (Exception) res_list.append(res) return reduce( lambda c, r: { key: c.get(key, []) + r.get(key, []) for key in ["Successful", "Failed"] }, res_list, # type: ignore ) def send_message(self, message: Any, delay: int = 10) -> Any: """[summary] Args: message (Any): [description] delay (int): [description]. Defaults to 10. Returns: Any: [description] """ logger.debug(f"Sending message to {self.queue_url}") return self.client.send_message( QueueUrl=self.queue_url, DelaySeconds=delay, MessageBody=json.dumps(message), ) def send_message_batch( self, messages: List[Any], raise_on_error: bool = False ) -> Dict[str, List[bool]]: """[summary] Args: messages (List[Any]): [description] raise_on_error (bool): [description]. Defaults to False. Returns: Dict[str, List[bool]]: [description] """ return self._batch( messages, "MessageBody", self.client.send_message_batch, raise_on_error, json.dumps, ) def listen( self, wait_time: int = 0, max_number_of_messages: int = 1, poll_interval: int = 30, auto_delete: bool = True, ) -> Generator[Tuple[str, Any], None, None]: """[summary] Args: wait_time (int): [description]. Defaults to 0. max_number_of_messages (int): [description]. Defaults to 1. poll_interval (int): [description]. Defaults to 30. auto_delete (bool): [description]. Defaults to True. Yields: Generator[tuple]: [description] """ # TODO Look for other packages to have ideas. Example, auto sending to error queue. 
logger.info(f"Starting to listen to {self.queue_url}") while True: # calling with WaitTimeSecconds of zero show the same behavior as # not specifiying a wait time, ie: short polling messages = self.client.receive_message( QueueUrl=self.queue_url, WaitTimeSeconds=wait_time, MaxNumberOfMessages=max_number_of_messages, ) if "Messages" in messages: logger.info("{} messages received".format(len(messages["Messages"]))) for m in messages["Messages"]: receipt_handle = m["ReceiptHandle"] m_body = m["Body"] # TODO Better exception handling try: params_dict = json.loads(m_body) except BaseException: logger.warning( "Unable to parse message - JSON is not formatted properly" ) continue logger.debug(f"Yielding message {receipt_handle}") if auto_delete: self.delete_message(receipt_handle) yield receipt_handle, params_dict else: if poll_interval: time.sleep(poll_interval) else: break def delete_message(self, receipt_handle: str) -> Any: """[summary] Args: receipt_handle (str): [description] Returns: Any: [description] """ logger.debug(f"Deleting message {receipt_handle} from {self.queue_url}") return self.client.delete_message( QueueUrl=self.queue_url, ReceiptHandle=receipt_handle ) def delete_message_batch( self, receipts: List[str], raise_on_error: bool = False ) -> Dict[str, List[bool]]: """[summary] Args: receipts (List[str]): [description] raise_on_error (bool): [description]. Defaults to False. Returns: Dict[str, List[bool]]: [description] """ return self._batch( receipts, "ReceiptHandle", self.client.delete_message_batch, raise_on_error ) # # TODO: Maybe remove original listen? # def listen_queue( # *args, sqs_session: Sqs, wait_time: int = 0, max_number_of_messages: int = 1, batch_size: int = 1, poll_interval: int = 30, # s3_session: Optional[Any] = None, error_queue: Optional[Sqs] = None, auto_delete: bool = True, destination_bucket: Optional[S3] = None, **kwargs): # """[summary] # Args: # sqs_session (Sqs): [description] # wait_time (int): [description]. Defaults to 0. # max_number_of_messages (int): [description]. Defaults to 1. # batch_size (int): [description]. Defaults to 1. # poll_interval (int): [description]. Defaults to 30. # s3_session (Optional[Any]): [description]. Defaults to None. # error_queue (Optional[Sqs]): [description]. Defaults to None. # auto_delete (bool): [description]. Defaults to True. # destination_bucket (Optional[S3]): [description]. Defaults to None. 
# """ # def func(f): # def w_f(*args, **kwargs): # while True: # messages = list() # for _ in range(batch_size): # message = sqs_session.sess.receive_message( # QueueUrl=sqs_session.queue_url, # WaitTimeSeconds=wait_time, # MaxNumberOfMessages=max_number_of_messages # ) # if "Messages" in message: # logger.info("{} messages received".format(len(messages["Messages"]))) # for m in messages["Messages"]: # receipt_handle = m["ReceiptHandle"] # m_body = m["Body"] # try: # if s3_session is not None: # message_content = s3_session.get_object(m_body) # else: # message_content = json.loads(m_body) # except Exception: # logger.warning( # "Unable to handle message", # stack_info=True # ) # continue # if auto_delete: # sqs_session.delete_message( # receipt_handle=receipt_handle # ) # messages.append((receipt_handle, message_content)) # else: # if poll_interval: # time.sleep(poll_interval) # else: # break # try: # f(*args, sqs_messages=messages, **kwargs) # except Exception as e: # logger.error(f"Exception {e} occurred", stack_info=True) # if error_queue is not None: # error_queue.send_message( # message=message, # ) # return w_f # return func
35.78022
156
0.507985
4a0798176672e89b1c53474567ecfd32fc237c5e
755
py
Python
data structures/linkedlist/2.6 palindrome.py
iFun/Algo
e9e2d42c72c595e0cd138dcb0150b6a1bdc7c073
[ "MIT" ]
null
null
null
data structures/linkedlist/2.6 palindrome.py
iFun/Algo
e9e2d42c72c595e0cd138dcb0150b6a1bdc7c073
[ "MIT" ]
null
null
null
data structures/linkedlist/2.6 palindrome.py
iFun/Algo
e9e2d42c72c595e0cd138dcb0150b6a1bdc7c073
[ "MIT" ]
null
null
null
# implement a function to check if a linked list is a palindrome
from linkedlist import *


def main():
    ll = linked_list()
    ll.add_node(1)
    ll.add_node(2)
    ll.add_node(3)
    ll.add_node(3)
    ll.add_node(2)
    ll.add_node(1)

    slow_node = ll.head
    fast_node = ll.head
    stack = []

    # push the first half onto a stack while the fast runner advances two nodes
    while fast_node is not None and fast_node.next is not None:
        stack.append(slow_node.data)
        slow_node = slow_node.next
        fast_node = fast_node.next.next

    # deal with when linkedlist is odd length: skip the middle node
    if fast_node is not None:
        slow_node = slow_node.next

    # the second half must mirror the stacked first half
    while slow_node is not None:
        if stack.pop() != slow_node.data:
            print("not palindrome")
            return False
        slow_node = slow_node.next

    print("palindrome")
    return True


main()
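
# For comparison, the same runner-pointer idea sketched on a plain Python
# list (illustrative only; independent of the linkedlist module above):
def is_palindrome_list(values):
    stack = []
    slow = fast = 0
    # push the first half while the fast index advances two steps at a time
    while fast < len(values) - 1:
        stack.append(values[slow])
        slow += 1
        fast += 2
    if fast == len(values) - 1:  # odd length: skip the middle element
        slow += 1
    while slow < len(values):
        if stack.pop() != values[slow]:
            return False
        slow += 1
    return True


assert is_palindrome_list([1, 2, 3, 3, 2, 1])
assert not is_palindrome_list([1, 2, 3])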
18.875
65
0.675497
4a079934f0030a2dfafb41d9ea059a56e42918f2
246,407
py
Python
openbmctool.py
bluerise/testmaster
87682d7f6e3aced7c68ed73f39a19cdd325a9aa6
[ "Apache-2.0" ]
3
2020-12-05T11:45:30.000Z
2021-11-28T03:15:02.000Z
openbmctool.py
bluerise/testmaster
87682d7f6e3aced7c68ed73f39a19cdd325a9aa6
[ "Apache-2.0" ]
4
2019-03-13T09:23:28.000Z
2022-02-28T10:06:16.000Z
openbmctool.py
bluerise/testmaster
87682d7f6e3aced7c68ed73f39a19cdd325a9aa6
[ "Apache-2.0" ]
4
2019-02-05T23:53:07.000Z
2020-12-05T11:45:33.000Z
#!/usr/bin/env python3
"""
 Copyright 2017,2019 IBM Corporation

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
"""
import argparse
import requests
import getpass
import json
import os
import sys  # used throughout for sys.exit and sys.platform
import urllib3
import time, datetime
import binascii
import subprocess
import platform
import zipfile
import tarfile
import tempfile
import hashlib
import re
import uuid
import ssl
import socket
import select
import http.client
from subprocess import check_output
import traceback

MAX_NBD_PACKET_SIZE = 131088
jsonHeader = {'Content-Type': 'application/json'}
xAuthHeader = {}
baseTimeout = 60
serverTypeMap = {
    'ActiveDirectory': 'active_directory',
    'OpenLDAP': 'openldap'
}


class NBDPipe:
    """Proxies dump data between the BMC's HTTPS socket and a local NBD server."""

    def openHTTPSocket(self, args):
        try:
            _create_unverified_https_context = ssl._create_unverified_context
        except AttributeError:
            # Legacy Python that doesn't verify HTTPS certificates by default
            pass
        else:
            # Handle target environment that doesn't support HTTPS verification
            ssl._create_default_https_context = _create_unverified_https_context

        token = gettoken(args)
        self.conn = http.client.HTTPSConnection(args.host, port=443)
        uri = "/redfish/v1/Systems/system/LogServices/Dump/attachment/" + args.dumpNum
        self.conn.request("GET", uri, headers={"X-Auth-Token": token})

    def openTCPSocket(self):
        # Create a TCP/IP socket
        self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect the socket to the port where the server is listening
        server_address = ('localhost', 1043)
        self.tcp.connect(server_address)

    def waitformessage(self):
        inputs = [self.conn.sock, self.tcp]
        outputs = []
        message_queues = {}
        while True:
            readable, writable, exceptional = select.select(
                inputs, outputs, inputs)
            for s in readable:
                if s is self.conn.sock:
                    data = self.conn.sock.recv(MAX_NBD_PACKET_SIZE)
                    print("<<HTTP")
                    if data:
                        self.tcp.send(data)
                    else:
                        print("BMC Closed the connection")
                        self.conn.close()
                        self.tcp.close()
                        sys.exit(1)
                elif s is self.tcp:
                    data = self.tcp.recv(MAX_NBD_PACKET_SIZE)
                    print(">>TCP")
                    if data:
                        self.conn.sock.send(data)
                    else:
                        print("NBD server closed the connection")
                        self.conn.sock.close()
                        self.tcp.close()
                        sys.exit(1)
            for s in exceptional:
                inputs.remove(s)
                print("Exceptional closing the socket")
                s.close()


def getsize(host, args, session):
    url = "https://" + host + "/redfish/v1/Systems/system/LogServices/Dump/Entries/" + str(args.dumpNum)
    try:
        resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout)
        if resp.status_code == 200:
            size = resp.json()['AdditionalDataSizeBytes']
            return size
        else:
            return "Failed to get size"
    except(requests.exceptions.Timeout):
        return connectionErrHandler(args.json, "Timeout", None)
    except(requests.exceptions.ConnectionError) as err:
        return connectionErrHandler(args.json, "ConnectionError", err)


def gettoken(args):
    mysess = requests.session()
    resp = mysess.post('https://' + args.host + '/login', headers=jsonHeader,
                       json={"data": [args.user, args.PW]}, verify=False)
    if resp.status_code == 200:
        cookie = resp.headers['Set-Cookie']
        match = re.search(r'SESSION=(\w+);', cookie)
        return match.group(1)


def get_pid(name):
    try:
        pid = map(int, check_output(["pidof",
"-s",name])) except Exception: pid = 0 return pid def findThisProcess( process_name ): ps = subprocess.Popen("ps -eaf | grep "+process_name, shell=True, stdout=subprocess.PIPE) output = ps.stdout.read() ps.stdout.close() ps.wait() pid = get_pid(process_name) return output def isThisProcessRunning( process_name ): pid = get_pid(process_name) if (pid == 0 ): return False else: return True def NBDSetup(host,args,session): user=os.getenv("SUDO_USER") if user is None: path = os.getcwd() nbdServerPath = path + "/nbd-server" if not os.path.exists(nbdServerPath): print("Error: this program did not run as sudo!\nplease copy nbd-server to current directory and run script again") exit() if isThisProcessRunning('nbd-server') == True: print("nbd-server already Running! killing the nbd-server") os.system('killall nbd-server') if (args.dumpSaveLoc is not None): if(os.path.exists(args.dumpSaveLoc)): print("Error: File already exists.") exit() fp= open(args.dumpSaveLoc,"w") sizeInBytes = getsize(host,args,session) #Round off size to mutiples of 1024 size = int(sizeInBytes) mod = size % 1024 if mod : roundoff = 1024 - mod size = size + roundoff cmd = 'chmod 777 ' + args.dumpSaveLoc os.system(cmd) #Run truncate to create file with given size cmd = 'truncate -s ' + str(size) + ' '+ args.dumpSaveLoc os.system(cmd) if user is None: cmd = './nbd-server 1043 '+ args.dumpSaveLoc else: cmd = 'nbd-server 1043 '+ args.dumpSaveLoc os.system(cmd) def hilight(textToColor, color, bold): """ Used to add highlights to various text for displaying in a terminal @param textToColor: string, the text to be colored @param color: string, used to color the text red or green @param bold: boolean, used to bold the textToColor @return: Buffered reader containing the modified string. """ if(sys.platform.__contains__("win")): if(color == "red"): os.system('color 04') elif(color == "green"): os.system('color 02') else: os.system('color') #reset to default return textToColor else: attr = [] if(color == "red"): attr.append('31') elif(color == "green"): attr.append('32') else: attr.append('0') if bold: attr.append('1') else: attr.append('0') return '\x1b[%sm%s\x1b[0m' % (';'.join(attr),textToColor) def connectionErrHandler(jsonFormat, errorStr, err): """ Error handler various connection errors to bmcs @param jsonFormat: boolean, used to output in json format with an error code. @param errorStr: string, used to color the text red or green @param err: string, the text from the exception """ if errorStr == "Timeout": if not jsonFormat: return("FQPSPIN0000M: Connection timed out. Ensure you have network connectivity to the bmc") else: conerror = {} conerror['CommonEventID'] = 'FQPSPIN0000M' conerror['sensor']="N/A" conerror['state']="N/A" conerror['additionalDetails'] = "N/A" conerror['Message']="Connection timed out. Ensure you have network connectivity to the BMC" conerror['LengthyDescription'] = "While trying to establish a connection with the specified BMC, the BMC failed to respond in adequate time. Verify the BMC is functioning properly, and the network connectivity to the BMC is stable." conerror['Serviceable']="Yes" conerror['CallHomeCandidate']= "No" conerror['Severity'] = "Critical" conerror['EventType'] = "Communication Failure/Timeout" conerror['VMMigrationFlag'] = "Yes" conerror["AffectedSubsystem"] = "Interconnect (Networking)" conerror["timestamp"] = str(int(time.time())) conerror["UserAction"] = "Verify network connectivity between the two systems and the bmc is functional." 
            eventdict = {}
            eventdict['event0'] = conerror
            eventdict['numAlerts'] = '1'

            errorMessageStr = json.dumps(eventdict, sort_keys=True,
                                         indent=4, separators=(',', ': '), ensure_ascii=False)
            return(errorMessageStr)
    elif errorStr == "ConnectionError":
        if not jsonFormat:
            return("FQPSPIN0001M: " + str(err))
        else:
            conerror = {}
            conerror['CommonEventID'] = 'FQPSPIN0001M'
            conerror['sensor'] = "N/A"
            conerror['state'] = "N/A"
            conerror['additionalDetails'] = str(err)
            conerror['Message'] = "Connection Error. View additional details for more information"
            conerror['LengthyDescription'] = "A connection error to the specified BMC occurred and additional details are provided. Review these details to resolve the issue."
            conerror['Serviceable'] = "Yes"
            conerror['CallHomeCandidate'] = "No"
            conerror['Severity'] = "Critical"
            conerror['EventType'] = "Communication Failure/Timeout"
            conerror['VMMigrationFlag'] = "Yes"
            conerror["AffectedSubsystem"] = "Interconnect (Networking)"
            conerror["timestamp"] = str(int(time.time()))
            conerror["UserAction"] = "Correct the issue highlighted in additional details and try again"

            eventdict = {}
            eventdict['event0'] = conerror
            eventdict['numAlerts'] = '1'

            errorMessageStr = json.dumps(eventdict, sort_keys=True,
                                         indent=4, separators=(',', ': '), ensure_ascii=False)
            return(errorMessageStr)
    else:
        return("Unknown Error: " + str(err))


def setColWidth(keylist, numCols, dictForOutput, colNames):
    """
         Sets the output width of the columns to display

         @param keylist: list, list of strings representing the keys for the dictForOutput
         @param numcols: the total number of columns in the final output
         @param dictForOutput: dictionary, contains the information to print to the screen
         @param colNames: list, The strings to use for the column headings, in order of the keylist
         @return: A list of the column widths for each respective column.
""" colWidths = [] for x in range(0, numCols): colWidths.append(0) for key in dictForOutput: for x in range(0, numCols): colWidths[x] = max(colWidths[x], len(str(dictForOutput[key][keylist[x]]))) for x in range(0, numCols): colWidths[x] = max(colWidths[x], len(colNames[x])) +2 return colWidths def loadPolicyTable(pathToPolicyTable): """ loads a json based policy table into a dictionary @param value: boolean, the value to convert @return: A string of "Yes" or "No" """ policyTable = {} if(os.path.exists(pathToPolicyTable)): with open(pathToPolicyTable, 'r') as stream: try: contents =json.load(stream) policyTable = contents['events'] except Exception as err: print(err) return policyTable def boolToString(value): """ converts a boolean value to a human readable string value @param value: boolean, the value to convert @return: A string of "Yes" or "No" """ if(value): return "Yes" else: return "No" def stringToInt(text): """ returns an integer if the string can be converted, otherwise returns the string @param text: the string to try to convert to an integer """ if text.isdigit(): return int(text) else: return text def naturalSort(text): """ provides a way to naturally sort a list @param text: the key to convert for sorting @return list containing the broken up string parts by integers and strings """ stringPartList = [] for c in re.split('(\d+)', text): stringPartList.append(stringToInt(c)) return stringPartList def tableDisplay(keylist, colNames, output): """ Logs into the BMC and creates a session @param keylist: list, keys for the output dictionary, ordered by colNames @param colNames: Names for the Table of the columns @param output: The dictionary of data to display @return: Session object """ colWidth = setColWidth(keylist, len(colNames), output, colNames) row = "" outputText = "" for i in range(len(colNames)): if (i != 0): row = row + "| " row = row + colNames[i].ljust(colWidth[i]) outputText += row + "\n" output_keys = list(output.keys()) output_keys.sort(key=naturalSort) for key in output_keys: row = "" for i in range(len(keylist)): if (i != 0): row = row + "| " row = row + output[key][keylist[i]].ljust(colWidth[i]) outputText += row + "\n" return outputText def checkFWactivation(host, args, session): """ Checks the software inventory for an image that is being activated. @return: True if an image is being activated, false is no activations are happening """ url="https://"+host+"/xyz/openbmc_project/software/enumerate" try: resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): print(connectionErrHandler(args.json, "Timeout", None)) return(True) except(requests.exceptions.ConnectionError) as err: print( connectionErrHandler(args.json, "ConnectionError", err)) return True fwInfo = resp.json()['data'] for key in fwInfo: if 'Activation' in fwInfo[key]: if 'Activating' in fwInfo[key]['Activation'] or 'Activating' in fwInfo[key]['RequestedActivation']: return True return False def login(host, username, pw,jsonFormat, allowExpiredPassword): """ Logs into the BMC and creates a session @param host: string, the hostname or IP address of the bmc to log into @param username: The user name for the bmc to log into @param pw: The password for the BMC to log into @param jsonFormat: boolean, flag that will only allow relevant data from user command to be display. This function becomes silent when set to true. 
         @param allowExpiredPassword: true, if the requested operation should
             be allowed when the password is expired
         @return: Session object
    """
    if (jsonFormat == False):
        print("Attempting login...")
    mysess = requests.session()
    try:
        r = mysess.post('https://' + host + '/login', headers=jsonHeader,
                        json={"data": [username, pw]}, verify=False, timeout=baseTimeout)
        if r.status_code == 200:
            cookie = r.headers['Set-Cookie']
            match = re.search(r'SESSION=(\w+);', cookie)
            if match:
                xAuthHeader['X-Auth-Token'] = match.group(1)
                jsonHeader.update(xAuthHeader)
            loginMessage = json.loads(r.text)
            if (loginMessage['status'] != "ok"):
                print(loginMessage["data"]["description"].encode('utf-8'))
                sys.exit(1)
            if (('extendedMessage' in r.json()) and
                    ('The password for this account must be changed' in r.json()['extendedMessage'])):
                if not allowExpiredPassword:
                    print("The password for this system has expired and must be changed" +
                          "\nsee openbmctool.py set_password --help")
                    logout(host, username, pw, mysess, jsonFormat)
                    sys.exit(1)
#             if(sys.version_info < (3,0)):
#                 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#             if sys.version_info >= (3,0):
#                 requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
            return mysess
        else:
            return None
    except(requests.exceptions.Timeout):
        return (connectionErrHandler(jsonFormat, "Timeout", None))
    except(requests.exceptions.ConnectionError) as err:
        return (connectionErrHandler(jsonFormat, "ConnectionError", err))


def logout(host, username, pw, session, jsonFormat):
    """
         Logs out of the bmc and terminates the session

         @param host: string, the hostname or IP address of the bmc to log out of
         @param username: The user name for the bmc to log out of
         @param pw: The password for the BMC to log out of
         @param session: the active session to use
         @param jsonFormat: boolean, flag that will only allow relevant data
             from user command to be displayed. This function becomes silent when set to true.
    """
    try:
        r = session.post('https://' + host + '/logout', headers=jsonHeader,
                         json={"data": [username, pw]}, verify=False, timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        print(connectionErrHandler(jsonFormat, "Timeout", None))

    if (jsonFormat == False):
        if r.status_code == 200:
            print('User ' + username + ' has been logged out')


def fru(host, args, session):
    """
         prints out the system inventory.
deprecated see fruPrint and fruList @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ #url="https://"+host+"/org/openbmc/inventory/system/chassis/enumerate" #print(url) #res = session.get(url, headers=httpHeader, verify=False) #print(res.text) #sample = res.text #inv_list = json.loads(sample)["data"] url="https://"+host+"/xyz/openbmc_project/inventory/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) sample = res.text # inv_list.update(json.loads(sample)["data"]) # # #determine column width's # colNames = ["FRU Name", "FRU Type", "Has Fault", "Is FRU", "Present", "Version"] # colWidths = setColWidth(["FRU Name", "fru_type", "fault", "is_fru", "present", "version"], 6, inv_list, colNames) # # print("FRU Name".ljust(colWidths[0])+ "FRU Type".ljust(colWidths[1]) + "Has Fault".ljust(colWidths[2]) + "Is FRU".ljust(colWidths[3])+ # "Present".ljust(colWidths[4]) + "Version".ljust(colWidths[5])) # format the output # for key in sorted(inv_list.keys()): # keyParts = key.split("/") # isFRU = "True" if (inv_list[key]["is_fru"]==1) else "False" # # fruEntry = (keyParts[len(keyParts) - 1].ljust(colWidths[0]) + inv_list[key]["fru_type"].ljust(colWidths[1])+ # inv_list[key]["fault"].ljust(colWidths[2])+isFRU.ljust(colWidths[3])+ # inv_list[key]["present"].ljust(colWidths[4])+ inv_list[key]["version"].ljust(colWidths[5])) # if(isTTY): # if(inv_list[key]["is_fru"] == 1): # color = "green" # bold = True # else: # color='black' # bold = False # fruEntry = hilight(fruEntry, color, bold) # print (fruEntry) return sample def fruPrint(host, args, session): """ prints out all inventory @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @return returns the total fru list. """ url="https://"+host+"/xyz/openbmc_project/inventory/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) frulist={} # print(res.text) if res.status_code==200: frulist['Hardware'] = res.json()['data'] else: if not args.json: return "Error retrieving the system inventory. BMC message: {msg}".format(msg=res.json()['message']) else: return res.json() url="https://"+host+"/xyz/openbmc_project/software/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) # print(res.text) if res.status_code==200: frulist['Software'] = res.json()['data'] else: if not args.json(): return "Error retrieving the system inventory. 
BMC message: {msg}".format(msg=res.json()['message']) else: return res.json() return frulist def fruList(host, args, session): """ prints out all inventory or only a specific specified item @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(args.items==True): return fruPrint(host, args, session) else: return fruPrint(host, args, session) def fruStatus(host, args, session): """ prints out the status of all FRUs @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/inventory/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) # print(res.text) frulist = res.json()['data'] frus = {} for key in frulist: component = frulist[key] isFru = False present = False func = False hasSels = False keyPieces = key.split('/') fruName = keyPieces[-1] if 'core' in fruName: #associate cores to cpus fruName = keyPieces[-2] + '-' + keyPieces[-1] if 'Functional' in component: if('Present' in component): if 'FieldReplaceable' in component: if component['FieldReplaceable'] == 1: isFru = True if "fan" in fruName: isFru = True; if component['Present'] == 1: present = True if component['Functional'] == 1: func = True if ((key + "/fault") in frulist): hasSels = True; if args.verbose: if hasSels: loglist = [] faults = frulist[key+"/fault"]['endpoints'] for item in faults: loglist.append(item.split('/')[-1]) frus[fruName] = {"compName": fruName, "Functional": boolToString(func), "Present":boolToString(present), "IsFru": boolToString(isFru), "selList": ', '.join(loglist).strip() } else: frus[fruName] = {"compName": fruName, "Functional": boolToString(func), "Present":boolToString(present), "IsFru": boolToString(isFru), "selList": "None" } else: frus[fruName] = {"compName": fruName, "Functional": boolToString(func), "Present":boolToString(present), "IsFru": boolToString(isFru), "hasSEL": boolToString(hasSels) } elif "power_supply" in fruName or "powersupply" in fruName: if component['Present'] ==1: present = True isFru = True if ((key + "/fault") in frulist): hasSels = True; if args.verbose: if hasSels: loglist = [] faults = frulist[key+"/fault"]['endpoints'] for item in faults: loglist.append(item.split('/')[-1]) frus[fruName] = {"compName": fruName, "Functional": "No", "Present":boolToString(present), "IsFru": boolToString(isFru), "selList": ', '.join(loglist).strip() } else: frus[fruName] = {"compName": fruName, "Functional": "Yes", "Present":boolToString(present), "IsFru": boolToString(isFru), "selList": "None" } else: frus[fruName] = {"compName": fruName, "Functional": boolToString(not hasSels), "Present":boolToString(present), "IsFru": boolToString(isFru), "hasSEL": boolToString(hasSels) } if not args.json: if not args.verbose: colNames = ["Component", "Is a FRU", "Present", "Functional", "Has Logs"] keylist = ["compName", "IsFru", "Present", "Functional", "hasSEL"] else: colNames = ["Component", "Is a FRU", "Present", "Functional", "Assoc. 
Log Number(s)"] keylist = ["compName", "IsFru", "Present", "Functional", "selList"] return tableDisplay(keylist, colNames, frus) else: return str(json.dumps(frus, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)) def sensor(host, args, session): """ prints out all sensors @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the sensor sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/sensors/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) #Get OCC status url="https://"+host+"/org/open_power/control/enumerate" try: occres = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) if not args.json: colNames = ['sensor', 'type', 'units', 'value', 'target'] sensors = res.json()["data"] output = {} for key in sensors: senDict = {} keyparts = key.split("/") # Associations like the following also show up here: # /xyz/openbmc_project/sensors/<type>/<name>/<assoc-name> # Skip them. # Note: keyparts[0] = '' which is why there are 7 segments. if len(keyparts) > 6: continue senDict['sensorName'] = keyparts[-1] senDict['type'] = keyparts[-2] try: senDict['units'] = sensors[key]['Unit'].split('.')[-1] except KeyError: senDict['units'] = "N/A" if('Scale' in sensors[key]): scale = 10 ** sensors[key]['Scale'] else: scale = 1 try: senDict['value'] = str(sensors[key]['Value'] * scale) except KeyError: if 'value' in sensors[key]: senDict['value'] = sensors[key]['value'] else: senDict['value'] = "N/A" if 'Target' in sensors[key]: senDict['target'] = str(sensors[key]['Target']) else: senDict['target'] = 'N/A' output[senDict['sensorName']] = senDict occstatus = occres.json()["data"] if '/org/open_power/control/occ0' in occstatus: occ0 = occstatus["/org/open_power/control/occ0"]['OccActive'] if occ0 == 1: occ0 = 'Active' else: occ0 = 'Inactive' output['OCC0'] = {'sensorName':'OCC0', 'type': 'Discrete', 'units': 'N/A', 'value': occ0, 'target': 'Active'} occ1 = occstatus["/org/open_power/control/occ1"]['OccActive'] if occ1 == 1: occ1 = 'Active' else: occ1 = 'Inactive' output['OCC1'] = {'sensorName':'OCC1', 'type': 'Discrete', 'units': 'N/A', 'value': occ0, 'target': 'Active'} else: output['OCC0'] = {'sensorName':'OCC0', 'type': 'Discrete', 'units': 'N/A', 'value': 'Inactive', 'target': 'Inactive'} output['OCC1'] = {'sensorName':'OCC1', 'type': 'Discrete', 'units': 'N/A', 'value': 'Inactive', 'target': 'Inactive'} keylist = ['sensorName', 'type', 'units', 'value', 'target'] return tableDisplay(keylist, colNames, output) else: return res.text + occres.text def sel(host, args, session): """ prints out the bmc alerts @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the sel sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/logging/entry/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): 
return(connectionErrHandler(args.json, "Timeout", None)) return res.text def parseESEL(args, eselRAW): """ parses the esel data and gets predetermined search terms @param eselRAW: string, the raw esel string from the bmc @return: A dictionary containing the quick snapshot data unless args.fullEsel is listed then a full PEL log is returned """ eselParts = {} esel_bin = binascii.unhexlify(''.join(eselRAW.split()[16:])) #search terms contains the search term as the key and the return dictionary key as it's value searchTerms = { 'Signature Description':'signatureDescription', 'devdesc':'devdesc', 'Callout type': 'calloutType', 'Procedure':'procedure', 'Sensor Type': 'sensorType'} uniqueID = str(uuid.uuid4()) eselBinPath = tempfile.gettempdir() + os.sep + uniqueID + 'esel.bin' with open(eselBinPath, 'wb') as f: f.write(esel_bin) errlPath = "" #use the right errl file for the machine architecture arch = platform.machine() if(arch =='x86_64' or arch =='AMD64'): if os.path.exists('/opt/ibm/ras/bin/x86_64/errl'): errlPath = '/opt/ibm/ras/bin/x86_64/errl' elif os.path.exists('errl/x86_64/errl'): errlPath = 'errl/x86_64/errl' else: errlPath = 'x86_64/errl' elif (platform.machine()=='ppc64le'): if os.path.exists('/opt/ibm/ras/bin/ppc64le/errl'): errlPath = '/opt/ibm/ras/bin/ppc64le/errl' elif os.path.exists('errl/ppc64le/errl'): errlPath = 'errl/ppc64le/errl' else: errlPath = 'ppc64le/errl' else: print("machine architecture not supported for parsing eSELs") return eselParts if(os.path.exists(errlPath)): output= subprocess.check_output([errlPath, '-d', '--file='+eselBinPath]).decode('utf-8') # output = proc.communicate()[0] lines = output.split('\n') if(hasattr(args, 'fullEsel')): return output for i in range(0, len(lines)): lineParts = lines[i].split(':') if(len(lineParts)>1): #ignore multi lines, output formatting lines, and other information for term in searchTerms: if(term in lineParts[0]): temp = lines[i][lines[i].find(':')+1:].strip()[:-1].strip() if lines[i+1].find(':') != -1: if (len(lines[i+1].split(':')[0][1:].strip())==0): while(len(lines[i][:lines[i].find(':')].strip())>2): #has multiple lines, process and update line counter if((i+1) <= len(lines)): i+=1 else: i=i-1 break #Append the content from the next line removing the pretty display characters #Finds the first colon then starts 2 characters after, then removes all whitespace temp = temp + lines[i][lines[i].find(':')+2:].strip()[:-1].strip()[:-1].strip() if(searchTerms[term] in eselParts): eselParts[searchTerms[term]] = eselParts[searchTerms[term]] + ", " + temp else: eselParts[searchTerms[term]] = temp os.remove(eselBinPath) else: print("errl file cannot be found") return eselParts def getESELSeverity(esel): """ Finds the severity type in an eSEL from the User Header section. @param esel - the eSEL data @return severity - e.g. 
'Critical' """ # everything but 1 and 2 are Critical # '1': 'recovered', # '2': 'predictive', # '4': 'unrecoverable', # '5': 'critical', # '6': 'diagnostic', # '7': 'symptom' severities = { '1': 'Informational', '2': 'Warning' } try: headerPosition = esel.index('55 48') # 'UH' # The severity is the last byte in the 8 byte section (a byte is ' bb') severity = esel[headerPosition:headerPosition+32].split(' ')[-1] type = severity[0] except ValueError: print("Could not find severity value in UH section in eSEL") type = 'x'; return severities.get(type, 'Critical') def sortSELs(events): """ sorts the sels by timestamp, then log entry number @param events: Dictionary containing events @return: list containing a list of the ordered log entries, and dictionary of keys """ logNumList = [] timestampList = [] eventKeyDict = {} eventsWithTimestamp = {} logNum2events = {} for key in events: if key == 'numAlerts': continue if 'callout' in key: continue timestamp = (events[key]['timestamp']) if timestamp not in timestampList: eventsWithTimestamp[timestamp] = [events[key]['logNum']] else: eventsWithTimestamp[timestamp].append(events[key]['logNum']) #map logNumbers to the event dictionary keys eventKeyDict[str(events[key]['logNum'])] = key timestampList = list(eventsWithTimestamp.keys()) timestampList.sort() for ts in timestampList: if len(eventsWithTimestamp[ts]) > 1: tmplist = eventsWithTimestamp[ts] tmplist.sort() logNumList = logNumList + tmplist else: logNumList = logNumList + eventsWithTimestamp[ts] return [logNumList, eventKeyDict] def parseAlerts(policyTable, selEntries, args): """ parses alerts in the IBM CER format, using an IBM policy Table @param policyTable: dictionary, the policy table entries @param selEntries: dictionary, the alerts retrieved from the bmc @return: A dictionary of the parsed entries, in chronological order """ eventDict = {} eventNum ="" count = 0 esel = "" eselParts = {} i2cdevice= "" eselSeverity = None 'prepare and sort the event entries' sels = {} for key in selEntries: if '/xyz/openbmc_project/logging/entry/' not in key: continue if 'callout' not in key: sels[key] = selEntries[key] sels[key]['logNum'] = key.split('/')[-1] sels[key]['timestamp'] = selEntries[key]['Timestamp'] sortedEntries = sortSELs(sels) logNumList = sortedEntries[0] eventKeyDict = sortedEntries[1] for logNum in logNumList: key = eventKeyDict[logNum] hasEsel=False i2creadFail = False if 'callout' in key: continue else: messageID = str(selEntries[key]['Message']) addDataPiece = selEntries[key]['AdditionalData'] calloutIndex = 0 calloutFound = False for i in range(len(addDataPiece)): if("CALLOUT_INVENTORY_PATH" in addDataPiece[i]): calloutIndex = i calloutFound = True fruCallout = str(addDataPiece[calloutIndex]).split('=')[1] if("CALLOUT_DEVICE_PATH" in addDataPiece[i]): i2creadFail = True fruCallout = str(addDataPiece[calloutIndex]).split('=')[1] # Fall back to "I2C"/"FSI" if dev path isn't in policy table if (messageID + '||' + fruCallout) not in policyTable: i2cdevice = str(addDataPiece[i]).strip().split('=')[1] i2cdevice = '/'.join(i2cdevice.split('/')[-4:]) if 'fsi' in str(addDataPiece[calloutIndex]).split('=')[1]: fruCallout = 'FSI' else: fruCallout = 'I2C' calloutFound = True if("CALLOUT_GPIO_NUM" in addDataPiece[i]): if not calloutFound: fruCallout = 'GPIO' calloutFound = True if("CALLOUT_IIC_BUS" in addDataPiece[i]): if not calloutFound: fruCallout = "I2C" calloutFound = True if("CALLOUT_IPMI_SENSOR_NUM" in addDataPiece[i]): if not calloutFound: fruCallout = "IPMI" calloutFound = True if("ESEL" 
in addDataPiece[i]): esel = str(addDataPiece[i]).strip().split('=')[1] eselSeverity = getESELSeverity(esel) if args.devdebug: eselParts = parseESEL(args, esel) hasEsel=True if("GPU" in addDataPiece[i]): fruCallout = '/xyz/openbmc_project/inventory/system/chassis/motherboard/gpu' + str(addDataPiece[i]).strip()[-1] calloutFound = True if("PROCEDURE" in addDataPiece[i]): fruCallout = str(hex(int(str(addDataPiece[i]).split('=')[1])))[2:] calloutFound = True if("RAIL_NAME" in addDataPiece[i]): calloutFound=True fruCallout = str(addDataPiece[i]).split('=')[1].strip() if("INPUT_NAME" in addDataPiece[i]): calloutFound=True fruCallout = str(addDataPiece[i]).split('=')[1].strip() if("SENSOR_TYPE" in addDataPiece[i]): calloutFound=True fruCallout = str(addDataPiece[i]).split('=')[1].strip() if(calloutFound): if fruCallout.strip() != "": policyKey = messageID +"||" + fruCallout # Also use the severity for hostboot errors if eselSeverity and messageID == 'org.open_power.Host.Error.Event': policyKey += '||' + eselSeverity # if not in the table, fall back to the original key if policyKey not in policyTable: policyKey = policyKey.replace('||'+eselSeverity, '') if policyKey not in policyTable: policyKey = messageID else: policyKey = messageID else: policyKey = messageID event = {} eventNum = str(count) if policyKey in policyTable: for pkey in policyTable[policyKey]: if(type(policyTable[policyKey][pkey])== bool): event[pkey] = boolToString(policyTable[policyKey][pkey]) else: if (i2creadFail and pkey == 'Message'): event[pkey] = policyTable[policyKey][pkey] + ' ' +i2cdevice else: event[pkey] = policyTable[policyKey][pkey] event['timestamp'] = selEntries[key]['Timestamp'] event['resolved'] = bool(selEntries[key]['Resolved']) if(hasEsel): if args.devdebug: event['eselParts'] = eselParts event['raweSEL'] = esel event['logNum'] = key.split('/')[-1] eventDict['event' + eventNum] = event else: severity = str(selEntries[key]['Severity']).split('.')[-1] if severity == 'Error': severity = 'Critical' eventDict['event'+eventNum] = {} eventDict['event' + eventNum]['error'] = "error: Not found in policy table: " + policyKey eventDict['event' + eventNum]['timestamp'] = selEntries[key]['Timestamp'] eventDict['event' + eventNum]['Severity'] = severity if(hasEsel): if args.devdebug: eventDict['event' +eventNum]['eselParts'] = eselParts eventDict['event' +eventNum]['raweSEL'] = esel eventDict['event' +eventNum]['logNum'] = key.split('/')[-1] eventDict['event' +eventNum]['resolved'] = bool(selEntries[key]['Resolved']) count += 1 return eventDict def selDisplay(events, args): """ displays alerts in human readable format @param events: Dictionary containing events @return: """ activeAlerts = [] historyAlerts = [] sortedEntries = sortSELs(events) logNumList = sortedEntries[0] eventKeyDict = sortedEntries[1] keylist = ['Entry', 'ID', 'Timestamp', 'Serviceable', 'Severity','Message'] if(args.devdebug): colNames = ['Entry', 'ID', 'Timestamp', 'Serviceable', 'Severity','Message', 'eSEL contents'] keylist.append('eSEL') else: colNames = ['Entry', 'ID', 'Timestamp', 'Serviceable', 'Severity', 'Message'] for log in logNumList: selDict = {} alert = events[eventKeyDict[str(log)]] if('error' in alert): selDict['Entry'] = alert['logNum'] selDict['ID'] = 'Unknown' selDict['Timestamp'] = datetime.datetime.fromtimestamp(int(alert['timestamp']/1000)).strftime("%Y-%m-%d %H:%M:%S") msg = alert['error'] polMsg = msg.split("policy table:")[0] msg = msg.split("policy table:")[1] msgPieces = msg.split("||") err = msgPieces[0] 
if(err.find("org.open_power.")!=-1): err = err.split("org.open_power.")[1] elif(err.find("xyz.openbmc_project.")!=-1): err = err.split("xyz.openbmc_project.")[1] else: err = msgPieces[0] callout = "" if len(msgPieces) >1: callout = msgPieces[1] if(callout.find("/org/open_power/")!=-1): callout = callout.split("/org/open_power/")[1] elif(callout.find("/xyz/openbmc_project/")!=-1): callout = callout.split("/xyz/openbmc_project/")[1] else: callout = msgPieces[1] selDict['Message'] = polMsg +"policy table: "+ err + "||" + callout selDict['Serviceable'] = 'Unknown' selDict['Severity'] = alert['Severity'] else: selDict['Entry'] = alert['logNum'] selDict['ID'] = alert['CommonEventID'] selDict['Timestamp'] = datetime.datetime.fromtimestamp(int(alert['timestamp']/1000)).strftime("%Y-%m-%d %H:%M:%S") selDict['Message'] = alert['Message'] selDict['Serviceable'] = alert['Serviceable'] selDict['Severity'] = alert['Severity'] eselOrder = ['refCode','signatureDescription', 'eselType', 'devdesc', 'calloutType', 'procedure'] if ('eselParts' in alert and args.devdebug): eselOutput = "" for item in eselOrder: if item in alert['eselParts']: eselOutput = eselOutput + item + ": " + alert['eselParts'][item] + " | " selDict['eSEL'] = eselOutput else: if args.devdebug: selDict['eSEL'] = "None" if not alert['resolved']: activeAlerts.append(selDict) else: historyAlerts.append(selDict) mergedOutput = activeAlerts + historyAlerts colWidth = setColWidth(keylist, len(colNames), dict(enumerate(mergedOutput)), colNames) output = "" if(len(activeAlerts)>0): row = "" output +="----Active Alerts----\n" for i in range(0, len(colNames)): if i!=0: row =row + "| " row = row + colNames[i].ljust(colWidth[i]) output += row + "\n" for i in range(0,len(activeAlerts)): row = "" for j in range(len(activeAlerts[i])): if (j != 0): row = row + "| " row = row + activeAlerts[i][keylist[j]].ljust(colWidth[j]) output += row + "\n" if(len(historyAlerts)>0): row = "" output+= "----Historical Alerts----\n" for i in range(len(colNames)): if i!=0: row =row + "| " row = row + colNames[i].ljust(colWidth[i]) output += row + "\n" for i in range(0, len(historyAlerts)): row = "" for j in range(len(historyAlerts[i])): if (j != 0): row = row + "| " row = row + historyAlerts[i][keylist[j]].ljust(colWidth[j]) output += row + "\n" # print(events[eventKeyDict[str(log)]]) return output def selPrint(host, args, session): """ prints out all bmc alerts @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(args.policyTableLoc is None): if os.path.exists('policyTable.json'): ptableLoc = "policyTable.json" elif os.path.exists('/opt/ibm/ras/lib/policyTable.json'): ptableLoc = '/opt/ibm/ras/lib/policyTable.json' else: ptableLoc = 'lib/policyTable.json' else: ptableLoc = args.policyTableLoc policyTable = loadPolicyTable(ptableLoc) rawselEntries = "" if(hasattr(args, 'fileloc') and args.fileloc is not None): if os.path.exists(args.fileloc): with open(args.fileloc, 'r') as selFile: selLines = selFile.readlines() rawselEntries = ''.join(selLines) else: print("Error: File not found") sys.exit(1) else: rawselEntries = sel(host, args, session) loadFailed = False try: selEntries = json.loads(rawselEntries) except ValueError: loadFailed = True if loadFailed: cleanSels = json.dumps(rawselEntries).replace('\\n', '') #need to load 
json twice as original content was string escaped a second time selEntries = json.loads(json.loads(cleanSels)) selEntries = selEntries['data'] if 'description' in selEntries: if(args.json): return("{\n\t\"numAlerts\": 0\n}") else: return("No log entries found") else: if(len(policyTable)>0): events = parseAlerts(policyTable, selEntries, args) if(args.json): events["numAlerts"] = len(events) retValue = str(json.dumps(events, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)) return retValue elif(hasattr(args, 'fullSel')): return events else: #get log numbers to order event entries sequentially return selDisplay(events, args) else: if(args.json): return selEntries else: print("error: Policy Table not found.") return selEntries def selList(host, args, session): """ prints out all all bmc alerts, or only prints out the specified alerts @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ return(sel(host, args, session)) def selClear(host, args, session): """ clears all alerts @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/logging/action/DeleteAll" data = "{\"data\": [] }" try: res = session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) if res.status_code == 200: return "The Alert Log has been cleared. Please allow a few minutes for the action to complete." 
else: print("Unable to clear the logs, trying to clear 1 at a time") sels = json.loads(sel(host, args, session))['data'] for key in sels: if 'callout' not in key: logNum = key.split('/')[-1] url = "https://"+ host+ "/xyz/openbmc_project/logging/entry/"+logNum+"/action/Delete" try: session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) sys.exit(1) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) sys.exit(1) return ('Sel clearing complete') def selSetResolved(host, args, session): """ sets a sel entry to resolved @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url="https://"+host+"/xyz/openbmc_project/logging/entry/" + str(args.selNum) + "/attr/Resolved" data = "{\"data\": 1 }" try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) if res.status_code == 200: return "Sel entry "+ str(args.selNum) +" is now set to resolved" else: return "Unable to set the alert to resolved" def selResolveAll(host, args, session): """ sets a sel entry to resolved @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ rawselEntries = sel(host, args, session) loadFailed = False try: selEntries = json.loads(rawselEntries) except ValueError: loadFailed = True if loadFailed: cleanSels = json.dumps(rawselEntries).replace('\\n', '') #need to load json twice as original content was string escaped a second time selEntries = json.loads(json.loads(cleanSels)) selEntries = selEntries['data'] if 'description' in selEntries: if(args.json): return("{\n\t\"selsResolved\": 0\n}") else: return("No log entries found") else: d = vars(args) successlist = [] failedlist = [] for key in selEntries: if 'callout' not in key: d['selNum'] = key.split('/')[-1] resolved = selSetResolved(host,args,session) if 'Sel entry' in resolved: successlist.append(d['selNum']) else: failedlist.append(d['selNum']) output = "" successlist.sort() failedlist.sort() if len(successlist)>0: output = "Successfully resolved: " +', '.join(successlist) +"\n" if len(failedlist)>0: output += "Failed to resolve: " + ', '.join(failedlist) + "\n" return output def chassisPower(host, args, session): """ called by the chassis function. 
Controls the power state of the chassis, or gets the status @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(args.powcmd == 'on'): if checkFWactivation(host, args, session): return ("Chassis Power control disabled during firmware activation") print("Attempting to Power on...:") url="https://"+host+"/xyz/openbmc_project/state/host0/attr/RequestedHostTransition" data = '{"data":"xyz.openbmc_project.State.Host.Transition.On"}' try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text elif(args.powcmd == 'softoff'): if checkFWactivation(host, args, session): return ("Chassis Power control disabled during firmware activation") print("Attempting to Power off gracefully...:") url="https://"+host+"/xyz/openbmc_project/state/host0/attr/RequestedHostTransition" data = '{"data":"xyz.openbmc_project.State.Host.Transition.Off"}' try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text elif(args.powcmd == 'hardoff'): if checkFWactivation(host, args, session): return ("Chassis Power control disabled during firmware activation") print("Attempting to Power off immediately...:") url="https://"+host+"/xyz/openbmc_project/state/chassis0/attr/RequestedPowerTransition" data = '{"data":"xyz.openbmc_project.State.Chassis.Transition.Off"}' try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text elif(args.powcmd == 'status'): url="https://"+host+"/xyz/openbmc_project/state/chassis0/attr/CurrentPowerState" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) chassisState = json.loads(res.text)['data'].split('.')[-1] url="https://"+host+"/xyz/openbmc_project/state/host0/attr/CurrentHostState" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) hostState = json.loads(res.text)['data'].split('.')[-1] url="https://"+host+"/xyz/openbmc_project/state/bmc0/attr/CurrentBMCState" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) bmcState = json.loads(res.text)['data'].split('.')[-1] if(args.json): outDict = {"Chassis Power State" : chassisState, "Host Power State" : hostState, "BMC Power State":bmcState} return json.dumps(outDict, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) else: return "Chassis Power State: " +chassisState + "\nHost Power State: " + hostState + "\nBMC Power State: " + bmcState else: return "Invalid chassis power command" def chassisIdent(host, args, session): """ called by the chassis function. Controls the identify led of the chassis. 
Sets or gets the state @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(args.identcmd == 'on'): print("Attempting to turn identify light on...:") url="https://"+host+"/xyz/openbmc_project/led/groups/enclosure_identify/attr/Asserted" data = '{"data":true}' try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text elif(args.identcmd == 'off'): print("Attempting to turn identify light off...:") url="https://"+host+"/xyz/openbmc_project/led/groups/enclosure_identify/attr/Asserted" data = '{"data":false}' try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) return res.text elif(args.identcmd == 'status'): url="https://"+host+"/xyz/openbmc_project/led/groups/enclosure_identify" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) status = json.loads(res.text)['data'] if(args.json): return status else: if status['Asserted'] == 0: return "Identify light is off" else: return "Identify light is blinking" else: return "Invalid chassis identify command" def chassis(host, args, session): """ controls the different chassis commands @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fru sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(hasattr(args, 'powcmd')): result = chassisPower(host,args,session) elif(hasattr(args, 'identcmd')): result = chassisIdent(host, args, session) else: return "This feature is not yet implemented" return result def getTask(host, args, session): """ Get operation on the Task Monitor URI @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the task sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if args.taskURI is not None: url ='https://'+host+str(args.taskURI) try: r = session.post(url, headers=jsonHeader, verify=False, timeout=baseTimeout) if (r.status_code == 200 and not args.json): return r.text elif (r.status_code == 200 and args.json): return r.json() else: return ('Failed to retrieve the data on Task Monitor URI') except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) else: return 'You must specify the Task Monitor URI' def dumpRetrieve(host, args, session): """ Downloads dump of given dump type @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for 
programmatic consumption """ dumpType = args.dumpType if (args.dumpType=="SystemDump"): dumpResp=systemDumpRetrieve(host,args,session) elif(args.dumpType=="bmc"): dumpResp=bmcDumpRetrieve(host,args,session) return dumpResp def dumpList(host, args, session): """ Lists dump of the given dump type @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if (args.dumpType=="SystemDump"): dumpResp=systemDumpList(host,args,session) elif(args.dumpType=="bmc"): dumpResp=bmcDumpList(host,args,session) return dumpResp def dumpDelete(host, args, session): """ Deletes dump of the given dump type @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if (args.dumpType=="SystemDump"): dumpResp=systemDumpDelete(host,args,session) elif(args.dumpType=="bmc"): dumpResp=bmcDumpDelete(host,args,session) return dumpResp def dumpDeleteAll(host, args, session): """ Deletes all dumps of the given dump type @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if (args.dumpType=="SystemDump"): dumpResp=systemDumpDeleteAll(host,args,session) elif(args.dumpType=="bmc"): dumpResp=bmcDumpDeleteAll(host,args,session) return dumpResp def dumpCreate(host, args, session): """ Creates dump for the given dump type @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if (args.dumpType=="SystemDump"): dumpResp=systemDumpCreate(host,args,session) elif(args.dumpType=="bmc"): dumpResp=bmcDumpCreate(host,args,session) return dumpResp def bmcDumpRetrieve(host, args, session): """ Downloads a dump file from the bmc @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ dumpNum = args.dumpNum if (args.dumpSaveLoc is not None): saveLoc = args.dumpSaveLoc else: saveLoc = tempfile.gettempdir() url ='https://'+host+'/download/dump/' + str(dumpNum) try: r = session.get(url, headers=jsonHeader, stream=True, verify=False, timeout=baseTimeout) if (args.dumpSaveLoc is not None): if os.path.exists(saveLoc): if saveLoc[-1] != os.path.sep: saveLoc = saveLoc + os.path.sep filename = saveLoc + host+'-dump' + str(dumpNum) + '.tar.xz' else: return 'Invalid save location specified' else: filename = tempfile.gettempdir()+os.sep + host+'-dump' + str(dumpNum) + '.tar.xz' with open(filename, 'wb') as f: for chunk in r.iter_content(chunk_size =1024): if 

def bmcDumpRetrieve(host, args, session):
    """
        Downloads a dump file from the bmc

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    dumpNum = args.dumpNum
    if (args.dumpSaveLoc is not None):
        saveLoc = args.dumpSaveLoc
    else:
        saveLoc = tempfile.gettempdir()
    url = 'https://'+host+'/download/dump/' + str(dumpNum)
    try:
        r = session.get(url, headers=jsonHeader, stream=True, verify=False,
                        timeout=baseTimeout)
        if (args.dumpSaveLoc is not None):
            if os.path.exists(saveLoc):
                if saveLoc[-1] != os.path.sep:
                    saveLoc = saveLoc + os.path.sep
                filename = saveLoc + host+'-dump' + str(dumpNum) + '.tar.xz'
            else:
                return 'Invalid save location specified'
        else:
            filename = tempfile.gettempdir()+os.sep + host+'-dump' + str(dumpNum) + '.tar.xz'
        with open(filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        return 'Saved as ' + filename
    except(requests.exceptions.Timeout):
        return connectionErrHandler(args.json, "Timeout", None)
    except(requests.exceptions.ConnectionError) as err:
        return connectionErrHandler(args.json, "ConnectionError", err)

def bmcDumpList(host, args, session):
    """
        Lists the number of dump files on the bmc

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    url = 'https://'+host+'/xyz/openbmc_project/dump/list'
    try:
        r = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout)
        dumpList = r.json()
        formattedList = []
        # remove items that aren't dump entries (entry, internal, manager endpoints)
        if 'data' in dumpList:
            for entry in dumpList['data']:
                if 'entry' in entry:
                    if entry.split('/')[-1].isnumeric():
                        formattedList.append(entry)
            dumpList['data'] = formattedList
        return dumpList
    except(requests.exceptions.Timeout):
        return connectionErrHandler(args.json, "Timeout", None)
    except(requests.exceptions.ConnectionError) as err:
        return connectionErrHandler(args.json, "ConnectionError", err)

def bmcDumpDelete(host, args, session):
    """
        Deletes BMC dump files from the bmc

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    dumpList = []
    successList = []
    failedList = []
    if args.dumpNum is not None:
        if isinstance(args.dumpNum, list):
            dumpList = args.dumpNum
        else:
            dumpList.append(args.dumpNum)
        for dumpNum in dumpList:
            url = 'https://'+host+'/xyz/openbmc_project/dump/entry/'+str(dumpNum)+'/action/Delete'
            try:
                r = session.post(url, headers=jsonHeader, json={"data": []},
                                 verify=False, timeout=baseTimeout)
                if r.status_code == 200:
                    successList.append(str(dumpNum))
                else:
                    failedList.append(str(dumpNum))
            except(requests.exceptions.Timeout):
                return connectionErrHandler(args.json, "Timeout", None)
            except(requests.exceptions.ConnectionError) as err:
                return connectionErrHandler(args.json, "ConnectionError", err)
        output = "Successfully deleted dumps: " + ', '.join(successList)
        if(len(failedList) > 0):
            output += '\nFailed to delete dumps: ' + ', '.join(failedList)
        return output
    else:
        return 'You must specify an entry number to delete'

def bmcDumpDeleteAll(host, args, session):
    """
        Deletes All BMC dump files from the bmc

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    dumpResp = bmcDumpList(host, args, session)
    if 'FQPSPIN0000M' in dumpResp or 'FQPSPIN0001M' in dumpResp:
        return dumpResp
    dumpList = dumpResp['data']
    d = vars(args)
    dumpNums = []
    for dump in dumpList:
        dumpNum = dump.strip().split('/')[-1]
        if dumpNum.isdigit():
            dumpNums.append(int(dumpNum))
    d['dumpNum'] = dumpNums
    return bmcDumpDelete(host, args, session)

def bmcDumpCreate(host, args, session):
    """
        Creates a bmc dump file

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used
by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url = 'https://'+host+'/xyz/openbmc_project/dump/action/CreateDump' try: r = session.post(url, headers=jsonHeader, json = {"data": []}, verify=False, timeout=baseTimeout) info = r.json() if(r.status_code == 200 and not args.json): return ('Dump successfully created') elif(args.json): return info elif 'data' in info: if 'QuotaExceeded' in info['data']['description']: return 'BMC dump space is full. Please delete at least one existing dump entry and try again.' else: return "Failed to create a BMC dump. BMC Response:\n {resp}".format(resp=info) else: return "Failed to create a BMC dump. BMC Response:\n {resp}".format(resp=info) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) def systemDumpRetrieve(host, args, session): """ Downloads system dump @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ NBDSetup(host,args,session) pipe = NBDPipe() pipe.openHTTPSocket(args) pipe.openTCPSocket() pipe.waitformessage() def systemDumpList(host, args, session): """ Lists system dumps @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url = "https://"+host+"/redfish/v1/Systems/system/LogServices/Dump/Entries" try: r = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) dumpList = r.json() return dumpList except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) def systemDumpDelete(host, args, session): """ Deletes system dump @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ dumpList = [] successList = [] failedList = [] if args.dumpNum is not None: if isinstance(args.dumpNum, list): dumpList = args.dumpNum else: dumpList.append(args.dumpNum) for dumpNum in dumpList: url = 'https://'+host+'/redfish/v1/Systems/system/LogServices/Dump/Entries/'+ str(dumpNum) try: r = session.delete(url, headers=jsonHeader, json = {"data": []}, verify=False, timeout=baseTimeout) if r.status_code == 200: successList.append(str(dumpNum)) else: failedList.append(str(dumpNum)) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) output = "Successfully deleted dumps: " + ', '.join(successList) if(len(failedList)>0): output+= '\nFailed to delete dumps: ' + ', 
'.join(failedList) return output else: return 'You must specify an entry number to delete' def systemDumpDeleteAll(host, args, session): """ Deletes All system dumps @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url = 'https://'+host+'/redfish/v1/Systems/system/LogServices/Dump/Actions/LogService.ClearLog' try: r = session.post(url, headers=jsonHeader, json = {"data": []}, verify=False, timeout=baseTimeout) if(r.status_code == 200 and not args.json): return ('Dumps successfully cleared') elif(args.json): return r.json() else: return ('Failed to clear dumps') except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) def systemDumpCreate(host, args, session): """ Creates a system dump @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url = 'https://'+host+'/redfish/v1/Systems/system/LogServices/Dump/Actions/LogService.CollectDiagnosticData' params = {'DiagnosticDataType':'OEM', 'OEMDiagnosticDataType':'System'} try: r = session.post(url, headers=jsonHeader, params=params, data = json.dumps(params), verify=False, timeout=baseTimeout) if(r.status_code == 200): return r.json() else: return ('Failed to create dump') except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) def csdDumpInitiate(host, args, session): """ Starts the process of getting the current list of dumps then initiates the creation of one. 
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ errorInfo = "" dumpcount = 0 try: d = vars(args) d['json'] = True except Exception as e: errorInfo += "Failed to set the json flag to True \n Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() try: for i in range(3): dumpInfo = bmcDumpList(host, args, session) if 'data' in dumpInfo: dumpcount = len(dumpInfo['data']) break else: errorInfo+= "Dump List Message returned: " + json.dumps(dumpInfo,indent=0, separators=(',', ':')).replace('\n','') +"\n" except Exception as e: errorInfo+= "Failed to collect the list of dumps.\nException: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() #Create a user initiated dump dumpFailure = True try: for i in range(3): dumpcreated = bmcDumpCreate(host, args, session) if 'message' in dumpcreated: if 'ok' in dumpcreated['message'].lower(): dumpFailure = False break elif 'data' in dumpcreated: if 'QuotaExceeded' in dumpcreated['data']['description']: print('Not enough dump space on the BMC to create a new dump. Please delete the oldest entry (lowest number) and rerun the collect_service_data command.') errorInfo+='Dump Space is full. 
No new dump was created with this collection' break else: errorInfo+= "Dump create message returned: " + json.dumps(dumpcreated,indent=0, separators=(',', ':')).replace('\n','') +"\n" else: errorInfo+= "Dump create message returned: " + json.dumps(dumpcreated,indent=0, separators=(',', ':')).replace('\n','') +"\n" else: errorInfo+= "Dump create message returned: " + json.dumps(dumpcreated,indent=0, separators=(',', ':')).replace('\n','') +"\n" except Exception as e: errorInfo+= "Dump create exception encountered: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() output = {} output['errors'] = errorInfo output['dumpcount'] = dumpcount if dumpFailure: output['dumpFailure'] = True return output def csdInventory(host, args,session, fileDir): """ Collects the BMC inventory, retrying if necessary @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param fileDir: string representation of the path to use for putting files created """ errorInfo = "===========Inventory =============\n" output={} inventoryCollected = False try: for i in range(3): frulist = fruPrint(host, args, session) if 'Hardware' in frulist: inventoryCollected = True break else: errorInfo += json.dumps(frulist, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n' except Exception as e: errorInfo += "Inventory collection exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() if inventoryCollected: try: with open(fileDir +os.sep+'inventory.txt', 'w') as f: f.write(json.dumps(frulist, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n') print("Inventory collected and stored in " + fileDir + os.sep + "inventory.txt") output['fileLoc'] = fileDir+os.sep+'inventory.txt' except Exception as e: print("Failed to write inventory to file.") errorInfo += "Error writing inventory to the file. 
Exception: {eInfo}\n".format(eInfo=e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
            errorInfo += traceback.format_exc()
    output['errors'] = errorInfo
    return output
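
# csdInventory above and the csd* collectors that follow share one contract:
# retry the collection up to three times, accumulate any failures into an
# errorInfo string, and on success write the data to a file under fileDir.
# The returned dict always carries 'errors' and, on success, 'fileLoc'.
# Sketch (the directory value is illustrative):
#
#   result = csdInventory(host, args, session, '/tmp/servicedata')
#   if 'fileLoc' in result:
#       print('saved to ' + result['fileLoc'])
#   print(result['errors'])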

def csdSensors(host, args, session, fileDir):
    """
        Collects the BMC sensor readings, retrying if necessary

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
        @param fileDir: string representation of the path to use for putting files created
    """
    errorInfo = "===========Sensors =============\n"
    sensorsCollected = False
    output = {}
    try:
        d = vars(args)
        d['json'] = False
    except Exception as e:
        errorInfo += "Failed to set the json flag to False \n Exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    try:
        for i in range(3):
            sensorReadings = sensor(host, args, session)
            if 'OCC0' in sensorReadings:
                sensorsCollected = True
                break
            else:
                errorInfo += sensorReadings
    except Exception as e:
        errorInfo += "Sensor reading collection exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    if sensorsCollected:
        try:
            with open(fileDir + os.sep + 'sensorReadings.txt', 'w') as f:
                f.write(sensorReadings)
            print("Sensor readings collected and stored in " + fileDir + os.sep + "sensorReadings.txt")
            output['fileLoc'] = fileDir+os.sep+'sensorReadings.txt'
        except Exception as e:
            print("Failed to write sensor readings to file system.")
            errorInfo += "Error writing sensor readings to the file. Exception: {eInfo}\n".format(eInfo=e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
            errorInfo += traceback.format_exc()
    output['errors'] = errorInfo
    return output

def csdLEDs(host, args, session, fileDir):
    """
        Collects the BMC LED status, retrying if necessary

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
        @param fileDir: string representation of the path to use for putting files created
    """
    errorInfo = "===========LEDs =============\n"
    ledsCollected = False
    output = {}
    try:
        d = vars(args)
        d['json'] = True
    except Exception as e:
        errorInfo += "Failed to set the json flag to True \n Exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    try:
        url = "https://"+host+"/xyz/openbmc_project/led/enumerate"
        httpHeader = {'Content-Type': 'application/json'}
        for i in range(3):
            try:
                ledRes = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout)
                if ledRes.status_code == 200:
                    ledsCollected = True
                    leds = ledRes.json()['data']
                    break
                else:
                    errorInfo += ledRes.text
            except(requests.exceptions.Timeout):
                errorInfo += json.dumps(connectionErrHandler(args.json, "Timeout", None), sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n'
            except(requests.exceptions.ConnectionError) as err:
                errorInfo += json.dumps(connectionErrHandler(args.json, "ConnectionError", err), sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n'
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                # 'err' is the exception bound in this handler
                errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=err, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
                errorInfo += traceback.format_exc()
    except Exception as e:
        errorInfo += "LED status collection exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    if ledsCollected:
        try:
            with open(fileDir + os.sep + 'ledStatus.txt', 'w') as f:
                f.write(json.dumps(leds, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n')
            print("LED status collected and stored in " + fileDir + os.sep + "ledStatus.txt")
            output['fileLoc'] = fileDir+os.sep+'ledStatus.txt'
        except Exception as e:
            print("Failed to write LED status to file system.")
            errorInfo += "Error writing LED status to the file. 
Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() output['errors'] = errorInfo return output def csdSelShortList(host, args, session, fileDir): """ Collects the BMC log entries, retrying if necessary @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param fileDir: string representation of the path to use for putting files created """ errorInfo = "===========SEL Short List =============\n" selsCollected = False output={} try: d = vars(args) d['json'] = False except Exception as e: errorInfo += "Failed to set the json flag to False \n Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() try: for i in range(3): sels = selPrint(host,args,session) if '----Active Alerts----' in sels or 'No log entries found' in sels or '----Historical Alerts----' in sels: selsCollected = True break else: errorInfo += sels + '\n' except Exception as e: errorInfo += "SEL short list collection exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() if selsCollected: try: with open(fileDir +os.sep+'SELshortlist.txt', 'w') as f: f.write(sels) print("SEL short list collected and stored in " + fileDir + os.sep+ "SELshortlist.txt") output['fileLoc'] = fileDir+os.sep+'SELshortlist.txt' except Exception as e: print("Failed to write SEL short list to file system.") errorInfo += "Error writing SEL short list to the file. 
Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() output['errors'] = errorInfo return output def csdParsedSels(host, args, session, fileDir): """ Collects the BMC log entries, retrying if necessary @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param fileDir: string representation of the path to use for putting files created """ errorInfo = "===========SEL Parsed List =============\n" selsCollected = False output={} try: d = vars(args) d['json'] = True d['fullEsel'] = True except Exception as e: errorInfo += "Failed to set the json flag to True \n Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() try: for i in range(3): parsedfullsels = json.loads(selPrint(host,args,session)) if 'numAlerts' in parsedfullsels: selsCollected = True break else: errorInfo += parsedfullsels + '\n' except Exception as e: errorInfo += "Parsed full SELs collection exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() if selsCollected: try: sortedSELs = sortSELs(parsedfullsels) with open(fileDir +os.sep+'parsedSELs.txt', 'w') as f: for log in sortedSELs[0]: esel = "" parsedfullsels[sortedSELs[1][str(log)]]['timestamp'] = datetime.datetime.fromtimestamp(int(parsedfullsels[sortedSELs[1][str(log)]]['timestamp']/1000)).strftime("%Y-%m-%d %H:%M:%S") if ('raweSEL' in parsedfullsels[sortedSELs[1][str(log)]] and args.devdebug): esel = parsedfullsels[sortedSELs[1][str(log)]]['raweSEL'] del parsedfullsels[sortedSELs[1][str(log)]]['raweSEL'] f.write(json.dumps(parsedfullsels[sortedSELs[1][str(log)]],sort_keys=True, indent=4, separators=(',', ': '))) if(args.devdebug and esel != ""): f.write(parseESEL(args, esel)) print("Parsed SELs collected and stored in " + fileDir + os.sep+ "parsedSELs.txt") output['fileLoc'] = fileDir+os.sep+'parsedSELs.txt' except Exception as e: print("Failed to write fully parsed SELs to file system.") errorInfo += "Error writing fully parsed SELs to the file. 
Exception: {eInfo}\n".format(eInfo=e)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
            errorInfo += traceback.format_exc()
    output['errors'] = errorInfo
    return output

def csdFullEnumeration(host, args, session, fileDir):
    """
        Collects a full enumeration of /xyz/openbmc_project/, retrying if necessary

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the collectServiceData sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
        @param fileDir: string representation of the path to use for putting files created
    """
    errorInfo = "===========BMC Full Enumeration =============\n"
    bmcFullCollected = False
    output = {}
    try:
        d = vars(args)
        d['json'] = True
    except Exception as e:
        errorInfo += "Failed to set the json flag to True \n Exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    try:
        print("Attempting to get a full BMC enumeration")
        url = "https://"+host+"/xyz/openbmc_project/enumerate"
        httpHeader = {'Content-Type': 'application/json'}
        for i in range(3):
            try:
                # full enumerations are slow; use a longer timeout here
                bmcRes = session.get(url, headers=jsonHeader, verify=False, timeout=180)
                if bmcRes.status_code == 200:
                    bmcFullCollected = True
                    fullEnumeration = bmcRes.json()
                    break
                else:
                    errorInfo += bmcRes.text
            except(requests.exceptions.Timeout):
                errorInfo += json.dumps(connectionErrHandler(args.json, "Timeout", None), sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n'
            except(requests.exceptions.ConnectionError) as err:
                errorInfo += json.dumps(connectionErrHandler(args.json, "ConnectionError", err), sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n'
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                # 'err' is the exception bound in this handler
                errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=err, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
                errorInfo += traceback.format_exc()
    except Exception as e:
        errorInfo += "RAW BMC data collection exception: {eInfo}\n".format(eInfo=e)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno)
        errorInfo += traceback.format_exc()
    if bmcFullCollected:
        try:
            with open(fileDir + os.sep + 'bmcFullRaw.txt', 'w') as f:
                f.write(json.dumps(fullEnumeration, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) + '\n')
            print("RAW BMC data collected and saved into " + fileDir + os.sep + "bmcFullRaw.txt")
            output['fileLoc'] = fileDir+os.sep+'bmcFullRaw.txt'
        except Exception as e:
            print("Failed to write RAW BMC data to file system.")
            errorInfo += "Error writing RAW BMC data collection to the file. 
Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() output['errors'] = errorInfo return output def csdCollectAllDumps(host, args, session, fileDir): """ Collects all of the bmc dump files and stores them in fileDir @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param fileDir: string representation of the path to use for putting files created """ errorInfo = "===========BMC Dump Collection =============\n" dumpListCollected = False output={} dumpList = {} try: d = vars(args) d['json'] = True d['dumpSaveLoc'] = fileDir except Exception as e: errorInfo += "Failed to set the json flag to True, or failed to set the dumpSave Location \n Exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() print('Collecting bmc dump files') try: for i in range(3): dumpResp = bmcDumpList(host, args, session) if 'message' in dumpResp: if 'ok' in dumpResp['message'].lower(): dumpList = dumpResp['data'] dumpListCollected = True break else: errorInfo += "Status was not OK when retrieving the list of dumps available. 
\n Response: \n{resp}\n".format(resp=dumpResp) else: errorInfo += "Invalid response received from the BMC while retrieving the list of dumps available.\n {resp}\n".format(resp=dumpResp) except Exception as e: errorInfo += "BMC dump list exception: {eInfo}\n".format(eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() if dumpListCollected: output['fileList'] = [] for dump in dumpList: try: if '/xyz/openbmc_project/dump/internal/manager' not in dump: d['dumpNum'] = int(dump.strip().split('/')[-1]) print('retrieving dump file ' + str(d['dumpNum'])) filename = bmcDumpRetrieve(host, args, session).split('Saved as ')[-1] output['fileList'].append(filename) except Exception as e: print("Unable to collect dump: {dumpInfo}".format(dumpInfo=dump)) errorInfo += "Exception collecting a bmc dump {dumpInfo}\n {eInfo}\n".format(dumpInfo=dump, eInfo=e) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() output['errors'] = errorInfo return output def collectServiceData(host, args, session): """ Collects all data needed for service from the BMC @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the collectServiceData sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ global toolVersion filelist = [] errorInfo = "" #get current number of bmc dumps and create a new bmc dump dumpInitdata = csdDumpInitiate(host, args, session) if 'dumpFailure' in dumpInitdata: return 'Collect service data is stopping due to not being able to create a new dump. No service data was collected.' dumpcount = dumpInitdata['dumpcount'] errorInfo += dumpInitdata['errors'] #create the directory to put files try: args.silent = True myDir = tempfile.gettempdir()+os.sep + host + "--" + datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S") os.makedirs(myDir) except Exception as e: print('Unable to create the temporary directory for data collection. Ensure sufficient privileges to create temporary directory. 
Aborting.') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] errorInfo += "Exception: Error: {err}, Details: {etype}, {fname}, {lineno}\n".format(err=e, etype=exc_type, fname=fname, lineno=exc_tb.tb_lineno) errorInfo += traceback.format_exc() return("Python exception: {eInfo}".format(eInfo = e)) #Collect Inventory inventoryData = csdInventory(host, args, session, myDir) if 'fileLoc' in inventoryData: filelist.append(inventoryData['fileLoc']) errorInfo += inventoryData['errors'] #Read all the sensor and OCC status sensorData = csdSensors(host,args,session,myDir) if 'fileLoc' in sensorData: filelist.append(sensorData['fileLoc']) errorInfo += sensorData['errors'] #Collect all of the LEDs status ledStatus = csdLEDs(host, args, session, myDir) if 'fileLoc' in ledStatus: filelist.append(ledStatus['fileLoc']) errorInfo += ledStatus['errors'] #Collect the bmc logs selShort = csdSelShortList(host, args, session, myDir) if 'fileLoc' in selShort: filelist.append(selShort['fileLoc']) errorInfo += selShort['errors'] parsedSELs = csdParsedSels(host, args, session, myDir) if 'fileLoc' in parsedSELs: filelist.append(parsedSELs['fileLoc']) errorInfo += parsedSELs['errors'] #collect RAW bmc enumeration bmcRaw = csdFullEnumeration(host, args, session, myDir) if 'fileLoc' in bmcRaw: filelist.append(bmcRaw['fileLoc']) errorInfo += bmcRaw['errors'] #wait for new dump to finish being created waitingForNewDump = True count = 0; print("Waiting for new BMC dump to finish being created. Wait time could be up to 5 minutes") while(waitingForNewDump): dumpList = bmcDumpList(host, args, session)['data'] if len(dumpList) > dumpcount: waitingForNewDump = False break; elif(count>150): print("Timed out waiting for bmc to make a new dump file. 
Continuing without it.") break; else: time.sleep(2) count += 1 #collect all of the dump files getBMCDumps = csdCollectAllDumps(host, args, session, myDir) if 'fileList' in getBMCDumps: filelist+= getBMCDumps['fileList'] errorInfo += getBMCDumps['errors'] #write the runtime errors to a file try: with open(myDir +os.sep+'openbmctoolRuntimeErrors.txt', 'w') as f: f.write(errorInfo) print("OpenBMC tool runtime errors collected and stored in " + myDir + os.sep+ "openbmctoolRuntimeErrors.txt") filelist.append(myDir+os.sep+'openbmctoolRuntimeErrors.txt') except Exception as e: print("Failed to write OpenBMC tool runtime errors to file system.") #create the zip file try: filename = myDir.split(tempfile.gettempdir()+os.sep)[-1] + "_" + toolVersion + '_openbmc.zip' zf = zipfile.ZipFile(myDir+os.sep + filename, 'w') for myfile in filelist: zf.write(myfile, os.path.basename(myfile)) zf.close() print("Zip file with all collected data created and stored in: {fileInfo}".format(fileInfo=myDir+os.sep+filename)) except Exception as e: print("Failed to create zip file with collected information") return "data collection finished" def healthCheck(host, args, session): """ runs a health check on the platform @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the bmc sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ #check fru status and get as json to easily work through d = vars(args) useJson = d['json'] d['json'] = True d['verbose']= False frus = json.loads(fruStatus(host, args, session)) hwStatus= "OK" performanceStatus = "OK" for key in frus: if frus[key]["Functional"] == "No" and frus[key]["Present"] == "Yes": hwStatus= "Degraded" if("power_supply" in key or "powersupply" in key): gpuCount =0 for comp in frus: if "gv100card" in comp: gpuCount +=1 if gpuCount > 4: hwStatus = "Critical" performanceStatus="Degraded" break; elif("fan" in key): hwStatus = "Degraded" else: performanceStatus = "Degraded" if useJson: output = {"Hardware Status": hwStatus, "Performance": performanceStatus} output = json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) else: output = ("Hardware Status: " + hwStatus + "\nPerformance: " +performanceStatus ) #SW407886: Clear the duplicate entries #collect the dups d['devdebug'] = False sels = json.loads(selPrint(host, args, session)) logNums2Clr = [] oldestLogNum={"logNum": "bogus" ,"key" : ""} count = 0 if sels['numAlerts'] > 0: for key in sels: if "numAlerts" in key: continue try: if "slave@00:00/00:00:00:06/sbefifo1-dev0/occ1-dev0" in sels[key]['Message']: count += 1 if count > 1: #preserve first occurrence if sels[key]['timestamp'] < sels[oldestLogNum['key']]['timestamp']: oldestLogNum['key']=key oldestLogNum['logNum'] = sels[key]['logNum'] else: oldestLogNum['key']=key oldestLogNum['logNum'] = sels[key]['logNum'] logNums2Clr.append(sels[key]['logNum']) except KeyError: continue if(count >0): logNums2Clr.remove(oldestLogNum['logNum']) #delete the dups if count >1: data = "{\"data\": [] }" for logNum in logNums2Clr: url = "https://"+ host+ "/xyz/openbmc_project/logging/entry/"+logNum+"/action/Delete" try: session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): deleteFailed = True except(requests.exceptions.ConnectionError) as err: deleteFailed = True #End of defect resolve code d['json'] = 
useJson return output def bmc(host, args, session): """ handles various bmc level commands, currently bmc rebooting @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the bmc sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(args.type is not None): return bmcReset(host, args, session) if(args.info): return "Not implemented at this time" def bmcReset(host, args, session): """ controls resetting the bmc. warm reset reboots the bmc, cold reset removes the configuration and reboots. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the bmcReset sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if checkFWactivation(host, args, session): return ("BMC reset control disabled during firmware activation") if(args.type == "warm"): print("\nAttempting to reboot the BMC...:") url="https://"+host+"/xyz/openbmc_project/state/bmc0/attr/RequestedBMCTransition" data = '{"data":"xyz.openbmc_project.State.BMC.Transition.Reboot"}' res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) return res.text elif(args.type =="cold"): print("\nAttempting to reboot the BMC...:") url="https://"+host+"/xyz/openbmc_project/state/bmc0/attr/RequestedBMCTransition" data = '{"data":"xyz.openbmc_project.State.BMC.Transition.Reboot"}' res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) return res.text else: return "invalid command" def gardClear(host, args, session): """ clears the gard records from the bmc @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the gardClear sub command @param session: the active session to use """ url="https://"+host+"/org/open_power/control/gard/action/Reset" data = '{"data":[]}' try: res = session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) if res.status_code == 404: return "Command not supported by this firmware version" else: return res.text except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) def activateFWImage(host, args, session): """ activates a firmware image on the bmc @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fwflash sub command @param session: the active session to use @param fwID: the unique ID of the fw image to activate """ fwID = args.imageID #determine the existing versions url="https://"+host+"/xyz/openbmc_project/software/enumerate" try: resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) existingSoftware = json.loads(resp.text)['data'] altVersionID = '' versionType = '' imageKey = '/xyz/openbmc_project/software/'+fwID if imageKey in existingSoftware: versionType = existingSoftware[imageKey]['Purpose'] for key in existingSoftware: if imageKey == key: continue if 'Purpose' in 
existingSoftware[key]: if versionType == existingSoftware[key]['Purpose']: altVersionID = key.split('/')[-1] url="https://"+host+"/xyz/openbmc_project/software/"+ fwID + "/attr/Priority" url1="https://"+host+"/xyz/openbmc_project/software/"+ altVersionID + "/attr/Priority" data = "{\"data\": 0}" data1 = "{\"data\": 1 }" try: resp = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) resp1 = session.put(url1, headers=jsonHeader, data=data1, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if(not args.json): if resp.status_code == 200 and resp1.status_code == 200: return 'Firmware flash and activation completed. Please reboot the bmc and then boot the host OS for the changes to take effect. ' else: return "Firmware activation failed." else: return resp.text + resp1.text def activateStatus(host, args, session): if checkFWactivation(host, args, session): return("Firmware is currently being activated. Do not reboot the BMC or start the Host OS") else: return("No firmware activations are pending") def extractFWimage(path, imageType): """ extracts the bmc image and returns information about the package @param path: the path and file name of the firmware image @param imageType: The type of image the user is trying to flash. Host or BMC @return: the image id associated with the package. returns an empty string on error. """ f = tempfile.TemporaryFile() tmpDir = tempfile.gettempdir() newImageID = "" if os.path.exists(path): try: imageFile = tarfile.open(path,'r') contents = imageFile.getmembers() for tf in contents: if 'MANIFEST' in tf.name: imageFile.extract(tf.name, path=tmpDir) with open(tempfile.gettempdir() +os.sep+ tf.name, 'r') as imageInfo: for line in imageInfo: if 'purpose' in line: purpose = line.split('=')[1] if imageType not in purpose.split('.')[-1]: print('The specified image is not for ' + imageType) print('Please try again with the image for ' + imageType) return "" if 'version' == line.split('=')[0]: version = line.split('=')[1].strip().encode('utf-8') m = hashlib.sha512() m.update(version) newImageID = m.hexdigest()[:8] break try: os.remove(tempfile.gettempdir() +os.sep+ tf.name) except OSError: pass return newImageID except tarfile.ExtractError as e: print('Unable to extract information from the firmware file.') print('Ensure you have write access to the directory: ' + tmpDir) return newImageID except tarfile.TarError as e: print('This is not a valid firmware file.') return newImageID print("This is not a valid firmware file.") return newImageID else: print('The filename and path provided are not valid.') return newImageID def getAllFWImageIDs(fwInvDict): """ gets a list of all the firmware image IDs @param fwInvDict: the dictionary to search for FW image IDs @return: list containing string representation of the found image ids """ idList = [] for key in fwInvDict: if 'Version' in fwInvDict[key]: idList.append(key.split('/')[-1]) return idList def fwFlash(host, args, session): """ updates the bmc firmware and pnor firmware @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the fwflash sub command @param session: the active session to use """ d = vars(args) if(args.type == 'bmc'): purp = 'BMC' else: purp = 'Host' #check power state of the machine. 
No concurrent FW updates allowed d['powcmd'] = 'status' powerstate = chassisPower(host, args, session) if 'Chassis Power State: On' in powerstate: return("Aborting firmware update. Host is powered on. Please turn off the host and try again.") #determine the existing images on the bmc url="https://"+host+"/xyz/openbmc_project/software/enumerate" try: resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) oldsoftware = json.loads(resp.text)['data'] #Extract the tar and get information from the manifest file newversionID = extractFWimage(args.fileloc, purp) if newversionID == "": return "Unable to verify FW image." #check if the new image is already on the bmc if newversionID not in getAllFWImageIDs(oldsoftware): #upload the file httpHeader = {'Content-Type':'application/octet-stream'} httpHeader.update(xAuthHeader) url="https://"+host+"/upload/image" data=open(args.fileloc,'rb').read() print("Uploading file to BMC") try: resp = session.post(url, headers=httpHeader, data=data, verify=False) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: return "Failed to upload the file to the bmc" else: print("Upload complete.") #verify bmc processed the image software ={} for i in range(0, 5): url="https://"+host+"/xyz/openbmc_project/software/enumerate" try: resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) software = json.loads(resp.text)['data'] #check if bmc is done processing the new image if (newversionID in getAllFWImageIDs(software)): break else: time.sleep(15) #activate the new image print("Activating new image: "+newversionID) url="https://"+host+"/xyz/openbmc_project/software/"+ newversionID + "/attr/RequestedActivation" data = '{"data":"xyz.openbmc_project.Software.Activation.RequestedActivations.Active"}' try: resp = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) #wait for the activation to complete, timeout after ~1 hour i=0 while i < 360: url="https://"+host+"/xyz/openbmc_project/software/"+ newversionID data = '{"data":"xyz.openbmc_project.Software.Activation.RequestedActivations.Active"}' try: resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) fwInfo = json.loads(resp.text)['data'] if 'Activating' not in fwInfo['Activation'] and 'Activating' not in fwInfo['RequestedActivation']: print('') break else: sys.stdout.write('.') sys.stdout.flush() time.sleep(10) #check every 10 seconds return "Firmware flash and activation completed. 
Please reboot the bmc and then boot the host OS for the changes to take effect. " else: print("This image has been found on the bmc. Activating image: " + newversionID) d['imageID'] = newversionID return activateFWImage(host, args, session) def getFWInventoryAttributes(rawFWInvItem, ID): """ gets and lists all of the firmware in the system. @return: returns a dictionary containing the image attributes """ reqActivation = rawFWInvItem["RequestedActivation"].split('.')[-1] pendingActivation = "" if reqActivation == "None": pendingActivation = "No" else: pendingActivation = "Yes" firmwareAttr = {ID: { "Purpose": rawFWInvItem["Purpose"].split('.')[-1], "Version": rawFWInvItem["Version"], "RequestedActivation": pendingActivation, "ID": ID}} if "ExtendedVersion" in rawFWInvItem: firmwareAttr[ID]['ExtendedVersion'] = rawFWInvItem['ExtendedVersion'].split(',') else: firmwareAttr[ID]['ExtendedVersion'] = "" return firmwareAttr def parseFWdata(firmwareDict): """ creates a dictionary with parsed firmware data @return: returns a dictionary containing the image attributes """ firmwareInfoDict = {"Functional": {}, "Activated":{}, "NeedsActivated":{}} for key in firmwareDict['data']: #check for valid endpoint if "Purpose" in firmwareDict['data'][key]: id = key.split('/')[-1] if firmwareDict['data'][key]['Activation'].split('.')[-1] == "Active": fwActivated = True else: fwActivated = False if 'Priority' in firmwareDict['data'][key]: if firmwareDict['data'][key]['Priority'] == 0: firmwareInfoDict['Functional'].update(getFWInventoryAttributes(firmwareDict['data'][key], id)) elif firmwareDict['data'][key]['Priority'] >= 0 and fwActivated: firmwareInfoDict['Activated'].update(getFWInventoryAttributes(firmwareDict['data'][key], id)) else: firmwareInfoDict['NeedsActivated'].update(getFWInventoryAttributes(firmwareDict['data'][key], id)) else: firmwareInfoDict['NeedsActivated'].update(getFWInventoryAttributes(firmwareDict['data'][key], id)) emptySections = [] for key in firmwareInfoDict: if len(firmwareInfoDict[key])<=0: emptySections.append(key) for key in emptySections: del firmwareInfoDict[key] return firmwareInfoDict def displayFWInvenory(firmwareInfoDict, args): """ gets and lists all of the firmware in the system. 
@return: returns a string containing all of the firmware information """ output = "" if not args.json: for key in firmwareInfoDict: for subkey in firmwareInfoDict[key]: firmwareInfoDict[key][subkey]['ExtendedVersion'] = str(firmwareInfoDict[key][subkey]['ExtendedVersion']) if not args.verbose: output = "---Running Images---\n" colNames = ["Purpose", "Version", "ID"] keylist = ["Purpose", "Version", "ID"] output += tableDisplay(keylist, colNames, firmwareInfoDict["Functional"]) if "Activated" in firmwareInfoDict: output += "\n---Available Images---\n" output += tableDisplay(keylist, colNames, firmwareInfoDict["Activated"]) if "NeedsActivated" in firmwareInfoDict: output += "\n---Needs Activated Images---\n" output += tableDisplay(keylist, colNames, firmwareInfoDict["NeedsActivated"]) else: output = "---Running Images---\n" colNames = ["Purpose", "Version", "ID", "Pending Activation", "Extended Version"] keylist = ["Purpose", "Version", "ID", "RequestedActivation", "ExtendedVersion"] output += tableDisplay(keylist, colNames, firmwareInfoDict["Functional"]) if "Activated" in firmwareInfoDict: output += "\n---Available Images---\n" output += tableDisplay(keylist, colNames, firmwareInfoDict["Activated"]) if "NeedsActivated" in firmwareInfoDict: output += "\n---Needs Activated Images---\n" output += tableDisplay(keylist, colNames, firmwareInfoDict["NeedsActivated"]) return output else: return str(json.dumps(firmwareInfoDict, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)) def firmwareList(host, args, session): """ gets and lists all of the firmware in the system. @return: returns a string containing all of the firmware information """ url="https://{hostname}/xyz/openbmc_project/software/enumerate".format(hostname=host) try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) firmwareDict = json.loads(res.text) #sort the received information firmwareInfoDict = parseFWdata(firmwareDict) #display the information return displayFWInvenory(firmwareInfoDict, args) def deleteFWVersion(host, args, session): """ deletes a firmware version on the BMC @param host: string, the hostname or IP address of the BMC @param args: contains additional arguments used by the fwflash sub command @param session: the active session to use @param fwID: the unique ID of the fw version to delete """ fwID = args.versionID print("Deleting version: "+fwID) url="https://"+host+"/xyz/openbmc_project/software/"+ fwID + "/action/Delete" data = "{\"data\": [] }" try: res = session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) if res.status_code == 200: return ('The firmware version has been deleted') else: return ('Unable to delete the specified firmware version') def deleteFWAll(host, args, session): """ deletes ALL contents for firmware software catalog @param host: string, the hostname or IP address of the BMC @param args: contains additional arguments used by the fwflash sub command @param session: the active session to use """ print("Deleting ALL firmware versions") url="https://"+host+"/xyz/openbmc_project/software/action/DeleteAll" data = "{\"data\": [] }" try: res = session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) if res.status_code == 
200:
        return ('All firmware versions were deleted')
    else:
        return ('Unspecified error while deleting all firmware versions')

def restLogging(host, args, session):
    """
        Called by the logging function. Turns REST API logging on/off.

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the logging sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    url = "https://"+host+"/xyz/openbmc_project/logging/rest_api_logs/attr/Enabled"
    if(args.rest_logging == 'on'):
        data = '{"data": 1}'
    elif(args.rest_logging == 'off'):
        data = '{"data": 0}'
    else:
        return "Invalid logging rest_api command"
    try:
        res = session.put(url, headers=jsonHeader, data=data, verify=False,
                          timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    return res.text

def remoteLogging(host, args, session):
    """
        Called by the logging function. View config information for/disable
        remote logging (rsyslog).

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the logging sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    url = "https://"+host+"/xyz/openbmc_project/logging/config/remote"
    try:
        if(args.remote_logging == 'view'):
            res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout)
        elif(args.remote_logging == 'disable'):
            res = session.put(url + '/attr/Port', headers=jsonHeader, json={"data": 0}, verify=False, timeout=baseTimeout)
            res = session.put(url + '/attr/Address', headers=jsonHeader, json={"data": ""}, verify=False, timeout=baseTimeout)
        else:
            return "Invalid logging remote_logging command"
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    return res.text

def remoteLoggingConfig(host, args, session):
    """
        Called by the logging function. Configures remote logging (rsyslog).

        @param host: string, the hostname or IP address of the bmc
        @param args: contains additional arguments used by the logging sub command
        @param session: the active session to use
        @param args.json: boolean, if this flag is set to true, the output
            will be provided in json format for programmatic consumption
    """
    url = "https://"+host+"/xyz/openbmc_project/logging/config/remote"
    try:
        res = session.put(url + '/attr/Port', headers=jsonHeader, json={"data": args.port}, verify=False, timeout=baseTimeout)
        res = session.put(url + '/attr/Address', headers=jsonHeader, json={"data": args.address}, verify=False, timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    return res.text

def redfishSupportPresent(host, session):
    url = "https://" + host + "/redfish/v1"
    try:
        resp = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return False
    except(requests.exceptions.ConnectionError) as err:
        return False
    if resp.status_code != 200:
        return False
    else:
        return True
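
# Illustrative usage of the logging helpers above (hypothetical argparse
# values; 'rsyslog.example.com' is a placeholder, not a real endpoint):
#
#   args.rest_logging = 'on'                # restLogging: enable REST API logs
#   args.address = 'rsyslog.example.com'    # remoteLoggingConfig target
#   args.port = 514
#   remoteLoggingConfig(host, args, session)
#
# redfishSupportPresent() above is the capability probe the certificate
# commands below use to choose between the Redfish and legacy REST endpoints.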
update server/client/authority certificates Example: certificate update server https -f cert.pem certificate update authority ldap -f Root-CA.pem certificate update client ldap -f cert.pem @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate update sub command @param session: the active session to use """ httpHeader = {'Content-Type': 'application/octet-stream'} httpHeader.update(xAuthHeader) data = open(args.fileloc, 'r').read() try: if redfishSupportPresent(host, session): if(args.type.lower() == 'server' and args.service.lower() != "https"): return "Invalid service type" if(args.type.lower() == 'client' and args.service.lower() != "ldap"): return "Invalid service type" if(args.type.lower() == 'authority' and args.service.lower() != "ldap"): return "Invalid service type" url = ""; if(args.type.lower() == 'server'): url = "https://" + host + \ "/redfish/v1/Managers/bmc/NetworkProtocol/HTTPS/Certificates" elif(args.type.lower() == 'client'): url = "https://" + host + \ "/redfish/v1/AccountService/LDAP/Certificates" elif(args.type.lower() == 'authority'): url = "https://" + host + \ "/redfish/v1/Managers/bmc/Truststore/Certificates" else: return "Unsupported certificate type" resp = session.post(url, headers=httpHeader, data=data, verify=False) else: url = "https://" + host + "/xyz/openbmc_project/certs/" + \ args.type.lower() + "/" + args.service.lower() resp = session.put(url, headers=httpHeader, data=data, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to update the certificate" else: print("Update complete.") def certificateDelete(host, args, session): """ Called by certificate management function to delete certificate Example: certificate delete server https certificate delete authority ldap certificate delete client ldap @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate delete sub command @param session: the active session to use """ if redfishSupportPresent(host, session): return "Not supported, please use certificate replace instead"; httpHeader = {'Content-Type': 'multipart/form-data'} httpHeader.update(xAuthHeader) url = "https://" + host + "/xyz/openbmc_project/certs/" + args.type.lower() + "/" + args.service.lower() print("Deleting certificate url=" + url) try: resp = session.delete(url, headers=httpHeader) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to delete the certificate" else: print("Delete complete.") def certificateReplace(host, args, session): """ Called by certificate management function. 
replace server/client/ authority certificates Example: certificate replace server https -f cert.pem certificate replace authority ldap -f Root-CA.pem certificate replace client ldap -f cert.pem @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate replace sub command @param session: the active session to use """ cert = open(args.fileloc, 'r').read() try: if redfishSupportPresent(host, session): httpHeader = {'Content-Type': 'application/json'} httpHeader.update(xAuthHeader) url = ""; if(args.type.lower() == 'server' and args.service.lower() != "https"): return "Invalid service type" if(args.type.lower() == 'client' and args.service.lower() != "ldap"): return "Invalid service type" if(args.type.lower() == 'authority' and args.service.lower() != "ldap"): return "Invalid service type" if(args.type.lower() == 'server'): url = "/redfish/v1/Managers/bmc/NetworkProtocol/HTTPS/Certificates/1" elif(args.type.lower() == 'client'): url = "/redfish/v1/AccountService/LDAP/Certificates/1" elif(args.type.lower() == 'authority'): url = "/redfish/v1/Managers/bmc/Truststore/Certificates/1" replaceUrl = "https://" + host + \ "/redfish/v1/CertificateService/Actions/CertificateService.ReplaceCertificate" data ={"CertificateUri":{"@odata.id":url}, "CertificateType":"PEM", "CertificateString":cert} resp = session.post(replaceUrl, headers=httpHeader, json=data, verify=False) else: httpHeader = {'Content-Type': 'application/octet-stream'} httpHeader.update(xAuthHeader) url = "https://" + host + "/xyz/openbmc_project/certs/" + \ args.type.lower() + "/" + args.service.lower() resp = session.delete(url, headers=httpHeader) resp = session.put(url, headers=httpHeader, data=cert, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to replace the certificate" else: print("Replace complete.") return resp.text def certificateDisplay(host, args, session): """ Called by certificate management function. display server/client/ authority certificates Example: certificate display server certificate display authority certificate display client @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate display sub command @param session: the active session to use """ if not redfishSupportPresent(host, session): return "Not supported"; httpHeader = {'Content-Type': 'application/octet-stream'} httpHeader.update(xAuthHeader) if(args.type.lower() == 'server'): url = "https://" + host + \ "/redfish/v1/Managers/bmc/NetworkProtocol/HTTPS/Certificates/1" elif(args.type.lower() == 'client'): url = "https://" + host + \ "/redfish/v1/AccountService/LDAP/Certificates/1" elif(args.type.lower() == 'authority'): url = "https://" + host + \ "/redfish/v1/Managers/bmc/Truststore/Certificates/1" try: resp = session.get(url, headers=httpHeader, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to display the certificate" else: print("Display complete.") return resp.text def certificateList(host, args, session): """ Called by certificate management function. 
Example: certificate list @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate list sub command @param session: the active session to use """ if not redfishSupportPresent(host, session): return "Not supported" httpHeader = {'Content-Type': 'application/octet-stream'} httpHeader.update(xAuthHeader) url = "https://" + host + \ "/redfish/v1/CertificateService/CertificateLocations/" try: resp = session.get(url, headers=httpHeader, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to list certificates" else: print("List certificates complete.") return resp.text def certificateGenerateCSR(host, args, session): """ Called by certificate management function. Generates a CSR for server/client certificates Example: certificate generatecsr server NJ w3.ibm.com US IBM IBM-UNIT NY EC prime256v1 cp abc.com an.com,bm.com gn sn un in certificate generatecsr client NJ w3.ibm.com US IBM IBM-UNIT NY EC prime256v1 cp abc.com an.com,bm.com gn sn un in @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the certificate generatecsr sub command @param session: the active session to use """ if not redfishSupportPresent(host, session): return "Not supported" httpHeader = {'Content-Type': 'application/octet-stream'} httpHeader.update(xAuthHeader) url = "" if(args.type.lower() == 'server'): url = "/redfish/v1/Managers/bmc/NetworkProtocol/HTTPS/Certificates/" usage_list = ["ServerAuthentication"] elif(args.type.lower() == 'client'): url = "/redfish/v1/AccountService/LDAP/Certificates/" usage_list = ["ClientAuthentication"] elif(args.type.lower() == 'authority'): url = "/redfish/v1/Managers/bmc/Truststore/Certificates/" usage_list = [] # no key-usage values apply here; without this assignment the KeyUsage field below would raise a NameError print("Generating CSR url=" + url) generateCSRUrl = "https://" + host + \ "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR" try: alt_name_list = args.alternativeNames.split(",") data ={"CertificateCollection":{"@odata.id":url}, "CommonName":args.commonName, "City":args.city, "Country":args.country, "Organization":args.organization, "OrganizationalUnit":args.organizationUnit, "State":args.state, "KeyPairAlgorithm":args.keyPairAlgorithm, "KeyCurveId":args.keyCurveId, "AlternativeNames":alt_name_list, "ContactPerson":args.contactPerson, "Email":args.email, "GivenName":args.givenname, "Initials":args.initials, "KeyUsage":usage_list, "Surname":args.surname, "UnstructuredName":args.unstructuredname} resp = session.post(generateCSRUrl, headers=httpHeader, json=data, verify=False) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if resp.status_code != 200: print(resp.text) return "Failed to generate CSR" else: print("GenerateCSR complete.") return resp.text def enableLDAPConfig(host, args, session): """ Called by the ldap function. Configures LDAP. 
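Programmatic example (an illustrative sketch only; the attribute values are placeholders and must match what the CLI parser elsewhere in this file accepts):
    args = argparse.Namespace(json=False, serverType='OpenLDAP', uri='ldap://ldap.example.com', bindDN='cn=admin,dc=example,dc=com', baseDN='dc=example,dc=com', bindPassword='secret', scope='sub', groupAttrName='gid', userAttrName='uid')
    print(enableLDAPConfig(host, args, session))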
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if(isRedfishSupport): return enableLDAP(host, args, session) else: return enableLegacyLDAP(host, args, session) def enableLegacyLDAP(host, args, session): """ Called by the ldap function. Configures LDAP on legacy systems. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url='https://'+host+'/xyz/openbmc_project/user/ldap/action/CreateConfig' scope = { 'sub' : 'xyz.openbmc_project.User.Ldap.Create.SearchScope.sub', 'one' : 'xyz.openbmc_project.User.Ldap.Create.SearchScope.one', 'base': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.base' } serverType = { 'ActiveDirectory' : 'xyz.openbmc_project.User.Ldap.Create.Type.ActiveDirectory', 'OpenLDAP' : 'xyz.openbmc_project.User.Ldap.Create.Type.OpenLdap' } data = {"data": [args.uri, args.bindDN, args.baseDN, args.bindPassword, scope[args.scope], serverType[args.serverType]]} try: res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def enableLDAP(host, args, session): """ Called by the ldap function. Configures LDAP for systems with the latest user-manager design changes @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ scope = { 'sub' : 'xyz.openbmc_project.User.Ldap.Config.SearchScope.sub', 'one' : 'xyz.openbmc_project.User.Ldap.Config.SearchScope.one', 'base': 'xyz.openbmc_project.User.Ldap.Config.SearchScope.base' } serverType = { 'ActiveDirectory' : 'xyz.openbmc_project.User.Ldap.Config.Type.ActiveDirectory', 'OpenLDAP' : 'xyz.openbmc_project.User.Ldap.Config.Type.OpenLdap' } url = "https://"+host+"/xyz/openbmc_project/user/ldap/" serverTypeEnabled = getLDAPTypeEnabled(host,session) serverTypeToBeEnabled = args.serverType #If the given LDAP type is already enabled, then return if (serverTypeToBeEnabled == serverTypeEnabled): return("Server type " + serverTypeToBeEnabled + " is already enabled...") try: # Copy the role map from the currently enabled LDAP server type # to the newly enabled server type # Disable the currently enabled LDAP server type. 
Unless # it is disabled, we cannot enable a new LDAP server type if (serverTypeEnabled is not None): if (serverTypeToBeEnabled != serverTypeEnabled): res = syncRoleMap(host,args,session,serverTypeEnabled,serverTypeToBeEnabled) data = "{\"data\": 0 }" res = session.put(url + serverTypeMap[serverTypeEnabled] + '/attr/Enabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) data = {"data": args.baseDN} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/LDAPBaseDN', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property LDAPBaseDN failed...") return(res.text) data = {"data": args.bindDN} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/LDAPBindDN', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property LDAPBindDN failed...") return(res.text) data = {"data": args.bindPassword} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/LDAPBindDNPassword', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property LDAPBindDNPassword failed...") return(res.text) data = {"data": scope[args.scope]} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/LDAPSearchScope', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property LDAPSearchScope failed...") return(res.text) data = {"data": args.uri} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/LDAPServerURI', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property LDAPServerURI failed...") return(res.text) data = {"data": args.groupAttrName} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/GroupNameAttribute', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property GroupNameAttribute failed...") return(res.text) data = {"data": args.userAttrName} res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/UserNameAttribute', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) if (res.status_code != requests.codes.ok): print("Updates to the property UserNameAttribute failed...") return(res.text) #After updating the properties, enable the new server type data = "{\"data\": 1 }" res = session.put(url + serverTypeMap[serverTypeToBeEnabled] + '/attr/Enabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def disableLDAP(host, args, session): """ Called by the ldap function. Deletes the LDAP Configuration. 
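Example behavior (a summary of the code below, not a guarantee): on Redfish-capable firmware the enabled server type's role map is first copied to the other server type(s) and that type's Enabled property is then set to 0; on legacy firmware the config object is deleted via .../user/ldap/config/action/delete. Returns the BMC response text.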
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ try: if (isRedfishSupport) : url = "https://"+host+"/xyz/openbmc_project/user/ldap/" serverTypeEnabled = getLDAPTypeEnabled(host,session) if (serverTypeEnabled is not None): #To keep the role map in sync, #If the server type being disabled has role map, then # - copy the role map to the other server type(s) for serverType in serverTypeMap.keys(): if (serverType != serverTypeEnabled): res = syncRoleMap(host,args,session,serverTypeEnabled,serverType) #Disable the currently enabled LDAP server type data = "{\"data\": 0 }" res = session.put(url + serverTypeMap[serverTypeEnabled] + '/attr/Enabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) else: return("LDAP server has not been enabled...") else : url='https://'+host+'/xyz/openbmc_project/user/ldap/config/action/delete' data = {"data": []} res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def enableDHCP(host, args, session): """ Called by the network function. Enables DHCP. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/DHCPEnabled" data = "{\"data\": 1 }" try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "The specified Interface"+"("+args.Interface+")"+\ " doesn't exist" return res.text def disableDHCP(host, args, session): """ Called by the network function. Disables DHCP. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/DHCPEnabled" data = "{\"data\": 0 }" try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "The specified Interface"+"("+args.Interface+")"+\ " doesn't exist" return res.text def getHostname(host, args, session): """ Called by the network function. Prints out the Hostname. 
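Equivalent raw request (illustrative; <bmc> is a placeholder for the BMC hostname or IP): GET https://<bmc>/xyz/openbmc_project/network/config/attr/HostName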
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/config/attr/HostName" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def setHostname(host, args, session): """ Called by the network function. Sets the Hostname. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/config/attr/HostName" data = {"data": args.HostName} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def getDomainName(host, args, session): """ Called by the network function. Prints out the DomainName. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/DomainName" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The DomainName is not configured on Interface"+"("+args.Interface+")" return res.text def setDomainName(host, args, session): """ Called by the network function. Sets the DomainName. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/DomainName" data = {"data": args.DomainName.split(",")} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "Failed to set Domain Name" return res.text def getMACAddress(host, args, session): """ Called by the network function. Prints out the MACAddress. 
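Equivalent raw request (illustrative; <bmc> and <interface> are placeholders for the BMC host and args.Interface): GET https://<bmc>/xyz/openbmc_project/network/<interface>/attr/MACAddress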
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/MACAddress" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "Failed to get MACAddress" return res.text def setMACAddress(host, args, session): """ Called by the network function. Sets the MACAddress. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/"+args.Interface+\ "/attr/MACAddress" data = {"data": args.MACAddress} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "Failed to set MACAddress" return res.text def getDefaultGateway(host, args, session): """ Called by the network function. Prints out the DefaultGateway. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/config/attr/DefaultGateway" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "Failed to get Default Gateway info" return res.text def setDefaultGateway(host, args, session): """ Called by the network function. Sets the DefaultGateway. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/config/attr/DefaultGateway" data = {"data": args.DefaultGW} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "Failed to set Default Gateway" return res.text def viewNWConfig(host, args, session): """ Called by the network function. 
Prints out the configured network properties @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use @return returns the network's configured properties. """ url = "https://"+host+"/xyz/openbmc_project/network/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) if res.status_code == 404: return "Network configuration could not be retrieved" return res.text def getDNS(host, args, session): """ Called by the network function. Prints out DNS servers on the interface @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host + "/xyz/openbmc_project/network/" + args.Interface\ + "/attr/Nameservers" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The NameServer is not configured on Interface"+"("+args.Interface+")" return res.text def setDNS(host, args, session): """ Called by the network function. Sets DNS servers on the interface. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host + "/xyz/openbmc_project/network/" + args.Interface\ + "/attr/Nameservers" data = {"data": args.DNSServers.split(",")} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "Failed to set DNS" return res.text def getNTP(host, args, session): """ Called by the network function. 
Prints out NTP servers on the interface @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host + "/xyz/openbmc_project/network/" + args.Interface\ + "/attr/NTPServers" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The NTPServer is not configured on Interface"+"("+args.Interface+")" return res.text def setNTP(host, args, session): """ Called by the network function. Sets NTP servers on the interface. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host + "/xyz/openbmc_project/network/" + args.Interface\ + "/attr/NTPServers" data = {"data": args.NTPServers.split(",")} try: res = session.put(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 403: return "Failed to set NTP" return res.text def addIP(host, args, session): """ Called by the network function. Configures IP address on given interface @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host + "/xyz/openbmc_project/network/" + args.Interface\ + "/action/IP" protocol = { 'ipv4': 'xyz.openbmc_project.Network.IP.Protocol.IPv4', 'ipv6': 'xyz.openbmc_project.Network.IP.Protocol.IPv6' } data = {"data": [protocol[args.type], args.address, int(args.prefixLength), args.gateway]} try: res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The specified Interface" + "(" + args.Interface + ")" +\ " doesn't exist" return res.text def getIP(host, args, session): """ Called by the network function. 
Prints out IP address of given interface @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host+"/xyz/openbmc_project/network/" + args.Interface +\ "/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The specified Interface" + "(" + args.Interface + ")" +\ " doesn't exist" return res.text def deleteIP(host, args, session): """ Called by the network function. Deletes the IP address from given Interface @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ url = "https://"+host+"/xyz/openbmc_project/network/" + args.Interface+\ "/enumerate" data = {"data": []} try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The specified Interface" + "(" + args.Interface + ")" +\ " doesn't exist" objDict = json.loads(res.text) if not objDict['data']: return "No object found for given address on given Interface" for obj in objDict['data']: try: if args.address in objDict['data'][obj]['Address']: url = "https://"+host+obj+"/action/Delete" try: res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text else: continue except KeyError: continue return "No object found for address " + args.address + \ " on Interface(" + args.Interface + ")" def addVLAN(host, args, session): """ Called by the network function. Creates VLAN on given interface. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host+"/xyz/openbmc_project/network/action/VLAN" data = {"data": [args.Interface,int(args.Identifier)]} try: res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 400: return "Adding VLAN to interface" + "(" + args.Interface + ")" +\ " failed" return res.text def deleteVLAN(host, args, session): """ Called by the network function. Deletes the VLAN from the given interface. 
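Equivalent raw request (illustrative; <bmc> and <vlan-interface> are placeholders for the BMC host and args.Interface): POST https://<bmc>/xyz/openbmc_project/network/<vlan-interface>/action/Delete with body {"data": []}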
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://" + host+"/xyz/openbmc_project/network/"+args.Interface+"/action/Delete" data = {"data": []} try: res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "The specified VLAN"+"("+args.Interface+")" +" doesn't exist" return res.text def viewDHCPConfig(host, args, session): """ Called by the network function. Shows the configured DHCP properties. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url="https://"+host+"/xyz/openbmc_project/network/config/dhcp" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def configureDHCP(host, args, session): """ Called by the network function. Configures/updates DHCP properties. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ try: url="https://"+host+"/xyz/openbmc_project/network/config/dhcp" if(args.DNSEnabled == True): data = '{"data": 1}' else: data = '{"data": 0}' res = session.put(url + '/attr/DNSEnabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) if(args.HostNameEnabled == True): data = '{"data": 1}' else: data = '{"data": 0}' res = session.put(url + '/attr/HostNameEnabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) if(args.NTPEnabled == True): data = '{"data": 1}' else: data = '{"data": 0}' res = session.put(url + '/attr/NTPEnabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) if(args.SendHostNameEnabled == True): data = '{"data": 1}' else: data = '{"data": 0}' res = session.put(url + '/attr/SendHostNameEnabled', headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def nwReset(host, args, session): """ Called by the network function. Resets network settings to factory defaults. 
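Equivalent raw request (illustrative; <bmc> is a placeholder for the BMC hostname or IP): POST https://<bmc>/xyz/openbmc_project/network/action/Reset with body {"data": []}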
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use """ url = "https://"+host+"/xyz/openbmc_project/network/action/Reset" data = '{"data":[] }' try: res = session.post(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def getLDAPTypeEnabled(host,session): """ Called by LDAP related functions to find the LDAP server type that has been enabled. Returns None if LDAP has not been configured. @param host: string, the hostname or IP address of the bmc @param session: the active session to use """ enabled = False url = 'https://'+host+'/xyz/openbmc_project/user/ldap/' # args is not passed into this helper, so connection errors below are reported in plain (non-json) form for key,value in serverTypeMap.items(): data = {"data": []} try: res = session.get(url + value + '/attr/Enabled', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): print(connectionErrHandler(False, "Timeout", None)) return except(requests.exceptions.ConnectionError) as err: print(connectionErrHandler(False, "ConnectionError", err)) return enabled = res.json()['data'] if (enabled): return key def syncRoleMap(host,args,session,fromServerType,toServerType): """ Called by LDAP related functions to sync the role maps Returns None if the source server type has no role map. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param fromServerType : Server type whose role map has to be copied @param toServerType : Server type to which role map has to be copied """ url = "https://"+host+"/xyz/openbmc_project/user/ldap/" try: #Note: If the fromServerType has no role map, then #the toServerType will not have any role map. #delete the privilege mapping from the toServerType and #then copy the privilege mapping from fromServerType to #toServerType. args.serverType = toServerType res = deleteAllPrivilegeMapping(host, args, session) data = {"data": []} res = session.get(url + serverTypeMap[fromServerType] + '/role_map/enumerate', headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) #Previously enabled server type has no role map if (res.status_code != requests.codes.ok): #fromServerType has no role map; So, no need to copy #role map to toServerType. return objDict = json.loads(res.text) dataDict = objDict['data'] for key,value in dataDict.items(): data = {"data": [value["GroupName"], value["Privilege"]]} res = session.post(url + serverTypeMap[toServerType] + '/action/Create', headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def createPrivilegeMapping(host, args, session): """ Called by the ldap function. Creates the group and the privilege mapping. 
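Programmatic example (an illustrative sketch only; the group and privilege values are placeholders that the BMC must accept):
    args = argparse.Namespace(json=False, serverType=None, groupName='admins', privilege='priv-admin')
    print(createPrivilegeMapping(host, args, session))
With serverType=None on Redfish-capable firmware, the mapping is created for the currently enabled LDAP server type.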
@param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ try: if (isRedfishSupport): url = 'https://'+host+'/xyz/openbmc_project/user/ldap/' #To maintain the interface compatibility between op930 and op940, the server type has been made #optional. If the server type is not specified, then create the role-mapper for the currently #enabled server type. serverType = args.serverType if (serverType is None): serverType = getLDAPTypeEnabled(host,session) if (serverType is None): return("LDAP server has not been enabled. Please specify LDAP serverType to proceed further...") data = {"data": [args.groupName,args.privilege]} res = session.post(url + serverTypeMap[serverType] + '/action/Create', headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) else: url = 'https://'+host+'/xyz/openbmc_project/user/ldap/action/Create' data = {"data": [args.groupName,args.privilege]} res = session.post(url, headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def listPrivilegeMapping(host, args, session): """ Called by the ldap function. Lists the group and the privilege mapping. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ if (isRedfishSupport): serverType = args.serverType if (serverType is None): serverType = getLDAPTypeEnabled(host,session) if (serverType is None): return("LDAP has not been enabled. Please specify LDAP serverType to proceed further...") url = 'https://'+host+'/xyz/openbmc_project/user/ldap/'+serverTypeMap[serverType]+'/role_map/enumerate' else: url = 'https://'+host+'/xyz/openbmc_project/user/ldap/enumerate' data = {"data": []} try: res = session.get(url, headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def deletePrivilegeMapping(host, args, session): """ Called by the ldap function. Deletes the mapping associated with the group. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ ldapNameSpaceObjects = listPrivilegeMapping(host, args, session) ldapNameSpaceObjects = json.loads(ldapNameSpaceObjects)["data"] path = '' data = {"data": []} if (isRedfishSupport): if (args.serverType is None): serverType = getLDAPTypeEnabled(host,session) if (serverType is None): return("LDAP has not been enabled. 
Please specify LDAP serverType to proceed further...") # search for the object having the mapping for the given group for key,value in ldapNameSpaceObjects.items(): if value['GroupName'] == args.groupName: path = key break if path == '': return "No privilege mapping found for this group." # delete the object url = 'https://'+host+path+'/action/Delete' else: # not interested in the config object ldapNameSpaceObjects.pop('/xyz/openbmc_project/user/ldap/config', None) # search for the object having the mapping for the given group for key,value in ldapNameSpaceObjects.items(): if value['GroupName'] == args.groupName: path = key break if path == '': return "No privilege mapping found for this group." # delete the object url = 'https://'+host+path+'/action/delete' try: res = session.post(url, headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def deleteAllPrivilegeMapping(host, args, session): """ Called by the ldap function. Deletes all the defined groups and privilege mappings. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption """ ldapNameSpaceObjects = listPrivilegeMapping(host, args, session) ldapNameSpaceObjects = json.loads(ldapNameSpaceObjects)["data"] path = '' data = {"data": []} if (isRedfishSupport): if (args.serverType is None): serverType = getLDAPTypeEnabled(host,session) if (serverType is None): return("LDAP has not been enabled. Please specify LDAP serverType to proceed further...") else: # Remove the config object. ldapNameSpaceObjects.pop('/xyz/openbmc_project/user/ldap/config', None) try: # search for GroupName property and delete if it is available. for path in ldapNameSpaceObjects.keys(): # delete the object url = 'https://'+host+path+'/action/Delete' res = session.post(url, headers=jsonHeader, json = data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) return res.text def viewLDAPConfig(host, args, session): """ Called by the ldap function. Prints out active LDAP configuration properties @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the ldap subcommand args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @param session: the active session to use @return returns LDAP's configured properties. 
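Example (illustrative): on Redfish-capable firmware this performs a GET on .../user/ldap/<enabled-server-type-object> and returns the JSON response text; on legacy firmware it reads .../user/ldap/config.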
""" try: if (isRedfishSupport): url = "https://"+host+"/xyz/openbmc_project/user/ldap/" serverTypeEnabled = getLDAPTypeEnabled(host,session) if (serverTypeEnabled is not None): data = {"data": []} res = session.get(url + serverTypeMap[serverTypeEnabled], headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) else: return("LDAP server has not been enabled...") else : url = "https://"+host+"/xyz/openbmc_project/user/ldap/config" res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) if res.status_code == 404: return "LDAP server config has not been created" return res.text def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def localUsers(host, args, session): """ Enables and disables local BMC users. @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the logging sub command @param session: the active session to use """ url="https://{hostname}/xyz/openbmc_project/user/enumerate".format(hostname=host) try: res = session.get(url, headers=jsonHeader, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) usersDict = json.loads(res.text) if not usersDict['data']: return "No users found" output = "" for user in usersDict['data']: # Skip LDAP and another non-local users if 'UserEnabled' not in usersDict['data'][user]: continue name = user.split('/')[-1] url = "https://{hostname}{user}/attr/UserEnabled".format(hostname=host, user=user) if args.local_users == "queryenabled": try: res = session.get(url, headers=jsonHeader,verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) result = json.loads(res.text) output += ("User: {name} Enabled: {result}\n").format(name=name, result=result['data']) elif args.local_users in ["enableall", "disableall"]: action = "" if args.local_users == "enableall": data = '{"data": true}' action = "Enabling" else: data = '{"data": false}' action = "Disabling" output += "{action} {name}\n".format(action=action, name=name) try: resp = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return connectionErrHandler(args.json, "Timeout", None) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) else: return "Invalid local users argument" return output def setPassword(host, args, session): """ Set local user password @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used by the logging sub command @param session: the active session to use @param args.json: boolean, if this flag is set to true, the output will be provided in json format for programmatic consumption @return: Session object """ try: if(isRedfishSupport): url = "https://" + host + "/redfish/v1/AccountService/Accounts/"+ \ args.user data = {"Password":args.password} res = session.patch(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout) else: url = "https://" + host + "/xyz/openbmc_project/user/" + args.user + \ 
"/action/SetPassword" res = session.post(url, headers=jsonHeader, json={"data": [args.password]}, verify=False, timeout=baseTimeout) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) return res.status_code def getThermalZones(host, args, session): """ Get the available thermal control zones @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used to get the thermal control zones @param session: the active session to use @return: Session object """ url = "https://" + host + "/xyz/openbmc_project/control/thermal/enumerate" try: res = session.get(url, headers=jsonHeader, verify=False, timeout=30) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) if (res.status_code == 404): return "No thermal control zones found" zonesDict = json.loads(res.text) if not zonesDict['data']: return "No thermal control zones found" for zone in zonesDict['data']: z = ",".join(str(zone.split('/')[-1]) for zone in zonesDict['data']) return "Zones: [ " + z + " ]" def getThermalMode(host, args, session): """ Get thermal control mode @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used to get the thermal control mode @param session: the active session to use @param args.zone: the zone to get the mode on @return: Session object """ url = "https://" + host + "/xyz/openbmc_project/control/thermal/" + \ args.zone try: res = session.get(url, headers=jsonHeader, verify=False, timeout=30) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) if (res.status_code == 404): return "Thermal control zone(" + args.zone + ") not found" propsDict = json.loads(res.text) if not propsDict['data']: return "No thermal control properties found on zone(" + args.zone + ")" curMode = "Current" supModes = "Supported" result = "\n" for prop in propsDict['data']: if (prop.casefold() == curMode.casefold()): result += curMode + " Mode: " + propsDict['data'][curMode] + "\n" if (prop.casefold() == supModes.casefold()): s = ", ".join(str(sup) for sup in propsDict['data'][supModes]) result += supModes + " Modes: [ " + s + " ]\n" return result def setThermalMode(host, args, session): """ Set thermal control mode @param host: string, the hostname or IP address of the bmc @param args: contains additional arguments used for setting the thermal control mode @param session: the active session to use @param args.zone: the zone to set the mode on @param args.mode: the mode to enable @return: Session object """ url = "https://" + host + "/xyz/openbmc_project/control/thermal/" + \ args.zone + "/attr/Current" # Check args.mode against supported modes using `getThermalMode` output modes = getThermalMode(host, args, session) modes = os.linesep.join([m for 
m in modes.splitlines() if m]) modes = modes.replace("\n", ";").strip() modesDict = dict(m.split(': ') for m in modes.split(';')) sModes = ''.join(s for s in modesDict['Supported Modes'] if s not in '[ ]') if args.mode.casefold() not in \ (m.casefold() for m in sModes.split(',')) or not args.mode: result = ("Unsupported mode('" + args.mode + "') given, " + "select a supported mode: \n" + getThermalMode(host, args, session)) return result data = '{"data":"' + args.mode + '"}' try: res = session.get(url, headers=jsonHeader, verify=False, timeout=30) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) if (data and res.status_code != 404): try: res = session.put(url, headers=jsonHeader, data=data, verify=False, timeout=30) except(requests.exceptions.Timeout): return(connectionErrHandler(args.json, "Timeout", None)) except(requests.exceptions.ConnectionError) as err: return connectionErrHandler(args.json, "ConnectionError", err) except(requests.exceptions.RequestException) as err: return connectionErrHandler(args.json, "RequestException", err) if res.status_code == 403: return "The specified thermal control zone(" + args.zone + ")" + \ " does not exist" return res.text else: return "Setting thermal control mode(" + args.mode + ")" + \ " not supported or operation not available" def createCommandParser(): """ creates the parser for the command line along with help for each command and subcommand @return: returns the parser for the command line """ parser = argparse.ArgumentParser(description='Process arguments') parser.add_argument("-H", "--host", help='A hostname or IP for the BMC') parser.add_argument("-U", "--user", help='The username to login with') group = parser.add_mutually_exclusive_group() group.add_argument("-A", "--askpw", action='store_true', help='prompt for password') group.add_argument("-P", "--PW", help='Provide the password in-line') group.add_argument("-E", "--PWenvvar", action='store_true', help='Get password from envvar OPENBMCTOOL_PASSWORD') parser.add_argument('-j', '--json', action='store_true', help='output json data only') parser.add_argument('-t', '--policyTableLoc', help='The location of the policy table to parse alerts') parser.add_argument('-c', '--CerFormat', action='store_true', help=argparse.SUPPRESS) parser.add_argument('-T', '--procTime', action='store_true', help= argparse.SUPPRESS) parser.add_argument('-V', '--version', action='store_true', help='Display the version number of the openbmctool') subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands',help="sub-command help", dest='command') #fru command parser_inv = subparsers.add_parser("fru", help='Work with platform inventory') inv_subparser = parser_inv.add_subparsers(title='subcommands', description='valid inventory actions', help="valid inventory actions", dest='command') inv_subparser.required = True #fru print inv_print = inv_subparser.add_parser("print", help="prints out a list of all FRUs") inv_print.set_defaults(func=fruPrint) #fru list [0....n] inv_list = inv_subparser.add_parser("list", help="print out details on selected FRUs. Specifying no items will list the entire inventory") inv_list.add_argument('items', nargs='?', help="print out details on selected FRUs. 
Specifying no items will list the entire inventory") inv_list.set_defaults(func=fruList) #fru status inv_status = inv_subparser.add_parser("status", help="prints out the status of all FRUs") inv_status.add_argument('-v', '--verbose', action='store_true', help='Verbose output') inv_status.set_defaults(func=fruStatus) #sensors command parser_sens = subparsers.add_parser("sensors", help="Work with platform sensors") sens_subparser=parser_sens.add_subparsers(title='subcommands', description='valid sensor actions', help='valid sensor actions', dest='command') sens_subparser.required = True #sensor print sens_print= sens_subparser.add_parser('print', help="prints out a list of all Sensors.") sens_print.set_defaults(func=sensor) #sensor list[0...n] sens_list=sens_subparser.add_parser("list", help="Lists all Sensors in the platform. Specify a sensor for full details. ") sens_list.add_argument("sensNum", nargs='?', help="The Sensor number to get full details on" ) sens_list.set_defaults(func=sensor) #thermal control commands parser_therm = subparsers.add_parser("thermal", help="Work with thermal control parameters") therm_subparser=parser_therm.add_subparsers(title='subcommands', description='Thermal control actions to work with', help='Valid thermal control actions to work with', dest='command') #thermal control zones parser_thermZones = therm_subparser.add_parser("zones", help="Get a list of available thermal control zones") parser_thermZones.set_defaults(func=getThermalZones) #thermal control modes parser_thermMode = therm_subparser.add_parser("modes", help="Work with thermal control modes") thermMode_sub = parser_thermMode.add_subparsers(title='subactions', description='Work with thermal control modes', help="Work with thermal control modes") #get thermal control mode parser_getThermMode = thermMode_sub.add_parser("get", help="Get current and supported thermal control modes") parser_getThermMode.add_argument('-z', '--zone', required=True, help='Thermal zone to work with') parser_getThermMode.set_defaults(func=getThermalMode) #set thermal control mode parser_setThermMode = thermMode_sub.add_parser("set", help="Set the thermal control mode") parser_setThermMode.add_argument('-z', '--zone', required=True, help='Thermal zone to work with') parser_setThermMode.add_argument('-m', '--mode', required=True, help='The supported thermal control mode') parser_setThermMode.set_defaults(func=setThermalMode) #sel command parser_sel = subparsers.add_parser("sel", help="Work with platform alerts") sel_subparser = parser_sel.add_subparsers(title='subcommands', description='valid SEL actions', help = 'valid SEL actions', dest='command') sel_subparser.required = True #sel print sel_print = sel_subparser.add_parser("print", help="prints out a list of all sels in a condensed list") sel_print.add_argument('-d', '--devdebug', action='store_true', help=argparse.SUPPRESS) sel_print.add_argument('-v', '--verbose', action='store_true', help="Changes the output to being very verbose") sel_print.add_argument('-f', '--fileloc', help='Parse a file instead of the BMC output') sel_print.set_defaults(func=selPrint) #sel list sel_list = sel_subparser.add_parser("list", help="Lists all SELs in the platform. 
Specifying a specific number will pull all the details for that individual SEL")
    sel_list.add_argument("selNum", nargs='?', type=int,
                          help="The SEL entry to get details on")
    sel_list.set_defaults(func=selList)

    sel_get = sel_subparser.add_parser("get",
        help="Gets the verbose details of a specified SEL entry")
    sel_get.add_argument('selNum', type=int,
                         help="the number of the SEL entry to get")
    sel_get.set_defaults(func=selList)

    sel_clear = sel_subparser.add_parser("clear",
        help="Clears all entries from the SEL")
    sel_clear.set_defaults(func=selClear)

    sel_setResolved = sel_subparser.add_parser("resolve",
        help="Sets the sel entry to resolved")
    sel_setResolved.add_argument('-n', '--selNum', type=int,
        help="the number of the SEL entry to resolve")
    sel_ResolveAll_sub = sel_setResolved.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    sel_ResolveAll = sel_ResolveAll_sub.add_parser('all',
        help='Resolve all SEL entries')
    sel_ResolveAll.set_defaults(func=selResolveAll)
    sel_setResolved.set_defaults(func=selSetResolved)

    parser_chassis = subparsers.add_parser("chassis",
        help="Work with chassis power and status")
    chas_sub = parser_chassis.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')

    parser_chassis.add_argument('status', action='store_true',
        help='Returns the current status of the platform')
    parser_chassis.set_defaults(func=chassis)

    parser_chasPower = chas_sub.add_parser("power",
        help="Turn the chassis on or off, check the power state")
    parser_chasPower.add_argument('powcmd',
        choices=['on', 'softoff', 'hardoff', 'status'],
        help='The power operation to run: on, softoff, hardoff, or status')
    parser_chasPower.set_defaults(func=chassisPower)

    #control the chassis identify led
    parser_chasIdent = chas_sub.add_parser("identify",
        help="Control the chassis identify led")
    parser_chasIdent.add_argument('identcmd', choices=['on', 'off', 'status'],
        help='The control option for the led: on, off, or status')
    parser_chasIdent.set_defaults(func=chassisIdent)

    #collect service data
    parser_servData = subparsers.add_parser("collect_service_data",
        help="Collect all bmc data needed for service")
    parser_servData.add_argument('-d', '--devdebug', action='store_true',
        help=argparse.SUPPRESS)
    parser_servData.set_defaults(func=collectServiceData)

    #system quick health check
    parser_healthChk = subparsers.add_parser("health_check",
        help="Run a quick health check of the platform")
    parser_healthChk.set_defaults(func=healthCheck)

    #tasks
    parser_tasks = subparsers.add_parser("task", help="Work with tasks")
    tasks_sub = parser_tasks.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    tasks_sub.required = True

    get_Task = tasks_sub.add_parser('get',
        help="Get the status of a task from its Task Monitor URI")
    get_Task.add_argument("-u", "--taskURI", help="Task Monitor URI")
    get_Task.set_defaults(func=getTask)

    #work with dumps
    parser_bmcdump = subparsers.add_parser("dump", help="Work with dumps")
    parser_bmcdump.add_argument("-t", "--dumpType", default='bmc',
        choices=['bmc', 'SystemDump'], help="Type of dump")
    bmcDump_sub = parser_bmcdump.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    bmcDump_sub.required = True

    dump_Create = bmcDump_sub.add_parser('create',
        help="Create a dump of given type")
    dump_Create.set_defaults(func=dumpCreate)

    dump_list = bmcDump_sub.add_parser('list', help="list all dumps")
    dump_list.set_defaults(func=dumpList)

    parserdumpdelete =
bmcDump_sub.add_parser('delete', help="Delete dump")
    parserdumpdelete.add_argument("-n", "--dumpNum", nargs='*', type=int,
        help="The Dump entry to delete")
    parserdumpdelete.set_defaults(func=dumpDelete)

    bmcDumpDelsub = parserdumpdelete.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    deleteAllDumps = bmcDumpDelsub.add_parser('all', help='Delete all dumps')
    deleteAllDumps.set_defaults(func=dumpDeleteAll)

    parser_dumpretrieve = bmcDump_sub.add_parser('retrieve',
        help='Retrieve a dump file')
    parser_dumpretrieve.add_argument("-n", "--dumpNum",
        help="The Dump entry to retrieve")
    parser_dumpretrieve.add_argument("-s", "--dumpSaveLoc",
        help="The location to save the bmc dump file or file path for "
             "system dump")
    parser_dumpretrieve.set_defaults(func=dumpRetrieve)

    #bmc command for resetting the bmc
    parser_bmc = subparsers.add_parser('bmc', help="Work with the bmc")
    bmc_sub = parser_bmc.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    parser_BMCReset = bmc_sub.add_parser('reset', help='Reset the bmc')
    parser_BMCReset.add_argument('type', choices=['warm', 'cold'],
        help="Warm: Reboot the BMC, Cold: CLEAR config and reboot bmc")
    parser_bmc.add_argument('info', action='store_true',
        help="Displays information about the BMC hardware, including device "
             "revision, firmware revision, IPMI version supported, "
             "manufacturer ID, and information on additional device support.")
    parser_bmc.set_defaults(func=bmc)

    #add alias to the bmc command
    parser_mc = subparsers.add_parser('mc',
        help="Work with the management controller")
    mc_sub = parser_mc.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')
    parser_MCReset = mc_sub.add_parser('reset', help='Reset the bmc')
    parser_MCReset.add_argument('type', choices=['warm', 'cold'],
        help="Warm: Reboot the BMC, Cold: CLEAR config and reboot bmc")
    #parser_MCReset.add_argument('cold', action='store_true',
    #    help="Reboot the BMC and CLEAR the configuration")
    parser_mc.add_argument('info', action='store_true',
        help="Displays information about the BMC hardware, including device "
             "revision, firmware revision, IPMI version supported, "
             "manufacturer ID, and information on additional device support.")
    parser_MCReset.set_defaults(func=bmcReset)
    parser_mc.set_defaults(func=bmc)

    #gard clear
    parser_gc = subparsers.add_parser("gardclear",
        help="Used to clear gard records")
    parser_gc.set_defaults(func=gardClear)

    #firmware_flash
    parser_fw = subparsers.add_parser("firmware",
        help="Work with the system firmware")
    fwflash_subproc = parser_fw.add_subparsers(title='subcommands',
        description='valid firmware commands', help='sub-command help',
        dest='command')
    fwflash_subproc.required = True

    fwflash = fwflash_subproc.add_parser('flash',
        help="Flash the system firmware")
    fwflash.add_argument('type', choices=['bmc', 'pnor'],
        help="image type to flash")
    fwflash.add_argument('-f', '--fileloc', required=True,
        help="The absolute path to the firmware image")
    fwflash.set_defaults(func=fwFlash)

    fwActivate = fwflash_subproc.add_parser('activate',
        help="Activate existing image on the bmc")
    fwActivate.add_argument('imageID',
        help="The image ID to activate from the firmware list. 
Ex: 63c95399") fwActivate.set_defaults(func=activateFWImage) fwActivateStatus = fwflash_subproc.add_parser('activation_status', help="Check Status of activations") fwActivateStatus.set_defaults(func=activateStatus) fwList = fwflash_subproc.add_parser('list', help="List all of the installed firmware") fwList.add_argument('-v', '--verbose', action='store_true', help='Verbose output') fwList.set_defaults(func=firmwareList) fwprint = fwflash_subproc.add_parser('print', help="List all of the installed firmware") fwprint.add_argument('-v', '--verbose', action='store_true', help='Verbose output') fwprint.set_defaults(func=firmwareList) fwDelete = fwflash_subproc.add_parser('delete', help="Delete an existing firmware version") fwDelete.add_argument('versionID', help="The version ID to delete from the firmware list. Ex: 63c95399") fwDelete.set_defaults(func=deleteFWVersion) fwDeleteAll = fwflash_subproc.add_parser('deleteAll', help="Delete ALL firmware versions") fwDeleteAll.set_defaults(func=deleteFWAll) #logging parser_logging = subparsers.add_parser("logging", help="logging controls") logging_sub = parser_logging.add_subparsers(title='subcommands', description='valid subcommands',help="sub-command help", dest='command') #turn rest api logging on/off parser_rest_logging = logging_sub.add_parser("rest_api", help="turn rest api logging on/off") parser_rest_logging.add_argument('rest_logging', choices=['on', 'off'], help='The control option for rest logging: on, off') parser_rest_logging.set_defaults(func=restLogging) #remote logging parser_remote_logging = logging_sub.add_parser("remote_logging", help="Remote logging (rsyslog) commands") parser_remote_logging.add_argument('remote_logging', choices=['view', 'disable'], help='Remote logging (rsyslog) commands') parser_remote_logging.set_defaults(func=remoteLogging) #configure remote logging parser_remote_logging_config = logging_sub.add_parser("remote_logging_config", help="Configure remote logging (rsyslog)") parser_remote_logging_config.add_argument("-a", "--address", required=True, help="Set IP address of rsyslog server") parser_remote_logging_config.add_argument("-p", "--port", required=True, type=int, help="Set Port of rsyslog server") parser_remote_logging_config.set_defaults(func=remoteLoggingConfig) #certificate management parser_cert = subparsers.add_parser("certificate", help="Certificate management") certMgmt_subproc = parser_cert.add_subparsers(title='subcommands', description='valid certificate commands', help='sub-command help', dest='command') certUpdate = certMgmt_subproc.add_parser('update', help="Update the certificate") certUpdate.add_argument('type', choices=['server', 'client', 'authority'], help="certificate type to update") certUpdate.add_argument('service', choices=['https', 'ldap'], help="Service to update") certUpdate.add_argument('-f', '--fileloc', required=True, help="The absolute path to the certificate file") certUpdate.set_defaults(func=certificateUpdate) certDelete = certMgmt_subproc.add_parser('delete', help="Delete the certificate") certDelete.add_argument('type', choices=['server', 'client', 'authority'], help="certificate type to delete") certDelete.add_argument('service', choices=['https', 'ldap'], help="Service to delete the certificate") certDelete.set_defaults(func=certificateDelete) certReplace = certMgmt_subproc.add_parser('replace', help="Replace the certificate") certReplace.add_argument('type', choices=['server', 'client', 'authority'], help="certificate type to replace") 
certReplace.add_argument('service', choices=['https', 'ldap'],
        help="Service to replace the certificate")
    certReplace.add_argument('-f', '--fileloc', required=True,
        help="The absolute path to the certificate file")
    certReplace.set_defaults(func=certificateReplace)

    certDisplay = certMgmt_subproc.add_parser('display',
        help="Print the certificate")
    certDisplay.add_argument('type', choices=['server', 'client', 'authority'],
        help="certificate type to display")
    certDisplay.set_defaults(func=certificateDisplay)

    certList = certMgmt_subproc.add_parser('list', help="Certificate list")
    certList.set_defaults(func=certificateList)

    certGenerateCSR = certMgmt_subproc.add_parser('generatecsr',
        help="Generate CSR")
    certGenerateCSR.add_argument('type',
        choices=['server', 'client', 'authority'],
        help="certificate type for which the CSR is generated")
    certGenerateCSR.add_argument('city',
        help="The city or locality of the organization making the request")
    certGenerateCSR.add_argument('commonName',
        help="The fully qualified domain name of the component that is "
             "being secured.")
    certGenerateCSR.add_argument('country',
        help="The country of the organization making the request")
    certGenerateCSR.add_argument('organization',
        help="The name of the organization making the request.")
    certGenerateCSR.add_argument('organizationUnit',
        help="The name of the unit or division of the organization making "
             "the request.")
    certGenerateCSR.add_argument('state',
        help="The state, province, or region of the organization making "
             "the request.")
    certGenerateCSR.add_argument('keyPairAlgorithm', choices=['RSA', 'EC'],
        help="The type of key pair for use with signing algorithms.")
    certGenerateCSR.add_argument('keyCurveId',
        help="The curve ID to be used with the key, if needed based on the "
             "value of the 'KeyPairAlgorithm' parameter.")
    certGenerateCSR.add_argument('contactPerson',
        help="The name of the user making the request")
    certGenerateCSR.add_argument('email',
        help="The email address of the contact within the organization")
    certGenerateCSR.add_argument('alternativeNames',
        help="Additional hostnames of the component that is being secured")
    certGenerateCSR.add_argument('givenname',
        help="The given name of the user making the request")
    certGenerateCSR.add_argument('surname',
        help="The surname of the user making the request")
    certGenerateCSR.add_argument('unstructuredname',
        help="The unstructured name of the subject")
    certGenerateCSR.add_argument('initials',
        help="The initials of the user making the request")
    certGenerateCSR.set_defaults(func=certificateGenerateCSR)

    # local users
    parser_users = subparsers.add_parser("local_users",
        help="Work with local users")
    parser_users.add_argument('local_users',
        choices=['disableall', 'enableall', 'queryenabled'],
        help="Disable, enable or query local user accounts")
    parser_users.add_argument('-v', '--verbose', action='store_true',
        help='Verbose output')
    parser_users.set_defaults(func=localUsers)

    #LDAP
    parser_ldap = subparsers.add_parser("ldap", help="LDAP controls")
    ldap_sub = parser_ldap.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')

    #configure and enable LDAP
    parser_ldap_config = ldap_sub.add_parser("enable",
        help="Configure and enable LDAP")
    parser_ldap_config.add_argument("-a", "--uri", required=True,
        help="Set LDAP server URI")
    parser_ldap_config.add_argument("-B", "--bindDN", required=True,
        help="Set the bind DN of the LDAP server")
    parser_ldap_config.add_argument("-b", "--baseDN", required=True,
        help="Set the base DN of the LDAP server")
parser_ldap_config.add_argument("-p", "--bindPassword", required=True, help="Set the bind password of the LDAP server") parser_ldap_config.add_argument("-S", "--scope", choices=['sub','one', 'base'], help='Specifies the search scope:subtree, one level or base object.') parser_ldap_config.add_argument("-t", "--serverType", required=True, choices=['ActiveDirectory','OpenLDAP'], help='Specifies the configured server is ActiveDirectory(AD) or OpenLdap') parser_ldap_config.add_argument("-g","--groupAttrName", required=False, default='', help="Group Attribute Name") parser_ldap_config.add_argument("-u","--userAttrName", required=False, default='', help="User Attribute Name") parser_ldap_config.set_defaults(func=enableLDAPConfig) # disable LDAP parser_disable_ldap = ldap_sub.add_parser("disable", help="disables the LDAP") parser_disable_ldap.set_defaults(func=disableLDAP) # view-config parser_ldap_config = \ ldap_sub.add_parser("view-config", help="prints out a list of all \ LDAPS's configured properties") parser_ldap_config.set_defaults(func=viewLDAPConfig) #create group privilege mapping parser_ldap_mapper = ldap_sub.add_parser("privilege-mapper", help="LDAP group privilege controls") parser_ldap_mapper_sub = parser_ldap_mapper.add_subparsers(title='subcommands', description='valid subcommands', help="sub-command help", dest='command') parser_ldap_mapper_create = parser_ldap_mapper_sub.add_parser("create", help="Create mapping of ldap group and privilege") parser_ldap_mapper_create.add_argument("-t", "--serverType", choices=['ActiveDirectory','OpenLDAP'], help='Specifies the configured server is ActiveDirectory(AD) or OpenLdap') parser_ldap_mapper_create.add_argument("-g","--groupName",required=True,help="Group Name") parser_ldap_mapper_create.add_argument("-p","--privilege",choices=['priv-admin','priv-operator','priv-user','priv-callback'],required=True,help="Privilege") parser_ldap_mapper_create.set_defaults(func=createPrivilegeMapping) #list group privilege mapping parser_ldap_mapper_list = parser_ldap_mapper_sub.add_parser("list",help="List privilege mapping") parser_ldap_mapper_list.add_argument("-t", "--serverType", choices=['ActiveDirectory','OpenLDAP'], help='Specifies the configured server is ActiveDirectory(AD) or OpenLdap') parser_ldap_mapper_list.set_defaults(func=listPrivilegeMapping) #delete group privilege mapping parser_ldap_mapper_delete = parser_ldap_mapper_sub.add_parser("delete",help="Delete privilege mapping") parser_ldap_mapper_delete.add_argument("-t", "--serverType", choices=['ActiveDirectory','OpenLDAP'], help='Specifies the configured server is ActiveDirectory(AD) or OpenLdap') parser_ldap_mapper_delete.add_argument("-g","--groupName",required=True,help="Group Name") parser_ldap_mapper_delete.set_defaults(func=deletePrivilegeMapping) #deleteAll group privilege mapping parser_ldap_mapper_delete = parser_ldap_mapper_sub.add_parser("purge",help="Delete All privilege mapping") parser_ldap_mapper_delete.add_argument("-t", "--serverType", choices=['ActiveDirectory','OpenLDAP'], help='Specifies the configured server is ActiveDirectory(AD) or OpenLdap') parser_ldap_mapper_delete.set_defaults(func=deleteAllPrivilegeMapping) # set local user password parser_set_password = subparsers.add_parser("set_password", help="Set password of local user") parser_set_password.add_argument( "-p", "--password", required=True, help="Password of local user") parser_set_password.set_defaults(func=setPassword) # network parser_nw = subparsers.add_parser("network", help="network controls") nw_sub = 
parser_nw.add_subparsers(title='subcommands',
        description='valid subcommands', help="sub-command help",
        dest='command')

    # enable DHCP
    parser_enable_dhcp = nw_sub.add_parser("enableDHCP",
        help="enables DHCP on the given interface")
    parser_enable_dhcp.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_enable_dhcp.set_defaults(func=enableDHCP)

    # disable DHCP
    parser_disable_dhcp = nw_sub.add_parser("disableDHCP",
        help="disables DHCP on the given interface")
    parser_disable_dhcp.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_disable_dhcp.set_defaults(func=disableDHCP)

    # get HostName
    parser_gethostname = nw_sub.add_parser("getHostName",
        help="prints out HostName")
    parser_gethostname.set_defaults(func=getHostname)

    # set HostName
    parser_sethostname = nw_sub.add_parser("setHostName", help="sets HostName")
    parser_sethostname.add_argument("-H", "--HostName", required=True,
        help="A HostName for the BMC")
    parser_sethostname.set_defaults(func=setHostname)

    # get domainname
    parser_getdomainname = nw_sub.add_parser("getDomainName",
        help="prints out the DomainName of the given interface")
    parser_getdomainname.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_getdomainname.set_defaults(func=getDomainName)

    # set domainname
    parser_setdomainname = nw_sub.add_parser("setDomainName",
        help="sets the DomainName of the given interface")
    parser_setdomainname.add_argument("-D", "--DomainName", required=True,
        help="Ex: DomainName=Domain1,Domain2,...")
    parser_setdomainname.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_setdomainname.set_defaults(func=setDomainName)

    # get MACAddress
    parser_getmacaddress = nw_sub.add_parser("getMACAddress",
        help="prints out the MACAddress of the given interface")
    parser_getmacaddress.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_getmacaddress.set_defaults(func=getMACAddress)

    # set MACAddress
    parser_setmacaddress = nw_sub.add_parser("setMACAddress",
        help="sets MACAddress")
    parser_setmacaddress.add_argument("-MA", "--MACAddress", required=True,
        help="A MACAddress for the given interface")
    parser_setmacaddress.add_argument("-I", "--Interface", required=True,
        help="Name of the ethernet interface (it can "
             "be obtained by the "
             "command: network view-config) "
             "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)")
    parser_setmacaddress.set_defaults(func=setMACAddress)

    # get DefaultGW
    parser_getdefaultgw = nw_sub.add_parser("getDefaultGW",
        help="prints out the DefaultGateway of the BMC")
    parser_getdefaultgw.set_defaults(func=getDefaultGateway)

    # set DefaultGW
    parser_setdefaultgw = nw_sub.add_parser("setDefaultGW",
        help="sets DefaultGW")
    parser_setdefaultgw.add_argument("-GW", "--DefaultGW", required=True,
        help="A DefaultGateway for the BMC")
    parser_setdefaultgw.set_defaults(func=setDefaultGateway)

    # view network Config
    parser_ldap_config =
nw_sub.add_parser("view-config", help="prints out a " "list of all network's configured " "properties") parser_ldap_config.set_defaults(func=viewNWConfig) # get DNS parser_getDNS = nw_sub.add_parser("getDNS", help="prints out DNS servers on the " "given interface") parser_getDNS.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_getDNS.set_defaults(func=getDNS) # set DNS parser_setDNS = nw_sub.add_parser("setDNS", help="sets DNS servers on the given " "interface") parser_setDNS.add_argument("-d", "--DNSServers", required=True, help="Ex: DNSSERVERS=DNS1,DNS2,...") parser_setDNS.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_setDNS.set_defaults(func=setDNS) # get NTP parser_getNTP = nw_sub.add_parser("getNTP", help="prints out NTP servers on the " "given interface") parser_getNTP.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_getNTP.set_defaults(func=getNTP) # set NTP parser_setNTP = nw_sub.add_parser("setNTP", help="sets NTP servers on the given " "interface") parser_setNTP.add_argument("-N", "--NTPServers", required=True, help="Ex: NTPSERVERS=NTP1,NTP2,...") parser_setNTP.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_setNTP.set_defaults(func=setNTP) # configure IP parser_ip_config = nw_sub.add_parser("addIP", help="Sets IP address to" "given interface") parser_ip_config.add_argument("-a", "--address", required=True, help="IP address of given interface") parser_ip_config.add_argument("-gw", "--gateway", required=False, default='', help="The gateway for given interface") parser_ip_config.add_argument("-l", "--prefixLength", required=True, help="The prefixLength of IP address") parser_ip_config.add_argument("-p", "--type", required=True, choices=['ipv4', 'ipv6'], help="The protocol type of the given" "IP address") parser_ip_config.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_ip_config.set_defaults(func=addIP) # getIP parser_getIP = nw_sub.add_parser("getIP", help="prints out IP address" "of given interface") parser_getIP.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_getIP.set_defaults(func=getIP) # rmIP parser_rmIP = nw_sub.add_parser("rmIP", help="deletes IP address" "of given interface") parser_rmIP.add_argument("-a", "--address", required=True, help="IP address to remove form given Interface") parser_rmIP.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_rmIP.set_defaults(func=deleteIP) # add VLAN parser_create_vlan = nw_sub.add_parser("addVLAN", help="enables VLAN " "on given interface with given " "VLAN Identifier") 
parser_create_vlan.add_argument("-I", "--Interface", required=True, choices=['eth0', 'eth1'], help="Name of the ethernet interface") parser_create_vlan.add_argument("-n", "--Identifier", required=True, help="VLAN Identifier") parser_create_vlan.set_defaults(func=addVLAN) # delete VLAN parser_delete_vlan = nw_sub.add_parser("deleteVLAN", help="disables VLAN " "on given interface with given " "VLAN Identifier") parser_delete_vlan.add_argument("-I", "--Interface", required=True, help="Name of the ethernet interface(it can" "be obtained by the " "command:network view-config)" "Ex: eth0 or eth1 or VLAN(VLAN=eth0_50 etc)") parser_delete_vlan.set_defaults(func=deleteVLAN) # viewDHCPConfig parser_viewDHCPConfig = nw_sub.add_parser("viewDHCPConfig", help="Shows DHCP configured " "Properties") parser_viewDHCPConfig.set_defaults(func=viewDHCPConfig) # configureDHCP parser_configDHCP = nw_sub.add_parser("configureDHCP", help="Configures/updates DHCP " "Properties") parser_configDHCP.add_argument("-d", "--DNSEnabled", type=str2bool, required=True, help="Sets DNSEnabled property") parser_configDHCP.add_argument("-n", "--HostNameEnabled", type=str2bool, required=True, help="Sets HostNameEnabled property") parser_configDHCP.add_argument("-t", "--NTPEnabled", type=str2bool, required=True, help="Sets NTPEnabled property") parser_configDHCP.add_argument("-s", "--SendHostNameEnabled", type=str2bool, required=True, help="Sets SendHostNameEnabled property") parser_configDHCP.set_defaults(func=configureDHCP) # network factory reset parser_nw_reset = nw_sub.add_parser("nwReset", help="Resets networks setting to " "factory defaults. " "note:Reset settings will be applied " "after BMC reboot") parser_nw_reset.set_defaults(func=nwReset) return parser def main(argv=None): """ main function for running the command line utility as a sub application """ global toolVersion toolVersion = "1.19" global isRedfishSupport parser = createCommandParser() args = parser.parse_args(argv) totTimeStart = int(round(time.time()*1000)) if(sys.version_info < (3,0)): urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) if sys.version_info >= (3,0): requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning) if (args.version): print("Version: "+ toolVersion) sys.exit(0) if (hasattr(args, 'fileloc') and args.fileloc is not None and 'print' in args.command): mysess = None print(selPrint('N/A', args, mysess)) else: if(hasattr(args, 'host') and hasattr(args,'user')): if (args.askpw): pw = getpass.getpass() elif(args.PW is not None): pw = args.PW elif(args.PWenvvar): pw = os.environ['OPENBMCTOOL_PASSWORD'] else: print("You must specify a password") sys.exit() logintimeStart = int(round(time.time()*1000)) mysess = login(args.host, args.user, pw, args.json, args.command == 'set_password') if(mysess == None): print("Login Failed!") sys.exit() if(sys.version_info < (3,0)): if isinstance(mysess, basestring): print(mysess) sys.exit(1) elif sys.version_info >= (3,0): if isinstance(mysess, str): print(mysess) sys.exit(1) logintimeStop = int(round(time.time()*1000)) isRedfishSupport = redfishSupportPresent(args.host,mysess) commandTimeStart = int(round(time.time()*1000)) output = args.func(args.host, args, mysess) commandTimeStop = int(round(time.time()*1000)) if isinstance(output, dict): print(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)) else: print(output) if (mysess is not None): logout(args.host, args.user, pw, mysess, args.json) 
if(args.procTime): print("Total time: " + str(int(round(time.time()*1000))- totTimeStart)) print("loginTime: " + str(logintimeStop - logintimeStart)) print("command Time: " + str(commandTimeStop - commandTimeStart)) else: print("usage:\n" " OPENBMCTOOL_PASSWORD=secret # if using -E\n" " openbmctool.py [-h] -H HOST -U USER {-A | -P PW | -E} [-j]\n" + "\t[-t POLICYTABLELOC] [-V]\n" + "\t{fru,sensors,sel,chassis,collect_service_data, \ health_check,dump,bmc,mc,gardclear,firmware,logging}\n" + "\t...\n" + "openbmctool.py: error: the following arguments are required: -H/--host, -U/--user") sys.exit() if __name__ == '__main__': """ main function when called from the command line """ import sys isTTY = sys.stdout.isatty() assert sys.version_info >= (2,7) main()
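# ---------------------------------------------------------------------------
# Editor's note: a minimal, self-contained sketch of the dispatch pattern the
# tool above relies on -- every subparser binds its handler through
# set_defaults(func=...) and main() simply calls args.func(...). The 'demo'
# subcommand and its handler are illustrative only, not part of openbmctool.
# ---------------------------------------------------------------------------
import argparse

def demoHandler(host, args):
    # Hypothetical handler; the real handlers also receive an authenticated
    # session as a third argument.
    return "ran demo against " + host

def buildDemoParser():
    p = argparse.ArgumentParser(description='dispatch sketch')
    p.add_argument('-H', '--host', required=True)
    subs = p.add_subparsers(dest='command')
    subs.required = True
    demo = subs.add_parser('demo')
    demo.set_defaults(func=demoHandler)
    return p

demoArgs = buildDemoParser().parse_args(['-H', 'mybmc', 'demo'])
print(demoArgs.func(demoArgs.host, demoArgs))  # -> ran demo against mybmc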
45.555001
244
0.623148
4a0799a82dfbc8f8d048a6622c159e0ca6ee1f96
1,471
py
Python
microhttp_restful/tests/helpers/testcases.py
meyt/microhttp-restful
68be4cd882fb0f5eabc18a9b5b9b3aaf3239d8a7
[ "MIT" ]
1
2018-09-26T08:56:13.000Z
2018-09-26T08:56:13.000Z
microhttp_restful/tests/helpers/testcases.py
meyt/microhttp-restful
68be4cd882fb0f5eabc18a9b5b9b3aaf3239d8a7
[ "MIT" ]
13
2017-11-08T14:05:56.000Z
2019-01-31T12:11:31.000Z
microhttp_restful/tests/helpers/testcases.py
meyt/microhttp-restful
68be4cd882fb0f5eabc18a9b5b9b3aaf3239d8a7
[ "MIT" ]
null
null
null
from unittest import TestCase from webtest import TestApp from microhttp.ext import db from microhttp_restful.tests.helpers import MockApplication, DeclarativeBase class WebTestMetaDataMixin: def metadata(self, url, params='', headers=None, extra_environ=None, status=None, upload_files=None, expect_errors=False, content_type=None): # noinspection PyUnresolvedReferences return self._gen_request('METADATA', url, params=params, headers=headers, extra_environ=extra_environ, status=status, upload_files=upload_files, expect_errors=expect_errors, content_type=content_type) class WebTestApp(TestApp, WebTestMetaDataMixin): pass class WebAppTestCase(TestCase): application = None session = None @classmethod def setUpClass(cls): super().setUpClass() cls.application = MockApplication() cls.application.configure(force=True) cls.wsgi_app = WebTestApp(cls.application, lint=False) cls.session = db.get_session() DeclarativeBase.metadata.create_all(bind=cls.session.get_bind()) @classmethod def tearDownClass(cls): super().tearDownClass() cls.session.close() cls.session.get_bind().dispose() with db.get_database_manager() as manager: manager.drop_database()
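# Editor's note: a hedged usage sketch for the METADATA helper defined above.
# It assumes a concrete test case subclassing WebAppTestCase and a route in
# the mock application that answers the non-standard METADATA verb; the
# '/members' URL and the wildcard status are illustrative.
class MetadataSmokeTestCase(WebAppTestCase):

    def test_metadata_verb(self):
        # metadata() behaves like webtest's get()/post(), but issues a request
        # whose HTTP method is METADATA (see WebTestMetaDataMixin above).
        response = self.wsgi_app.metadata('/members', status='*')
        self.assertIsNotNone(response)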
31.978261
81
0.647179
4a0799c1f3d951add5436af44bd338174e1eeb4b
1,593
py
Python
bayesnet/math/product.py
ctgk/bayes
96eab9305eaeecc5a5b032cdf92a8285de4f60bf
[ "MIT" ]
21
2019-01-08T05:58:41.000Z
2021-11-26T14:24:11.000Z
bayesnet/math/product.py
ctgk/bayes
96eab9305eaeecc5a5b032cdf92a8285de4f60bf
[ "MIT" ]
null
null
null
bayesnet/math/product.py
ctgk/bayes
96eab9305eaeecc5a5b032cdf92a8285de4f60bf
[ "MIT" ]
11
2019-05-04T13:44:19.000Z
2021-08-05T04:26:19.000Z
import numpy as np from bayesnet.tensor.constant import Constant from bayesnet.tensor.tensor import Tensor from bayesnet.function import Function class Product(Function): def __init__(self, axis=None, keepdims=False): if isinstance(axis, int): axis = (axis,) elif isinstance(axis, tuple): axis = tuple(sorted(axis)) self.axis = axis self.keepdims = keepdims def forward(self, x): x = self._convert2tensor(x) self.x = x self.output = np.prod(self.x.value, axis=self.axis, keepdims=True) if not self.keepdims: output = np.squeeze(self.output) if output.size == 1: output = output.item() else: output = self.output if isinstance(self.x, Constant): return Constant(output) return Tensor(output, function=self) def backward(self, delta): if not self.keepdims and self.axis is not None: for ax in self.axis: delta = np.expand_dims(delta, ax) dx = delta * self.output / self.x.value self.x.backward(dx) def prod(x, axis=None, keepdims=False): """ product of all element in the array Parameters ---------- x : tensor_like input array axis : int, tuple of ints axis or axes along which a product is performed keepdims : bool keep dimensionality or not Returns ------- product : tensor_like product of all element """ return Product(axis=axis, keepdims=keepdims).forward(x)
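# Editor's note: a pure-numpy sanity check of the math implemented above,
# kept independent of the bayesnet API: Product.backward computes
# d(prod(x))/dx_i = delta * prod(x) / x_i.
import numpy as np

x = np.array([2.0, 3.0, 4.0])
out = np.prod(x)         # 24.0, what Product.forward computes
grad = 1.0 * out / x     # what Product.backward computes for delta = 1
print(out, grad)         # 24.0 [12.  8.  6.]
# (note: this division-based gradient is undefined when an element is 0)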
27.465517
74
0.596359
4a079a4908d51557fe9413ce63f9c0dbfea53926
3,691
py
Python
tensorlayer/lazy_imports.py
Howdy-Personally/tensorlayer-master
bb92e4e187419d5e7ded8331d5c7cbf5615ee744
[ "Apache-2.0" ]
4,484
2017-12-27T03:28:35.000Z
2021-12-02T14:42:58.000Z
tensorlayer/lazy_imports.py
Mesica/tensorlayer
c5def14c4d66d150863f975d9001a5e1891d003f
[ "Apache-2.0" ]
549
2017-12-28T07:19:52.000Z
2021-11-05T02:34:20.000Z
tensorlayer/lazy_imports.py
Mesica/tensorlayer
c5def14c4d66d150863f975d9001a5e1891d003f
[ "Apache-2.0" ]
1,076
2017-12-27T12:25:46.000Z
2021-11-24T09:12:36.000Z
#! /usr/bin/python # -*- coding: utf-8 -*- """This module provides lazy import functionality to improve the import performance of nitime. For example, some parts of nitime leverage and import matplotlib, which is quite a big package, yet most of the nitime code does not depend on matplotlib. By lazily-loading a module, we defer the overhead of importing it until the first time it is actually used, thereby speeding up nitime imports. A generic :class:`LazyImport` class is implemented which takes the module name as a parameter, and acts as a proxy for that module, importing it only when the module is used, but effectively acting as the module in every other way (including inside IPython with respect to introspection and tab completion) with the *exception* of reload() - reloading a :class:`LazyImport` raises an :class:`ImportError`. Commonly used nitime lazy imports are also defined in :mod:`nitime.lazy`, so they can be reused throughout nitime. """ import os import sys import types class LazyImport(types.ModuleType): """ This class takes the module name as a parameter, and acts as a proxy for that module, importing it only when the module is used, but effectively acting as the module in every other way (including inside IPython with respect to introspection and tab completion) with the *exception* of reload()- reloading a :class:`LazyImport` raises an :class:`ImportError`. >>> mlab = LazyImport('matplotlib.mlab') No import happens on the above line, until we do something like call an ``mlab`` method or try to do tab completion or introspection on ``mlab`` in IPython. >>> mlab <module 'matplotlib.mlab' will be lazily loaded> Now the :class:`LazyImport` will do an actual import, and call the dist function of the imported module. >>> mlab.dist(1969,2011) 42.0 """ def __getattribute__(self, x): # This method will be called only once, since we'll change # self.__class__ to LoadedLazyImport, and __getattribute__ will point # to module.__getattribute__ name = object.__getattribute__(self, '__name__') __import__(name) # if name above is 'package.foo.bar', package is returned, the docs # recommend that in order to get back the full thing, that we import # and then lookup the full name is sys.modules, see: # http://docs.python.org/library/functions.html#__import__ module = sys.modules[name] # Now that we've done the import, cutout the middleman and make self # act as the imported module class LoadedLazyImport(types.ModuleType): __getattribute__ = module.__getattribute__ __repr__ = module.__repr__ object.__setattr__(self, '__class__', LoadedLazyImport) # The next line will make "reload(l)" a silent no-op return module.__getattribute__(x) def __repr__(self): return "<module '%s' will be lazily loaded>" % object.__getattribute__(self, '__name__') if 'READTHEDOCS' in os.environ: lazy_doc = """ WARNING: To get Sphinx documentation to build we disable LazyImports, which makes Sphinx incorrectly report this class as having a base class of object. In reality, :class:`LazyImport`'s base class is :class:`types.ModuleType`. """ lazy_doc += LazyImport.__doc__ class LazyImport(object): __doc__ = lazy_doc def __init__(self, x): __import__(x) self.module = sys.modules[x] def __getattr__(self, x): return self.module.__getattribute__(x)
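# Editor's note: a small usage sketch for LazyImport above; 'json' stands in
# for any heavy module, nothing tensorlayer-specific is assumed.
lazy_json = LazyImport("json")
print(repr(lazy_json))             # "<module 'json' will be lazily loaded>"
print(lazy_json.dumps({"a": 1}))   # first attribute access performs the import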
36.91
96
0.686806
4a079a506f19e19b6a637e6a3c37c667ec774665
2,493
py
Python
code-files/frosch2010_CC_language.py
Frosch2010/discord-color-cards
0669e4aa9c73f8db9e148c88dad85c44889b3216
[ "MIT" ]
1
2021-04-02T19:24:09.000Z
2021-04-02T19:24:09.000Z
code-files/frosch2010_CC_language.py
Frosch2010/discord-color-cards
0669e4aa9c73f8db9e148c88dad85c44889b3216
[ "MIT" ]
1
2021-04-03T12:50:12.000Z
2021-04-05T21:47:15.000Z
code-files/frosch2010_CC_language.py
Frosch2010/discord-color-cards
0669e4aa9c73f8db9e148c88dad85c44889b3216
[ "MIT" ]
null
null
null
class cc_language: #General cc_wrong_arguments = "" cc_wrong_game_command = "" cc_shutdown_bot = "" #Game cc_game_already_running = "" cc_cards_per_player_set_to = "" cc_no_game_running = "" cc_user_already_joined = "" cc_user_joined_game = "" cc_more_players_needed = "" cc_user_started_game = "" cc_user_not_part = "" cc_player_won = "" cc_user_leave_no_part = "" cc_game_end_because_user_left = "" cc_user_left = "" cc_user_cant_leave_his_turn = "" cc_user_no_turn = "" cc_card_not_exist = "" cc_user_cant_lay_card = "" cc_user_your_turn = "" cc_wish_without_color = "" cc_wish_unknown_color = "" cc_input_only_numbers = "" cc_input_no_number_arg = "" cc_game_stopped_by = "" cc_game_cant_stopped = "" cc_game_player_has_cc = "" cc_game_player_can_lay = "" cc_game_player_cant_lay = "" cc_please_choose_wish_color_react = "" cc_please_choose_card_color_react = "" cc_please_choose_card_num_react = "" cc_false_choose_color_react = "" cc_false_choose_number_react = "" cc_no_kick_user = "" cc_kick_user_isnt_player = "" cc_cant_kick_current_player = "" cc_user_kicked = "" cc_suspend_player_cant_lay_direct_chat = "" cc_suspend_player_cant_lay = "" cc_suspend_player_false_card = "" cc_suspend_player_must_counter = "" cc_suspend_player_counter_cant_get_new_cards = "" cc_suspend_player_cant_get_new_cards = "" cc_suspend_player_want_sit_out = "" cc_suspend_player_cant_sit_out = "" cc_suspend_player_cant_skip = "" cc_plus_card_player_can_lay = "" cc_plus_card_player_cant_lay = "" cc_plus_card_player_lay_false_card = "" cc_plus_card_player_cant_lay_false_card = "" cc_plus_card_player_cant_take = "" cc_plus_card_player_take = "" cc_plus_card_player_cant_skip = "" cc_plus_card_player_cant_get_new_cards = "" cc_plus_card_player_counter_cant_get_new_cards = "" #Generate card-str cc_timer_action_sit_out = "" cc_timer_action_take_plus_cards = "" cc_your_cards = "" cc_current_mid_card = "" cc_player_sequence = "" cc_players_turn = "" cc_player_laid_card = "" cc_player_picked_up_card = "" #Voice cc_voice_players_turn = "" cc_voice_player_won = "" cc_voice_player_sit_out = ""
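# Editor's note: a hedged sketch of how this template is meant to be consumed:
# a locale subclasses cc_language and fills in the message strings. The
# English texts and the '{0}' placeholder convention are illustrative
# assumptions, not taken from the bot's real locale files.
class cc_language_en(cc_language):
    cc_no_game_running = "There is no game running right now."
    cc_user_joined_game = "{0} joined the game!"
    cc_player_won = "{0} won the game!"

lang = cc_language_en()
print(lang.cc_user_joined_game.format("Alice"))  # -> Alice joined the game!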
25.438776
56
0.669073
4a079b233a3899a6940acf43a1dc2786fa6318c0
377
py
Python
Simulation_Settings.py
alpertucanberk/2D_Genetic_Algorithm_Agents
786aae62618d5f0291e89ea825919d4e1bbab694
[ "MIT" ]
null
null
null
Simulation_Settings.py
alpertucanberk/2D_Genetic_Algorithm_Agents
786aae62618d5f0291e89ea825919d4e1bbab694
[ "MIT" ]
null
null
null
Simulation_Settings.py
alpertucanberk/2D_Genetic_Algorithm_Agents
786aae62618d5f0291e89ea825919d4e1bbab694
[ "MIT" ]
null
null
null
from Neural_Network import forward_propagation NUM_STEPS_PER_GAME = 200 SCREEN_WIDTH = 250 SCREEN_HEIGHT = 250 WALL_THICKNESS = 5 AGENT_SIZE = 2 FOOD_SIZE = 1 FOOD_DENSITY = 12 MIN_AGENT_DIST = 5 LAYER_SIZES = [4] NEURAL_NETWORK = forward_propagation MUTATION_RATE = 3 NUM_GENES_MUTATED = 5 num_individuals_per_pop = 20 num_generations = 200 num_parents_mating = 10
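# Editor's note: an illustrative sketch (not code from this repo) of how a
# settings module like the one above is typically consumed; the spawn helper
# below is hypothetical and uses a naive retry loop, which is fine for small
# populations relative to the arena size.
import random

def spawn_positions(n, width=SCREEN_WIDTH, height=SCREEN_HEIGHT,
                    wall=WALL_THICKNESS, min_dist=MIN_AGENT_DIST):
    """Place n agents inside the walled arena, at least min_dist apart."""
    points = []
    while len(points) < n:
        p = (random.uniform(wall, width - wall),
             random.uniform(wall, height - wall))
        if all((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2 >= min_dist ** 2
               for q in points):
            points.append(p)
    return points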
13.464286
46
0.795756
4a079e19531e3ad878e98335cea8cd0e0d6ef857
1,948
py
Python
gpvdm_gui/gui/inp_util.py
roderickmackenzie/gpvdm
914fd2ee93e7202339853acaec1d61d59b789987
[ "BSD-3-Clause" ]
12
2016-09-13T08:58:13.000Z
2022-01-17T07:04:52.000Z
gpvdm_gui/gui/inp_util.py
roderickmackenzie/gpvdm
914fd2ee93e7202339853acaec1d61d59b789987
[ "BSD-3-Clause" ]
3
2017-11-11T12:33:02.000Z
2019-03-08T00:48:08.000Z
gpvdm_gui/gui/inp_util.py
roderickmackenzie/gpvdm
914fd2ee93e7202339853acaec1d61d59b789987
[ "BSD-3-Clause" ]
6
2019-01-03T06:17:12.000Z
2022-01-01T15:59:00.000Z
#
#   General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
#   model for 1st, 2nd and 3rd generation solar cells.
#   Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
#   https://www.gpvdm.com
#
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License v2.0, as published by
#   the Free Software Foundation.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License along
#   with this program; if not, write to the Free Software Foundation, Inc.,
#   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

## @package inp_util
#  Utility functions for inp files; these functions should not touch the disk.
#

def inp_file_to_list(lines):
	sub_items=[]
	items=[]
	for l in lines:
		if l.startswith("#") and len(sub_items)!=0:
			items.append(sub_items)
			sub_items=[]

		if l=="#end" or l=="#ver":
			break

		sub_items.append(l)

	return items

def inp_get_all_tokens(lines):
	ret=[]
	for l in lines:
		if l.startswith("#"):
			if l!="#end" and l!="#ver":
				ret.append(l)
	return ret

def inp_search_token_value_multiline(lines, token):
	ret=[]
	for i in range(0, len(lines)):
		if lines[i]==token:
			pos=i+1
			while (lines[pos][0]!="#"):
				ret.append(lines[pos])
				pos=pos+1
			return ret

	return False

def inp_check_ver(file_path, ver):
	"""Check ver of file"""
	# NOTE: relies on inp_load_file (provided elsewhere in gpvdm), which does
	# read from disk; it must be imported/available wherever this is called.
	lines=inp_load_file(file_path)
	if lines==False:
		return False

	for i in range(0, len(lines)):
		if lines[i]=="#ver":
			if len(lines)>i+2:
				if lines[i+1]==ver:
					if lines[i+2]=="#end":
						return True
			return False

	return False
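# Editor's note: a self-contained demonstration of the '#token' / value /
# '#end' format the helpers above parse; the token names are made up for the
# example.
demo_lines = [
    "#Tx0_mue",
    "1e-5",
    "#Tx0_layers",
    "glass",
    "ito",
    "#end",
]
print(inp_get_all_tokens(demo_lines))
# -> ['#Tx0_mue', '#Tx0_layers']
print(inp_search_token_value_multiline(demo_lines, "#Tx0_layers"))
# -> ['glass', 'ito']
print(inp_file_to_list(demo_lines))
# -> [['#Tx0_mue', '1e-5'], ['#Tx0_layers', 'glass', 'ito']]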
24.658228
89
0.680698
4a079e5634b5dfa1d0f96060237febbd32a708e3
1,764
py
Python
Code/gov/spiders/gov_spider.py
GurKirat21/Profile-Exposer
ecbf517d4dd8932829f21fb516e81acea3e9daf9
[ "Apache-2.0" ]
8
2020-09-30T20:03:09.000Z
2020-10-25T10:23:54.000Z
Code/gov/spiders/gov_spider.py
GurKirat21/Profile-Exposer
ecbf517d4dd8932829f21fb516e81acea3e9daf9
[ "Apache-2.0" ]
1
2020-10-04T11:27:29.000Z
2020-10-04T11:27:29.000Z
Code/gov/spiders/gov_spider.py
GurKirat21/Profile-Exposer
ecbf517d4dd8932829f21fb516e81acea3e9daf9
[ "Apache-2.0" ]
89
2020-09-30T20:03:23.000Z
2021-05-01T08:01:26.000Z
import scrapy
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
from functions import pred
import scrape


class govSpider(scrapy.Spider):
    name = "mygovscraper"
    allowed_domains = []
    start_urls = []

    def __init__(self, filename="starter_sites.txt", *args, **kwargs):
        super(govSpider, self).__init__(*args, **kwargs)
        if(filename):
            with open(filename, 'r') as f:
                for u in f:
                    u = u.strip()
                    self.start_urls.append(u)
                    self.allowed_domains.append(urlparse(u).netloc)
        print(f"Crawler has started crawling with {len(self.start_urls)} initial site(s). Please wait for timeout or press ctrl+c repeatedly to force stop.")

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # self.logger.info("Scraped %s", response.url)
        with open('log.txt', 'a') as f:
            f.write("Scraped {}\n".format(response.url))
        soup = BeautifulSoup(response.text, 'html.parser')
        scrape.parse_soup(response.url, soup.body)
        for href in soup.find_all('a'):
            try:
                raw = href["href"]
                tag = href.text
            except KeyError:
                # anchor tag without an href attribute
                continue
            # follow absolute ('http...') and root-relative ('/...') links;
            # the empty-string guard avoids an IndexError on raw[0]
            if raw and (raw[0] == 'h' or raw[0] == '/'):
                if(pred(tag)):
                    # print(tag)
                    # f2 = open("tags.txt", 'a')
                    # f2.write(tag)
                    # f2.write("\n")
                    # f2.close()
                    new = response.urljoin(raw)
                    yield scrapy.Request(new, self.parse)
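# Editor's note: a hedged sketch of running this spider without the scrapy
# CLI, via scrapy's CrawlerProcess. It assumes a 'starter_sites.txt' file
# (one URL per line) next to the script, exactly as govSpider.__init__
# expects.
from scrapy.crawler import CrawlerProcess

if __name__ == "__main__":
    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(govSpider, filename="starter_sites.txt")
    process.start()  # blocks until the crawl finishes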
32.666667
156
0.52381
4a079ebf3487f49f7161f5d8a42940528b927c63
1,435
py
Python
src/abaqus/Sketcher/ConstrainedSketchParameter/Parameter.py
Haiiliin/PyAbaqus
f20db6ebea19b73059fe875a53be370253381078
[ "MIT" ]
7
2022-01-21T09:15:45.000Z
2022-02-15T09:31:58.000Z
src/abaqus/Sketcher/ConstrainedSketchParameter/Parameter.py
Haiiliin/PyAbaqus
f20db6ebea19b73059fe875a53be370253381078
[ "MIT" ]
null
null
null
src/abaqus/Sketcher/ConstrainedSketchParameter/Parameter.py
Haiiliin/PyAbaqus
f20db6ebea19b73059fe875a53be370253381078
[ "MIT" ]
null
null
null
from .ConstrainedSketchParameter import ConstrainedSketchParameter class Parameter(ConstrainedSketchParameter): def __init__(self, name: str, path: str = '', expression: str = '', previous: str = ''): """This method creates a parameter and optionally associates a dimension with this parameter. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].sketches[name].Parameter ---------- Parameters ---------- name A String specifying the name of the ConstrainedSketch object. No two parameters in the same ConstrainedSketch can have the same name. path A String specifying the ConstrainedSketchDimension object with which this parameter is associated. expression A String specifying the expression or value associated with the ConstrainedSketch. previous A String specifying the name of the previous ConstrainedSketch, if it exists. The *previous* argument implies an order among the parameters. No two parameters can reference the same parameter as the previous parameter. Returns ------- sketch: ConstrainedSketch A ConstrainedSketch object. """ pass
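# Editor's note: an illustrative use of the factory documented above, as it
# would appear in an Abaqus/CAE kernel script (kept as comments because the
# mdb object only exists inside an Abaqus session); the model and sketch
# names, the expressions, and the dependency chain are placeholders.
#
#   sketch = mdb.models['Model-1'].sketches['__profile__']
#   sketch.Parameter(name='width', expression='40.0')
#   sketch.Parameter(name='height', expression='width / 2', previous='width')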
33.372093
99
0.595819
4a079f022f8bcc8c3bd0c103b18d731cf3c9a9db
2,836
py
Python
python/commonil.py
mattrepl/binaryninja-api
ac8bb0fe99c87b27bf20feb5a405480ae7286755
[ "MIT" ]
null
null
null
python/commonil.py
mattrepl/binaryninja-api
ac8bb0fe99c87b27bf20feb5a405480ae7286755
[ "MIT" ]
null
null
null
python/commonil.py
mattrepl/binaryninja-api
ac8bb0fe99c87b27bf20feb5a405480ae7286755
[ "MIT" ]
null
null
null
# Copyright (c) 2019-2021 Vector 35 Inc # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from dataclasses import dataclass # This file contains a list of top level abstract classes for implementing BNIL instructions @dataclass(frozen=True, repr=False) class Constant: pass @dataclass(frozen=True, repr=False) class BinaryOperation: pass @dataclass(frozen=True, repr=False) class UnaryOperation: pass @dataclass(frozen=True, repr=False) class Comparison(BinaryOperation): pass @dataclass(frozen=True, repr=False) class SSA: pass @dataclass(frozen=True, repr=False) class Phi(SSA): pass @dataclass(frozen=True, repr=False) class FloatingPoint: pass @dataclass(frozen=True, repr=False) class ControlFlow: pass @dataclass(frozen=True, repr=False) class Terminal(ControlFlow): pass @dataclass(frozen=True, repr=False) class Loop(ControlFlow): pass @dataclass(frozen=True, repr=False) class Call(ControlFlow): pass @dataclass(frozen=True, repr=False) class Syscall(Call): pass @dataclass(frozen=True, repr=False) class Tailcall(Call): pass @dataclass(frozen=True, repr=False) class Return(Terminal): pass @dataclass(frozen=True, repr=False) class Signed: pass @dataclass(frozen=True, repr=False) class Arithmetic: pass @dataclass(frozen=True, repr=False) class Carry(Arithmetic): pass @dataclass(frozen=True, repr=False) class DoublePrecision(Arithmetic): pass @dataclass(frozen=True, repr=False) class Memory: pass @dataclass(frozen=True, repr=False) class Load: pass @dataclass(frozen=True, repr=False) class Store: pass @dataclass(frozen=True, repr=False) class RegisterStack: pass @dataclass(frozen=True, repr=False) class SetVar: pass @dataclass(frozen=True, repr=False) class StackOperation: pass @dataclass(frozen=True, repr=False) class SetReg: pass
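# Editor's note: a sketch of how these marker classes are meant to be used --
# concrete BNIL instruction classes inherit from them so analyses can filter
# instructions by capability with isinstance(). The two toy classes below are
# illustrative, not Binary Ninja's real IL node definitions.
@dataclass(frozen=True, repr=False)
class _DemoAddCarry(BinaryOperation, Carry):
    pass

@dataclass(frozen=True, repr=False)
class _DemoJump(Terminal):
    pass

_ops = [_DemoAddCarry(), _DemoJump()]
print([type(op).__name__ for op in _ops if isinstance(op, ControlFlow)])
# -> ['_DemoJump']   (Terminal is ControlFlow; the add-with-carry is not)
print(isinstance(_ops[0], Arithmetic))
# -> True            (inherited transitively through Carry)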
19.162162
92
0.767983
4a079f460b4277e47313d9bf9f4d9ede68f33f74
18,510
py
Python
window/lp_generator.py
rkoco/lp-mapf
8ffa93bd33feb244ac2db7230ea3b9ff2deb7038
[ "MIT" ]
null
null
null
window/lp_generator.py
rkoco/lp-mapf
8ffa93bd33feb244ac2db7230ea3b9ff2deb7038
[ "MIT" ]
null
null
null
window/lp_generator.py
rkoco/lp-mapf
8ffa93bd33feb244ac2db7230ea3b9ff2deb7038
[ "MIT" ]
null
null
null
import os import clingo import asp_solver import json class Problem: def __init__(self, window_bound): self.obstacles = [] self.map = [] self.height = 0 self.width = 0 self.num_agents = 0 self.agents_pos = [] self.max_distance = -1 self.heuristic = [] self.heuristic_initial = [] self.best_dirs = [] self.dirX = [1,0,-1,0] self.dirY = [0,1,0,-1] # Directions name for each (they are swapped because the search is done backwards) self.dir_name = ['left', 'down', 'right', 'up'] self.instance_number = '' self.opt_sumtime = 0 self.opt_timestep = -1 self.sol = [] self.max_time = -1 self.total_cost = 0 self.agent_cost = [] self.min_sum = 0 self.solved = False self.window_bound = window_bound def read_instance(self, inp): with open(inp, 'r') as in_file: self.instance_number = in_file.readline().strip() in_file.readline() line = in_file.readline().strip().split(',') self.height = int(line[0]) self.width = int(line[1]) for y in range(self.height): line = in_file.readline().strip() row = [] x = 0 for cell in line: if cell != '.': row.append(1) self.obstacles.append((y,x)) else: row.append(0) x+=1 self.map.append(row) in_file.readline() self.num_agents = int(in_file.readline()) for a in range(self.num_agents): agent_map = [] agent_map_init = [] agent_dirs = [] for y in range(self.height): agent_row = [] agent_row_init = [] row_dirs = [] for x in range(self.width): agent_row.append(-1) #infty heuristic (not defined) agent_row_init.append(-1) row_dirs.append([]) agent_map.append(agent_row) agent_map_init.append(agent_row_init) agent_dirs.append(row_dirs) self.heuristic.append(agent_map) self.heuristic_initial.append(agent_map_init) self.best_dirs.append(agent_dirs) line = in_file.readline().split(',') pos = (int(line[3]),int(line[4]),int(line[1]),int(line[2])) self.agents_pos.append(pos) def read_map(self, inp): self.obstacles = [] self.map = [] with open(inp, 'r') as in_file: line = in_file.readline().split(',') self.height = int(line[0]) self.width = int(line[1]) y = 0 for l in in_file.readlines(): line = l.split(',') row = [] x = 0 for cell in line: row.append(int(cell)) if int(cell) == 1: obs = (y, x) self.obstacles.append(obs) x += 1 y += 1 self.map.append(row) def read_agents(self, inp): self.agents_pos = [] with open(inp, 'r') as in_file: line = in_file.readline() self.num_agents = int(line) #Put heuristics (and best dirs) for each agent for each cell for i in range(self.num_agents): agent_map = [] agent_map_init = [] agent_dirs = [] for y in range(self.height): agent_row = [] agent_row_init = [] row_dirs = [] for x in range(self.width): agent_row.append(-1) #infty heuristic (not defined) agent_row_init.append(-1) row_dirs.append([]) agent_map.append(agent_row) agent_map_init.append(agent_row_init) agent_dirs.append(row_dirs) self.heuristic.append(agent_map) self.heuristic_initial.append(agent_map_init) self.best_dirs.append(agent_dirs) for l in in_file.readlines(): line = l.split(',') pos = (int(line[0]),int(line[1]),int(line[2]),int(line[3])) self.agents_pos.append(pos) def dijkstra_init(self, ag_id): posY = self.agents_pos[ag_id][0] posX = self.agents_pos[ag_id][1] obj = (posY, posX) open_list = [] open_list.append(obj) self.heuristic_initial[ag_id][posY][posX] = 0 while True: if not open_list: break u = open_list.pop(0) #print(u) ux = u[1] uy = u[0] #Succesors for i in range(4): vx = ux + self.dirX[i] vy = uy + self.dirY[i] #Check if pos is valid: if vx < self.width and vx >= 0 and vy < self.height and vy >= 0 and self.map[vy][vx] != 1: v_cost = self.heuristic_initial[ag_id][uy][ux] + 1 gv = 
self.heuristic_initial[ag_id][vy][vx] if gv == -1 or v_cost < gv: self.heuristic_initial[ag_id][vy][vx] = v_cost #reset the list, there is a better path open_list.append((vy,vx)) def solve_agent(self, ag_id): posY = self.agents_pos[ag_id][2] posX = self.agents_pos[ag_id][3] obj = (posY, posX) open_list = [] open_list.append(obj) self.heuristic[ag_id][posY][posX] = 0 self.best_dirs[ag_id][posY][posX] = ['wait'] while True: if not open_list: break u = open_list.pop(0) #print(u) ux = u[1] uy = u[0] #Succesors for i in range(4): vx = ux + self.dirX[i] vy = uy + self.dirY[i] #Check if pos is valid: if vx < self.width and vx >= 0 and vy < self.height and vy >= 0 and self.map[vy][vx] != 1: v_cost = self.heuristic[ag_id][uy][ux] + 1 gv = self.heuristic[ag_id][vy][vx] if gv == -1 or v_cost < gv: self.heuristic[ag_id][vy][vx] = v_cost #reset the list, there is a better path self.best_dirs[ag_id][vy][vx] = [] self.best_dirs[ag_id][vy][vx].append(self.dir_name[i]) open_list.append((vy,vx)) if v_cost == gv: #If the cost is the same, the new path is equivalent and also is a best move self.best_dirs[ag_id][vy][vx].append(self.dir_name[i]) def calc_time(self): self.opt_sumtime = 0 self.opt_timestep = -1 for ag in range(self.num_agents): posX = self.agents_pos[ag][0] posY = self.agents_pos[ag][1] best_time = self.heuristic[ag][posX][posY] if (self.opt_timestep == -1 or best_time > self.opt_timestep): self.opt_timestep = best_time self.opt_sumtime += best_time print(self.opt_timestep) def gen_solution(self): self.sol = [] self.total_cost = 0 self.agent_cost = [] self.max_time = 0 self.min_sum = 0 for ag in range(self.num_agents): print('solving for ag', ag) self.solve_agent(ag) self.dijkstra_init(ag) self.agent_cost.append(0) #for ag in range(self.num_agents): #self.dijkstra_init(ag) for ag in range(self.num_agents): posY = self.agents_pos[ag][0] posX = self.agents_pos[ag][1] ag_sol = [(posX,posY)] t = 0 while True: best_dir = self.best_dirs[ag][posY][posX] if len(best_dir) > 0: best_dir = best_dir[0] else: print('????') print(ag,posY,posX,best_dir) if best_dir == 'left': posX -= 1 elif best_dir == 'down': posY -= 1 elif best_dir == 'right': posX += 1 elif best_dir == 'up': posY +=1 elif best_dir == 'wait': if self.max_time < t: self.max_time = t break self.total_cost += 1 self.agent_cost[ag] += 1 #print((posX,posY)) ag_sol.append((posX,posY)) t+=1 self.sol.append(ag_sol) self.min_sum = self.total_cost - self.max_time #print(self.sol) #print('----') #print(self.max_time) self.solved = True def write_to_lp_window(self, outp, positions, penalty): with open('{0}{1}.lp'.format(outp, ''), 'w') as out_file: print(os.path.abspath(out_file.name)) out_file.write('#const window_bound = {0}.\n'.format(self.window_bound)) out_file.write('window_time(1..window_bound).\n\n') #write the map out_file.write('rangeX(0..{0}).\n'.format(self.width-1)) out_file.write('rangeY(0..{0}).\n\n'.format(self.height-1)) out_file.write('%% Obstacles in map: \n') for obs in self.obstacles: out_file.write('obstacle({0},{1}).\n'.format(obs[1], obs[0])) out_file.write('\n') #goal positions: out_file.write('%% Goal positions: \n') for ag in range(self.num_agents): out_file.write('goal({0},{1},{2}).\n'.format(ag, self.agents_pos[ag][3], self.agents_pos[ag][2])) out_file.write('\n') #dijkstra values ''' out_file.write('%% Dijkstra values: \n') for ag in range(self.num_agents): h_val = self.heuristic[ag][self.agents_pos[ag][0]][self.agents_pos[ag][1]] out_file.write('dijkstra({0},{1}).\n'.format(ag, h_val)) out_file.write('\n') ''' #write the 
agents:
            out_file.write('%% Agents: \n')
            for ag in range(self.num_agents):
                out_file.write('robot({0}).\n'.format(ag))
            out_file.write('\n')
            out_file.write('%% Initial positions: \n')
            for ag in range(self.num_agents):
                out_file.write('on({0},{1},{2},0).\n'.format(ag, positions[ag][1], positions[ag][0]))
            out_file.write('\n')
            #min cost
            for ag in range(self.num_agents):
                posX = positions[ag][1]
                posY = positions[ag][0]
                obj = (posY, posX, 0)
                open_list = []
                open_list.append(obj)
                in_range = False
                printed_pos = set([(posX, posY)])
                h = self.heuristic[ag][posY][posX]
                out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, posX, posY, h))
                if h == 0:
                    in_range = True
                out_file.write('exit_penalty({0},{1}).\n'.format(ag, penalty[ag]))
                while True:
                    if not open_list:
                        break
                    u = open_list.pop(0)
                    #print(u)
                    ux = u[1]
                    uy = u[0]
                    l = u[2]
                    if u[2] == self.window_bound:
                        break
                    #Successors
                    for i in range(4):
                        vx = ux + self.dirX[i]
                        vy = uy + self.dirY[i]
                        #Check if pos is valid:
                        if vx < self.width and vx >= 0 and vy < self.height and vy >= 0 and self.map[vy][vx] != 1 and (vx, vy) not in printed_pos:
                            h1 = self.heuristic[ag][vy][vx]
                            out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, vx, vy, h1))
                            printed_pos.add((vx, vy))
                            open_list.append((vy, vx, l + 1))
                            if h1 == 0:
                                out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, self.agents_pos[ag][3], self.agents_pos[ag][2], 0))
                                in_range = True
                if in_range:
                    out_file.write('in_range({0}).\n'.format(ag))
                #print(ag)
                #print(printed_pos)
            out_file.write('\n\n')

    def read_sol(self, inp):
        self.ag_sol = []
        for ag in range(self.num_agents):
            self.ag_sol.append([])
        with open(inp, 'r') as in_file:
            preds = in_file.readline().split()
            for p in preds:
                # Parse the on(robot,X,Y,T) atoms from the answer set
                if 'on(' in p:
                    info = p.replace("on(", "")
                    info = info.replace(")", "")
                    tup = info.split(",")
                    tup = [int(tup[0][1:]) - 1, int(tup[1]), int(tup[2]), int(tup[3])]
                    self.ag_sol[tup[0]].append((tup[1], tup[2], tup[3]))
        for ag in range(self.num_agents):
            self.ag_sol[ag].sort(key=lambda tup: tup[2])
            print(self.ag_sol[ag])
        sol_cost = 0
        for ag in range(self.num_agents):
            #print(self.ag_sol[ag])
            final_pos = (self.agents_pos[ag][3], self.agents_pos[ag][2])
            for pos in self.ag_sol[ag]:
                if pos[0] == final_pos[0] and pos[1] == final_pos[1]:
                    break
                sol_cost += 1
        print(sol_cost)

    def check_solved(self, positions):
        solved_agents = []
        for ag in range(self.num_agents):
            if self.agents_pos[ag][3] != positions[ag][1]:
                solved_agents.append(False)
            elif self.agents_pos[ag][2] != positions[ag][0]:
                solved_agents.append(False)
            else:
                solved_agents.append(True)
        return solved_agents

    def clingo_solve(self, inp):
        print('solving with clingo...')
        num = self.max_time
        while True:
            solv = asp_solver.IncrementalSolver(inp, num, self.num_agents, self.min_sum, self.total_cost, 4, True)
            clingo.clingo_main(solv, [inp, 'bases/baseH.lp', '--opt-strat=usc,disjoint', '--outf=3', '--time-limit=300', '-c', 'bound={0}'.format(num)])
            if solv.sol_cost > 0:
                ms = int(solv.theoric_makespan)
                # If the theoretic makespan exceeds the current bound, re-solve once with the larger bound
                if ms > num:
                    num = ms
                    solv = asp_solver.IncrementalSolver(inp, num, self.num_agents, self.min_sum, self.total_cost, 4, True)
                    clingo.clingo_main(solv, [inp, 'bases/baseH.lp', '--opt-strat=usc,disjoint', '--outf=3', '--time-limit=300', '-c', 'bound={0}'.format(num)])
                break
            num += 1
        self.sol = solv.resp
        self.check_makespan()
        print('-----------------')
        print('Clingo statistics:')
        print(json.dumps(solv.stats, sort_keys=True, indent=4, separators=(',', ': ')))
        print('Solution found')
        print('\tTotal cost: {0}'.format(solv.sol_cost))
        print('\tMakespan: {0}'.format(self.sol_time))
print('-----------------') def check_makespan(self): makespan = -1 for ag in self.sol: last_x = -1 last_y = -1 step = 0 wait_on_goal = 0 for pos in ag: if last_x == pos[0] and last_y == pos[1]: wait_on_goal += 1 else: wait_on_goal = 0 last_x = pos[0] last_y = pos[1] step+=1 ag_makespan = step - wait_on_goal if ag_makespan > makespan: makespan = ag_makespan #print(makespan) self.sol_time = makespan - 1 ''' posX = positions[ag][1] posY = positions[ag][0] in_range = False #print('holi') #print(self.agents_pos[ag][3], self.agents_pos[ag][2]) if posX == self.agents_pos[ag][3] and posY == self.agents_pos[ag][2]: out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, self.agents_pos[ag][3], self.agents_pos[ag][2], 0)) in_range = True printed_pos = set([(-1,-1)]) for i in range(self.window_bound+1): for j in range(self.window_bound+1): for d in range(1): x = posX + i * self.dirX[d] y = posY + j * self.dirY[d] if ag == 0: print(x,y) if x < 0 or y < 0 or x >= self.width or y >= self.height or (i + j > self.window_bound+1) or (x,y) in printed_pos: continue h1 = self.heuristic[ag][y][x] if h1 != -1: out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, x, y, h1)) printed_pos.add((x,y)) if x == self.agents_pos[ag][3] and y == self.agents_pos[ag][2]: out_file.write('cost_to_go({0},{1},{2},{3}).\n'.format(ag, self.agents_pos[ag][3], self.agents_pos[ag][2], 0)) in_range = True ''' #??
36.081871
159
0.449271
4a07a052dd52049314cc531762643b9fca10f4fc
87
py
Python
tests/test_beanie.py
mikeckennedy/beanie
3a3b52f7c4fcb07d51f5afb6b88f56161526c963
[ "Apache-2.0" ]
null
null
null
tests/test_beanie.py
mikeckennedy/beanie
3a3b52f7c4fcb07d51f5afb6b88f56161526c963
[ "Apache-2.0" ]
null
null
null
tests/test_beanie.py
mikeckennedy/beanie
3a3b52f7c4fcb07d51f5afb6b88f56161526c963
[ "Apache-2.0" ]
null
null
null
from beanie import __version__ def test_version(): assert __version__ == "1.8.2"
14.5
33
0.712644
4a07a08b36014037a1a51d2dc8f62805742c2fbd
870
py
Python
tests/test_model_parser.py
bcaitech1/p4-mod-model_diet
36d8a747e12c375b07d132ed4d08f9fc77126a8b
[ "MIT" ]
1
2021-11-30T12:01:55.000Z
2021-11-30T12:01:55.000Z
tests/test_model_parser.py
bcaitech1/p4-mod-model_diet
36d8a747e12c375b07d132ed4d08f9fc77126a8b
[ "MIT" ]
null
null
null
tests/test_model_parser.py
bcaitech1/p4-mod-model_diet
36d8a747e12c375b07d132ed4d08f9fc77126a8b
[ "MIT" ]
null
null
null
"""Model parse test. - Author: Jongkuk Lim - Contact: lim.jeikei@gmail.com """ import os import torch from src.model import Model class TestModelParser: """Test model parser.""" # pylint: disable=no-self-use INPUT = torch.rand(1, 3, 32, 32) def test_show_case(self): """Test show case model.""" model = Model(os.path.join("model_configs", "show_case.yaml")) assert model(TestModelParser.INPUT).shape == torch.Size([1, 10]) def test_vgg(self): """Test vgg model.""" model = Model(os.path.join("model_configs", "vgg.yaml")) assert model(TestModelParser.INPUT).shape == torch.Size([1, 10]) def test_example(self): """Test example model.""" model = Model(os.path.join("model_configs", "example.yaml")) assert model(TestModelParser.INPUT).shape == torch.Size([1, 10])
24.857143
72
0.628736
4a07a0b559147597e70c9cf3ef586d755358221f
1,763
py
Python
algorithm/neural-network/tfXOR.py
mk43/machine-learning
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
[ "Apache-2.0" ]
6
2018-02-22T00:27:44.000Z
2019-11-21T18:12:48.000Z
algorithm/neural-network/tfXOR.py
mk43/machine-learning
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
[ "Apache-2.0" ]
null
null
null
algorithm/neural-network/tfXOR.py
mk43/machine-learning
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
[ "Apache-2.0" ]
4
2018-02-19T05:59:23.000Z
2020-04-08T08:53:02.000Z
# coding: utf-8 import numpy as np import tensorflow as tf def sigmoid(x): return 1 / (1 + np.power(np.e, -2 * (x))) def add_layer(inputs, in_size, out_size, activation_function=None, ): Weights = tf.Variable(tf.random_normal([in_size, out_size])) biases = tf.Variable(tf.zeros([1, out_size]) + 0.1) Wx_plus_b = tf.matmul(inputs, Weights) + biases if activation_function is None: outputs = Wx_plus_b else: outputs = activation_function(Wx_plus_b) return outputs if __name__ == "__main__": x1 = np.asarray([0, 0, 1, 1]) x2 = np.asarray([0, 1, 0, 1]) X = np.row_stack((x1, x2)) y = np.asarray([0, 1, 1, 0]).reshape(1, 4) data_X = tf.placeholder(tf.float32, [None, 2]) data_y = tf.placeholder(tf.float32, [None, 1]) layer_one = add_layer(data_X, 2, 2, activation_function=sigmoid) prediction = add_layer(layer_one, 2, 1, activation_function=sigmoid) # layer_one = add_layer(data_X, 2, 2, activation_function=tf.nn.sigmoid) # prediction = add_layer(layer_one, 2, 1, activation_function=tf.nn.sigmoid) loss = tf.reduce_mean(tf.reduce_sum(- data_y * tf.log(prediction) - (1 - data_y) * tf.log(1 - prediction))) train = tf.train.GradientDescentOptimizer(0.1).minimize(loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(4000): sess.run(train, feed_dict={data_X: X.T, data_y: y.T}) print(sess.run(prediction, feed_dict={data_X: X.T, data_y: y.T})) # output: # [[0.00200064] # [0.9985947 ] # [0.9985983 ] # [0.00144795]] # -------------- # [[0.01765717] # [0.98598236] # [0.98598194] # [0.0207849 ]] # -------------- # [[0.00104381] # [0.9991435 ] # [0.49951136] # [0.5003463 ]]
29.383333
111
0.628474
4a07a0dd83ae4cb05580d85663a00fc7582bdeaa
10,505
py
Python
utils/cmpcodesize/cmpcodesize/compare.py
francisvm/swift
15e209ea2fde679ee78438d4ba949144acb7fee4
[ "Apache-2.0" ]
2
2016-03-05T00:19:14.000Z
2018-09-07T19:34:56.000Z
utils/cmpcodesize/cmpcodesize/compare.py
francisvm/swift
15e209ea2fde679ee78438d4ba949144acb7fee4
[ "Apache-2.0" ]
153
2018-01-21T15:24:47.000Z
2018-09-13T12:46:16.000Z
utils/cmpcodesize/cmpcodesize/compare.py
francisvm/swift
15e209ea2fde679ee78438d4ba949144acb7fee4
[ "Apache-2.0" ]
11
2017-12-13T08:08:15.000Z
2019-06-18T14:27:32.000Z
# ====--- compare.py - Compare built products' sizes -*- coding: utf-8 -*-===// # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors from __future__ import print_function import collections import os import re import subprocess from operator import itemgetter categories = [ # Cpp ["CPP", re.compile('^(__Z|_+swift)')], # Objective-C ["ObjC", re.compile('^[+-]\[')], # Swift ["Partial Apply", re.compile('^__(TPA|T0.*T[aA]$)')], ["Protocol Witness", re.compile('^__(TTW|T0.*TW$)')], ["Value Witness", re.compile('^__(Tw|T0.*w..$)')], ["Type Metadata", re.compile('^__(TM|T0.*(N|M.)$)')], # Function signature specialization of a generic specialization. ["FuncSigGen Spec", re.compile( '^__(TTSf.*__TTSg|T0.*T[gGpP]q?[0-9].*Tfq?[0-9])')], ["Generic Spec", re.compile('^__(TTSg|T0.*T[gG]q?[0-9])')], ["Partial Spec", re.compile('^__(T0.*T[pP]q?[0-9])')], ["FuncSig Spec", re.compile('^__(TTSf|T0.*Tfq?[0-9])')], ["Generic Function", re.compile( '__(T[^0].*q(x|d?[0-9]*_)|T0.*q(z|d?[0-9]*_))')], ["Static Func", re.compile('^__(TZF|T0.*FZ)')], ["Swift @objc Func", re.compile('^__(TTo|T0.*To$)')], ["Accessor", re.compile('^__(TW[atTlI]|T0.*W[atTlI]$)')], ["Getter/Setter", re.compile('^__(T[Fvi][gsmwWl]|T0.*f[gGsmwWal]$)')], ["Swift Function", re.compile('^__(TF|T0.*(F|f.|f[AuU][0-9]*_)$)')], ["Unknown", re.compile('')] ] def add_function(sizes, function, start_addr, end_addr, group_by_prefix): if not function or start_addr is None or end_addr is None: return size = end_addr - start_addr if group_by_prefix: if function.endswith('_merged'): function = function[:-7] for cat in categories: cat_name = cat[0] pattern = cat[1] if pattern.match(function): sizes[cat_name] += size return assert False, "function name not matching any pattern" else: sizes[function] += size def flatten(*args): for x in args: if hasattr(x, '__iter__'): for y in flatten(*x): yield y else: yield x def read_sizes(sizes, file_name, function_details, group_by_prefix): # Check if multiple architectures are supported by the object file. # Prefer arm64 if available. 
architectures = subprocess.check_output( ["otool", "-V", "-f", file_name]).split("\n") arch = None arch_pattern = re.compile('architecture ([\S]+)') for architecture in architectures: arch_match = arch_pattern.match(architecture) if arch_match: if arch is None: arch = arch_match.group(1) if "arm64" in arch: arch = "arm64" if arch is not None: arch_params = ["-arch", arch] else: arch_params = [] if function_details: content = subprocess.check_output( flatten([ "otool", arch_params, "-l", "-v", "-t", file_name] )).split("\n") content += subprocess.check_output(flatten( ["otool", arch_params, "-v", "-s", "__TEXT", "__textcoal_nt", file_name])).split("\n") else: content = subprocess.check_output( flatten(["otool", arch_params, "-l", file_name])).split("\n") sect_name = None curr_func = None start_addr = None end_addr = None section_pattern = re.compile(' +sectname ([\S]+)') size_pattern = re.compile(' +size ([\da-fx]+)') asmline_pattern = re.compile('^([0-9a-fA-F]+)\s') label_pattern = re.compile('^((\-*\[[^\]]*\])|[^\/\s]+):$') for line in content: asmline_match = asmline_pattern.match(line) if asmline_match: addr = int(asmline_match.group(1), 16) if start_addr is None: start_addr = addr end_addr = addr elif line == "Section": sect_name = None else: label_match = label_pattern.match(line) size_match = size_pattern.match(line) section_match = section_pattern.match(line) if label_match: func_name = label_match.group(1) add_function(sizes, curr_func, start_addr, end_addr, group_by_prefix) curr_func = func_name start_addr = None end_addr = None elif size_match and sect_name and group_by_prefix: size = int(size_match.group(1), 16) sizes[sect_name] += size elif section_match: sect_name = section_match.group(1) if sect_name == "__textcoal_nt": sect_name = "__text" add_function(sizes, curr_func, start_addr, end_addr, group_by_prefix) def compare_sizes(old_sizes, new_sizes, name_key, title, total_size_key=""): old_size = old_sizes[name_key] new_size = new_sizes[name_key] if total_size_key: old_total_size = old_sizes[total_size_key] new_total_size = new_sizes[total_size_key] if old_size is not None and new_size is not None: if old_size != 0: perc = "%.1f%%" % ( (1.0 - float(new_size) / float(old_size)) * 100.0) else: perc = "- " if total_size_key: print("%-26s%16s: %8d (%2d%%) %8d (%2d%%) %7s" % (title, name_key, old_size, old_size * 100.0 / old_total_size, new_size, new_size * 100.0 / new_total_size, perc)) else: print("%-26s%16s: %14d %14d %7s" % (title, name_key, old_size, new_size, perc)) def compare_sizes_of_file(old_files, new_files, all_sections, list_categories): old_sizes = collections.defaultdict(int) new_sizes = collections.defaultdict(int) for old_file in old_files: read_sizes(old_sizes, old_file, list_categories, True) for new_file in new_files: read_sizes(new_sizes, new_file, list_categories, True) if len(old_files) == 1 and len(new_files) == 1: old_base = os.path.basename(old_files[0]) new_base = os.path.basename(new_files[0]) title = old_base if old_base != new_base: title += "-" + new_base else: title = "old-new" compare_sizes(old_sizes, new_sizes, "__text", title, "") if list_categories: for cat in categories: cat_name = cat[0] compare_sizes(old_sizes, new_sizes, cat_name, "", "__text") if all_sections: section_title = " section" compare_sizes(old_sizes, new_sizes, "__textcoal_nt", section_title) compare_sizes(old_sizes, new_sizes, "__stubs", section_title) compare_sizes(old_sizes, new_sizes, "__const", section_title) compare_sizes(old_sizes, new_sizes, "__cstring", section_title) 
compare_sizes(old_sizes, new_sizes, "__objc_methname", section_title) compare_sizes(old_sizes, new_sizes, "__const", section_title) compare_sizes(old_sizes, new_sizes, "__objc_const", section_title) compare_sizes(old_sizes, new_sizes, "__data", section_title) compare_sizes(old_sizes, new_sizes, "__swift1_proto", section_title) compare_sizes(old_sizes, new_sizes, "__common", section_title) compare_sizes(old_sizes, new_sizes, "__bss", section_title) def list_function_sizes(size_array): for pair in sorted(size_array, key=itemgetter(1)): name = pair[0] size = pair[1] yield "%8d %s" % (size, name) def compare_function_sizes(old_files, new_files): old_sizes = collections.defaultdict(int) new_sizes = collections.defaultdict(int) for name in old_files: read_sizes(old_sizes, name, True, False) for name in new_files: read_sizes(new_sizes, name, True, False) only_in_file1 = [] only_in_file2 = [] in_both = [] only_in_file1size = 0 only_in_file2size = 0 in_both_size = 0 for func, old_size in old_sizes.items(): new_size = new_sizes[func] if new_size != 0: in_both.append((func, old_size, new_size)) else: only_in_file1.append((func, old_size)) only_in_file1size += old_size for func, new_size in new_sizes.items(): old_size = old_sizes[func] if old_size == 0: only_in_file2.append((func, new_size)) only_in_file2size += new_size if only_in_file1: print("Only in old file(s)") print(os.linesep.join(list_function_sizes(only_in_file1))) print("Total size of functions only in old file: {}".format( only_in_file1size)) print() if only_in_file2: print("Only in new files(s)") print(os.linesep.join(list_function_sizes(only_in_file2))) print("Total size of functions only in new file: {}".format( only_in_file2size)) print() if in_both: size_increase = 0 size_decrease = 0 print("%8s %8s %8s" % ("old", "new", "diff")) for triple in sorted( in_both, key=lambda tup: (tup[2] - tup[1], tup[1])): func = triple[0] old_size = triple[1] new_size = triple[2] diff = new_size - old_size if diff > 0: size_increase += diff else: size_decrease -= diff if diff == 0: in_both_size += new_size print("%8d %8d %8d %s" % (old_size, new_size, new_size - old_size, func)) print("Total size of functions " + "with the same size in both files: {}".format(in_both_size)) print("Total size of functions " + "that got smaller: {}".format(size_decrease)) print("Total size of functions " + "that got bigger: {}".format(size_increase)) print("Total size change of functions present " + "in both files: {}".format(size_increase - size_decrease))
35.853242
79
0.582389
4a07a13a0ae2871d96f4550ac24061e5687c86f3
11,243
py
Python
teal/teal.py
bustawin/teal
0c128fce0a1d9992199626bd8447532cce476c18
[ "BSD-3-Clause" ]
null
null
null
teal/teal.py
bustawin/teal
0c128fce0a1d9992199626bd8447532cce476c18
[ "BSD-3-Clause" ]
null
null
null
teal/teal.py
bustawin/teal
0c128fce0a1d9992199626bd8447532cce476c18
[ "BSD-3-Clause" ]
null
null
null
import inspect from typing import Dict, Type import click_spinner import ereuse_utils import flask_cors from anytree import Node from apispec import APISpec from click import option from ereuse_utils import ensure_utf8 from flask import Flask, jsonify from flask.globals import _app_ctx_stack from flask_sqlalchemy import SQLAlchemy from marshmallow import ValidationError from werkzeug.exceptions import HTTPException, UnprocessableEntity from teal.auth import Auth from teal.cli import TealCliRunner from teal.client import Client from teal.config import Config as ConfigClass from teal.db import SchemaSQLAlchemy from teal.json_util import TealJSONEncoder from teal.request import Request from teal.resource import Converters, LowerStrConverter, Resource class Teal(Flask): """ An opinionated REST and JSON first server built on Flask using MongoDB and Marshmallow. """ test_client_class = Client request_class = Request json_encoder = TealJSONEncoder cli_context_settings = {'help_option_names': ('-h', '--help')} test_cli_runner_class = TealCliRunner def __init__(self, config: ConfigClass, db: SQLAlchemy, schema: str = None, import_name=__name__.split('.')[0], static_url_path=None, static_folder='static', static_host=None, host_matching=False, subdomain_matching=False, template_folder='templates', instance_path=None, instance_relative_config=False, root_path=None, use_init_db=True, Auth: Type[Auth] = Auth): """ :param config: :param db: :param schema: A string describing the main PostgreSQL's schema. ``None`` disables this functionality. If you use a factory of apps (for example by using :func:`teal.teal.prefixed_database_factory`) and then set this value differently per each app (as each app has a separate config) you effectively create a `multi-tenant app <https:// news.ycombinator.com/item?id=4268792>`_. Your models by default will be created in this ``SCHEMA``, unless you set something like:: class User(db.Model): __table_args__ = {'schema': 'users'} In which case this will be created in the ``users`` schema. Schemas are interesting over having multiple databases (i.e. using flask-sqlalchemy's data binding) because you can have relationships between them. Note that this only works with PostgreSQL. :param import_name: :param static_url_path: :param static_folder: :param static_host: :param host_matching: :param subdomain_matching: :param template_folder: :param instance_path: :param instance_relative_config: :param root_path: :param Auth: """ self.schema = schema ensure_utf8(self.__class__.__name__) super().__init__(import_name, static_url_path, static_folder, static_host, host_matching, subdomain_matching, template_folder, instance_path, instance_relative_config, root_path) self.config.from_object(config) flask_cors.CORS(self) # Load databases self.auth = Auth() self.url_map.converters[Converters.lower.name] = LowerStrConverter self.load_resources() self.register_error_handler(HTTPException, self._handle_standard_error) self.register_error_handler(ValidationError, self._handle_validation_error) self.db = db db.init_app(self) if use_init_db: self.cli.command('init-db', context_settings=self.cli_context_settings)(self.init_db) self.spec = None # type: APISpec self.apidocs() # noinspection PyAttributeOutsideInit def load_resources(self): self.resources = {} # type: Dict[str, Resource] """ The resources definitions loaded on this App, referenced by their type name. """ self.tree = {} # type: Dict[str, Node] """ A tree representing the hierarchy of the instances of ResourceDefinitions. 
ResourceDefinitions use these nodes to traverse their hierarchy. Do not use the normal python class hierarchy as it is global, thus unreliable if you run different apps with different schemas (for example, an extension that is only added on the third app adds a new type of user). """ for ResourceDef in self.config['RESOURCE_DEFINITIONS']: resource_def = ResourceDef(self) # type: Resource self.register_blueprint(resource_def) if resource_def.cli_commands: @self.cli.group(resource_def.cli_name, context_settings=self.cli_context_settings, short_help='{} management.'.format(resource_def.type)) def dummy_group(): pass for cli_command, *args in resource_def.cli_commands: # Register CLI commands # todo cli commands with multiple arguments end-up reversed # when teal has been executed multiple times (ex. testing) # see _param_memo func in click package dummy_group.command(*args)(cli_command) # todo should we use resource_def.name instead of type? # are we going to have collisions? (2 resource_def -> 1 schema) self.resources[resource_def.type] = resource_def self.tree[resource_def.type] = Node(resource_def.type) # Link tree nodes between them for _type, node in self.tree.items(): resource_def = self.resources[_type] _, Parent, *superclasses = inspect.getmro(resource_def.__class__) if Parent is not Resource: node.parent = self.tree[Parent.type] @staticmethod def _handle_standard_error(e: HTTPException): """ Handles HTTPExceptions by transforming them to JSON. """ try: response = jsonify(e) response.status_code = e.code except (AttributeError, TypeError) as e: code = getattr(e, 'code', 500) response = jsonify({ 'message': str(e), 'code': code, 'type': e.__class__.__name__ }) response.status_code = code return response @staticmethod def _handle_validation_error(e: ValidationError): data = { 'message': e.messages, 'code': UnprocessableEntity.code, 'type': e.__class__.__name__ } response = jsonify(data) response.status_code = UnprocessableEntity.code return response @option('--erase/--no-erase', default=False, help='Delete all contents from the database (including common schemas)?') @option('--exclude-schema', default=None, help='Schema to exclude creation (and deletion if --erase is set). ' 'Required the SchemaSQLAlchemy.') def init_db(self, erase: bool = False, exclude_schema=None): """ Initializes a database from scratch, creating tables and needed resources. Note that this does not create the database per se. If executing this directly, remember to use an app_context. Resources can hook functions that will be called when this method executes, by subclassing :meth:`teal.resource. Resource.load_resource`. """ assert _app_ctx_stack.top, 'Use an app context.' print('Initializing database...'.ljust(30), end='') with click_spinner.spinner(): if erase: if exclude_schema: # Using then a schema teal sqlalchemy assert isinstance(self.db, SchemaSQLAlchemy) self.db.drop_schema() else: # using regular flask sqlalchemy self.db.drop_all() self._init_db(exclude_schema) self._init_resources() self.db.session.commit() print('done.') def _init_db(self, exclude_schema=None) -> bool: """Where the database is initialized. You can override this. :return: A flag stating if the database has been created (can be False in case check is True and the schema already exists). 
""" if exclude_schema: # Using then a schema teal sqlalchemy assert isinstance(self.db, SchemaSQLAlchemy) self.db.create_all(exclude_schema=exclude_schema) else: # using regular flask sqlalchemy self.db.create_all() return True def _init_resources(self, **kw): for resource in self.resources.values(): resource.init_db(self.db, **kw) def apidocs(self): """Apidocs configuration and generation.""" self.spec = APISpec( plugins=( 'apispec.ext.flask', 'apispec.ext.marshmallow', ), **self.config.get_namespace('API_DOC_CONFIG_') ) for name, resource in self.resources.items(): if resource.SCHEMA: self.spec.definition(name, schema=resource.SCHEMA, extra_fields=self.config.get_namespace('API_DOC_CLASS_')) self.add_url_rule('/apidocs', view_func=self.apidocs_endpoint) def apidocs_endpoint(self): """An endpoint that prints a JSON OpenApi 2.0 specification.""" if not getattr(self, '_apidocs', None): # We are forced to to this under a request context for path, view_func in self.view_functions.items(): if path != 'static': self.spec.add_path(view=view_func) self._apidocs = self.spec.to_dict() return jsonify(self._apidocs) class DumpeableHTTPException(ereuse_utils.Dumpeable): """Exceptions that inherit this class will be able to dump to dicts and JSONs. """ def dump(self): # todo this is heavily ad-hoc and should be more generic value = super().dump() value['type'] = self.__class__.__name__ value['code'] = self.code value.pop('exc', None) value.pop('response', None) if 'data' in value: value['fields'] = value['data']['messages'] del value['data'] if 'message' not in value: value['message'] = value.pop('description', str(self)) return value # Add dump capacity to Werkzeug's HTTPExceptions HTTPException.__bases__ = HTTPException.__bases__ + (DumpeableHTTPException,)
39.449123
97
0.60491
4a07a162ff79e4b57d834f09cafad42f8bf238d0
174
py
Python
guiauto/gui/base_test.py
saasaa831/guidesktop
68abe5e896c4d29cf12898abd3b27c60553a3948
[ "Apache-2.0" ]
null
null
null
guiauto/gui/base_test.py
saasaa831/guidesktop
68abe5e896c4d29cf12898abd3b27c60553a3948
[ "Apache-2.0" ]
null
null
null
guiauto/gui/base_test.py
saasaa831/guidesktop
68abe5e896c4d29cf12898abd3b27c60553a3948
[ "Apache-2.0" ]
null
null
null
class BaseTest: driver = None general = None def __init__(self, driver, parent_handle): self.driver = driver self.parent_handle = parent_handle
19.333333
46
0.655172
4a07a183e39459e6c943a88ee76ed6bd5dc8a53a
26,879
py
Python
deploy_config_generator/output/__init__.py
ApplauseOSS/deploy-config-generator
04674ba02f5a797e25c682aa9ff755989741a0c1
[ "MIT" ]
3
2019-04-05T14:16:17.000Z
2021-06-25T20:53:03.000Z
deploy_config_generator/output/__init__.py
ApplauseOSS/deploy-config-generator
04674ba02f5a797e25c682aa9ff755989741a0c1
[ "MIT" ]
6
2019-04-04T20:20:16.000Z
2021-09-27T21:04:39.000Z
deploy_config_generator/output/__init__.py
ApplauseOSS/deploy-config-generator
04674ba02f5a797e25c682aa9ff755989741a0c1
[ "MIT" ]
null
null
null
import copy
import inspect
import os.path
import re

import six

from deploy_config_generator.site_config import SiteConfig
from deploy_config_generator.display import Display
from deploy_config_generator.template import Template
from deploy_config_generator.errors import DeployConfigGenerationError, DeployConfigError, ConfigError
from deploy_config_generator.utils import show_traceback


class OutputPluginBase(object):

    '''
    Base class for output plugins
    '''

    _vars = None
    _output_dir = None
    _display = None
    _section = None
    _plugin_config = None
    _fields = None
    _config_version = None

    COMMON_DEFAULT_CONFIG = dict(
        enabled=True,
    )

    PRIORITY = 1

    def __init__(self, varset, output_dir, config_version):
        self._vars = varset
        self._output_dir = output_dir
        self._display = Display()
        self._template = Template()
        self._site_config = SiteConfig()
        self._config_version = config_version
        self.build_config()

    # Comparison functions for sorting plugins
    # Sort first by priority and then by name (for consistency)
    def __lt__(self, other):
        return (self.PRIORITY < other.PRIORITY or (self.PRIORITY == other.PRIORITY and self.NAME < other.NAME))

    def __gt__(self, other):
        return (self.PRIORITY > other.PRIORITY or (self.PRIORITY == other.PRIORITY and self.NAME > other.NAME))

    def __le__(self, other):
        # Compare strictly on priority first, then tie-break on name
        return (self.PRIORITY < other.PRIORITY or (self.PRIORITY == other.PRIORITY and self.NAME <= other.NAME))

    def __ge__(self, other):
        return (self.PRIORITY > other.PRIORITY or (self.PRIORITY == other.PRIORITY and self.NAME >= other.NAME))

    def __eq__(self, other):
        # Equal only when both priority and name match
        return (self.PRIORITY == other.PRIORITY and self.NAME == other.NAME)

    def __ne__(self, other):
        return (self.PRIORITY != other.PRIORITY or (self.PRIORITY == other.PRIORITY and self.NAME != other.NAME))

    def build_config(self):
        '''
        Build the plugin config
        '''
        self._plugin_config = self.COMMON_DEFAULT_CONFIG.copy()
        self._plugin_config.update(self.DEFAULT_CONFIG)
        # Helper var to tidy up the code
        self._fields = copy.deepcopy(self._plugin_config['fields'])
        # Convert field definitions into PluginField objects
        for section in self._fields:
            section_fields = self._fields[section]
            for k, v in section_fields.items():
                section_fields[k] = PluginField(k, v, self._config_version, self._template)
        self.build_config_site()

    def build_config_site(self):
        '''
        Merge in plugin config values from site config

        This will also do a deep merge of deeply nested field definitions
        '''
        if self.NAME in self._site_config.plugins:
            for k, v in self._site_config['plugins'][self.NAME].items():
                if k == 'fields':
                    for section in v:
                        for field_name, field in v[section].items():
                            # Create section if it doesn't exist
                            if section not in self._fields:
                                self._fields[section] = {}
                            # Update existing field config or create new
                            if field_name in self._fields[section]:
                                self._fields[section][field_name].update_config(field)
                            else:
                                self._fields[section][field_name] = PluginField(field_name, field, self._config_version, self._template)
                else:
                    if k in self._plugin_config:
                        if isinstance(v, dict):
                            self._plugin_config[k] = v.copy()
                        elif isinstance(v, list):
                            self._plugin_config[k] = v[:]
                        else:
                            self._plugin_config[k] = v
                    else:
                        raise ConfigError('unrecognized config option: %s' % k)

    def set_section(self, section):
        '''
        Sets the active section of the deploy config

        This is used to figure out which set of fields to process
        '''
        self._section = section

    def has_field(self, field):
        '''
        Check if a field exists in the current section for this plugin
        '''
        if self._section in self._fields and field in self._fields[self._section]:
if self._fields[self._section][field].is_valid_for_config_version(): return True return False def get_required_fields(self): ''' Return a list of fields in the current section with required=True ''' ret = [] if self._section in self._fields: for k, v in self._fields[self._section].items(): if v.required and v.default is None and v.is_valid_for_config_version(): ret.append(k) return ret def is_field_locked(self, field): ''' Check if a field has been marked as 'locked' (cannot be overridden by user) ''' if self._section in self._fields and field in self._fields[self._section]: if self._fields[self._section][field].locked: return True return False def is_needed(self, app): ''' Determine whether this plugin is needed based on the provided deploy config ''' # We aren't needed if we're marked as disabled (enabled: False) if self._plugin_config.get('enabled', True) is False: return False # We aren't needed if we have no fields for the current section if self._section not in self._fields: return False # We are needed if we're the configured default plugin if self._site_config.default_output == self.NAME: return True # Check if any of our required top-level fields are provided for field in self.get_required_fields(): if field in app: return True # If nothing above matched, then we're probably not needed return False def merge_with_field_defaults(self, app): ''' Merge user-provided values with configured field defaults ''' ret = {} # Apply defaults/transforms for field, value in self._fields[self._section].items(): ret[field] = value.apply_default(app.get(field, None)) ret[field] = value.apply_transform(ret.get(field, None)) return ret def validate_fields(self, app): ''' Validate the provided app config against plugin field definitions ''' # Check that all required top-level fields are provided req_fields = self.get_required_fields() for field in req_fields: if field not in app: raise DeployConfigError("required field '%s' not defined" % field) # Check field/subfield types, required, and if field is locked unmatched = [] for field, value in app.items(): if self.has_field(field): if self.is_field_locked(field): raise DeployConfigError("the field '%s' has been locked by the plugin config and cannot be overridden" % field) field_unmatched = self._fields[self._section][field].validate(value) unmatched.extend(field_unmatched) else: unmatched.append(field) return unmatched def build_app_vars(self, index, app, path=''): # Build vars for template app_vars = { 'PLUGIN_NAME': self.NAME, 'APP_INDEX': index, # App config 'APP': self.merge_with_field_defaults(app), # Parsed vars 'VARS': dict(self._vars), } return app_vars def pre_process(self, config): pass def generate(self, config): ''' Write out the generated config to disk ''' try: self.pre_process(config) for section in config: if section in self._fields: self.set_section(section) for idx, app in enumerate(config[section]): # We want a 1-based index for the output files index = idx + 1 if self.is_needed(app): # Build vars for template app_vars = self.build_app_vars(index, app) # Check conditionals for field, value in self._fields[self._section].items(): app_vars['APP'][field] = value.check_conditionals(app_vars['APP'].get(field, None), app_vars) # Generate output output = self.generate_output(app_vars) path_suffix = None if isinstance(output, (tuple, list)): output, path_suffix = output if output is None: continue path = os.path.join(self._output_dir, '%s-%03d%s%s' % (self.NAME, index, ('-%s' % path_suffix if path_suffix else ''), self.FILE_EXT)) 
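                            # Each app's rendered config goes to its own numbered file: '<plugin>-NNN[-suffix]<ext>'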
self._display.v('Writing output file %s' % path) with open(path, 'w') as f: f.write(output) except Exception as e: show_traceback(self._display.get_verbosity()) raise DeployConfigGenerationError(str(e)) def generate_output(self, app_vars): ''' Generate output content By default, this renders the Jinja template defined in the 'TEMPLATE' class var. However, it can be overridden by an output plugin to provide a custom method for generating the output. ''' output = self._template.render_template(inspect.cleandoc(self.TEMPLATE), app_vars) return output class PluginField(object): ''' Class representing a field from a deploy config that's supported by an output plugin ''' _name = None _config = None _parent = None _config_version = None BASE_CONFIG = { # Whether field is required 'required': False, # Default value 'default': None, # Whether field is locked (value cannot be provided by user) 'locked': False, # Expected type for field 'type': None, # Transformation (for strings) # This should be a dict containing one of the following keys: # * prefix - prefix to add to value # * suffix - suffix to add to value 'transform': None, # Expected type for sub-items (for lists and free-form dicts) 'subtype': None, # How to combine defaults # * None - no combining, user value replaces default # * 'append' - default value is included at end of list # * 'prepend' - default value is included at beginning of list # * 'merge' - user value is merged with default value (for lists/dicts) 'default_action': None, # Key to use for merging (for lists of dicts) 'merge_key': None, # Minimum/maximum config version that field is valid for 'min_version': None, 'max_version': None, # Field definitions (for dicts) 'fields': None, # Whether the field supports a conditional (for dicts) 'conditional': False, # Field name to use for conditional 'conditional_key': 'condition', # Loop var (for use in conditionals) 'loop_var': 'item', # Validation regex pattern (for strings) 'validation_pattern': None, } def __init__(self, name, config, config_version, template, parent=None): self._name = name self._parent = parent self._config_version = config_version self._template = template self._config = self.BASE_CONFIG.copy() if config is not None: self._config.update(copy.deepcopy(config)) self.convert_fields() def __getattr__(self, key, default=None): return self._config.get(key, default) __getitem__ = __getattr__ get = __getattr__ def __setattr__(self, key, value): if self._config is not None and key in self._config: self._config[key] = value else: super(PluginField, self).__setattr__(key, value) def __contains__(self, key): return (key in self._config) def __str__(self): return '<PluginField name=%s config=%s>' % (self._name, self._config) __repr__ = __str__ def is_valid_for_config_version(self): ''' Compare min/max version for field to config version ''' if self._config_version is None: return True if self._config['min_version'] is not None: if float(self._config_version) < float(self._config['min_version']): return False if self._config['max_version'] is not None: if float(self._config_version) > float(self._config['max_version']): return False return True def convert_fields(self): ''' Replace items in 'fields' dict with PluginField objects ''' if self._config['fields'] is not None: for k, v in self._config['fields'].items(): self._config['fields'][k] = PluginField(k, v, self._config_version, self._template, parent=self) def update_config(self, config): ''' Deep merge field attributes from site config with current config ''' for k, v in 
config.items(): if k == 'fields': for field_name, field in v.items(): # Update existing field config or create new if self.fields is not None and field_name in self.fields: self.fields[field_name].update_config(field) else: if self.fields is None: self.fields = {} self.fields[field_name] = PluginField(field_name, field, self._config_version, self._template, parent=self) else: if isinstance(v, dict): if self._config[k] is None: self._config[k] = {} self._config[k].update(v) elif isinstance(v, list): self._config[k] = v[:] else: self._config[k] = v def get_full_name(self): ''' Construct full name of field from parent(s) This is used when generating exceptions ''' field_name = self._name parent = self._parent while parent is not None: field_name = '%s.%s' % (parent._name, field_name) parent = parent._parent return field_name def convert_bool(self, value): if value in ('true', 'True', 'yes', 'on'): return True if value in ('false', 'False', 'no', 'off'): return False return None def validate_check_type(self, value, expected_type=None): ''' Determine the type of the passed value ''' if isinstance(value, list): return 'list' if isinstance(value, dict): return 'dict' if isinstance(value, bool): return 'bool' if isinstance(value, six.integer_types): return 'int' if isinstance(value, float): return 'float' if isinstance(value, six.string_types): # Values from variables always come in as a string, so we need special # logic to determine their actual type based on the field type try: if expected_type == 'float' and float(value) is not None: return 'float' if expected_type == 'int' and int(value) is not None: return 'int' if expected_type == 'bool' and self.convert_bool(value) is not None: return 'bool' except Exception: pass return 'str' raise DeployConfigError('unsupported type: %s' % type(value)) def validate(self, value, use_subtype=False): ''' Validate passed value against field config ''' unmatched = [] if value is None: return unmatched field_type = self.type if use_subtype: # Use the field subtype field_type = self.subtype # Nothing to validate if no field type is specified if field_type is None: return unmatched value_type = self.validate_check_type(value, field_type) if value_type != field_type: # TODO: replace this with the ability to specify multiple types for a field # Hack to allow an int value to satisfy a float if field_type == 'float' and value_type == 'int': pass else: raise DeployConfigError("value for field '%s' is wrong type, expected '%s' and got: %s" % (self.get_full_name(), field_type, value_type)) if field_type == 'list' and self.subtype is not None: # Validate each list item separately if a field subtype is specified for value_item in value: # Use field's subtype for list items item_unmatched = self.validate(value_item, use_subtype=True) unmatched.extend(item_unmatched) elif field_type == 'dict': # Recursively validate sub-field values if self.fields is not None: for k, v in value.items(): if k not in self.fields or not self.fields[k].is_valid_for_config_version(): unmatched.append('%s.%s' % (self.get_full_name(), k)) continue field_unmatched = self.fields[k].validate(v) unmatched.extend(field_unmatched) # Check for required and locked sub-fields for tmp_field_name, tmp_field in self.fields.items(): if tmp_field.required and value.get(tmp_field_name, None) is None and tmp_field.default is None: raise DeployConfigError("field '%s' is required, but no value provided" % tmp_field.get_full_name()) if tmp_field.locked and value.get(tmp_field_name, None) is not None: raise 
DeployConfigError("field '%s' is locked, but a value was provided" % tmp_field.get_full_name()) # Validate free-form value type elif self.subtype is not None and not use_subtype: for value_item in value.values(): item_unmatched = self.validate(value_item, use_subtype=True) unmatched.extend(item_unmatched) elif field_type == 'str': if self.validation_pattern is not None: if not re.match(self.validation_pattern, value): raise DeployConfigError("value for field '%s' did not match validation pattern: %s" % (self.get_full_name(), self.validation_pattern)) return unmatched def apply_transform(self, value, use_subtype=False): ''' Apply transformations to string values ''' if value is None: return value field_type = self.type if use_subtype: field_type = self.subtype value_type = self.validate_check_type(value) ret = None if value_type == 'list': # Apply transformations to all items in the list ret = [] for value_item in value: ret.append(self.apply_transform(value_item, use_subtype=True)) elif value_type == 'dict': ret = {} if self.fields is not None: # Recursively apply transformations to sub-fields for field in self.fields: if field in value: ret[field] = self.fields[field].apply_transform(value[field]) else: ret = value elif value_type == 'str': # Convert types for values that came in from a variable (which always # produces a string) if field_type == 'bool': # Convert values to boolean if they're expected to be boolean ret = self.convert_bool(value) elif field_type == 'float': ret = float(value) elif field_type == 'int': ret = int(value) elif isinstance(self.transform, dict): if 'prefix' in self.transform: ret = self.transform['prefix'] + value elif 'suffix' in self.transform: ret = value + self.transform['suffix'] else: ret = value else: if field_type == 'float' and value_type == 'int': # An int can satisfy a 'float' field, but we want to make sure # that it's a float for output ret = float(value) else: ret = value return ret def apply_default_list(self, value, field_type): ''' Apply default values for a list (helper function) ''' ret = [] if self.subtype is not None: if value: for value_item in value: new_val = self.apply_default(value_item, use_subtype=True) if new_val is not None: ret.append(new_val) else: if value: ret = value[:] if self.default is not None: def_val = self.default if not isinstance(def_val, list): def_val = [def_val] # User values are merged with default values if self.default_action == 'merge': # Create a copy of the default values, since we'll be modifying it def_val = def_val[:] # Iterate over user values and compare against default values for tmp_value in ret: for idx, tmp_def_val in enumerate(def_val): if self.subtype == 'dict' and self.merge_key is not None: # Delete default value if the merge key value matches the current value if tmp_value.get(self.merge_key, "MERGE_KEY_USER") == tmp_def_val.get(self.merge_key, "MERGE_KEY_DEFAULT"): del def_val[idx] break else: # Delete default value if it matches the current value if tmp_value == tmp_def_val: del def_val[idx] break # Prepend remaining defaults to user values ret = def_val + ret # User values go after default value elif self.default_action == 'prepend': ret = def_val + ret # User values go before default value elif self.default_action == 'append': ret = ret + def_val elif not ret: ret = self.default return ret def apply_default(self, value, use_subtype=False): ''' Apply default values from the field config ''' ret = None field_type = self.type if use_subtype: # Use the field subtype field_type = self.subtype if 
field_type == 'list': ret = self.apply_default_list(value, field_type) elif field_type == 'dict': # Recursively apply defaults for sub-fields ret = {} if self.fields is not None: if value is None: value = {} for field in self.fields: ret[field] = self.fields[field].apply_default(value.get(field, None)) else: # Don't apply defaults for subtype if use_subtype: ret = value else: if value is None: ret = self.default else: if self.default_action == 'merge': ret = self.default.copy() ret.update(value) else: ret = value.copy() else: # Use default if no value was provided if value is None: ret = self.default else: ret = value return ret def check_conditionals(self, value, app_vars, use_subtype=False): ''' Check conditionals and filter value ''' ret = None field_type = self.type if use_subtype: # Use the field subtype field_type = self.subtype if field_type == 'list': ret = [] for idx, item in enumerate(value): if self.loop_var: # Add loop item and index vars app_vars = app_vars.copy() app_vars.update({self.loop_var: item, ('%s_index' % self.loop_var): idx}) tmp_value = self.check_conditionals(item, app_vars, use_subtype=True) # Don't add item to returned data if its condition evaluated to False if tmp_value is not None: ret.append(tmp_value) elif field_type == 'dict': ret = {} if self.fields is not None: if value is None: value = {} for field in self.fields: ret[field] = self.fields[field].check_conditionals(value.get(field, None), app_vars) else: ret = value if self.conditional and self.conditional_key in ret: if ret[self.conditional_key] is not None: if not self._template.evaluate_condition(ret[self.conditional_key], app_vars): return None # Remove the conditional key from the returned data del ret[self.conditional_key] else: ret = value return ret
40.602719
162
0.55322
4a07a201150c9c6db78a02308f6259bc3d918d6a
23,738
py
Python
official/vision/beta/projects/movinet/modeling/movinet_layers_test.py
hjkim-haga/TF-OD-API
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
[ "Apache-2.0" ]
null
null
null
official/vision/beta/projects/movinet/modeling/movinet_layers_test.py
hjkim-haga/TF-OD-API
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
[ "Apache-2.0" ]
null
null
null
official/vision/beta/projects/movinet/modeling/movinet_layers_test.py
hjkim-haga/TF-OD-API
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for movinet_layers.py."""

from absl.testing import parameterized
import tensorflow as tf

from official.vision.beta.modeling.layers import nn_layers
from official.vision.beta.projects.movinet.modeling import movinet_layers


class MovinetLayersTest(parameterized.TestCase, tf.test.TestCase):

  def test_squeeze3d(self):
    squeeze = movinet_layers.Squeeze3D()

    inputs = tf.ones([5, 1, 1, 1, 3])
    predicted = squeeze(inputs)

    expected = tf.ones([5, 3])

    self.assertEqual(predicted.shape, expected.shape)
    self.assertAllEqual(predicted, expected)

  def test_mobile_conv2d(self):
    conv2d = movinet_layers.MobileConv2D(
        filters=3,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
        kernel_initializer='ones',
        use_bias=False,
        use_depthwise=False,
        use_temporal=False,
        use_buffered_input=True,
    )

    inputs = tf.ones([1, 2, 2, 2, 3])
    predicted = conv2d(inputs)

    expected = tf.constant(
        [[[[[12., 12., 12.],
            [12., 12., 12.]],
           [[12., 12., 12.],
            [12., 12., 12.]]],
          [[[12., 12., 12.],
            [12., 12., 12.]],
           [[12., 12., 12.],
            [12., 12., 12.]]]]])

    self.assertEqual(predicted.shape, expected.shape)
    self.assertAllClose(predicted, expected)

  def test_mobile_conv2d_temporal(self):
    conv2d = movinet_layers.MobileConv2D(
        filters=3,
        kernel_size=(3, 1),
        strides=(1, 1),
        padding='causal',
        kernel_initializer='ones',
        use_bias=False,
        use_depthwise=True,
        use_temporal=True,
        use_buffered_input=True,
    )

    inputs = tf.ones([1, 2, 2, 1, 3])
    paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]]
    padded_inputs = tf.pad(inputs, paddings)
    predicted = conv2d(padded_inputs)

    expected = tf.constant(
        [[[[[1., 1., 1.]],
           [[1., 1., 1.]]],
          [[[2., 2., 2.]],
           [[2., 2., 2.]]]]])

    self.assertEqual(predicted.shape, expected.shape)
    self.assertAllClose(predicted, expected)

  def test_stream_buffer(self):
    conv3d_stream = nn_layers.Conv3D(
        filters=3,
        kernel_size=(3, 3, 3),
        strides=(1, 2, 2),
        padding='causal',
        kernel_initializer='ones',
        use_bias=False,
        use_buffered_input=True,
    )
    buffer = movinet_layers.StreamBuffer(buffer_size=2)

    conv3d = nn_layers.Conv3D(
        filters=3,
        kernel_size=(3, 3, 3),
        strides=(1, 2, 2),
        padding='causal',
        kernel_initializer='ones',
        use_bias=False,
        use_buffered_input=False,
    )

    inputs = tf.ones([1, 4, 2, 2, 3])
    expected = conv3d(inputs)

    for num_splits in [1, 2, 4]:
      frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
      states = {}
      predicted = []
      for frame in frames:
        x, states = buffer(frame, states=states)
        x = conv3d_stream(x)
        predicted.append(x)
      predicted = tf.concat(predicted, axis=1)

      self.assertEqual(predicted.shape, expected.shape)
      self.assertAllClose(predicted, expected)
      self.assertAllClose(
          predicted,
          [[[[[12., 12., 12.]]],
            [[[24., 24., 24.]]],
            [[[36., 36., 36.]]],
            [[[36., 36., 36.]]]]])

  def test_stream_conv_block_2plus1d(self):
    conv_block = movinet_layers.ConvBlock(
        filters=3,
        kernel_size=(3, 3, 3),
        strides=(1, 2, 2),
        causal=True,
        kernel_initializer='ones',
        use_bias=False,
        activation='relu',
        conv_type='2plus1d',
    )
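    # Streaming variant of the block above; fed frame chunks it should reproduce conv_block's output.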
stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='2plus1d', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[35.9640400, 35.9640400, 35.9640400]]], [[[71.9280700, 71.9280700, 71.9280700]]], [[[107.892105, 107.892105, 107.892105]]], [[[107.892105, 107.892105, 107.892105]]]]]) def test_stream_conv_block_3d_2plus1d(self): conv_block = movinet_layers.ConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='3d_2plus1d', ) stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', conv_type='3d_2plus1d', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[35.9640400, 35.9640400, 35.9640400]]], [[[71.9280700, 71.9280700, 71.9280700]]], [[[107.892105, 107.892105, 107.892105]]], [[[107.892105, 107.892105, 107.892105]]]]]) def test_stream_conv_block(self): conv_block = movinet_layers.ConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', ) stream_conv_block = movinet_layers.StreamConvBlock( filters=3, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, kernel_initializer='ones', use_bias=False, activation='relu', ) inputs = tf.ones([1, 4, 2, 2, 3]) expected = conv_block(inputs) predicted_disabled, _ = stream_conv_block(inputs) self.assertEqual(predicted_disabled.shape, expected.shape) self.assertAllClose(predicted_disabled, expected) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = stream_conv_block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) self.assertAllClose( predicted, [[[[[11.994005, 11.994005, 11.994005]]], [[[23.988010, 23.988010, 23.988010]]], [[[35.982014, 35.982014, 35.982014]]], [[[35.982014, 35.982014, 35.982014]]]]]) def test_stream_squeeze_excitation(self): se = movinet_layers.StreamSqueezeExcitation( 3, causal=True, kernel_initializer='ones') inputs = tf.range(4, dtype=tf.float32) + 1. 
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, _ = se(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = se(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected, 1e-5, 1e-5) self.assertAllClose( predicted, [[[[[0.9998109, 0.9998109, 0.9998109]], [[0.9998109, 0.9998109, 0.9998109]]], [[[1.9999969, 1.9999969, 1.9999969]], [[1.9999969, 1.9999969, 1.9999969]]], [[[3., 3., 3.]], [[3., 3., 3.]]], [[[4., 4., 4.]], [[4., 4., 4.]]]]], 1e-5, 1e-5) def test_stream_movinet_block(self): block = movinet_layers.MovinetBlock( out_filters=3, expand_filters=6, kernel_size=(3, 3, 3), strides=(1, 2, 2), causal=True, ) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) expected, _ = block(inputs) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} predicted = [] for frame in frames: x, states = block(frame, states=states) predicted.append(x) predicted = tf.concat(predicted, axis=1) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) def test_stream_classifier_head(self): head = movinet_layers.Head(project_filters=5) classifier_head = movinet_layers.ClassifierHead( head_filters=10, num_classes=4) inputs = tf.range(4, dtype=tf.float32) + 1. inputs = tf.reshape(inputs, [1, 4, 1, 1, 1]) inputs = tf.tile(inputs, [1, 1, 2, 1, 3]) x, _ = head(inputs) expected = classifier_head(x) for num_splits in [1, 2, 4]: frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1) states = {} for frame in frames: x, states = head(frame, states=states) predicted = classifier_head(x) self.assertEqual(predicted.shape, expected.shape) self.assertAllClose(predicted, expected) if __name__ == '__main__': tf.test.main() ======= # Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
30.708926
75
0.58122
4a07a21ad1b43836427de7e97f96a4df4c5c6e19
174
py
Python
webapp/app/logs/__init__.py
alan-turing-institute/CROP
467956ba8e273daa6afbfafd89bd2c3462a8156e
[ "MIT" ]
9
2020-02-11T17:57:47.000Z
2022-03-22T14:24:55.000Z
webapp/app/logs/__init__.py
alan-turing-institute/CROP
467956ba8e273daa6afbfafd89bd2c3462a8156e
[ "MIT" ]
64
2020-02-11T17:35:36.000Z
2022-03-31T13:19:08.000Z
webapp/app/logs/__init__.py
alan-turing-institute/CROP
467956ba8e273daa6afbfafd89bd2c3462a8156e
[ "MIT" ]
2
2020-08-16T06:10:24.000Z
2021-04-15T10:11:51.000Z
from flask import Blueprint blueprint = Blueprint( 'logs_blueprint', __name__, url_prefix='/logs', template_folder='templates', static_folder='static' )
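# Illustrative usage sketch (hedged: the caller below is hypothetical and not
# part of the CROP package): a blueprint only takes effect once it is
# registered on a Flask application object.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(blueprint)  # routes from this module are served under /logs
    print(app.url_map)  # quick way to confirm the blueprint's URL rules were registered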
17.4
32
0.695402
4a07a258000adccc5889a116274e96c9df102886
714
py
Python
coin_dectector/web.py
jorisroovers/opencv-playground
4a5d179be422ea58f05ad1b050724e27b5a75820
[ "Apache-2.0" ]
null
null
null
coin_dectector/web.py
jorisroovers/opencv-playground
4a5d179be422ea58f05ad1b050724e27b5a75820
[ "Apache-2.0" ]
null
null
null
coin_dectector/web.py
jorisroovers/opencv-playground
4a5d179be422ea58f05ad1b050724e27b5a75820
[ "Apache-2.0" ]
null
null
null
from coins import detector from flask import Flask from flask import request, jsonify, render_template, send_from_directory app = Flask(__name__) @app.route("/") def index(): return render_template("index.html", name="joris") @app.route('/assets/<path:path>') def assets(path): return send_from_directory('assets', path) @app.route('/generated/<path:path>') def generated(path): return send_from_directory('generated', path) @app.route("/detect", methods=['POST']) def detect(): data = request.json dst_path = detector.detect('assets/coins.png', float(data['param1']), float(data['param2'])) return jsonify(**{"url": dst_path}) if __name__ == "__main__": app.run(debug=True)
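# Illustrative client call (hedged: parameter values are hypothetical; the
# JSON keys match what the /detect handler above reads from the request body):
#
#   import requests
#   resp = requests.post('http://localhost:5000/detect',
#                        json={'param1': 1.5, 'param2': 30})
#   print(resp.json()['url'])  # path to the generated result image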
22.3125
96
0.697479
4a07a3474c184c19daedaba6ca3716aea54cec3f
821
py
Python
config_example.py
stevemason/mqtt-audio-alert
439e0b34ec7bfa14144a42496c72d82cc36ebc04
[ "Apache-2.0" ]
null
null
null
config_example.py
stevemason/mqtt-audio-alert
439e0b34ec7bfa14144a42496c72d82cc36ebc04
[ "Apache-2.0" ]
null
null
null
config_example.py
stevemason/mqtt-audio-alert
439e0b34ec7bfa14144a42496c72d82cc36ebc04
[ "Apache-2.0" ]
null
null
null
"""Private config items for mqtt-audio-alert.""" sounds = { # 'NAMEOFSOUND1': '/PATH/TO/AUDIOFILE.mp3', # 'NAMEOFSOUND2': '/PATH/TO/AUDIOFILE.mp3' } # Time ranged where sounds are permitted to play. # Multiple time ranges are allowed. active_times = [ #['07:00', '12:00'], #['13:15', '14:15'], ['00:00', '23:59'] ] mpg123 = '/usr/bin/mpg123' #audiodevice = 'hw:1,0' audiodevice = '' # leave blank for default device topic = 'mqtt-audio-alert' # which MQTT topic to subscribe to client_id = 'mqtt-audio-alert1' # leave blank for default client_id mqtt_host = '' # address of MQTT broker mqtt_port = 1883 log_file = './mqtt-audio-alert.log' username = '' # leave blank for no username password = '' # leave blank for no password #cert = "./root-ca.crt" cert = '' # leave blank for no TLS
25.65625
68
0.652862
4a07a3b31e1c17469f33e7fa3f0eee660d333d8b
11,265
py
Python
ssasse_platform/ActiveScanningEngine/custom_scans/dnp3_read_analog_inputs.py
aashok3/ssass-e
77da9a4c1cef7006fe4a9c6a64f46a0eaade87ca
[ "BSD-3-Clause" ]
4
2021-02-16T17:27:37.000Z
2022-01-25T09:29:30.000Z
ssasse_platform/ActiveScanningEngine/custom_scans/dnp3_read_analog_inputs.py
aashok3/ssass-e
77da9a4c1cef7006fe4a9c6a64f46a0eaade87ca
[ "BSD-3-Clause" ]
3
2021-05-05T16:38:54.000Z
2021-06-04T20:05:28.000Z
ssasse_platform/ActiveScanningEngine/custom_scans/dnp3_read_analog_inputs.py
aashok3/ssass-e
77da9a4c1cef7006fe4a9c6a64f46a0eaade87ca
[ "BSD-3-Clause" ]
5
2021-04-16T21:50:57.000Z
2021-05-25T16:36:26.000Z
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (2021) Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# }}}

import binascii
import os
import socket
import sys
import crcmod
import time
import itertools
import struct
import json
import subprocess
import shlex
import pyshark
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import logging
_log = logging.getLogger(__name__)
from multiprocessing import Process, Queue

CRC_Fun = crcmod.predefined.mkPredefinedCrcFun("crc-16-dnp")
pkt_count = 1

config_path = os.path.join(os.getcwd(), "ssasse_platform", "ActiveScanningEngine", "config.json")
#config_path = "../config.json"
print(config_path)
fr = open(config_path, "r")
CONFIG = json.loads(fr.read())
fr.close()

def my_function(q, cap_filter):
    global pkt_count
    capture = pyshark.LiveCapture(CONFIG['scanning_interface'], display_filter=cap_filter, use_xml=True)
    try:
        capture.apply_on_packets(parse_packet, packet_count=2)
        q.put(pkt_count)
    except Exception as exc:
        _log.error(exc)

def parse_packet(packet):
    global pkt_count
    doc = minidom.parseString(str(packet, 'utf-8'))
    f = open("packet" + str(pkt_count) + ".xml", "w")
    doc.writexml(f)
    f.close()
    pkt_count += 1

def check_crc(buff, count):
    count -= 2
    tmp_buff = buff[:-2]
    crc = CRC_Fun(bytes(tmp_buff))
    count += 2
    if hex(buff[count-2]) != hex(crc & 0xff) or hex(buff[count-1]) != hex(crc >> 8):
        return 1
    else:
        return 0

def isNthBitSet(integer, n):
    if integer & (1 << (n - 1)):
        return True
    else:
        return False

def mygrouper(n, iterable):
    args = [iter(iterable)] * n
    # zip_longest is the Python 3 name; izip_longest only exists in Python 2.
    return ([e for e in t if e is not None] for t in itertools.zip_longest(*args))

def dnp3_request_link_status(master, slave, ip, port):
    DNP_COMS = False
    SOCK_ERR_FLAG = False
    dnp3_data_link_header = [0x05, 0x64, 0x05, 0xc9]
    ip_address = ip
    dnp3_slave = slave
    dnp3_master = master
    dnp3_data_link_header.append(dnp3_slave & 0xff)
    dnp3_data_link_header.append(dnp3_slave >> 8)
    dnp3_data_link_header.append(dnp3_master & 0xff)
    dnp3_data_link_header.append(dnp3_master >> 8)
    req_info = 
bytearray(struct.pack('B B B B B B B B', *dnp3_data_link_header)) dnp3_data_link_checksum = CRC_Fun(bytes(req_info)) req_info.append(dnp3_data_link_checksum & 0xff) req_info.append(dnp3_data_link_checksum >> 8) dnp_port = port #Open connection sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_address = (ip_address, dnp_port) sock.settimeout(10) #Send packet and receive response #print("GOT HERE") try: #print('sending {!r}'.format(binascii.hexlify(req_info))) sock.connect(server_address) #print "GOT HERE1" sock.sendall(req_info) res = sock.recv(1024) is_Status = 0 crc_check = 0 tmp_dnp_data = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] tmp_dnp_data_counter = 0 if (res): length_offset = 2 DL_control_offset = 3 for i in range(len(res)): if res[i] == 0x05 and res[i+1] == 0x64: if res[i+DL_control_offset] == 0x0b: is_Status = 1 for j in range(i+int(res[i+length_offset]) + 5): tmp_dnp_data[tmp_dnp_data_counter] = res[j] tmp_dnp_data_counter += 1 tmp_dnp_data = bytearray(tmp_dnp_data) else: is_Status = 0 if is_Status == 1: crc_check = check_crc(tmp_dnp_data, tmp_dnp_data_counter) if crc_check == 0: DNP_COMS = True except socket.error as error: _log.error("Not able to establish connection on port {} with {}: Socket Error: {}".format(port, ip, error)) if str(error) != "[Errno 104] Connection reset by peer": SOCK_ERR_FLAG = True finally: # print('closing socket') sock.close() return (DNP_COMS, SOCK_ERR_FLAG) def dnp3_read_analog_inputs(ip_address, dnp3_port, dnp3_master, dnp3_slave): global pkt_count #print("dnp3_read_device_attributes: {}".format(kwargs)) #print "Got to dnp3_read_device_attributes" dnp3_data_link_header = [0x05, 0x64, 0x0b, 0xc4] dnp3_data_link_header.append(dnp3_slave & 0xff) dnp3_data_link_header.append(dnp3_slave >> 8) dnp3_data_link_header.append(dnp3_master & 0xff) dnp3_data_link_header.append(dnp3_master >> 8) dnp3_data = [0xc0, 0xc0, 0x01, 0x1e, 0x00, 0x06] #---------------MAIN-------------------- #Calculate Checksums packed_dnp3_data_link_header = bytearray(struct.pack('B B B B B B B B', *dnp3_data_link_header)) dnp3_data_link_checksum = CRC_Fun(bytes(packed_dnp3_data_link_header)) packed_dnp3_data_link_header.append(dnp3_data_link_checksum & 0xff) packed_dnp3_data_link_header.append(dnp3_data_link_checksum >> 8) packed_dnp3_application_data = bytearray(struct.pack('B B B B B B', *dnp3_data)) dnp3_data_checksum = CRC_Fun(bytes(packed_dnp3_application_data)) packed_dnp3_application_data.append(dnp3_data_checksum & 0xff) packed_dnp3_application_data.append(dnp3_data_checksum >> 8) #Build Packet Data req_info = packed_dnp3_data_link_header + packed_dnp3_application_data #print("Before Request Link Status") retry_count = 2 while retry_count > 0: time.sleep(10) DNP3_COMS, SOCK_ERR_FLAG = dnp3_request_link_status(dnp3_master, dnp3_slave, ip_address, dnp3_port) if DNP3_COMS == True or SOCK_ERR_FLAG == True: break retry_count -= 1 results = dict.fromkeys(['TARGET_IPADDR', 'SCAN_NAME', 'DNP3_COMMS', 'MULTIPLE_ANINP_OBJ', 'DEFAULT_ANINP_VAR', 'SCAN_RESULT', 'SCAN_RESULT_DESC']) results['TARGET_IPADDR'] = ip_address results['SCAN_NAME'] = 'dnp3_read_analog_inputs' #print("After Request Link Status") #Sleep to provide time for connection to close properly if SOCK_ERR_FLAG == True: results['SCAN_RESULT'] = -1 results['SCAN_RESULT_DESC'] = 'Socket error connecting to {0}:{1}'.format(ip_address, dnp3_port) return results if DNP3_COMS: #print('dnp3_coms == true') time.sleep(3) #Open connection sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 
    server_address = (ip_address, dnp3_port)
    sock.settimeout(10)

    #Send packet and receive response
    #print "before results"
    objvar_list = []
    objvar_counter = 0

    #Check if DNP3 Communication is even possible
    if not DNP3_COMS:
        results['DNP3_COMMS'] = 0
        results['SCAN_RESULT'] = 0
        results['SCAN_RESULT_DESC'] = 'No Link Status returned from DNP3 slave at {0}. It is possible that the slave device does not accept the scanner as a master'.format(ip_address)
        return results
    else:
        results['DNP3_COMMS'] = 1

    try:
        cap_filter = "dnp3 and ip.addr == " + str(ip_address)
        queue = Queue()
        p = Process(target=my_function, args=(queue, cap_filter))
        p.start()
        #Give tshark a second to start
        time.sleep(3)
        sock.connect(server_address)
        sock.sendall(req_info)
        res = sock.recv(1024)

        p.join()  # this blocks until the process terminates
        pkt_count = queue.get()

        for i in range(1, pkt_count):
            tree = ET.parse('packet' + str(i) + '.xml')
            root = tree.getroot()
            #ET.dump(root)
            for child in root:
                if child.get('name') == 'dnp3':
                    for child2 in child:
                        if 'Application Layer: ' in child2.get('show'):
                            for child3 in child2:
                                if child3.get('show') == 'RESPONSE Data Objects':
                                    for child4 in child3:
                                        if child4.get('name') == 'dnp3.al.obj':
                                            # raw string avoids invalid-escape warnings in Python 3
                                            pattern = re.compile(r"\(Obj:[0-9]+, Var:[0-9]+\)")
                                            objvar = pattern.findall(child4.get('showname'))[0]
                                            if objvar is not None:
                                                objvar_counter += 1
                                                results['DEFAULT_ANINP_VAR'] = objvar.split(':')[2][0:-1]
            os.remove('packet' + str(i) + '.xml')

        if objvar_counter > 1:
            results['MULTIPLE_ANINP_OBJ'] = 1
            results['DEFAULT_ANINP_VAR'] = None

    except socket.error as error:
        _log.error("Not able to establish connection on port {} with {}: Socket Error: {}".format(dnp3_port, ip_address, error))
        SOCK_ERR_FLAG = True
    finally:
        sock.close()

    if not SOCK_ERR_FLAG:
        results['SCAN_RESULT'] = 1
        results['SCAN_RESULT_DESC'] = 'Success'
    else:
        results['SCAN_RESULT'] = -1
        results['SCAN_RESULT_DESC'] = 'Socket error connecting to {0} on port {1}'.format(ip_address, dnp3_port)

    return results

def main():
    results = dnp3_read_analog_inputs(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
    results_dict = json.dumps(results)
    print(results_dict)

if __name__ == '__main__':
    main()
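# Illustrative framing note (hedged: the header bytes below are hypothetical
# example values): the link-layer CRC computed with CRC_Fun above is always
# appended low byte first, which is the layout both request builders emit and
# check_crc() verifies:
#
#   header = bytes([0x05, 0x64, 0x05, 0xc9, 0x01, 0x00, 0x02, 0x00])
#   crc = CRC_Fun(header)
#   framed = header + bytes([crc & 0xff, crc >> 8])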
35.536278
183
0.624767
4a07a41e70d99b09385e334fcefe414287d03a03
3,809
py
Python
tensorflow/python/keras/utils/dataset_creator_test.py
koreybea/tensorflow
e252fffb16f2706688604dc91c426bae367ae5e8
[ "Apache-2.0" ]
6
2021-03-30T07:42:04.000Z
2022-03-23T02:42:36.000Z
tensorflow/python/keras/utils/dataset_creator_test.py
koreybea/tensorflow
e252fffb16f2706688604dc91c426bae367ae5e8
[ "Apache-2.0" ]
7
2021-02-21T21:05:59.000Z
2022-02-10T01:39:06.000Z
tensorflow/python/keras/utils/dataset_creator_test.py
koreybea/tensorflow
e252fffb16f2706688604dc91c426bae367ae5e8
[ "Apache-2.0" ]
4
2019-06-15T01:13:28.000Z
2020-12-16T02:28:45.000Z
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dataset_creator.""" from tensorflow.python.compat import v2_compat from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import multi_worker_test_base from tensorflow.python.distribute import parameter_server_strategy_v2 from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver from tensorflow.python.keras.engine import sequential from tensorflow.python.keras.layers import core as core_layers from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.utils import dataset_creator from tensorflow.python.platform import test from tensorflow.python.training.server_lib import ClusterSpec class DatasetCreatorTest(test.TestCase): def test_dataset_creator(self): with self.assertRaisesRegex( TypeError, "`dataset_fn` for `DatasetCreator` must be a `callable`."): dataset_creator.DatasetCreator(2) dataset_fn = lambda: 3 with self.assertRaisesRegex( TypeError, "The `callable` provided to `DatasetCreator` must return " "a Dataset."): dataset_creator.DatasetCreator(dataset_fn)() dataset_fn = lambda: dataset_ops.DatasetV2.from_tensor_slices([1, 1]) got = dataset_creator.DatasetCreator(dataset_fn)() self.assertEqual( next(iter(got)), next(iter(dataset_ops.DatasetV2.from_tensor_slices([1, 1])))) def _get_dataset_fn(self): def dataset_fn(input_context): global_batch_size = 64 batch_size = input_context.get_per_replica_batch_size(global_batch_size) dataset = dataset_ops.DatasetV2.from_tensors(([1.], [1.])).repeat() dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(2) return dataset return dataset_fn def test_dataset_creator_model_fit_without_strategy(self): model = sequential.Sequential([core_layers.Dense(10)]) model.compile(gradient_descent.SGD(), loss="mse") history = model.fit( dataset_creator.DatasetCreator(self._get_dataset_fn()), epochs=10, steps_per_epoch=10, verbose=0) self.assertLen(history.history["loss"], 10) def test_dataset_creator_usage_in_parameter_server_model_fit(self): cluster_def = multi_worker_test_base.create_in_process_cluster( num_workers=2, num_ps=1, rpc_layer="grpc") cluster_def["chief"] = [ "localhost:%d" % multi_worker_test_base.pick_unused_port() ] strategy = parameter_server_strategy_v2.ParameterServerStrategyV2( SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc")) with strategy.scope(): model = sequential.Sequential([core_layers.Dense(10)]) model.compile(gradient_descent.SGD(), loss="mse") history = model.fit( dataset_creator.DatasetCreator(self._get_dataset_fn()), epochs=10, steps_per_epoch=10, verbose=0) self.assertLen(history.history["loss"], 10) if __name__ == "__main__": v2_compat.enable_v2_behavior() test.main()
39.268041
80
0.7288
4a07a545e98a3229144a79d968cbf7bfb9fb0a18
1,988
py
Python
test/functional/p2p_invalid_locator.py
xiaolin1579/vektorcoin
6e33506d8fba8883f401a89af0b7a76d44fb8bed
[ "MIT" ]
1
2021-02-16T10:45:46.000Z
2021-02-16T10:45:46.000Z
test/functional/p2p_invalid_locator.py
xiaolin1579/vektorcoin
6e33506d8fba8883f401a89af0b7a76d44fb8bed
[ "MIT" ]
null
null
null
test/functional/p2p_invalid_locator.py
xiaolin1579/vektorcoin
6e33506d8fba8883f401a89af0b7a76d44fb8bed
[ "MIT" ]
1
2021-02-09T14:29:27.000Z
2021-02-09T14:29:27.000Z
#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid locators. """ from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ from test_framework.mininode import P2PInterface from test_framework.test_framework import VEKTORCOINTestFramework class InvalidLocatorTest(VEKTORCOINTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node = self.nodes[0] # convenience reference to the node node.generate(1) # Get node out of IBD self.log.info('Test max locator size') block_count = node.getblockcount() for msg in [msg_getheaders(), msg_getblocks()]: self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1)) node.add_p2p_connection(P2PInterface()) msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)] node.p2p.send_message(msg) node.p2p.wait_for_disconnect() node.disconnect_p2ps() self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ)) node.add_p2p_connection(P2PInterface()) msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)] node.p2p.send_message(msg) if type(msg) == msg_getheaders: node.p2p.wait_for_header(int(node.getbestblockhash(), 16)) else: node.p2p.wait_for_block(int(node.getbestblockhash(), 16)) if __name__ == '__main__': InvalidLocatorTest().main()
42.297872
135
0.682596
4a07a76b663c9d137900b84dac9edf12caff5dd1
556
py
Python
04_introduccion-al-computo-con-python/modulo_II/clase03-busqueda-binaria.py
Aibique-Forks/articicial-inteligence-and-data-science
fbdc866e4e46060cbde5b887806bdfeac645838e
[ "MIT" ]
30
2020-06-19T16:21:04.000Z
2022-02-19T01:48:39.000Z
04_introduccion-al-computo-con-python/modulo_II/clase03-busqueda-binaria.py
Aibique-Forks/articicial-inteligence-and-data-science
fbdc866e4e46060cbde5b887806bdfeac645838e
[ "MIT" ]
87
2021-02-12T04:42:13.000Z
2021-09-20T04:25:29.000Z
04_introduccion-al-computo-con-python/modulo_II/clase03-busqueda-binaria.py
Aibique-Forks/articicial-inteligence-and-data-science
fbdc866e4e46060cbde5b887806bdfeac645838e
[ "MIT" ]
11
2020-08-13T04:04:01.000Z
2022-01-20T20:10:43.000Z
""" Tema: Busqueda Binaria. Curso: Pensamiento computacional. Plataforma: Platzi. Profesor: David Aroesti. Alumno: @edinsonrequena. """ objetivo = int(input('Type a number: ')) epsilon = 0.001 bajo = 0.0 alto = max(1.0, objetivo) respuesta = (alto + bajo) / 2 while abs(respuesta**2 - objetivo) >= epsilon: print(f'bajo={bajo}, alto={alto}, respuesta={respuesta}') if respuesta**2 < objetivo: bajo = respuesta else: alto = respuesta respuesta = (alto + bajo) / 2 print(f'La raiz cuadrada de {objetivo} es {respuesta}')
21.384615
61
0.656475
4a07a7fe0d4105cab2f625b7153e4a7c8aa4bcb3
4,323
py
Python
ytpld.py
D54/youtube-pldump
be388f2963c40f9306fa45b6f4746f6c00cbdfbf
[ "MIT" ]
null
null
null
ytpld.py
D54/youtube-pldump
be388f2963c40f9306fa45b6f4746f6c00cbdfbf
[ "MIT" ]
null
null
null
ytpld.py
D54/youtube-pldump
be388f2963c40f9306fa45b6f4746f6c00cbdfbf
[ "MIT" ]
1
2018-03-04T12:04:24.000Z
2018-03-04T12:04:24.000Z
from json import load, dump
from requests import post, get
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
from webbrowser import open as webopen
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import exit
from datetime import datetime
from yaml import dump as ydump

with open('client_secret.json') as f:
    cs = load(f)['installed']

def show_auth_page():
    u = list(urlparse(cs['auth_uri']))
    u[4] = urlencode({
        'client_id': cs['client_id'],
        'redirect_uri': 'http://localhost:10000',
        'response_type': 'code',
        'scope': 'https://www.googleapis.com/auth/youtube.readonly'
    })
    webopen(urlunparse(u))

def listen_for_code():
    finished = False
    re = None

    class S(BaseHTTPRequestHandler):
        def do_GET(self):
            nonlocal finished, re
            url = urlparse(self.path)
            if url.path == '/':
                re = parse_qs(url.query)
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write("<html><head><title>OAuth</title></head><body><h1>Now you can close this window/tab.</h1></body></html>".encode('utf-8'))
                finished = True
            else:
                self.send_response(204)
                self.end_headers()

    httpd = HTTPServer(('localhost', 10000), S)
    while not finished:
        httpd.handle_request()
    return {k: ', '.join(v) for k, v in re.items()}

def request_token(code):
    r = post(cs['token_uri'], data={
        'code': code,
        'client_id': cs['client_id'],
        'client_secret': cs['client_secret'],
        'redirect_uri': 'http://localhost:10000',
        'grant_type': 'authorization_code'
    })
    d = int(datetime.strptime(r.headers['Date'], '%a, %d %b %Y %H:%M:%S %Z').timestamp())
    r = r.json()
    r.pop('token_type')
    e = r.pop('expires_in')
    r['expires_at'] = d + e
    return r

def refresh_token(refresh_token):
    r = post(cs['token_uri'], data={
        'refresh_token': refresh_token,
        'client_id': cs['client_id'],
        'client_secret': cs['client_secret'],
        'grant_type': 'refresh_token'
    })
    d = int(datetime.strptime(r.headers['Date'], '%a, %d %b %Y %H:%M:%S %Z').timestamp())
    r = r.json()
    r.pop('token_type')
    e = r.pop('expires_in')
    r['expires_at'] = d + e
    return r

def auth():
    global cred
    show_auth_page()
    code = listen_for_code()
    if 'error' in code:
        print('An error occurred during the authentication process:')
        print(code['error'])
        exit(1)
    cred = request_token(code['code'])
    with open('credentials.json', 'w') as f:
        dump(cred, f)

def refresh():
    new_cred = refresh_token(cred['refresh_token'])
    cred.update(new_cred)
    with open('credentials.json', 'w') as f:
        dump(cred, f)

def apireq(path, params={}):
    _params = {'part': 'snippet'}
    _params.update(params)
    baseURL = 'https://www.googleapis.com/youtube/v3'
    r = get(baseURL + path, params=_params, headers={'Authorization': 'Bearer %s' % cred['access_token']})
    if r.status_code == 401:
        refresh()
        return apireq(path, params)
    return r.json()

def apireqlist(path, params={}):
    _params = {'maxResults': 50}
    _params.update(params)
    r = apireq(path, _params)
    re = r['items']
    if 'nextPageToken' in r:
        _params['pageToken'] = r['nextPageToken']
        re += apireqlist(path, _params)
    return re

try:
    with open('credentials.json') as f:
        cred = load(f)
except FileNotFoundError:
    auth()

playlists = apireqlist('/playlists', {'mine': 'true'})

out = [{'id': x['id'], 'title': x['snippet']['title']} for x in playlists]
out = sorted(out, key=lambda x: x['title'])

for pl in out:
    print('Downloading [%s] ' % pl['title'], end='')
    items = apireqlist('/playlistItems', {'playlistId': pl['id']})
    pl['items'] = [{'id': x['snippet']['resourceId']['videoId'], 'title': x['snippet']['title']} for x in items]
    print(' Done')

with open('dump.yaml', 'w') as f:
    ydump(out, f, 
width=250)
30.020833
155
0.573213
4a07a80706a19809cd2a12cc691382456220c900
214
py
Python
8kyu/are_you_playing_banjo.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
1
2018-12-02T23:04:38.000Z
2018-12-02T23:04:38.000Z
8kyu/are_you_playing_banjo.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
null
null
null
8kyu/are_you_playing_banjo.py
nhsz/codewars
82703959e910254d6feff4162f78c6dbd7a1c3ed
[ "MIT" ]
null
null
null
# http://www.codewars.com/kata/53af2b8861023f1d88000832/ def are_you_playing_banjo(name): if name[0].lower() == "r": return name + " plays banjo" else: return name + " does not play banjo"
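# Examples (hand-checked against the first-letter rule above):
#   are_you_playing_banjo("Rikke") -> "Rikke plays banjo"
#   are_you_playing_banjo("bravo") -> "bravo does not play banjo"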
26.75
56
0.640187
4a07a8200357cc116b3986c9008cd09c32e90ef1
7,509
py
Python
tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
mpangrazzi/haystack
eb514a6167b84a4b6923dfc397c7a40ab3da2e44
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
mpangrazzi/haystack
eb514a6167b84a4b6923dfc397c7a40ab3da2e44
[ "Apache-2.0" ]
null
null
null
tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
mpangrazzi/haystack
eb514a6167b84a4b6923dfc397c7a40ab3da2e44
[ "Apache-2.0" ]
null
null
null
# ## Task: Build a Question Answering pipeline without Elasticsearch
#
# Haystack provides alternatives to Elasticsearch for developing quick prototypes.
#
# You can use an `InMemoryDocumentStore` or a `SQLDocumentStore` (with SQLite) as the document store.
#
# If you are interested in the more feature-rich Elasticsearch, please refer to Tutorial 1.

from haystack.document_stores import InMemoryDocumentStore, SQLDocumentStore
from haystack.nodes import FARMReader, TransformersReader, TfidfRetriever
from haystack.utils import clean_wiki_text, convert_files_to_docs, fetch_archive_from_http, print_answers


def tutorial3_basic_qa_pipeline_without_elasticsearch():
    # In-Memory Document Store
    document_store = InMemoryDocumentStore()

    # or, alternatively, SQLite Document Store
    # document_store = SQLDocumentStore(url="sqlite:///qa.db")

    # ## Preprocessing of documents
    #
    # Haystack provides a customizable pipeline for:
    # - converting files into texts
    # - cleaning texts
    # - splitting texts
    # - writing them to a Document Store

    # In this tutorial, we download Wikipedia articles on Game of Thrones, apply a basic cleaning function, and index
    # them in our document store.

    # Let's first get some documents that we want to query
    # Here: 517 Wikipedia articles for Game of Thrones
    doc_dir = "data/tutorial3"
    s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt3.zip"
    fetch_archive_from_http(url=s3_url, output_dir=doc_dir)

    # convert files to dicts containing documents that can be indexed into our datastore
    docs = convert_files_to_docs(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
    # You can optionally supply a cleaning function that is applied to each doc (e.g. to remove footers)
    # It must take a str as input, and return a str.

    # Now, let's write the docs to our DB.
    document_store.write_documents(docs)

    # ## Initialize Retriever, Reader & Pipeline
    #
    # ### Retriever
    #
    # Retrievers help narrow down the scope for the Reader to smaller units of text where
    # a given question could be answered.
    #
    # With InMemoryDocumentStore or SQLDocumentStore, you can use the TfidfRetriever. For more
    # retrievers, please refer to Tutorial 1.

    # An in-memory TfidfRetriever based on Pandas dataframes
    retriever = TfidfRetriever(document_store=document_store)

    # ### Reader
    #
    # A Reader scans the texts returned by retrievers in detail and extracts the k best answers. Readers are based
    # on powerful, but slower, deep learning models.
    #
    # Haystack currently supports Readers based on the frameworks FARM and Transformers.
    # With both you can either load a local model or one from Hugging Face's model hub (https://huggingface.co/models).

    # **Here:** a medium sized RoBERTa QA model using a Reader based on
    #           FARM (https://huggingface.co/deepset/roberta-base-squad2)
    # **Alternatives (Reader):** TransformersReader (leveraging the `pipeline` of the Transformers package)
    # **Alternatives (Models):** e.g. "distilbert-base-uncased-distilled-squad" (fast) or
    #                            "deepset/bert-large-uncased-whole-word-masking-squad2" (good accuracy)
    # **Hint:** You can adjust the model to return "no answer possible" with the no_ans_boost.
    #           Higher values mean the model prefers "no answer possible".
    # #### FARMReader
    #
    # Load a local model or any of the QA models on
    # Hugging Face's model hub (https://huggingface.co/models)
    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)

    # #### TransformersReader
    # Alternative:
    # reader = TransformersReader(model_name_or_path="distilbert-base-uncased-distilled-squad", tokenizer="distilbert-base-uncased", use_gpu=-1)

    # ### Pipeline
    #
    # With a Haystack `Pipeline` you can stick your building blocks together into a search pipeline.
    # Under the hood, `Pipelines` are Directed Acyclic Graphs (DAGs) that you can easily customize for your own use cases.
    # To speed things up, Haystack also comes with a few predefined Pipelines. One of them is the `ExtractiveQAPipeline` that combines a retriever and a reader to answer our questions.
    # You can learn more about `Pipelines` in the [docs](https://haystack.deepset.ai/docs/latest/pipelinesmd).
    from haystack.pipelines import ExtractiveQAPipeline

    pipe = ExtractiveQAPipeline(reader, retriever)

    ## Voilà! Ask a question!
    prediction = pipe.run(
        query="Who is the father of Arya Stark?", params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}}
    )

    # prediction = pipe.run(query="Who created the Dothraki vocabulary?", params={"Reader": {"top_k": 5}})
    # prediction = pipe.run(query="Who is the sister of Sansa?", params={"Reader": {"top_k": 5}})

    # Now you can either print the object directly
    print("\n\nRaw object:\n")
    from pprint import pprint

    pprint(prediction)

    # Sample output:
    # {
    #     'answers': [ <Answer: answer='Eddard', type='extractive', score=0.9919578731060028, offsets_in_document=[{'start': 608, 'end': 615}], offsets_in_context=[{'start': 72, 'end': 79}], document_id='cc75f739897ecbf8c14657b13dda890e', meta={'name': '454_Music_of_Game_of_Thrones.txt'}}, context='...' >,
    #                  <Answer: answer='Ned', type='extractive', score=0.9767240881919861, offsets_in_document=[{'start': 3687, 'end': 3801}], offsets_in_context=[{'start': 18, 'end': 132}], document_id='9acf17ec9083c4022f69eb4a37187080', meta={'name': '454_Music_of_Game_of_Thrones.txt'}}, context='...' >,
    #                  ...
    #                ]
    #     'documents': [ <Document: content_type='text', score=0.8034909798951382, meta={'name': '332_Sansa_Stark.txt'}, embedding=None, id=d1f36ec7170e4c46cde65787fe125dfe', content='\n===\'\'A Game of Thrones\'\'===\nSansa Stark begins the novel by being betrothed to Crown ...'>,
    #                    <Document: content_type='text', score=0.8002150354529785, meta={'name': '191_Gendry.txt'}, embedding=None, id='dd4e070a22896afa81748d6510006d2', 'content='\n===Season 2===\nGendry travels North with Yoren and other Night's Watch recruits, including Arya ...'>,
    #                    ...
    #                  ],
    #     'no_ans_gap': 11.688868522644043,
    #     'node_id': 'Reader',
    #     'params': {'Reader': {'top_k': 5}, 'Retriever': {'top_k': 5}},
    #     'query': 'Who is the father of Arya Stark?',
    #     'root_node': 'Query'
    # }

    # Note that the documents contained in the above object are the documents filtered by the Retriever from
    # the document store. Although the answers were extracted from these documents, it's possible that many
    # answers were taken from a single one of them, and that some of the documents were not the source of any answer.
# Or use a util to simplify the output # Change `minimum` to `medium` or `all` to raise the level of detail print("\n\nSimplified output:\n") print_answers(prediction, details="minimum") if __name__ == "__main__": tutorial3_basic_qa_pipeline_without_elasticsearch() # This Haystack script was made with love by deepset in Berlin, Germany # Haystack: https://github.com/deepset-ai/haystack # deepset: https://deepset.ai/
53.255319
307
0.691836
4a07a8a2e1798487e0a37311eeb08f9598b5a420
1,163
py
Python
lab0/algebra_utils.py
rdugue/MIT_AI_LABS
97d30195aa842f8edf0fb863ceae2599fe4f669e
[ "MIT" ]
1
2017-05-01T10:07:02.000Z
2017-05-01T10:07:02.000Z
lab0/algebra_utils.py
rdugue/MIT_AI_LABS
97d30195aa842f8edf0fb863ceae2599fe4f669e
[ "MIT" ]
null
null
null
lab0/algebra_utils.py
rdugue/MIT_AI_LABS
97d30195aa842f8edf0fb863ceae2599fe4f669e
[ "MIT" ]
1
2018-02-20T17:24:34.000Z
2018-02-20T17:24:34.000Z
""" These are functions for transferring algebra.py's test cases over the Internet. You shouldn't need to mess with these. """ from algebra import simplify_if_possible, Sum, Product, Expression def distribution(val): if isinstance(val, Expression): raise ValueError("expression has already been decoded") return encode_sumprod(simplify_if_possible(decode_sumprod(val))) def encode_sumprod(lst): retVal = [] if isinstance(lst, Sum): retVal.append('Sum') elif isinstance(lst, Product): retVal.append('Product') for elt in lst: if isinstance(elt, (Sum, Product)): retVal.append( encode_sumprod(elt) ) else: retVal.append(elt) return retVal def decode_sumprod(lst): retVal = [] for elt in lst[1:]: if isinstance(elt, (list, tuple)): retVal.append(decode_sumprod(elt)) else: retVal.append(elt) if lst[0] == 'Sum': retVal = Sum(retVal) elif lst[0] == 'Product': retVal = Product(retVal) else: raise Exception, "Error: List was not an encoded Sum or Product!" return retVal
24.229167
73
0.628547
4a07aa88c15e5ad0c0de9b2cd591a664c87b0ecd
478
py
Python
myproject/cookie_app/migrations/0009_auto_20141211_0631.py
nathanielbecker/business-contacter-django-app
369270f46087b7b593f5b4cff6bddd89707cdc62
[ "Apache-2.0" ]
null
null
null
myproject/cookie_app/migrations/0009_auto_20141211_0631.py
nathanielbecker/business-contacter-django-app
369270f46087b7b593f5b4cff6bddd89707cdc62
[ "Apache-2.0" ]
null
null
null
myproject/cookie_app/migrations/0009_auto_20141211_0631.py
nathanielbecker/business-contacter-django-app
369270f46087b7b593f5b4cff6bddd89707cdc62
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cookie_app', '0008_auto_20141120_0807'), ] operations = [ migrations.AlterField( model_name='barebones_crud', name='FollowUp', field=models.BooleanField(default=False, verbose_name='pizzafff'), preserve_default=True, ), ]
22.761905
78
0.625523
4a07ab32ac709f82a80af9324ca7877f04086ec5
14,276
py
Python
google/cloud/forseti/common/gcp_type/iam_policy.py
johnrevans6/forseti-security
d4b907a076ef4caaea9d3232c8fd0ad5822cd2d6
[ "Apache-2.0" ]
null
null
null
google/cloud/forseti/common/gcp_type/iam_policy.py
johnrevans6/forseti-security
d4b907a076ef4caaea9d3232c8fd0ad5822cd2d6
[ "Apache-2.0" ]
null
null
null
google/cloud/forseti/common/gcp_type/iam_policy.py
johnrevans6/forseti-security
d4b907a076ef4caaea9d3232c8fd0ad5822cd2d6
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GCP IAM Policy. See: https://cloud.google.com/iam/reference/rest/v1/Policy """ import re from google.cloud.forseti.common.gcp_type import errors from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util.regular_exp import escape_and_globify LOGGER = logger.get_logger(__name__) def _get_iam_members(members): """Get a list of this binding's members as IamPolicyMembers. Args: members (list): A list of members (strings). Returns: list: A list of IamPolicyMembers. """ return [IamPolicyMember.create_from(m) for m in members] class IamPolicy(object): """GCP IAM Policy.""" def __init__(self): """Initialize.""" self.audit_configs = None self.bindings = [] @classmethod def create_from(cls, policy_json): """Create an IamPolicy object from json representation. Args: policy_json (dict): The json representing the IAM policy. Returns: IamPolicy: An IamPolicy. """ policy = cls() if not policy_json: raise errors.InvalidIamPolicyError( 'Invalid policy {}'.format(policy_json)) policy.bindings = [IamPolicyBinding.create_from(b) for b in policy_json.get('bindings', [])] if 'auditConfigs' in policy_json: policy.audit_configs = IamAuditConfig.create_from( policy_json.get('auditConfigs')) return policy def __eq__(self, other): """Tests equality of IamPolicy. Args: other (object): Object to compare. Returns: bool: True if equals, False otherwise. """ if not isinstance(other, type(self)): return NotImplemented return (self.bindings == other.bindings and self.audit_configs == other.audit_configs) def __ne__(self, other): """Tests inequality of IamPolicy. Args: other (object): Object to compare. Returns: bool: True if not equals, False otherwise. """ return not self == other def __repr__(self): """String representation of IamPolicy. Returns: str: Representation of IamPolicy """ if self.audit_configs: return 'IamPolicy: <bindings={}, audit_configs={}>'.format( self.bindings, self.audit_configs) return 'IamPolicy: <bindings={}>'.format(self.bindings) def is_empty(self): """Tests whether this policy's bindings are empty. Returns: bool: True if bindings are empty; False otherwise. """ return not bool(self.bindings) class IamPolicyBinding(object): """IAM Policy Binding.""" def __init__(self, role_name, members=None): """Initialize. Args: role_name (str): The string name of the role. members (list): The role members of the policy binding. """ if not role_name or not members: raise errors.InvalidIamPolicyBindingError( ('Invalid IAM policy binding: ' 'role_name={}, members={}'.format(role_name, members))) self.role_name = role_name self.members = _get_iam_members(members) self.role_pattern = re.compile(escape_and_globify(role_name), flags=re.IGNORECASE) def __eq__(self, other): """Tests equality of IamPolicyBinding. Args: other (object): Object to compare. Returns: bool: Whether objects are equal. 
""" if not isinstance(other, type(self)): return NotImplemented return (self.role_name == other.role_name and self.members == other.members) def __ne__(self, other): """Tests inequality of IamPolicyBinding. Args: other (object): Object to compare. Returns: bool: Whether objects are not equal. """ return not self == other def __repr__(self): """String representation of IamPolicyBinding. Returns: str: The representation of IamPolicyBinding. """ return 'IamBinding: <role_name={}, members={}>'.format( self.role_name, self.members) @classmethod def create_from(cls, binding): """Create an IamPolicyBinding from a binding dict. Args: binding (dict): The binding (role mapped to members). Returns: IamPolicyBinding: A new IamPolicyBinding created with the role and members. """ if isinstance(binding, type(cls)): return binding try: return cls(binding.get('role'), binding.get('members')) except errors.InvalidIamPolicyMemberError: LOGGER.debug( 'Invalid IAM policy member: %s.', binding.get('members')) return None def merge_members(self, other): """Add `other` members to mine if the role names are the same. Use case: merging members from ancestor bindings with the same role name. Args: other (IamPolicyBinding): the other IAM policy binding """ if not isinstance(other, type(self)): raise errors.InvalidIamPolicyBindingError( 'Cannot merge, other is not of type \'IamPolicyBinding\'') if other.role_name != self.role_name: return for member in other.members: if member not in self.members: self.members.append(member) class IamPolicyMember(object): """IAM Policy Member. See https://cloud.google.com/iam/reference/rest/v1/Policy#Binding. Parse an identity from a policy binding. """ ALL_USERS = 'allUsers' ALL_AUTH_USERS = 'allAuthenticatedUsers' member_types = {ALL_USERS, ALL_AUTH_USERS, 'user', 'group', 'serviceAccount', 'domain'} def __init__(self, member_type, member_name=None): """Initialize. Args: member_type (str): The string member type (see `member_types`). member_name (str): The string member name. """ if not member_type or not self._member_type_exists(member_type): raise errors.InvalidIamPolicyMemberError( 'Invalid policy member: {}'.format(member_type)) self.type = member_type self.name = member_name self.name_pattern = None if member_name: self.name_pattern = re.compile(escape_and_globify(self.name), flags=re.IGNORECASE) def __eq__(self, other): """Tests equality of IamPolicyMember. Args: other (object): The object to compare. Returns: bool: Whether the objects are equal. """ if not isinstance(other, type(self)): return NotImplemented return (self.type == other.type and self.name == other.name) def __ne__(self, other): """Tests inequality of IamPolicyMember. Args: other (object): The object to compare. Returns: bool: Whether the objects are not equal. """ return not self == other def __hash__(self): """Hash function for IamPolicyMember. Returns: hash: The hashed object. """ return hash((self.type, self.name)) def __repr__(self): """String representation of IamPolicyMember. Returns: str: The representation of IamPolicyMember. """ return '%s:%s' % (self.type, self.name) def _member_type_exists(self, member_type): """Determine if the member type exists in valid member types. Args: member_type (str): Member type. Returns: bool: If member type is valid. """ return member_type in self.member_types @classmethod def create_from(cls, member): """Create an IamPolicyMember from the member identity string. Args: member (str): The IAM policy binding member. Returns: IamPolicyMember: Created from the member string. 
""" identity_parts = member.split(':') member_name = None if len(identity_parts) > 1: member_name = identity_parts[1] return cls(identity_parts[0], member_name=member_name) def _is_matching_domain(self, other): """Determine whether IAM policy member belongs to domain. This applies to a situation where a rule has a `domain` style `members` specification and the policy to check specifies users. Args: other (IamPolicyMember): The policy binding member to check. Returns: bool: True if `other` is a member of the domain, False otherwise. """ if self.type != 'domain' or other.type != 'user': return False try: _, domain = other.name.rsplit('@', 1) except ValueError: return False return self.name == domain def matches(self, other): """Determine if another member matches. Args: other (str): The policy binding member name. Returns: bool: True if the member matches this member, otherwise False. """ other_member = None if isinstance(other, type(self)): other_member = other else: other_member = IamPolicyMember.create_from(other) # Bucket IAM supports a special "allUsers" member, whose value is simply # "allUsers", without a colon separator and a second fragment. if (self.type == self.ALL_USERS and other_member.type == self.ALL_USERS): return True # Match if: # {member_type}:{member_name} regex-matches self's # {member_type}:{member_name} . if (self.type == other_member.type and self.name_pattern.match(other_member.name)): return True if self._is_matching_domain(other_member): return True return False class IamAuditConfig(object): """IAM Audit Config. Captures the mapping from service to log type to exempted members for a project, folder or organization. """ ALL_SERVICES = 'allServices' VALID_LOG_TYPES = frozenset(['AUDIT_READ', 'DATA_READ', 'DATA_WRITE']) def __init__(self, service_configs): """Initialize. Args: service_configs (dict): A dictionary mapping service names to dictionaries mapping log types to sets of exempeted members. """ self.service_configs = service_configs def __eq__(self, other): """Tests equality of IamAuditConfig. Args: other (object): Object to compare. Returns: bool: Whether objects are equal. """ if not isinstance(other, type(self)): return NotImplemented return self.service_configs == other.service_configs def __ne__(self, other): """Tests inequality of IamAuditConfig. Args: other (object): Object to compare. Returns: bool: Whether objects are not equal. """ return not self == other def __repr__(self): """String representation of IamAuditConfig. Returns: str: The representation of IamAuditConfig. """ return 'IamAuditConfig: <service_configs={}>'.format( self.service_configs) @classmethod def create_from(cls, audit_configs_list): """Creates an IamAuditConfig from a list of auditConfig dicts. Args: audit_configs_list (list): A list of auditConfigs for each service. Returns: IamAuditConfig: A new IamAuditConfig created with the service audit configs. """ service_configs = {} for audit_config in audit_configs_list: service_name = audit_config.get('service') log_configs = {} for log_config in audit_config.get('auditLogConfigs'): log_configs[log_config.get('logType')] = set( log_config.get('exemptedMembers', [])) if not service_name or not log_configs or None in log_configs: raise errors.InvalidIamAuditConfigError( 'Invalid IAM audit config: {}'.format(audit_config)) service_configs[service_name] = log_configs return cls(service_configs) def merge_configs(self, other): """Adds `other` audit configs to mine, combining exempted member. 
Use case: merging audit configs from ancestor IAM policies. Args: other (IamAuditConfig): the other IAM audit configs """ if not isinstance(other, type(self)): raise errors.InvalidIamAuditConfigError( 'Cannot merge, other is not of type \'IamAuditConfig\'') for service_name, log_configs in other.service_configs.iteritems(): if service_name not in self.service_configs: self.service_configs[service_name] = {} service_config = self.service_configs[service_name] for log_type, exemptions in log_configs.iteritems(): service_config[log_type] = exemptions.union(service_config.get( log_type, set()))
31.238512
80
0.601639
4a07abe5cccad3f96b7e7b5a16e2547f1944405f
157
py
Python
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_Seasonal_Hour_SVR.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_Seasonal_Hour_SVR.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
1
2019-11-30T23:39:38.000Z
2019-12-01T04:34:35.000Z
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_Seasonal_Hour_SVR.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model(['Logit'], ['PolyTrend'], ['Seasonal_Hour'], ['SVR'])
39.25
79
0.745223