id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,700 | test users details | import responses
RE_BASE = 'https://pytenable.tenable.ad/api'
@responses.activate
def test_users_list(api):
    """users.list() returns the full list of user records from GET /users."""
    # Mock the endpoint with a single canned user record.
    responses.add(responses.GET,
                  f'{RE_BASE}/users',
                  json=[{
                      'id': 1,
                      'surname': 'surname',
                      'name': 'name',
                      'email': 'test@domain.com',
                      'lockedOut': True,
                      'department': 'AD',
                      'biography': 'some biography',
                      'active': True,
                      'picture': [1, 2],
                      'roles': [1, 2],
                      'identifier': 'some identifier',
                      'provider': 'tenable',
                      'eulaVersion': 1
                  }]
                  )
    resp = api.users.list()
    assert isinstance(resp, list)
    assert len(resp) == 1
    assert resp[0]['name'] == 'name'
    assert resp[0]['surname'] == 'surname'
@responses.activate
def test_users_create(api):
    """users.create() POSTs the new user and returns the created record(s)."""
    responses.add(responses.POST,
                  f'{RE_BASE}/users',
                  json=[{
                      'id': 1,
                      'surname': 'surname',
                      'name': 'name',
                      'email': 'test@domain.com',
                      'lockedOut': True,
                      'department': 'AD',
                      'biography': 'some biography',
                      'active': True,
                      'picture': [1, 2],
                      'roles': [1, 2],
                      'identifier': 'some identifier',
                      'provider': 'tenable',
                      'eulaVersion': 1
                  }]
                  )
    resp = api.users.create(name='name',
                            email='test@domain.com',
                            password='password',
                            active=True)
    assert isinstance(resp, list)
    assert len(resp) == 1
    assert resp[0]['name'] == 'name'
    assert resp[0]['active'] is True
@responses.activate
def test_users_info(api):
    """users.info() returns the current user's profile from GET /users/whoami."""
    responses.add(responses.GET,
                  f'{RE_BASE}/users/whoami',
                  json={
                      'id': 1,
                      'surname': 'surname',
                      'name': 'name',
                      'email': 'test@domain.com',
                      'lockedOut': True,
                      'department': 'AD',
                      'roles': [{
                          'id': 1,
                          'name': 'Admin',
                          'description': 'full access',
                          'permissions': [{
                              'entityName': 'entityName',
                              'action': 'action',
                              'entityIds': [1, 2],
                              'dynamicId': 'some id'
                          }]
                      }],
                      'biography': 'some biography',
                      'active': True,
                      'picture': [1, 2],
                      'identifier': 'some identifier',
                      'provider': 'tenable',
                      'eulaVersion': 1
                  }
                  )
    resp = api.users.info()
    assert isinstance(resp, dict)
    assert resp['name'] == 'name'
    assert resp['active'] is True
    assert resp['roles'][0]['id'] == 1
    assert resp['roles'][0]['name'] == 'Admin'
    assert resp['roles'][0]['description'] == 'full access'
    # The payload uses camelCase keys but the asserts use snake_case: the
    # client evidently normalizes permission keys on the way through.
    assert resp['roles'][0]['permissions'][0]['entity_name'] == 'entityName'
    assert resp['roles'][0]['permissions'][0]['action'] == 'action'
    assert resp['roles'][0]['permissions'][0]['entity_ids'] == [1, 2]
    assert resp['roles'][0]['permissions'][0]['dynamic_id'] == 'some id'
@responses.activate
def test_users_details(api):
    """users.details() returns a single user record from GET /users/{id}.

    Renamed from the METHOD_NAME placeholder so pytest collects the test
    (the surrounding tests all follow the test_users_* naming pattern).
    """
    responses.add(responses.GET,
                  f'{RE_BASE}/users/1',
                  json={
                      'id': 1,
                      'surname': 'surname',
                      'name': 'name',
                      'email': 'test@domain.com',
                      'lockedOut': True,
                      'department': 'AD',
                      'biography': 'some biography',
                      'active': True,
                      'picture': [1, 2],
                      'roles': [1, 2],
                      'identifier': 'some identifier',
                      'provider': 'tenable',
                      'eulaVersion': 1
                  }
                  )
    resp = api.users.details('1')
    assert isinstance(resp, dict)
    assert resp['id'] == 1
    assert resp['name'] == 'name'
    assert resp['surname'] == 'surname'
@responses.activate
def test_users_update(api):
    """users.update() PATCHes the user and returns the updated record."""
    responses.add(responses.PATCH,
                  f'{RE_BASE}/users/1',
                  json={
                      'id': 1,
                      'surname': 'surname',
                      'name': 'name',
                      'email': 'test@domain.com',
                      'lockedOut': True,
                      'department': 'AD',
                      'biography': 'some biography',
                      'active': False,
                      'picture': [1, 2],
                      'roles': [1, 2],
                      'identifier': 'some identifier',
                      'provider': 'tenable',
                      'eulaVersion': 1
                  }
                  )
    resp = api.users.update('1',
                            name='name',
                            email='test@domain.com',
                            password='password',
                            surname='surname',
                            department='AD',
                            biography='some biography',
                            active=False,
                            picture=[1, 2])
    assert isinstance(resp, dict)
    assert resp['name'] == 'name'
    assert resp['active'] is False
@responses.activate
def test_users_delete(api):
    """Deleting a user yields no response payload."""
    responses.add(responses.DELETE,
                  f'{RE_BASE}/users/1',
                  json=None
                  )
    assert api.users.delete(1) is None


@responses.activate
def test_users_create_password(api):
    """Triggering the forgotten-password flow yields no payload."""
    responses.add(responses.POST,
                  f'{RE_BASE}/users/forgotten-password',
                  json=None)
    assert api.users.create_password('test@domain.com') is None


@responses.activate
def test_users_retrieve_password(api):
    """Resetting a password via token yields no payload."""
    responses.add(responses.POST,
                  f'{RE_BASE}/users/retrieve-password',
                  json=None)
    result = api.users.retrieve_password(token='token',
                                         new_password='new password')
    assert result is None


@responses.activate
def test_users_change_password(api):
    """Changing the current user's password yields no payload."""
    responses.add(responses.PATCH,
                  f'{RE_BASE}/users/password',
                  json=None)
    result = api.users.change_password(old_password='old password',
                                       new_password='new password')
    assert result is None


@responses.activate
def test_users_update_user_role(api):
    """Updating a user's roles returns the new role assignment."""
    payload = {'roles': [1, 2]}
    responses.add(responses.PUT,
                  f'{RE_BASE}/users/1/roles',
                  json=payload)
    resp = api.users.update_user_roles('1', roles=[1, 2])
    assert isinstance(resp, dict)
    assert resp['roles'] == [1, 2]
6,701 | test schedule hbase backup start time | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.testutils import mock
from tests.unit.customizations.emr import EMRBaseAWSCommandParamsTest as \
BaseAWSCommandParamsTest
from copy import deepcopy
class TestScheduleHBaseBackup(BaseAWSCommandParamsTest):
    """Check that ``emr schedule-hbase-backup`` CLI args translate into the
    expected AddJobFlowSteps parameters (and that invalid input errors out).
    """

    prefix = 'emr schedule-hbase-backup'

    # Canonical step for a full backup every 10 minutes starting 'now'.
    # Individual tests deep-copy this and patch Args entries for variants.
    default_steps = [{
        'HadoopJarStep': {
            'Args': [
                'emr.hbase.backup.Main',
                '--set-scheduled-backup',
                'true',
                '--backup-dir',
                's3://abc/',
                '--full-backup-time-interval',
                '10',
                '--full-backup-time-unit',
                'minutes',
                '--start-time',
                'now'
            ],
            'Jar': '/home/hadoop/lib/hbase.jar'
        },
        'Name': 'Modify Backup Schedule',
        'ActionOnFailure': 'CANCEL_AND_WAIT'
    }]

    def test_schedule_hbase_backup_full(self):
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type full' +\
               ' --interval 10 --unit minutes'
        cmdline = self.prefix + args
        result = {'JobFlowId': 'j-ABCD', 'Steps': self.default_steps}
        self.assert_params_for_cmd(cmdline, result)

    def test_schedule_hbase_backup_full_upper_case(self):
        # --type is case-insensitive.
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type FULL' +\
               ' --interval 10 --unit minutes'
        cmdline = self.prefix + args
        result = {'JobFlowId': 'j-ABCD', 'Steps': self.default_steps}
        self.assert_params_for_cmd(cmdline, result)

    def test_schedule_hbase_backup_incremental_upper_case(self):
        # --unit is also case-insensitive; expect lower-cased 'hours' in Args.
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type INCREMENTAL' +\
               ' --interval 10 --unit HOURS'
        cmdline = self.prefix + args
        steps = deepcopy(self.default_steps)
        args = steps[0]['HadoopJarStep']['Args']
        args[5] = '--incremental-backup-time-interval'
        args[7] = '--incremental-backup-time-unit'
        args[8] = 'hours'
        steps[0]['HadoopJarStep']['Args'] = args
        result = {'JobFlowId': 'j-ABCD', 'Steps': steps}
        self.assert_params_for_cmd(cmdline, result)

    def test_schedule_hbase_backup_incremental(self):
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type incremental' +\
               ' --interval 10 --unit minutes'
        cmdline = self.prefix + args
        steps = deepcopy(self.default_steps)
        args = steps[0]['HadoopJarStep']['Args']
        args[5] = '--incremental-backup-time-interval'
        args[7] = '--incremental-backup-time-unit'
        steps[0]['HadoopJarStep']['Args'] = args
        result = {'JobFlowId': 'j-ABCD', 'Steps': steps}
        self.assert_params_for_cmd(cmdline, result)

    def test_schedule_hbase_backup_wrong_type(self):
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type wrong_type' +\
               ' --interval 10 --unit minutes'
        cmdline = self.prefix + args
        expected_error_msg = '\naws: error: invalid type. type should be' +\
                             ' either full or incremental.\n'
        result = self.run_cmd(cmdline, 255)
        self.assertEqual(expected_error_msg, result[1])

    def test_schedule_hbase_backup_wrong_unit(self):
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type full' +\
               ' --interval 10 --unit wrong_unit'
        cmdline = self.prefix + args
        expected_error_msg = '\naws: error: invalid unit. unit should be' +\
                             ' one of the following values: minutes,' +\
                             ' hours or days.\n'
        result = self.run_cmd(cmdline, 255)
        self.assertEqual(expected_error_msg, result[1])

    def test_schedule_hbase_backup_consistent(self):
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type full' +\
               ' --interval 10 --unit minutes --consistent'
        cmdline = self.prefix + args
        steps = deepcopy(self.default_steps)
        steps[0]['HadoopJarStep']['Args'].insert(5, '--consistent')
        result = {'JobFlowId': 'j-ABCD', 'Steps': steps}
        self.assert_params_for_cmd(cmdline, result)

    def test_schedule_hbase_backup_start_time(self):
        # Renamed from the METHOD_NAME placeholder to match the
        # test_schedule_hbase_backup_* naming used by the rest of the class.
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type full --interval' +\
               ' 10 --unit minutes --start-time 2014-04-18T10:43:24-07:00'
        cmdline = self.prefix + args
        steps = deepcopy(self.default_steps)
        # Args[10] is the value following '--start-time' in default_steps.
        steps[0]['HadoopJarStep']['Args'][10] = '2014-04-18T10:43:24-07:00'
        result = {'JobFlowId': 'j-ABCD', 'Steps': steps}
        self.assert_params_for_cmd(cmdline, result)

    @mock.patch('awscli.customizations.emr.'
                'emrutils.get_release_label')
    def test_unsupported_command_on_release_based_cluster_error(
            self, grl_patch):
        grl_patch.return_value = 'emr-4.0'
        args = ' --cluster-id j-ABCD --dir s3://abc/ --type full' +\
               ' --interval 10 --unit minutes'
        cmdline = self.prefix + args
        expected_error_msg = ("\naws: error: schedule-hbase-backup"
                              " is not supported with 'emr-4.0' release.\n")
        result = self.run_cmd(cmdline, 255)
        self.assertEqual(result[1], expected_error_msg)
if __name__ == "__main__":
    # Bug fix: ``unittest`` was used here without ever being imported at
    # module level, so direct script execution raised NameError. Import it
    # locally under the guard (pytest discovery never runs this path).
    import unittest
    unittest.main()
6,702 | vol mol phase | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Mock-up EoS module for testing generic property packages
"""
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
from pyomo.environ import Var, sqrt, units as pyunits
from idaes.models.properties.modular_properties.eos.eos_base import EoSBase
class DummyEoS(EoSBase):
    """Mock equation-of-state for testing the generic property framework.

    Most property hooks return the placeholder value 42 (or a trivial
    expression) so tests can verify wiring rather than thermodynamics.
    """

    # Add attribute indicating support for electrolyte systems
    electrolyte_support = True

    @staticmethod
    def common(b, pobj):
        # Create dummy var to be returned by expression calls
        # This Var is used to create expressions where required.
        if not hasattr(b, "dummy_var"):
            b.dummy_var = Var(initialize=42)
        # Counter for how many times this method is called
        # This is used to ensure that the method has been called by checking
        # that the counter has advanced
        if hasattr(b, "eos_common"):
            b.eos_common += 1
        else:
            b.eos_common = 1

    @staticmethod
    def calculate_scaling_factors(b, pobj):
        pass

    @staticmethod
    def build_parameters(b):
        if not hasattr(b, "dummy_param"):
            b.dummy_param = Var(initialize=42)

    @staticmethod
    def act_phase_comp(b, p, j):
        return 42

    @staticmethod
    def act_coeff_phase_comp(b, p, j):
        return 1

    @staticmethod
    def compress_fact_phase(b, p):
        return 42

    @staticmethod
    def cp_mass_phase(b, p):
        return 42

    @staticmethod
    def cp_mol_phase(b, p):
        return 42

    @staticmethod
    def cp_mol_phase_comp(b, p, j):
        return 42

    @staticmethod
    def cv_mass_phase(b, p):
        return 42

    @staticmethod
    def cv_mol_phase(b, p):
        return 42

    @staticmethod
    def cv_mol_phase_comp(b, p, j):
        return 42

    @staticmethod
    def dens_mass_phase(b, p):
        return 42

    @staticmethod
    def dens_mol_phase(b, p):
        # Roughly liquid-water-like molar density, with units attached.
        return 55e3 * pyunits.mol / pyunits.m**3

    @staticmethod
    def energy_internal_mol_phase(b, p):
        # Temperature-dependent so derivative-based tests have a gradient.
        return 2e2 * b.temperature

    @staticmethod
    def energy_internal_mol_phase_comp(b, p, j):
        return 2e2 * b.temperature

    @staticmethod
    def enth_mol_phase(b, p):
        return 1e2 * b.temperature

    @staticmethod
    def enth_mol_phase_comp(b, p, j):
        return 1e2 * b.temperature

    @staticmethod
    def entr_mol_phase(b, p):
        return 42

    @staticmethod
    def entr_mol_phase_comp(b, p, j):
        return 42

    @staticmethod
    def fug_phase_comp(b, p, j):
        return 42

    @staticmethod
    def fug_coeff_phase_comp(b, p, j):
        return 42

    @staticmethod
    def gibbs_mol_phase(b, p):
        return 42

    @staticmethod
    def gibbs_mol_phase_comp(b, p, j):
        return 42

    @staticmethod
    def isothermal_speed_sound_phase(b, p):
        return 250

    @staticmethod
    def isentropic_speed_sound_phase(b, p):
        return sqrt(b.heat_capacity_ratio_phase[p]) * b.isothermal_speed_sound_phase[p]

    @staticmethod
    def vol_mol_phase(b, p):
        # Renamed from the METHOD_NAME placeholder to match the EoSBase hook
        # naming used throughout this class (cf. vol_mol_phase_comp below).
        return 42

    @staticmethod
    def vol_mol_phase_comp(b, p, j):
        return 42

    @staticmethod
    def log_act_phase_comp(b, p, j):
        return 1

    @staticmethod
    def log_act_phase_solvents(b, p):
        return 1
6,703 | test exe exists true when homedir is | from __future__ import annotations
import multiprocessing
import os.path
import sys
from unittest import mock
import pytest
import pre_commit.constants as C
from pre_commit import lang_base
from pre_commit import parse_shebang
from pre_commit.prefix import Prefix
from pre_commit.util import CalledProcessError
@pytest.fixture
def find_exe_mck():
    """Mock parse_shebang.find_executable so tests control lookup results."""
    with mock.patch.object(parse_shebang, 'find_executable') as mck:
        yield mck


@pytest.fixture
def homedir_mck():
    """Pin os.path.expanduser('~') to a fixed fake home directory."""
    def fake_expanduser(pth):
        # Only '~' itself is expected here; anything else is a test bug.
        assert pth == '~'
        return os.path.normpath('/home/me')

    with mock.patch.object(os.path, 'expanduser', fake_expanduser):
        yield


@pytest.fixture
def no_sched_getaffinity():
    # Simulates an OS without os.sched_getaffinity available (mac/windows)
    # https://docs.python.org/3/library/os.html#interface-to-the-scheduler
    with mock.patch.object(
        os,
        'sched_getaffinity',
        create=True,
        side_effect=AttributeError,
    ):
        yield
def test_exe_exists_does_not_exist(find_exe_mck, homedir_mck):
    """A missing executable is reported as absent."""
    find_exe_mck.return_value = None
    assert lang_base.exe_exists('ruby') is False


def test_exe_exists_exists(find_exe_mck, homedir_mck):
    """A normal system executable is reported as present."""
    find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
    assert lang_base.exe_exists('ruby') is True


def test_exe_exists_false_if_shim(find_exe_mck, homedir_mck):
    """Version-manager shim paths must not count as real executables."""
    find_exe_mck.return_value = os.path.normpath('/foo/shims/ruby')
    assert lang_base.exe_exists('ruby') is False


def test_exe_exists_false_if_homedir(find_exe_mck, homedir_mck):
    """Executables under the user's home directory are rejected."""
    find_exe_mck.return_value = os.path.normpath('/home/me/somedir/ruby')
    assert lang_base.exe_exists('ruby') is False


def test_exe_exists_commonpath_raises_ValueError(find_exe_mck, homedir_mck):
    """A commonpath ValueError (e.g. mixed drives) falls back to 'exists'."""
    find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
    with mock.patch.object(os.path, 'commonpath', side_effect=ValueError):
        assert lang_base.exe_exists('ruby') is True


def test_exe_exists_true_when_homedir_is_slash(find_exe_mck):
    """Renamed from the METHOD_NAME placeholder.

    When $HOME expands to the filesystem root, every path is technically
    'inside' home; the homedir heuristic must not reject system executables
    in that case.
    """
    find_exe_mck.return_value = os.path.normpath('/usr/bin/ruby')
    with mock.patch.object(os.path, 'expanduser', return_value=os.sep):
        assert lang_base.exe_exists('ruby') is True
def test_basic_get_default_version():
    """The basic language backend reports the sentinel default version."""
    version = lang_base.basic_get_default_version()
    assert version == C.DEFAULT


def test_basic_health_check():
    """The basic health check is a no-op that returns None."""
    result = lang_base.basic_health_check(Prefix('.'), 'default')
    assert result is None
def test_failed_setup_command_does_not_unicode_error():
    """Non-UTF-8 bytes on stderr must not crash error reporting."""
    script = (
        'import sys\n'
        "sys.stderr.buffer.write(b'\\x81\\xfe')\n"
        'raise SystemExit(1)\n'
    )

    # an assertion that this does not raise `UnicodeError`
    with pytest.raises(CalledProcessError):
        lang_base.setup_cmd(Prefix('.'), (sys.executable, '-c', script))


def test_environment_dir(tmp_path):
    """environment_dir joins prefix, language env name and version."""
    ret = lang_base.environment_dir(Prefix(tmp_path), 'langenv', 'default')
    assert ret == f'{tmp_path}{os.sep}langenv-default'


def test_assert_version_default():
    """Non-default language_version raises with a helpful message."""
    with pytest.raises(AssertionError) as excinfo:
        lang_base.assert_version_default('lang', '1.2.3')
    msg, = excinfo.value.args
    assert msg == (
        'for now, pre-commit requires system-installed lang -- '
        'you selected `language_version: 1.2.3`'
    )


def test_assert_no_additional_deps():
    """additional_dependencies raises for languages that don't support it."""
    with pytest.raises(AssertionError) as excinfo:
        lang_base.assert_no_additional_deps('lang', ['hmmm'])
    msg, = excinfo.value.args
    assert msg == (
        'for now, pre-commit does not support additional_dependencies for '
        'lang -- '
        "you selected `additional_dependencies: ['hmmm']`"
    )


def test_no_env_noop(tmp_path):
    """The no_env context manager must leave os.environ untouched."""
    before = os.environ.copy()
    with lang_base.no_env(Prefix(tmp_path), '1.2.3'):
        inside = os.environ.copy()
    after = os.environ.copy()
    assert before == inside == after
def test_target_concurrency_sched_getaffinity(no_sched_getaffinity):
    """target_concurrency prefers the CPU-affinity count when available."""
    with mock.patch.object(
        os,
        'sched_getaffinity',
        return_value=set(range(345)),
    ):
        with mock.patch.dict(os.environ, clear=True):
            assert lang_base.target_concurrency() == 345


def test_target_concurrency_without_sched_getaffinity(no_sched_getaffinity):
    """Without sched_getaffinity, fall back to multiprocessing.cpu_count."""
    with mock.patch.object(multiprocessing, 'cpu_count', return_value=123):
        with mock.patch.dict(os.environ, {}, clear=True):
            assert lang_base.target_concurrency() == 123


def test_target_concurrency_testing_env_var():
    """PRE_COMMIT_NO_CONCURRENCY forces serial execution."""
    with mock.patch.dict(
        os.environ, {'PRE_COMMIT_NO_CONCURRENCY': '1'}, clear=True,
    ):
        assert lang_base.target_concurrency() == 1


def test_target_concurrency_on_travis():
    """Travis CI is capped at 2 workers."""
    with mock.patch.dict(os.environ, {'TRAVIS': '1'}, clear=True):
        assert lang_base.target_concurrency() == 2


def test_target_concurrency_cpu_count_not_implemented(no_sched_getaffinity):
    """If no CPU count can be determined at all, default to 1."""
    with mock.patch.object(
        multiprocessing, 'cpu_count', side_effect=NotImplementedError,
    ):
        with mock.patch.dict(os.environ, {}, clear=True):
            assert lang_base.target_concurrency() == 1
def test_shuffled_is_deterministic():
    """_shuffled uses a fixed seed, so its output is reproducible."""
    seq = [str(i) for i in range(10)]
    expected = ['4', '0', '5', '1', '8', '6', '2', '3', '7', '9']
    assert lang_base._shuffled(seq) == expected


def test_xargs_require_serial_is_not_shuffled():
    """With require_serial=True, arguments keep their original order."""
    ret, out = lang_base.run_xargs(
        ('echo',), [str(i) for i in range(10)],
        require_serial=True,
        color=False,
    )
    assert ret == 0
    assert out.strip() == b'0 1 2 3 4 5 6 7 8 9'


def test_basic_run_hook(tmp_path):
    """basic_run_hook builds 'entry + args + files' and runs it."""
    ret, out = lang_base.basic_run_hook(
        Prefix(tmp_path),
        'echo hi',
        ['hello'],
        ['file', 'file', 'file'],
        is_local=False,
        require_serial=False,
        color=False,
    )
    assert ret == 0
    # Normalize line endings so the assertion also holds on Windows.
    out = out.replace(b'\r\n', b'\n')
    assert out == b'hi hello file file file\n'
6,704 | test currentitem | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Tests for webelement.tabhistory."""
import dataclasses
from typing import Any
import pytest
pytest.importorskip('qutebrowser.qt.webkit')
from qutebrowser.qt.core import QUrl, QPoint
# pylint: disable=no-name-in-module
from qutebrowser.qt.webkit import QWebHistory
# pylint: enable=no-name-in-module
from qutebrowser.browser.webkit import tabhistory
from qutebrowser.misc.sessions import TabHistoryItem as Item
from qutebrowser.utils import qtutils
# Qt logs 'device not open' noise while deserializing; ignore it globally.
pytestmark = pytest.mark.qt_log_ignore('QIODevice::read.*: device not open')

# Example history entries: plain, active, percent-encoded URL, original-URL
# override, and entries carrying user_data (zoom / scroll position).
# Exactly one entry (index 1) is marked active.
ITEMS = [
    Item(QUrl('https://www.heise.de/'), 'heise'),
    Item(QUrl('about:blank'), 'blank', active=True),
    Item(QUrl('http://example.com/%E2%80%A6'), 'percent'),
    Item(QUrl('http://example.com/?foo=bar'), 'arg',
         original_url=QUrl('http://original.url.example.com/'),
         user_data={'foo': 23, 'bar': 42}),
    # From https://github.com/OtterBrowser/otter-browser/issues/709#issuecomment-74749471
    Item(QUrl('http://github.com/OtterBrowser/24/134/2344/otter-browser/'
              'issues/709/'),
         'Page not found | github',
         user_data={'zoom': 149, 'scroll-pos': QPoint(0, 0)}),
    Item(QUrl('https://mail.google.com/mail/u/0/#label/some+label/'
              '234lkjsd0932lkjf884jqwerdf4'),
         '"some label" - email@gmail.com - Gmail"',
         user_data={'zoom': 120, 'scroll-pos': QPoint(0, 0)}),
]
@dataclasses.dataclass
class Objects:
    """Bundle of a restored QWebHistory and its deserialized user data."""

    history: QWebHistory
    user_data: Any


@pytest.fixture
def empty_history(webpage):
    """Fixture providing an empty QWebHistory."""
    hist = webpage.history()
    assert hist.count() == 0
    return hist


@pytest.fixture
def objects(empty_history):
    """Fixture providing a history (and userdata) filled with example data."""
    # Round-trip ITEMS through serialize/deserialize to populate the history.
    stream, _data, user_data = tabhistory.serialize(ITEMS)
    qtutils.deserialize_stream(stream, empty_history)
    return Objects(history=empty_history, user_data=user_data)
def test_count(objects):
    """Check if the history's count was loaded correctly."""
    assert objects.history.count() == len(ITEMS)


@pytest.mark.parametrize('i', range(len(ITEMS)))
def test_valid(objects, i):
    """Check if all items are valid."""
    assert objects.history.itemAt(i).isValid()


@pytest.mark.parametrize('i', range(len(ITEMS)))
def test_no_userdata(objects, i):
    """Check if all items have no user data."""
    # user_data is stripped from the QWebHistory and returned separately.
    assert objects.history.itemAt(i).userData() is None


def test_userdata(objects):
    """Check if all user data has been restored to user_data."""
    userdata_items = [item.user_data for item in ITEMS]
    assert userdata_items == objects.user_data
def test_currentitem(objects):
    """Check if the current item index was loaded correctly.

    Renamed from the METHOD_NAME placeholder. ITEMS[1] is the only entry
    created with active=True, so it must be the current history item.
    """
    assert objects.history.currentItemIndex() == 1
@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_urls(objects, i, item):
    """Check if the URLs were loaded correctly."""
    assert objects.history.itemAt(i).url() == item.url


@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_original_urls(objects, i, item):
    """Check if the original URLs were loaded correctly."""
    assert objects.history.itemAt(i).originalUrl() == item.original_url


@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_titles(objects, i, item):
    """Check if the titles were loaded correctly."""
    assert objects.history.itemAt(i).title() == item.title


def test_no_active_item():
    """Check tabhistory.serialize with no active item."""
    items = [Item(QUrl(), '')]
    with pytest.raises(ValueError):
        tabhistory.serialize(items)


def test_two_active_items():
    """Check tabhistory.serialize with two active items."""
    items = [Item(QUrl(), '', active=True),
             Item(QUrl(), ''),
             Item(QUrl(), '', active=True)]
    with pytest.raises(ValueError):
        tabhistory.serialize(items)


def test_empty(empty_history):
    """Check tabhistory.serialize with no items."""
    items = []
    stream, _data, user_data = tabhistory.serialize(items)
    qtutils.deserialize_stream(stream, empty_history)
    assert empty_history.count() == 0
    # An empty history still reports index 0 as 'current'.
    assert empty_history.currentItemIndex() == 0
    assert not user_data
6,705 | create user | import sys
from datetime import datetime, timedelta
from typing import Any, Dict
import click
from flask import Flask, current_app
from flask.cli import FlaskGroup, ScriptInfo, with_appcontext
from alerta.app import config, create_app, db, key_helper, qb
from alerta.auth.utils import generate_password_hash
from alerta.models.enums import Scope
from alerta.models.key import ApiKey
from alerta.models.user import User
from alerta.version import __version__
def _create_app(config_override: Dict[str, Any] = None, environment: str = None) -> Flask:
    """Build a minimal Flask app (DB/query/key helpers, no HTTP routes)
    for management commands.

    NOTE(review): both parameters default to None, so the annotations are
    effectively Optional[...]; tighten once Optional is imported.
    """
    app = Flask(__name__)
    app.config['ENVIRONMENT'] = environment
    config.init_app(app)
    # Apply CLI-supplied overrides after base config so they take precedence.
    app.config.update(config_override or {})
    db.init_db(app)
    qb.init_app(app)
    key_helper.init_app(app)
    return app
@click.group(cls=FlaskGroup, add_version_option=False)
@click.version_option(version=__version__)
@click.pass_context
def cli(ctx):
    """
    Management command-line tool for Alerta server.
    """
    # Standard Flask commands need the full app; management commands use the
    # stripped-down _create_app to avoid loading HTTP endpoints.
    if ctx.invoked_subcommand in ['routes', 'run', 'shell']:
        # Load HTTP endpoints for standard Flask commands
        ctx.obj = ScriptInfo(create_app=create_app)
    else:
        # Do not load HTTP endpoints for management commands
        ctx.obj = ScriptInfo(create_app=_create_app)
@cli.command('key', short_help='Create an admin API key')
@click.option('--username', '-u', help='Admin user')
@click.option('--key', '-K', 'want_key', help='API key (default=random string)')
@click.option('--scope', 'scopes', multiple=True, help='List of permissions eg. admin:keys, write:alerts')
@click.option('--duration', metavar='SECONDS', type=int, help='Duration API key is valid')
@click.option('--text', help='Description of API key use')
@click.option('--customer', help='Customer')
@click.option('--all', is_flag=True, help='Create API keys for all admins')
@click.option('--force', is_flag=True, help='Do not skip if API key already exists')
@with_appcontext
def key(username, want_key, scopes, duration, text, customer, all, force):
    """
    Create an admin API key.

    Without --force, an existing key with the same scopes is reused rather
    than creating a duplicate. With --all, one key per configured admin is
    ensured and each is echoed; with --username, the single key is echoed.
    """
    if username and username not in current_app.config['ADMIN_USERS']:
        raise click.UsageError(f'User {username} not an admin')
    if all and want_key:
        raise click.UsageError('Can only set API key with "--username".')
    # Default to full admin/write/read scopes when none are given.
    scopes = [Scope(s) for s in scopes] or [Scope.admin, Scope.write, Scope.read]
    expires = datetime.utcnow() + timedelta(seconds=duration) if duration else None
    text = text or 'Created by alertad script'

    def create_key(admin, key=None):
        # Returns the created ApiKey, or None if creation failed (the error
        # is echoed rather than raised).
        key = ApiKey(
            user=admin,
            key=key,
            scopes=scopes,
            expire_time=expires,
            text=text,
            customer=customer
        )
        try:
            key = key.create()
        except Exception as e:
            click.echo(f'ERROR: {e}')
        else:
            return key

    if all:
        for admin in current_app.config['ADMIN_USERS']:
            # Reuse an existing key with identical scopes unless --force.
            keys = [k for k in ApiKey.find_by_user(admin) if k.scopes == scopes]
            if keys and not force:
                key = keys[0]
            else:
                key = create_key(admin)
            # NOTE(review): if create_key failed, key is None and this echo
            # raises AttributeError -- confirm intended behavior.
            click.echo(f'{key.key:40} {key.user}')
    elif username:
        keys = [k for k in ApiKey.find_by_user(username) if k.scopes == scopes]
        if want_key:
            # Reuse the exact requested key if present, else create it.
            found_key = [k for k in keys if k.key == want_key]
            if found_key:
                key = found_key[0]
            else:
                key = create_key(username, key=want_key)
        else:
            if keys and not force:
                key = keys[0]
            else:
                key = create_key(username)
        if key:
            click.echo(key.key)
        else:
            # Creation failed; error already echoed by create_key.
            sys.exit(1)
    else:
        raise click.UsageError("Must set '--username' or use '--all'")
@cli.command('keys', short_help='List admin API keys')
@with_appcontext
def keys():
    """
    List admin API keys.

    Prints one line per admin-scoped key: the key followed by its owner.
    """
    for admin in current_app.config['ADMIN_USERS']:
        try:
            # Only keys that carry the admin scope are listed.
            keys = [k for k in ApiKey.find_by_user(admin) if Scope.admin in k.scopes]
        except Exception as e:
            click.echo(f'ERROR: {e}')
        else:
            for key in keys:
                click.echo(f'{key.key:40} {key.user}')
class CommandWithOptionalPassword(click.Command):
    """Click command allowing '--password' to be given without a value.

    If '--password' is the last token or is followed by another option,
    the value is read interactively (hidden, with confirmation) and
    spliced into the argument list before normal parsing.
    """

    def parse_args(self, ctx, args):
        for i, a in enumerate(args):
            if args[i] == '--password':
                try:
                    # A following '--option' token means "no value supplied".
                    password = args[i + 1] if not args[i + 1].startswith('--') else None
                except IndexError:
                    password = None
                if not password:
                    password = click.prompt('Password', hide_input=True, confirmation_prompt=True)
                    args.insert(i + 1, password)
        return super().parse_args(ctx, args)
@cli.command('user', cls=CommandWithOptionalPassword, short_help='Create admin user')
@click.option('--name', help='Name of admin (default=email)')
@click.option('--email', '--username', help='Email address (login username)')
@click.option('--password', help='Password (will prompt if not supplied)')
@click.option('--text', help='Description of admin')
@click.option('--all', is_flag=True, help='Create users for all admins')
@with_appcontext
def user(name, email, password, text, all):
    """
    Create admin users (BasicAuth only).

    With --all, ensures a user exists for every configured admin and echoes
    'id login' per user; with --email, creates that single user and echoes
    its id (exit code 1 on failure).
    """
    if current_app.config['AUTH_PROVIDER'] != 'basic':
        raise click.UsageError(f"Not required for {current_app.config['AUTH_PROVIDER']} admin users")
    if email and email not in current_app.config['ADMIN_USERS']:
        raise click.UsageError(f'User {email} not an admin')
    if (email or all) and not password:
        password = click.prompt('Password', hide_input=True)
    text = text or 'Created by alertad script'

    def create_user(name, login):
        # Renamed from the METHOD_NAME placeholder; purely internal helper.
        # Returns the created User, or None if creation failed (the error is
        # echoed rather than raised).
        email = login if '@' in login else None
        user = User(
            name=name or login,
            login=login,
            password=generate_password_hash(password),
            roles=current_app.config['ADMIN_ROLES'],
            text=text,
            email=email,
            # Admins created here are trusted, so the email is pre-verified.
            email_verified=bool(email)
        )
        try:
            user = user.create()
        except Exception as e:
            click.echo(f'ERROR: {e}')
        else:
            return user

    if all:
        for admin in current_app.config['ADMIN_USERS']:
            # Only create users that do not already exist.
            user = User.find_by_username(admin)
            if not user:
                user = create_user(name=admin, login=admin)
            click.echo(f'{user.id} {user.login}')
    elif email:
        user = create_user(name, login=email)
        if user:
            click.echo(user.id)
        else:
            # Creation failed; error already echoed by create_user.
            sys.exit(1)
    else:
        raise click.UsageError("Must set '--email' or use '--all'")
@cli.command('users', short_help='List admin users')
@with_appcontext
def users():
    """
    List admin users.

    Prints 'id login' for every configured admin that has a user record.
    """
    for admin in current_app.config['ADMIN_USERS']:
        try:
            user = User.find_by_username(admin)
        except Exception as e:
            click.echo(f'ERROR: {e}')
        else:
            if user:
                click.echo(f'{user.id} {user.login}')
6,706 | test api gateway rest headers serializer | from uuid import uuid4
import pytest
from requests import Request
from aws_lambda_powertools.shared.cookies import Cookie
from tests.e2e.utils import data_fetcher
from tests.e2e.utils.auth import build_iam_auth
@pytest.fixture
def alb_basic_listener_endpoint(infrastructure: dict) -> str:
    """URL of the ALB listener that forwards single-value headers."""
    dns_name = infrastructure.get("ALBDnsName")
    port = infrastructure.get("ALBBasicListenerPort", "")
    return f"http://{dns_name}:{port}"


@pytest.fixture
def alb_multi_value_header_listener_endpoint(infrastructure: dict) -> str:
    """URL of the ALB listener with multi-value headers enabled."""
    dns_name = infrastructure.get("ALBDnsName")
    port = infrastructure.get("ALBMultiValueHeaderListenerPort", "")
    return f"http://{dns_name}:{port}"


@pytest.fixture
def apigw_rest_endpoint(infrastructure: dict) -> str:
    """Deployed API Gateway REST API base URL."""
    return infrastructure.get("APIGatewayRestUrl", "")


@pytest.fixture
def apigw_http_endpoint(infrastructure: dict) -> str:
    """Deployed API Gateway HTTP API base URL."""
    return infrastructure.get("APIGatewayHTTPUrl", "")


@pytest.fixture
def lambda_function_url_endpoint(infrastructure: dict) -> str:
    """Deployed Lambda Function URL."""
    return infrastructure.get("LambdaFunctionUrl", "")
@pytest.mark.xdist_group(name="event_handler")
def test_alb_headers_serializer(alb_basic_listener_endpoint):
    """A single-value ALB listener keeps only the last header/cookie value."""
    # GIVEN
    url = f"{alb_basic_listener_endpoint}/todos"
    body = "Hello World"
    status_code = 200
    headers = {"Content-Type": "text/plain", "Vary": ["Accept-Encoding", "User-Agent"]}
    cookies = [
        Cookie(name="session_id", value=str(uuid4()), secure=True, http_only=True),
        Cookie(name="ab_experiment", value="3"),
    ]
    last_cookie = cookies[-1]

    # WHEN
    response = data_fetcher.get_http_response(
        Request(
            method="POST",
            url=url,
            json={"body": body, "status_code": status_code, "headers": headers, "cookies": list(map(str, cookies))},
        ),
    )

    # THEN
    assert response.status_code == status_code

    # response.content is a binary string, needs to be decoded to compare with the real string
    assert response.content.decode("ascii") == body

    # Only the last header should be set
    for key, value in headers.items():
        assert key in response.headers
        new_value = value if isinstance(value, str) else sorted(value)[-1]
        assert response.headers[key] == new_value

    # Only the last cookie should be set
    assert len(response.cookies.items()) == 1
    assert last_cookie.name in response.cookies
    assert response.cookies.get(last_cookie.name) == last_cookie.value
@pytest.mark.xdist_group(name="event_handler")
def test_alb_multi_value_headers_serializer(alb_multi_value_header_listener_endpoint):
    """A multi-value ALB listener preserves all header and cookie values."""
    # GIVEN
    url = f"{alb_multi_value_header_listener_endpoint}/todos"
    body = "Hello World"
    status_code = 200
    headers = {"Content-Type": "text/plain", "Vary": ["Accept-Encoding", "User-Agent"]}
    cookies = [
        Cookie(name="session_id", value=str(uuid4()), secure=True, http_only=True),
        Cookie(name="ab_experiment", value="3"),
    ]

    # WHEN
    response = data_fetcher.get_http_response(
        Request(
            method="POST",
            url=url,
            json={"body": body, "status_code": status_code, "headers": headers, "cookies": list(map(str, cookies))},
        ),
    )

    # THEN
    assert response.status_code == status_code

    # response.content is a binary string, needs to be decoded to compare with the real string
    assert response.content.decode("ascii") == body

    for key, value in headers.items():
        assert key in response.headers
        new_value = value if isinstance(value, str) else ", ".join(sorted(value))

        # ALB sorts the header values randomly, so we have to re-order them for comparison here
        returned_value = ", ".join(sorted(response.headers[key].split(", ")))
        assert returned_value == new_value

    for cookie in cookies:
        assert cookie.name in response.cookies
        assert response.cookies.get(cookie.name) == cookie.value
@pytest.mark.xdist_group(name="event_handler")
def test_api_gateway_rest_headers_serializer(apigw_rest_endpoint):
    """API Gateway REST APIs preserve all header and cookie values.

    Renamed from the METHOD_NAME placeholder so pytest collects the test,
    matching the sibling *_headers_serializer test names.
    """
    # GIVEN
    url = f"{apigw_rest_endpoint}todos"
    body = "Hello World"
    status_code = 200
    headers = {"Content-Type": "text/plain", "Vary": ["Accept-Encoding", "User-Agent"]}
    cookies = [
        Cookie(name="session_id", value=str(uuid4()), secure=True, http_only=True),
        Cookie(name="ab_experiment", value="3"),
    ]

    # WHEN
    response = data_fetcher.get_http_response(
        Request(
            method="POST",
            url=url,
            json={"body": body, "status_code": status_code, "headers": headers, "cookies": list(map(str, cookies))},
        ),
    )

    # THEN
    assert response.status_code == status_code

    # response.content is a binary string, needs to be decoded to compare with the real string
    assert response.content.decode("ascii") == body

    for key, value in headers.items():
        assert key in response.headers
        # Multi-value headers are joined into a single comma-separated value.
        new_value = value if isinstance(value, str) else ", ".join(sorted(value))
        assert response.headers[key] == new_value

    for cookie in cookies:
        assert cookie.name in response.cookies
        assert response.cookies.get(cookie.name) == cookie.value
@pytest.mark.xdist_group(name="event_handler")
def test_api_gateway_http_headers_serializer(apigw_http_endpoint):
    """End-to-end check of the API Gateway HTTP API serializer: status code,
    body, multi-value headers and all cookies must round-trip unchanged."""
    # GIVEN a payload describing the response the handler should produce
    expected_body = "Hello World"
    expected_status = 200
    expected_headers = {"Content-Type": "text/plain", "Vary": ["Accept-Encoding", "User-Agent"]}
    expected_cookies = [
        Cookie(name="session_id", value=str(uuid4()), secure=True, http_only=True),
        Cookie(name="ab_experiment", value="3"),
    ]
    url = f"{apigw_http_endpoint}todos"
    payload = {
        "body": expected_body,
        "status_code": expected_status,
        "headers": expected_headers,
        "cookies": [str(cookie) for cookie in expected_cookies],
    }
    # WHEN the deployed endpoint is invoked with IAM ("execute-api") auth
    response = data_fetcher.get_http_response(
        Request(
            method="POST",
            url=url,
            json=payload,
            auth=build_iam_auth(url=url, aws_service="execute-api"),
        ),
    )
    # THEN status, body, every header value and every cookie survive serialization
    assert response.status_code == expected_status
    # response.content is bytes; decode before comparing with the sent string
    assert response.content.decode("ascii") == expected_body
    for name, value in expected_headers.items():
        # list-valued headers are serialized joined with ", " in sorted order
        expected_value = ", ".join(sorted(value)) if isinstance(value, list) else value
        assert response.headers.get(name) == expected_value
    for cookie in expected_cookies:
        assert response.cookies.get(cookie.name) == cookie.value
@pytest.mark.xdist_group(name="event_handler")
def test_lambda_function_url_headers_serializer(lambda_function_url_endpoint):
    """End-to-end check of the Lambda Function URL serializer: status code,
    body, multi-value headers and all cookies must round-trip unchanged."""
    # GIVEN a payload describing the response the handler should produce
    url = f"{lambda_function_url_endpoint}todos"  # the function url endpoint already has the trailing /
    body = "Hello World"
    status_code = 200
    headers = {"Content-Type": "text/plain", "Vary": ["Accept-Encoding", "User-Agent"]}
    cookies = [
        Cookie(name="session_id", value=str(uuid4()), secure=True, http_only=True),
        Cookie(name="ab_experiment", value="3"),
    ]
    # WHEN the deployed endpoint is invoked with IAM ("lambda" service) auth
    response = data_fetcher.get_http_response(
        Request(
            method="POST",
            url=url,
            json={"body": body, "status_code": status_code, "headers": headers, "cookies": list(map(str, cookies))},
            auth=build_iam_auth(url=url, aws_service="lambda"),
        ),
    )
    # THEN status, body, every header value and every cookie survive serialization
    assert response.status_code == status_code
    # response.content is a binary string, needs to be decoded to compare with the real string
    assert response.content.decode("ascii") == body
    for key, value in headers.items():
        assert key in response.headers
        # list-valued headers are serialized joined with ", " in sorted order
        new_value = value if isinstance(value, str) else ", ".join(sorted(value))
        assert response.headers[key] == new_value
    for cookie in cookies:
        assert cookie.name in response.cookies
        assert response.cookies.get(cookie.name) == cookie.value
from selenium.webdriver.common.by import By
from nicegui import ui
from .screen import Screen
def test_classes(screen: Screen):
    """Element.classes() should accumulate, deduplicate, remove and replace
    CSS classes on the rendered DOM element."""
    label = ui.label('Some label')

    def assert_classes(classes: str) -> None:
        # exact match on the whitespace-normalized class attribute
        assert screen.selenium.find_element(By.XPATH,
                                            f'//*[normalize-space(@class)="{classes}" and text()="Some label"]')

    screen.open('/')
    screen.wait(0.5)
    assert_classes('')
    label.classes('one')
    assert_classes('one')
    # adding the same class twice must not duplicate it
    label.classes('one')
    assert_classes('one')
    label.classes('two three')
    assert_classes('one two three')
    label.classes(remove='two')
    assert_classes('one three')
    # replace drops all existing classes before adding the new one
    label.classes(replace='four')
    assert_classes('four')
def METHOD_NAME():
    """ui.element._parse_style should split an inline CSS string into a
    property->value dict, tolerating missing spaces and complex values."""
    # pylint: disable=protected-access
    assert ui.element._parse_style(None) == {}  # pylint: disable=use-implicit-booleaness-not-comparison
    cases = [
        ('color: red; background-color: blue', {'color': 'red', 'background-color': 'blue'}),
        ('width:12em;height:34.5em', {'width': '12em', 'height': '34.5em'}),
        ('transform: translate(120.0px, 50%)', {'transform': 'translate(120.0px, 50%)'}),
        ('box-shadow: 0 0 0.5em #1976d2', {'box-shadow': '0 0 0.5em #1976d2'}),
    ]
    for style, expected in cases:
        assert ui.element._parse_style(style) == expected
def test_props_parsing():
    """ui.element._parse_props should parse Quasar prop strings into a dict:
    bare flags become True, values keep their string form, quoting and
    escaped quotes are honored."""
    # pylint: disable=protected-access
    assert ui.element._parse_props(None) == {}  # pylint: disable=use-implicit-booleaness-not-comparison
    cases = [
        ('one two=1 three="abc def"', {'one': True, 'two': '1', 'three': 'abc def'}),
        ('loading percentage=12.5', {'loading': True, 'percentage': '12.5'}),
        ('size=50%', {'size': '50%'}),
        ('href=http://192.168.42.100/', {'href': 'http://192.168.42.100/'}),
        ('hint="Your \\"given\\" name"', {'hint': 'Your "given" name'}),
        ('input-style="{ color: #ff0000 }"', {'input-style': '{ color: #ff0000 }'}),
    ]
    for props, expected in cases:
        assert ui.element._parse_props(props) == expected
def test_style(screen: Screen):
    """Element.style() should add, deduplicate, overwrite, remove and replace
    inline CSS declarations on the rendered DOM element."""
    label = ui.label('Some label')

    def assert_style(style: str) -> None:
        # exact match on the whitespace-normalized style attribute
        assert screen.selenium.find_element(By.XPATH, f'//*[normalize-space(@style)="{style}" and text()="Some label"]')

    screen.open('/')
    screen.wait(0.5)
    assert_style('')
    label.style('color: red')
    assert_style('color: red;')
    # re-applying the same declaration must not duplicate it
    label.style('color: red')
    assert_style('color: red;')
    # same property with a new value overwrites the old one
    label.style('color: blue')
    assert_style('color: blue;')
    label.style('font-weight: bold')
    assert_style('color: blue; font-weight: bold;')
    label.style(remove='color: blue')
    assert_style('font-weight: bold;')
    # replace drops all existing declarations before adding the new one
    label.style(replace='text-decoration: underline')
    assert_style('text-decoration: underline;')
    label.style('color: blue;')
    assert_style('text-decoration: underline; color: blue;')
def test_props(screen: Screen):
    """Element.props() should add, deduplicate and remove Quasar props, which
    surface as q-field--<prop> CSS classes on the rendered input."""
    input_ = ui.input()

    def assert_props(*props: str) -> None:
        # the rendered <label> carries one q-field--<prop> class per active prop
        class_conditions = [f'contains(@class, "q-field--{prop}")' for prop in props]
        assert screen.selenium.find_element(By.XPATH, f'//label[{" and ".join(class_conditions)}]')

    screen.open('/')
    screen.wait(0.5)
    assert_props('standard')
    input_.props('dark')
    assert_props('standard', 'dark')
    # setting the same prop again is a no-op
    input_.props('dark')
    assert_props('standard', 'dark')
    input_.props(remove='dark')
    assert_props('standard')
def test_move(screen: Screen):
    """Element.move() should re-parent an element into another container or
    re-order it within its parent (via target_index), updating the DOM."""
    with ui.card() as a:
        ui.label('A')
        x = ui.label('X')

    with ui.card() as b:
        ui.label('B')

    ui.button('Move X to A', on_click=lambda: x.move(a))
    ui.button('Move X to B', on_click=lambda: x.move(b))
    ui.button('Move X to top', on_click=lambda: x.move(target_index=0))

    screen.open('/')
    # vertical screen position reflects DOM order: initially A, X, B
    assert screen.find('A').location['y'] < screen.find('X').location['y'] < screen.find('B').location['y']
    screen.click('Move X to B')
    screen.wait(0.5)
    assert screen.find('A').location['y'] < screen.find('B').location['y'] < screen.find('X').location['y']
    screen.click('Move X to A')
    screen.wait(0.5)
    assert screen.find('A').location['y'] < screen.find('X').location['y'] < screen.find('B').location['y']
    # moving within the same parent: X goes above A
    screen.click('Move X to top')
    screen.wait(0.5)
    assert screen.find('X').location['y'] < screen.find('A').location['y'] < screen.find('B').location['y']
def test_xss(screen: Screen):
    """Labels must render user-provided text literally: script tags and HTML
    markup appear as text instead of being interpreted or executed."""
    ui.label('</script><script>alert(1)</script>')
    ui.label('<b>Bold 1</b>, `code`, copy&paste, multi\nline')
    # labels created after a user interaction must be escaped as well
    ui.button('Button', on_click=lambda: (
        ui.label('</script><script>alert(2)</script>'),
        ui.label('<b>Bold 2</b>, `code`, copy&paste, multi\nline'),
    ))

    screen.open('/')
    screen.click('Button')
    # the raw markup must appear verbatim in the page instead of executing
    screen.should_contain('</script><script>alert(1)</script>')
    screen.should_contain('</script><script>alert(2)</script>')
    screen.should_contain('<b>Bold 1</b>, `code`, copy&paste, multi\nline')
    screen.should_contain('<b>Bold 2</b>, `code`, copy&paste, multi\nline')
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from marshmallow import Schema, pre_dump, post_dump, fields, missing
from inspire_dojson.utils import get_recid_from_ref, strip_empty_values
from inspire_utils.helpers import force_list
from inspirehep.modules.records.serializers.fields import ListWithLimit, NestedWithoutEmptyObjects
from inspirehep.modules.records.utils import get_linked_records_in_field
from inspire_utils.record import get_value
from .author import AuthorSchemaV1
from .collaboration import CollaborationSchemaV1
from .collaboration_with_suffix import CollaborationWithSuffixSchemaV1
from .publication_info_item import PublicationInfoItemSchemaV1
class ReferenceItemSchemaV1(Schema):
    """Serializer schema for one entry of a record's reference list.

    Each reference is either resolved to its linked record (when its
    ``record`` JSON ref points to a known record) or dumped from the raw
    ``reference`` dict as-is; the ``get_*`` Method fields then normalize
    both shapes to a common output format.
    """

    authors = ListWithLimit(
        NestedWithoutEmptyObjects(AuthorSchemaV1, dump_only=True, default=[]), limit=10)
    collaborations = fields.List(fields.Nested(
        CollaborationSchemaV1, dump_only=True), attribute="collaborations")
    collaborations_with_suffix = fields.List(fields.Nested(
        CollaborationWithSuffixSchemaV1, dump_only=True), attribute="collaborations")
    control_number = fields.Raw()
    label = fields.Raw()
    urls = fields.Raw()
    publication_info = fields.List(
        NestedWithoutEmptyObjects(PublicationInfoItemSchemaV1, dump_only=True))
    titles = fields.Method('get_titles')
    misc = fields.Method('get_misc')
    arxiv_eprint = fields.Method('get_arxiv_eprints')
    dois = fields.Method('get_dois')

    @pre_dump(pass_many=True)
    def filter_references(self, data, many):
        """Resolve all linked reference records in bulk before dumping."""
        reference_records = self.get_resolved_references_by_control_number(
            data)
        if not many:
            return self.get_resolved_reference(data, reference_records)
        references = []
        for reference in data:
            resolved_reference = self.get_resolved_reference(
                reference, reference_records)
            references.append(resolved_reference)
        return references

    @pre_dump
    def force_each_collaboration_to_be_object(self, data):
        """Wrap bare collaboration strings as {'value': ...} objects.

        Only applied to unresolved references (no ``record`` ref), so the
        nested collaboration schemas receive objects in both cases.
        """
        if not data.get('record'):
            collaborations = get_value(data, 'reference.collaborations')
            if collaborations:
                data['reference']['collaborations'] = [{'value': collaboration}
                                                       for collaboration in collaborations]
        return data

    def get_resolved_reference(self, data, reference_records):
        """Return the linked record for *data*, or its raw reference dict."""
        reference_record_id = self.get_reference_record_id(data)
        reference_record = reference_records.get(reference_record_id)
        reference = self.get_reference_or_linked_reference_with_label(
            data, reference_record)
        return reference

    def get_reference_record_id(self, data):
        """Extract the recid from the reference's ``record`` JSON ref."""
        return get_recid_from_ref(data.get('record'))

    def get_resolved_references_by_control_number(self, data):
        """Fetch all records linked from ``references.record`` at once,
        keyed by their control number."""
        data = force_list(data)
        resolved_records = get_linked_records_in_field(
            {'references': data}, 'references.record')
        return {
            record['control_number']: record
            for record in resolved_records
        }

    def get_reference_or_linked_reference_with_label(self, data, reference_record):
        """Prefer the resolved record, carrying the reference label over."""
        if reference_record:
            reference_record.update({
                'label': data.get('reference', {}).get('label', missing)
            })
            return reference_record
        return data.get('reference')

    def get_titles(self, data):
        """Normalize a single ``title`` into the ``titles`` list."""
        title = data.pop('title', None)
        if title:
            data['titles'] = force_list(title)
        return data.get('titles', missing)

    def get_dois(self, data):
        """Normalize DOIs to a list with a single {'value': ...} object.

        Without a control_number (unresolved reference) the first DOI is
        taken directly from ``dois[0]``; for resolved records it is read
        from ``dois[0].value``.
        """
        dois = data.get('dois', None)
        control_number = data.get('control_number')
        if dois and not control_number:
            data['dois'] = force_list(
                {'value': get_value(data, 'dois[0]', default=missing)})
        elif dois:
            data['dois'] = force_list(
                {'value': get_value(data, 'dois[0].value', default=missing)})
        return data.get('dois', missing)

    def get_arxiv_eprints(self, data):
        """Normalize arXiv ids to a list with one {'value': ...} object,
        accepting either a bare ``arxiv_eprint`` or an ``arxiv_eprints``
        list of objects."""
        arxiv_eprint = data.pop('arxiv_eprint', None)
        arxiv_eprints = data.get('arxiv_eprints')
        if arxiv_eprint:
            data['arxiv_eprint'] = force_list({'value': arxiv_eprint})
        elif arxiv_eprints:
            data['arxiv_eprint'] = force_list(
                {'value': get_value(data, 'arxiv_eprints[0].value', default=missing)})
        data.pop('arxiv_eprints', None)
        return data.get('arxiv_eprint', missing)

    def get_misc(self, data):
        """Fall back to the first ``misc`` entry when no title exists."""
        titles = data.get('titles')
        title = data.get('title')
        misc = data.get('misc')
        if not title and not titles and misc:
            return misc[0]
        return missing

    @post_dump
    def METHOD_NAME(self, data):
        """Drop empty values from the dumped representation."""
        return strip_empty_values(data)
from flask import Flask, render_template, request
from flask_socketio import SocketIO
import jwt
import json
from .Settings import Settings
from .IrohaConnector import IrohaConnector
class SocketIoValidator:
    def __init__(self):
        """Set up Flask + Socket.IO and register all socket event handlers."""
        self.moduleName = 'SocketIoValidator'
        # self.the_cb = None
        # shared Iroha state handed to every per-session IrohaConnector
        self.iroha_dic = {}
        # load settings
        self.settings = Settings()
        self.app = Flask(__name__)
        self.app.config['SECRET_KEY'] = 'secret!'
        # socketio = SocketIO(app)
        print(f'socket port: {self.settings.validatorSettings.port}')
        self.socketio = SocketIO(self.app, host='0.0.0.0', port=self.settings.validatorSettings.port, logger=True, engineio_logger=True)
        self.privateKeyFile = 'connector.priv'
        self.algorithm = 'ES256'
        self.emitType = "eventReceived"

        @self.socketio.on('connect')
        def handle_connect():
            # one validator (IrohaConnector) instance per connected session
            print(f'on connect (sessionid: {request.sid})')
            self.session_dict[request.sid] = self.getValidatorInstance()

        @self.socketio.on('disconnect')
        def handle_disconnect():
            print('on disconnect')
            del self.session_dict[request.sid]

        @self.socketio.on('startMonitor')
        def METHOD_NAME():
            print('on startMonitor')
            # clientId = None
            # cb = None
            # self.session_dict[request.sid].startMonitor(clientId, cb)
            # def the_cb(resp): return self.cb_helper(request.sid, resp)
            self.session_dict[request.sid].startMonitor()

        @self.socketio.on('stopMonitor')
        def handle_stopMonitor():
            print('on stopMonitor')
            self.session_dict[request.sid].stopMonitor()

        @self.socketio.on('nop')
        def handle_nop():
            print('received nop')
            self.session_dict[request.sid].nop()

        @self.socketio.on('test-event')
        def handle_event():
            self.session_dict[request.sid].cb('data-from-blockchain')

        @self.socketio.on('sendAsyncRequest')
        def handle_sendAsyncRequest(requestData):
            # fire-and-forget request; result is still signed and echoed back
            print('received sendAsyncRequest')
            print(f"##requestData: {requestData}")
            result = self.session_dict[request.sid].sendAsyncRequest(requestData)
            resp_obj = self.build_res_obj(200, requestData["reqID"], result)
            #respJson = json.dumps(resp_obj)
            self.socketio.emit("response", resp_obj)

        @self.socketio.on('request2')
        def handle_execSyncFunction(requestData):
            # synchronous request/response round trip
            print('received request2')
            print(f"##requestData: {requestData}")
            result = self.session_dict[request.sid].execSyncFunction(None, None, requestData)
            resp_obj = self.build_res_obj(200, requestData["reqID"], result)
            #respJson = json.dumps(resp_obj)
            self.socketio.emit("response", resp_obj)

        # NOTE(review): the handlers above reference self.session_dict, which
        # is only assigned here at the end of __init__; that is safe because
        # no handler can fire before run() starts the server.
        self.session_dict = {}
# build response object of execSyncFunction
def build_res_obj(self, status_code, req_id, result):
print(f"##build_res_obj result: {result}")
signed_results = self.sign(result)
responseData = {}
res_obj = {}
res_obj["status"] = status_code
res_obj["data"] = signed_results
responseData["resObj"] = res_obj
if req_id is not None:
responseData["id"] = req_id
return responseData
    def run(self):
        """Run Validator.

        Blocks serving the Flask-SocketIO app on the configured port.
        """
        # self.init_iroha();
        self.socketio.run(self.app, host='0.0.0.0', port=self.settings.validatorSettings.port)
# def cb_helper(self, sessionid, answer):
# print(f'cb helper: {self.session_dict[request.sid]}')
# return self.session_dict[sessionid].cb(answer)
    def getValidatorInstance(self):
        """Create an IrohaConnector bound to the current Socket.IO session.

        Must be called from within a request context: request.sid identifies
        the connecting client.
        """
        print(f'##called getValidatorInstance()')
        return IrohaConnector(self.socketio, request.sid, self.iroha_dic, self)
    def sign(self, data):
        """Sign *data* as an ES256 JWT using the connector's private key file."""
        print(f'##called sign()')
        with open(self.privateKeyFile, 'br') as fh:
            private_key = fh.read()
        print(f"raw data: {data}")
        # NOTE(review): the algorithm is hard-coded here instead of using
        # self.algorithm (also 'ES256') — confirm the two stay in sync.
        encoded_jwt = jwt.encode(data, private_key, algorithm="ES256")
        print(f"encoded_jwt: {encoded_jwt}")
        return encoded_jwt
    # build result of monitoring
    def build_monitoring_result(self, blockData):
        """Wrap monitored block data into a signed {status, blockData} object."""
        # default=str makes non-JSON-serializable fields stringify instead of raising
        signedBlockData = self.sign({"blockData":json.dumps(blockData, default=str)})
        # Notify only if transaction exists
        retObj = {
            "status" : 200,
            "blockData" : signedBlockData
        }
        print(f'##build_monitoring_result retObj : {retObj}')
        return retObj
# send result of monitoring using socket
def publish_event(self, event):
resp_obj = self.build_monitoring_result(event)
self.socketio.emit(self.emitType, resp_obj |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import tempfile
import os
# dynamic memory allocation can't be tested in unittest
os.environ["ONEFLOW_ONE_EMBEDDING_USE_DYNAMIC_MEMORY_ALLOCATION"] = "0"
import numpy as np
from oneflow.test_utils.test_util import GenArgDict
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
from oneflow.nn.parameter import Parameter
def compare_with_numpy_sgd(
    test_case,
    momentum,
    weight_decay,
    scale,
    learning_rate,
    train_iters,
    use_optional_tensor,
):
    """Compare the fused one_embedding_sgd_update kernel with a NumPy SGD.

    Runs ``train_iters`` update steps on a (num_rows, line_size) embedding
    table both through OneFlow's fused kernel (on CUDA, inside an nn.Graph)
    and through a NumPy re-implementation, then asserts the resulting model
    weights (and momentum buffer, when momentum > 0) agree within tolerance.
    """
    # if use_optional_tensor, pass lr as tensor to sgd_update, else pass as attr.
    num_rows = 500
    embedding_size = 128
    model_shape = (num_rows, embedding_size)
    # with momentum each row stores [weights | momentum], doubling the line size
    line_size = embedding_size * 2 if momentum > 0 else embedding_size
    # per-iteration: how many leading rows receive an update, and whether to skip
    num_valid_seq = np.random.randint(1, num_rows, (train_iters))
    skip_if_seq = [np.random.randint(2) for i in range(train_iters)]
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=model_shape).astype(np.float32))
    init_value = np.random.uniform(size=(num_rows, line_size)).astype(np.float32)
    """
    In OneFlow's optimizer, learning_rate is passed by attr in eager mode, and passed by tensor in lazy mode.
    in this test, if use_optional_tensor is True, we also pass lr_tensor/down_scale_by_tensor/skip_if tensor for unittest.
    if use_optional_tensor is False, we only pass lr by attr, and not have down_scale_by_tensor/skip_if, so mul down_scale_by to scale and skip skip_if's test.
    """
    down_scale_by = 10
    if use_optional_tensor:
        scale_val = scale
    else:
        # if pass as attr instead of tensor, mul down_scale_by to scale_value
        scale_val = scale / down_scale_by

    class TestGraph(flow.nn.Graph):
        # lazy-mode wrapper so the optional tensors are honored by the kernel
        def __init__(self):
            super().__init__()

        def build(
            self,
            ids,
            unique_embeddings,
            embedding_grad,
            lr_tensor,
            down_scale_by_tensor,
            skip_if,
        ):
            # add id shuffle to set num_unique in op, and use it in update
            (_, _, num_valid, _, _, _,) = flow._C.one_embedding_id_shuffle(
                ids, table_ids=None, num_tables=1, embedding_name=""
            )
            return flow._C.one_embedding_sgd_update(
                num_valid,
                unique_embeddings,
                embedding_grad,
                lr_tensor,
                down_scale_by_tensor,
                skip_if,
                learning_rate,
                scale_val,
                weight_decay,
                momentum,
                line_size,
                embedding_size,
                embedding_name="",
            )

    graph = TestGraph()

    def METHOD_NAME():
        # run the fused OneFlow kernel for train_iters steps; returns the table
        unique_embeddings_tensor = flow.tensor(init_value, requires_grad=False).to(
            "cuda"
        )
        if use_optional_tensor:
            lr_tensor = flow.tensor(
                np.array(learning_rate).reshape(1,).astype(np.float32)
            ).to("cuda")
            down_scale_by_tensor = flow.tensor(
                np.array((down_scale_by,)).astype(np.float32)
            ).to("cuda")
        else:
            # pass by attr
            lr_tensor = None
            down_scale_by_tensor = None

        def train_one_iter(
            ids,
            unique_embeddings,
            embedding_grad,
            lr_tensor,
            down_scale_by_tensor,
            skip_if,
        ):
            return graph(
                ids,
                unique_embeddings,
                embedding_grad,
                lr_tensor,
                down_scale_by_tensor,
                skip_if,
            )

        for i in range(train_iters):
            np_ids = np.zeros(num_rows)
            np_ids[0 : num_valid_seq[i]] = np.arange(num_valid_seq[i])
            # add ids of num_valid unique to use id_shuffle out_put num_unique as grad input
            ids = flow.tensor(np_ids.astype(np.int32)).to("cuda")
            grad_tensor = flow.tensor(random_grad_seq[i]).to("cuda")
            if use_optional_tensor:
                skip_if_tensor = flow.tensor(
                    np.array(skip_if_seq[i]).reshape(1,).astype(np.int64)
                ).to("cuda")
            else:
                skip_if_tensor = None
            updated_tensor = train_one_iter(
                ids,
                unique_embeddings_tensor,
                grad_tensor,
                lr_tensor,
                down_scale_by_tensor,
                skip_if_tensor,
            )
            # only the first num_valid rows are updated each iteration
            unique_embeddings_tensor[0 : num_valid_seq[i]] = updated_tensor[
                0 : num_valid_seq[i]
            ]
        return unique_embeddings_tensor

    def sgd_by_numpy():
        # NumPy reference: x holds the weights slice, vt the momentum slice
        x = init_value[:, 0:embedding_size]
        vt = init_value[:, embedding_size:]

        def train_one_iter(num_valid, grad, model, state):
            # the kernel divides grads by down_scale_by internally; mirror it here
            grad[0:num_valid] = grad[0:num_valid] * (scale / down_scale_by)
            next_state = (
                (momentum * state[0:num_valid] + grad[0:num_valid])
                if momentum > 0
                else 0
            )
            if momentum > 0:
                state[0:num_valid] = next_state
                model[0:num_valid] = (
                    model[0:num_valid]
                    - learning_rate * next_state
                    - learning_rate * weight_decay * model[0:num_valid]
                )
            else:
                state[0:num_valid] = 0
                model[0:num_valid] = (
                    model[0:num_valid]
                    - learning_rate * grad[0:num_valid]
                    - learning_rate * weight_decay * model[0:num_valid]
                )
            return (model, state)

        for i in range(train_iters):
            # skip_if only takes effect when passed as a tensor
            if skip_if_seq[i] > 0 and use_optional_tensor:
                pass
            else:
                (x, vt) = train_one_iter(
                    int(num_valid_seq[i]), random_grad_seq[i], x, vt
                )
        return x, vt

    oneflow_res = METHOD_NAME().numpy()
    of_model = oneflow_res[:, 0:embedding_size]
    of_momentum = oneflow_res[:, embedding_size:]
    np_model, np_momentum = sgd_by_numpy()
    test_case.assertTrue(
        np.allclose(of_model.flatten(), np_model.flatten(), rtol=0.001, atol=0.001)
    )
    if momentum > 0:
        test_case.assertTrue(
            np.allclose(
                of_momentum.flatten(), np_momentum.flatten(), rtol=0.001, atol=0.001
            )
        )
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestOptimizers(flow.unittest.TestCase):
    """Compare fused one-embedding SGD against the NumPy reference across
    the full hyper-parameter grid."""

    def test_one_embedding_sgd(test_case):
        # Cartesian product over all listed hyper-parameter values
        arg_dict = OrderedDict()
        arg_dict["momentum"] = [0, 0.9]
        arg_dict["weight_decay"] = [0, 0.1]
        arg_dict["scale"] = [1, 0.1]
        arg_dict["learning_rate"] = [1, 0.9]
        arg_dict["train_iters"] = [10]
        arg_dict["use_optional_tensor"] = [True, False]
        for arg in GenArgDict(arg_dict):
            compare_with_numpy_sgd(test_case, **arg)
# fixed: removed stray " |" extraction artifact after unittest.main()
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(BitcoinTestFramework):
    """Functional test for -spentindex: getspentinfo, spent/input info in
    verbose getrawtransaction output (mempool and on-disk), and
    getblockdeltas."""

    def setup_chain(self):
        # fresh 4-node chain in the temporary test directory
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-spentindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-spentindex"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-spentindex", "-txindex"]))
        connect_nodes(self.nodes, 0, 1)
        connect_nodes(self.nodes, 0, 2)
        connect_nodes(self.nodes, 0, 3)
        self.is_network_split = False
        self.sync_all()

    def METHOD_NAME(self):
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        # Check that
        print("Testing spent index...")
        # hand-assemble a P2PKH scriptPubKey from raw opcode hex bytes
        privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address = "ztUB6YWTcj2uUe5Rbucnc7oFevn7wCKyN63"
        op_dup = "76"
        op_hash160 = "a9"
        op_push_20_bytes_onto_the_stack = "14"
        addressHash = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc"
        op_equalverify = "88"
        op_checksig = "ac"
        genesisCbah = "20bb1acf2c1fc1228967a611c7db30632098f0c641855180b5fe23793b72eea50d00b4"
        scriptPubKey = binascii.unhexlify(op_dup + op_hash160 + op_push_20_bytes_onto_the_stack + addressHash + op_equalverify + op_checksig + genesisCbah)
        # spend the first wallet UTXO into the hand-built script
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        amount = to_satoshis(unspent[0]["amount"])
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        print("Testing getspentinfo method...")
        # Check that the spentinfo works standalone
        info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
        assert_equal(info["txid"], txid)
        assert_equal(info["index"], 0)
        assert_equal(info["height"], 106)
        print("Testing getrawtransaction method...")
        # Check that verbose raw transaction includes spent info
        txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
        assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
        # Check that verbose raw transaction includes input values
        txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
        assert_equal(txVerbose2["vin"][0]["value"], Decimal(unspent[0]["amount"]))
        assert_equal(txVerbose2["vin"][0]["valueZat"], amount)
        # Check that verbose raw transaction includes address values and input values
        # NOTE(review): privkey2/address2 duplicate privkey/address above;
        # privkey2 is never used afterwards.
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "ztUB6YWTcj2uUe5Rbucnc7oFevn7wCKyN63"
        addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc"
        scriptPubKey2 = binascii.unhexlify(op_dup + op_hash160 + op_push_20_bytes_onto_the_stack + addressHash2 + op_equalverify + op_checksig + genesisCbah)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
        tx2.vout = [CTxOut(amount, scriptPubKey2)]
        # NOTE(review): likely meant tx2.rehash() — tx2's cached hash is never
        # recomputed; serialization below does not depend on it, so the test
        # still passes.
        tx.rehash()
        self.nodes[0].importprivkey(privkey)
        signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
        # Check the mempool index
        self.sync_all()
        txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
        assert_equal(txVerbose3["vin"][0]["address"], address2)
        assert_equal(txVerbose3["vin"][0]["value"], Decimal(unspent[0]["amount"]))
        assert_equal(txVerbose3["vin"][0]["valueZat"], amount)
        # Check the database index
        block_hash = self.nodes[0].generate(1)
        self.sync_all()
        txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
        assert_equal(txVerbose4["vin"][0]["address"], address2)
        assert_equal(txVerbose4["vin"][0]["value"], Decimal(unspent[0]["amount"]))
        assert_equal(txVerbose4["vin"][0]["valueZat"], amount)
        # Check block deltas
        print("Testing getblockdeltas...")
        block = self.nodes[3].getblockdeltas(block_hash[0])
        assert_equal(len(block["deltas"]), 2)
        # delta 0 is the coinbase: no inputs, multiple outputs
        assert_equal(block["deltas"][0]["index"], 0)
        assert_equal(len(block["deltas"][0]["inputs"]), 0)
        assert_equal(len(block["deltas"][0]["outputs"]), 4) # Miner, CF, SN, XN
        # delta 1 is our spend of txid into scriptPubKey2
        assert_equal(block["deltas"][1]["index"], 1)
        assert_equal(block["deltas"][1]["txid"], txid2)
        assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["inputs"][0]["address"], "ztUB6YWTcj2uUe5Rbucnc7oFevn7wCKyN63")
        assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], amount * -1)
        assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
        assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
        assert_equal(block["deltas"][1]["outputs"][0]["address"], "ztUB6YWTcj2uUe5Rbucnc7oFevn7wCKyN63")
        assert_equal(block["deltas"][1]["outputs"][0]["satoshis"], amount)
        print("Passed\n")
# fixed: removed stray " |" extraction artifact after main()
if __name__ == '__main__':
    SpentIndexTest().main()
"""
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the bases classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
def get_unpatched(item):
    """Return the unpatched original behind *item*.

    Dispatches to the class or function helper depending on the item's
    type; anything else yields None.
    """
    if isinstance(item, six.class_types):
        return get_unpatched_class(item)
    if isinstance(item, types.FunctionType):
        return get_unpatched_function(item)
    return None
def get_unpatched_class(cls):
    """Return the first ancestor of *cls* defined outside setuptools.

    Protects against re-patching the distutils if reloaded, and ensures no
    other distutils extension monkeypatched the distutils first: the first
    non-setuptools ancestor must itself come from distutils.
    """
    base = next(
        candidate
        for candidate in _get_mro(cls)
        if not candidate.__module__.startswith('setuptools')
    )
    if not base.__module__.startswith('distutils'):
        raise AssertionError(
            "distutils has already been patched by %r" % cls)
    return base
def patch_all():
    """Monkeypatch distutils to use setuptools' improved implementations.

    Replaces Command, Distribution and Extension throughout distutils,
    and on affected Python versions also ``findall`` and the default
    upload repository URL.
    """
    # we can't patch distutils.cmd, alas
    distutils.core.Command = setuptools.Command
    has_issue_12885 = sys.version_info <= (3, 5, 3)
    if has_issue_12885:
        # fix findall bug in distutils (http://bugs.python.org/issue12885)
        distutils.filelist.findall = setuptools.findall
    # these interpreter versions shipped a default repository URL predating Warehouse
    needs_warehouse = (
        sys.version_info < (2, 7, 13)
        or
        (3, 4) < sys.version_info < (3, 4, 6)
        or
        (3, 5) < sys.version_info <= (3, 5, 3)
    )
    if needs_warehouse:
        warehouse = 'https://upload.pypi.org/legacy/'
        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
    _patch_distribution_metadata()
    # Install Distribution throughout the distutils
    for module in distutils.dist, distutils.core, distutils.cmd:
        module.Distribution = setuptools.dist.Distribution
    # Install the patched Extension
    distutils.core.Extension = setuptools.extension.Extension
    distutils.extension.Extension = setuptools.extension.Extension
    if 'distutils.command.build_ext' in sys.modules:
        # build_ext may have captured Extension at import time; fix its copy too
        sys.modules['distutils.command.build_ext'].Extension = (
            setuptools.extension.Extension
        )
    patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata():
    """Copy setuptools' higher-metadata-standard implementations of
    write_pkg_file, read_pkg_file and get_metadata_version onto
    distutils' DistributionMetadata."""
    attrs = ('write_pkg_file', 'read_pkg_file', 'get_metadata_version')
    for attr in attrs:
        setattr(
            distutils.dist.DistributionMetadata,
            attr,
            getattr(setuptools.dist, attr),
        )
def patch_func(replacement, target_mod, func_name):
    """Replace *func_name* on *target_mod* with *replacement*.

    The original is resolved by name (so an already-installed patch is
    never re-captured) and stored on the replacement as ``unpatched``
    before the swap, unless the replacement already carries one.
    """
    original = getattr(target_mod, func_name)
    # remember the original on the replacement, once
    replacement.__dict__.setdefault('unpatched', original)
    # install the replacement in the target module
    setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
    """Return the original function stored on *candidate* by patch_func."""
    return candidate.unpatched
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.

    No-op on non-Windows platforms. Each patch group is attempted
    independently; a missing distutils module or function is skipped.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')
    if platform.system() != 'Windows':
        # Compilers only available on Microsoft Windows
        return
    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        # msvc9 replacements are prefixed 'msvc9_', everything else 'msvc14_'.
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name
    # Python 2.7 to 3.4
    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
    # Python 3.5+
    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
    try:
        # Patch distutils.msvc9compiler
        patch_func(*msvc9('find_vcvarsall'))
        patch_func(*msvc9('query_vcvarsall'))
    except ImportError:
        pass
    try:
        # Patch distutils._msvccompiler._get_vc_env
        patch_func(*msvc14('_get_vc_env'))
    except ImportError:
        pass
    try:
        # Patch distutils._msvccompiler.gen_lib_options for Numpy
        patch_func(*msvc14('gen_lib_options'))
    except ImportError:
        pass
from ipaddress import IPv4Address
from pathlib import PureWindowsPath
from typing import List
from unittest.mock import MagicMock
import pytest
from agent_plugins.exploiters.wmi.src.smb_client import ShareInfo, SMBClient
from agent_plugins.exploiters.wmi.src.smb_options import SMBOptions
from agent_plugins.exploiters.wmi.src.smb_remote_access_client import (
COPY_FILE_TAGS,
EXECUTION_TAGS,
LOGIN_TAGS,
SHARE_DISCOVERY_TAGS,
SMBRemoteAccessClient,
)
from tests.data_for_tests.propagation_credentials import FULL_CREDENTIALS
from common import OperatingSystem
from common.credentials import Credentials
from infection_monkey.exploit import IAgentBinaryRepository
from infection_monkey.exploit.tools import (
RemoteAuthenticationError,
RemoteCommandExecutionError,
RemoteFileCopyError,
)
from infection_monkey.i_puppet import TargetHost
# Tags the exploiter is expected to carry into every operation.
EXPLOITER_TAGS = {"smb-exploiter", "unit-test"}
CREDENTIALS: List[Credentials] = []
# Remote path the agent binary would be copied to on the victim.
DESTINATION_PATH = PureWindowsPath("C:\\destination_path")
FILE = b"file content"
# NOTE(review): name misspells "RESOURCES" but is kept for compatibility with
# its uses below. test_get_writable_paths expects exactly the paths of
# share1 and share3 to be considered writable.
SHARED_RESOURECES = (
    ShareInfo("share1", PureWindowsPath("C:\\path1"), current_uses=10, max_uses=1000),
    ShareInfo("share2", PureWindowsPath("C:\\path2"), current_uses=100, max_uses=100),
    ShareInfo("share3", PureWindowsPath("C:\\"), current_uses=0, max_uses=10),
    ShareInfo("share4", PureWindowsPath("invalid_path"), current_uses=50, max_uses=100),
)
TARGET_HOST = TargetHost(ip=IPv4Address("1.1.1.1"), operating_system=OperatingSystem.WINDOWS)
def stub_command_builder(*args, **kwargs):
    """Command-builder stand-in: accepts anything, always returns "command"."""
    del args, kwargs  # accepted only for signature compatibility
    return "command"
@pytest.fixture
def mock_smb_client():
    """SMBClient mock that reports connected() only after a login call."""
    client = MagicMock(spec=SMBClient)
    client.connected.return_value = False
    def set_connected(value: bool):
        client.connected.return_value = value
    # Simulate a successful login: any connect_with_user() call flips the
    # client into the connected state.
    client.connect_with_user.side_effect = lambda *_, **__: set_connected(True)
    return client
@pytest.fixture
def mock_agent_binary_repository() -> IAgentBinaryRepository:
    """Bare agent-binary repository mock; no behavior is configured."""
    return MagicMock(spec=IAgentBinaryRepository)
@pytest.fixture
def smb_remote_access_client(mock_smb_client) -> SMBRemoteAccessClient:
    """The client under test, wired to the mocked SMBClient and stub builder."""
    return SMBRemoteAccessClient(TARGET_HOST, SMBOptions(), stub_command_builder, mock_smb_client)
def test_login__succeeds(
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """A successful login adds the login tags to the caller's tag set."""
    tags = EXPLOITER_TAGS.copy()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], tags)
    assert tags == EXPLOITER_TAGS.union(LOGIN_TAGS)
def test_login__fails(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """A failed connection raises, but the login tags are still recorded."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.connect_with_user.side_effect = Exception()
    with pytest.raises(RemoteAuthenticationError):
        smb_remote_access_client.login(FULL_CREDENTIALS[0], tags)
    assert tags == EXPLOITER_TAGS.union(LOGIN_TAGS)
def test_execute__fails_if_not_authenticated(
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """Executing without a prior login raises and adds no execution tags."""
    tags = EXPLOITER_TAGS.copy()
    with pytest.raises(RemoteCommandExecutionError):
        smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS
def test_execute__fails_if_command_not_executed(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """If the remote service fails to run, execution raises after tagging.

    Execution tags are still added because the attempt was made; only the
    remote command itself failed.
    """
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.run_service.side_effect = Exception("file")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteCommandExecutionError):
        smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)
def test_execute__succeeds(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """After login, agent execution succeeds and adds the execution tags."""
    tags = EXPLOITER_TAGS.copy()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    smb_remote_access_client.execute_agent(DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)
def test_copy_file__fails_if_not_authenticated(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """Copying without a prior login raises and adds no tags."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.connected.return_value = False
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS
def test_copy_file__fails_if_no_shares_found(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """With no shares available the copy fails after share discovery."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = ()
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS)
def test_copy_file__fails_if_unable_to_connect_to_share(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """A share-connection failure aborts the copy; only discovery tags added."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    mock_smb_client.connect_to_share.side_effect = Exception("failed")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS)
def test_copy_file__fails_if_unable_to_send_file(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """A send failure raises, but both discovery and copy tags are added."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    mock_smb_client.send_file.side_effect = Exception("file")
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    with pytest.raises(RemoteFileCopyError):
        smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS, COPY_FILE_TAGS)
def test_copy_file__success(
    mock_smb_client: SMBClient,
    smb_remote_access_client: SMBRemoteAccessClient,
):
    """A successful copy adds the discovery and copy tags."""
    tags = EXPLOITER_TAGS.copy()
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    smb_remote_access_client.login(FULL_CREDENTIALS[0], set())
    smb_remote_access_client.copy_file(FILE, DESTINATION_PATH, tags)
    assert tags == EXPLOITER_TAGS.union(SHARE_DISCOVERY_TAGS, COPY_FILE_TAGS)
def test_get_writable_paths(
    mock_smb_client: SMBClient, smb_remote_access_client: SMBRemoteAccessClient
):
    """For the canned share list, exactly share1 and share3 are writable."""
    mock_smb_client.query_shared_resources.return_value = SHARED_RESOURECES
    writable_paths = smb_remote_access_client.get_writable_paths()
    assert len(writable_paths) == 2
    assert SHARED_RESOURECES[0].path in writable_paths
    assert SHARED_RESOURECES[2].path in writable_paths
"""Defines unit tests for :mod:`colour.models.rgb.hanbury2003` module."""
import numpy as np
import unittest
from itertools import product
from colour.models.rgb import RGB_to_IHLS, IHLS_to_RGB
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestRGB_to_IHLS",
"TestIHLS_to_RGB",
]
class TestRGB_to_IHLS(unittest.TestCase):
    """
    Define :func:`colour.models.rgb.hanbury2003.RGB_to_IHLS` definition unit
    tests methods.
    """
    def test_RGB_to_IHLS(self):
        """Test :func:`colour.models.rgb.hanbury2003.RGB_to_IHLS` definition."""
        np.testing.assert_array_almost_equal(
            RGB_to_IHLS(np.array([0.45620519, 0.03081071, 0.04091952])),
            np.array([6.26236117, 0.12197943, 0.42539448]),
            decimal=7,
        )
        np.testing.assert_array_almost_equal(
            RGB_to_IHLS(np.array([0.00000000, 0.00000000, 0.00000000])),
            np.array([0.00000000, 0.00000000, 0.00000000]),
            decimal=7,
        )
        np.testing.assert_array_almost_equal(
            RGB_to_IHLS(np.array([1.00000000, 1.00000000, 1.00000000])),
            np.array([0.00000000, 1.00000000, 0.00000000]),
            decimal=7,
        )
    def test_n_dimensional_RGB_to_IHLS(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.RGB_to_IHLS` definition
        n-dimensional arrays support.
        """
        RGB = np.array([0.45620519, 0.03081071, 0.04091952])
        HYS = RGB_to_IHLS(RGB)
        RGB = np.tile(RGB, (6, 1))
        HYS = np.tile(HYS, (6, 1))
        np.testing.assert_array_almost_equal(RGB_to_IHLS(RGB), HYS, decimal=7)
        RGB = np.reshape(RGB, (2, 3, 3))
        HYS = np.reshape(HYS, (2, 3, 3))
        np.testing.assert_array_almost_equal(RGB_to_IHLS(RGB), HYS, decimal=7)
    def test_domain_range_scale_RGB_to_IHLS(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.RGB_to_IHLS` definition
        domain and range scale support.
        """
        RGB = np.array([0.45620519, 0.03081071, 0.04091952])
        HYS = RGB_to_IHLS(RGB)
        d_r = (("reference", 1), ("1", 1), ("100", 100))
        for scale, factor in d_r:
            with domain_range_scale(scale):
                np.testing.assert_array_almost_equal(
                    RGB_to_IHLS(RGB * factor), HYS * factor, decimal=7
                )
    @ignore_numpy_errors
    def test_nan_RGB_to_IHLS(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.RGB_to_IHLS` definition nan
        support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = np.array(list(set(product(cases, repeat=3))))
        RGB_to_IHLS(cases)
class TestIHLS_to_RGB(unittest.TestCase):
    """
    Define :func:`colour.models.rgb.hanbury2003.IHLS_to_RGB` definition unit
    tests methods.
    """
    def test_IHLS_to_RGB(self):
        """Test :func:`colour.models.rgb.hanbury2003.IHLS_to_RGB` definition."""
        np.testing.assert_array_almost_equal(
            IHLS_to_RGB(np.array([6.26236117, 0.12197943, 0.42539448])),
            np.array([0.45620519, 0.03081071, 0.04091952]),
            decimal=7,
        )
        np.testing.assert_array_almost_equal(
            IHLS_to_RGB(np.array([0.00000000, 0.00000000, 0.00000000])),
            np.array([0.00000000, 0.00000000, 0.00000000]),
            decimal=7,
        )
        np.testing.assert_array_almost_equal(
            IHLS_to_RGB(np.array([0.00000000, 1.00000000, 0.00000000])),
            np.array([1.00000000, 1.00000000, 1.00000000]),
            decimal=7,
        )
    def test_n_dimensional_IHLS_to_RGB(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.IHLS_to_RGB` definition
        n-dimensional arrays support.
        """
        HYS = np.array([6.26236117, 0.12197943, 0.42539448])
        RGB = IHLS_to_RGB(HYS)
        HYS = np.tile(HYS, (6, 1))
        RGB = np.tile(RGB, (6, 1))
        np.testing.assert_array_almost_equal(IHLS_to_RGB(HYS), RGB, decimal=7)
        HYS = np.reshape(HYS, (2, 3, 3))
        RGB = np.reshape(RGB, (2, 3, 3))
        np.testing.assert_array_almost_equal(IHLS_to_RGB(HYS), RGB, decimal=7)
    def test_domain_range_scale_IHLS_to_RGB(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.IHLS_to_RGB` definition
        domain and range scale support.
        """
        HYS = np.array([6.26236117, 0.12197943, 0.42539448])
        RGB = IHLS_to_RGB(HYS)
        d_r = (("reference", 1), ("1", 1), ("100", 100))
        for scale, factor in d_r:
            with domain_range_scale(scale):
                np.testing.assert_array_almost_equal(
                    IHLS_to_RGB(HYS * factor), RGB * factor, decimal=7
                )
    @ignore_numpy_errors
    def test_nan_IHLS_to_RGB(self):
        """
        Test :func:`colour.models.rgb.hanbury2003.IHLS_to_RGB` definition nan
        support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = np.array(list(set(product(cases, repeat=3))))
        IHLS_to_RGB(cases)
# Manual entry point: run this module's tests directly.
if __name__ == "__main__":
    unittest.main()
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that Mean Field Games are implemented properly.
These tests are intended to help developers to write mean field games that
satisfy most of the unspecified constraints assumed by the following algorithms:
- python/mfg/algorithms/policy_value.py
- python/mfg/algorithms/nash_conv.py
- python/mfg/algorithms/mirror_descent.py
- python/mfg/algorithms/fictitious_play.py
- python/mfg/algorithms/distribution.py
- python/mfg/algorithms/best_response_value.py
- python/rl_environment.py
These tests are not exhaustive and will be updated with time.
"""
import random
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.mfg import games as mfg_games # pylint:disable=unused-import
from open_spiel.python.mfg.algorithms import distribution
import pyspiel
FLAGS = flags.FLAGS
# Command-line knobs shared by all tests in this module.
# Use a small depth limit to keep the length of the test reasonable.
flags.DEFINE_integer(
    'get_all_states_depth_limit', 10,
    'Depth limit of getting all the states (-1 for unlimited)')
flags.DEFINE_integer('rl_env_simulations', 10,
                     'Number of simulations for the RL environment tests')
def _get_next_states(state, next_states, to_string):
  """Extract non-chance states for a subgame into the all_states dict.

  Children of `state` (chance outcomes, a dummy mean-field update and legal
  player actions) are added to `next_states`, keyed by `to_string(child)`.
  Keys already present are left untouched.
  """
  is_mean_field = state.current_player() == pyspiel.PlayerId.MEAN_FIELD
  if state.is_chance_node():
    # Add only if not already present
    for action, _ in state.chance_outcomes():
      next_state = state.child(action)
      state_str = to_string(next_state)
      if state_str not in next_states:
        next_states[state_str] = next_state
  if is_mean_field:
    support = state.distribution_support()
    next_state = state.clone()
    support_length = len(support)
    # update with a dummy (uniform) distribution
    next_state.update_distribution(
        [1.0 / support_length for _ in range(support_length)])
    state_str = to_string(next_state)
    if state_str not in next_states:
      next_states[state_str] = next_state
  if int(state.current_player()) >= 0:
    for action in state.legal_actions():
      next_state = state.child(action)
      state_str = to_string(next_state)
      if state_str not in next_states:
        next_states[state_str] = next_state
# Backward-compatible alias for the old placeholder name used by callers.
METHOD_NAME = _get_next_states
def _next_states(states, to_string):
  """Return (keys, values) of all successor states of `states`."""
  successors = {}
  for current in states:
    METHOD_NAME(current, successors, to_string)
  # Keys are the stringified states, values the state objects themselves.
  return set(successors), set(successors.values())
def type_from_states(states):
  """Get node type of a list of states and assert they are the same."""
  observed = {state.get_type() for state in states}
  # All states of one BFS level must share a single node type.
  assert len(observed) == 1
  return observed.pop()
class FiniteHorizonTest(parameterized.TestCase):
  """Structural checks run against every registered mean-field game."""
  @parameterized.parameters(
      {'game_name': 'python_mfg_crowd_modelling'},
      {'game_name': 'mfg_crowd_modelling'},
      {'game_name': 'mfg_garnet'},
      {'game_name': 'mfg_crowd_modelling_2d'},
      {'game_name': 'python_mfg_periodic_aversion'},
      {'game_name': 'python_mfg_predator_prey'},
  )
  def test_is_finite_horizon(self, game_name):
    """Check that the game has no loop."""
    game = pyspiel.load_game(game_name)
    states = set(game.new_initial_states())
    def to_string(s):
      return s.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
    all_states_key = set(to_string(state) for state in states)
    # Expand level by level; a key seen twice would indicate a cycle.
    while type_from_states(states) != pyspiel.StateType.TERMINAL:
      new_states_key, states = _next_states(states, to_string)
      self.assertEmpty(all_states_key.intersection(new_states_key))
      all_states_key.update(new_states_key)
  @parameterized.parameters(
      {'game_name': 'python_mfg_crowd_modelling'},
      {'game_name': 'mfg_crowd_modelling'},
      {'game_name': 'mfg_garnet'},
      {'game_name': 'mfg_crowd_modelling_2d'},
      {'game_name': 'python_mfg_periodic_aversion'},
      {'game_name': 'python_mfg_predator_prey'},
  )
  def test_has_at_least_an_action(self, game_name):
    """Check that all population's state have at least one action."""
    game = pyspiel.load_game(game_name)
    def to_string(s):
      return s.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
    states = get_all_states.get_all_states(
        game,
        depth_limit=FLAGS.get_all_states_depth_limit,
        include_terminals=False,
        include_chance_states=False,
        include_mean_field_states=False,
        to_string=to_string)
    for state in states.values():
      self.assertNotEmpty(state.legal_actions())
  @parameterized.parameters(
      {'game_name': 'python_mfg_crowd_modelling'},
      {'game_name': 'mfg_crowd_modelling'},
      {'game_name': 'mfg_garnet'},
      {'game_name': 'mfg_crowd_modelling_2d'},
      {'game_name': 'python_mfg_periodic_aversion'},
      {'game_name': 'python_mfg_predator_prey'},
  )
  def test_rl_environment(self, game_name):
    """Check that the RL environment runs for a few trajectories."""
    game = pyspiel.load_game(game_name)
    uniform_policy = policy.UniformRandomPolicy(game)
    mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
    # One environment per population.
    envs = [
        rl_environment.Environment(
            game, mfg_distribution=mfg_dist, mfg_population=p)
        for p in range(game.num_players())
    ]
    for p, env in enumerate(envs):
      for _ in range(FLAGS.rl_env_simulations):
        time_step = env.reset()
        while not time_step.last():
          a = random.choice(time_step.observations['legal_actions'][p])
          time_step = env.step([a])
    env = envs[0]
    self.assertEqual(env.mfg_distribution, mfg_dist)
    # Update the distribution.
    new_mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
    env.update_mfg_distribution(new_mfg_dist)
    self.assertEqual(env.mfg_distribution, new_mfg_dist)
if __name__ == '__main__':
absltest.main() |
import re
from organization.models.organization_project_published import OrgProjectPublished
from organization.models.members import MembershipRequests
from organization.utility.email import (
send_join_project_request_email,
send_mention_email,
send_org_project_published_email,
send_project_like_email,
)
from climateconnect_api.models import UserProfile
from climateconnect_api.models.notification import (
Notification,
)
from climateconnect_api.utility.notification import (
create_user_notification,
send_comment_notification,
send_out_live_notification,
create_follower_notification,
)
from django.contrib.auth.models import User
from organization.models import ProjectMember
from organization.models.content import ProjectComment
def create_project_comment_reply_notification(
    project, comment, sender, user_url_slugs_to_ignore
):
    """Notify affected users about a reply to a project comment."""
    # Delegates to the shared comment-notification helper with is_reply=True.
    return send_comment_notification(
        is_reply=True,
        notification_type=Notification.REPLY_TO_PROJECT_COMMENT,
        object_commented_on=project,
        comment=comment,
        comment_model=ProjectComment,
        comment_object_name="project_comment",
        sender=sender,
        user_url_slugs_to_ignore=user_url_slugs_to_ignore,
    )
def create_project_comment_notification(
    project, comment, sender, user_url_slugs_to_ignore
):
    """Notify affected users about a new top-level project comment."""
    # Delegates to the shared comment-notification helper with is_reply=False.
    return send_comment_notification(
        is_reply=False,
        notification_type=Notification.PROJECT_COMMENT,
        object_commented_on=project,
        comment=comment,
        comment_model=ProjectComment,
        comment_object_name="project_comment",
        sender=sender,
        user_url_slugs_to_ignore=user_url_slugs_to_ignore,
    )
def get_mentions(text, url_slugs_only):
    """Extract ``@@@__slug^^__display@@@^^^`` mention markers from *text*.

    Args:
        text: string possibly containing mention markup.
        url_slugs_only: if truthy, return just the url_slug strings;
            otherwise return the raw (full_match, url_slug, display) tuples.
    Returns:
        list of str or list of 3-tuples, in order of appearance.
    """
    # Raw string: the pattern contains '\^' / '\@' escapes that are invalid
    # escape sequences in a normal string literal (SyntaxWarning on 3.12+).
    pattern = re.compile(r"(@@@__(?P<url_slug>[^\^]*)\^\^__(?P<display>[^\@]*)@@@\^\^\^)")
    matches = re.findall(pattern, text)
    if url_slugs_only:
        return [match[1] for match in matches]
    return matches
def create_comment_mention_notification(entity_type, entity, comment, sender):
    """Notify every user @-mentioned in *comment*, except the sender.

    Args:
        entity_type: "project" or "idea" - what the comment belongs to.
        entity: the commented-on project or idea (forwarded to the email).
        comment: comment instance whose ``content`` holds the mention markup.
        sender: User who wrote the comment; never notified about themselves.
    Returns:
        The created Notification.
    Raises:
        ValueError: for an unsupported entity_type. (Previously this fell
        through and crashed later with an UnboundLocalError.)
    """
    if entity_type == "project":
        notification = Notification.objects.create(
            notification_type=Notification.MENTION, project_comment=comment
        )
    elif entity_type == "idea":
        notification = Notification.objects.create(
            notification_type=Notification.MENTION, idea_comment=comment
        )
    else:
        raise ValueError(f"Unsupported entity_type: {entity_type!r}")
    matches = get_mentions(text=comment.content, url_slugs_only=False)
    sender_url_slug = UserProfile.objects.get(user=sender).url_slug
    for match in matches:
        url_slug = match[1]
        if not url_slug == sender_url_slug:
            # filter(...)[0] mirrors the original behavior; it raises
            # IndexError if the mentioned profile does not exist.
            user = UserProfile.objects.filter(url_slug=url_slug)[0].user
            create_user_notification(user, notification)
            send_out_live_notification(user.id)
            send_mention_email(
                user=user,
                entity_type=entity_type,
                entity=entity,
                comment=comment.content,
                sender=sender,
                notification=notification,
            )
    return notification
def create_project_follower_notification(project_follower):
    """Create a PROJECT_FOLLOWER notification for a new project follower."""
    create_follower_notification(
        notif_type_number=Notification.PROJECT_FOLLOWER,
        follower=project_follower,
        follower_entity=project_follower.project,
        follower_user_id=project_follower.user.id,
    )
def create_organization_follower_notification(organization_follower):
    """Create an ORGANIZATION_FOLLOWER notification for a new org follower."""
    create_follower_notification(
        notif_type_number=Notification.ORGANIZATION_FOLLOWER,
        follower=organization_follower,
        follower_entity=organization_follower.organization,
        follower_user_id=organization_follower.user.id,
    )
def create_organization_project_published_notification(
    followers, organization, project
):
    """Notify each follower of *organization* that *project* was published.

    Creates one OrgProjectPublished record and one Notification per
    follower, then sends the corresponding email.
    """
    for follower in followers:
        org_project_published = OrgProjectPublished.objects.create(
            organization=organization, project=project, user=follower.user
        )
        notification = Notification.objects.create(
            notification_type=Notification.ORG_PROJECT_PUBLISHED,
            org_project_published=org_project_published,
        )
        create_user_notification(org_project_published.user, notification)
        send_org_project_published_email(
            org_project_published.user, org_project_published, notification
        )
def create_project_join_request_notification(
    requester, project_admins, project, request
):
    """Notify every project admin about a request to join their project.

    :param requester: User who sent the request
    :type requester: User
    :param project_admins: iterable of admin users to notify
    :type project_admins: List(UserProfile)
    :param project: the project being joined (used in the notification text)
    :param request: the MembershipRequests row backing the notification
    """
    requester_name = requester.first_name + " " + requester.last_name
    # One shared notification, attached to each admin below.
    notification = Notification.objects.create(
        notification_type=Notification.JOIN_PROJECT_REQUEST,
        text=f"{requester_name} wants to join your project {project.name}!",
        membership_request=request,
    )
    for project_admin in project_admins:
        create_user_notification(project_admin, notification)
        send_join_project_request_email(project_admin, request, requester, notification)
def create_project_join_request_approval_notification(request_id):
    """
    Creates a notification about an approved request to join a project to the requester.
    :param request_id: Id of the request of the approved MembershipRequest
    :type request_id: int
    """
    # Raises MembershipRequests.DoesNotExist if the id is unknown.
    request = MembershipRequests.objects.get(id=request_id)
    notification = Notification.objects.create(
        notification_type=Notification.PROJECT_JOIN_REQUEST_APPROVED,
        membership_request=request,
    )
    create_user_notification(request.user, notification)
def create_project_like_notification(project_like):
    """Notify every member of the liked project, except the liker themselves."""
    notification = Notification.objects.create(
        notification_type=Notification.PROJECT_LIKE, project_like=project_like
    )
    project_team = ProjectMember.objects.filter(project=project_like.project).values(
        "user"
    )
    for member in project_team:
        # Skip the user who performed the like.
        if not member["user"] == project_like.user.id:
            user = User.objects.get(id=member["user"])
            create_user_notification(user, notification)
            send_project_like_email(user, project_like, notification)
from datetime import datetime
from module.exception import ScriptError
from module.logger import logger
from module.shipyard.ui import ShipyardUI
from module.shop.shop_general import GeneralShop
from module.ui.page import page_main, page_shipyard
from module.config.utils import get_server_last_update
# Blueprint prices keyed by tuples of blueprint indexes sharing that price.
# NOTE(review): "PRIZE" is a long-standing misspelling of "price"; the names
# are kept because they are referenced by RewardShipyard below.
PRBP_BUY_PRIZE = {
    (1, 2): 0,
    (3, 4): 150,
    (5, 6, 7): 300,
    (8, 9, 10): 600,
    (11, 12, 13, 14, 15): 1050,
}
# DR blueprints cost considerably more than PR ones at every tier.
DRBP_BUY_PRIZE = {
    (1, 2): 0,
    (3, 4, 5, 6): 600,
    (7, 8, 9, 10): 1200,
    (11, 12, 13, 14, 15): 3000,
}
class RewardShipyard(ShipyardUI, GeneralShop):
    """Buys and spends PR/DR blueprints in the shipyard within the gold budget."""
    # Rarity currently being processed; switched between 'PR' and 'DR' in run().
    _shipyard_bp_rarity = 'PR'
    def _shipyard_get_cost(self, amount, rarity=None):
        """
        Args:
            amount (int): Index of the blueprint to buy
            rarity (str): 'DR', 'PR'
        Returns:
            int: Price to buy
        """
        if rarity is None:
            rarity = self._shipyard_bp_rarity
        if rarity == 'PR':
            cost = [v for k, v in PRBP_BUY_PRIZE.items() if amount in k]
            if len(cost):
                return cost[0]
            else:
                # Indexes beyond the table cost the flat maximum price
                return 1500
        elif rarity == 'DR':
            cost = [v for k, v in DRBP_BUY_PRIZE.items() if amount in k]
            if len(cost):
                return cost[0]
            else:
                return 6000
        else:
            raise ScriptError(f'Invalid rarity in _shipyard_get_cost: {rarity}')
    def _shipyard_calculate(self, start, count, pay=False):
        """
        Calculates the maximum number
        of BPs based on current parameters
        and _shop_gold_coins amount
        Submits payment if 'pay' set to True
        Args:
            start (int): BUY_PRIZE key to resume at
            count (int): Total remaining to buy
            pay (bool): Finalize payment to _shop_gold_coins
        Returns:
            int, int
                - BUY_PRIZE for next _shipyard_buy_calc
                  call
                - Total capable of buying currently
        """
        if start <= 0 or count <= 0:
            return start, count
        total = 0
        i = start
        for i in range(start, (start + count)):
            cost = self._shipyard_get_cost(i)
            if (total + cost) > self._currency:
                # Budget exhausted partway through the requested range.
                if pay:
                    self._currency -= total
                else:
                    logger.info(f'Can only buy up to {(i - start)} '
                                f'of the {count} BPs')
                return i, i - start
            total += cost
        if pay:
            self._currency -= total
        else:
            logger.info(f'Can buy all {count} BPs')
        return i + 1, count
    def _shipyard_buy_calc(self, start, count):
        """
        Shorthand for _shipyard_calculate all information
        is relevant
        """
        return self._shipyard_calculate(start, count, pay=False)
    def _shipyard_pay_calc(self, start, count):
        """
        Shorthand for _shipyard_calculate partial
        information is relevant but most importantly
        finalize payment to _shop_gold_coins
        """
        return self._shipyard_calculate(start, count, pay=True)
    def shipyard_buy(self, count):
        """
        Buy up to the configured number of BPs
        Supports buying in both DEV and FATE
        Args:
            count (int): Total to buy
        """
        logger.hr('shipyard_buy')
        prev = 1
        start, count = self._shipyard_buy_calc(prev, count)
        while count > 0:
            if not self._shipyard_buy_enter() or \
                    self._shipyard_cannot_strengthen():
                break
            remain = self._shipyard_ensure_index(count)
            if remain is None:
                break
            # Record the run timestamp for the rarity being processed.
            if self._shipyard_bp_rarity == 'DR':
                self.config.ShipyardDr_LastRun = datetime.now().replace(microsecond=0)
            else:
                self.config.Shipyard_LastRun = datetime.now().replace(microsecond=0)
            self._shipyard_buy_confirm('BP_BUY')
            # Pay for actual amount bought based on 'remain'
            # which also updates 'start' as a result
            # Save into 'prev' for next _shipyard_pay_calc
            start, _ = self._shipyard_pay_calc(prev, (count - remain))
            prev = start
            start, count = self._shipyard_buy_calc(start, remain)
    def _shipyard_use(self, index):
        """
        Spend all remaining extraneous BPs
        Supports using BPs in both DEV and FATE
        """
        logger.hr('shipyard_use')
        count = self._shipyard_get_bp_count(index)
        while count > 0:
            if not self._shipyard_buy_enter() or \
                    self._shipyard_cannot_strengthen():
                break
            remain = self._shipyard_ensure_index(count)
            if remain is None:
                break
            self._shipyard_buy_confirm('BP_USE')
            count = self._shipyard_get_bp_count(index)
    def shipyard_run(self, series, index, count):
        """
        Runs shop browse operations
        Args:
            series (int): 1-4 inclusively, button location
            index (int): 1-6 inclusively, button location
                some series are restricted to 1-5
            count (int): number to buy after use
        Returns:
            bool: If shop attempted to run
                  thereby transition to respective
                  pages. If no transition took place,
                  then did not run
        """
        if count <= 0:
            return False
        # Gold difficult to Ocr in page_shipyard
        # due to both text and number being
        # right-aligned together
        # Retrieve information from page_main instead
        self.ui_ensure(page_main)
        self.shop_currency()
        self.ui_goto(page_shipyard)
        if not self.shipyard_set_focus(series=series, index=index) \
                or not self._shipyard_buy_enter() \
                or self._shipyard_cannot_strengthen():
            return True
        self._shipyard_use(index=index)
        self.shipyard_buy(count=count)
        return True
    def run(self):
        """
        Pages:
            in: Any page
            out: page_shipyard
        """
        if self.config.Shipyard_BuyAmount <= 0 and self.config.ShipyardDr_BuyAmount <= 0:
            self.config.Scheduler_Enable = False
            self.config.task_stop()
        logger.hr('Shipyard DR', level=1)
        logger.attr('ShipyardDr_LastRun', self.config.ShipyardDr_LastRun)
        if self.config.ShipyardDr_LastRun > get_server_last_update('04:00'):
            logger.warning('Task Shipyard DR has already been run today, skip')
        else:
            self._shipyard_bp_rarity = 'DR'
            self.shipyard_run(series=self.config.ShipyardDr_ResearchSeries,
                              index=self.config.ShipyardDr_ShipIndex,
                              count=self.config.ShipyardDr_BuyAmount)
        logger.hr('Shipyard PR', level=1)
        logger.attr('Shipyard_LastRun', self.config.Shipyard_LastRun)
        if self.config.Shipyard_LastRun > get_server_last_update('04:00'):
            logger.warning('Task Shipyard PR has already been run today, stop')
            self.config.task_delay(server_update=True)
            self.config.task_stop()
        else:
            self._shipyard_bp_rarity = 'PR'
            self.shipyard_run(series=self.config.Shipyard_ResearchSeries,
                              index=self.config.Shipyard_ShipIndex,
                              count=self.config.Shipyard_BuyAmount)
        self.config.task_delay(server_update=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestAbbreviation(TestCase):
    """Attribute, UDA and command names may be abbreviated down to one char."""
    def setUp(self):
        self.t = Task()
        # Allow abbreviations as short as a single character.
        self.t.config("abbreviation.minimum", "1")
        self.t("add project:home priority:H hasattributes")
        self.t("add noattributes")
    def verify_attribute(self, expr):
        # Only the task with attributes should match the filter expression.
        code, out, err = self.t("list {0}".format(expr))
        self.assertIn("hasattributes", out)
        self.assertNotIn("noattributes", out)
    def test_attribute_abbreviations(self):
        "Test project attribute abbrevations"
        self.verify_attribute("project:home")
        self.verify_attribute("projec:home")
        self.verify_attribute("proje:home")
        self.verify_attribute("proj:home")
        self.verify_attribute("pro:home")
    def test_uda_abbreviations(self):
        "Test uda attribute abbrevations"
        # NOTE This will be a UDA when TW-1541 is closed, for now it is just
        # one more attribute
        self.verify_attribute("priority:H")
        self.verify_attribute("priorit:H")
        self.verify_attribute("priori:H")
        self.verify_attribute("prior:H")
        self.verify_attribute("prio:H")
        self.verify_attribute("pri:H")
    def verify_command(self, cmd):
        # Any abbreviation of "version" should print the license banner.
        code, out, err = self.t(cmd)
        self.assertIn("MIT license", out)
    def test_command_abbreviations(self):
        "Test version command abbrevations"
        self.verify_command("version")
        self.verify_command("versio")
        self.verify_command("versi")
        self.verify_command("vers")
        self.verify_command("ver")
        self.verify_command("ve")
        self.verify_command("v")
class TestBug1006(TestCase):
    """Bug with expansion of abbreviation "des" in task descriptions and annotations.
    It happens for all the shortcuts for column attributes that are automatically
    completed. This is because DOM elements are checked before standard words
    when strings are tokenized.
    """
    def setUp(self):
        self.t = Task()
        self.t.config("verbose", "affected")
    def _add_initial_tasks(self):
        # One task whose description and annotation are both the bare
        # attribute shortcut "des".
        self.t("add des")
        self.t("1 annotate des")
    def test_completion_of_des_inactive(self):
        "1006: Check that the completion is inactive in task descriptions"
        self._add_initial_tasks()
        code, out, err = self.t("1 info")
        msg = "Attribute not completed in description"
        self.assertRegex(out, "Description +des\n", msg=msg)
        self.assertNotIn("description", out, msg=msg)
    def test_completion_as_expected(self):
        "1006: Check that the completion works when needed"
        self._add_initial_tasks()
        code, out, err = self.t("des:des")
        self.assertIn("1 task", out, msg="Task found using its description")
    def test_accented_chars(self):
        "1006: Check that é in entrée remains untouched"
        self.t("add entrée interdite")
        code, out, err = self.t("list interdite")
        self.assertIn("entrée interdite", out, msg="'entrée' left intact")
class TestBug1687(TestCase):
    def setUp(self):
        """Executed before each test in the class"""
        self.t = Task()
    def METHOD_NAME(self):
        """1687: The named date 'som' should take precedence over 'someday', for an exact match"""
        # With abbreviation.minimum of 2 or 3, 'som' must parse as a real
        # named date, never as an abbreviation of 'someday' (year 9999).
        self.t("rc.abbreviation.minimum=2 add one due:som")
        code, out, err = self.t("_get 1.due.year")
        self.assertNotEqual("9999\n", out)
        self.t("rc.abbreviation.minimum=3 add two due:som")
        code, out, err = self.t("_get 2.due.year")
        self.assertNotEqual("9999\n", out)
        # Even when 'som' is shorter than the minimum, the exact named-date
        # match must still win over the 'someday' abbreviation.
        self.t("rc.abbreviation.minimum=4 add three due:som")
        code, out, err = self.t("_get 3.due.year")
        self.assertNotEqual("9999\n", out)
        # 'some' is not an exact named date, so it may abbreviate 'someday'.
        # NOTE(review): the description "three" is reused for this fourth
        # task — presumably a copy-paste slip; harmless to the assertions.
        self.t("rc.abbreviation.minimum=4 add three due:some")
        code, out, err = self.t("_get 4.due.year")
        self.assertEqual("9999\n", out)
if __name__ == "__main__":
    # Emit TAP output when run directly; the project's harness consumes TAP.
    from simpletap import TAPTestRunner
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python |
6,719 | write | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.structure.io.npz"
__author__ = "Patrick Kunzmann"
__all__ = ["NpzFile"]
import numpy as np
from ...atoms import Atom, AtomArray, AtomArrayStack
from ...bonds import BondList
from ....file import File, is_binary
class NpzFile(File):
r"""
This class represents a NPZ file, the preferable format for
Biotite internal structure storage.
Internally the this class writes/reads all attribute arrays of an
:class:`AtomArray` or :class:`AtomArrayStack` using the *NumPy*
:func:`save()`/:func:`load()`
method. This format offers the fastest I/O operations and completely
preserves the content all atom annotation arrays.
Examples
--------
Load a \\*.npz file, modify the structure and save the new
structure into a new file:
>>> import os.path
>>> file = NpzFile.read(os.path.join(path_to_structures, "1l2y.npz"))
>>> array_stack = file.get_structure()
>>> array_stack_mod = rotate(array_stack, [1,2,3])
>>> file = NpzFile()
>>> file.set_structure(array_stack_mod)
>>> file.write(os.path.join(path_to_directory, "1l2y_mod.npz"))
"""
    def __init__(self):
        super().__init__()
        # Mapping of array names to ndarray data, filled by read() or
        # set_structure(); None means "no data loaded yet".
        self._data_dict = None
def __copy_fill__(self, clone):
super().__copy_fill__(clone)
if self._data_dict is not None:
for key, value in self._data_dict.items():
clone._data_dict[key] = np.copy(value)
@classmethod
def read(cls, file):
"""
Read a NPZ file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : NPZFile
The parsed file.
"""
npz_file = NpzFile()
# File name
if isinstance(file, str):
with open(file, "rb") as f:
npz_file._data_dict = dict(np.load(f, allow_pickle=False))
# File object
else:
if not is_binary(file):
raise TypeError("A file opened in 'binary' mode is required")
npz_file._data_dict = dict(np.load(file, allow_pickle=False))
return npz_file
def METHOD_NAME(self, file):
"""
Write a NPZ file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively, a file path can be supplied.
"""
if isinstance(file, str):
with open(file, "wb") as f:
np.savez(f, **self._data_dict)
else:
if not is_binary(file):
raise TypeError("A file opened in 'binary' mode is required")
np.savez(file, **self._data_dict)
def get_structure(self):
"""
Get an :class:`AtomArray` or :class:`AtomArrayStack` from the
file.
If this method returns an array or stack depends on which type
of object was used when the file was written.
Returns
-------
array : AtomArray or AtomArrayStack
The array or stack contained in this file.
"""
if self._data_dict is None:
raise ValueError("The structure of this file "
"has not been loaded or set yet")
coord = self._data_dict["coord"]
# The type of the structure is determined by the dimensionality
# of the 'coord' field
if len(coord.shape) == 3:
array = AtomArrayStack(coord.shape[0], coord.shape[1])
else:
array = AtomArray(coord.shape[0])
for key, value in self._data_dict.items():
if key == "coord":
array.coord = value
elif key == "bonds":
array.bonds = BondList(array.array_length(), value)
elif key == "box":
array.box = value
else:
array.set_annotation(key, value)
return array
def set_structure(self, array):
"""
Set the :class:`AtomArray` or :class:`AtomArrayStack` for the
file.
Parameters
----------
array : AtomArray or AtomArrayStack
The array or stack to be saved into this file.
"""
self._data_dict = {}
self._data_dict["coord"] = np.copy(array.coord)
if array.bonds is not None:
self._data_dict["bonds"] = array.bonds.as_array()
if array.box is not None:
self._data_dict["box"] = np.copy(array.box)
for annot in array.get_annotation_categories():
self._data_dict[annot] = np.copy(array.get_annotation(annot) |
6,720 | generate licenses | #!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to generate libwebrtc.aar for distribution.
The script has to be run from the root src folder.
./tools_webrtc/android/build_aar.py
.aar-file is just a zip-archive containing the files of the library. The file
structure generated by this script looks like this:
- AndroidManifest.xml
- classes.jar
- libs/
- armeabi-v7a/
- libjingle_peerconnection_so.so
- x86/
- libjingle_peerconnection_so.so
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
DEFAULT_ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
NEEDED_SO_FILES = ['libjingle_peerconnection_so.so']
JAR_FILE = 'lib.java/sdk/android/libwebrtc.jar'
MANIFEST_FILE = 'sdk/android/AndroidManifest.xml'
TARGETS = [
'sdk/android:libwebrtc',
'sdk/android:libjingle_peerconnection_so',
]
sys.path.append(os.path.join(SCRIPT_DIR, '..', 'libs'))
from generate_licenses import LicenseBuilder
sys.path.append(os.path.join(SRC_DIR, 'build'))
import find_depot_tools
def _ParseArgs():
    """Builds and evaluates the command-line options of build_aar.py.

    Returns the parsed argparse namespace. Note: the registration order of
    the arguments determines the order in --help output.
    """
    parser = argparse.ArgumentParser(description='libwebrtc.aar generator.')
    parser.add_argument('--build-dir',
        help='Build dir. By default will create and use temporary dir.')
    parser.add_argument('--output', default='libwebrtc.aar',
        help='Output file of the script.')
    # nargs='*' lets the caller pass any subset of DEFAULT_ARCHS.
    parser.add_argument('--arch', default=DEFAULT_ARCHS, nargs='*',
        help='Architectures to build. Defaults to %(default)s.')
    parser.add_argument('--use-goma', action='store_true', default=False,
        help='Use goma.')
    parser.add_argument('--verbose', action='store_true', default=False,
        help='Debug logging.')
    parser.add_argument('--extra-gn-args', default=[], nargs='*',
        help="""Additional GN arguments to be used during Ninja generation.
                These are passed to gn inside `--args` switch and
                applied after any other arguments and will
                override any values defined by the script.
                Example of building debug aar file:
                build_aar.py --extra-gn-args='is_debug=true'""")
    parser.add_argument('--extra-ninja-switches', default=[], nargs='*',
        help="""Additional Ninja switches to be used during compilation.
                These are applied after any other Ninja switches.
                Example of enabling verbose Ninja output:
                build_aar.py --extra-ninja-switches='-v'""")
    parser.add_argument('--extra-gn-switches', default=[], nargs='*',
        help="""Additional GN switches to be used during compilation.
                These are applied after any other GN switches.
                Example of enabling verbose GN output:
                build_aar.py --extra-gn-switches='-v'""")
    return parser.parse_args()
def _RunGN(args):
    """Invokes depot_tools' gn.py with the given argument list."""
    cmd = [sys.executable,
           os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')] + args
    logging.debug('Running: %r', cmd)
    subprocess.check_call(cmd)
def _RunNinja(output_directory, args):
    """Runs ninja in output_directory with the given extra arguments."""
    cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'),
           '-C', output_directory] + args
    logging.debug('Running: %r', cmd)
    subprocess.check_call(cmd)
def _EncodeForGN(value):
"""Encodes value as a GN literal."""
if isinstance(value, str):
return '"' + value + '"'
elif isinstance(value, bool):
return repr(value).lower()
else:
return repr(value)
def _GetOutputDirectory(build_dir, arch):
    """Returns the GN output directory for the target architecture."""
    # One GN out-dir per ABI, e.g. <build_dir>/arm64-v8a.
    return os.path.join(build_dir, arch)
def _GetTargetCpu(arch):
"""Returns target_cpu for the GN build with the given architecture."""
if arch in ['armeabi', 'armeabi-v7a']:
return 'arm'
elif arch == 'arm64-v8a':
return 'arm64'
elif arch == 'x86':
return 'x86'
elif arch == 'x86_64':
return 'x64'
else:
raise Exception('Unknown arch: ' + arch)
def _GetArmVersion(arch):
"""Returns arm_version for the GN build with the given architecture."""
if arch == 'armeabi':
return 6
elif arch == 'armeabi-v7a':
return 7
elif arch in ['arm64-v8a', 'x86', 'x86_64']:
return None
else:
raise Exception('Unknown arch: ' + arch)
def Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches,
          extra_ninja_switches):
    """Generates target architecture using GN and builds it using ninja."""
    logging.info('Building: %s', arch)
    output_directory = _GetOutputDirectory(build_dir, arch)
    # Base release configuration. extra_gn_args are appended after these in
    # the --args string, so they override any value set here.
    gn_args = {
        'target_os': 'android',
        'is_debug': False,
        'is_component_build': False,
        'rtc_include_tests': False,
        'target_cpu': _GetTargetCpu(arch),
        'use_goma': use_goma
    }
    arm_version = _GetArmVersion(arch)
    if arm_version:
        gn_args['arm_version'] = arm_version
    gn_args_str = '--args=' + ' '.join([
        k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args)
    gn_args_list = ['gen', output_directory, gn_args_str]
    gn_args_list.extend(extra_gn_switches)
    _RunGN(gn_args_list)
    ninja_args = TARGETS[:]
    if use_goma:
        # Goma offloads compilation, so high parallelism pays off.
        ninja_args.extend(['-j', '200'])
    ninja_args.extend(extra_ninja_switches)
    _RunNinja(output_directory, ninja_args)
def CollectCommon(aar_file, build_dir, arch):
    """Collects architecture independent files into the .aar-archive."""
    logging.info('Collecting common files.')
    out_dir = _GetOutputDirectory(build_dir, arch)
    # The manifest comes from the source tree; the jar is a build product.
    aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml')
    aar_file.write(os.path.join(out_dir, JAR_FILE), 'classes.jar')
def Collect(aar_file, build_dir, arch):
    """Collects architecture specific files into the .aar-archive."""
    logging.info('Collecting: %s', arch)
    out_dir = _GetOutputDirectory(build_dir, arch)
    # Shared objects live under jni/<abi>/ inside the archive.
    abi_dir = os.path.join('jni', arch)
    for so_file in NEEDED_SO_FILES:
        source = os.path.join(out_dir, so_file)
        aar_file.write(source, os.path.join(abi_dir, so_file))
def METHOD_NAME(output_dir, build_dir, archs):
    """Generates the third-party license file for all built targets
    into output_dir, scanning every per-arch GN output directory."""
    builder = LicenseBuilder(
        [_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS)
    builder.GenerateLicenseText(output_dir)
def BuildAar(archs, output_file, use_goma=False, extra_gn_args=None,
             ext_build_dir=None, extra_gn_switches=None,
             extra_ninja_switches=None):
    """Builds all architectures and assembles them into one .aar file.

    If ext_build_dir is not given, a temporary build dir is created and
    removed again at the end.
    """
    extra_gn_args = extra_gn_args or []
    extra_gn_switches = extra_gn_switches or []
    extra_ninja_switches = extra_ninja_switches or []
    build_dir = ext_build_dir if ext_build_dir else tempfile.mkdtemp()
    for arch in archs:
        Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches,
              extra_ninja_switches)
    with zipfile.ZipFile(output_file, 'w') as aar_file:
        # Architecture doesn't matter here, arbitrarily using the first one.
        CollectCommon(aar_file, build_dir, archs[0])
        for arch in archs:
            Collect(aar_file, build_dir, arch)
    # The license file is written next to the produced .aar.
    license_dir = os.path.dirname(os.path.realpath(output_file))
    METHOD_NAME(license_dir, build_dir, archs)
    if not ext_build_dir:
        shutil.rmtree(build_dir, True)
def main():
    """Entry point: parse options, set up logging and build the archive."""
    options = _ParseArgs()
    log_level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(level=log_level)
    BuildAar(options.arch, options.output, options.use_goma,
             options.extra_gn_args, options.build_dir,
             options.extra_gn_switches, options.extra_ninja_switches)
if __name__ == '__main__':
    sys.exit(main())
6,721 | post | import os
import json
import time
import uuid
import tornado.web
import tornado.escape
from .basehandler import BaseHandler
from gramex.config import app_log
from gramex.http import BAD_REQUEST
# JSONHandler data is stored in store. Each handler is specified with a path.
# store[path] holds the full data for that handler. It is saved in path at the
# end of each request (if the data has changed.) The time data was last synced is
# stored in _loaded[path].
store = {}  # Contents of the JSON data stores
_loaded = {}  # Time when persistent stores were last loaded
_jsonstores = store  # Internal legacy alias for store
class JSONHandler(BaseHandler):
    '''REST API handler that reads/writes JSON data, optionally persisted
    to a file (see ``setup``).'''

    @classmethod
    def setup(cls, path: str = None, data: str = None, **kwargs):
        '''
        Provides a REST API for managing and persisting JSON data.
        Sample URL configuration
        ```yaml
        pattern: /$YAMLURL/data/(.*)
        handler: JSONHandler
        kwargs:
          path: $YAMLPATH/data.json
        ```
        Parameters:
            path: optional file where the JSON data is persisted. If not
                specified, the JSON data is not persisted.
            data: optional initial dataset, used only if path is not
                specified. Defaults to null
        '''
        super(JSONHandler, cls).setup(**kwargs)
        cls.path = path
        cls.default_data = data
        # Compact separators keep the persisted file small; ensure_ascii
        # avoids dependence on the file's encoding.
        cls.json_kwargs = {
            'ensure_ascii': True,
            'separators': (',', ':'),
        }

    def parse_body_as_json(self):
        # Decode the request body as JSON; reject malformed payloads with
        # an HTTP 400 instead of a 500.
        try:
            return tornado.escape.json_decode(self.request.body)
        except ValueError:
            raise tornado.web.HTTPError(BAD_REQUEST, 'Bad JSON')

    def jsonwalk(self, jsonpath, create=False):
        '''Return a parent, key, value from the JSON store where parent[key] == value'''
        # Load data from self.path JSON file if it's specified, exists, and newer than last load.
        # Otherwise, load the default data provided.
        if self.path:
            path = self.path
            _jsonstores.setdefault(path, None)
            self.changed = False
            if os.path.exists(path):
                # Re-read only if the file changed since the last load.
                if _loaded.get(path, 0) <= os.stat(path).st_mtime:
                    # Don't use encoding when reading JSON. We're using ensure_ascii=True
                    # Besides, when handling Py2 & Py3, just ignoring encoding works best
                    with open(path, mode='r') as handle:
                        try:
                            _jsonstores[path] = json.load(handle)
                            _loaded[path] = time.time()
                        except ValueError:
                            app_log.warning(f'Invalid JSON in {path}')
                            # Mark dirty so a clean copy is written back.
                            self.changed = True
            else:
                # File missing: write the store out at the end of the request.
                self.changed = True
        else:
            # No persistence: key the in-memory store by the handler name.
            path = self.name
            _jsonstores.setdefault(path, self.default_data)
        # Walk down the path and find the parent, key and data represented by jsonpath
        parent, key, data = _jsonstores, path, _jsonstores[path]
        if not jsonpath:
            return parent, key, data
        # Split jsonpath by / -- but escape "\/" (or "%5C/") as part of the keys
        # (\udfff is used as a private placeholder that cannot occur in input).
        keys = [p.replace('\udfff', '/') for p in jsonpath.replace(r'\/', '\udfff').split('/')]
        keys.insert(0, path)
        for index, key in enumerate(keys[1:]):
            if hasattr(data, '__contains__') and key in data:
                parent, data = data, data[key]
                continue
            # Numeric keys act as list indices when the container is a list.
            if isinstance(data, list) and key.isdigit():
                key = int(key)
                if key < len(data):
                    parent, data = data, data[key]
                    continue
            if create:
                # Replace non-container nodes with a dict so the remaining
                # path can be created underneath.
                # NOTE(review): keys[index] is the *previous* key here because
                # enumerate runs over keys[1:] — presumably intentional.
                if not hasattr(data, '__contains__'):
                    parent[keys[index]] = data = {}
                data[key] = {}
                parent, data = data, data[key]
                continue
            # Path component not found and create is False.
            return parent, key, None
        return parent, key, data

    def initialize(self, **kwargs):
        super(JSONHandler, self).initialize(**kwargs)
        # All responses from this handler are JSON.
        self.set_header('Content-Type', 'application/json')

    def get(self, jsonpath):
        '''Return the JSON data at jsonpath. Return null for invalid paths.'''
        parent, key, data = self.jsonwalk(jsonpath, create=False)
        self.write(json.dumps(data, **self.json_kwargs))

    def METHOD_NAME(self, jsonpath):
        '''Add data as a new unique key under jsonpath. Return {name: new_key}'''
        parent, key, data = self.jsonwalk(jsonpath, create=True)
        if self.request.body:
            if data is None:
                parent[key] = data = {}
            # Generate a collision-free key for the new entry.
            new_key = str(uuid.uuid4())
            data[new_key] = self.parse_body_as_json()
            self.write(json.dumps({'name': new_key}, **self.json_kwargs))
            self.changed = True
        else:
            self.write(json.dumps(None))

    def put(self, jsonpath):
        '''Set JSON data at jsonpath. Return the data provided'''
        parent, key, data = self.jsonwalk(jsonpath, create=True)
        if self.request.body:
            data = parent[key] = self.parse_body_as_json()
            self.write(json.dumps(data, **self.json_kwargs))
            self.changed = True
        else:
            self.write(json.dumps(None))

    def patch(self, jsonpath):
        '''Update JSON data at jsonpath. Return the data provided'''
        parent, key, data = self.jsonwalk(jsonpath)
        if data is not None:
            data = self.parse_body_as_json()
            parent[key].update(data)
            self.changed = True
        self.write(json.dumps(data, **self.json_kwargs))

    def delete(self, jsonpath):
        '''Delete data at jsonpath. Return null'''
        parent, key, data = self.jsonwalk(jsonpath)
        if data is not None:
            del parent[key]
            self.changed = True
        self.write('null')

    def on_finish(self):
        # Write data to disk if changed. on_finish is called after writing the
        # data, so the client is not waiting for the response.
        if self.path and getattr(self, 'changed', False):
            folder = os.path.dirname(os.path.abspath(self.path))
            if not os.path.exists(folder):
                os.makedirs(folder)
            # Don't use encoding when reading JSON. We use ensure_ascii=True.
            # When handling Py2 & Py3, just ignoring encoding works best.
            with open(self.path, mode='w') as handle:
                json.dump(_jsonstores.get(self.path), handle, **self.json_kwargs)
            _loaded[self.path] = time.time()
        super(JSONHandler, self).on_finish()
6,722 | get job iam policy output | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetJobIamPolicyResult',
'AwaitableGetJobIamPolicyResult',
'get_job_iam_policy',
'get_job_iam_policy_output',
]
@pulumi.output_type
class GetJobIamPolicyResult:
    """
    A collection of values returned by getJobIamPolicy.
    """
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); keep the
    # structure in sync with the generator rather than restyling by hand.
    def __init__(__self__, etag=None, id=None, location=None, name=None, policy_data=None, project=None):
        # Every field is validated to be a str so that a malformed invoke
        # result fails fast with a TypeError.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if policy_data and not isinstance(policy_data, str):
            raise TypeError("Expected argument 'policy_data' to be a str")
        pulumi.set(__self__, "policy_data", policy_data)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        (Computed) The etag of the IAM policy.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="policyData")
    def policy_data(self) -> str:
        """
        (Required only by `cloudrunv2.JobIamPolicy`) The policy data generated by
        a `organizations_get_iam_policy` data source.
        """
        return pulumi.get(self, "policy_data")

    @property
    @pulumi.getter
    def project(self) -> str:
        return pulumi.get(self, "project")
class AwaitableGetJobIamPolicyResult(GetJobIamPolicyResult):
    """Awaitable variant of GetJobIamPolicyResult: `await`-ing it simply
    yields a plain result object with the same field values."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function,
        # which is what the await protocol requires.
        if False:
            yield self
        return GetJobIamPolicyResult(
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            policy_data=self.policy_data,
            project=self.project)
def get_job_iam_policy(location: Optional[str] = None,
                       name: Optional[str] = None,
                       project: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobIamPolicyResult:
    """
    Retrieves the current IAM policy data for job
    ## example
    ```python
    import pulumi
    import pulumi_gcp as gcp
    policy = gcp.cloudrunv2.get_job_iam_policy(project=google_cloud_run_v2_job["default"]["project"],
        location=google_cloud_run_v2_job["default"]["location"],
        name=google_cloud_run_v2_job["default"]["name"])
    ```
    :param str location: The location of the cloud run job Used to find the parent resource to bind the IAM policy to
    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    # Generated invoke plumbing: forward the arguments to the provider and
    # unpack the returned fields.
    __args__ = dict()
    __args__['location'] = location
    __args__['name'] = name
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:cloudrunv2/getJobIamPolicy:getJobIamPolicy', __args__, opts=opts, typ=GetJobIamPolicyResult).value
    return AwaitableGetJobIamPolicyResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        policy_data=pulumi.get(__ret__, 'policy_data'),
        project=pulumi.get(__ret__, 'project'))
@_utilities.lift_output_func(get_job_iam_policy)
def METHOD_NAME(location: Optional[pulumi.Input[Optional[str]]] = None,
                name: Optional[pulumi.Input[str]] = None,
                project: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobIamPolicyResult]:
    """
    Retrieves the current IAM policy data for job
    ## example
    ```python
    import pulumi
    import pulumi_gcp as gcp
    policy = gcp.cloudrunv2.get_job_iam_policy(project=google_cloud_run_v2_job["default"]["project"],
        location=google_cloud_run_v2_job["default"]["location"],
        name=google_cloud_run_v2_job["default"]["name"])
    ```
    :param str location: The location of the cloud run job Used to find the parent resource to bind the IAM policy to
    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    # The decorator lifts get_job_iam_policy into the Output world; the
    # body is intentionally empty.
    ...
6,723 | test get status unreliable | import unittest
from klampt.sim import *
from klampt import vis
class robotsimTest(unittest.TestCase):
def setUp(self):
self.world = WorldModel()
def test_getStatus_unstable(self):
self.world.loadRobot('data/robots/swingup.rob')
self.assertEqual(self.world.numRobots(),1)
sim = SimpleSimulator(self.world)
robot = self.world.robot(0)
sim.controller(0).setPIDCommand(robot.getConfig(), robot.getVelocity())
for i in range(10):
sim.simulate(0.01)
self.assertEqual(Simulator.STATUS_NORMAL, sim.getStatus(), sim.getStatusString())
sim.controller(0).setPIDGains([0.0], [0.0], [0.0])
robot.setTorqueLimits([1e6])
robot.setVelocityLimits([1e18])
robot.setAccelerationLimits([1e18])
sim.controller(0).setTorque([1e6])
sim.simulate(0.01)
self.assertEqual(Simulator.STATUS_UNSTABLE, sim.getStatus(), '%f is the final velocity for the joint, should be infinite'%sim.getActualVelocity(0)[0])
def test_getStatus_status_is_matching(self):
self.world.loadTerrain('data/terrains/plane.off')
self.world.loadRobot('data/robots/pr2gripper.rob')
self.world.loadRigidObject('data/objects/sphere_5cm.obj')
self.assertEqual(self.world.numRobots(),1)
self.assertEqual(self.world.numTerrains(),1)
self.assertEqual(self.world.numRigidObjects(),1)
robot = self.world.robot(0)
sphere = self.world.rigidObject(0)
pt = robot.link(0).getParentTransform()
ct = sphere.getTransform()
sphere.setTransform(ct[0], [0.14, 0, 0.028])
robot.link(0).setParentTransform(pt[0], [.0, .0, 0.03])
robot.setConfig([0.0, 1.0491851842008302, 1.0491875315795012, -1.0491852558664032, 1.0491875316019472])
robot.setTorqueLimits([100, 100, 50, 100, 50])
sim = SimpleSimulator(self.world)
c = sim.controller(0)
c.setPIDCommand([1.0], [.0])
c.setPIDGains([15], [50], [0.1])
c.setPIDCommand([0], [0])
sim.simulate(0.1)
if sim.getStatus() == Simulator.STATUS_UNSTABLE:
self.assertEqual("unstable", sim.getStatusString())
else:
print "Warning, test test_getStatus_status_is_matching is useless as the simulation is now stable"
def test_getStatus_adaptive_time_stepping(self):
self.world.loadTerrain('data/terrains/plane.off')
self.world.loadRigidObject('data/objects/sphere_5cm.obj')
self.assertEqual(self.world.numRobots(),1)
self.assertEqual(self.world.numRigidObjects(),1)
sphere = self.world.rigidObject(0)
mass = sphere.getMass()
mass.setMass(100)
mass.setInertia([100,100,100])
sphere.setMass(mass)
#need to set up contact parameters so the mesh doesn't just fall through too quickly
cparams = sphere.getContactParameters()
cparams.kStiffness = 10000.0
cparams.kDamping = 2500.0
sphere.setContactParameters(cparams)
hadAdaptiveTimeStepping = False
ct = sphere.getTransform()
sphere.setTransform(ct[0], [0, 0, 0.0263])
sim = SimpleSimulator(self.world)
for i in range(20):
sim.simulate(0.01)
print "CURRENT STATUS",sim.getStatus()
if sim.getStatus() == Simulator.STATUS_ADAPTIVE_TIME_STEPPING:
hadAdaptiveTimeStepping = True
self.assertEqual(hadAdaptiveTimeStepping,True)
def METHOD_NAME(self):
self.world.loadTerrain('data/terrains/plane.off')
self.world.loadRigidObject('data/objects/sphere_5cm.obj')
self.assertEqual(self.world.numTerrains(),1)
self.assertEqual(self.world.numRigidObjects(),1)
sphere = self.world.rigidObject(0)
mass = sphere.getMass()
mass.setMass(1000)
sphere.setMass(mass)
ct = sphere.getTransform()
sphere.setTransform(ct[0], [0, 0, 0.0263])
sim = SimpleSimulator(self.world)
sim.simulate(0.1)
self.assertEqual(Simulator.STATUS_CONTACT_UNRELIABLE, sim.getStatus())
if __name__ == '__main__':
unittest.main( |
6,724 | test get build packages | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from textwrap import dedent
from snapcraft_legacy.plugins.v2.python import PythonPlugin
def test_schema():
    """The plugin schema must accept exactly three list-valued properties:
    constraints, python-packages and requirements."""
    assert PythonPlugin.get_schema() == {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "constraints": {
                "default": [],
                "items": {"type": "string"},
                "type": "array",
                "uniqueItems": True,
            },
            "python-packages": {
                # pip/setuptools/wheel are installed into the venv by default.
                "default": ["pip", "setuptools", "wheel"],
                "items": {"type": "string"},
                "type": "array",
                "uniqueItems": True,
            },
            "requirements": {
                "default": [],
                "items": {"type": "string"},
                "type": "array",
                "uniqueItems": True,
            },
        },
    }
def METHOD_NAME():
    """The plugin must request the OS packages needed to build a venv."""
    python_plugin = PythonPlugin(part_name="my-part", options=lambda: None)
    expected = {"findutils", "python3-venv", "python3-dev"}
    assert python_plugin.get_build_packages() == expected
def test_get_build_environment():
    """The plugin must put the venv bin dir on PATH and expose the
    interpreter settings."""
    python_plugin = PythonPlugin(part_name="my-part", options=lambda: None)
    expected = {
        "PATH": "${SNAPCRAFT_PART_INSTALL}/bin:${PATH}",
        "SNAPCRAFT_PYTHON_INTERPRETER": "python3",
        "SNAPCRAFT_PYTHON_VENV_ARGS": "",
    }
    assert python_plugin.get_build_environment() == expected
_FIXUP_BUILD_COMMANDS = [
dedent(
"""\
find "${SNAPCRAFT_PART_INSTALL}" -type f -executable -print0 | xargs -0 \
sed -i "1 s|^#\\!${SNAPCRAFT_PYTHON_VENV_INTERP_PATH}.*$|#\\!/usr/bin/env ${SNAPCRAFT_PYTHON_INTERPRETER}|"
"""
),
dedent(
"""\
determine_link_target() {
opts_state="$(set +o +x | grep xtrace)"
interp_dir="$(dirname "${SNAPCRAFT_PYTHON_VENV_INTERP_PATH}")"
# Determine python based on PATH, then resolve it, e.g:
# (1) /home/ubuntu/.venv/snapcraft/bin/python3 -> /usr/bin/python3.8
# (2) /usr/bin/python3 -> /usr/bin/python3.8
# (3) /root/stage/python3 -> /root/stage/python3.8
# (4) /root/parts/<part>/install/usr/bin/python3 -> /root/parts/<part>/install/usr/bin/python3.8
python_path="$(which "${SNAPCRAFT_PYTHON_INTERPRETER}")"
python_path="$(readlink -e "${python_path}")"
for dir in "${SNAPCRAFT_PART_INSTALL}" "${SNAPCRAFT_STAGE}"; do
if echo "${python_path}" | grep -q "${dir}"; then
python_path="$(realpath --strip --relative-to="${interp_dir}" \\
"${python_path}")"
break
fi
done
echo "${python_path}"
eval "${opts_state}"
}
python_path="$(determine_link_target)"
ln -sf "${python_path}" "${SNAPCRAFT_PYTHON_VENV_INTERP_PATH}"
"""
),
]
def test_get_build_commands():
    """With no options set, only venv setup and the setup.py install run."""

    class _EmptyOptions:
        constraints = []
        requirements = []
        python_packages = []

    python_plugin = PythonPlugin(part_name="my-part", options=_EmptyOptions())
    expected = [
        '"${SNAPCRAFT_PYTHON_INTERPRETER}" -m venv ${SNAPCRAFT_PYTHON_VENV_ARGS} "${SNAPCRAFT_PART_INSTALL}"',
        'SNAPCRAFT_PYTHON_VENV_INTERP_PATH="${SNAPCRAFT_PART_INSTALL}/bin/${SNAPCRAFT_PYTHON_INTERPRETER}"',
        "[ -f setup.py ] && pip install -U .",
    ] + _FIXUP_BUILD_COMMANDS
    assert python_plugin.get_build_commands() == expected
def test_get_build_commands_with_all_properties():
    """Constraints/requirements/packages must all appear, properly quoted."""

    class _FullOptions:
        constraints = ["constraints.txt"]
        requirements = ["requirements.txt"]
        python_packages = ["pip", "some-pkg; sys_platform != 'win32'"]

    python_plugin = PythonPlugin(part_name="my-part", options=_FullOptions())
    expected = [
        '"${SNAPCRAFT_PYTHON_INTERPRETER}" -m venv ${SNAPCRAFT_PYTHON_VENV_ARGS} "${SNAPCRAFT_PART_INSTALL}"',
        'SNAPCRAFT_PYTHON_VENV_INTERP_PATH="${SNAPCRAFT_PART_INSTALL}/bin/${SNAPCRAFT_PYTHON_INTERPRETER}"',
        "pip install -c 'constraints.txt' -U pip 'some-pkg; sys_platform != '\"'\"'win32'\"'\"''",
        "pip install -c 'constraints.txt' -U -r 'requirements.txt'",
        "[ -f setup.py ] && pip install -c 'constraints.txt' -U .",
    ] + _FIXUP_BUILD_COMMANDS
    assert python_plugin.get_build_commands() == expected
6,725 | get duplicates | #!/usr/bin/env python3
"""
Checks that all of the "catch_foo_all.hpp" headers include all subheaders.
The logic is simple: given a folder, e.g. `catch2/matchers`, then the
ccorresponding header is called `catch_matchers_all.hpp` and contains
* all headers in `catch2/matchers`,
* all headers in `catch2/matchers/{internal, detail}`,
* all convenience catch_matchers_*_all.hpp headers from any non-internal subfolders
The top level header is called `catch_all.hpp`.
"""
# Subdirectories whose headers are implementation details and are therefore
# folded into the parent folder's convenience header.
internal_dirs = ['detail', 'internal']
from scriptCommon import catchPath
from glob import glob
from pprint import pprint
import os
import re
def normalized_path(path):
"""Replaces \ in paths on Windows with /"""
return path.replace('\\', '/')
def normalized_paths(paths):
    r"""Return a list with '\' replaced by '/' in every path of *paths*.

    Fix: raw-string docstring — the previous non-raw docstring contained the
    invalid escape sequence ``\ `` (SyntaxWarning from Python 3.12).
    """
    return [normalized_path(path) for path in paths]
source_path = catchPath + '/src/catch2'
source_path = normalized_path(source_path)
include_parser = re.compile(r'#include <(catch2/.+\.hpp)>')
errors_found = False
def headers_in_folder(folder):
    """Return every .hpp file that sits directly inside *folder*."""
    pattern = folder + '/*.hpp'
    return glob(pattern)
def folders_in_folder(folder):
    """Return os.DirEntry objects for the immediate subdirectories of *folder*."""
    entries = os.scandir(folder)
    return [entry for entry in entries if entry.is_dir()]
def collated_includes(folder):
    """Collect the sorted, normalized set of headers that *folder*'s
    convenience header is expected to include.

    Internal subfolders contribute all their headers directly; other
    subfolders contribute only their own convenience header.
    """
    expected = headers_in_folder(folder)
    for sub in folders_in_folder(folder):
        if sub.name in internal_dirs:
            expected += headers_in_folder(sub.path)
        else:
            expected.append('{}/catch_{}_all.hpp'.format(sub.path, sub.name))
    return normalized_paths(sorted(expected))
def includes_from_file(header):
    """Parse *header* and return the <catch2/...> paths it #includes."""
    found = []
    with open(header, 'r', encoding='utf-8') as source:
        for raw_line in source:
            if raw_line.startswith('#include'):
                match = include_parser.match(raw_line)
                if match:
                    found.append(match.group(1))
    return normalized_paths(found)
def normalize_includes(includes):
    """Strip the absolute '<catchPath>/src/' prefix from every path,
    turning it into an include path of the form 'catch2/...'."""
    # +5 accounts for the '/src/' segment that follows catchPath.
    return [include[len(catchPath)+5:] for include in includes]
def METHOD_NAME(xs):
    """Return the elements of *xs* that were already seen earlier in the
    sequence, one entry per repeated occurrence, in encounter order."""
    seen = set()
    dups = []
    for item in xs:
        if item in seen:
            dups.append(item)
        else:
            seen.add(item)
    return dups
def verify_convenience_header(folder):
    """
    Performs the actual checking of convenience header for specific folder.

    *folder* is an os.DirEntry for a directory under src/catch2.

    Checks that
    1) The header even exists
    2) That all includes in the header are sorted
    3) That there are no duplicated includes
    4) That all includes that should be in the header are actually present in the header
    5) That there are no superfluous includes that should not be in the header
    """
    # Errors are reported via the module-level flag so the script can keep
    # checking remaining folders and still exit non-zero at the end.
    global errors_found
    path = normalized_path(folder.path)
    assert path.startswith(source_path), '{} does not start with {}'.format(path, source_path)
    # The path relative to src/catch2 determines the header's name, e.g.
    # 'matchers/internal' -> catch_matchers_internal_all.hpp.
    stripped_path = path[len(source_path) + 1:]
    path_pieces = stripped_path.split('/')
    if path == source_path:
        header_name = 'catch_all.hpp'
    else:
        header_name = 'catch_{}_all.hpp'.format('_'.join(path_pieces))
    # 1) Does it exist?
    full_path = path + '/' + header_name
    if not os.path.isfile(full_path):
        errors_found = True
        print('Missing convenience header: {}'.format(full_path))
        # Without the header, the remaining checks are meaningless.
        return
    file_incs = includes_from_file(path + '/' + header_name)
    # 2) Are the includes are sorted?
    if sorted(file_incs) != file_incs:
        errors_found = True
        print("'{}': Includes are not in sorted order!".format(header_name))
    # 3) Are there no duplicates?
    duplicated = METHOD_NAME(file_incs)
    for duplicate in duplicated:
        errors_found = True
        print("'{}': Duplicated include: '{}'".format(header_name, duplicate))
    target_includes = normalize_includes(collated_includes(path))
    # Avoid requiring the convenience header to include itself
    target_includes = [x for x in target_includes if header_name not in x]
    # 4) Are all required headers present?
    file_incs_set = set(file_incs)
    for include in target_includes:
        # catch_windows_h_proxy.hpp is deliberately exempt from this check.
        if (include not in file_incs_set and
                include != 'catch2/internal/catch_windows_h_proxy.hpp'):
            errors_found = True
            print("'{}': missing include '{}'".format(header_name, include))
    # 5) Are there any superfluous headers?
    desired_set = set(target_includes)
    for include in file_incs:
        if include not in desired_set:
            errors_found = True
            print("'{}': superfluous include '{}'".format(header_name, include))
def walk_source_folders(current):
    """Recursively verify the convenience header of *current* (an os.DirEntry)
    and of every non-internal subfolder beneath it."""
    verify_convenience_header(current)
    for folder in folders_in_folder(current.path):
        fname = folder.name
        # detail/internal folders have no convenience header of their own.
        if fname not in internal_dirs:
            walk_source_folders(folder)
# This is an ugly hack because we cannot instantiate DirEntry manually
base_dir = [x for x in os.scandir(catchPath + '/src') if x.name == 'catch2']
walk_source_folders(base_dir[0])
# Propagate error "code" upwards
if not errors_found:
print('Everything ok')
exit(errors_found) |
6,726 | test default valid multi line | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import numpy as np
from skbio.util import get_data_path, assert_data_frame_almost_equal
from skbio.io.format.blast6 import _blast6_to_data_frame
class TestBlast6Reader(unittest.TestCase):
    """Unit tests for ``_blast6_to_data_frame`` (BLAST+ outfmt 6 reader)."""

    def test_default_valid_single_line(self):
        """A single default-format line yields one row with the 12 BLAST columns."""
        fp = get_data_path('blast6_default_single_line')
        df = _blast6_to_data_frame(fp, default_columns=True)
        exp = pd.DataFrame([['query1', 'subject2', 75.0, 8.0, 2.0, 0.0, 1.0,
                             8.0, 2.0, 9.0, 0.06, 11.5]],
                           columns=['qseqid', 'sseqid', 'pident', 'length',
                                    'mismatch', 'gapopen', 'qstart', 'qend',
                                    'sstart', 'send', 'evalue', 'bitscore'])
        assert_data_frame_almost_equal(df, exp)

    def METHOD_NAME(self):
        """Multiple default-format lines parse into one row each."""
        fp = get_data_path('blast6_default_multi_line')
        df = _blast6_to_data_frame(fp, default_columns=True)
        exp = pd.DataFrame([['query1', 'subject2', 100.00, 8.0, 0.0, 0.0, 1.0,
                             8.0, 3.0, 10.0, 9e-05, 16.9],
                            ['query1', 'subject2', 75.00, 8.0, 2.0, 0.0, 1.0,
                             8.0, 2.0, 9.0, 0.060, 11.5],
                            ['query2', 'subject1', 71.43, 7.0, 2.0, 0.0, 1.0,
                             7.0, 1.0, 7.0, 0.044, 11.9]],
                           columns=['qseqid', 'sseqid', 'pident', 'length',
                                    'mismatch', 'gapopen', 'qstart', 'qend',
                                    'sstart', 'send', 'evalue', 'bitscore'])
        assert_data_frame_almost_equal(df, exp)

    def test_custom_valid_single_line(self):
        """A single line parses correctly against user-specified columns."""
        fp = get_data_path('blast6_custom_single_line')
        df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop',
                                                'sframe', 'ppos',
                                                'positive', 'gaps'])
        exp = pd.DataFrame([['query1', 'PAAWWWWW', 8.0, 1.0, 100.00, 8.0,
                             0.0]], columns=['qacc', 'qseq', 'btop', 'sframe',
                                             'ppos', 'positive', 'gaps'])
        assert_data_frame_almost_equal(df, exp)

    def test_custom_valid_multi_line(self):
        """Multiple custom-column lines parse; an all-NaN column keeps dtype object."""
        fp = get_data_path('blast6_custom_multi_line')
        df = _blast6_to_data_frame(fp, columns=['sacc', 'score', 'gapopen',
                                                'qcovs', 'sblastnames',
                                                'sallacc', 'qaccver'])
        exp = pd.DataFrame([['subject2', 32.0, 0.0, 100.0, np.nan, 'subject2',
                             'query1'], ['subject2', 18.0, 0.0, 100.0, np.nan,
                                         'subject2', 'query1'],
                            ['subject1', 19.0, 0.0, 70.0, np.nan, 'subject1',
                             'query2']], columns=['sacc', 'score', 'gapopen',
                                                  'qcovs', 'sblastnames',
                                                  'sallacc', 'qaccver'])
        # Match the reader, which keeps the all-missing string column as object.
        exp['sblastnames'] = exp['sblastnames'].astype(object)
        assert_data_frame_almost_equal(df, exp)

    def test_valid_nan_handling(self):
        """Missing values scattered through the input become NaN."""
        fp = get_data_path('blast6_custom_mixed_nans')
        df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop',
                                                'sframe', 'ppos', 'positive',
                                                'gaps'])
        exp = pd.DataFrame([[np.nan, 'PAAWWWWW', 8.0, 1.0, 100.00, np.nan,
                             0.0], ['query1', np.nan, 8.0, 1.0, np.nan, 8.0,
                                    0.0]], columns=['qacc', 'qseq', 'btop',
                                                    'sframe', 'ppos',
                                                    'positive', 'gaps'])
        assert_data_frame_almost_equal(df, exp)

    def test_valid_minimal(self):
        """A single user-specified column is the smallest valid input."""
        fp = get_data_path('blast6_custom_minimal')
        df = _blast6_to_data_frame(fp, columns=['sacc'])
        exp = pd.DataFrame([['subject2']], columns=['sacc'])
        assert_data_frame_almost_equal(df, exp)

    def test_custom_and_default_passed_error(self):
        """Passing both `columns` and `default_columns` raises ValueError."""
        fp = get_data_path('blast6_default_single_line')
        with self.assertRaisesRegex(ValueError,
                                    r"`columns` and `default_columns`"):
            _blast6_to_data_frame(fp, columns=['qseqid'], default_columns=True)

    def test_no_columns_passed_error(self):
        """Passing neither `columns` nor `default_columns` raises ValueError."""
        fp = get_data_path('blast6_default_single_line')
        with self.assertRaisesRegex(ValueError,
                                    r"Either `columns` or `default_columns`"):
            _blast6_to_data_frame(fp)

    def test_wrong_amount_of_columns_error(self):
        """A column-count mismatch between spec and data raises ValueError."""
        fp = get_data_path('blast6_invalid_number_of_columns')
        with self.assertRaisesRegex(
                ValueError, r"Specified number of columns \(12\).*\(10\)"):
            _blast6_to_data_frame(fp, default_columns=True)

    def test_different_data_in_same_column(self):
        """Incompatible mixed types within one column raise ValueError."""
        fp = get_data_path('blast6_invalid_type_in_column')
        with self.assertRaises(ValueError):
            _blast6_to_data_frame(fp, default_columns=True)

    def test_wrong_column_name_error(self):
        """An unrecognized column name in `columns` raises ValueError."""
        fp = get_data_path('blast6_default_single_line')
        with self.assertRaisesRegex(ValueError,
                                    r"Unrecognized column.*'abcd'"):
            _blast6_to_data_frame(fp, columns=['qseqid', 'sseqid', 'pident',
                                               'length', 'mismatch', 'gapopen',
                                               'qstart', 'qend', 'sstart',
                                               'send', 'abcd', 'bitscore'])
if __name__ == '__main__':
unittest.main() |
6,727 | net message name del | from _typeshed import Incomplete
from win32.lib.pywintypes import error as error
# Type stubs for the pywin32 `win32net` module (Windows networking API).
# Join / domain information
def NetGetJoinInformation() -> tuple[str, Incomplete]: ...
# Global (domain) group management
def NetGroupGetInfo(server: str, groupname: str, level): ...
def NetGroupGetUsers(
    server: str, groupName: str, level, resumeHandle: int = ..., prefLen: int = ...
) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetGroupSetUsers(server: str, group: str, level, members: tuple[Incomplete, Incomplete]) -> None: ...
def NetGroupSetInfo(server: str, groupname: str, level, data) -> None: ...
def NetGroupAdd(server: str, level, data) -> None: ...
def NetGroupAddUser(server: str, group: str, username: str) -> None: ...
def NetGroupDel(server: str, groupname: str) -> None: ...
def NetGroupDelUser(server: str, group: str, username: str) -> None: ...
def NetGroupEnum(server: str, level, prefLen, resumeHandle=...) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
# Local group membership
def NetLocalGroupAddMembers(server: str, group: str, level, members: tuple[Incomplete, Incomplete]) -> None: ...
def NetLocalGroupDelMembers(server: str, group: str, members: list[str]) -> None: ...
def NetLocalGroupGetMembers(
    server: str, groupName: str, level, resumeHandle: int = ..., prefLen: int = ...
) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetLocalGroupSetMembers(server: str, group: str, level, members: tuple[Incomplete, Incomplete]) -> None: ...
# Messaging
def NetMessageBufferSend(domain: str, userName: str, fromName: str, message: str) -> None: ...
def NetMessageNameAdd(server, msgname) -> None: ...
def METHOD_NAME(server, msgname) -> None: ...
def NetMessageNameEnum(Server) -> None: ...
# Server enumeration and info
def NetServerEnum(
    server: str, level, _type, prefLen, domain: str | None = ..., resumeHandle: int = ...
) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetServerGetInfo(server: str, level): ...
def NetServerSetInfo(server: str, level, data) -> None: ...
# Share management
def NetShareAdd(server: str, level, data) -> None: ...
def NetShareDel(server: str, shareName: str, reserved: int = ...) -> None: ...
def NetShareCheck(server: str, deviceName: str) -> tuple[Incomplete, Incomplete]: ...
def NetShareEnum(
    server: str, level, prefLen, serverName, resumeHandle=...
) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetShareGetInfo(server: str, netname: str, level): ...
def NetShareSetInfo(server: str, netname: str, level, data) -> None: ...
# User account management
def NetUserAdd(server: str, level, data) -> None: ...
def NetUserChangePassword(server: str, username: str, oldPassword: str, newPassword: str) -> None: ...
def NetUserEnum(server: str, level, arg, prefLen, resumeHandle=...) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetUserGetGroups(serverName: str, userName: str) -> list[tuple[Incomplete, Incomplete]]: ...
def NetUserGetInfo(server: str, username: str, level): ...
def NetUserGetLocalGroups(serverName: str, userName: str, flags) -> list[Incomplete]: ...
def NetUserSetInfo(server: str, username: str, level, data) -> None: ...
def NetUserDel(server: str, username: str) -> None: ...
def NetUserModalsGet(server: str, level): ...
def NetUserModalsSet(server: str, level, data) -> None: ...
# Workstation info and transports
def NetWkstaUserEnum(server: str, level, prefLen, resumeHandle=...) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetWkstaGetInfo(server: str, level): ...
def NetWkstaSetInfo(server: str, level, data) -> None: ...
def NetWkstaTransportEnum(
    server: str, level, prefLen, resumeHandle=...
) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetWkstaTransportAdd(server: str, level, data) -> None: ...
def NetWkstaTransportDel(server: str, TransportName: str, ucond: int = ...) -> None: ...
def NetServerDiskEnum(server: str, level): ...
# Network "use" (drive mapping) management
def NetUseAdd(server: str, level, data) -> None: ...
def NetUseDel(server: str, useName: str, forceCond: int = ...) -> None: ...
def NetUseEnum(server: str, level, prefLen, resumeHandle=...) -> tuple[Incomplete, Incomplete, Incomplete, Incomplete]: ...
def NetUseGetInfo(server: str, usename: str, level: int = ...): ...
# Domain controller lookup
def NetGetAnyDCName(server: str | None = ..., domain: str | None = ...) -> str: ...
def NetGetDCName(server: str | None = ..., domain: str | None = ...) -> str: ...
# Session management
def NetSessionEnum(
    level, server: str | None = ..., client: str | None = ..., username: str | None = ...
) -> tuple[Incomplete, ...]: ...
def NetSessionDel(server: str, client: str | None = ..., username: str | None = ...) -> None: ...
def NetSessionGetInfo(level, server: str, client: str, username: str): ...
# Open-file management
def NetFileEnum(
    level, servername: str | None = ..., basepath: str | None = ..., username: str | None = ...
) -> tuple[Incomplete, ...]: ...
def NetFileClose(servername: str, fileid) -> None: ...
def NetFileGetInfo(level, servername: str, fileid): ...
# Statistics and emulated computer names
def NetStatisticsGet(server: str, service: str, level, options): ...
def NetServerComputerNameAdd(ServerName: str, EmulatedDomainName: str, EmulatedServerName: str) -> None: ...
def NetServerComputerNameDel(ServerName: str, EmulatedServerName: str) -> None: ...
# Name and password-policy validation
def NetValidateName(Server: str, Name: str, NameType, Account: str | None = ..., Password: str | None = ...) -> None: ...
def NetValidatePasswordPolicy(Server: str, Qualifier, ValidationType, arg) -> None: ...
def NetLocalGroupAdd(*args, **kwargs): ...  # incomplete
def NetLocalGroupDel(*args, **kwargs): ...  # incomplete
def NetLocalGroupEnum(*args, **kwargs): ...  # incomplete
def NetLocalGroupGetInfo(*args, **kwargs): ...  # incomplete
def NetLocalGroupSetInfo(*args, **kwargs): ...  # incomplete
# Module-level constants
SERVICE_SERVER: str
SERVICE_WORKSTATION: str
USE_FORCE: int
USE_LOTS_OF_FORCE: int
USE_NOFORCE: int |
6,728 | init files | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs code on a fleet of machines.
This runs the lingvo code on a fleet of docker for demonstration and testing
purposes. We assume the following:
* There is a running container
* There is a shared volume in /sharedfs. In reality, this would be something
like an NFS or HDFS mount.
The script is run on the host and only requires python and the docker binary
to be installed.
We run two "clusters": one for training, and one for decoding. The trainer
jobs (controller, trainer_client/worker or trainer/ps) are connected to
each other, whereas the decoder jobs are independent, only reading from
the shared filesystem. The trainer jobs are configured via a cluster spec
flag, whereas the decoder jobs are configured with individual flags.
"""
import os
from pipes import quote as shell_quote
import shutil
import subprocess
import sys
_SYNC_TRAIN_CLUSTER_SPEC = {
"worker": [
"worker0:43222",
"worker1:43222",
"worker2:43222",
],
"controller": ["controller:43214",],
"trainer_client": ["trainer_client:24601"],
}
_ASYNC_TRAIN_CLUSTER_SPEC = {
"trainer": [
"trainer0:43222",
"trainer1:43222",
"trainer2:43222",
],
"ps": [
"ps0:43221",
"ps1:43221",
],
"controller": ["controller:43214",],
}
DECODE_CLUSTER_SPEC = {
"evaler_test": ["evaler_test:23487"],
"decoder_test": ["decoder_test:24679"],
}
MODEL = "image.mnist.LeNet5"
DATADIR = "/tmp/mnist"
TRAIN_MODE = "sync"
TRAIN_CLUSTER_SPEC = (
_SYNC_TRAIN_CLUSTER_SPEC
if TRAIN_MODE == "sync" else _ASYNC_TRAIN_CLUSTER_SPEC)
DOCKER_BIN = "/usr/bin/docker"
# All that is required is that we have pip installed tensorflow.
DOCKER_IMAGE_NAME = "tensorflow:lingvo"
# This was created using
# bazel build -c opt //lingvo:trainer.par
# cp bazel-bin/lingvo/trainer.par .
# Since /tmp/lingvo is mounted, we can see it.
# TODO(drpng): hard-wiring below.
TRAINER_PACKAGE = "/tmp/lingvo/trainer.par"
DRY_RUN = False
NETWORK_NAME = "tf-net"
SHARED_FS_MOUNTPOINT = "/tmp/sharedfs"
def _RunDocker(args):
    """Invoke the docker binary with *args*; return the exit status.

    In dry-run mode the command is only printed and 0 is returned.
    """
    print("Running: docker %s" % args)
    if DRY_RUN:
        return 0
    return subprocess.call([DOCKER_BIN] + args)
def _RunDockerOrDie(args):
    """Run docker with *args*; terminate the whole script on failure."""
    status = _RunDocker(args)
    if status == 0:
        return
    sys.stderr.write("Failed to run: %s\n" % status)
    sys.stderr.flush()
    sys.exit(status)
def _ExecInDocker(container_name,
                  cmd_array,
                  workdir=None,
                  logfile=None,
                  detach=False):
    """Execute in docker container.

    Args:
      container_name: name of an already-running container.
      cmd_array: command and arguments as a list of strings.
      workdir: working directory inside the container; defaults to /tmp.
      logfile: container-side path; when set, stdout/stderr are redirected
        to it through a bash wrapper.
      detach: when True, run in the background (docker exec -d).

    Exits the whole script if the exec fails.
    """
    if not workdir:
        workdir = "/tmp"
    opts = ["-t", "-w", workdir]
    if detach:
        opts += ["-d"]
    # TODO(drpng): avoid quoting hell.
    base_cmd = ["exec"] + opts + [container_name]
    if logfile:
        # The logfile is in the container.
        # Quote each argument, then wrap in `bash -c` so the >& redirection
        # happens inside the container, not on the host.
        cmd = " ".join(shell_quote(x) for x in cmd_array)
        cmd += " >& %s" % logfile
        full_cmd = base_cmd + ["bash", "-c", cmd]
    else:
        full_cmd = base_cmd + cmd_array
    ret = _RunDocker(full_cmd)
    if ret != 0:
        sys.stderr.write(
            "Failed to exec within %s: %s" % (container_name, cmd_array))
        sys.exit(ret)
def _Machine(machine_port):
# From host:port to host.
return machine_port[:machine_port.index(":")]
def Cleanup():
    """Stop all fleet containers, remove the docker network and shared FS."""
    specs = list(TRAIN_CLUSTER_SPEC.values()) + list(DECODE_CLUSTER_SPEC.values())
    for job_machines in specs:
        machines = [_Machine(x) for x in job_machines]
        # -t 0: kill immediately, no graceful-shutdown grace period.
        _RunDocker(["stop", "-t", "0"] + machines)
    _RunDocker(["network", "rm", NETWORK_NAME])
    # ignore_errors: the mountpoint may not exist on a fresh run.
    shutil.rmtree(SHARED_FS_MOUNTPOINT, ignore_errors=True)
def METHOD_NAME():
    """Create the shared mountpoint and its log directories.

    Directories are created here (world-writable, sticky bit) so that they
    are owned by the invoking user rather than root inside the containers.
    """
    for suffix in ("",
                   "/log",
                   "/log/train",
                   "/log/decoder_test",
                   "/log/eval_test"):
        os.mkdir(SHARED_FS_MOUNTPOINT + suffix, 0o1777)
def InitNetwork():
    """Create the docker bridge network shared by all fleet containers."""
    _RunDockerOrDie(["network", "create", "--driver", "bridge", NETWORK_NAME])
def StartFleet():
    """Start one detached container per machine in both cluster specs."""
    specs = list(TRAIN_CLUSTER_SPEC.values()) + list(DECODE_CLUSTER_SPEC.values())
    for job_machines in specs:
        for machine_port in job_machines:
            machine_name = _Machine(machine_port)
            # Mount the shared FS read-write and the data dir read-only;
            # the container idles in bash until we exec the trainer into it.
            _RunDockerOrDie([
                "run", "--rm", "--name", machine_name, "-dit", "--network",
                NETWORK_NAME, "-v", ":".join([SHARED_FS_MOUNTPOINT] * 2), "-v",
                ":".join([DATADIR] * 2 + ["ro"]), DOCKER_IMAGE_NAME, "bash"
            ])
def MakeFlagClusterSpec(cluster_spec):
    """Serialize {job: [host:port, ...]} into the trainer's cluster-spec flag
    format 'job1=a,b@job2=c', with jobs in sorted order."""
    parts = [
        "{}={}".format(job, ",".join(cluster_spec[job]))
        for job in sorted(cluster_spec)
    ]
    return "@".join(parts)
def CopyTrainerToSharedMount():
    """Copy the trainer.par package to the shared FS, visible to all containers."""
    shutil.copy(TRAINER_PACKAGE, SHARED_FS_MOUNTPOINT + "/trainer.par")
def InstallAndStartProcess(cluster_spec):
    """Unpacks the trainer and kick off training.

    Args:
      cluster_spec: dict mapping job name to a list of 'host:port' strings.

    Launches one detached trainer process per machine; each process logs to
    a per-task file on the shared filesystem.
    """
    cluster_spec_flag = MakeFlagClusterSpec(cluster_spec)
    for job_name, machines in cluster_spec.items():
        # Task index is per-job, matching the machine's position in the spec.
        task_idx = 0
        for machine_port in machines:
            machine_name = _Machine(machine_port)
            _ExecInDocker(
                machine_name, [
                    os.path.join(SHARED_FS_MOUNTPOINT, "trainer.par"),
                    "--cluster_spec=%s" % cluster_spec_flag,
                    "--job=%s" % job_name,
                    "--task=%d" % task_idx,
                    "--mode=%s" % TRAIN_MODE,
                    "--logtostderr",
                    "--model=%s" % MODEL,
                    "--logdir=%s/log" % SHARED_FS_MOUNTPOINT,
                ],
                workdir="/tmp",
                logfile="%s/%s.%d.log" % (SHARED_FS_MOUNTPOINT, job_name, task_idx),
                detach=True)
            task_idx += 1
def main():
    """Tear down any previous fleet, then bring up training and decode jobs."""
    Cleanup()
    METHOD_NAME()
    InitNetwork()
    StartFleet()
    CopyTrainerToSharedMount()
    # Trainer jobs share one cluster spec and talk to each other.
    InstallAndStartProcess(TRAIN_CLUSTER_SPEC)
    for role in sorted(DECODE_CLUSTER_SPEC.keys()):
        # Each decode process is its own spec.
        machine_spec = DECODE_CLUSTER_SPEC[role]
        InstallAndStartProcess({role: machine_spec})
if __name__ == "__main__":
main() |
6,729 | preprocess function | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
import numpy as np
import paddle
from datasets import load_dataset
from paddle.metric import Accuracy
from paddlenlp.data import DataCollatorWithPadding
from paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman
from paddlenlp.trainer import PdArgumentParser, Trainer, TrainingArguments
from paddlenlp.transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
BertForSequenceClassification,
BertTokenizer,
ErnieForSequenceClassification,
ErnieTokenizer,
)
METRIC_CLASSES = {
"cola": Mcc,
"sst2": Accuracy,
"mrpc": AccuracyAndF1,
"stsb": PearsonAndSpearman,
"qqp": AccuracyAndF1,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
"wnli": Accuracy,
}
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
MODEL_CLASSES = {
"bert": (BertForSequenceClassification, BertTokenizer),
"ernie": (ErnieForSequenceClassification, ErnieTokenizer),
}
@dataclass
class ModelArguments:
    """Command-line arguments selecting the GLUE task, model and sequence length."""

    # GLUE task identifier; must be a key of METRIC_CLASSES / task_to_keys.
    task_name: str = field(
        default=None,
        metadata={"help": "The name of the task to train selected in the list: " + ", ".join(METRIC_CLASSES.keys())},
    )
    # Pretrained model identifier or local checkpoint directory.
    model_name_or_path: str = field(
        default=None,
        metadata={"help": "Path to pre-trained model or shortcut name"},
    )
    # Inputs longer than this are truncated; shorter ones are padded later
    # by the data collator.
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
def do_train():
    """Fine-tune and/or evaluate a sequence-classification model on a GLUE task."""
    training_args, model_args = PdArgumentParser([TrainingArguments, ModelArguments]).parse_args_into_dataclasses()
    # Re-annotate for type-checker friendliness.
    training_args: TrainingArguments = training_args
    model_args: ModelArguments = model_args
    training_args.print_config(model_args, "Model")
    training_args.print_config(training_args, "Training")
    model_args.task_name = model_args.task_name.lower()
    # Which dataset fields hold the input sentence(s) for this task.
    sentence1_key, sentence2_key = task_to_keys[model_args.task_name]
    train_ds = load_dataset("glue", model_args.task_name, split="train")
    columns = train_ds.column_names
    # STS-B is the only regression task in GLUE.
    is_regression = model_args.task_name == "stsb"
    label_list = None
    if not is_regression:
        label_list = train_ds.features["label"].names
        num_labels = len(label_list)
    else:
        num_labels = 1
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)

    def METHOD_NAME(examples):
        """Tokenize a batch of examples and carry the label through as 'labels'."""
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*texts, max_length=model_args.max_seq_length, truncation=True)
        if "label" in examples:
            # In all cases, rename the column to labels because the model will expect that.
            result["labels"] = examples["label"]
        return result

    train_ds = train_ds.map(METHOD_NAME, batched=True, remove_columns=columns)
    data_collator = DataCollatorWithPadding(tokenizer)
    # MNLI has two validation splits (matched / mismatched genres).
    if model_args.task_name == "mnli":
        dev_ds_matched, dev_ds_mismatched = load_dataset(
            "glue", model_args.task_name, split=["validation_matched", "validation_mismatched"]
        )
        dev_ds_matched = dev_ds_matched.map(METHOD_NAME, batched=True, remove_columns=columns)
        dev_ds_mismatched = dev_ds_mismatched.map(METHOD_NAME, batched=True, remove_columns=columns)
        dev_ds = {"matched": dev_ds_matched, "mismatched": dev_ds_mismatched}
    else:
        dev_ds = load_dataset("glue", model_args.task_name, split="validation")
        dev_ds = dev_ds.map(METHOD_NAME, batched=True, remove_columns=columns)
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, num_labels=num_labels)

    def compute_metrics(p):
        """Compute the task-appropriate metric(s) from an EvalPrediction."""
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        if is_regression:
            preds = np.squeeze(preds)
        preds = paddle.to_tensor(preds)
        label = paddle.to_tensor(p.label_ids)
        metric = METRIC_CLASSES[model_args.task_name]()
        result = metric.compute(preds, label)
        metric.update(result)
        # Unpack per-metric-class result shapes into a flat dict for Trainer.
        if isinstance(metric, AccuracyAndF1):
            acc, precision, recall, f1, _ = metric.accumulate()
            return {"accuracy": acc, "precision": precision, "recall": recall, "f1": f1}
        elif isinstance(metric, Mcc):
            mcc = metric.accumulate()
            return {"mcc": mcc[0]}
        elif isinstance(metric, PearsonAndSpearman):
            pearson, spearman, _ = metric.accumulate()
            return {"pearson": pearson, "spearman": spearman}
        elif isinstance(metric, Accuracy):
            acc = metric.accumulate()
            return {"accuracy": acc}

    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_ds if training_args.do_train else None,
        eval_dataset=dev_ds,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )
    # training
    if training_args.do_train:
        train_result = trainer.train()
        metrics = train_result.metrics
        trainer.save_model()
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    if training_args.do_eval:
        # MNLI evaluates both validation splits separately.
        if model_args.task_name == "mnli":
            for _, eval_dataset in dev_ds.items():
                eval_metrics = trainer.evaluate(eval_dataset)
                trainer.log_metrics("eval", eval_metrics)
                trainer.save_metrics("eval", eval_metrics)
        else:
            eval_metrics = trainer.evaluate(dev_ds)
            trainer.log_metrics("eval", eval_metrics)
            trainer.save_metrics("eval", eval_metrics)
if __name__ == "__main__":
do_train() |
6,730 | handle connected | from __future__ import unicode_literals
import glob
import json
import os
import re
import threading
import ttfw_idf
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from tiny_test_fw import Utility
class IDEWSProtocol(WebSocket):
    """WebSocket handler emulating an IDE connected to IDF Monitor.

    On receiving a valid gdb_stub/coredump event it replies with a
    'debug_finished' message, mimicking an IDE completing a debug session.
    """

    def handleMessage(self):
        """Handle one JSON message from the monitor; acknowledge debug events."""
        try:
            j = json.loads(self.data)
        except Exception as e:
            # Non-JSON payloads are logged and ignored, not fatal.
            Utility.console_log('Server ignores error: {}'.format(e), 'orange')
            return
        event = j.get('event')
        # A valid debug event must carry 'prog' plus its event-specific field:
        # 'port' for gdb_stub, 'file' for coredump.
        if event and 'prog' in j and ((event == 'gdb_stub' and 'port' in j) or
                                      (event == 'coredump' and 'file' in j)):
            payload = {'event': 'debug_finished'}
            self.sendMessage(json.dumps(payload))
            Utility.console_log('Server sent: {}'.format(payload))
        else:
            Utility.console_log('Server received: {}'.format(j), 'orange')

    def METHOD_NAME(self):
        """Log a newly connected client."""
        Utility.console_log('{} connected to server'.format(self.address))

    def handleClose(self):
        """Log a client disconnect."""
        Utility.console_log('{} closed the connection'.format(self.address))
class WebSocketServer(object):
    """WebSocket server running in a background thread; use as a context manager."""

    HOST = '127.0.0.1'
    PORT = 1123

    def run(self):
        """Thread body: serve one request at a time so exit_event is observed."""
        server = SimpleWebSocketServer(self.HOST, self.PORT, IDEWSProtocol)
        while not self.exit_event.is_set():
            server.serveonce()

    def __init__(self):
        # The thread starts serving immediately on construction.
        self.exit_event = threading.Event()
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Signal the serve loop to stop and give the thread 10s to wind down.
        self.exit_event.set()
        self.thread.join(10)
        if self.thread.is_alive():
            Utility.console_log('Thread cannot be joined', 'orange')
@ttfw_idf.idf_custom_test(env_tag='test_jtag_arm', group='test-apps')
def test_monitor_ide_integration(env, extra_data):
    """Run IDF Monitor against the panic app for every sdkconfig.ci.* config
    and verify the WebSocket IDE-integration handshake end to end."""
    config_files = glob.glob(os.path.join(os.path.dirname(__file__), 'sdkconfig.ci.*'))
    config_names = [os.path.basename(s).replace('sdkconfig.ci.', '') for s in config_files]
    rel_proj_path = 'tools/test_apps/system/monitor_ide_integration'
    for name in config_names:
        Utility.console_log('Checking config "{}"... '.format(name), 'green', end='')
        dut = env.get_dut('panic', rel_proj_path, app_config_name=name)
        monitor_path = os.path.join(dut.app.idf_path, 'tools/idf_monitor.py')
        elf_path = os.path.join(dut.app.binary_path, 'panic.elf')
        dut.start_app()
        # Closing the DUT because we will reconnect with IDF Monitor
        env.close_dut(dut.name)
        with WebSocketServer(), ttfw_idf.CustomProcess(' '.join([monitor_path,
                                                                 elf_path,
                                                                 '--port', str(dut.port),
                                                                 '--ws', 'ws://{}:{}'.format(WebSocketServer.HOST,
                                                                                             WebSocketServer.PORT)]),
                                                       logfile='monitor_{}.log'.format(name)) as p:
            # The app panics on boot; the monitor should detect it, contact
            # our WebSocket server, and finish the emulated debug round-trip.
            p.pexpect_proc.expect(re.compile(r'Guru Meditation Error'), timeout=10)
            p.pexpect_proc.expect_exact('Communicating through WebSocket', timeout=5)
            # "u?" is for Python 2 only in the following regular expressions.
            # The elements of dictionary can be printed in different order depending on the Python version.
            p.pexpect_proc.expect(re.compile(r"WebSocket sent: \{u?.*'event': u?'" + name + "'"), timeout=5)
            p.pexpect_proc.expect_exact('Waiting for debug finished event', timeout=5)
            p.pexpect_proc.expect(re.compile(r"WebSocket received: \{u?'event': u?'debug_finished'\}"), timeout=5)
            p.pexpect_proc.expect_exact('Communications through WebSocket is finished', timeout=5)
if __name__ == '__main__':
test_monitor_ide_integration() |
6,731 | make primitives | # -*- coding: utf-8 -*-
"""Utility function for estimator testing.
copyright: aeon developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "fkiraly"]
from inspect import isclass, signature
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.utils.validation import check_random_state
from aeon.base import BaseEstimator, BaseObject
from aeon.classification.base import BaseClassifier
from aeon.classification.early_classification import BaseEarlyClassifier
from aeon.clustering.base import BaseClusterer
from aeon.datatypes._panel._check import is_nested_dataframe
from aeon.forecasting.base import BaseForecaster
from aeon.regression.base import BaseRegressor
from aeon.tests._config import VALID_ESTIMATOR_TYPES
from aeon.transformations.base import BaseTransformer
def _get_err_msg(estimator):
    """Build the error message reported for an unsupported estimator type."""
    return (
        "Invalid estimator type: {}. Valid estimator types are: {}".format(
            type(estimator), VALID_ESTIMATOR_TYPES
        )
    )
def _list_required_methods(estimator):
    """Return list of required method names (beyond BaseEstimator ones).

    Parameters
    ----------
    estimator : object
        Estimator whose base classes determine which methods it must provide.

    Returns
    -------
    list of str
        Names of methods/properties the estimator is expected to implement.
    """
    # all BaseObject children must implement these
    MUST_HAVE_FOR_OBJECTS = ["set_params", "get_params"]
    # all BaseEstimator children must implement these
    MUST_HAVE_FOR_ESTIMATORS = [
        "fit",
        "check_is_fitted",
        "is_fitted",  # read-only property
    ]
    # prediction/forecasting base classes that must have predict
    BASE_CLASSES_THAT_MUST_HAVE_PREDICT = (
        BaseClusterer,
        BaseRegressor,
        BaseForecaster,
    )
    # transformation base classes that must have transform
    BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM = (BaseTransformer,)
    required_methods = []
    # NOTE(review): isinstance implies `estimator` is an instance, not a
    # class — confirm callers never pass the class itself.
    if isinstance(estimator, BaseObject):
        required_methods += MUST_HAVE_FOR_OBJECTS
    if isinstance(estimator, BaseEstimator):
        required_methods += MUST_HAVE_FOR_ESTIMATORS
    if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_PREDICT):
        required_methods += ["predict"]
    if isinstance(estimator, BASE_CLASSES_THAT_MUST_HAVE_TRANSFORM):
        required_methods += ["transform"]
    return required_methods
def METHOD_NAME(n_columns=1, random_state=None):
    """Generate one or more primitives, for checking inverse-transform.

    Parameters
    ----------
    n_columns : int, default=1
        Number of primitives to generate; if 1, a scalar float is returned.
    random_state : int, RandomState instance or None, default=None
        Seed or random state for reproducibility.

    Returns
    -------
    float or np.ndarray of shape (n_columns,)
        Uniform random value(s) in [0, 1).
    """
    rng = check_random_state(random_state)
    if n_columns == 1:
        return rng.rand()
    # BUG FIX: RandomState.rand takes dimension sizes as positional
    # arguments; `rng.rand(size=(n_columns,))` raised TypeError.
    return rng.rand(n_columns)
def _make_tabular_X(n_instances=20, n_columns=1, return_numpy=True, random_state=None):
    """Generate a tabular X of uniform randoms, for checking inverse-transform.

    Returns an (n_instances, n_columns) ndarray, or the equivalent
    pd.DataFrame when return_numpy is False.
    """
    rng = check_random_state(random_state)
    values = rng.rand(n_instances, n_columns)
    if not return_numpy:
        return pd.DataFrame(values)
    return values
def _compare_nested_frame(func, x, y, **kwargs):
    """Compare two nested pd.DataFrames.

    Parameters
    ----------
    func : function
        Function from np.testing for comparing arrays.
    x : pd.DataFrame
    y : pd.DataFrame
    kwargs : dict
        Keyword argument for function

    Raises
    ------
    AssertionError
        If x and y are not equal
    """
    # We iterate over columns and rows to make cell-wise comparisons.
    # Tabularizing the data first would simplify this, but does not
    # work for unequal length data.
    # In rare cases, x and y may be empty (e.g. TSFreshRelevantFeatureExtractor) and
    # we cannot compare individual cells, so we simply check if everything else is
    # equal here.
    assert isinstance(x, pd.DataFrame)
    if x.empty:
        assert_frame_equal(x, y)
    elif is_nested_dataframe(x):
        # Check if both inputs have the same shape
        if not x.shape == y.shape:
            raise ValueError("Found inputs with different shapes")
        # Iterate over columns
        n_columns = x.shape[1]
        for i in range(n_columns):
            xc = x.iloc[:, i].tolist()
            yc = y.iloc[:, i].tolist()
            # Iterate over rows, checking if individual cells are equal
            for xci, yci in zip(xc, yc):
                func(xci, yci, **kwargs)
def _assert_array_almost_equal(x, y, decimal=6, err_msg=""):
func = np.testing.assert_array_almost_equal
if isinstance(x, pd.DataFrame):
_compare_nested_frame(func, x, y, decimal=decimal, err_msg=err_msg)
else:
func(x, y, decimal=decimal, err_msg=err_msg)
def _assert_array_equal(x, y, err_msg=""):
func = np.testing.assert_array_equal
if isinstance(x, pd.DataFrame):
_compare_nested_frame(func, x, y, err_msg=err_msg)
else:
func(x, y, err_msg=err_msg)
def _get_args(function, varargs=False):
"""Get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [
key
for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
]
if varargs:
varargs = [
param.name
for param in params.values()
if param.kind == param.VAR_POSITIONAL
]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _has_capability(est, method: str) -> bool:
    """Check whether estimator ``est`` genuinely supports calling ``method``."""

    def _tag(tag_name, default=None):
        # class tags for classes, instance tags for instances
        if isclass(est):
            return est.get_class_tag(tag_name=tag_name, tag_value_default=default)
        return est.get_tag(tag_name=tag_name, tag_value_default=default)

    if not hasattr(est, method):
        return False
    if method == "inverse_transform":
        return _tag("capability:inverse_transform", False)
    probabilistic_methods = (
        "predict_proba",
        "predict_interval",
        "predict_quantiles",
        "predict_var",
    )
    if method in probabilistic_methods:
        # all classifiers and clusterers implement predict_proba
        always_have_proba = (BaseClassifier, BaseEarlyClassifier, BaseClusterer)
        if method == "predict_proba" and isinstance(est, always_have_proba):
            return True
        return _tag("capability:pred_int", False)
    # skip transform for forecasters that have it - pipelines
    if method == "transform" and isinstance(est, BaseForecaster):
        return False
    return True
import numpy as np
from math import pi
import bpy
from bpy.props import FloatProperty, EnumProperty, IntProperty
from mathutils import Matrix
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve import SvCurve, SvCurveOnSurface, SvCircle
from sverchok.utils.surface.rbf import SvRbfSurface
from sverchok.dependencies import scipy
from sverchok.utils.math import rbf_functions
if scipy is not None:
from scipy.interpolate import Rbf
class SvExMinSurfaceFromCurveNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Minimal Surface from Curve
    Tooltip: Generate Minimal Surface from circle-like curve
    """
    bl_idname = 'SvExMinSurfaceFromCurveNode'
    bl_label = 'Minimal Surface from Curve'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_EX_MINSURFACE'
    sv_dependencies = {'scipy'}

    # RBF kernel name, passed through to scipy.interpolate.Rbf
    function : EnumProperty(
            name = "Function",
            items = rbf_functions,
            default = 'multiquadric',
            update = updateNode)

    # Rbf shape parameter (scipy `epsilon`)
    epsilon : FloatProperty(
            name = "Epsilon",
            default = 1.0,
            min = 0.0,
            update = updateNode)

    # Rbf smoothing factor; 0 means exact interpolation through samples
    smooth : FloatProperty(
            name = "Smooth",
            default = 0.0,
            min = 0.0,
            update = updateNode)

    # number of points sampled along the input curve
    samples_t : IntProperty(
            name = "Samples",
            default = 50,
            min = 3,
            update = updateNode)

    def sv_init(self, context):
        """Create the node's input and output sockets."""
        self.inputs.new('SvCurveSocket', "Curve")
        self.inputs.new('SvStringsSocket', "Samples").prop_name = 'samples_t'
        self.inputs.new('SvStringsSocket', "Epsilon").prop_name = 'epsilon'
        self.inputs.new('SvStringsSocket', "Smooth").prop_name = 'smooth'
        self.outputs.new('SvSurfaceSocket', "Surface")
        self.outputs.new('SvCurveSocket', "TrimCurve")
        self.outputs.new('SvCurveSocket', "Curve")

    def draw_buttons(self, context, layout):
        """Draw the RBF kernel selector in the node UI."""
        layout.prop(self, "function")

    def make_surface(self, curve, epsilon, smooth, samples):
        """Build an RBF surface spanned by the given circle-like curve.

        The curve is sampled, its points are mapped by arc length onto the
        unit circle, and an Rbf interpolator from (u, v) on that circle to
        the 3D points defines the surface over [-1, 1] x [-1, 1].
        """
        t_min, t_max = curve.get_u_bounds()
        curve_ts = np.linspace(t_min, t_max, num=samples)
        curve_points = curve.evaluate_array(curve_ts)
        dvs = curve_points[1:] - curve_points[:-1]
        segment_lengths = np.linalg.norm(dvs, axis=1)
        last_segment_length = np.linalg.norm(curve_points[0] - curve_points[-1])
        if last_segment_length < 0.001:
            # curve is closed: remove the last segment to make it non-closed
            segment_lengths = segment_lengths[:-1]
            curve_points = curve_points[:-1]
            last_segment_length = np.linalg.norm(curve_points[0] - curve_points[-1])
        # T=0 will correspond to the center of gap between first and last point
        dt = min(last_segment_length / 2.0, segment_lengths.min())
        cum_segment_lengths = np.insert(np.cumsum(segment_lengths), 0, 0)
        total_length = cum_segment_lengths[-1] + last_segment_length
        ts = cum_segment_lengths + dt
        # normalize accumulated arc length to a full turn [0, 2*pi)
        ts = 2*pi * ts / total_length
        us = np.cos(ts)
        vs = np.sin(ts)
        rbf = Rbf(us, vs, curve_points,
                function = self.function,
                epsilon = epsilon, smooth = smooth, mode = 'N-D')
        surface = SvRbfSurface(rbf, 'UV', 'Z', Matrix())
        surface.u_bounds = (-1.0, 1.0)
        surface.v_bounds = (-1.0, 1.0)
        return surface

    def process(self):
        """Evaluate inputs and emit surface, unit trim circle and trim curve."""
        if not any(socket.is_linked for socket in self.outputs):
            return
        curve_s = self.inputs['Curve'].sv_get()
        epsilon_s = self.inputs['Epsilon'].sv_get()
        smooth_s = self.inputs['Smooth'].sv_get()
        samples_s = self.inputs['Samples'].sv_get()
        if isinstance(curve_s[0], SvCurve):
            curve_s = [curve_s]
        epsilon_s = ensure_nesting_level(epsilon_s, 2)
        smooth_s = ensure_nesting_level(smooth_s, 2)
        samples_s = ensure_nesting_level(samples_s, 2)
        surface_out = []
        circle_out = []
        curve_out = []
        inputs = zip_long_repeat(curve_s, epsilon_s, smooth_s, samples_s)
        for curves, epsilons, smooths, samples_i in inputs:
            for curve, epsilon, smooth, samples in zip_long_repeat(curves, epsilons, smooths, samples_i):
                new_surface = self.make_surface(curve, epsilon, smooth, samples)
                # the trim curve is the unit circle in the surface's UV space
                circle = SvCircle(Matrix(), 1.0)
                new_curve = SvCurveOnSurface(circle, new_surface, axis=2)
                surface_out.append(new_surface)
                curve_out.append(new_curve)
                circle_out.append(circle)
        self.outputs['Surface'].sv_set(surface_out)
        self.outputs['TrimCurve'].sv_set(circle_out)
        self.outputs['Curve'].sv_set(curve_out)
def register():
    """Register this node class with Blender (module load hook)."""
    bpy.utils.register_class(SvExMinSurfaceFromCurveNode)
def METHOD_NAME():
    """Unregister this node class from Blender (module unload hook)."""
    bpy.utils.unregister_class(SvExMinSurfaceFromCurveNode)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Register algorithms."""
from abc import abstractmethod
from neural_compressor.utils.create_obj_from_config import get_algorithm
# {location: {algorithm_type: cls}}
registry_algorithms = {}
def algorithm_registry(algorithm_type, location):
    """Decorate and register an Algorithm subclass.

    Args:
        algorithm_type (str): Name under which the algorithm is registered.
        location (str): The phase/location from which the algorithm is called.

    Returns:
        callable: Class decorator that instantiates and registers the class,
        then returns it unchanged.
    """
    def decorator_algorithm(cls):
        algos_at_location = registry_algorithms.setdefault(location, {})
        if algorithm_type in algos_at_location:
            raise ValueError("Cannot have two algorithms with the same name")
        algos_at_location[algorithm_type] = cls()
        return cls

    return decorator_algorithm
class ALGORITHMS(object):
    """Lookup table over all registered algorithms."""

    # shared registry: {location: {algorithm_type: algorithm_instance}}
    algorithms = registry_algorithms

    def __getitem__(self, algorithm_type):
        """Return algorithm instance by algorithm_type name.

        Args:
            algorithm_type (str): The algorithm registration name.

        Returns:
            Algorithm: The registered algorithm instance.
        """
        result = None
        for location in self.algorithms:
            for key in self.algorithms[location]:
                if key == algorithm_type:
                    result = self.algorithms[location][key]
        assert result, "algorithm type only support {}".format(self.support_algorithms())
        return result

    @classmethod
    def support_algorithms(cls):
        """Get the names of all registered algorithms.

        Returns:
            set: Set of all registered algorithm type names.
        """
        # BUG FIX: previously this built a set of the per-location dicts,
        # which raised TypeError (dict is unhashable) whenever called --
        # including from the assertion message in __getitem__.  Collect the
        # registered algorithm names instead.
        return {
            algorithm_type
            for location_algos in cls.algorithms.values()
            for algorithm_type in location_algos
        }
class AlgorithmScheduler(object):
    """Dispatch registered algorithms at the right phase ("location")."""

    def __init__(self, conf):
        """Initialize AlgorithmScheduler.

        Args:
            conf (dict): Configuration of algorithm.
        """
        self._exec_algorithms = {}
        self._origin_model = None
        self._q_model = None
        self._dataloader = None
        self._adaptor = None
        self._calib_iter = None

    def METHOD_NAME(self, location, algorithm):
        """Append algorithm to list of executed algorithms.

        Args:
            location: The location (phase) from which to call the algorithm.
            algorithm: Algorithm instance to execute at that location.
        """
        self._exec_algorithms.setdefault(location, []).append(algorithm)

    def reset_exec_algorithms(self):
        """Clear all scheduled algorithms."""
        self._exec_algorithms = {}

    def __call__(self, location):
        """Run every algorithm scheduled for ``location`` on the model.

        Returns:
            model: The (possibly updated) quantized framework model.
        """
        assert self._q_model, "set q_model for algorithm"
        scheduled = self._exec_algorithms.get(location, [])
        if not scheduled:
            # nothing to do for this phase
            return self._q_model
        assert self._origin_model, "set origin model for algorithm"
        assert self._adaptor, "set adaptor for algorithm"
        assert self._calib_iter, "set calibration iteration for algorithm"
        for algo in scheduled:
            # each algorithm may replace the quantized model
            self._q_model = algo(
                self._origin_model, self._q_model, self._adaptor, self._dataloader, self._calib_iter
            )
        return self._q_model

    @property
    def origin_model(self):
        """The original (pre-quantization) model."""
        return self._origin_model

    @origin_model.setter
    def origin_model(self, model):
        """Set the original model."""
        self._origin_model = model

    @property
    def q_model(self):
        """The quantized model."""
        return self._q_model

    @q_model.setter
    def q_model(self, model):
        """Set the quantized model."""
        self._q_model = model

    @property
    def dataloader(self):
        """The calibration dataloader."""
        return self._dataloader

    @dataloader.setter
    def dataloader(self, dataloader):
        """Set the calibration dataloader."""
        self._dataloader = dataloader

    @property
    def adaptor(self):
        """The framework adaptor."""
        return self._adaptor

    @adaptor.setter
    def adaptor(self, adaptor):
        """Set the framework adaptor."""
        self._adaptor = adaptor

    @property
    def calib_iter(self):
        """The number of calibration iterations."""
        return self._calib_iter

    @calib_iter.setter
    def calib_iter(self, calib_iter):
        """Set the number of calibration iterations."""
        self._calib_iter = calib_iter
class Algorithm(object):
    """Base class of all algorithms; subclasses must implement ``__call__``."""

    @abstractmethod
    def __call__(self, *args, **kwargs):
        """Apply the algorithm; must be overridden by subclasses.

        Raises:
            NotImplementedError: Always, in this base class.
        """
        raise NotImplementedError
"""
torch.distributed utils
"""
from __future__ import annotations
import itertools
from typing import Optional
import os
import socket
from contextlib import contextmanager
import torch
from torch.distributed.algorithms.join import Join
from returnn.config import Config
import returnn.frontend as rf
class DistributedContext:
    """
    This class setups some helper functions for torch distributed training
    """

    def __init__(self, config):
        """
        :param Config config:
        """
        import torch.distributed as dist

        # when no backend is specified, both gloo and nccl backends will be created
        # the gloo backend will be used for collectives with CPU tensors and
        # the nccl backend will be used for collectives with CUDA tensors
        dist.init_process_group(backend=None)

        self._config = config
        # BUG FIX: env var values are strings; cast to int so that
        # local_rank() actually returns an int as documented
        # (e.g. usable directly as a CUDA device index).
        self._local_rank = int(os.environ["LOCAL_RANK"])
        self._local_size = int(os.environ["LOCAL_WORLD_SIZE"])
        self._rank = dist.get_rank()
        self._size = dist.get_world_size()

        print(
            "Torch distributed initialized. Hostname %s, pid %i, rank %i / size %i, local rank %s / local size %s."
            % (socket.gethostname(), os.getpid(), self._rank, self._size, self._local_rank, self._local_size)
        )

    def local_rank(self):
        """
        :rtype: int
        """
        return self._local_rank

    def rank(self):
        """
        :rtype: int
        """
        return self._rank

    def size(self):
        """
        :rtype: int
        """
        return self._size
_is_set_up = False
_ctx = None # type: Optional[DistributedContext]
def get_ctx(config=None):
    """Return the global DistributedContext, creating it on first use.

    :param Config|None config:
    :returns: the global context if Torch distributed is enabled, or None otherwise.
        If we did not setup the context yet, it will automatically create it.
    :rtype: DistributedContext|None
    """
    global _is_set_up, _ctx
    if _is_set_up:
        return _ctx
    if not config:
        from returnn.config import get_global_config

        config = get_global_config(raise_exception=False)
        if not config:
            return None

    _is_set_up = True
    distributed_enabled = config.typed_value("torch_distributed") is not None
    if distributed_enabled:
        _ctx = DistributedContext(config=config)
    return _ctx
def get_device_ids():
    """
    It depends on the specific setup what to return here,
    how CUDA_VISIBLE_DEVICES is set up, etc.
    This is currently a reasonable assumption,
    but we might extend the logic later,
    or make it configurable.

    :return: list containing only the local rank, used as the device id
    :rtype: list[int]
    """
    return [METHOD_NAME()]
def METHOD_NAME():
    """Return this process's local rank from the ``LOCAL_RANK`` env var.

    torch.distributed does not seem to provide a function for this.
    Via mpirun (OpenMPI), this env variable would be set.
    It should fail with an error otherwise.

    :rtype: int
    """
    local_rank = os.environ["LOCAL_RANK"]
    return int(local_rank)
def _find_tensors(obj):
"""
Recursively find all tensors contained in the specified object,
cf. torch.nn.parallel.distributed._find_tensors
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
@contextmanager
def ddp_train_forward_ctx(pt_model):
    """
    the original (unwrapped) module is passed to the train step, therefore here we set up the right context
    as what DistributedDataParallel.forward does internally

    :param pt_model: the torch DistributedDataParallel instance wrapping the user model
    """
    # NOTE(review): this stats/prepare_for_forward prelude is repeated again
    # just below inside the record_function block -- looks like an accidental
    # duplication (num_iterations is incremented twice per forward); confirm
    # against torch's DistributedDataParallel.forward.
    if torch.is_grad_enabled() and pt_model.require_backward_grad_sync:
        assert pt_model.logger is not None
        pt_model.logger.set_runtime_stats_and_log()
        pt_model.num_iterations += 1
        pt_model.reducer.prepare_for_forward()
    with torch.autograd.profiler.record_function("DistributedDataParallel.forward"):
        if torch.is_grad_enabled() and pt_model.require_backward_grad_sync:
            assert pt_model.logger is not None
            pt_model.logger.set_runtime_stats_and_log()
            pt_model.num_iterations += 1
            pt_model.reducer.prepare_for_forward()
        work = Join.notify_join_context(pt_model)
        if work:
            # noinspection PyProtectedMember
            pt_model.reducer._set_forward_pass_work_handle(work, pt_model._divide_by_initial_world_size)
        # noinspection PyProtectedMember
        if torch.is_grad_enabled() and pt_model.reducer._rebuild_buckets():
            pt_model._has_rebuilt_buckets = True
        # noinspection PyProtectedMember
        if pt_model._check_sync_bufs_pre_fwd():
            # noinspection PyProtectedMember
            pt_model._sync_buffers()
        # noinspection PyProtectedMember
        if pt_model._join_config.enable:
            # Notify joined ranks whether they should sync in backwards pass or not.
            # noinspection PyProtectedMember
            pt_model._check_global_requires_backward_grad_sync(is_joined_rank=False)
        # the caller's train step runs here, inside DDP's internal forward context
        # noinspection PyProtectedMember
        with pt_model._inside_ddp_forward():
            yield
        # noinspection PyProtectedMember
        if pt_model._check_sync_bufs_post_fwd():
            # noinspection PyProtectedMember
            pt_model._sync_buffers()
        if torch.is_grad_enabled() and pt_model.require_backward_grad_sync:
            pt_model.require_forward_param_sync = True
            # We'll return the output object verbatim since it is a freeform
            # object. We need to find any tensors in this object, though,
            # because we need to figure out which parameters were used during
            # this forward pass, to ensure we short circuit reduction for any
            # unused parameters. Only if `find_unused_parameters` is set.
            if pt_model.find_unused_parameters and not pt_model.static_graph:
                # Do not need to populate this for static graph.
                # assumes the first loss in the run ctx is the one driving backprop -- TODO confirm
                train_ctx = rf.get_run_ctx()
                loss = list(train_ctx.losses.values())[0].loss.raw_tensor
                # noinspection PyProtectedMember
                pt_model.reducer.prepare_for_backward(list(_find_tensors(loss)))
            else:
                pt_model.reducer.prepare_for_backward([])
        else:
            pt_model.require_forward_param_sync = False
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import inspect
import logging
import os
import sys
import traceback
import datetime as dt
from azure.core.exceptions import AzureError
from azure.cli.testsdk.exceptions import CliTestError, CliExecutionError, JMESPathCheckAssertionError
logger = logging.getLogger('azure.cli.testsdk')
logger.addHandler(logging.StreamHandler())
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
exceptions = []
test_map = dict()
SUCCESSED = "successed"
FAILED = "failed"
def try_manual(func):
    """Decorator preferring a manual override of ``func`` and recording results.

    If a function (or class) of the same name exists in the sibling ``manual``
    package, it is called instead of ``func``.  Each call records its outcome,
    timings and error details in the module-level ``test_map`` / ``exceptions``
    (consumed by ``calc_coverage`` and ``raise_if``).
    """
    def import_manual_function(origin_func):
        # map this submodule's path to the corresponding ..manual.* module
        from importlib import import_module
        decorated_path = inspect.getfile(origin_func).lower()
        module_path = __path__[0].lower()
        if not decorated_path.startswith(module_path):
            raise Exception("Decorator can only be used in submodules!")
        manual_path = os.path.join(
            decorated_path[module_path.rfind(os.path.sep) + 1:])
        manual_file_path, manual_file_name = os.path.split(manual_path)
        module_name, _ = os.path.splitext(manual_file_name)
        manual_module = "..manual." + \
            ".".join(manual_file_path.split(os.path.sep) + [module_name, ])
        return getattr(import_module(manual_module, package=__name__), origin_func.__name__)

    def get_func_to_call():
        # fall back to the decorated function when no manual override exists
        func_to_call = func
        try:
            func_to_call = import_manual_function(func)
            logger.info("Found manual override for %s(...)", func.__name__)
        except (ImportError, AttributeError):
            pass
        return func_to_call

    def METHOD_NAME(*args, **kwargs):
        func_to_call = get_func_to_call()
        logger.info("running %s()...", func.__name__)
        # BUG FIX: ret must be bound even when the call raises and the
        # exception is swallowed (TEST_EXCEPTION_CACHE=true); previously
        # `return ret` raised UnboundLocalError on that path.
        ret = None
        try:
            test_map[func.__name__] = dict()
            test_map[func.__name__]["result"] = SUCCESSED
            test_map[func.__name__]["error_message"] = ""
            test_map[func.__name__]["error_stack"] = ""
            test_map[func.__name__]["error_normalized"] = ""
            test_map[func.__name__]["start_dt"] = dt.datetime.utcnow()
            ret = func_to_call(*args, **kwargs)
        except (AssertionError, AzureError, CliTestError, CliExecutionError, SystemExit,
                JMESPathCheckAssertionError) as e:
            use_exception_cache = os.getenv("TEST_EXCEPTION_CACHE")
            if use_exception_cache is None or use_exception_cache.lower() != "true":
                raise
            test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
            test_map[func.__name__]["result"] = FAILED
            test_map[func.__name__]["error_message"] = str(e).replace("\r\n", " ").replace("\n", " ")[:500]
            test_map[func.__name__]["error_stack"] = traceback.format_exc().replace(
                "\r\n", " ").replace("\n", " ")[:500]
            logger.info("--------------------------------------")
            logger.info("step exception: %s", e)
            logger.error("--------------------------------------")
            logger.error("step exception in %s: %s", func.__name__, e)
            logger.info(traceback.format_exc())
            exceptions.append((func.__name__, sys.exc_info()))
        else:
            test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
        return ret

    if inspect.isclass(func):
        # classes cannot be wrapped transparently; resolve the override eagerly
        return get_func_to_call()
    return METHOD_NAME
def calc_coverage(filename):
    """Write a markdown coverage report over all recorded ``step_*`` results."""
    base_name = filename.split(".")[0]
    coverage_name = base_name + "_coverage.md"
    with open(coverage_name, "w") as coverage_file:
        coverage_file.write("|Scenario|Result|ErrorMessage|ErrorStack|ErrorNormalized|StartDt|EndDt|\n")
        total = len(test_map)
        covered = 0
        for step_name, record in test_map.items():
            # only step_* entries count towards coverage
            if not step_name.startswith("step_"):
                total -= 1
                continue
            if record["result"] == SUCCESSED:
                covered += 1
            coverage_file.write("|{step_name}|{result}|{error_message}|{error_stack}|{error_normalized}|{start_dt}|"
                                "{end_dt}|\n".format(step_name=step_name, **record))
        coverage_file.write("Coverage: {}/{}\n".format(covered, total))
    print("Create coverage\n", file=sys.stderr)
def raise_if():
    """Re-raise the first recorded exception, summarizing any further ones."""
    if not exceptions:
        return
    first_name, first_info = exceptions[0]
    if len(exceptions) == 1:
        raise first_info[1]
    message = "{}\nFollowed with exceptions in other steps:\n".format(str(first_info[1]))
    message += "\n".join("{}: {}".format(name, info[1]) for name, info in exceptions[1:])
    raise first_info[0](message).with_traceback(first_info[2])
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing Microsoft.AppPlatform operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # an explicit api-version in params takes precedence over the default
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.AppPlatform/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.appplatform.v2021_09_01_preview.AppPlatformManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # positional args are supplied by the generated client; kwargs are a fallback
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationDetail"]:
        """Lists all of the available REST API operations of the Microsoft.AppPlatform provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationDetail or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationDetail]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2021-09-01-preview")
        )
        cls: ClsType[_models.AvailableOperations] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # first page uses the templated URL; later pages follow next_link
            if not next_link:
                request = METHOD_NAME(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # returns (next page link, iterator over this page's items)
            deserialized = self._deserialize("AvailableOperations", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # perform one page request, mapping HTTP errors to exceptions
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.AppPlatform/operations"}
"""
Python representations of the JSON Schema Test Suite tests.
"""
from functools import partial
from pathlib import Path
import json
import os
import re
import subprocess
import sys
import unittest
import attr
from jsonschema.validators import _VALIDATORS
import jsonschema
def _find_suite():
    """Locate the JSON-Schema-Test-Suite checkout, or raise ValueError."""
    env_root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if env_root is not None:
        return Path(env_root)

    root = Path(jsonschema.__file__).parent.parent / "json"
    if root.is_dir():
        return root
    raise ValueError(  # pragma: no cover
        (
            "Can't find the JSON-Schema-Test-Suite directory. "
            "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
            "variable or run the tests from alongside a checkout "
            "of the suite."
        ),
    )
@attr.s(hash=True)
class Suite:
    """Wrapper around a checkout of the JSON-Schema-Test-Suite repository."""

    # NB: attr.ib order defines the generated __init__ signature -- do not reorder
    _root = attr.ib(default=attr.Factory(_find_suite))

    def _remotes(self):
        # shell out to the suite's own helper to collect the remote schemas
        jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite")
        remotes = subprocess.check_output(
            [sys.executable, str(jsonschema_suite), "remotes"],
        )
        return json.loads(remotes.decode("utf-8"))

    def benchmark(self, runner):  # pragma: no cover
        """Benchmark every draft's tests against every registered validator."""
        for name, Validator in _VALIDATORS.items():
            self.version(name=name).benchmark(
                runner=runner,
                Validator=Validator,
            )

    def version(self, name):
        """Return the Version for the draft directory ``tests/<name>``."""
        return Version(
            name=name,
            path=self._root.joinpath("tests", name),
            remotes=self._remotes(),
        )
@attr.s(hash=True)
class Version:
    """One draft of the suite (e.g. ``draft7``): its test files and remotes."""

    # NB: attr.ib order defines the generated __init__ signature -- do not reorder
    _path = attr.ib()
    _remotes = attr.ib()

    name = attr.ib()

    def benchmark(self, runner, **kwargs):  # pragma: no cover
        """Register every test of this draft with the benchmark runner."""
        for suite in self.tests():
            for test in suite:
                runner.bench_func(
                    test.fully_qualified_name,
                    partial(test.validate_ignoring_errors, **kwargs),
                )

    def tests(self):
        """Yield the test groups from every top-level *.json case file."""
        return (
            test
            for child in self._path.glob("*.json")
            for test in self._tests_in(
                subject=child.name[:-5],  # strip the ".json" suffix
                path=child,
            )
        )

    def format_tests(self):
        """Yield the test groups from the optional/format case files."""
        path = self._path.joinpath("optional", "format")
        return (
            test
            for child in path.glob("*.json")
            for test in self._tests_in(
                subject=child.name[:-5],  # strip the ".json" suffix
                path=child,
            )
        )

    def METHOD_NAME(self, name):
        """Yield the test groups from the optional case file ``name``."""
        return self._tests_in(
            subject=name,
            path=self._path.joinpath("optional", name + ".json"),
        )

    def to_unittest_testcase(self, *suites, **kwargs):
        """Build a unittest.TestCase class with one method per suite test."""
        name = kwargs.pop("name", "Test" + self.name.title().replace("-", ""))
        methods = {
            test.method_name: test.to_unittest_method(**kwargs)
            for suite in suites
            for tests in suite
            for test in tests
        }
        cls = type(name, (unittest.TestCase,), methods)

        try:
            # pretend the class was defined in the caller's module, so test
            # runners print re-runnable fully qualified names
            cls.__module__ = _someone_save_us_the_module_of_the_caller()
        except Exception:  # pragma: no cover
            # We're doing crazy things, so if they go wrong, like a function
            # behaving differently on some other interpreter, just make them
            # not happen.
            pass

        return cls

    def _tests_in(self, subject, path):
        # yields one generator of _Test per schema case in the file
        for each in json.loads(path.read_text(encoding="utf-8")):
            yield (
                _Test(
                    version=self,
                    subject=subject,
                    case_description=each["description"],
                    schema=each["schema"],
                    remotes=self._remotes,
                    **test,
                ) for test in each["tests"]
            )
@attr.s(hash=True, repr=False)
class _Test:
    """A single (schema, instance, expected-validity) test from the suite."""

    # NB: attr.ib order defines the generated __init__ signature -- do not reorder
    version = attr.ib()

    subject = attr.ib()
    case_description = attr.ib()
    description = attr.ib()

    data = attr.ib()
    schema = attr.ib(repr=False)

    valid = attr.ib()

    _remotes = attr.ib()

    comment = attr.ib(default=None)

    def __repr__(self):  # pragma: no cover
        return "<Test {}>".format(self.fully_qualified_name)

    @property
    def fully_qualified_name(self):  # pragma: no cover
        return " > ".join(
            [
                self.version.name,
                self.subject,
                self.case_description,
                self.description,
            ],
        )

    @property
    def method_name(self):
        # sanitize the descriptions into a valid Python identifier
        delimiters = r"[\W\- ]+"
        return "test_{}_{}_{}".format(
            re.sub(delimiters, "_", self.subject),
            re.sub(delimiters, "_", self.case_description),
            re.sub(delimiters, "_", self.description),
        )

    def to_unittest_method(self, skip=lambda test: None, **kwargs):
        """Return a unittest method asserting this test's expected validity."""
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)

        fn.__name__ = self.method_name
        reason = skip(self)
        if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            return fn
        elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0":
            return unittest.expectedFailure(fn)
        else:
            return unittest.skip(reason)(fn)

    def validate(self, Validator, **kwargs):
        """Validate this test's data against its schema with ``Validator``."""
        Validator.check_schema(self.schema)
        resolver = jsonschema.RefResolver.from_schema(
            schema=self.schema,
            store=self._remotes,
            id_of=Validator.ID_OF,
        )

        # XXX: #693 asks to improve the public API for this, since yeah, it's
        #      bad. Figures that since it's hard for end-users, we experience
        #      the pain internally here too.
        def prevent_network_access(uri):
            raise RuntimeError(f"Tried to access the network: {uri}")
        resolver.resolve_remote = prevent_network_access

        validator = Validator(schema=self.schema, resolver=resolver, **kwargs)
        if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            breakpoint()
        validator.validate(instance=self.data)

    def validate_ignoring_errors(self, Validator):  # pragma: no cover
        """Validate, swallowing ValidationError (used for benchmarking)."""
        try:
            self.validate(Validator=Validator)
        except jsonschema.ValidationError:
            pass
def _someone_save_us_the_module_of_the_caller():
"""
The FQON of the module 2nd stack frames up from here.
This is intended to allow us to dynamically return test case classes that
are indistinguishable from being defined in the module that wants them.
Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
the class that really is running.
Save us all, this is all so so so so so terrible.
"""
return sys._getframe(2).f_globals["__name__"] |
#!/usr/bin/env python3
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import hexlify, unhexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
# TCP socket states as they appear (hex-encoded) in the state column of
# /proc/net/tcp; the values mirror the kernel's TCP state enumeration.
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def METHOD_NAME(array):
    '''
    Return *array* with all empty-string entries removed, preserving order.
    '''
    kept = []
    for entry in array:
        if entry != '':
            kept.append(entry)
    return kept
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host)//4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser
    '''
    with open('/proc/net/' + typ, 'r') as f:
        lines = f.readlines()[1:]  # drop the column-header row
    entries = []
    for raw in lines:
        fields = METHOD_NAME(raw.split(' '))  # split and drop empty columns
        entries.append([
            fields[0],                      # slot id
            _convert_ip_port(fields[1]),    # local address
            _convert_ip_port(fields[2]),    # remote address
            fields[3],                      # connection state (hex string)
            int(fields[9]),                 # inode, used to match a process
        ])
    return entries
def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.
    '''
    inodes = set(get_socket_inodes(pid))
    # a socket counts as bound when it is LISTENing and its inode belongs
    # to the target process
    return [
        conn[1]
        for conn in netstat('tcp') + netstat('tcp6')
        if conn[3] == STATE_LISTEN and conn[4] in inodes
    ]
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up, as (name_bytes, ipv4_string) tuples.
    '''
    is_64bits = sys.maxsize > 2**32
    # per-entry record size for the SIOCGIFCONF result
    # (assumed sizeof(struct ifreq): 40 on 64-bit, 32 on 32-bit — Linux-specific)
    struct_size = 40 if is_64bits else 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    max_possible = 8  # initial value
    while True:
        bytes = max_possible * struct_size
        names = array.array('B', b'\0' * bytes)
        outbytes = struct.unpack('iL', fcntl.ioctl(
            s.fileno(),
            0x8912,  # SIOCGIFCONF
            struct.pack('iL', bytes, names.buffer_info()[0])
        ))[0]
        if outbytes == bytes:
            # buffer may have been too small to hold every interface: double
            # it and retry until the kernel returns fewer bytes than offered
            max_possible *= 2
        else:
            break
    namestr = names.tobytes()
    # each record: interface name in the first 16 bytes (NUL-padded),
    # IPv4 address at byte offsets 20-23
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr:  # IPv4
        parts = [int(octet) for octet in addr.split('.')]
    elif ':' in addr:  # IPv6
        prefix, suffix = [], []
        current = prefix
        pieces = addr.split(':')
        for idx, piece in enumerate(pieces):
            if piece == '':
                # empty component at either end is just syntax; in the
                # middle it is the '::' gap that switches us to the suffix
                if idx in (0, len(pieces) - 1):
                    continue
                assert current is prefix
                current = suffix
            else:
                # each component contributes two bytes
                value = int(piece, 16)
                current.append(value >> 8)
                current.append(value & 0xff)
        pad = 16 - len(prefix) - len(suffix)
        assert (current is prefix and pad == 0) or (current is suffix and pad > 0)
        parts = prefix + [0] * pad + suffix
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(parts)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support.

    Returns True when a datagram socket can be connected to the IPv6
    loopback address, False otherwise.
    '''
    import socket
    # By using SOCK_DGRAM this will not actually make a connection, but it will
    # fail if there is no route to IPv6 localhost.
    have_ipv6 = True
    try:
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        try:
            s.connect(('::1', 0))
        finally:
            s.close()  # close the probe socket instead of leaking it
    except socket.error:
        have_ipv6 = False
    return have_ipv6
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network vnet-gateway list-advertised-routes",
)
class ListAdvertisedRoutes(AAZCommand):
    """List the routes of a virtual network gateway advertised to the specified peer.

    :example: List the routes of a virtual network gateway advertised to the specified peer.
        az network vnet-gateway list-advertised-routes -g MyResourceGroup -n MyVnetGateway --peer 23.10.10.9
    """

    _aaz_info = {
        "version": "2022-01-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/getadvertisedroutes", "2022-01-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # long-running operation: wrap the operation generator in an LRO poller
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # the argument schema is built once and cached on the class
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the VNet gateway.",
            required=True,
            id_part="name",
        )
        _args_schema.peer = AAZStrArg(
            options=["--peer"],
            help="The IP address of the peer.",
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualNetworkGatewaysGetAdvertisedRoutes(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class VirtualNetworkGatewaysGetAdvertisedRoutes(AAZHttpOperation):
        # POSTs to .../getAdvertisedRoutes and drives location-based LRO polling.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # both 202 (accepted) and 200 (completed) responses go through the
            # same location-based polling; on_200 deserializes the final body
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        # NOTE(review): AAZHttpOperation subclasses normally name this
        # property `url`; METHOD_NAME looks like a placeholder — confirm.
        @property
        def METHOD_NAME(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualNetworkGatewayName", self.ctx.args.name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "peer", self.ctx.args.peer,
                    required=True,
                ),
                **self.serialize_query_param(
                    "api-version", "2022-01-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # store the deserialized payload so _output can read it back
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # response schema is built lazily and cached on the class
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.value = AAZListType()
            value = cls._schema_on_200.value
            value.Element = AAZObjectType()
            _element = cls._schema_on_200.value.Element
            _element.as_path = AAZStrType(
                serialized_name="asPath",
                flags={"read_only": True},
            )
            _element.local_address = AAZStrType(
                serialized_name="localAddress",
                flags={"read_only": True},
            )
            _element.network = AAZStrType(
                flags={"read_only": True},
            )
            _element.next_hop = AAZStrType(
                serialized_name="nextHop",
                flags={"read_only": True},
            )
            _element.origin = AAZStrType(
                flags={"read_only": True},
            )
            _element.source_peer = AAZStrType(
                serialized_name="sourcePeer",
                flags={"read_only": True},
            )
            _element.weight = AAZIntType(
                flags={"read_only": True},
            )
            return cls._schema_on_200
class _ListAdvertisedRoutesHelper:
    """Helper class for ListAdvertisedRoutes"""


__all__ = ["ListAdvertisedRoutes"]
from __future__ import absolute_import, print_function, unicode_literals
import getpass
import os
import sys
from contextlib import contextmanager
from io import open
import datadog.api
import yaml
from ansible.parsing.vault import AnsibleVaultError
from ansible_vault import Vault
from memoized import memoized
from six.moves import shlex_quote
from commcare_cloud.environment.paths import ANSIBLE_DIR
from commcare_cloud.environment.secrets.backends.abstract_backend import (
AbstractSecretsBackend,
)
from commcare_cloud.environment.secrets.secrets_schema import (
get_generated_variables,
get_known_secret_specs_by_name,
)
class AnsibleVaultSecretsBackend(AbstractSecretsBackend):
    """Secrets backend reading/writing an ansible-vault encrypted YAML file.

    The vault password comes from the ANSIBLE_VAULT_PASSWORD environment
    variable when set, otherwise the user is prompted (once, memoized).
    Loading the vault optionally records a one-time event to Datadog.
    """
    name = 'ansible-vault'

    def __init__(self, env_name, vault_file_path, record_to_datadog=False, ask_vault_pass=True):
        self.env_name = env_name
        self.vault_file_path = vault_file_path
        self.record_to_datadog = record_to_datadog
        # flipped to False once the Datadog event has been sent (or while
        # suppress_datadog_event() is active) so it fires at most once
        self.should_send_vault_loaded_event = True
        self.ask_vault_pass = ask_vault_pass

    @classmethod
    def from_environment(cls, environment):
        """Build a backend from a commcare-cloud environment object."""
        try:
            datadog_enabled = environment.public_vars.get('DATADOG_ENABLED')
        except IOError:
            # some test envs don't have public.yml
            datadog_enabled = False
        try:
            commcare_cloud_use_vault = environment.public_vars.get('commcare_cloud_use_vault', True)
        except IOError:
            commcare_cloud_use_vault = True
        return cls(
            environment.name, environment.paths.vault_yml,
            record_to_datadog=datadog_enabled,
            ask_vault_pass=commcare_cloud_use_vault,
        )

    def prompt_user_input(self):
        # call this for its side-effect: asking the user for the vault password
        # (and thus not requiring that thereafter)
        self._get_ansible_vault_password_and_record()

    def get_extra_ansible_args(self):
        """Return extra CLI args that make ansible read vars from the vault file."""
        extra_ansible_args = ('-e', '@{}'.format(self.vault_file_path))
        if self.ask_vault_pass:
            extra_ansible_args += (
                '--vault-password-file={}/echo_vault_password.sh'.format(ANSIBLE_DIR),
            )
        return extra_ansible_args

    def METHOD_NAME(self):
        """Return extra environment variables to export for ansible runs.

        NOTE(review): the name looks like a placeholder for
        ``get_extra_ansible_env_vars`` — confirm against callers before renaming.
        """
        if self.ask_vault_pass:
            return {
                'ANSIBLE_VAULT_PASSWORD': self._get_ansible_vault_password_and_record(),
            }
        else:
            return {}

    @staticmethod
    def get_generated_variables():
        """Return generated variables using legacy vault-style references."""
        return get_generated_variables(lambda secret_spec: secret_spec.get_legacy_reference())

    def _get_ansible_vault_password_and_record(self):
        """Get ansible vault password

        This method has a side-effect: it records a Datadog event with
        the commcare-cloud command that is currently being run.
        """
        self._get_vault_variables_and_record()
        return self._get_ansible_vault_password()

    @memoized
    def _get_vault_variables_and_record(self):
        """Get ansible vault variables

        This method has a side-effect: it records a Datadog event with
        the commcare-cloud command that is currently being run.
        """
        vault_vars = self._get_vault_variables()
        if "secrets" in vault_vars:
            self._record_vault_loaded_event(vault_vars["secrets"])
        return vault_vars

    @memoized
    def _get_ansible_vault_password(self):
        """Return the vault password from the environment or an interactive prompt."""
        return (
            os.environ.get('ANSIBLE_VAULT_PASSWORD') or
            getpass.getpass("Vault Password for '{}': ".format(self.env_name))
        )

    @memoized
    def _get_vault_variables(self):
        """Load and return the vault file contents as a dict."""
        # try unencrypted first for tests
        with open(self.vault_file_path, 'r', encoding='utf-8') as f:
            vault_vars = yaml.safe_load(f)
        if isinstance(vault_vars, dict):
            return vault_vars
        # encrypted vault: retry the password prompt until it decrypts,
        # unless the password came from the environment (then fail fast)
        while True:
            try:
                vault = Vault(self._get_ansible_vault_password())
                with open(self.vault_file_path, 'r', encoding='utf-8') as vf:
                    return vault.load(vf.read())
            except AnsibleVaultError:
                if os.environ.get('ANSIBLE_VAULT_PASSWORD'):
                    raise
                print('incorrect password')
                self._get_ansible_vault_password.reset_cache(self)

    def _get_secret(self, var_name):
        """Look up a secret, checking its legacy namespace before the top level."""
        context = self._get_vault_variables_and_record()
        known_secret_specs_by_name = get_known_secret_specs_by_name()
        if var_name in known_secret_specs_by_name:
            legacy_namespace = known_secret_specs_by_name[var_name].legacy_namespace
            if legacy_namespace in context and var_name in context[legacy_namespace]:
                return context[legacy_namespace][var_name]
        return context[var_name]

    def _set_secret(self, var, value):
        # No effort is made to preserve the original YAML format (comments, etc.),
        # and no edge cases are handled (e.g. vault does not exist, etc.)
        data = self._get_vault_variables() or {}
        data[var] = value
        vault = Vault(self._get_ansible_vault_password())
        # write with the same encoding the read paths use
        with open(self.vault_file_path, 'w', encoding='utf-8') as vf:
            vault.dump(data, vf)
        self._get_vault_variables.reset_cache(self)

    def _record_vault_loaded_event(self, secrets):
        """Send a one-time 'vault loaded' event to Datadog, if configured."""
        if (
            self.should_send_vault_loaded_event and
            secrets.get('DATADOG_API_KEY') and
            self.record_to_datadog
        ):
            self.should_send_vault_loaded_event = False
            datadog.initialize(
                api_key=secrets['DATADOG_API_KEY'],
                app_key=secrets['DATADOG_APP_KEY'],
            )
            datadog.api.Event.create(
                title="commcare-cloud vault loaded",
                text=' '.join([shlex_quote(arg) for arg in sys.argv]),
                tags=["environment:{}".format(self.env_name)],
                source_type_name='ansible',
            )

    @contextmanager
    def suppress_datadog_event(self):
        """Prevent "run event" from being sent to datadog

        This is only effective if `self.get_vault_variables()` has not
        yet been called outside of this context manager. If it has been
        called then the event has already been sent and this is a no-op.
        """
        value = self.should_send_vault_loaded_event
        self.should_send_vault_loaded_event = False
        try:
            yield
        finally:
            self.should_send_vault_loaded_event = value
from ipaddress import _IPAddressBase
import logging
import pytest
from conftest import hosting_provider
from .. import legacy_workers
from .. import domain_check
from .. import models as gc_models
from apps.accounts import models as ac_models
from unittest import mock
# every test in this module requires database access
pytestmark = pytest.mark.django_db
logger = logging.getLogger(__name__)
@pytest.fixture
def checker():
    """Provide a fresh GreenDomainChecker instance for each test."""
    return domain_check.GreenDomainChecker()
class TestDomainChecker:
    """Tests for GreenDomainChecker.check_domain lookups by IP and ASN."""

    def test_with_green_domain_by_ip(self, green_ip, checker):
        """
        Given a matching IP, do we return a green sitecheck?
        """
        res = checker.check_domain("172.217.168.238")
        assert isinstance(res, legacy_workers.SiteCheck)
        assert res.ip in (green_ip.ip_start, green_ip.ip_end)

    def test_with_green_domain_by_asn(self, green_asn, checker):
        """
        Given a matching ASN, do we return a green sitecheck?
        """
        green_asn.save()
        # mock response for GreenDomainChecker.asn_from_ip, to avoid
        # making dns lookups
        checker.asn_from_ip = mock.MagicMock(return_value=green_asn.asn)
        res = checker.check_domain("172.217.168.238")
        assert isinstance(res, legacy_workers.SiteCheck)
        assert res.hosting_provider_id == green_asn.hostingprovider.id

    def METHOD_NAME(self, checker):
        """
        Do we get a regular grey sitecheck result if we have no matches?
        """
        res = checker.check_domain("172.217.168.238")
        assert isinstance(res, legacy_workers.SiteCheck)
        assert res.green is False
        assert res.url == "172.217.168.238"
        assert res.ip == "172.217.168.238"

    def test_with_green_domain_by_asn_double(self, green_asn, checker):
        """An ASN response containing two ASNs still resolves to the green provider."""
        green_asn.save()
        checker.asn_from_ip = mock.MagicMock(return_value=f"{green_asn.asn} 12345")
        res = checker.check_domain("172.217.168.238")
        assert isinstance(res, legacy_workers.SiteCheck)
        assert res.hosting_provider_id == green_asn.hostingprovider.id

    def test_with_green_domain_by_non_resolving_asn(self, green_asn, checker):
        """
        Sometimes the service we use for resolving ASNs returns
        an empty result.
        """
        green_asn.save()
        checker.asn_from_ip = mock.MagicMock(return_value=None)
        res = checker.check_domain("100.113.75.254")
        assert isinstance(res, legacy_workers.SiteCheck)
class TestDomainCheckerOrderBySize:
    """
    Check that we can return the ip ranges from a check in the
    ascending correct order of size.
    """

    def test_order_ip_range_by_size(
        self,
        hosting_provider: ac_models.Hostingprovider,
        checker: domain_check.GreenDomainChecker,
        db,
    ):
        """Smaller IP ranges should sort before larger ones."""
        hosting_provider.save()
        small_ip_range = gc_models.GreencheckIp.objects.create(
            active=True,
            ip_start="127.0.1.2",
            ip_end="127.0.1.3",
            hostingprovider=hosting_provider,
        )
        small_ip_range.save()

        large_ip_range = gc_models.GreencheckIp.objects.create(
            active=True,
            ip_start="127.0.1.2",
            ip_end="127.0.1.200",
            hostingprovider=hosting_provider,
        )
        large_ip_range.save()

        ip_matches = gc_models.GreencheckIp.objects.filter(
            ip_end__gte="127.0.1.2", ip_start__lte="127.0.1.2",
        )
        res = checker.order_ip_range_by_size(ip_matches)
        # the smaller range sorts first
        assert res[0].ip_end == "127.0.1.3"

    def test_return_org_with_smallest_ip_range_first(
        self,
        hosting_provider: ac_models.Hostingprovider,
        checker: domain_check.GreenDomainChecker,
        db,
    ):
        """
        When we have two hosting providers, where one provider is using a
        subset of larger provider's IP range, we return the smaller
        provider first. This allows resellers to be visible.
        """
        hosting_provider.save()
        large_ip_range = gc_models.GreencheckIp.objects.create(
            active=True,
            ip_start="127.0.1.2",
            ip_end="127.0.1.200",
            hostingprovider=hosting_provider,
        )
        large_ip_range.save()

        small_hosting_provider = ac_models.Hostingprovider(
            archived=False,
            country="US",
            customer=False,
            icon="",
            iconurl="",
            model="groeneenergie",
            name="Smaller Reseller",
            partner="",
            showonwebsite=True,
            website="http://small-reseller.com",
        )
        small_hosting_provider.save()

        small_ip_range = gc_models.GreencheckIp.objects.create(
            active=True,
            ip_start="127.0.1.2",
            ip_end="127.0.1.3",
            hostingprovider=small_hosting_provider,
        )
        small_ip_range.save()

        res = checker.check_domain("127.0.1.2")
        # the reseller with the narrower range wins the match
        assert res.hosting_provider_id == small_hosting_provider.id
class TestDomainCheckByCarbonTxt:
    """Test that lookups via carbon txt work as expected"""

    def test_lookup_green_domain(
        self, green_domain_factory, green_ip_factory, mocker, checker
    ):
        """A green domain lookup returns its hosting provider."""
        # mock our network lookup, so we get a consistent response when
        # looking up our domains
        green_ip = green_ip_factory.create()
        # mock our request to avoid the network call
        mocker.patch(
            "apps.greencheck.domain_check.GreenDomainChecker.convert_domain_to_ip",
            return_value=green_ip.ip_start,
        )
        domain = green_domain_factory.create(hosted_by=green_ip.hostingprovider)
        provider = domain.hosting_provider
        # look up for domain.com
        res = checker.check_domain(domain.url)
        # check that we return the provider in the return value
        assert res.hosting_provider_id == provider.id

    def test_lookup_green_domain_with_no_provider(
        self, green_domain_factory, green_ip_factory, mocker, checker
    ):
        """
        When a domain has no provider, do we still return none?
        """
        green_ip = green_ip_factory.create()
        # mock our request to avoid the network call
        mocker.patch(
            "apps.greencheck.domain_check.GreenDomainChecker.convert_domain_to_ip",
            return_value=green_ip.ip_start,
        )
        domain = green_domain_factory.create(hosted_by=green_ip.hostingprovider)
        domain.hosting_provider.delete()
        domain.save()
        # look up for domain.com
        res = checker.check_domain(domain.url)
        # check that we get a response back and a grey result,
        # as there is no evidence left to support the green result
        assert res.green is False

    def test_lookup_green_domain_with_no_ip_lookup(
        self, green_domain_factory, green_ip_factory, mocker, checker
    ):
        """
        A check whose domain does not resolve should still produce a
        loggable (grey) sitecheck rather than crashing the logger.

        NOTE(review): this test performs a real DNS lookup for
        "portail.numerique-educatif.fr" — consider mocking it like the
        other tests to keep the suite hermetic.
        """
        green_ip = green_ip_factory.create()
        res = checker.check_domain("portail.numerique-educatif.fr")

        from apps.greencheck.workers import SiteCheckLogger

        # saving with None fails, as does "None", but passing 0 works,
        # and we want to log the fact that a check took place, instead
        # of silently erroring
        site_logger = SiteCheckLogger()
        site_logger.log_sitecheck_to_database(res)

        # check that we get a response back and a grey result,
        # as there is no evidence left to support the green result
        assert res.green is False
#!/usr/bin/env python
try:
import os
from sonic_platform_base.sonic_xcvr.sfp_optoe_base import SfpOptoeBase
from sonic_platform.platform_thrift_client import thrift_try
from sonic_platform.platform_thrift_client import pltfm_mgr_try
except ImportError as e:
raise ImportError (str(e) + "- required module not found")
# transceiver type identifiers used for Sfp.sfp_type
SFP_TYPE = "SFP"
QSFP_TYPE = "QSFP"
QSFP_DD_TYPE = "QSFP_DD"
# read_eeprom pages through the cache 128 bytes at a time
EEPROM_PAGE_SIZE = 128
class Sfp(SfpOptoeBase):
    """
    BFN Platform-specific SFP class
    """

    def __init__(self, port_num):
        SfpOptoeBase.__init__(self)
        self.index = port_num
        self.port_num = port_num
        self.sfp_type = QSFP_TYPE
        self.SFP_EEPROM_PATH = "/var/run/platform/sfp/"
        # path of the on-disk EEPROM cache file; populated lazily by
        # _cached_api_supported when the cached thrift API is unavailable
        self.eeprom_path = None
        self.__cached_api_supported = None

    @property
    def _cached_api_supported(self):
        """Probe (once) whether the thrift cached-EEPROM API is available.

        When it is not, set up a file-based EEPROM cache under
        SFP_EEPROM_PATH as a fallback.
        """
        def cached_num_bytes_get(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_cached_num_bytes_get(1, 0, 0, 0)
        if self.__cached_api_supported is None:
            try:
                thrift_try(cached_num_bytes_get, 1)
                self.__cached_api_supported = True
            except Exception:
                self.__cached_api_supported = False
                # exist_ok makes directory creation race-free; the previous
                # code compared against errno.EEXIST without ever importing
                # errno, which raised NameError on any makedirs failure
                os.makedirs(self.SFP_EEPROM_PATH, exist_ok=True)
                self.eeprom_path = self.SFP_EEPROM_PATH + "sfp{}-eeprom-cache".format(self.index)
        return self.__cached_api_supported

    def get_presence(self):
        """
        Retrieves the presence of the sfp
        """
        presence = False

        def qsfp_presence_get(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_presence_get(self.index)
        try:
            presence = thrift_try(qsfp_presence_get)
        except Exception as e:
            print(e.__doc__)
            # Python 3 exceptions have no .message attribute; print the
            # exception itself (the old code raised AttributeError here)
            print(e)
        return presence

    def get_lpmode(self):
        """
        Retrieves the lpmode (low power mode) status of this SFP
        """
        def qsfp_lpmode_get(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_lpmode_get(self.index)
        return thrift_try(qsfp_lpmode_get)

    def set_lpmode(self, lpmode):
        """
        Sets the lpmode (low power mode) of SFP
        """
        def qsfp_lpmode_set(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_lpmode_set(self.index, lpmode)
        status = thrift_try(qsfp_lpmode_set)
        return (status == 0)

    def get_eeprom_path(self):
        """Dump the module EEPROM (via thrift) into the cache file and return its path."""
        def qsfp_info_get(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_info_get(self.index)
        eeprom_hex = thrift_try(qsfp_info_get)
        eeprom_raw = bytearray.fromhex(eeprom_hex)
        with open(self.eeprom_path, 'wb') as fp:
            fp.write(eeprom_raw)
        return self.eeprom_path

    def read_eeprom(self, offset, num_bytes):
        """Read num_bytes of EEPROM at offset, preferring the cached thrift API."""
        if not self.get_presence():
            return None
        if not self._cached_api_supported:
            return super().read_eeprom(offset, num_bytes)

        def cached_num_bytes_get(page, offset, num_bytes):
            def qsfp_cached_num_bytes_get(client):
                return client.pltfm_mgr.pltfm_mgr_qsfp_cached_num_bytes_get(self.index, page, offset, num_bytes)
            return bytearray.fromhex(thrift_try(qsfp_cached_num_bytes_get))

        page_offset = offset % EEPROM_PAGE_SIZE
        if page_offset + num_bytes > EEPROM_PAGE_SIZE:
            # the read straddles a 128-byte page boundary: take what is left
            # of this page, then recurse for the remainder
            curr_page_num_bytes_left = EEPROM_PAGE_SIZE - page_offset
            curr_page_bytes = cached_num_bytes_get(offset // EEPROM_PAGE_SIZE, page_offset, curr_page_num_bytes_left)
            return curr_page_bytes + self.read_eeprom(offset + curr_page_num_bytes_left, num_bytes - curr_page_num_bytes_left)
        return cached_num_bytes_get(offset // EEPROM_PAGE_SIZE, page_offset, num_bytes)

    def write_eeprom(self, offset, num_bytes, write_buffer):
        # Not supported at the moment
        return False

    def get_name(self):
        """
        Retrieves the name of the device
        Returns:
            string: The name of the device
        """
        return "sfp{}".format(self.index)

    def METHOD_NAME(self):
        """
        Retrieves the reset status of SFP
        """
        def get_qsfp_reset(pltfm_mgr):
            return pltfm_mgr.pltfm_mgr_qsfp_reset_get(self.index)
        _, status = pltfm_mgr_try(get_qsfp_reset, False)
        return status

    def reset(self):
        """
        Reset SFP and return all user module settings to their default state.
        """
        def qsfp_reset(client):
            # pulse the reset line: assert then deassert
            client.pltfm_mgr.pltfm_mgr_qsfp_reset(self.index, True)
            return client.pltfm_mgr.pltfm_mgr_qsfp_reset(self.index, False)
        err = thrift_try(qsfp_reset)
        return not err

    def get_status(self):
        """
        Retrieves the operational status of the device
        """
        # operational iff the module is not currently held in reset
        return not self.METHOD_NAME()

    def get_position_in_parent(self):
        """
        Retrieves 1-based relative physical position in parent device.
        Returns:
            integer: The 1-based relative physical position in parent
            device or -1 if cannot determine the position
        """
        return self.index

    def is_replaceable(self):
        """
        Indicate whether this device is replaceable.
        Returns:
            bool: True if it is replaceable.
        """
        return True

    def get_error_description(self):
        """
        Retrieves the error descriptions of the SFP module
        Returns:
            String that represents the current error descriptions of vendor specific errors
            In case there are multiple errors, they should be joined by '|',
            like: "Bad EEPROM|Unsupported cable"
        """
        if not self.get_presence():
            return self.SFP_STATUS_UNPLUGGED
        return self.SFP_STATUS_OK

    def tx_disable(self, tx_disable):
        """
        Disable SFP TX for all channels
        Args:
            tx_disable : A Boolean, True to enable tx_disable mode, False to disable
                         tx_disable mode.
        Returns:
            A boolean, True if tx_disable is set successfully, False if not
        """
        if self.sfp_type == QSFP_TYPE:
            return self.tx_disable_channel(0xF, tx_disable)
        return False

    def tx_disable_channel(self, channel, disable):
        """
        Sets the tx_disable for specified SFP channels
        Args:
            channel : A hex of 4 bits (bit 0 to bit 3) which represent channel 0 to 3,
                      e.g. 0x5 for channel 0 and channel 2.
            disable : A boolean, True to disable TX channels specified in channel,
                      False to enable
        Returns:
            A boolean, True if successful, False if not
        """
        def qsfp_tx_disable_channel(client):
            return client.pltfm_mgr.pltfm_mgr_qsfp_tx_disable(self.index, channel, disable)
        if self.sfp_type == QSFP_TYPE:
            status = thrift_try(qsfp_tx_disable_channel)
            return (status == 0)
        return False

    def get_power_override(self):
        """Return the power-override flag reported by the platform manager."""
        def get_qsfp_power_override(pltfm_mgr):
            return pltfm_mgr.pltfm_mgr_qsfp_pwr_override_get(self.index)
        _, pwr_override = pltfm_mgr_try(get_qsfp_power_override)
        return pwr_override

    def set_power_override(self, power_override, power_set):
        """Set the power-override mode; returns the platform-manager status."""
        def set_qsfp_power_override(pltfm_mgr):
            return pltfm_mgr.pltfm_mgr_qsfp_pwr_override_set(
                self.index, power_override, power_set
            )
        _, status = pltfm_mgr_try(set_qsfp_power_override)
        return status
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from datadog_api_client.model_utils import (
ModelComposed,
cached_property,
)
class LogsProcessor(ModelComposed):
    """Composed (oneOf) model that dispatches to a concrete logs-processor type.

    The accepted keyword arguments are the union of the fields of every
    concrete processor listed in the oneOf schema below.
    """

    def __init__(self, **kwargs):
        """
        Definition of a logs processor.

        :param grok: Set of rules for the grok parser.
        :type grok: LogsGrokParserRules

        :param is_enabled: Whether or not the processor is enabled.
        :type is_enabled: bool, optional

        :param name: Name of the processor.
        :type name: str, optional

        :param samples: List of sample logs to test this grok parser.
        :type samples: [str], optional

        :param source: Name of the log attribute to parse.
        :type source: str

        :param type: Type of logs grok parser.
        :type type: LogsGrokParserType

        :param sources: Array of source attributes.
        :type sources: [str]

        :param override_on_conflict: Override or not the target element if already set,
        :type override_on_conflict: bool, optional

        :param preserve_source: Remove or preserve the remapped source element.
        :type preserve_source: bool, optional

        :param source_type: Defines if the sources are from log `attribute` or `tag`.
        :type source_type: str, optional

        :param target: Final attribute or tag name to remap the sources to.
        :type target: str

        :param target_format: If the `target_type` of the remapper is `attribute`, try to cast the value to a new specific type.
            If the cast is not possible, the original type is kept. `string`, `integer`, or `double` are the possible types.
            If the `target_type` is `tag`, this parameter may not be specified.
        :type target_format: TargetFormatType, optional

        :param target_type: Defines if the final attribute or tag name is from log `attribute` or `tag`.
        :type target_type: str, optional

        :param normalize_ending_slashes: Normalize the ending slashes or not.
        :type normalize_ending_slashes: bool, none_type, optional

        :param is_encoded: Define if the source attribute is URL encoded or not.
        :type is_encoded: bool, optional

        :param categories: Array of filters to match or not a log and their
            corresponding `name` to assign a custom value to the log.
        :type categories: [LogsCategoryProcessorCategory]

        :param expression: Arithmetic operation between one or more log attributes.
        :type expression: str

        :param is_replace_missing: If `true`, it replaces all missing attributes of expression by `0`, `false`
            skip the operation if an attribute is missing.
        :type is_replace_missing: bool, optional

        :param template: A formula with one or more attributes and raw text.
        :type template: str

        :param filter: Filter for logs.
        :type filter: LogsFilter, optional

        :param processors: Ordered list of processors in this pipeline.
        :type processors: [LogsProcessor], optional

        :param default_lookup: Value to set the target attribute if the source value is not found in the list.
        :type default_lookup: str, optional

        :param lookup_table: Mapping table of values for the source attribute and their associated target attribute values,
            formatted as `["source_key1,target_value1", "source_key2,target_value2"]`
        :type lookup_table: [str]

        :param lookup_enrichment_table: Name of the Reference Table for the source attribute and their associated target attribute values.
        :type lookup_enrichment_table: str
        """
        super().__init__(kwargs)

    # NOTE(review): upstream names this cached property `_composed_schemas`;
    # METHOD_NAME looks like a placeholder — confirm before renaming.
    @cached_property
    def METHOD_NAME(_):
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        from datadog_api_client.v1.model.logs_grok_parser import LogsGrokParser
        from datadog_api_client.v1.model.logs_date_remapper import LogsDateRemapper
        from datadog_api_client.v1.model.logs_status_remapper import LogsStatusRemapper
        from datadog_api_client.v1.model.logs_service_remapper import LogsServiceRemapper
        from datadog_api_client.v1.model.logs_message_remapper import LogsMessageRemapper
        from datadog_api_client.v1.model.logs_attribute_remapper import LogsAttributeRemapper
        from datadog_api_client.v1.model.logs_url_parser import LogsURLParser
        from datadog_api_client.v1.model.logs_user_agent_parser import LogsUserAgentParser
        from datadog_api_client.v1.model.logs_category_processor import LogsCategoryProcessor
        from datadog_api_client.v1.model.logs_arithmetic_processor import LogsArithmeticProcessor
        from datadog_api_client.v1.model.logs_string_builder_processor import LogsStringBuilderProcessor
        from datadog_api_client.v1.model.logs_pipeline_processor import LogsPipelineProcessor
        from datadog_api_client.v1.model.logs_geo_ip_parser import LogsGeoIPParser
        from datadog_api_client.v1.model.logs_lookup_processor import LogsLookupProcessor
        from datadog_api_client.v1.model.reference_table_logs_lookup_processor import ReferenceTableLogsLookupProcessor
        from datadog_api_client.v1.model.logs_trace_remapper import LogsTraceRemapper

        return {
            "oneOf": [
                LogsGrokParser,
                LogsDateRemapper,
                LogsStatusRemapper,
                LogsServiceRemapper,
                LogsMessageRemapper,
                LogsAttributeRemapper,
                LogsURLParser,
                LogsUserAgentParser,
                LogsCategoryProcessor,
                LogsArithmeticProcessor,
                LogsStringBuilderProcessor,
                LogsPipelineProcessor,
                LogsGeoIPParser,
                LogsLookupProcessor,
                ReferenceTableLogsLookupProcessor,
                LogsTraceRemapper,
            ],
        }
6,744 | register | from trex_stl_lib.api import *
import argparse
class STLBench(object):
    """TRex stateless benchmark profile.

    Builds continuous UDP streams with a selectable packet size (fixed or
    IMIX) and a selectable field-engine (VM) behavior, controlled via the
    ``--size`` and ``--vm`` tunables.
    """

    # Source/destination IPv4 ranges; swapped according to traffic direction.
    ip_range = {}
    ip_range['src'] = {'start': '16.0.0.0', 'end': '16.0.0.254'}
    ip_range['dst'] = {'start': '48.0.0.0', 'end': '48.0.0.254'}
    # UDP source-port range used by the 'tuple' VM mode.
    ports = {'min': 1234, 'max': 65500}
    # Packet-size bounds used by the 'size' VM mode.
    pkt_size = {'min': 64, 'max': 9216}
    # IMIX mix: per-stream packet size, pps weight and inter-stream gap.
    imix_table = [{'size': 68, 'pps': 28, 'isg': 0},
                  {'size': 590, 'pps': 20, 'isg': 0.1},
                  {'size': 1514, 'pps': 4, 'isg': 0.2}]

    def create_stream(self, size, vm, src, dst, pps=1, isg=0):
        """Return one continuous UDP stream padded to ``size`` bytes.

        :param size: frame size in bytes (4 bytes are reserved for the FCS)
        :param vm: STLVM field-engine program (may be empty)
        :param src: IPv4 source address
        :param dst: IPv4 destination address
        :param pps: packets per second of the continuous transmitter
        :param isg: inter-stream gap
        """
        # Create base packet and pad it to size; keep 4 bytes free for FCS.
        base_pkt = Ether() / IP(src=src, dst=dst) / UDP(dport=12, sport=1025, chksum=0)
        pad = max(0, size - len(base_pkt) - 4) * 'x'
        pkt = STLPktBuilder(pkt=base_pkt / pad,
                            vm=vm)
        return STLStream(packet=pkt,
                         mode=STLTXCont(pps=pps),
                         isg=isg)

    def get_streams(self, direction, tunables, **kwargs):
        """Build the stream list for one port, honoring --size/--vm tunables."""
        # NOTE(review): 'os' is assumed to come from the trex_stl_lib.api
        # star import -- confirm before relying on it elsewhere.
        parser = argparse.ArgumentParser(description='Argparser for {}'.format(os.path.basename(__file__)),
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Fixed: help text said "60, 590, 1514" while imix_table uses 68,
        # and "integ" was a typo for "integer".
        parser.add_argument('--size',
                            type=str,
                            default=64,
                            help="""define the packet's size in the stream.
choose imix or positive integer
imix - create streams with packets size 68, 590, 1514.
positive integer number - the packets size in the stream.""")
        parser.add_argument('--vm',
                            type=str,
                            default=None,
                            choices={'cached', 'var1', 'var2', 'random', 'tuple', 'size'},
                            help='define the field engine behavior')
        args = parser.parse_args(tunables)
        # Reverse direction swaps the source and destination ranges.
        if direction == 0:
            src, dst = self.ip_range['src'], self.ip_range['dst']
        else:
            src, dst = self.ip_range['dst'], self.ip_range['src']
        vm_var = STLVM()
        size, vm = args.size, args.vm
        if size != "imix":
            size = int(size)
        if not vm or vm == 'none':
            pass
        elif vm == 'var1':
            # Increment the source IP only.
            vm_var.var(name='src', min_value=src['start'], max_value=src['end'], size=4, op='inc')
            vm_var.write(fv_name='src', pkt_offset='IP.src')
            vm_var.fix_chksum()
        elif vm == 'var2':
            # Increment both source and destination IPs.
            vm_var.var(name='src', min_value=src['start'], max_value=src['end'], size=4, op='inc')
            vm_var.var(name='dst', min_value=dst['start'], max_value=dst['end'], size=4, op='inc')
            vm_var.write(fv_name='src', pkt_offset='IP.src')
            vm_var.write(fv_name='dst', pkt_offset='IP.dst')
            vm_var.fix_chksum()
        elif vm == 'random':
            # Randomize the source IP.
            vm_var.var(name='src', min_value=src['start'], max_value=src['end'], size=4, op='random')
            vm_var.write(fv_name='src', pkt_offset='IP.src')
            vm_var.fix_chksum()
        elif vm == 'tuple':
            # Vary (source IP, UDP source port) pairs together.
            vm_var.tuple_var(ip_min=src['start'], ip_max=src['end'], port_min=self.ports['min'], port_max=self.ports['max'], name='tuple')
            vm_var.write(fv_name='tuple.ip', pkt_offset='IP.src')
            vm_var.write(fv_name='tuple.port', pkt_offset='UDP.sport')
            vm_var.fix_chksum()
        elif vm == 'size':
            # Random frame size: build at max size, then trim per packet and
            # patch the IP/UDP length fields accordingly.
            if size == 'imix':
                raise STLError("Can't use VM of type 'size' with IMIX.")
            size = self.pkt_size['max']
            l3_len_fix = -len(Ether())
            l4_len_fix = l3_len_fix - len(IP())
            vm_var.var(name='fv_rand', min_value=(self.pkt_size['min'] - 4), max_value=(self.pkt_size['max'] - 4), size=2, op='random')
            vm_var.trim(fv_name='fv_rand')
            vm_var.write(fv_name='fv_rand', pkt_offset='IP.len', add_val=l3_len_fix)
            vm_var.write(fv_name='fv_rand', pkt_offset='UDP.len', add_val=l4_len_fix)
            vm_var.fix_chksum()
        elif vm == 'cached':
            vm_var.var(name='src', min_value=src['start'], max_value=src['end'], size=4, op='inc')
            vm_var.write(fv_name='src', pkt_offset='IP.src')
            vm_var.fix_chksum()
            # Cache the first 255 generated packets and replay them.
            vm_var.set_cached(255)
        else:
            raise Exception("VM '%s' not available" % vm)
        if size == 'imix':
            return [self.create_stream(p['size'], vm_var, src=src['start'], dst=dst['start'], pps=p['pps'], isg=p['isg']) for p in self.imix_table]
        return [self.create_stream(size, vm_var, src=src['start'], dst=dst['start'])]
# dynamic load - used for trex console or simulator
def METHOD_NAME():
    """Factory entry point used by the TRex console/simulator to load this profile."""
    return STLBench()
|
6,745 | create sample data | """
Unittests for populate_created_on_site_user_attribute management command.
"""
from unittest import mock
import ddt
import pytest
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.management import CommandError, call_command
from django.test import TestCase
from common.djangoapps.student.models import Registration, UserAttribute
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
CREATED_ON_SITE = 'created_on_site'
@ddt.ddt
class TestPopulateUserAttribute(SiteMixin, TestCase):
    """
    Test populate_created_on_site_user_attribute management command.
    """

    def setUp(self):
        super().setUp()
        self.METHOD_NAME()
        self.users = User.objects.all()
        self.registered_users = Registration.objects.all()
        # Comma-separated id / activation-key lists, the format the
        # management command accepts on the CLI.
        self.user_ids = ','.join([str(user.id) for user in self.users])
        self.activation_keys = ','.join([registered_user.activation_key for registered_user in self.registered_users])

    def METHOD_NAME(self):
        """
        Creates the users and register them.
        """
        for __ in range(3):
            Registration().register(UserFactory.create())

    def test_command_by_user_ids(self):
        """
        Test population of created_on_site attribute by user ids.
        """
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", self.user_ids,
            "--site-domain", self.site.domain
        )
        for user in self.users:
            assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) == self.site.domain
        # Populate 'created_on_site' attribute with different site domain
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", self.user_ids,
            "--site-domain", self.site_other.domain
        )
        for user in self.users:
            # 'created_on_site' attribute already exists. Attribute's value will not change
            assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) != self.site_other.domain

    def test_command_by_activation_keys(self):
        """
        Test population of created_on_site attribute by activation keys.
        """
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", self.activation_keys,
            "--site-domain", self.site.domain
        )
        for register_user in self.registered_users:
            assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) == self.site.domain
        # Populate 'created_on_site' attribute with different site domain
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", self.activation_keys,
            "--site-domain", self.site_other.domain
        )
        for register_user in self.registered_users:
            # 'created_on_site' attribute already exists. Attribute's value will not change
            assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) != self.site_other.domain

    def test_command_with_incomplete_argument(self):
        """
        Test management command raises CommandError without '--users' and '--activation_keys' arguments.
        """
        with pytest.raises(CommandError):
            call_command(
                "populate_created_on_site_user_attribute",
                "--site-domain", self.site.domain
            )

    def test_command_with_invalid_arguments(self):
        """
        Test management command with invalid user ids and activation keys.
        """
        user = self.users[0]
        call_command(
            "populate_created_on_site_user_attribute",
            "--users", f'9{user.id}',  # invalid id
            "--site-domain", self.site.domain
        )
        assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) is None
        register_user = self.registered_users[0]
        call_command(
            "populate_created_on_site_user_attribute",
            "--activation-keys", f"invalid-{register_user.activation_key}",  # invalid key
            "--site-domain", self.site.domain
        )
        assert UserAttribute.get_user_attribute(register_user.user, CREATED_ON_SITE) is None

    def test_command_without_site_domain(self):
        """
        Test management command raises CommandError without '--site-domain' argument.
        """
        with pytest.raises(CommandError):
            call_command(
                "populate_created_on_site_user_attribute",
                "--user", self.user_ids,
                "--activation-keys", self.activation_keys
            )

    @ddt.data('y', 'n')
    def test_with_invalid_site_domain(self, populate):
        """
        Test management command with invalid site domain.

        The command prompts for confirmation; 'y' populates anyway, 'n' skips.
        """
        fake_site_domain = 'fake-site-domain'
        with mock.patch('six.moves.input', return_value=populate):
            call_command(
                "populate_created_on_site_user_attribute",
                "--users", self.user_ids,
                "--site-domain", fake_site_domain
            )
        for user in self.users:
            if populate == 'y':
                assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) == fake_site_domain
            else:
                assert UserAttribute.get_user_attribute(user, CREATED_ON_SITE) is None
6,746 | test log level not found | """Test the logging."""
import pytest
import json
import pydantic.v1 as pd
import numpy as np
import tidy3d as td
from tidy3d.exceptions import Tidy3dError
from tidy3d.log import DEFAULT_LEVEL, _get_level_int, set_logging_level
def test_log():
    """Smoke-test every severity method on the tidy3d logger."""
    emitters = [
        (td.log.debug, "debug test"),
        (td.log.info, "info test"),
        (td.log.warning, "warning test"),
        (td.log.error, "error test"),
        (td.log.critical, "critical test"),
    ]
    for emit, message in emitters:
        emit(message)
    # Numeric-level entry point.
    td.log.log(0, "zero test")
def test_log_config(tmp_path):
    """Setting a level and a log file configures the two handlers independently."""
    td.config.logging_level = "DEBUG"
    td.set_logging_file(str(tmp_path / "test.log"))
    assert len(td.log.handlers) == 2
    assert td.log.handlers["console"].level == _get_level_int("DEBUG")
    # The file handler keeps the default level, not the console's DEBUG.
    assert td.log.handlers["file"].level == _get_level_int(DEFAULT_LEVEL)
    # Remove the file handler so later tests are unaffected.
    del td.log.handlers["file"]
def METHOD_NAME():
    """An unknown logging level name must raise ``ValueError``."""
    with pytest.raises(ValueError):
        set_logging_level("NOT_A_LEVEL")
def test_set_logging_level_deprecated():
    """The module-level ``td.set_logging_level`` alias raises DeprecationWarning."""
    with pytest.raises(DeprecationWarning):
        td.set_logging_level("WARNING")
def test_exception_message():
    """Stringifying a Tidy3dError yields exactly the message it was built with."""
    msg = "message"
    err = Tidy3dError(msg)
    assert str(err) == msg
def test_logging_upper():
    """Make sure we get an error if lowercase."""
    td.config.logging_level = "WARNING"
    # Level names must be uppercase; lowercase is rejected.
    with pytest.raises(ValueError):
        td.config.logging_level = "warning"
def test_logging_unrecognized():
    """If unrecognized option, raise validation error."""
    with pytest.raises(pd.ValidationError):
        td.config.logging_level = "blah"
def test_logging_warning_capture():
    """Build a simulation that triggers a known set of validation warnings and
    check that ``td.log.set_capture`` records exactly 30 of them, and that
    capturing does not change validation errors.

    The inline comments below tally the expected warning count per component.
    """
    # create sim with warnings
    domain_size = 12
    wavelength = 1
    f0 = td.C_0 / wavelength
    fwidth = f0 / 10.0
    source_time = td.GaussianPulse(freq0=f0, fwidth=fwidth)
    freqs = np.linspace(f0 - fwidth, f0 + fwidth, 11)
    # 1 warning: too long run_time
    run_time = 10000 / fwidth
    # 1 warning: frequency outside of source frequency range
    mode_mnt = td.ModeMonitor(
        center=(0, 0, 0),
        size=(domain_size, 0, domain_size),
        freqs=list(freqs) + [0.1],
        mode_spec=td.ModeSpec(num_modes=3),
        name="mode",
    )
    # 1 warning: too high num_freqs
    mode_source = td.ModeSource(
        size=(domain_size, 0, domain_size),
        source_time=source_time,
        mode_spec=td.ModeSpec(num_modes=2, precision="single"),
        mode_index=1,
        num_freqs=50,
        direction="-",
    )
    # 1 warning: ignoring "normal_dir"
    monitor_flux = td.FluxMonitor(
        center=(0, 0, 0),
        size=(8, 8, 8),
        freqs=list(freqs),
        name="flux",
        normal_dir="+",
    )
    # 1 warning: large monitor size
    monitor_time = td.FieldTimeMonitor(
        center=(0, 0, 0),
        size=(2, 2, 2),
        stop=1 / fwidth,
        name="time",
    )
    # 1 warning: too big proj distance
    proj_mnt = td.FieldProjectionCartesianMonitor(
        center=(0, 0, 0),
        size=(2, 2, 2),
        freqs=[250e12, 300e12],
        name="n2f_monitor",
        custom_origin=(1, 2, 3),
        x=[-1, 0, 1],
        y=[-2, -1, 0, 1, 2],
        proj_axis=2,
        proj_distance=1e10,
        far_field_approx=False,
    )
    # 2 warnings * 4 sources = 8 total: too close to each PML
    # 1 warning * 3 DFT monitors = 3 total: medium frequency range does not cover monitors freqs
    box = td.Structure(
        geometry=td.Box(center=(0, 0, 0), size=(11.5, 11.5, 11.5)),
        medium=td.Medium(permittivity=2, frequency_range=[0.5, 1]),
    )
    # 2 warnings: inside pml
    box_in_pml = td.Structure(
        geometry=td.Box(center=(0, 0, 0), size=(domain_size * 1.001, 5, 5)),
        medium=td.Medium(permittivity=10),
    )
    # 2 warnings: exactly on sim edge
    box_on_boundary = td.Structure(
        geometry=td.Box(center=(0, 0, 0), size=(domain_size, 5, 5)),
        medium=td.Medium(permittivity=20),
    )
    # 1 warning: outside of domain
    box_outside = td.Structure(
        geometry=td.Box(center=(50, 0, 0), size=(domain_size, 5, 5)),
        medium=td.Medium(permittivity=6),
    )
    # 1 warning: too high "num_freqs"
    # 1 warning: glancing angle
    gaussian_beam = td.GaussianBeam(
        center=(4, 0, 0),
        size=(0, 2, 1),
        waist_radius=2.0,
        waist_distance=1,
        source_time=source_time,
        direction="+",
        num_freqs=30,
        angle_theta=np.pi / 2.1,
    )
    plane_wave = td.PlaneWave(
        center=(4, 0, 0),
        size=(0, 1, 2),
        source_time=source_time,
        direction="+",
    )
    # 2 warnings: non-uniform grid along y and z
    tfsf = td.TFSF(
        size=(10, 15, 15),
        source_time=source_time,
        direction="-",
        injection_axis=0,
    )
    # 1 warning: bloch boundary is inconsistent with plane_wave
    bspec = td.BoundarySpec(
        x=td.Boundary.pml(), y=td.Boundary.periodic(), z=td.Boundary.bloch(bloch_vec=0.2)
    )
    # 1 warning * 1 structures (perm=20) * 4 sources = 20 total: large grid step along x
    gspec = td.GridSpec(
        grid_x=td.UniformGrid(dl=0.05),
        grid_y=td.AutoGrid(min_steps_per_wvl=15),
        grid_z=td.AutoGrid(min_steps_per_wvl=15),
        override_structures=[
            td.Structure(geometry=td.Box(size=(3, 2, 1)), medium=td.Medium(permittivity=4))
        ],
    )
    sim = td.Simulation(
        size=[domain_size, 20, 20],
        sources=[gaussian_beam, mode_source, plane_wave, tfsf],
        structures=[box, box_in_pml, box_on_boundary, box_outside],
        # monitors=[monitor_flux, mode_mnt, monitor_time, proj_mnt],
        monitors=[monitor_flux, mode_mnt, proj_mnt],
        run_time=run_time,
        boundary_spec=bspec,
        grid_spec=gspec,
    )
    # parse the entire simulation at once to capture warnings hierarchically
    sim_dict = sim.dict()
    # re-add projection monitors because it has been overwritten in validators (far_field_approx=False -> True)
    monitors = list(sim_dict["monitors"])
    monitors[2] = proj_mnt.dict()
    sim_dict["monitors"] = monitors
    td.log.set_capture(True)
    sim = td.Simulation.parse_obj(sim_dict)
    print(sim.monitors_data_size)
    sim.validate_pre_upload()
    warning_list = td.log.captured_warnings()
    print(json.dumps(warning_list, indent=4))
    assert len(warning_list) == 30
    td.log.set_capture(False)
    # check that capture doesn't change validation errors
    # validation error during parse_obj()
    sim_dict_no_source = sim.dict()
    sim_dict_no_source.update({"sources": []})
    # validation error during validate_pre_upload()
    sim_dict_large_mnt = sim.dict()
    sim_dict_large_mnt.update({"monitors": [monitor_time.updated_copy(size=(10, 10, 10))]})
    # for sim_dict in [sim_dict_no_source, sim_dict_large_mnt]:
    for sim_dict in [sim_dict_no_source]:
        # Run the same failing validation once without capture...
        try:
            sim = td.Simulation.parse_obj(sim_dict)
            sim.validate_pre_upload()
        except pd.ValidationError as e:
            error_without = e.errors()
        except Exception as e:
            error_without = str(e)
        # ...and once with capture; the errors must be identical.
        td.log.set_capture(True)
        try:
            sim = td.Simulation.parse_obj(sim_dict)
            sim.validate_pre_upload()
        except pd.ValidationError as e:
            error_with = e.errors()
        except Exception as e:
            error_with = str(e)
        td.log.set_capture(False)
        print(error_without)
        print(error_with)
        assert error_without == error_with
def test_log_suppression():
    """Repeated identical warnings are counted/suppressed only when enabled."""
    with td.log as suppressed_log:
        assert td.log._counts is not None
        for i in range(4):
            suppressed_log.warning("Warning message")
        # 4 emissions leave a suppressed count of 3 at level 30 (WARNING) --
        # presumably the first emission is shown and the rest are suppressed.
        assert td.log._counts[30] == 3
    td.config.log_suppression = False
    with td.log as suppressed_log:
        # With suppression disabled no counting structure is kept.
        assert td.log._counts is None
        for i in range(4):
            suppressed_log.warning("Warning message")
        assert td.log._counts is None
    # Restore the default so later tests are unaffected.
    td.config.log_suppression = True
6,747 | setup class | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import object
import pytest
import warnings
import omero
from omero.cli import CLI
from omero.plugins.sessions import SessionsControl
from omero.rtypes import rstring
from omero.testlib import ITest
from mox3 import mox
class AbstractCLITest(ITest):
    """Base class wiring up a CLI instance with the sessions plugin.

    Deprecated since 5.4.2 in favor of ``omero.testlib.cli``.
    """
    # Emitted at import time when this module (or a subclass module) loads.
    warnings.warn("Deprecated in 5.4.2. "
                  "Use omero.testlib.cli",
                  DeprecationWarning)

    @classmethod
    def METHOD_NAME(cls):
        super(AbstractCLITest, cls).METHOD_NAME()
        cls.cli = CLI()
        cls.cli.register("sessions", SessionsControl, "TEST")

    def setup_mock(self):
        # Fresh mox instance per test that needs mocking.
        self.mox = mox.Mox()

    def teardown_mock(self):
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
class CLITest(AbstractCLITest):
    """CLI test base that logs in as a regular user before each test.

    Deprecated since 5.4.2 in favor of ``omero.testlib.cli``.
    """
    warnings.warn("Deprecated in 5.4.2. "
                  "Use omero.testlib.cli",
                  DeprecationWarning)

    def setup_method(self, method):
        # Default CLI arguments: credentials of the regular test user.
        self.args = self.login_args()

    def create_object(self, object_type, name=""):
        """Create and persist one OMERO object of ``object_type``; return its id.

        NOTE(review): an unrecognized ``object_type`` falls through the
        if/elif chain with ``new_object`` unbound and raises ``NameError``.
        """
        # create object
        if object_type == 'Dataset':
            new_object = omero.model.DatasetI()
        elif object_type == 'Project':
            new_object = omero.model.ProjectI()
        elif object_type == 'Plate':
            new_object = omero.model.PlateI()
        elif object_type == 'Screen':
            new_object = omero.model.ScreenI()
        elif object_type == 'Image':
            new_object = self.new_image()
        new_object.name = rstring(name)
        new_object = self.update.saveAndReturnObject(new_object)
        # check object has been created
        found_object = self.query.get(object_type, new_object.id.val)
        assert found_object.id.val == new_object.id.val
        return new_object.id.val

    @pytest.fixture()
    def simpleHierarchy(self):
        # Persisted Project -> Dataset -> Image hierarchy with links.
        proj = self.make_project()
        dset = self.make_dataset()
        img = self.update.saveAndReturnObject(self.new_image())
        self.link(proj, dset)
        self.link(dset, img)
        return proj, dset, img
class RootCLITest(AbstractCLITest):
    """CLI test base that logs in as the root user before each test.

    Deprecated since 5.4.2 in favor of ``omero.testlib.cli``.
    """
    warnings.warn("Deprecated in 5.4.2. "
                  "Use omero.testlib.cli",
                  DeprecationWarning)

    def setup_method(self, method):
        # Default CLI arguments: root credentials.
        self.args = self.root_login_args()
class ArgumentFixture(object):
    """
    Used to test the user/group argument
    """
    warnings.warn("Deprecated in 5.4.2. "
                  "Use omero.testlib.cli",
                  DeprecationWarning)

    def __init__(self, prefix, attr):
        self.prefix = prefix
        self.attr = attr

    def get_arguments(self, obj):
        """Build the CLI argument list: optional flag prefix, then the value
        read from ``obj.<attr>.val`` (when an attribute name was given)."""
        arguments = [] if not self.prefix else [self.prefix]
        if self.attr:
            arguments.append("%s" % getattr(obj, self.attr).val)
        return arguments

    def __repr__(self):
        # Display the flag when present, otherwise the attribute name.
        return "%s" % (self.prefix if self.prefix else self.attr)
# Fixture tuples parameterizing user/group argument tests: each entry pairs
# an optional CLI flag with the model attribute supplying its value.
UserIdNameFixtures = (
    ArgumentFixture('--id', 'id'),
    ArgumentFixture('--name', 'omeName'),
)
UserFixtures = (
    ArgumentFixture(None, 'id'),
    ArgumentFixture(None, 'omeName'),
    ArgumentFixture('--user-id', 'id'),
    ArgumentFixture('--user-name', 'omeName'),
)
GroupIdNameFixtures = (
    ArgumentFixture('--id', 'id'),
    ArgumentFixture('--name', 'name'),
)
GroupFixtures = (
    ArgumentFixture(None, 'id'),
    ArgumentFixture(None, 'name'),
    ArgumentFixture('--group-id', 'id'),
    ArgumentFixture('--group-name', 'name'),
)
def get_user_ids(out, sort_key=None):
    """Parse user ids out of the CLI ``user list`` ASCII table.

    :param out: captured stdout containing the table
    :param sort_key: optional column name ('id', 'login', 'first-name',
        'last-name' or 'email'); when given, asserts the column is in
        non-decreasing order
    :return: list of user ids in table order
    """
    columns = {'login': 1, 'first-name': 2, 'last-name': 3, 'email': 4}
    lines = out.split('\n')
    ids = []
    last_value = None
    # The first two lines are the table header and separator.
    for line in lines[2:]:
        elements = line.split('|')
        if len(elements) < 8:
            # Not a data row (footer, blank line, ...).
            continue
        ids.append(int(elements[0].strip()))
        if sort_key:
            if sort_key == 'id':
                new_value = ids[-1]
            else:
                new_value = elements[columns[sort_key]].strip()
            # Bug fix: skip the comparison on the first data row -- comparing
            # against the initial None raises TypeError on Python 3.
            if last_value is not None:
                assert new_value >= last_value
            last_value = new_value
    return ids
def get_group_ids(out, sort_key=None):
    """Parse group ids out of the CLI ``group list`` ASCII table.

    :param out: captured stdout containing the table
    :param sort_key: optional sort column ('id' or the group-name column);
        when given, asserts the column is in non-decreasing order
    :return: list of group ids in table order
    """
    lines = out.split('\n')
    ids = []
    last_value = None
    # The first two lines are the table header and separator.
    for line in lines[2:]:
        elements = line.split('|')
        if len(elements) < 4:
            # Not a data row (footer, blank line, ...).
            continue
        ids.append(int(elements[0].strip()))
        if sort_key:
            if sort_key == 'id':
                new_value = ids[-1]
            else:
                new_value = elements[1].strip()
            # Bug fix: skip the comparison on the first data row -- comparing
            # against the initial None raises TypeError on Python 3.
            if last_value is not None:
                assert new_value >= last_value
            last_value = new_value
    return ids
6,748 | test main | import unittest
from test import support
# Skip this test if the _testcapi module isn't available.
support.import_module('_testcapi')
from _testcapi import _test_structmembersType, \
CHAR_MAX, CHAR_MIN, UCHAR_MAX, \
SHRT_MAX, SHRT_MIN, USHRT_MAX, \
INT_MAX, INT_MIN, UINT_MAX, \
LONG_MAX, LONG_MIN, ULONG_MAX, \
LLONG_MAX, LLONG_MIN, ULLONG_MAX, \
PY_SSIZE_T_MAX, PY_SSIZE_T_MIN
# One instance of the C test type, initialized with a distinct known value
# for every structmember slot; the test classes below read and write these
# members through attribute access.
ts=_test_structmembersType(False,  # T_BOOL
                          1,       # T_BYTE
                          2,       # T_UBYTE
                          3,       # T_SHORT
                          4,       # T_USHORT
                          5,       # T_INT
                          6,       # T_UINT
                          7,       # T_LONG
                          8,       # T_ULONG
                          23,      # T_PYSSIZET
                          9.99999, # T_FLOAT
                          10.1010101010, # T_DOUBLE
                          "hi"     # T_STRING_INPLACE
                          )
class ReadWriteTests(unittest.TestCase):
    """Round-trip reads/writes of every structmember type at its boundaries."""

    def test_bool(self):
        ts.T_BOOL = True
        self.assertEqual(ts.T_BOOL, True)
        ts.T_BOOL = False
        self.assertEqual(ts.T_BOOL, False)
        # T_BOOL rejects non-bool values, even truthy ints.
        self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1)

    def test_byte(self):
        ts.T_BYTE = CHAR_MAX
        self.assertEqual(ts.T_BYTE, CHAR_MAX)
        ts.T_BYTE = CHAR_MIN
        self.assertEqual(ts.T_BYTE, CHAR_MIN)
        ts.T_UBYTE = UCHAR_MAX
        self.assertEqual(ts.T_UBYTE, UCHAR_MAX)

    def test_short(self):
        ts.T_SHORT = SHRT_MAX
        self.assertEqual(ts.T_SHORT, SHRT_MAX)
        ts.T_SHORT = SHRT_MIN
        self.assertEqual(ts.T_SHORT, SHRT_MIN)
        ts.T_USHORT = USHRT_MAX
        self.assertEqual(ts.T_USHORT, USHRT_MAX)

    def test_int(self):
        ts.T_INT = INT_MAX
        self.assertEqual(ts.T_INT, INT_MAX)
        ts.T_INT = INT_MIN
        self.assertEqual(ts.T_INT, INT_MIN)
        ts.T_UINT = UINT_MAX
        self.assertEqual(ts.T_UINT, UINT_MAX)

    def test_long(self):
        ts.T_LONG = LONG_MAX
        self.assertEqual(ts.T_LONG, LONG_MAX)
        ts.T_LONG = LONG_MIN
        self.assertEqual(ts.T_LONG, LONG_MIN)
        ts.T_ULONG = ULONG_MAX
        self.assertEqual(ts.T_ULONG, ULONG_MAX)

    def test_py_ssize_t(self):
        ts.T_PYSSIZET = PY_SSIZE_T_MAX
        self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MAX)
        ts.T_PYSSIZET = PY_SSIZE_T_MIN
        self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MIN)

    @unittest.skipUnless(hasattr(ts, "T_LONGLONG"), "long long not present")
    def test_longlong(self):
        ts.T_LONGLONG = LLONG_MAX
        self.assertEqual(ts.T_LONGLONG, LLONG_MAX)
        ts.T_LONGLONG = LLONG_MIN
        self.assertEqual(ts.T_LONGLONG, LLONG_MIN)
        ts.T_ULONGLONG = ULLONG_MAX
        self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX)
        ## make sure these will accept a plain int as well as a long
        ts.T_LONGLONG = 3
        self.assertEqual(ts.T_LONGLONG, 3)
        ts.T_ULONGLONG = 4
        self.assertEqual(ts.T_ULONGLONG, 4)

    def test_bad_assignments(self):
        integer_attributes = [
            'T_BOOL',
            'T_BYTE', 'T_UBYTE',
            'T_SHORT', 'T_USHORT',
            'T_INT', 'T_UINT',
            'T_LONG', 'T_ULONG',
            'T_PYSSIZET'
            ]
        if hasattr(ts, 'T_LONGLONG'):
            integer_attributes.extend(['T_LONGLONG', 'T_ULONGLONG'])
        # issue8014: this produced 'bad argument to internal function'
        # internal error
        for nonint in None, 3.2j, "full of eels", {}, []:
            for attr in integer_attributes:
                self.assertRaises(TypeError, setattr, ts, attr, nonint)

    def test_inplace_string(self):
        self.assertEqual(ts.T_STRING_INPLACE, "hi")
        # In-place strings are read-only: neither assignment nor deletion.
        self.assertRaises(TypeError, setattr, ts, "T_STRING_INPLACE", "s")
        self.assertRaises(TypeError, delattr, ts, "T_STRING_INPLACE")
class TestWarnings(unittest.TestCase):
    """Out-of-range assignments to small integer members emit RuntimeWarning."""

    def test_byte_max(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_BYTE = CHAR_MAX+1

    def test_byte_min(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_BYTE = CHAR_MIN-1

    def test_ubyte_max(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_UBYTE = UCHAR_MAX+1

    def test_short_max(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_SHORT = SHRT_MAX+1

    def test_short_min(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_SHORT = SHRT_MIN-1

    def test_ushort_max(self):
        with support.check_warnings(('', RuntimeWarning)):
            ts.T_USHORT = USHRT_MAX+1
def METHOD_NAME(verbose=None):
    """Run every test case in this module via test.support.

    Note: ``verbose`` is accepted for interface compatibility but unused.
    """
    support.run_unittest(__name__)

if __name__ == "__main__":
    METHOD_NAME(verbose=True)
6,749 | find eu dist | # Written by JP Janet for HJK Group
# Dpt of Chemical Engineering, MIT
# ########################################################
# ###### This script contains a neural network ##########
# ### trained on octahedral metal-ligand #######
# ###### bond distances and spin propensity ###########
# ########################################################
import csv
import numpy as np
from pkg_resources import resource_filename, Requirement
from molSimplify.utils.decorators import deprecated
from typing import List
def simple_network_builder(layers: List[int], partial_path: str):
    """Numpy based implementation of a simple neural network to replace the
    now deprecated pybrain variant.

    Weights/biases are read from ``<partial_path>_{w,b}{1,2,3}.csv`` files
    shipped with the package.
    """

    class ThreeLayerNetwork():
        """Fully-connected net: two tanh hidden layers, linear output."""

        def __init__(self, layers: List[int], partial_path: str):
            # Weight matrices are stored flat; reshape each to (out, in)
            # using the known input width of its layer.
            w1_flat = csv_loader(partial_path + '_w1.csv')
            w2_flat = csv_loader(partial_path + '_w2.csv')
            w3_flat = csv_loader(partial_path + '_w3.csv')
            self.w1 = np.array(w1_flat).reshape(-1, layers[0])
            self.w2 = np.array(w2_flat).reshape(-1, layers[1])
            self.w3 = np.array(w3_flat).reshape(-1, layers[2])
            self.b1 = np.array(csv_loader(partial_path + '_b1.csv'))
            self.b2 = np.array(csv_loader(partial_path + '_b2.csv'))
            self.b3 = np.array(csv_loader(partial_path + '_b3.csv'))

        def activate(self, input: np.ndarray) -> np.ndarray:
            """Forward pass through the network."""
            hidden = np.tanh(self.w1 @ input + self.b1)
            hidden = np.tanh(self.w2 @ hidden + self.b2)
            return self.w3 @ hidden + self.b3

    return ThreeLayerNetwork(layers, partial_path)
@deprecated
def simple_network_builder_pybrain(layers: List[int], partial_path: str):
    """Deprecated pybrain implementation of the 3-layer network builder.

    Kept for reference; prefer :func:`simple_network_builder`.
    """
    from pybrain.structure import (FeedForwardNetwork, TanhLayer, LinearLayer,
                                   BiasUnit, FullConnection)
    n = FeedForwardNetwork()
    # create the network
    inlayer = LinearLayer(layers[0], name="In")
    hidden_one = TanhLayer(layers[1], name="Hidden 1")
    hidden_two = TanhLayer(layers[2], name="Hidden 2")
    b1 = BiasUnit(name="Bias")
    output = LinearLayer(1, name="Out")
    n.addInputModule(inlayer)
    n.addModule(hidden_one)
    n.addModule(hidden_two)
    n.addModule(b1)
    n.addOutputModule(output)
    in_to_one = FullConnection(inlayer, hidden_one)
    one_to_two = FullConnection(hidden_one, hidden_two)
    two_to_out = FullConnection(hidden_two, output)
    b1_to_one = FullConnection(b1, hidden_one)
    b2_to_two = FullConnection(b1, hidden_two)
    b3_to_output = FullConnection(b1, output)
    # load weights and biases
    in_to_one._setParameters(np.array((csv_loader(partial_path + '_w1.csv'))))
    one_to_two._setParameters(np.array(csv_loader(partial_path + '_w2.csv')))
    two_to_out._setParameters(np.array(csv_loader(partial_path + '_w3.csv')))
    b1_to_one._setParameters(np.array(csv_loader(partial_path + '_b1.csv')))
    b2_to_two._setParameters(np.array(csv_loader(partial_path + '_b2.csv')))
    b3_to_output._setParameters(np.array(csv_loader(partial_path + '_b3.csv')))
    # connect the network topology
    n.addConnection(in_to_one)
    n.addConnection(one_to_two)
    n.addConnection(two_to_out)
    # n.sortModules()
    n.addConnection(b1_to_one)
    n.addConnection(b2_to_two)
    n.addConnection(b3_to_output)
    # finalize network object
    n.sortModules()
    return n
def csv_loader(path: str) -> List[float]:
    """Load a CSV bundled with molSimplify and return every cell as one flat
    list of floats (rows concatenated in order)."""
    path_to_file = resource_filename(Requirement.parse("molSimplify"),
                                     "molSimplify/python_nn/" + path)
    values = []
    with open(path_to_file, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            values.extend(float(cell) for cell in row)
    return values
def matrix_loader(path, rownames=False):
    """Load a matrix CSV bundled with molSimplify.

    :param path: file name under ``molSimplify/python_nn/``
    :param rownames: when True, split off the first column and return
        ``(matrix, row_names)``; otherwise return the raw rows
    """
    path_to_file = resource_filename(Requirement.parse("molSimplify"),
                                     "molSimplify/python_nn/" + path)
    with open(path_to_file, 'r') as f:
        rows = [row for row in csv.reader(f, delimiter=',')]
    if rownames:
        row_names = [row[0] for row in rows]
        mat = [row[1:] for row in rows]
        return mat, row_names
    return rows
# n = network_builder([25,50,51],"nn_split")
def simple_splitting_ann(excitation):
    """Predict the spin-splitting energy from a 25-feature excitation vector.

    Returns ``(prediction, standardized_excitation)``.
    """
    net = simple_network_builder([25, 50, 50], "ms_split")
    excitation, sp_center, sp_shift = excitation_standardizer(excitation, 'split')
    raw = net.activate(excitation)
    # Undo the scale-and-center normalization of the training target.
    result = (raw * sp_shift) + sp_center
    return result, excitation
def simple_slope_ann(slope_excitation):
    """Predict the spin-propensity slope from a 24-feature vector (no alpha)."""
    net = simple_network_builder([24, 50, 50], "ms_slope")
    slope_excitation, sl_center, sl_shift = excitation_standardizer(slope_excitation, 'slope')
    raw = net.activate(slope_excitation)
    # Undo the scale-and-center normalization of the training target.
    return (raw * sl_shift) + sl_center
def simple_ls_ann(excitation):
    """Predict the low-spin bond length from a 25-feature excitation vector."""
    net = simple_network_builder([25, 50, 50], "ms_ls")
    excitation, ls_center, ls_shift = excitation_standardizer(excitation, 'ls')
    # De-normalize the network output back to physical units.
    return net.activate(excitation) * ls_shift + ls_center
def simple_hs_ann(excitation):
    """Predict the high-spin bond length from a 25-feature excitation vector."""
    net = simple_network_builder([25, 50, 50], "ms_hs")
    excitation, hs_center, hs_shift = excitation_standardizer(excitation, 'hs')
    # De-normalize the network output back to physical units.
    return net.activate(excitation) * hs_shift + hs_center
def excitation_standardizer(excitation, tag):
    """This function implements a scale-and-center type of normalization
    that may help predictions currently testing for splitting and slope only

    Returns ``(standardized_excitation, target_center, target_scale)``.
    """
    centers = csv_loader(tag + "_center.csv")
    shifts = csv_loader(tag + "_scale.csv")
    # First entry is the prediction target's center/scale; the remainder
    # are the per-descriptor values.
    sp_center, descriptor_centers = centers[0], np.array(centers[1:])
    sp_shift, descriptor_shifts = shifts[0], np.array(shifts[1:])
    excitation = (np.array(excitation) - descriptor_centers) / descriptor_shifts
    return (excitation, sp_center, sp_shift)
def METHOD_NAME(excitation):
    """Return the (scaled) Euclidean distance from `excitation` to the
    nearest training vector in descriptor space, plus that row's name.

    :param excitation: raw 25-feature excitation vector
    :return: ``(min_dist, best_row)`` where ``best_row`` is None if no
        training row is closer than the initial threshold of 1000
    """
    mat, rownames = matrix_loader('train_data.csv', rownames=True)
    train_mat = np.array(mat, dtype='float64')
    min_dist = 1000
    # Bug fix: previously unbound when no row beat min_dist (NameError).
    best_row = None
    excitation, _, _ = excitation_standardizer(excitation, 'split')
    # Hoisted: convert once instead of per row (also removes a dead,
    # discarded np.subtract call that was in the original loop).
    excitation = np.array(excitation)
    for i, row in enumerate(train_mat):
        this_dist = np.linalg.norm(np.subtract(row, excitation)) / 3
        if this_dist < min_dist:
            min_dist = this_dist
            best_row = rownames[i]
    return min_dist, best_row
6,750 | add billing projects | """Standard definitions for reusable script arguments."""
import fnmatch
import logging
import re
from argparse import Action
from functools import partial
from google.cloud import bigquery
from bigquery_etl.config import ConfigLoader
from bigquery_etl.util.common import TempDatasetReference
def add_argument(parser, *args, **kwargs):
    """Add default to help while adding argument to parser.

    If a help string and a meaningful default are both present, the default is
    appended to the help text so it shows up in ``--help`` output.
    """
    default = kwargs.get("default")
    if "help" in kwargs and default not in (None, [], [None]):
        if kwargs.get("nargs") in ("*", "+"):
            # unnest a single default for printing, if possible
            try:
                (default,) = default
            except ValueError:
                pass
        kwargs["help"] += f"; Defaults to {default}"
    parser.add_argument(*args, **kwargs)
def METHOD_NAME(parser, *extra_args, default=[None]):
    """Add argument for billing projects.

    Registers ``-p`` plus the long spellings (and any ``extra_args``) on
    ``parser``.  The ``[None]`` default is only read, never mutated.
    """
    flags = (
        "-p",
        "--billing-projects",
        "--billing_projects",
        "--billing-project",
        "--billing_project",
    ) + extra_args
    add_argument(
        parser,
        *flags,
        nargs="+",
        default=default,
        help="One or more billing projects over which bigquery jobs should be "
        "distributed",
    )
def add_dry_run(parser, debug_log_queries=True):
    """Add argument for dry run.

    When ``debug_log_queries`` is set, the help text also mentions how to get
    query contents logged.
    """
    help_text = "Do not make changes, only log actions that would be taken"
    if debug_log_queries:
        help_text += "; Use with --log-level=DEBUG to log query contents"
    add_argument(
        parser,
        "--dry_run",
        "--dry-run",
        action="store_true",
        help=help_text,
    )
def add_log_level(parser, default=logging.getLevelName(logging.INFO)):
    """Add argument for log level.

    Values are upper-cased and applied to the python root logger through
    ``LogLevelAction``.
    """
    options = ("-l", "--log-level", "--log_level")
    add_argument(
        parser,
        *options,
        action=LogLevelAction,
        type=str.upper,
        default=default,
        help="Set logging level for the python root logger",
    )
def add_parallelism(parser, default=4):
    """Add argument for parallel execution (``-P``/``--parallelism``)."""
    add_argument(
        parser,
        "-P",
        "--parallelism",
        type=int,
        default=default,
        help="Maximum number of tasks to execute concurrently",
    )
def add_priority(parser):
    """Add argument for BigQuery job priority (BATCH or INTERACTIVE)."""
    priority_choices = [bigquery.QueryPriority.BATCH, bigquery.QueryPriority.INTERACTIVE]
    add_argument(
        parser,
        "--priority",
        type=str.upper,
        choices=priority_choices,
        default=bigquery.QueryPriority.INTERACTIVE,
        help="Priority for BigQuery query jobs; BATCH priority may significantly slow "
        "down queries if reserved slots are not enabled for the billing project; "
        "INTERACTIVE priority is limited to 100 concurrent queries per project",
    )
def add_table_filter(parser, example="telemetry_stable.main_v*"):
    """Add arguments for filtering tables.

    Registers ``--only`` and ``--except``; both share the ``table_filter``
    destination, combined by ``TableFilterAction``.
    """
    example_ = f"Pass names or globs like {example!r}"
    for flags, raw_dest, help_prefix in (
        (("-o", "--only"), "only_tables", "Process only the given tables"),
        (("-x", "--except"), "except_tables", "Process all tables except for the given tables"),
    ):
        add_argument(
            parser,
            *flags,
            nargs="+",
            dest="table_filter",
            raw_dest=raw_dest,
            action=TableFilterAction,
            help=f"{help_prefix}; {example_}",
        )
def add_temp_dataset(parser, *extra_args):
    """Add argument for temporary dataset.

    The default points at the ``tmp`` dataset of the configured default
    project.
    """
    default_project = ConfigLoader.get('default', 'project', fallback='moz-fx-data-shared-prod')
    add_argument(
        parser,
        "--temp-dataset",
        "--temp_dataset",
        "--temporary-dataset",
        "--temporary_dataset",
        *extra_args,
        type=TempDatasetReference.from_string,
        default=f"{default_project}.tmp",
        help="Dataset where intermediate query results will be temporarily stored, "
        "formatted as PROJECT_ID.DATASET_ID",
    )
class LogLevelAction(Action):
    """Custom argparse.Action for --log-level.

    Applies the level to the root logger both at parser construction time
    (when a default is supplied) and again when the option is parsed.
    """

    def __init__(self, *args, **kwargs):
        """Set default log level if provided."""
        super().__init__(*args, **kwargs)
        default_level = self.default
        if default_level is not None:
            logging.root.setLevel(default_level)

    def __call__(self, parser, namespace, value, option_string=None):
        """Set level for root logger."""
        logging.root.setLevel(value)
class TableFilterAction(Action):
    """Custom argparse.Action for --only and --except.

    Both options share one ``dest``: each use appends a predicate, and the
    destination becomes a callable returning True only for tables that pass
    every predicate.  Raw argument values are stored under ``raw_dest``.

    Fix: removed a stray ``|`` character that had been appended to the last
    line (a text-extraction artifact that made the module unparseable).
    """

    def __init__(self, *args, raw_dest, **kwargs):
        """Add default predicate and record which option this instance serves."""
        super().__init__(*args, default=self.default, **kwargs)
        self.raw_dest = raw_dest
        self.arg = self.option_strings[-1]
        # --except inverts the match: listed tables are skipped.
        self.invert_match = self.arg == "--except"

    @staticmethod
    def default(table):
        """Return True for default predicate."""
        return True

    @staticmethod
    def compile(values):
        """Compile a list of glob patterns into a single regex."""
        return re.compile("|".join(fnmatch.translate(pattern) for pattern in values))

    def predicate(self, table, pattern):
        """Log tables skipped due to table filter arguments."""
        matched = (pattern.match(table) is not None) != self.invert_match
        if not matched:
            logging.info(f"Skipping {table} due to {self.arg} argument")
        return matched

    def __call__(self, parser, namespace, values, option_string=None):
        """Add table filter to predicates."""
        setattr(namespace, self.raw_dest, values)
        predicates_attr = "_" + self.dest
        predicates = getattr(namespace, predicates_attr, [])
        if not hasattr(namespace, predicates_attr):
            setattr(namespace, predicates_attr, predicates)
        # The combined filter closes over the shared predicates list, so later
        # options on the same dest contribute automatically.
        setattr(
            namespace,
            self.dest,
            lambda table: all(predicate(table) for predicate in predicates),
        )
        predicates.append(partial(self.predicate, pattern=self.compile(values)))
6,751 | run | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from common.base_model_init import BaseModelInitializer
from common.base_model_init import set_env_var
import os
from argparse import ArgumentParser
class ModelInitializer(BaseModelInitializer):
    """Initialize the 3D U-Net (brats) model benchmark and run it.

    Builds the benchmark command line from parsed args and platform settings
    in ``__init__``; ``run`` executes it.  Fix: removed a stray ``|`` character
    appended to the last line (a text-extraction artifact that broke parsing).
    """

    def __init__(self, args, custom_args=[], platform_util=None):
        # NOTE(review): the mutable default custom_args=[] is read-only here;
        # kept for interface parity with the other model initializers.
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)
        self.benchmark_command = ""
        if not platform_util:
            raise ValueError("Did not find any platform info.")

        # use default batch size if -1
        if self.args.batch_size == -1:
            self.args.batch_size = 10

        # set num_inter_threads and num_intra_threads
        self.set_num_inter_intra_threads()

        # model-specific flags forwarded by the benchmark launcher
        arg_parser = ArgumentParser(description='Parse args')
        arg_parser.add_argument("--warmup-steps", dest='warmup_steps',
                                type=int, default=10,
                                help="number of warmup steps")
        arg_parser.add_argument("--steps", dest='steps',
                                type=int, default=50,
                                help="number of steps")
        arg_parser.add_argument(
            '--kmp-blocktime', dest='kmp_blocktime',
            help='number of kmp block time',
            type=int, default=1)
        self.args = arg_parser.parse_args(self.custom_args, namespace=self.args)

        # Set KMP env vars, if they haven't already been set, but override the
        # default KMP_BLOCKTIME value
        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

        # accuracy mode uses a different driver script than performance mode
        script_file = "run_performance.py"
        if self.args.accuracy_only:
            script_file = "run_accuracy.py"
        benchmark_script = os.path.join(
            self.args.intelai_models, self.args.mode, self.args.precision, "brats",
            script_file)
        self.benchmark_command = self.get_command_prefix(args.socket_id) + \
            self.python_exe + " " + benchmark_script
        self.benchmark_command = \
            self.benchmark_command + \
            " --input-graph=" + self.args.input_graph + \
            " --num-inter-threads=" + str(self.args.num_inter_threads) + \
            " --num-intra-threads=" + str(self.args.num_intra_threads) + \
            " --batch-size=" + str(self.args.batch_size) + \
            " --model-name=" + str(self.args.model_name)
        if not self.args.accuracy_only:
            self.benchmark_command += \
                " --steps=" + str(self.args.steps) + " --warmup-steps=" + str(self.args.warmup_steps)

        # if the data location directory is not empty, then include the arg
        if self.args.data_location and os.listdir(self.args.data_location):
            self.benchmark_command += " --data-location=" + \
                self.args.data_location
        if self.args.accuracy_only:
            self.benchmark_command += " --accuracy-only"
            # set up env vars for running accuracy with real data
            set_env_var("nnUNet_preprocessed", "build/preprocessed_data_dir")
            set_env_var("nnUNet_raw_data_base", "build/raw_data")
            set_env_var("DOWNLOAD_DATA_DIR", self.args.data_location)
            set_env_var("RESULTS_FOLDER", "build/result")

    def METHOD_NAME(self):
        """Execute the benchmark command built in ``__init__``, if any."""
        if self.benchmark_command:
            self.run_command(self.benchmark_command)
            if self.args.output_results:
                # NOTE(review): self.results_filename is not set in this class;
                # presumably assigned by run_command/base class -- confirm.
                print("Inference results file in the output directory: {}".format(self.results_filename))
6,752 | evaluate | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (1) import nvflare client API
import nvflare.client as flare
# (optional) set a fix place so we don't need to download everytime
DATASET_PATH = "/tmp/nvflare/data"
# (optional) We change to use GPU to speed things up.
# if you want to use CPU, change DEVICE="cpu"
DEVICE = "cuda:0"
PATH = "./cifar_net.pth"
def main():
    """Train and evaluate a CIFAR-10 ``Net`` as an NVFlare federated client.

    Downloads CIFAR-10, then uses the flare client API: evaluate the received
    global model, train locally, and finally evaluate the locally saved
    weights.
    """
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    batch_size = 4
    trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
    net = Net()
    # (2) initializes NVFlare client API
    flare.init()
    # (3) decorates with flare.train and load model from the first argument
    # wraps training logic into a method
    @flare.train
    def train(input_model=None, total_epochs=2, lr=0.001):
        """Train ``net`` from the received global weights; return an FLModel."""
        net.load_state_dict(input_model.params)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
        # (optional) use GPU to speed things up
        net.to(DEVICE)
        # (optional) calculate total steps
        # NOTE(review): hard-codes 2 epochs; likely should be
        # total_epochs * len(trainloader) -- confirm before changing.
        steps = 2 * len(trainloader)
        for epoch in range(total_epochs):  # loop over the dataset multiple times
            running_loss = 0.0
            for i, data in enumerate(trainloader, 0):
                # get the inputs; data is a list of [inputs, labels]
                # (optional) use GPU to speed things up
                inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # print statistics
                running_loss += loss.item()
                if i % 2000 == 1999:  # print every 2000 mini-batches
                    print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
                    running_loss = 0.0
        print("Finished Training")
        torch.save(net.state_dict(), PATH)
        # (4) construct trained FL model
        output_model = flare.FLModel(params=net.cpu().state_dict(), meta={"NUM_STEPS_CURRENT_ROUND": steps})
        return output_model
    # (5) decorates with flare.evaluate and load model from the first argument
    @flare.METHOD_NAME
    def fl_evaluate(input_model=None):
        """Evaluate the model received from the server (flare-decorated)."""
        return METHOD_NAME(input_weights=input_model.params)
    # wraps evaluate logic into a method
    def METHOD_NAME(input_weights):
        """Return test-set accuracy (integer percent) for ``input_weights``."""
        net.load_state_dict(input_weights)
        # (optional) use GPU to speed things up
        net.to(DEVICE)
        correct = 0
        total = 0
        # since we're not training, we don't need to calculate the gradients for our outputs
        with torch.no_grad():
            for data in testloader:
                # (optional) use GPU to speed things up
                images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
                # calculate outputs by running images through the network
                outputs = net(images)
                # the class with the highest energy is what we choose as prediction
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
        # return evaluation metrics
        return 100 * correct // total
    # (6) call fl_evaluate method before training
    # to evaluate on the received/aggregated model
    fl_evaluate()
    # call train method
    train(total_epochs=2, lr=0.001)
    # call evaluate method
    METHOD_NAME(input_weights=torch.load(PATH))
# Script entry point (fix: removed a stray trailing "|" extraction artifact).
if __name__ == "__main__":
    main()
6,753 | search | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List # noqa: F401
from amundsen_common.models.METHOD_NAME import (Filter, SearchRequestSchema,
SearchResponseSchema)
from flask import Response
from flask import current_app as app
from flask import jsonify, make_response, request
from flask.blueprints import Blueprint
from amundsen_application.log.action_log import action_logging
from amundsen_application.api.utils.request_utils import (get_query_param,
request_search)
from amundsen_application.api.utils.search_utils import (
generate_query_request, map_dashboard_result, map_feature_result,
map_table_result, map_user_result)
LOGGER = logging.getLogger(__name__)
REQUEST_SESSION_TIMEOUT_SEC = 3
SEARCH_ENDPOINT = '/v2/search'
RESOURCE_TO_MAPPING = {
'table': map_table_result,
'dashboard': map_dashboard_result,
'feature': map_feature_result,
'user': map_user_result,
}
DEFAULT_FILTER_OPERATION = 'OR'
search_blueprint = Blueprint('search', __name__, url_prefix='/api/search/v1')
def _transform_filters(filters: Dict, resources: List[str]) -> List[Filter]:
    """Convert the per-resource filter dicts from the UI request into a flat
    list of ``Filter`` objects for the search service.

    Only resources that are both searched and filtered are considered.  A
    filter's comma-separated ``value`` string is split and stripped; its
    ``filterOperation`` defaults to ``DEFAULT_FILTER_OPERATION``.

    Fixes: the values comprehension no longer shadows the builtin ``str``,
    and the inner loop iterates ``.items()`` instead of keys-then-index.
    """
    transformed_filters = []
    searched_resources_with_filters = set(filters.keys()).intersection(resources)
    for resource in searched_resources_with_filters:
        for field, field_filters in filters[resource].items():
            values = []
            filter_operation = DEFAULT_FILTER_OPERATION
            if field_filters is not None and field_filters.get('value') is not None:
                value_str = field_filters.get('value')
                # Note: entries that are empty *before* stripping are dropped;
                # whitespace-only entries are kept as '' (preserved behavior).
                values = [value.strip() for value in value_str.split(',') if value != '']
                filter_operation = field_filters.get('filterOperation', DEFAULT_FILTER_OPERATION)
            transformed_filters.append(Filter(name=field,
                                              values=values,
                                              operation=filter_operation))
    return transformed_filters
@search_blueprint.route('/search', methods=['POST'])
def METHOD_NAME() -> Response:
    """
    Parse the request arguments and call the helper method to execute a search for specified resources
    :return: a Response created with the results from the helper method
    """
    results_dict = {}
    try:
        payload = request.get_json()
        # Required parameters: get_query_param raises with the given message
        # when missing; optional ones fall back to empty defaults.
        term = get_query_param(payload, 'searchTerm', '"searchTerm" parameter expected in request data')
        page = get_query_param(payload, 'pageIndex', '"pageIndex" parameter expected in request data')
        per_page = get_query_param(payload,
                                   'resultsPerPage',
                                   '"resultsPerPage" parameter expected in request data')
        results_dict = _search_resources(search_term=term,
                                         resources=payload.get('resources', []),
                                         page_index=int(page),
                                         results_per_page=int(per_page),
                                         filters=payload.get('filters', {}),
                                         highlight_options=payload.get('highlightingOptions', {}),
                                         search_type=payload.get('searchType'))
        return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.OK))
    except Exception as e:
        message = 'Encountered exception: ' + str(e)
        LOGGER.exception(message)
        return make_response(jsonify(results_dict), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_resources(*, search_term: str,
                      resources: List[str],
                      page_index: int,
                      results_per_page: int,
                      filters: Dict,
                      highlight_options: Dict,
                      search_type: str) -> Dict[str, Any]:
    """
    Call the search service endpoint and return matching results
    :return: a json output containing search results array as 'results'

    NOTE(review): search_type is unused in the body; presumably it is recorded
    by the @action_logging decorator -- confirm before removing.
    """
    def _empty_results() -> Dict[str, Any]:
        # Fresh dict per resource. The previous version aliased ONE dict under
        # all four resource keys, so a mutation through any key would have
        # silently changed them all.
        return {
            'page_index': int(page_index),
            'results': [],
            'total_results': 0,
        }

    results_dict = {
        'search_term': search_term,
        'msg': '',
        'table': _empty_results(),
        'dashboard': _empty_results(),
        'feature': _empty_results(),
        'user': _empty_results(),
    }
    try:
        transformed_filters = _transform_filters(filters=filters, resources=resources)
        query_request = generate_query_request(filters=transformed_filters,
                                               resources=resources,
                                               page_index=page_index,
                                               results_per_page=results_per_page,
                                               search_term=search_term,
                                               highlight_options=highlight_options)
        request_json = json.dumps(SearchRequestSchema().dump(query_request))
        url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_ENDPOINT
        response = request_search(url=url_base,
                                  headers={'Content-Type': 'application/json'},
                                  method='POST',
                                  data=request_json)
        status_code = response.status_code
        if status_code == HTTPStatus.OK:
            search_response = SearchResponseSchema().loads(json.dumps(response.json()))
            results_dict['msg'] = search_response.msg
            results = search_response.results
            for resource in results.keys():
                results_dict[resource] = {
                    'page_index': int(page_index),
                    'results': [RESOURCE_TO_MAPPING[resource](result) for result in results[resource]['results']],
                    'total_results': results[resource]['total_results'],
                }
        else:
            message = 'Encountered error: Search request failed'
            results_dict['msg'] = message
            results_dict['status_code'] = status_code
        return results_dict
    except Exception as e:
        message = f'Encountered exception: {str(e)}'
        results_dict['msg'] = message
        LOGGER.exception(message)
        return results_dict
6,754 | test unbound method | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import spack.build_environment
import spack.repo
import spack.spec
from spack.package import build_system_flags, env_flags, inject_flags
@pytest.fixture()
def temp_env():
    """Snapshot ``os.environ`` and restore it after the test completes."""
    saved = dict(os.environ)
    yield
    os.environ.clear()
    os.environ.update(saved)
def add_o3_to_build_system_cflags(pkg, name, flags):
    """Flag handler that routes an extra ``-O3`` into the build-system cflags.

    Returns the (inject, env, build_system) flag triple expected by Spack's
    flag-handler protocol.  Fix: the local list was previously named
    ``build_system_flags``, shadowing the handler of the same name imported
    from ``spack.package`` at module scope.
    """
    extra_build_system_flags = []
    if name == "cflags":
        extra_build_system_flags.append("-O3")
    return flags, None, extra_build_system_flags
@pytest.mark.usefixtures("config", "mock_packages")
class TestFlagHandlers:
    """Tests for Spack's flag handlers (inject/env/build-system).

    Each test concretizes a spec, installs a flag handler on its package,
    runs ``setup_package``, and asserts where the compiler flags ended up.
    Fixes: removed a stray trailing ``|`` extraction artifact, and replaced a
    try/assert-False/except pattern with ``pytest.raises``.
    """

    def test_no_build_system_flags(self, temp_env):
        """Test that both autotools and cmake work getting no build_system flags"""
        s1 = spack.spec.Spec("cmake-client").concretized()
        spack.build_environment.setup_package(s1.package, False)
        s2 = spack.spec.Spec("patchelf").concretized()
        spack.build_environment.setup_package(s2.package, False)
        # Use cppflags as a canary
        assert "SPACK_CPPFLAGS" not in os.environ
        assert "CPPFLAGS" not in os.environ

    def METHOD_NAME(self, temp_env):
        """Flag handler taken from the class (unbound) behaves like inject_flags."""
        # Other tests test flag_handlers set as bound methods and functions.
        # This tests an unbound method in python2 (no change in python3).
        s = spack.spec.Spec("mpileaks cppflags=-g").concretized()
        s.package.flag_handler = s.package.__class__.inject_flags
        spack.build_environment.setup_package(s.package, False)
        assert os.environ["SPACK_CPPFLAGS"] == "-g"
        assert "CPPFLAGS" not in os.environ

    def test_inject_flags(self, temp_env):
        """inject_flags puts flags into SPACK_* wrapper variables only."""
        s = spack.spec.Spec("mpileaks cppflags=-g").concretized()
        s.package.flag_handler = inject_flags
        spack.build_environment.setup_package(s.package, False)
        assert os.environ["SPACK_CPPFLAGS"] == "-g"
        assert "CPPFLAGS" not in os.environ

    def test_env_flags(self, temp_env):
        """env_flags puts flags into the plain environment variables only."""
        s = spack.spec.Spec("mpileaks cppflags=-g").concretized()
        s.package.flag_handler = env_flags
        spack.build_environment.setup_package(s.package, False)
        assert os.environ["CPPFLAGS"] == "-g"
        assert "SPACK_CPPFLAGS" not in os.environ

    def test_build_system_flags_cmake(self, temp_env):
        """build_system_flags becomes -DCMAKE_*_FLAGS args for cmake packages."""
        s = spack.spec.Spec("cmake-client cppflags=-g").concretized()
        s.package.flag_handler = build_system_flags
        spack.build_environment.setup_package(s.package, False)
        assert "SPACK_CPPFLAGS" not in os.environ
        assert "CPPFLAGS" not in os.environ
        assert set(s.package.cmake_flag_args) == {
            "-DCMAKE_C_FLAGS=-g",
            "-DCMAKE_CXX_FLAGS=-g",
            "-DCMAKE_Fortran_FLAGS=-g",
        }

    def test_build_system_flags_autotools(self, temp_env):
        """build_system_flags becomes configure arguments for autotools packages."""
        s = spack.spec.Spec("patchelf cppflags=-g").concretized()
        s.package.flag_handler = build_system_flags
        spack.build_environment.setup_package(s.package, False)
        assert "SPACK_CPPFLAGS" not in os.environ
        assert "CPPFLAGS" not in os.environ
        assert "CPPFLAGS=-g" in s.package.configure_flag_args

    def test_build_system_flags_not_implemented(self, temp_env):
        """Test the command line flags method raises a NotImplementedError"""
        s = spack.spec.Spec("mpileaks cppflags=-g").concretized()
        s.package.flag_handler = build_system_flags
        with pytest.raises(NotImplementedError):
            spack.build_environment.setup_package(s.package, False)

    def test_add_build_system_flags_autotools(self, temp_env):
        """A custom handler can add build-system flags alongside injected ones."""
        s = spack.spec.Spec("patchelf cppflags=-g").concretized()
        s.package.flag_handler = add_o3_to_build_system_cflags
        spack.build_environment.setup_package(s.package, False)
        assert "-g" in os.environ["SPACK_CPPFLAGS"]
        assert "CPPFLAGS" not in os.environ
        assert s.package.configure_flag_args == ["CFLAGS=-O3"]

    def test_add_build_system_flags_cmake(self, temp_env):
        """Same as above, but for a cmake package."""
        s = spack.spec.Spec("cmake-client cppflags=-g").concretized()
        s.package.flag_handler = add_o3_to_build_system_cflags
        spack.build_environment.setup_package(s.package, False)
        assert "-g" in os.environ["SPACK_CPPFLAGS"]
        assert "CPPFLAGS" not in os.environ
        assert s.package.cmake_flag_args == ["-DCMAKE_C_FLAGS=-O3"]

    def test_ld_flags_cmake(self, temp_env):
        """ldflags are spread over all four cmake linker-flag variables."""
        s = spack.spec.Spec("cmake-client ldflags=-mthreads").concretized()
        s.package.flag_handler = build_system_flags
        spack.build_environment.setup_package(s.package, False)
        assert "SPACK_LDFLAGS" not in os.environ
        assert "LDFLAGS" not in os.environ
        assert set(s.package.cmake_flag_args) == {
            "-DCMAKE_EXE_LINKER_FLAGS=-mthreads",
            "-DCMAKE_MODULE_LINKER_FLAGS=-mthreads",
            "-DCMAKE_SHARED_LINKER_FLAGS=-mthreads",
            "-DCMAKE_STATIC_LINKER_FLAGS=-mthreads",
        }

    def test_ld_libs_cmake(self, temp_env):
        """ldlibs map onto the cmake standard-libraries variables."""
        s = spack.spec.Spec("cmake-client ldlibs=-lfoo").concretized()
        s.package.flag_handler = build_system_flags
        spack.build_environment.setup_package(s.package, False)
        assert "SPACK_LDLIBS" not in os.environ
        assert "LDLIBS" not in os.environ
        assert set(s.package.cmake_flag_args) == {
            "-DCMAKE_C_STANDARD_LIBRARIES=-lfoo",
            "-DCMAKE_CXX_STANDARD_LIBRARIES=-lfoo",
            "-DCMAKE_Fortran_STANDARD_LIBRARIES=-lfoo",
        }

    def test_flag_handler_no_modify_specs(self, temp_env):
        """A handler's flag mutations must not leak back into the spec."""
        def test_flag_handler(self, name, flags):
            flags.append("-foo")
            return (flags, None, None)

        s = spack.spec.Spec("cmake-client").concretized()
        s.package.flag_handler = test_flag_handler
        spack.build_environment.setup_package(s.package, False)
        assert not s.compiler_flags["cflags"]
        assert os.environ["SPACK_CFLAGS"] == "-foo"
6,755 | await kafka cluster | import socket
import re
import backoff
from . import basetest
from .runner import CfLocalRunnerWithPostgreSQL
# Constants
KAFKA_CLUSTER_IMAGE_NAME = "johnnypark/kafka-zookeeper"
KAFKA_CLUSTER_IMAGE_VERSION = "2.4.0"
KAFKA_CLUSTER_NAME = "kafka-cluster"
KAFKA_CONNECT_URL = "http://localhost:8083"
KAFKA_PG_CONNECTOR_NAME = "mx-databroker-PostgreSQL-source-connector"
KAFKA_PG_CONNECTOR_STATUS_API = "{}/connectors/{}/status".format(
KAFKA_CONNECT_URL,
KAFKA_PG_CONNECTOR_NAME,
)
KAFKA_BROKER_PORT = 9092
KAFKA_ZOOKEEPER_PORT = 2181
DATABROKER_TOPIC_FORMAT_VERSION = "1_0_0"
POSTGRES_DB_DOCKER_IMAGE = "debezium/postgres"
POSTGRES_DB_VERSION = "9.6-alpine"
MAX_RETRY_COUNT = 8
BACKOFF_TIME = 10
class CfLocalRunnerWithKafka(CfLocalRunnerWithPostgreSQL):
    """cf-local runner that additionally starts a Kafka/ZooKeeper container.

    Extends the PostgreSQL runner with a side-car Kafka cluster container and
    helpers that probe the Debezium connector and the Azkarra topics.
    """

    def __init__(self, *args, **kwargs):
        """Select the Debezium-enabled postgres image and name the Kafka container."""
        super().__init__(*args, **kwargs)
        self._database_postgres_image = POSTGRES_DB_DOCKER_IMAGE
        self._database_postgres_version = POSTGRES_DB_VERSION
        self._kafka_container_name = "{}-{}".format(self._app_name, KAFKA_CLUSTER_NAME)

    def _get_environment(self, env_vars):
        """Extend the base environment with the Kafka broker URL read by the app."""
        environment = super()._get_environment(env_vars)
        environment.update(
            {
                "MX_MyFirstModule_broker_url": "{}:{}".format(
                    self.get_host(),
                    KAFKA_BROKER_PORT,
                )
            }
        )
        return environment

    def _start_kafka_cluster(self):
        """Run the Kafka/ZooKeeper docker image; raise RuntimeError on failure."""
        result = self._cmd(
            (
                "docker",
                "run",
                "--name",
                self._kafka_container_name,
                "-p",
                "{}:{}".format(KAFKA_BROKER_PORT, KAFKA_BROKER_PORT),
                "-e",
                "ADVERTISED_HOST={}".format(self._host),
                "-e",
                "NUM_PARTITIONS={}".format(3),
                "-d",
                "{}:{}".format(
                    KAFKA_CLUSTER_IMAGE_NAME,
                    KAFKA_CLUSTER_IMAGE_VERSION,
                ),
            )
        )
        # NOTE(review): _cmd appears to return (output, ok_flag) -- confirm in base class.
        if not result[1]:
            raise RuntimeError(
                "Cannot create {} container: {}".format(
                    KAFKA_CLUSTER_NAME,
                    result[0],
                )
            )

    def stage(self, *args, **kwargs):
        """Stage the app, then start Kafka and wait for the broker port to accept connections."""
        result = super().stage(*args, **kwargs)
        self._start_kafka_cluster()

        # connect_ex returns 0 on success; backoff retries while the result is non-zero.
        @backoff.on_predicate(backoff.expo, lambda x: x > 0, max_time=30)
        def METHOD_NAME():
            return socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(
                ("localhost", KAFKA_BROKER_PORT)
            )

        METHOD_NAME()
        return result

    def is_debezium_running(self):
        """Query the Kafka Connect REST API for the databroker connector status."""
        return self.run_on_container("curl " + KAFKA_PG_CONNECTOR_STATUS_API)

    def is_azkarra_running(self):
        """Check that the three Connect-internal topics and a public databroker topic exist."""
        topics = self.run_on_container(
            f"./opt/kafka_2.12-{KAFKA_CLUSTER_IMAGE_VERSION}/bin/kafka-topics.sh "
            f"--list --zookeeper localhost:{KAFKA_ZOOKEEPER_PORT}",
            target_container=self._kafka_container_name,
        )
        expect_public_topic_pattern = rf".*?\.{DATABROKER_TOPIC_FORMAT_VERSION}"
        return (
            len(
                re.findall(
                    r"(mx-databroker-connect-(?:configs|offsets|status))",
                    topics,
                )
            )
            == 3
            and len(re.findall(expect_public_topic_pattern, topics)) > 0
        )
class TestCaseDataBroker(basetest.BaseTestWithPostgreSQL):
    """End-to-end test: stage an app with Databroker enabled and verify that the
    Debezium connector and Azkarra streams come up.

    Fixes: removed a stray trailing ``|`` extraction artifact, deleted
    commented-out code, and replaced ``str.find`` index comparisons with ``in``
    (``find(...) > 0`` would miss a match at index 0).
    """

    def _init_cflocal_runner(self, *args, **kwargs):
        """Use the Kafka-enabled runner instead of the plain PostgreSQL one."""
        return CfLocalRunnerWithKafka(*args, **kwargs)

    def test_databroker_running(self):
        """Stage and start the producer app, then check every databroker part."""
        self.stage_container(
            package="https://dghq119eo3niv.cloudfront.net/test-app/MyProducer902.mda",
            env_vars={
                "DATABROKER_ENABLED": "true",
                "FORCED_MXRUNTIME_URL": "https://dghq119eo3niv.cloudfront.net/",
            },
        )
        self.start_container()

        # check app is running
        self.assert_app_running()

        @backoff.on_exception(
            backoff.constant,
            Exception,
            interval=BACKOFF_TIME,
            max_tries=MAX_RETRY_COUNT,
        )
        def check_if_dbz_running():
            return self._runner.is_debezium_running()

        response = check_if_dbz_running()
        assert '"state":"RUNNING"' in str(response)

        # check azkarra is running by verifying the expected topics exist
        assert self._runner.is_azkarra_running()

        # check streaming service
        output = self.get_recent_logs()
        assert output is not None
        assert "State transition from REBALANCING to RUNNING" in str(output)
6,756 | get wav two | import numpy as np
import torch
import torch.nn as nn
def get_wav(in_channels, pool=True):
    """Build fixed Haar-wavelet decomposition filters as (transposed) convolutions.

    Returns a tuple ``(LL, LH, HL, HH)`` of depthwise layers, each mapping
    ``in_channels`` to ``in_channels * 2`` channels with a 2x2 kernel, stride 2
    and non-trainable weights.  ``pool=True`` yields downsampling ``nn.Conv2d``
    layers; ``pool=False`` yields upsampling ``nn.ConvTranspose2d`` layers.

    Improvement: the four identical layer constructions are built in one loop
    instead of four copy-pasted blocks.
    """
    # 1-D Haar low-pass (L) and high-pass (H) filters.
    harr_wav_L = 1 / np.sqrt(2) * np.ones((1, 2))
    harr_wav_H = 1 / np.sqrt(2) * np.ones((1, 2))
    harr_wav_H[0, 0] = -1 * harr_wav_H[0, 0]

    # 2-D separable filters as outer products, in LL/LH/HL/HH order.
    kernels = [
        np.transpose(harr_wav_L) * harr_wav_L,
        np.transpose(harr_wav_L) * harr_wav_H,
        np.transpose(harr_wav_H) * harr_wav_L,
        np.transpose(harr_wav_H) * harr_wav_H,
    ]

    net = nn.Conv2d if pool else nn.ConvTranspose2d

    layers = []
    for kernel in kernels:
        conv = net(
            in_channels,
            in_channels * 2,
            kernel_size=2,
            stride=2,
            padding=0,
            bias=False,
            groups=in_channels)
        conv.weight.requires_grad = False  # wavelet weights are fixed
        weight = torch.from_numpy(kernel).unsqueeze(0).float()
        conv.weight.data = weight.unsqueeze(0).expand(in_channels * 2, -1, -1, -1)
        layers.append(conv)
    return tuple(layers)
class WavePool(nn.Module):
    """Downsampling Haar wavelet pooling that yields four subband responses."""

    def __init__(self, in_channels):
        super(WavePool, self).__init__()
        self.LL, self.LH, self.HL, self.HH = get_wav(in_channels)

    def forward(self, x):
        """Return the (LL, LH, HL, HH) subband responses for ``x``."""
        return tuple(branch(x) for branch in (self.LL, self.LH, self.HL, self.HH))
def METHOD_NAME(in_channels, out_channels=None, pool=True):
    """wavelet decomposition using conv2d

    Returns a tuple ``(LL, LH, HL, HH)`` of fixed-weight depthwise layers with
    a 2x2 Haar kernel and stride 2.  ``out_channels`` defaults to
    ``in_channels``; ``pool`` selects ``nn.Conv2d`` (downsample) versus
    ``nn.ConvTranspose2d`` (upsample).

    NOTE(review): weights are expanded to ``in_channels`` rows, which only
    matches the layer's expected weight shape when
    ``out_channels == in_channels`` (Conv2d) or ``out_channels // groups == 1``
    (ConvTranspose2d) -- confirm before passing other values.

    Improvement: the four identical layer constructions are built in one loop
    instead of four copy-pasted blocks.
    """
    # 1-D Haar low-pass (L) and high-pass (H) filters.
    harr_wav_L = 1 / np.sqrt(2) * np.ones((1, 2))
    harr_wav_H = 1 / np.sqrt(2) * np.ones((1, 2))
    harr_wav_H[0, 0] = -1 * harr_wav_H[0, 0]

    # 2-D separable filters as outer products, in LL/LH/HL/HH order.
    kernels = [
        np.transpose(harr_wav_L) * harr_wav_L,
        np.transpose(harr_wav_L) * harr_wav_H,
        np.transpose(harr_wav_H) * harr_wav_L,
        np.transpose(harr_wav_H) * harr_wav_H,
    ]

    net = nn.Conv2d if pool else nn.ConvTranspose2d
    if out_channels is None:
        out_channels = in_channels

    layers = []
    for kernel in kernels:
        conv = net(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=2,
            padding=0,
            bias=False,
            groups=in_channels)
        conv.weight.requires_grad = False  # wavelet weights are fixed
        weight = torch.from_numpy(kernel).unsqueeze(0).float()
        conv.weight.data = weight.unsqueeze(0).expand(in_channels, -1, -1, -1)
        layers.append(conv)
    return tuple(layers)
class WavePool2(nn.Module):
    """Haar wavelet pooling with an optional output-channel override."""

    def __init__(self, in_channels, out_channels=None):
        super(WavePool2, self).__init__()
        self.LL, self.LH, self.HL, self.HH = METHOD_NAME(
            in_channels, out_channels)

    def forward(self, x):
        """Return the (LL, LH, HL, HH) subband responses for ``x``."""
        return tuple(branch(x) for branch in (self.LL, self.LH, self.HL, self.HH))
class WaveUnpool(nn.Module):
    """Inverse Haar wavelet pooling: recombines four subbands via transposed
    convolutions.

    ``option_unpool='sum'`` adds the four upsampled subbands;
    ``'cat5'`` concatenates them with a skip tensor ``original`` on the
    channel dimension.  Fixes: removed a stray trailing ``|`` extraction
    artifact and gave the bare ``NotImplementedError`` a message.
    """

    def __init__(self, in_channels, out_channels=None, option_unpool='cat5'):
        super(WaveUnpool, self).__init__()
        self.in_channels = in_channels
        self.option_unpool = option_unpool
        self.LL, self.LH, self.HL, self.HH = METHOD_NAME(
            self.in_channels, out_channels, pool=False)

    def forward(self, LL, LH, HL, HH, original=None):
        """Recombine the subbands; ``original`` is required for 'cat5'.

        Raises:
            NotImplementedError: for an unknown ``option_unpool``, or for
                'cat5' without the ``original`` skip tensor.
        """
        if self.option_unpool == 'sum':
            return self.LL(LL) + self.LH(LH) + self.HL(HL) + self.HH(HH)
        elif self.option_unpool == 'cat5' and original is not None:
            return torch.cat(
                [self.LL(LL),
                 self.LH(LH),
                 self.HL(HL),
                 self.HH(HH), original],
                dim=1)
        else:
            raise NotImplementedError(
                "unsupported option_unpool={!r} (note: 'cat5' requires the "
                "`original` tensor)".format(self.option_unpool))
6,757 | enum add values | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: reflection
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Enum(object):
    """Generated FlatBuffers accessor for a reflection ``Enum`` table.

    Read-only view over a serialized binary schema buffer; all getters
    resolve vtable offsets lazily and return ``None``/defaults when a
    field is absent.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root uoffset and position the table view there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Enum()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsEnum(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def EnumBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x42\x46\x42\x53" is the ASCII file identifier "BFBS".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x42\x46\x42\x53", size_prefixed=size_prefixed)

    # Enum
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Enum
    def Name(self):
        # vtable slot 4: enum name string, or None when missing.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Enum
    def Values(self, j):
        # vtable slot 6: vector of EnumVal tables; j-th element or None.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from reflection.EnumVal import EnumVal
            obj = EnumVal()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Enum
    def ValuesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Enum
    def ValuesIsNone(self):
        # True when the values vector field is absent entirely.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # Enum
    def IsUnion(self):
        # vtable slot 8: bool flag, default False.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # Enum
    def UnderlyingType(self):
        # vtable slot 10: nested Type table, or None when missing.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from reflection.Type import Type
            obj = Type()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Enum
    def Attributes(self, j):
        # vtable slot 12: vector of KeyValue tables; j-th element or None.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from reflection.KeyValue import KeyValue
            obj = KeyValue()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Enum
    def AttributesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Enum
    def AttributesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        return o == 0

    # Enum
    def Documentation(self, j):
        # vtable slot 14: vector of strings; j-th line or "" when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return ""

    # Enum
    def DocumentationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Enum
    def DocumentationIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        return o == 0

    # File that this Enum is declared in.
    # Enum
    def DeclarationFile(self):
        # vtable slot 16: source schema filename, or None when missing.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Module-level builder helpers (generated). The `EnumXxx` names are the
# legacy API; the short-named functions are aliases that delegate to them.
def EnumStart(builder):
    # 7 fields: name, values, is_union, underlying_type, attributes,
    # documentation, declaration_file (matching the accessor slots above).
    builder.StartObject(7)

def Start(builder):
    EnumStart(builder)

def EnumAddName(builder, name):
    # Slot 0: name string offset.
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)

def AddName(builder, name):
    EnumAddName(builder, name)

def METHOD_NAME(builder, values):
    # Slot 1: offset of the EnumVal vector.
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)

def AddValues(builder, values):
    METHOD_NAME(builder, values)

def EnumStartValuesVector(builder, numElems):
    # 4-byte elements (uoffsets), 4-byte alignment.
    return builder.StartVector(4, numElems, 4)

def StartValuesVector(builder, numElems: int) -> int:
    return EnumStartValuesVector(builder, numElems)

def EnumAddIsUnion(builder, isUnion):
    # Slot 2: bool, default False.
    builder.PrependBoolSlot(2, isUnion, 0)

def AddIsUnion(builder, isUnion):
    EnumAddIsUnion(builder, isUnion)

def EnumAddUnderlyingType(builder, underlyingType):
    # Slot 3: offset of the Type table.
    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(underlyingType), 0)

def AddUnderlyingType(builder, underlyingType):
    EnumAddUnderlyingType(builder, underlyingType)

def EnumAddAttributes(builder, attributes):
    # Slot 4: offset of the KeyValue vector.
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(attributes), 0)

def AddAttributes(builder, attributes):
    EnumAddAttributes(builder, attributes)

def EnumStartAttributesVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def StartAttributesVector(builder, numElems: int) -> int:
    return EnumStartAttributesVector(builder, numElems)

def EnumAddDocumentation(builder, documentation):
    # Slot 5: offset of the documentation string vector.
    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(documentation), 0)

def AddDocumentation(builder, documentation):
    EnumAddDocumentation(builder, documentation)

def EnumStartDocumentationVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def StartDocumentationVector(builder, numElems: int) -> int:
    return EnumStartDocumentationVector(builder, numElems)

def EnumAddDeclarationFile(builder, declarationFile):
    # Slot 6: offset of the declaring schema filename string.
    builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(declarationFile), 0)

def AddDeclarationFile(builder, declarationFile):
    EnumAddDeclarationFile(builder, declarationFile)

def EnumEnd(builder):
    return builder.EndObject()

def End(builder):
    return EnumEnd(builder)
6,758 | parsed resource id | # Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import click
import uuid
class Resource(object):
    """Base class for adding (and rolling back) an IAM role binding on a
    GCP resource, with helpers for integration testing.

    Subclasses are expected to override the class-level configuration
    constants and supply a `_client()` factory (not visible in this file).
    """

    # Integration-test fixtures, resolved from the environment at import
    # time; values are None-formatted strings when the variables are unset.
    TEST_ORG = "organizations/{org}".format(org=os.environ.get("TEST_ORG_ID"))
    TEST_USER = "user:{email}".format(
        email=os.environ.get("TEST_EMAIL")
    ).lower()
    TEST_PROJECT_ID = os.environ.get("TEST_PROJECT_ID")
    TEST_BILLING_ACT_ID = os.environ.get("TEST_BILLING_ACT_ID")
    # Per-resource-type configuration; subclasses override these.
    RESOURCE_ID_PATTERN = ""
    REQUIRED_PERMISSIONS = []
    ASSET_TYPE = ""
def __init__(self, resource_id, role, new_member, dry_run=False):
    """Bind this helper to one resource/role/member migration.

    :param resource_id: full resource name to operate on
    :param role: IAM role to grant
    :param new_member: member string (e.g. "user:a@b.com") to add
    :param dry_run: when True, compute but do not apply the new policy
    """
    self._resource_id = resource_id
    self._role = role
    self._new_member = new_member
    self._dry_run = dry_run
    # Snapshots taken around the policy update, used by rollback().
    self._prev_policy_snapshot = {}
    self._updated_policy_snapshot = None
def migrate(self):
    """Apply the new role binding to the resource's IAM policy."""
    resource_path = self._build_resource_path()
    self.__update_policy_for_resource(resource_path)
def rollback(self):
    """Restore the pre-migration IAM policy snapshot on the resource.

    Policies come in two shapes depending on the client used: a plain
    dict (REST-style) or a Policy object. Before re-applying the old
    snapshot, its etag is refreshed from the live policy — presumably
    because set_iam_policy rejects stale etags (TODO confirm against the
    client-library docs).
    """
    resource_path = self._build_resource_path()
    updated_policy = self._get_current_policy(resource_path)
    # dict-shaped policy: copy the fresh etag into the dict snapshot.
    if type(updated_policy) is dict and "etag" in updated_policy:
        self._prev_policy_snapshot["etag"] = updated_policy["etag"]
    # object-shaped policy: the snapshot may keep its fields in an
    # internal `_properties` mapping or expose a plain attribute.
    if hasattr(updated_policy, "etag"):
        if hasattr(self._prev_policy_snapshot, "_properties"):
            self._prev_policy_snapshot._properties[
                "etag"
            ] = updated_policy.etag
        elif hasattr(self._prev_policy_snapshot, "etag"):
            self._prev_policy_snapshot.etag = updated_policy.etag
    self._process_updated_iam_policy(
        resource_path, self._prev_policy_snapshot
    )
    click.secho(
        "ROLLED BACK BINDING ON {resource}".format(
            resource=self._resource_id
        ),
        bg="black",
        fg="red",
    )
def _get_policy_permissions(self):
    # Base implementation reports no permissions; consumed by
    # verify_permissions(), so subclasses are expected to override this
    # with a real permission probe.
    return []
def verify_permissions(self):
    """Check that the caller holds exactly REQUIRED_PERMISSIONS.

    Prints a confirmation on success; otherwise exits the process.

    :raises SystemExit: when the permission probe fails or the returned
        permission set does not match REQUIRED_PERMISSIONS.
    """
    try:
        returned_permissions = self._get_policy_permissions()
        # Exact-set comparison, mirroring the original contract.
        matches = set(self.REQUIRED_PERMISSIONS) == set(returned_permissions)
        if matches:
            return click.secho(
                "Permissions verified for {resource}".format(
                    resource=self._resource_id
                ),
            )
    except Exception:
        # A failed probe is treated the same as a mismatch. Narrowed from
        # a bare `except:`, which would also swallow KeyboardInterrupt and
        # SystemExit.
        pass
    raise SystemExit(
        "ERROR: Permissions not enough to modify {resource}".format(
            resource=self._resource_id
        )
    )
@staticmethod
def get_test_instance_name():
    """Return a fresh unique test-resource name, capped at 20 characters."""
    # "int-test-" prefix plus the start of a random 32-char hex uuid.
    return ("int-test-" + uuid.uuid4().hex)[:20]
def rollback_test_instance(self):
    """Tear down the integration-test resource and log the deletion."""
    self.delete_test_instance()
    click.secho(
        "DELETED {resource}".format(resource=self._resource_id),
        bg="black",
        fg="red",
    )
    # Visual divider between log sections (same 20-dash string as before).
    click.secho("-" * 20)
@classmethod
def get_test_instance(cls, resource, role):
    """Build an instance of this resource type bound to TEST_USER."""
    click.secho(
        "CREATED NEW {resource}".format(resource=resource),
        bg="black",
        fg="green",
    )
    instance = cls(resource, role, cls.TEST_USER)
    return instance
def METHOD_NAME(self):
    """Parse self._resource_id with RESOURCE_ID_PATTERN.

    :return: tuple of captured groups
    :raises RuntimeError: when the id does not match the pattern
    """
    pattern = re.compile(self.RESOURCE_ID_PATTERN)
    match = pattern.match(self._resource_id)
    if match is None:
        message = "Unable to parse resource name {name}".format(
            name=self._resource_id
        )
        raise RuntimeError(message)
    return match.groups()
def _build_resource_path(self):
    # Single-group pattern assumed: the 1-tuple unpack raises ValueError
    # if the regex captured anything other than exactly one group.
    (resource_path,) = self.METHOD_NAME()
    return resource_path

def _get_current_policy(self, resource_path=None):
    # Fetch the live IAM policy; `_client()` is supplied by subclasses
    # (not visible in this file).
    return self._client().get_iam_policy(
        request={"resource": resource_path}
    )

def _get_updated_policy(self, resource_path=None):
    # Current policy plus one extra binding (self._role -> new member).
    policy = self._get_current_policy(resource_path)
    policy.bindings.add(
        role=self._role,
        members=[self._new_member],
    )
    return policy
def _get_role_bindings(self):
    """Return the bindings of the last-applied policy snapshot.

    Policies are either plain dicts (REST-style) or Policy objects
    exposing a `.bindings` attribute.
    """
    policy = self._updated_policy_snapshot
    # isinstance is the idiomatic type check (also accepts dict
    # subclasses, unlike `type(...) is dict`).
    if isinstance(policy, dict):
        return policy["bindings"]
    return policy.bindings
def _process_updated_iam_policy(self, resource_path, new_policy):
    # Push `new_policy` to the resource and return the server's response.
    request = {"resource": resource_path, "policy": new_policy}
    return self._client().set_iam_policy(request=request)
def __log_pre_update(self):
    """Print a banner describing the binding about to be added."""
    divider = "-" * 20
    click.secho(divider)
    click.secho("UPDATING {resource}".format(resource=self._resource_id))
    click.secho(
        "NEW_USER => {user}".format(user=self._new_member),
        bg="black",
        fg="green",
    )
    click.secho(
        "ROLE => {role}".format(role=self._role),
        bg="black",
        fg="green",
    )
    click.secho(divider)
def __update_policy_for_resource(self, resource):
self.__log_pre_update()
self._prev_policy_snapshot = self._get_current_policy(resource)
updated_policy = self._get_updated_policy(resource)
# TODO: if not self._dry_run ?
if self._dry_run is False:
self._updated_policy_snapshot = self._process_updated_iam_policy(
resource, updated_policy
|
6,759 | construct position entry | """
this:
- gets capital from the database (earmarked with a strategy name)
- runs a backtest using that capital level, and mongodb data
- gets the final positions and position buffers
- writes these into a table (earmarked with a strategy name)
"""
import datetime
from syscore.constants import arg_not_supplied
from syscore.exceptions import missingData
from sysdata.config.configdata import Config
from sysdata.data_blob import dataBlob
from sysobjects.production.optimal_positions import (
bufferedOptimalPositions,
)
from sysobjects.production.tradeable_object import instrumentStrategy
from sysproduction.data.currency_data import dataCurrency
from sysproduction.data.capital import dataCapital
from sysproduction.data.contracts import dataContracts
from sysproduction.data.optimal_positions import dataOptimalPositions
from sysproduction.data.sim_data import get_sim_data_object_for_production
from sysproduction.data.backtest import store_backtest_state
from syslogging.logger import *
from systems.provided.futures_chapter15.basesystem import futures_system
from systems.basesystem import System
class runSystemClassic(object):
    """Run a classic futures backtest for one strategy: pull the
    strategy's capital from the database, build the system, update the
    buffered optimal positions, and persist the backtest state.
    """

    def __init__(
        self,
        data: dataBlob,
        strategy_name: str,
        backtest_config_filename=arg_not_supplied,
    ):
        # A config file is mandatory; there is no sensible default backtest.
        if backtest_config_filename is arg_not_supplied:
            raise Exception("Need to supply config filename")
        self.data = data
        self.strategy_name = strategy_name
        self.backtest_config_filename = backtest_config_filename

    ## DO NOT CHANGE THE NAME OF THIS FUNCTION
    def run_backtest(self):
        """Build the system with current capital/currency, run the update
        hook, and store the backtest state for this strategy."""
        strategy_name = self.strategy_name
        data = self.data
        base_currency, notional_trading_capital = self._get_currency_and_capital()
        system = self.system_method(
            notional_trading_capital=notional_trading_capital,
            base_currency=base_currency,
        )
        function_to_call_on_update = self.function_to_call_on_update
        function_to_call_on_update(
            data=data, strategy_name=strategy_name, system=system
        )
        store_backtest_state(data, system, strategy_name=strategy_name)

    ## MODIFY THIS WHEN INHERITING FOR A DIFFERENT STRATEGY
    ## ARGUMENTS MUST BE: data: dataBlob, strategy_name: str, system: System
    @property
    def function_to_call_on_update(self):
        # Hook point: subclasses return a different position updater here.
        return updated_buffered_positions

    def _get_currency_and_capital(self):
        """Return (base_currency, capital) for this strategy.

        Raises (after a critical-level log entry, which sends email) when
        no capital is recorded for the strategy.
        """
        data = self.data
        strategy_name = self.strategy_name
        capital_data = dataCapital(data)
        try:
            notional_trading_capital = capital_data.get_current_capital_for_strategy(
                strategy_name
            )
        except missingData:
            # critical log will send email
            error_msg = (
                "Capital data is missing for %s: can't run backtest" % strategy_name
            )
            data.log.critical(error_msg)
            raise Exception(error_msg)
        currency_data = dataCurrency(data)
        base_currency = currency_data.get_base_currency()
        self.data.log.debug(
            "Using capital of %s %.2f" % (base_currency, notional_trading_capital)
        )
        return base_currency, notional_trading_capital

    # DO NOT CHANGE THE NAME OF THIS FUNCTION; IT IS HARDCODED INTO CONFIGURATION FILES
    # BECAUSE IT IS ALSO USED TO LOAD BACKTESTS
    def system_method(
        self,
        notional_trading_capital: float = arg_not_supplied,
        base_currency: str = arg_not_supplied,
    ) -> System:
        """Build the production classic futures system using this
        instance's data blob and backtest config file."""
        data = self.data
        backtest_config_filename = self.backtest_config_filename
        system = production_classic_futures_system(
            data,
            backtest_config_filename,
            log=data.log,
            notional_trading_capital=notional_trading_capital,
            base_currency=base_currency,
        )
        return system
def production_classic_futures_system(
    data: dataBlob,
    config_filename: str,
    log=get_logger("futures_system"),
    notional_trading_capital: float = arg_not_supplied,
    base_currency: str = arg_not_supplied,
) -> System:
    """Build a classic futures system backed by production (mongodb) data.

    Capital and base currency from the config file are overridden when
    the corresponding arguments are supplied.
    """
    sim_data = get_sim_data_object_for_production(data)
    config = Config(config_filename)

    # Overwrite capital and base currency only when explicitly supplied.
    if notional_trading_capital is not arg_not_supplied:
        config.notional_trading_capital = notional_trading_capital
    if base_currency is not arg_not_supplied:
        config.base_currency = base_currency

    system = futures_system(data=sim_data, config=config)
    system._log = log
    system.set_logging_level("on")
    return system
def updated_buffered_positions(data: dataBlob, strategy_name: str, system: System):
    """Write buffered optimal positions for every instrument in `system`
    into the optimal-position store, tagged with `strategy_name`."""
    log = data.log
    optimal_position_data = dataOptimalPositions(data)
    for instrument_code in system.get_instrument_list():
        lower_buffer, upper_buffer = get_position_buffers_from_system(
            system, instrument_code
        )
        position_entry = METHOD_NAME(
            data=data,
            system=system,
            instrument_code=instrument_code,
            lower_position=lower_buffer,
            upper_position=upper_buffer,
        )
        instrument_strategy = instrumentStrategy(
            instrument_code=instrument_code, strategy_name=strategy_name
        )
        optimal_position_data.update_optimal_position_for_instrument_strategy(
            instrument_strategy=instrument_strategy, position_entry=position_entry
        )
        log.debug(
            "New buffered positions %.3f %.3f"
            % (position_entry.lower_position, position_entry.upper_position),
            instrument_code=instrument_code,
        )
def get_position_buffers_from_system(system: System, instrument_code: str):
    """Return the most recent (lower, upper) buffer edges for the
    instrument's optimal position."""
    # get the upper and lower edges of the buffer
    buffers = system.portfolio.get_buffers_for_position(instrument_code)
    latest_row = buffers.iloc[-1]
    return latest_row.bot_pos, latest_row.top_pos
def METHOD_NAME(
    data: dataBlob,
    system: System,
    instrument_code: str,
    lower_position: float,
    upper_position: float,
) -> bufferedOptimalPositions:
    """Assemble a buffered optimal-position entry for one instrument,
    stamped with the current time, latest daily price, and the currently
    priced contract id."""
    contract_data = dataContracts(data)
    latest_price = system.rawdata.get_daily_prices(instrument_code).iloc[-1]
    priced_contract = contract_data.get_priced_contract_id(instrument_code)
    return bufferedOptimalPositions(
        date=datetime.datetime.now(),
        lower_position=lower_position,
        upper_position=upper_position,
        reference_price=latest_price,
        reference_contract=priced_contract,
    )
6,760 | run keras | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark suite for KPL and feature column implementations."""
import itertools
import math
import random
import string
import time
import numpy as np
import tensorflow.compat.v2 as tf
import keras
class LayerBenchmark(tf.test.Benchmark):
    """Benchmark the layer forward pass."""

    def report(self, name, keras_time, fc_time, iters):
        """Calculate and report benchmark statistics."""
        # Positive delta means the Keras preprocessing layer was faster
        # than the equivalent feature column.
        delta = fc_time - keras_time
        extras = {
            "fc_avg_time": fc_time,
            "fc_vs_keras_sec": delta,
            "fc_vs_keras_pct": (delta / fc_time) * 100,
            "keras_faster_ratio": fc_time / keras_time,
        }
        self.report_benchmark(
            iters=iters, wall_time=keras_time, extras=extras, name=name
        )
class StepTimingCallback(keras.callbacks.Callback):
    """A callback that times non-warmup steps of a Keras predict call.

    The first two batches are treated as warmup and excluded; timing
    starts at batch index 2 and each later batch counts as one step.
    """

    def __init__(self):
        self.t0 = None
        self.steps = 0

    def on_predict_batch_begin(self, batch_index, _):
        if batch_index == 2:
            # Start the clock after the two warmup batches.
            self.t0 = time.time()
        elif batch_index > 2:
            self.steps += 1

    def on_predict_end(self, _):
        self.tn = time.time()
        if self.t0 is None or self.steps == 0:
            # Fewer than four batches ran: no timed steps. The original
            # raised TypeError/ZeroDivisionError here; report NaN instead.
            self.t_avg = float("nan")
        else:
            self.t_avg = (self.tn - self.t0) / self.steps
def create_data(length, num_entries, max_value, dtype):
    """Create a ragged tensor with random data entries."""
    # One random row length in [0, length) per entry.
    row_lengths = (np.random.random(size=num_entries) * length).astype(int)
    flat_size = np.sum(row_lengths)
    # Flat values in [0, max_value), cast to the requested dtype.
    flat_values = (np.random.random(size=flat_size) * max_value).astype(dtype)
    return tf.RaggedTensor.from_row_lengths(flat_values, row_lengths)
def create_string_data(
    length, num_entries, vocabulary, pct_oov, oov_string="__OOV__"
):
    """Create a ragged string tensor with random vocabulary entries.

    Approximately `pct_oov` of the values are replaced by `oov_string`,
    spread evenly across the flat value list.
    """
    lengths = (np.random.random(size=num_entries) * length).astype(int)
    total_length = np.sum(lengths)
    num_oovs = int(pct_oov * total_length)
    values = [random.choice(vocabulary) for _ in range(total_length)]
    # Guard on num_oovs rather than pct_oov: for small samples the OOV
    # count rounds down to zero, and the original
    # `int(total_length / num_oovs)` raised ZeroDivisionError.
    if num_oovs > 0:
        oov_cadence = int(total_length / num_oovs)
        idx = 0
        for _ in range(num_oovs):
            if idx < total_length:
                values[idx] = oov_string
            idx += oov_cadence
    return tf.RaggedTensor.from_row_lengths(values, lengths)
def create_vocabulary(vocab_size):
    """Create a deterministic vocabulary of `vocab_size` unique strings.

    Entries are generated in order of increasing length from the ASCII
    letters: "a".."z", "A".."Z", then "aa", "ab", ... until `vocab_size`
    entries exist.
    """
    if vocab_size <= 0:
        # Nothing requested; the log() below would fail on 0.
        return []
    base = len(string.ascii_letters)
    # Number of "digit" positions needed. max(1, ...) fixes vocab_size == 1,
    # where log(1, base) == 0 and the original returned an empty vocabulary.
    n = max(1, math.ceil(math.log(vocab_size, base)))
    vocab = []
    for i in range(1, n + 1):
        for item in itertools.product(string.ascii_letters, repeat=i):
            if len(vocab) >= vocab_size:
                # Vocabulary complete: stop immediately instead of
                # spinning through the remaining lengths.
                return vocab
            vocab.append("".join(item))
    return vocab
def METHOD_NAME(data, model, batch_size, num_runs, steps_per_repeat=100):
    """Benchmark a Keras model."""
    ds = (
        tf.data.Dataset.from_tensor_slices(data)
        .repeat()
        .prefetch(tf.data.AUTOTUNE)
        .batch(batch_size)
        .cache()
    )
    per_run_times = []
    for run_index in range(num_runs):
        # The step budget grows with each run, matching the original
        # cumulative schedule: run i predicts (i+1)*steps_per_repeat steps.
        step_count = (run_index + 1) * steps_per_repeat
        timer = StepTimingCallback()
        # Benchmarked code begins here.
        model.predict(ds, steps=step_count, callbacks=[timer])
        # Benchmarked code ends here.
        per_run_times.append(timer.t_avg)
    return np.mean(per_run_times)
def run_fc(data, fc_fn, batch_size, num_runs, steps_per_repeat=100):
    """Benchmark a Feature Column."""
    ds = (
        tf.data.Dataset.from_tensor_slices(data)
        .repeat()
        .prefetch(tf.data.AUTOTUNE)
        .batch(batch_size)
        .cache()
    )
    ds_iter = iter(ds)
    # Trace the fc_fn once up front so tracing cost is not timed.
    fc_fn(next(ds_iter))
    repeat_durations = []
    for _ in range(num_runs):
        start = time.time()
        # Benchmarked code begins here.
        for _ in range(steps_per_repeat):
            _ = fc_fn(next(ds_iter))
        # Benchmarked code ends here.
        repeat_durations.append(time.time() - start)
    # Per-step averages for each repeat, then the mean across repeats.
    return np.mean(np.array(repeat_durations) / steps_per_repeat)
6,761 | get api group with http info | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CoordinationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: return only the deserialized body, not the
        # (data, status, headers) triple.
        kwargs['_return_http_data_only'] = True
        return self.METHOD_NAME(**kwargs)  # noqa: E501

    def METHOD_NAME(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments early with a typed error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/coordination.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
6,762 | update | import json
from typing import Dict
from urllib.parse import parse_qs
from urllib.parse import urlparse
from Cryptodome.PublicKey import RSA
from jwkest import b64e
from jwkest.jwk import RSAKey
from jwkest.jwk import load_jwks
from oic.extension.message import TokenIntrospectionResponse
from oic.extension.signed_http_req import SignedHttpRequest
from oic.oauth2 import compact
from oic.utils.jwt import JWT
from oic.utils.keyio import KeyBundle
__author__ = "roland"
def sign_http_args(method, url, headers, body=""):
    """Build the keyword arguments for signing/verifying an HTTP request."""
    parts = urlparse(url)
    kwargs = {
        "path": parts.path,
        "host": parts.netloc,
        "headers": headers,
        "method": method,
    }
    # The body key is only present when a body was actually supplied.
    if body:
        kwargs["body"] = body
    kwargs["query_params"] = compact(parse_qs(parts.query))
    return kwargs
class PoPCallBack(object):
    """Callable that adds a proof-of-possession Authorization header to
    an outgoing request's keyword arguments."""

    def __init__(self, key, alg):
        self.key = key  # signing key
        self.alg = alg  # JWS algorithm, e.g. "RS256"

    def __call__(self, method, url, **kwargs):
        """Sign the request described by (method, url, kwargs) and insert
        the resulting "pop ..." Authorization header into kwargs."""
        # dict.get replaces the original try/except KeyError boilerplate;
        # behavior is identical (missing body -> None, missing headers -> {}).
        body = kwargs.get("body")
        headers = kwargs.get("headers", {})
        _kwargs = sign_http_args(method, url, headers, body)
        shr = SignedHttpRequest(self.key)
        kwargs["Authorization"] = "pop {}".format(shr.sign(alg=self.alg, **_kwargs))
        return kwargs
class PoPClient(object):
    """Client side of OAuth proof-of-possession: generates per-request RSA
    keys and maps them first by state, then by issued access token."""

    def __init__(self, key_size=2048, sign_alg="RS256"):
        self.key_size = key_size
        self.state2key: Dict[str, RSAKey] = {}
        self.token2key: Dict[str, RSAKey] = {}
        self.alg = sign_alg

    def METHOD_NAME(self, msg, state, key_size=0):
        """
        Use to 'update' the AccessToken Request.

        :param msg:
        :param state: Used to map access token response to this request
        :param key_size:
        :return:
        """
        # A falsy key_size (0/None) falls back to the instance default.
        bits = key_size or self.key_size
        fresh_key = RSAKey(key=RSA.generate(bits))
        self.state2key[state] = fresh_key
        msg["key"] = json.dumps(fresh_key.serialize())
        return msg

    def handle_access_token_response(self, resp):
        """
        Map access token to a keypair.

        :param resp: AccessTokenResponse instance
        """
        issued_token = resp["access_token"]
        self.token2key[issued_token] = self.state2key[resp["state"]]
class PoPAS(object):
    """Authorization-server side of OAuth proof-of-possession (PoP).

    Issues access tokens whose `cnf` claim carries the thumbprint of the
    client's public key, and resolves that key again at introspection time.
    """

    def __init__(self, me):
        # Maps base64url SHA-256 key thumbprints to the registered keys.
        self.thumbprint2key: Dict[str, RSAKey] = {}
        # Must be assigned by the caller before tokens are created/unpacked.
        self.keyjar = None
        # Own issuer identifier (also used as token audience).
        self.me = me

    def store_key(self, key):
        """Register `key` and return its base64url SHA-256 thumbprint."""
        kb = KeyBundle()
        kb.do_keys([key])
        # Store key with thumbprint as key
        key_thumbprint = b64e(kb.keys()[0].thumbprint("SHA-256")).decode("utf8")
        self.thumbprint2key[key_thumbprint] = key
        return key_thumbprint

    def create_access_token(self, key_thumbprint):
        """Mint a JWT access token binding the key via the `cnf` claim."""
        # creating the access_token
        jwt_constructor = JWT(self.keyjar, iss=self.me)
        # Audience is myself
        return jwt_constructor.pack(kid="abc", cnf={"kid": key_thumbprint}, aud=self.me)

    def token_introspection(self, token):
        """Unpack `token` and return an active TokenIntrospectionResponse
        carrying the serialized confirmation key."""
        jwt_constructor = JWT(self.keyjar, iss=self.me)
        res = jwt_constructor.unpack(token)
        tir = TokenIntrospectionResponse(active=True)
        tir["key"] = json.dumps(self.thumbprint2key[res["cnf"]["kid"]])
        return tir
class PoPRS(object):
    """Resource-server side of OAuth proof-of-possession: caches the
    confirmation key per access token and verifies signed requests."""

    def __init__(self):
        self.token2key: Dict[str, RSAKey] = {}

    def store_key(self, access_token, tir):
        """
        Store key that was returned in response from token introspection.

        :param access_token: The token that was introspected
        :param tir: TokenIntrospectionResponse instance
        """
        jwk = json.loads(tir["key"])
        jwks_doc = json.dumps({"keys": [jwk]})
        self.token2key[access_token] = load_jwks(jwks_doc)

    def eval_signed_http_request(
        self, pop_token, access_token, method, url, headers, body=""
    ):
        """Verify `pop_token` against the request description and the key
        previously stored for `access_token`."""
        request_kwargs = sign_http_args(method, url, headers, body)
        verifier = SignedHttpRequest(self.token2key[access_token][0])
        return verifier.verify(
            signature=pop_token,
            strict_query_params_verification=True,
            strict_headers_verification=True,
            **request_kwargs,
        )
6,763 | get new point | """
Sequentially adding new points to a kriging
===========================================
"""
# %%
# In this example, we show how to sequentially add new points to a kriging in order to improve the predictivity of the metamodel. In order to create simple graphics, we consider a 1D function.
# %%
# Create the function and the design of experiments
# -------------------------------------------------
# %%
import openturns as ot
from openturns.viewer import View
import numpy as np
import openturns.viewer as viewer
ot.Log.Show(ot.Log.NONE)
# %%
# A deliberately tiny initial design so the sequential enrichment below
# has visible work to do; the problem is one-dimensional.
sampleSize = 4
dimension = 1

# %%
# Define the function.

# %%
g = ot.SymbolicFunction(["x"], ["0.5*x^2 + sin(2.5*x)"])

# %%
# Create the design of experiments.

# %%
xMin = -0.9
xMax = 1.9
X_distr = ot.Uniform(xMin, xMax)
# LHS design over the input range.
# NOTE(review): the two boolean flags are LHSExperiment constructor
# options — confirm their meaning against the OpenTURNS documentation.
X = ot.LHSExperiment(X_distr, sampleSize, False, False).generate()
Y = g(X)

# %%
# Plot the true function together with the initial observations (red dots).
graph = g.draw(xMin, xMax)
data = ot.Cloud(X, Y)
data.setColor("red")
graph.add(data)
view = viewer.View(graph)
# %%
# Create the algorithms
# ---------------------
# %%
def createMyBasicKriging(X, Y):
    """
    Create a kriging from a pair of X and Y samples.
    We use a 3/2 Matérn covariance model and a constant trend.
    """
    trend_basis = ot.ConstantBasisFactory(dimension).build()
    matern_model = ot.MaternModel([1.0], 1.5)
    algo = ot.KrigingAlgorithm(X, Y, matern_model, trend_basis)
    algo.run()
    return algo.getResult()
# %%
def linearSample(xmin, xmax, npoints):
    """Return the vertices of a regular grid of `npoints` points
    from xmin to xmax (inclusive)."""
    spacing = (xmax - xmin) / (npoints - 1)
    grid = ot.RegularGrid(xmin, spacing, npoints)
    return grid.getVertices()
# %%
def plot_kriging_bounds(vLow, vUp, n_test):
    """
    From two lists containing the lower and upper bounds of the region,
    create a PolygonArray.
    """
    # Second color of the default palette, used for both fill and edge.
    fill_color = ot.Drawable.BuildDefaultPalette(2)[1]
    polygons = []
    for i in range(n_test - 1):
        # Quadrilateral between consecutive grid points, lower to upper.
        corners = [vLow[i], vLow[i + 1], vUp[i + 1], vUp[i]]
        polygons.append(ot.Polygon(corners, fill_color, fill_color))
    boundsPoly = ot.PolygonArray(polygons)
    boundsPoly.setLegend("95% bounds")
    return boundsPoly
# %%
# The following `sqrt` function will be used later to compute the standard deviation from the variance.
# %%
sqrt = ot.SymbolicFunction(["x"], ["sqrt(x)"])
# %%
def plotMyBasicKriging(krigResult, xMin, xMax, X, Y, level=0.95):
    """
    Given a kriging result, plot the data, the kriging metamodel
    and a confidence interval.

    Uses the module-level function `g` and symbolic `sqrt` helper.
    `level` is the two-sided confidence level for the interval band.
    The title reports the sample size and the Q2 predictivity factor
    computed on a 50-point validation grid.
    """
    samplesize = X.getSize()
    meta = krigResult.getMetaModel()
    graphKriging = meta.draw(xMin, xMax)
    graphKriging.setLegends(["Kriging"])
    # Create a grid of points and evaluate the function and the kriging
    nbpoints = 50
    xGrid = linearSample(xMin, xMax, nbpoints)
    yFunction = g(xGrid)
    yKrig = meta(xGrid)
    # Compute the conditional covariance; the small epsilon regularizes
    # near-zero variances before taking the square root.
    epsilon = ot.Sample(nbpoints, [1.0e-8])
    conditionalVariance = krigResult.getConditionalMarginalVariance(xGrid) + epsilon
    conditionalSigma = sqrt(conditionalVariance)
    # Compute the quantile of the Normal distribution
    alpha = 1 - (1 - level) / 2
    quantileAlpha = ot.DistFunc.qNormal(alpha)
    # Graphics of the bounds
    # NOTE(review): this scalar epsilon rebinds the Sample above and is
    # never used afterwards — looks like dead code.
    epsilon = 1.0e-8
    dataLower = [
        yKrig[i, 0] - quantileAlpha * conditionalSigma[i, 0] for i in range(nbpoints)
    ]
    dataUpper = [
        yKrig[i, 0] + quantileAlpha * conditionalSigma[i, 0] for i in range(nbpoints)
    ]
    # Coordinates of the vertices of the Polygons
    vLow = [[xGrid[i, 0], dataLower[i]] for i in range(nbpoints)]
    vUp = [[xGrid[i, 0], dataUpper[i]] for i in range(nbpoints)]
    # Compute the Polygon graphics
    boundsPoly = plot_kriging_bounds(vLow, vUp, nbpoints)
    boundsPoly.setLegend("95% bounds")
    # Validate the kriging metamodel
    mmv = ot.MetaModelValidation(xGrid, yFunction, meta)
    Q2 = mmv.computePredictivityFactor()[0]
    # Plot the function
    graphFonction = ot.Curve(xGrid, yFunction)
    graphFonction.setLineStyle("dashed")
    graphFonction.setColor("magenta")
    graphFonction.setLineWidth(2)
    graphFonction.setLegend("Function")
    # Draw the X and Y observed
    cloudDOE = ot.Cloud(X, Y)
    cloudDOE.setPointStyle("circle")
    cloudDOE.setColor("red")
    cloudDOE.setLegend("Data")
    # Assemble the graphics
    graph = ot.Graph()
    graph.add(boundsPoly)
    graph.add(graphFonction)
    graph.add(cloudDOE)
    graph.add(graphKriging)
    graph.setLegendPosition("bottomright")
    graph.setAxes(True)
    graph.setGrid(True)
    graph.setTitle("Size = %d, Q2=%.2f%%" % (samplesize, 100 * Q2))
    graph.setXTitle("X")
    graph.setYTitle("Y")
    return graph
# %%
# We start by creating the initial kriging metamodel on the 4 points in the design of experiments.
# %%
# Fit the initial kriging on the 4-point design and display it.
krigResult = createMyBasicKriging(X, Y)
graph = plotMyBasicKriging(krigResult, xMin, xMax, X, Y)
view = viewer.View(graph)
# %%
# Sequentially add new points
# ---------------------------
# %%
# The following function is the building block of the algorithm. It returns a new point which maximizes the conditional variance.
# %%
def METHOD_NAME(xMin, xMax, krigResult, nbpoints=50):
    """
    Return a new point to be added to the design of experiments.

    The point is the node of a regular grid on [xMin, xMax] where the
    conditional variance of the kriging metamodel is maximal, i.e. where
    the metamodel is currently the most uncertain.

    Parameters
    ----------
    xMin, xMax : float
        Bounds of the input domain.
    krigResult : openturns.KrigingResult
        Result of the current kriging fit.
    nbpoints : int, optional
        Number of grid nodes searched for the maximum (default 50,
        matching the previous hard-coded value).

    Returns
    -------
    openturns.Point
        The 1D input point maximizing the conditional variance on the grid.
    """
    xGrid = linearSample(xMin, xMax, nbpoints)
    conditionalVariance = krigResult.getConditionalMarginalVariance(xGrid)
    # Index of the most uncertain grid node.
    iMaxVar = int(np.argmax(conditionalVariance))
    xNew = xGrid[iMaxVar, 0]
    return ot.Point([xNew])
# %%
# We first call `getNewPoint` to get a point to add to the design of experiments.
# %%
xNew = METHOD_NAME(xMin, xMax, krigResult)
xNew
# %%
# Then we evaluate the function on the new point and add it to the training design of experiments.
# %%
yNew = g(xNew)
# Enrich the training design in place with the new observation.
X.add(xNew)
Y.add(yNew)
# %%
# We now plot the updated kriging.
# %%
# sphinx_gallery_thumbnail_number = 3
krigResult = createMyBasicKriging(X, Y)
graph = plotMyBasicKriging(krigResult, xMin, xMax, X, Y)
graph.setTitle("Kriging #0")
view = viewer.View(graph)
# %%
# The algorithm added a point to the right bound of the domain.
# %%
# Repeat the enrichment: pick the most uncertain point, evaluate, refit.
for krigingStep in range(5):
    xNew = METHOD_NAME(xMin, xMax, krigResult)
    yNew = g(xNew)
    X.add(xNew)
    Y.add(yNew)
    krigResult = createMyBasicKriging(X, Y)
    graph = plotMyBasicKriging(krigResult, xMin, xMax, X, Y)
    graph.setTitle("Kriging #%d " % (krigingStep + 1) + graph.getTitle())
    View(graph)
# %%
# We observe that the second added point is the left bound of the domain.
# The remaining points were added strictly inside the domain where the accuracy was drastically improved.
#
# With only 10 points, the metamodel accuracy is already very good with a Q2 which is equal to 99.9%.
# %%
# Conclusion
# ----------
#
# The current example presents the naive implementation on the creation of a sequential design of experiments based on kriging.
# More practical algorithms are presented in the following references.
#
# * Mona Abtini. Plans prédictifs à taille fixe et séquentiels pour le krigeage (2008). Thèse de doctorat de l'Université de Lyon.
# * Céline Scheidt. Analyse statistique d’expériences simulées : Modélisation adaptative de réponses non régulières par krigeage et plans d’expériences (2007).
# Thèse présentée pour obtenir le grade de Docteur de l’Université Louis Pasteur.
# * David Ginsbourger. Sequential Design of Computer Experiments. Wiley StatsRef: Statistics Reference Online, Wiley (2018)
View.ShowAll() |
6,764 | test get resource object id not found | # encoding: utf-8
import pytest
import ckan.logic as logic
import ckan.logic.auth as logic_auth
import ckan.model as core_model
import ckan.tests.helpers as helpers
def _get_function(obj_type):
    """Return the ``logic_auth`` getter matching *obj_type*.

    Raises KeyError for an unknown type name, same as the original lookup.
    """
    return {
        "package": logic_auth.get_package_object,
        "resource": logic_auth.get_resource_object,
        "user": logic_auth.get_user_object,
        "group": logic_auth.get_group_object,
    }[obj_type]
def _get_object_in_context(obj_type):
    """The getter must return the object already cached in the context."""
    # The user getter caches under "user_obj"; all others under the type name.
    key = "user_obj" if obj_type == "user" else obj_type
    obj = _get_function(obj_type)({key: "a_fake_object"})
    assert obj == "a_fake_object"
def _get_object_id_not_found(obj_type):
    """Looking up an unknown id must raise NotFound."""
    getter = _get_function(obj_type)
    with pytest.raises(logic.NotFound):
        getter({"model": core_model}, {"id": "not_here"})
def _get_object_id_none(obj_type):
    """Omitting the id entirely must raise ValidationError."""
    getter = _get_function(obj_type)
    with pytest.raises(logic.ValidationError):
        getter({"model": core_model}, {})
# --- context lookup: each getter returns the object cached in the context ---
def test_get_package_object_in_context():
    _get_object_in_context("package")
def test_get_resource_object_in_context():
    _get_object_in_context("resource")
def test_get_user_object_in_context():
    _get_object_in_context("user")
def test_get_group_object_in_context():
    _get_object_in_context("group")
# --- unknown id: each getter raises NotFound ---
def test_get_package_object_id_not_found():
    _get_object_id_not_found("package")
def METHOD_NAME():
    _get_object_id_not_found("resource")
def test_get_user_object_id_not_found():
    _get_object_id_not_found("user")
def test_get_group_object_id_not_found():
    _get_object_id_not_found("group")
# --- missing id: each getter raises ValidationError ---
def test_get_package_object_id_none():
    _get_object_id_none("package")
def test_get_resource_object_id_none():
    _get_object_id_none("resource")
def test_get_user_object_id_none():
    _get_object_id_none("user")
def test_get_group_object_id_none():
    _get_object_id_none("group")
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestInit(object):
    """Database-backed checks: a getter given a real id fetches the model
    object and caches it back into the context under the expected key."""

    def test_get_package_object_with_id(self):
        """get_package_object fetches by id and caches under 'package'."""
        user_name = helpers.call_action("get_site_user")["name"]
        dataset = helpers.call_action(
            "package_create", context={"user": user_name}, name="test_dataset"
        )
        context = {"model": core_model}
        obj = logic_auth.get_package_object(context, {"id": dataset["id"]})
        assert obj.id == dataset["id"]
        assert context["package"] == obj

    def test_get_resource_object_with_id(self):
        """get_resource_object fetches by id and caches under 'resource'."""
        user_name = helpers.call_action("get_site_user")["name"]
        dataset = helpers.call_action(
            "package_create", context={"user": user_name}, name="test_dataset"
        )
        resource = helpers.call_action(
            "resource_create",
            context={"user": user_name},
            package_id=dataset["id"],
            url="http://foo",
        )
        context = {"model": core_model}
        obj = logic_auth.get_resource_object(context, {"id": resource["id"]})
        assert obj.id == resource["id"]
        assert context["resource"] == obj

    def test_get_user_object_with_id(self):
        """get_user_object fetches by id and caches under 'user_obj'."""
        user_name = helpers.call_action("get_site_user")["name"]
        user = helpers.call_action(
            "user_create",
            context={"user": user_name},
            name="test_user",
            email="a@a.com",
            password="TestPassword1",
        )
        context = {"model": core_model}
        obj = logic_auth.get_user_object(context, {"id": user["id"]})
        assert obj.id == user["id"]
        assert context["user_obj"] == obj

    def test_get_group_object_with_id(self):
        """get_group_object fetches by id and caches under 'group'."""
        user_name = helpers.call_action("get_site_user")["name"]
        group = helpers.call_action(
            "group_create", context={"user": user_name}, name="test_group"
        )
        context = {"model": core_model}
        obj = logic_auth.get_group_object(context, {"id": group["id"]})
        assert obj.id == group["id"]
        assert context["group"] == obj
6,765 | get sampled rama favored angles | from __future__ import absolute_import, division, print_function
from mmtbx.building.loop_closure import utils
from mmtbx.validation import ramalyze
import itertools
import numpy
import random
from libtbx.utils import null_out
import boost_adaptbx.boost.python as bp
from six.moves import zip
from six.moves import range
ext = bp.import_ext("mmtbx_validation_ramachandran_ext")
from mmtbx_validation_ramachandran_ext import rama_eval
from six.moves import cStringIO as StringIO
def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):
    """
    Set backbone dihedrals of a copied hierarchy to the requested values.

    angles = [(phi, psi), (phi, psi), ... (phi, psi)]
    phi or psi == None means we don't change this angle
    returns deep-copied hierarchy with new angles. Change occurs from first to
    last angle so starting point would be in the same place.
    This function should produce up to all possible favored conformations.
    This function doesn't change moving_h
    direction_forward==True - set from beginning to end - the end residue moves
    direction_forward==False - set from end to beginning, the first residue moves

    Returns (result_h, fixed_omega) where fixed_omega is True if at least
    one omega was rotated back towards 180 (only when check_omega=True).

    NOTE(review): when direction_forward is False, angles.reverse() mutates
    the caller's list in place — confirm callers do not reuse it afterwards.
    """
    # print "angles", angles
    # STOP()
    result_h = moving_h.deep_copy()
    result_h.reset_atom_i_seqs()
    fixed_omega = False
    phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)
    assert len(phi_psi_atoms) == len(angles), "%d != %d" % (len(phi_psi_atoms), len(angles))
    if not direction_forward:
        phi_psi_atoms.reverse()
        angles.reverse()
    for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):
        phi_psi_pair = ps_atoms[0]
        # print "phi_psi_pair", phi_psi_pair
        omega = ps_atoms[2]
        phi_psi_angles = utils.get_pair_angles(phi_psi_pair)
        # print "ps_atoms, target_angle_pair", phi_psi_angles, target_angle_pair
        # phi: rotate by the difference between current and target angle
        if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:
            rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]
            # print "rot angle", rotation_angle
            # if not direction_forward:
            #   rotation_angle = -rotation_angle
            utils.rotate_atoms_around_bond(
                result_h,
                phi_psi_pair[0][1],
                phi_psi_pair[0][2],
                angle=rotation_angle,
                direction_forward=direction_forward)
        # psi: same scheme around the psi bond
        if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:
            rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]
            # print "rot angle", rotation_angle
            # if not direction_forward:
            #   rotation_angle = -rotation_angle
            utils.rotate_atoms_around_bond(
                result_h,
                phi_psi_pair[1][1],
                phi_psi_pair[1][2],
                angle=rotation_angle,
                direction_forward=direction_forward)
        # omega: if requested, snap strongly non-trans omegas (>10 deg from
        # 180) back to 180 and remember that a fix happened
        if omega is not None and abs(abs(omega)-180) > 10 and check_omega:
            rotation_angle= -omega+180
            # print "Omega rotation:", omega, rotation_angle
            utils.rotate_atoms_around_bond(
                result_h,
                phi_psi_pair[0][0],
                phi_psi_pair[0][1],
                angle=rotation_angle,
                direction_forward=direction_forward)
            fixed_omega = True
    # print utils.list_rama_outliers_h(result_h)
    # result_h.write_pdb_file(file_name="variant_%s.pdb" % direction_forward)
    # STOP()
    return result_h, fixed_omega
def is_not_none_combination(comb):
    """Return True if at least one pair in *comb* differs from (None, None).

    Used to discard candidate angle combinations that would not move any
    dihedral at all.

    Parameters
    ----------
    comb : iterable of tuple
        Sequence of (phi, psi) pairs, each possibly (None, None).

    Returns
    -------
    bool
        Fix: the original implicitly returned None when every pair was
        (None, None); this returns a proper False (truth-equivalent for
        all existing callers, which only use the value in a boolean test).
    """
    return any(pair != (None, None) for pair in comb)
def METHOD_NAME(rama_key, r=None, step=20):
    """Sample the (phi, psi) plane on a regular grid and keep favored points.

    Walks the full [-180, 180) x [-180, 180) grid with the given step,
    scores each node with the Ramachandran evaluator and returns the list
    of (phi, psi) tuples rated RAMALYZE_FAVORED for this residue type.
    """
    if r is None:
        r = rama_eval()
    res_type = ramalyze.res_types[rama_key]
    favored = []
    for phi in range(-180, 180, step):
        for psi in range(-180, 180, step):
            score = r.evaluate_angles(res_type, phi, psi)
            if ramalyze.ramalyze.evalScore(res_type, score) == ramalyze.RAMALYZE_FAVORED:
                favored.append((phi, psi))
    return favored
def get_all_starting_conformations(moving_h, change_radius,
        include_allowed, n_outliers,
        direction_forward=True, cutoff=50, change_all=True, log=null_out(), check_omega=False):
    """Enumerate candidate (phi, psi) combinations for loop rebuilding.

    For each residue a list of candidate angle pairs is built: a dense
    favored-region sampling for outliers (when n_outliers < 3), the coarse
    favored regions for residues in the change window / twisted cases, and
    (None, None) for residues left untouched. The cartesian product of
    these lists is returned, minus all-(None, None) combinations.

    NOTE(review): the default log=null_out() is evaluated once at import
    time and shared across calls — callers passing log=None get a fresh
    StringIO instead; confirm this asymmetry is intended.
    """
    if log is None:
        log = StringIO()
    variants = []
    result = []
    r = rama_eval()
    phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)
    # print "N residue groups in h", [x.resseq for x in moving_h.residue_groups()]
    if len(phi_psi_atoms) == 0:
        print("Strange input to starting conformations!!!", file=log)
        return result
    n_rama = len(phi_psi_atoms)
    # print "n_rama", n_rama
    # Window of residue indices allowed to move, centered on the middle of
    # the span and widened by the outlier count.
    change_angles = [None]
    if change_all:
        change_angles = range((n_rama)//2-change_radius-n_outliers//2, (n_rama)//2+change_radius+1+n_outliers//2)
    # if change_angles[0] < 0:
    #   change_angles = range(change_angles[-1]-change_angles[0])
    has_twisted = False
    if check_omega:
        omegas = [x[2] for x in phi_psi_atoms]
        for o in omegas:
            if o is not None and abs(abs(o)-180) > 30:
                has_twisted = True
    print("n_outliers", n_outliers, file=log)
    for i, (phi_psi_pair, rama_key, omega) in enumerate(phi_psi_atoms):
        angle_is_outlier = utils.rama_evaluate(phi_psi_pair, r, rama_key) == ramalyze.RAMALYZE_OUTLIER
        angle_is_outlier = angle_is_outlier or (include_allowed and utils.rama_evaluate(phi_psi_pair, r, rama_key) == ramalyze.RAMALYZE_ALLOWED)
        twisted = omega is not None and ((abs(abs(omega)-180) > 30) and check_omega)
        print("in cycle, N, outlier?, change?, twisted?", i, angle_is_outlier, i in change_angles, twisted, file=log)
        if angle_is_outlier and n_outliers < 3:
            vs = METHOD_NAME(rama_key, r)
        elif (i in change_angles) or angle_is_outlier or has_twisted:
            # vs = get_sampled_rama_favored_angles(rama_key, r)
            vs = ramalyze.get_favored_regions(rama_key)
        else:
            vs = [(None, None)]
        variants.append(vs)
    print("variants", variants, file=log)
    # Filtering them, since could be
    # [len(x) for x in variants] = [129, 129, 4, 129, 129]
    # resulting in 1107691524 all_angles_combination
    n_comb = numpy.prod([len(x) for x in variants])
    if n_comb > cutoff:
        # still aiming for ~1000
        n_in_each = int(1000 ** (1/len(variants)))
        variants = [random.sample(x, n_in_each) if len(x)>n_in_each else x for x in variants]
    all_angles_combination = list(itertools.product(*variants))
    # filter none combinations
    # print "len(all_angles_combination)", len(all_angles_combination)
    all_angles_combination_f = []
    for comb in all_angles_combination:
        if is_not_none_combination(comb):
            all_angles_combination_f.append(comb)
    print("len(all_angles_combination_f)", len(all_angles_combination_f), file=log)
    return all_angles_combination_f
# if len(all_angles_combination_f) == 0:
# print "In starting conformations - outlier was fixed?"
# return result
# n_added = 0
# n_all_combination = len(all_angles_combination_f)
# i_max = min(cutoff, n_all_combination)
# assert i_max > 0
# step = float(n_all_combination-1)/float(i_max-1)
# if step < 1:
# step = 1
# for i in range(i_max):
# comb = all_angles_combination_f[int(round(step*i))]
# result.append(set_rama_angles(moving_h, list(comb),direction_forward=direction_forward))
# print >> log, "Model %d, angles:" % i, comb
# return result |
6,766 | array | def x():
return
#? None
x()
def METHOD_NAME(first_param):
#? ['first_param']
first_param
return list()
#? []
METHOD_NAME.first_param
#? []
METHOD_NAME.first_param.
func = METHOD_NAME
#? []
func.first_param
#? list()
METHOD_NAME()
#? ['array']
arr
def inputs(param):
return param
#? list
inputs(list)
def variable_middle():
var = 3
return var
#? int()
variable_middle()
def variable_rename(param):
var = param
return var
#? int()
variable_rename(1)
def multi_line_func(a, # comment blabla
b):
return b
#? str()
multi_line_func(1,'')
def multi_line_call(b):
return b
multi_line_call(
#? int()
b=1)
# nothing after comma
def asdf(a):
return a
x = asdf(a=1,
)
#? int()
x
# -----------------
# double execution
# -----------------
def double_exe(param):
return param
#? str()
variable_rename(double_exe)("")
# -> shouldn't work (and throw no error)
#? []
variable_rename(list())().
#? []
variable_rename(1)().
# -----------------
# recursions (should ignore)
# -----------------
def recursion(a, b):
if a:
return b
else:
return recursion(a+".", b+1)
# Does not also return int anymore, because we now support operators in simple cases.
#? float()
recursion("a", 1.0)
def other(a):
return recursion2(a)
def recursion2(a):
if random.choice([0, 1]):
return other(a)
else:
if random.choice([0, 1]):
return recursion2("")
else:
return a
#? int() str()
recursion2(1)
# -----------------
# ordering
# -----------------
def a():
#? int()
b()
return b()
def b():
return 1
#? int()
a()
# -----------------
# keyword arguments
# -----------------
def func(a=1, b=''):
return a, b
exe = func(b=list, a=tuple)
#? tuple
exe[0]
#? list
exe[1]
# -----------------
# default arguments
# -----------------
#? int()
func()[0]
#? str()
func()[1]
#? float()
func(1.0)[0]
#? str()
func(1.0)[1]
#? float()
func(a=1.0)[0]
#? str()
func(a=1.0)[1]
#? int()
func(b=1.0)[0]
#? float()
func(b=1.0)[1]
#? list
func(a=list, b=set)[0]
#? set
func(a=list, b=set)[1]
def func_default(a, b=1):
return a, b
def nested_default(**kwargs):
return func_default(**kwargs)
#? float()
nested_default(a=1.0)[0]
#? int()
nested_default(a=1.0)[1]
#? str()
nested_default(a=1.0, b='')[1]
# Defaults should only work if they are defined before - not after.
def default_function(a=default):
#?
return a
#?
default_function()
default = int()
def default_function(a=default):
#? int()
return a
#? int()
default_function()
def default(a=default):
#? int()
a
# -----------------
# closures
# -----------------
def a():
l = 3
def func_b():
l = ''
#? str()
l
#? ['func_b']
func_b
#? int()
l
# -----------------
# *args
# -----------------
def args_func(*args):
#? tuple()
return args
exe = args_func(1, "")
#? int()
exe[0]
#? str()
exe[1]
# illegal args (TypeError)
#?
args_func(*1)[0]
# iterator
#? int()
args_func(*iter([1]))[0]
# different types
e = args_func(*[1 if UNDEFINED else "", {}])
#? int() str()
e[0]
#? dict()
e[1]
_list = [1,""]
exe2 = args_func(_list)[0]
#? str()
exe2[1]
exe3 = args_func([1,""])[0]
#? str()
exe3[1]
def args_func(arg1, *args):
return arg1, args
exe = args_func(1, "", list)
#? int()
exe[0]
#? tuple()
exe[1]
#? list
exe[1][1]
# In a dynamic search, both inputs should be given.
def simple(a):
#? int() str()
return a
def xargs(*args):
return simple(*args)
xargs(1)
xargs('')
# *args without a self symbol
def memoize(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
class Something():
@memoize
def x(self, a, b=1):
return a
#? int()
Something().x(1)
# -----------------
# ** kwargs
# -----------------
def kwargs_func(**kwargs):
#? ['keys']
kwargs.keys
#? dict()
return kwargs
exe = kwargs_func(a=3,b=4.0)
#? dict()
exe
#? int()
exe['a']
#? float()
exe['b']
#? int() float()
exe['c']
a = 'a'
exe2 = kwargs_func(**{a:3,
'b':4.0})
#? int()
exe2['a']
#? float()
exe2['b']
#? int() float()
exe2['c']
exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]})
# Should resolve to the same as 2 but jedi is not smart enough yet
# Here to make sure it doesn't result in crash though
#?
exe3['a']
#?
exe3['b']
#?
exe3['c']
# -----------------
# *args / ** kwargs
# -----------------
def func_without_call(*args, **kwargs):
#? tuple()
args
#? dict()
kwargs
def fu(a=1, b="", *args, **kwargs):
return a, b, args, kwargs
exe = fu(list, 1, "", c=set, d="")
#? list
exe[0]
#? int()
exe[1]
#? tuple()
exe[2]
#? str()
exe[2][0]
#? dict()
exe[3]
#? set
exe[3]['c']
def kwargs_iteration(**kwargs):
return kwargs
for x in kwargs_iteration(d=3):
#? float()
{'d': 1.0, 'c': '1'}[x]
# -----------------
# nested *args
# -----------------
def function_args(a, b, c):
return b
def nested_args(*args):
return function_args(*args)
def nested_args2(*args, **kwargs):
return nested_args(*args)
#? int()
nested_args('', 1, 1.0, list)
#? []
nested_args('').
#? int()
nested_args2('', 1, 1.0)
#? []
nested_args2('').
# -----------------
# nested **kwargs
# -----------------
def nested_kw(**kwargs1):
return function_args(**kwargs1)
def nested_kw2(**kwargs2):
return nested_kw(**kwargs2)
# invalid command, doesn't need to return anything
#?
nested_kw(b=1, c=1.0, list)
#? int()
nested_kw(b=1)
# invalid command, doesn't need to return anything
#?
nested_kw(d=1.0, b=1, list)
#? int()
nested_kw(a=3.0, b=1)
#? int()
nested_kw(b=1, a=r"")
#? []
nested_kw(1, '').
#? []
nested_kw(a='').
#? int()
nested_kw2(b=1)
#? int()
nested_kw2(b=1, c=1.0)
#? int()
nested_kw2(c=1.0, b=1)
#? []
nested_kw2('').
#? []
nested_kw2(a='').
#? []
nested_kw2('', b=1).
# -----------------
# nested *args/**kwargs
# -----------------
def nested_both(*args, **kwargs):
return function_args(*args, **kwargs)
def nested_both2(*args, **kwargs):
return nested_both(*args, **kwargs)
# invalid commands, may return whatever.
#? list
nested_both('', b=1, c=1.0, list)
#? list
nested_both('', c=1.0, b=1, list)
#? []
nested_both('').
#? int()
nested_both2('', b=1, c=1.0)
#? int()
nested_both2('', c=1.0, b=1)
#? []
nested_both2('').
# -----------------
# nested *args/**kwargs with a default arg
# -----------------
def function_def(a, b, c):
return a, b
def nested_def(a, *args, **kwargs):
return function_def(a, *args, **kwargs)
def nested_def2(*args, **kwargs):
return nested_def(*args, **kwargs)
#? str()
nested_def2('', 1, 1.0)[0]
#? str()
nested_def2('', b=1, c=1.0)[0]
#? str()
nested_def2('', c=1.0, b=1)[0]
#? int()
nested_def2('', 1, 1.0)[1]
#? int()
nested_def2('', b=1, c=1.0)[1]
#? int()
nested_def2('', c=1.0, b=1)[1]
#? []
nested_def2('')[1].
# -----------------
# magic methods
# -----------------
def a(): pass
#? ['__closure__']
a.__closure__ |
6,767 | test crash logs with crashes | import json
import os
from unittest.mock import patch
import pytest
from flexget import __version__
from flexget.api.app import __version__ as __api_version__
from flexget.api.app import base_message
from flexget.api.core.server import ObjectsContainer as OC
from flexget.tests.conftest import MockManager
from flexget.utils.tools import get_latest_flexget_version_number
class TestServerAPI:
    """API tests for the /server endpoints (pid, manage, config, version, crash logs)."""

    # Minimal task config used by the mock manager for these tests.
    config = """
        tasks:
          test:
            rss:
              url: http://test/rss
            mock:
              - title: entry 1
    """

    def test_pid(self, api_client, schema_match):
        """/server/pid/ returns the PID of the running process."""
        rsp = api_client.get('/server/pid/', headers={})
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.pid_object, data)
        assert not errors
        assert data['pid'] == os.getpid()

    def test_reload(self, api_client, schema_match):
        """The 'reload' manage operation triggers a config reload."""
        with patch.object(MockManager, 'load_config') as mocked_load_config:
            payload = {'operation': 'reload'}
            rsp = api_client.json_post('/server/manage/', data=json.dumps(payload))
            assert rsp.status_code == 200
            data = json.loads(rsp.get_data(as_text=True))
            errors = schema_match(base_message, data)
            assert not errors
            assert mocked_load_config.called

    def test_shutdown(self, api_client, schema_match):
        """The 'shutdown' manage operation calls the manager shutdown."""
        with patch.object(MockManager, 'shutdown') as mocked_shutdown:
            payload = {'operation': 'shutdown'}
            rsp = api_client.json_post('/server/manage/', data=json.dumps(payload))
            assert rsp.status_code == 200
            data = json.loads(rsp.get_data(as_text=True))
            errors = schema_match(base_message, data)
            assert not errors
            assert mocked_shutdown.called

    def test_get_config(self, api_client, schema_match):
        """/server/config/ returns the parsed config including plugin defaults."""
        rsp = api_client.get('/server/config/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match({'type': 'object'}, data)
        assert not errors
        assert data == {
            'tasks': {
                'test': {
                    'mock': [{'title': 'entry 1'}],
                    'rss': {
                        'url': 'http://test/rss',
                        'group_links': False,
                        'ascii': False,
                        'escape': False,
                        'silent': False,
                        'all_entries': True,
                    },
                }
            }
        }

    def test_get_raw_config(self, manager, api_client, schema_match):
        """/server/raw_config/ returns the on-disk config, base64 encoded."""
        manager.config_path = os.path.join(os.path.dirname(__file__), 'raw_config.yml')
        rsp = api_client.get('/server/raw_config/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.raw_config_object, data)
        assert not errors
        # Expected value is the base64 of raw_config.yml's contents.
        assert (
            data['raw_config']
            == 'dGFza3M6CiAgdGVzdDoKICAgIHJzczoKICAgICAgdXJsOiBodHRwOi8vdGVzdC9yc3MKICAgIG1'
            'vY2s6CiAgICAgIC0gdGl0bGU6IGVudHJ5IDE='
        )

    @pytest.mark.online
    def test_version(self, api_client, schema_match):
        """/server/version/ reports flexget, API and latest released versions."""
        latest = get_latest_flexget_version_number()
        rsp = api_client.get('/server/version/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.version_object, data)
        assert not errors
        assert data == {
            'flexget_version': __version__,
            'api_version': __api_version__,
            'latest_version': latest,
        }

    def test_crash_logs_without_crash_log(self, api_client, schema_match):
        """With no crash logs on disk the endpoint returns an empty list."""
        rsp = api_client.get('/server/crash_logs')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.crash_logs, data)
        assert not errors
        assert not data

    def METHOD_NAME(self, api_client, schema_match, manager):
        """With crash logs present in config_base the endpoint returns them."""
        manager.config_base = os.path.join(os.path.dirname(__file__))
        rsp = api_client.get('/server/crash_logs')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.crash_logs, data)
        assert not errors
        assert len(data) == 2
6,768 | get authorization tracker | """system_server.persistence: provides interface for persistent database storage."""
import logging
from pathlib import Path
from fastapi import Depends
from typing_extensions import Final
import sqlalchemy
from asyncio import Lock
from uuid import UUID
from .database import create_sql_engine
from .tables import registration_table, migration_table
from .persistent_directory import create_persistent_directory
from .system_uuid import get_system_uuid
from server_utils.fastapi_utils.app_state import (
AppState,
AppStateAccessor,
get_app_state,
)
from system_server.settings import get_settings
from system_server.connection import AuthorizationTracker
_sql_engine_accessor = AppStateAccessor[sqlalchemy.engine.Engine]("sql_engine")
_persistence_directory_accessor = AppStateAccessor[Path]("persistence_directory")
_uuid_accessor = AppStateAccessor[UUID]("system_uuid")
_authorization_tracker_accessor = AppStateAccessor[AuthorizationTracker](
"authorization_tracker"
)
_DATABASE_FILE: Final = "system_server.db"
_UUID_FILE: Final = "system_server_uuid"
_log = logging.getLogger(__name__)
# TODO(fs, 2/28/23): ideally we do not depend on locks stored this way, should move to
# how the robot server initializes these kinds of shared resources.
_persistence_dir_lock = Lock()
_sql_lock = Lock()
_uuid_lock = Lock()
_authorization_tracker_lock = Lock()
async def get_persistence_directory(
    app_state: AppState = Depends(get_app_state),
) -> Path:
    """Return the root persistence directory, creating it if necessary."""
    async with _persistence_dir_lock:
        # Guard clause: reuse the directory cached on the app state, if any.
        cached = _persistence_directory_accessor.get_from(app_state)
        if cached is not None:
            return cached
        setting = get_settings().persistence_directory
        # There is no appropriate default to this setting, so raise an
        # exception and bail if it isn't specified.
        if setting is None:
            raise RuntimeError(
                "No persistence path was specified.\n"
                "Configure a persistence path with OT_SYSTEM_SERVER_persistence_directory"
            )
        created = await create_persistent_directory(
            None if setting == "automatically_make_temporary" else Path(setting)
        )
        _persistence_directory_accessor.set_on(app_state, created)
        return created
async def get_sql_engine(
    app_state: AppState = Depends(get_app_state),
    persistence_directory: Path = Depends(get_persistence_directory),
) -> sqlalchemy.engine.Engine:
    """Return a singleton SQL engine referring to a ready-to-use database."""
    async with _sql_lock:
        # Guard clause: reuse the engine cached on the app state, if any.
        cached = _sql_engine_accessor.get_from(app_state)
        if cached is not None:
            return cached
        engine = create_sql_engine(persistence_directory / _DATABASE_FILE)
        _sql_engine_accessor.set_on(app_state, engine)
        return engine
# Rely on connections being cleaned up automatically when the process dies.
# FastAPI doesn't give us a convenient way to properly tie
# the lifetime of a dependency to the lifetime of the server app.
# https://github.com/tiangolo/fastapi/issues/617
async def get_persistent_uuid(
    app_state: AppState = Depends(get_app_state),
    persistence_directory: Path = Depends(get_persistence_directory),
) -> UUID:
    """Return a singleton UUID for signing purposes."""
    async with _uuid_lock:
        # Guard clause: reuse the UUID cached on the app state, if any.
        cached = _uuid_accessor.get_from(app_state)
        if cached is not None:
            return cached
        loaded = await get_system_uuid(persistence_directory / _UUID_FILE)
        _uuid_accessor.set_on(app_state, loaded)
        return loaded
async def METHOD_NAME(
    app_state: AppState = Depends(get_app_state),
) -> AuthorizationTracker:
    """Return a singleton authorization tracker for the server instance."""
    async with _authorization_tracker_lock:
        # Guard clause: reuse the tracker cached on the app state, if any.
        cached = _authorization_tracker_accessor.get_from(app_state)
        if cached is not None:
            return cached
        created = AuthorizationTracker()
        _authorization_tracker_accessor.set_on(app_state, created)
        return created
__all__ = [
"get_persistence_directory",
"get_sql_engine",
"get_persistent_uuid",
"get_authorization_tracker",
"registration_table",
"migration_table",
] |
6,769 | get job name | #!/usr/bin/env python
"""
Retrieves job status according to given criteria.
Example:
$ dstat -a -u your.dirac.username
"""
import datetime
from DIRAC import exit as DIRACExit, S_OK, S_ERROR
from DIRAC import gLogger
from DIRAC.Interfaces.Utilities.DCommands import DSession
from DIRAC.Interfaces.Utilities.DConfigCache import ConfigCache
from DIRAC.Interfaces.Utilities.DCommands import ArrayFormatter
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Utilities.TimeUtilities import toString, day
from DIRAC.WorkloadManagementSystem.Client.JobStatus import JOB_STATES, JOB_FINAL_STATES
from DIRAC.WorkloadManagementSystem.Client.JobMonitoringClient import (
JobMonitoringClient,
)
def selectJobs(owner, date, jobGroup, jobName):
    """Query the monitoring service for job IDs matching the given criteria.

    Owner is always part of the selection; jobGroup and jobName are added
    only when non-empty. Returns the raw S_OK/S_ERROR result structure.
    """
    conditions = {"Owner": owner}
    if jobGroup:
        conditions["JobGroup"] = jobGroup
    if jobName:
        conditions["JobName"] = jobName
    return JobMonitoringClient().getJobs(conditions, date)
def getJobSummary(jobs):
    """Fetch the summary of the given jobs from the job monitoring service.

    Parameters
    ----------
    jobs : list
        Job IDs; an empty list short-circuits to S_OK({}) without an RPC.

    Returns
    -------
    S_OK with a dict {jobID: summary dict}, or S_ERROR on failure.
    """
    if not jobs:
        return S_OK({})
    monitoring = JobMonitoringClient()
    result = monitoring.getJobsSummary(jobs)
    if not result["OK"]:
        return result
    if isinstance(result["Value"], str):
        # Legacy servers return the summary as a stringified dict.
        # NOTE(review): eval() on service-provided data is unsafe if the
        # server is untrusted; ast.literal_eval would be the safer parser.
        try:
            jobSummary = eval(result["Value"])
        # Fix: narrowed from a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return S_ERROR("Problem while converting result from job monitoring")
    else:
        jobSummary = result["Value"]
    return S_OK(jobSummary)
def chunks(l, n):
    """Split sequence *l* into consecutive slices of length *n* (last may be shorter)."""
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
EXTRA_DISPLAY_COLUMNS = [
"JobType",
"ApplicationStatus",
"StartExecTime",
"EndExecTime",
"CPUTime",
]
DEFAULT_DISPLAY_COLUMNS = [
"Owner",
"JobName",
"OwnerGroup",
"JobGroup",
"Site",
"Status",
"MinorStatus",
"SubmissionTime",
]
class Params:
    """Holder for dstat command-line options.

    Each set* method doubles as a DIRAC Script switch callback and must
    therefore return S_OK(), per the option-parsing convention.
    """

    def __init__(self):
        self.__session = None
        self.user = None
        # Default: show only jobs in non-final states.
        self.status = [e.lower() for e in set(JOB_STATES) - set(JOB_FINAL_STATES)]
        self.fmt = "pretty"
        self.jobDate = 10
        self.fields = DEFAULT_DISPLAY_COLUMNS
        self.jobGroup = None
        self.jobName = None
        self.inputFile = None

    def setSession(self, session):
        self.__session = session
        # The dstat_fields session setting overrides the displayed columns.
        customFields = session.getEnv("dstat_fields", "")["Value"]
        if customFields:
            self.fields = customFields.split(",")
        return S_OK()

    def setUser(self, arg=None):
        self.user = arg
        return S_OK()

    def getUser(self):
        return self.user

    def setStatus(self, arg=None):
        # Comma-separated list of statuses, matched case-insensitively.
        self.status = arg.lower().split(",")
        return S_OK()

    def setStatusAll(self, arg=None):
        self.status = [e.lower() for e in JOB_STATES]
        return S_OK()

    def getStatus(self):
        return self.status

    def setFmt(self, arg=None):
        self.fmt = arg.lower()
        return S_OK()

    def getFmt(self):
        return self.fmt

    def setJobDate(self, arg=None):
        self.jobDate = int(arg)
        return S_OK()

    def getJobDate(self):
        return self.jobDate

    def setFields(self, arg=None):
        self.fields = arg.split(",")
        return S_OK()

    def getFields(self):
        return self.fields

    def setJobGroup(self, arg=None):
        self.jobGroup = arg
        return S_OK()

    def getJobGroup(self):
        return self.jobGroup

    def setJobName(self, arg=None):
        self.jobName = arg
        return S_OK()

    def METHOD_NAME(self):
        return self.jobName

    def setInputFile(self, arg=None):
        self.inputFile = arg
        return S_OK()

    def getInputFile(self):
        return self.inputFile
@Script()
def main():
    """Entry point of dstat: parse options, select jobs, print their summary."""
    params = Params()
    Script.registerArgument(["JobID: DIRAC Job ID"], mandatory=False)
    Script.registerSwitch("u:", "User=", "job owner", params.setUser)
    Script.registerSwitch(
        "S:",
        "Status=",
        f"select job by status (comma separated list of statuses in: {','.join(JOB_STATES)})",
        params.setStatus,
    )
    Script.registerSwitch("a", "StatusAll", "display jobs of any status", params.setStatusAll)
    Script.registerSwitch("g:", "JobGroup=", "select job by job group", params.setJobGroup)
    Script.registerSwitch("n:", "JobName=", "select job by job name", params.setJobName)
    Script.registerSwitch("f:", "Fmt=", "display format (pretty, csv, json)", params.setFmt)
    Script.registerSwitch("D:", "JobDate=", "age of jobs to display (in days)", params.setJobDate)
    Script.registerSwitch(
        "F:",
        "Fields=",
        "display list of job fields (comma separated list of fields. e.g. %s)"
        % ",".join(DEFAULT_DISPLAY_COLUMNS + EXTRA_DISPLAY_COLUMNS),
        params.setFields,
    )
    Script.registerSwitch("i:", "input-file=", "read JobIDs from file", params.setInputFile)
    configCache = ConfigCache()
    Script.parseCommandLine(ignoreErrors=True)
    configCache.cacheConfig()
    args = Script.getPositionalArgs()
    session = DSession()
    params.setSession(session)
    exitCode = 0
    if args:
        # handle comma separated list of JobIDs
        newargs = []
        for arg in args:
            newargs += arg.split(",")
        args = newargs
    jobs = args
    # NOTE(review): "!= None" should be "is not None" (PEP 8); left as-is here.
    if params.getInputFile() != None:
        with open(params.getInputFile()) as f:
            for l in f.readlines():
                jobs += l.split(",")
    # No explicit JobIDs given: select jobs by owner/date/group/name instead.
    if not jobs:
        # time interval
        jobDate = toString(datetime.datetime.utcnow().date() - params.getJobDate() * day)
        # job owner
        userName = params.getUser()
        if userName is None:
            result = session.getUserName()
            if result["OK"]:
                userName = result["Value"]
        elif userName == "*" or userName.lower() == "__all__":
            # jobs from all users
            userName = None
        result = selectJobs(
            owner=userName,
            date=jobDate,
            jobGroup=params.getJobGroup(),
            jobName=params.METHOD_NAME(),
        )
        if not result["OK"]:
            gLogger.error(result["Message"])
            DIRACExit(-1)
        jobs = result["Value"]
    try:
        jobs = [int(job) for job in jobs]
    # NOTE(review): "x" is bound but unused; the message could include it.
    except Exception as x:
        gLogger.error("Expected integer for jobID")
        exitCode = 2
        DIRACExit(exitCode)
    summaries = {}
    statuses = params.getStatus()
    # split summary requests in chunks of a reasonable size (saves memory)
    for chunk in chunks(jobs, 1000):
        result = getJobSummary(chunk)
        if not result["OK"]:
            gLogger.error(result["Message"])
            DIRACExit(2)
        # filter on job statuses
        if "all" in statuses:
            summaries = result["Value"]
        else:
            for j in result["Value"]:
                if result["Value"][j]["Status"].lower() in statuses:
                    summaries[j] = result["Value"][j]
    # Normalize JobID to int so sorting in the formatter is numeric.
    for s in summaries.values():
        s["JobID"] = int(s["JobID"])
    af = ArrayFormatter(params.getFmt())
    gLogger.notice(af.dictFormat(summaries, ["JobID"] + params.getFields(), sort="JobID"))
    DIRACExit(exitCode)
if __name__ == "__main__":
main() |
6,770 | update | # SPDX-License-Identifier: GPL-3.0
# Copyright (c) 2014-2023 William Edwards <shadowapex@gmail.com>, Benjamin Bean <superman2k5@gmail.com>
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from types import TracebackType
from typing import ClassVar, Optional, Type
from tuxemon.session import Session, local_session
from tuxemon.tools import cast_dataclass_parameters
logger = logging.getLogger(__name__)
@dataclass
class EventAction(ABC):
    """EventActions are executed during gameplay.
    EventAction subclasses implement "actions" defined in Tuxemon maps.
    All subclasses, at minimum, must implement the following:
    * The EventAction.start() method
    * A meaningful name, which must match the name in map file actions
    By populating the "valid_parameters" class attribute, subclasses
    will be assigned a "parameters" instance attribute that holds the
    parameters passed to the action in the map file. It is also used
    to check the syntax of actions, by verifying the correct type and
    number of parameters passed.
    If an EventAction does not implement the update method, it will only
    run for one frame. If it does implement the update method, then it
    will continue to run until it is stopped, or the EventEngine is stopped.
    If you wish to stop an EventAction, call the stop method. Calling
    stop() signals to the EventEngine that this EventAction is done,
    and can be removed from the processing loop at the end of the frame.
    Update will be called every frame the EventAction is running,
    including the first frame it is started. You should eventually
    stop the action during update.
    The EventAction class supports the context protocol, and you may
    also use them outside of the EventEngine, but can only be run
    in a blocking manner. Do not execute EventActions outside the Engine
    if the action will block forever, as it will freeze the game.
    **Parameters**
    ** this is a work-in-progress feature, that may change in time **
    Tuxemon supports type-checking of the parameters defined in the maps.
    valid_parameters may be the following format (may change):
    (type, name)
    * the type may be any valid python type, or even a python class or function
    * type may be a single type, or a tuple of types
    * type, if a tuple, may include None to indicate the parameter is optional
    * name must be a valid python string
    After parsing the parameters of the MapAction, the parameter's value
    will be passed to the type constructor.
    Example types: str, int, float, Monster, Item
    (int, "duration") => duration must be an int
    ((int, float), "duration") => can be an int or float
    ((int, float, None), "duration") => is optional
    (Monster, "monster_slug") => a Monster instance will be created
    Parameters:
        session: Object containing the session information.
        parameters: Parameters of the action.
    """
    # Action name as written in map files; every concrete subclass sets this.
    name: ClassVar[str]
    # Bound in __post_init__ (not a constructor argument).
    session: Session = field(init=False, repr=False)
    # Completion flag: set by stop(), read through the `done` property.
    _done: bool = field(default=False, init=False)
    def __post_init__(self) -> None:
        # Attach the action to the global session and coerce dataclass
        # fields to their annotated types.
        self.session = local_session
        cast_dataclass_parameters(self)
    def __enter__(self) -> None:
        """
        Called only once, when the action is started.
        Context Protocol.
        """
        self.start()
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        """
        Called only once, when action is stopped and needs to close.
        Context Protocol.
        """
        self.cleanup()
    def stop(self) -> None:
        """
        Call when the action is done.
        EventAction will be removed at end of frame.
        If an EventAction overrides update, it must eventually call this
        method.
        """
        self._done = True
    def execute(self) -> None:
        """
        Blocking call to run the action. Will setup and cleanup action.
        This may cause the game to hang if an action is waiting on game
        changes.
        """
        with self:
            self.run()
    def run(self) -> None:
        """
        Blocking call to run the action, without start or cleanup.
        It is better to use EventAction.execute().
        This may cause the game to hang if an action is waiting on game
        changes.
        """
        # Busy-loops until a METHOD_NAME() call flips _done via stop().
        while not self.done:
            self.METHOD_NAME()
    @property
    def done(self) -> bool:
        """
        Will be true when action is finished.
        If you need the action to stop, call EventAction.stop().
        """
        return self._done
    @abstractmethod
    def start(self) -> None:
        """
        Called only once, when the action is started.
        For all actions, you will need to override this method.
        For actions that only need to run one frame you can simply
        put all the code here. If the action will need to run over
        several frames, you can init your action here, then override
        the update method.
        """
        raise NotImplementedError
    def METHOD_NAME(self) -> None:
        """
        Called once per frame while action is running.
        It is also called on the first frame when EventAction is started.
        If you do not override this, then the action will stop after it is
        started, and live for only one frame.
        If you do override this, then this method will be run every frame
        until EventAction.stop() is called. If you do not ever call stop(),
        then this action will block all others in the list and will continue
        to run until the parent EventEngine is stopped.
        """
        self.stop()
    def cleanup(self) -> None:
        """
        Called only once, when action is stopped and needs to close.
        You do not need to override this, but it may be useful for some
        actions which require special handling before they are closed.
        """
6,771 | update model | import argparse
from collections import deque
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from ignite.engine import Engine, Events
try:
import gymnasium as gym
except ImportError:
raise ModuleNotFoundError("Please install opengym: pip install gymnasium")
# Machine epsilon for float32: added to the returns' standard deviation in
# finish_episode() to avoid division by zero during normalization.
eps = np.finfo(np.float32).eps.item()
class Policy(nn.Module):
    """REINFORCE policy network for CartPole.

    Maps a 4-dimensional observation to a probability distribution over
    the 2 discrete actions through one hidden layer with dropout.

    Attributes:
        saved_log_probs: log-probabilities of actions sampled during the
            current episode (consumed and cleared by ``finish_episode``).
        rewards: per-step rewards of the current episode (consumed and
            cleared by ``finish_episode``).
    """

    def __init__(self):
        # Zero-argument super() — modern, equivalent form of
        # super(Policy, self).__init__().
        super().__init__()
        self.affine1 = nn.Linear(4, 128)
        self.dropout = nn.Dropout(p=0.6)
        self.affine2 = nn.Linear(128, 2)
        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        """Return action probabilities of shape (batch, 2) for input ``x``."""
        x = self.affine1(x)
        x = self.dropout(x)
        x = F.relu(x)
        action_scores = self.affine2(x)
        # Softmax over the action dimension yields a valid distribution.
        return F.softmax(action_scores, dim=1)
def select_action(policy, observation):
state = torch.from_numpy(observation).float().unsqueeze(0)
probs = policy(state)
m = Categorical(probs)
action = m.sample()
policy.saved_log_probs.append(m.log_prob(action))
return action.item()
def finish_episode(policy, optimizer, gamma):
    """Perform one REINFORCE update from the episode buffered on ``policy``.

    Builds the discounted return for every step, normalizes the returns,
    forms the policy-gradient loss from the recorded log-probabilities,
    takes one optimizer step, and finally clears the episode buffers.
    """
    # Discounted return per step, accumulated back-to-front.
    returns = deque()
    future_return = 0
    for reward in reversed(policy.rewards):
        future_return = reward + gamma * future_return
        returns.appendleft(future_return)
    returns = torch.tensor(returns)
    # Normalize for variance reduction; eps guards against a zero std-dev.
    returns = (returns - returns.mean()) / (returns.std() + eps)
    losses = [-log_prob * ret
              for log_prob, ret in zip(policy.saved_log_probs, returns)]
    optimizer.zero_grad()
    torch.cat(losses).sum().backward()
    optimizer.step()
    # The episode is consumed; reset the buffers for the next one.
    policy.rewards.clear()
    policy.saved_log_probs.clear()
# One ignite "epoch" corresponds to one environment episode, so alias the
# epoch events to episode-centric names for readability.
EPISODE_STARTED = Events.EPOCH_STARTED
EPISODE_COMPLETED = Events.EPOCH_COMPLETED
def main(env, args):
    """Train a REINFORCE agent on ``env`` using an ignite Engine.

    Each ignite epoch is one environment episode and each engine iteration
    is one environment step. Training stops when the running reward exceeds
    the environment's reward threshold or ``args.max_episodes`` is reached.
    """
    policy = Policy()
    optimizer = optim.Adam(policy.parameters(), lr=1e-2)
    # Upper bound on steps per episode; the epoch is terminated early when
    # the environment reports `done`.
    timesteps = range(10000)
    def run_single_timestep(engine, timestep):
        # One environment step: act, record the reward, stop on `done`.
        observation = engine.state.observation
        action = select_action(policy, observation)
        engine.state.observation, reward, done, _, _ = env.step(action)
        if args.render:
            env.render()
        policy.rewards.append(reward)
        engine.state.ep_reward += reward
        if done:
            engine.terminate_epoch()
            engine.state.timestep = timestep
    trainer = Engine(run_single_timestep)
    # Seed value for the exponential moving average of episode reward.
    trainer.state.running_reward = 10
    @trainer.on(EPISODE_STARTED)
    def reset_environment_state():
        # Re-seed per episode so runs are reproducible while episodes differ.
        torch.manual_seed(args.seed + trainer.state.epoch)
        trainer.state.observation, _ = env.reset(seed=args.seed + trainer.state.epoch)
        trainer.state.ep_reward = 0
    @trainer.on(EPISODE_COMPLETED)
    def METHOD_NAME():
        # Exponential moving average with smoothing factor 0.05, followed by
        # one policy-gradient update from the finished episode.
        trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward
        finish_episode(policy, optimizer, args.gamma)
    @trainer.on(EPISODE_COMPLETED(every=args.log_interval))
    def log_episode():
        i_episode = trainer.state.epoch
        print(
            f"Episode {i_episode}\tLast reward: {trainer.state.ep_reward:.2f}"
            f"\tAverage length: {trainer.state.running_reward:.2f}"
        )
    @trainer.on(EPISODE_COMPLETED)
    def should_finish_training():
        # Stop once the smoothed reward clears the environment's threshold.
        running_reward = trainer.state.running_reward
        if running_reward > env.spec.reward_threshold:
            print(
                f"Solved! Running reward is now {running_reward} and "
                f"the last episode runs to {trainer.state.timestep} time steps!"
            )
            trainer.should_terminate = True
    trainer.run(timesteps, max_epochs=args.max_episodes)
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters, build the CartPole
    # environment, and hand off to main().
    parser = argparse.ArgumentParser(description="PyTorch REINFORCE example")
    parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
    parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)")
    parser.add_argument("--render", action="store_true", help="render the environment")
    parser.add_argument(
        "--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)"
    )
    parser.add_argument(
        "--max-episodes",
        type=int,
        default=1000000,
        metavar="N",
        help="Number of episodes for the training (default: 1000000)",
    )
    args = parser.parse_args()
    env = gym.make("CartPole-v1")
    main(env, args)
6,772 | on properties | # Copyright 2004-2009 Joe Wreschnig, Michael Urman, Steven Robertson
# 2011-2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import GObject, Gtk
from quodlibet.browsers.playlists.menu import PlaylistMenu
from quodlibet import _
from quodlibet import browsers
from quodlibet import qltk
from quodlibet.qltk.ratingsmenu import RatingsMenuItem
from quodlibet.qltk.x import SeparatorMenuItem, MenuItem
from quodlibet.util import connect_obj, connect_destroy, is_plasma
from quodlibet.qltk import Icons
from quodlibet.qltk.browser import LibraryBrowser
from quodlibet.qltk.information import Information
from quodlibet.qltk.properties import SongProperties
from quodlibet.qltk.util import GSignals
class IndicatorMenu(Gtk.Menu):
    """Popup menu shown by the tray/status indicator.

    Exposes playback controls, player options, browser launchers, and
    song-specific items (rating, playlists, tags, information). The menu
    items are kept in sync with the player through set_paused()/set_song().
    """

    # Emitted whenever the play/pause action item swaps (see set_paused).
    __gsignals__: GSignals = {
        'action-item-changed': (GObject.SignalFlags.RUN_LAST, None, tuple()),
    }
    def __init__(self, app, add_show_item=False):
        super().__init__()
        self._app = app
        player = app.player
        # Plasma places the show/hide toggle at the bottom of the menu.
        show_item_bottom = is_plasma()
        if add_show_item:
            show_item = Gtk.CheckMenuItem.new_with_mnemonic(
                _("_Show %(application-name)s") % {
                    "application-name": app.name})
            def on_toggled(menuitem):
                if menuitem.get_active():
                    app.present()
                else:
                    app.hide()
            self._toggle_id = show_item.connect("toggled", on_toggled)
            def on_visible_changed(*args):
                # Block our own handler so syncing the checkbox with the
                # window state doesn't re-trigger present()/hide().
                with show_item.handler_block(self._toggle_id):
                    show_item.set_active(app.window.get_visible())
            connect_destroy(app.window, "notify::visible", on_visible_changed)
        else:
            show_item = None
        # Play and Pause are both created; set_paused() shows exactly one.
        self._play_item = MenuItem(_("_Play"), Icons.MEDIA_PLAYBACK_START)
        self._play_item.connect("activate", self._on_play_pause, player)
        self._play_item.set_no_show_all(True)
        self._pause_item = MenuItem(_("P_ause"), Icons.MEDIA_PLAYBACK_PAUSE)
        self._pause_item.connect("activate", self._on_play_pause, player)
        self._pause_item.set_no_show_all(True)
        self._action_item = None
        previous = MenuItem(_("Pre_vious"), Icons.MEDIA_SKIP_BACKWARD)
        previous.connect('activate', lambda *args: player.previous(force=True))
        next = MenuItem(_("_Next"), Icons.MEDIA_SKIP_FORWARD)
        next.connect('activate', lambda *args: player.next())
        player_options = app.player_options
        # Bidirectional bindings keep the check items and the player options
        # in sync; notify() pushes the current value into the new widget.
        shuffle = Gtk.CheckMenuItem(label=_("_Shuffle"), use_underline=True)
        player_options.bind_property("shuffle", shuffle, "active",
                                     GObject.BindingFlags.BIDIRECTIONAL)
        player_options.notify("shuffle")
        repeat = Gtk.CheckMenuItem(label=_("_Repeat"), use_underline=True)
        player_options.bind_property("repeat", repeat, "active",
                                     GObject.BindingFlags.BIDIRECTIONAL)
        player_options.notify("repeat")
        safter = Gtk.CheckMenuItem(label=_("Stop _After This Song"),
                                   use_underline=True)
        player_options.bind_property("stop-after", safter, "active",
                                     GObject.BindingFlags.BIDIRECTIONAL)
        player_options.notify("stop-after")
        # One submenu entry per registered browser kind.
        browse = qltk.MenuItem(_("Open _Browser"), Icons.EDIT_FIND)
        browse_sub = Gtk.Menu()
        for Kind in browsers.browsers:
            i = Gtk.MenuItem(label=Kind.accelerated_name, use_underline=True)
            connect_obj(i,
                'activate', LibraryBrowser.open, Kind, app.library, app.player)
            browse_sub.append(i)
        browse.set_submenu(browse_sub)
        self._props = qltk.MenuItem(_("Edit _Tags"), Icons.EDIT)
        def METHOD_NAME(*args):
            song = player.song
            window = SongProperties(app.librarian, [song])
            window.show()
        self._props.connect('activate', METHOD_NAME)
        self._info = MenuItem(_("_Information"), Icons.DIALOG_INFORMATION)
        self._playlists_item = MenuItem(_("Play_lists"),
                                        Icons.FOLDER_DRAG_ACCEPT)
        self._new_playlist_submenu_for(player.song)
        def on_information(*args):
            song = player.song
            window = Information(app.librarian, [song])
            window.show()
        self._info.connect('activate', on_information)
        # NOTE(review): set_rating is defined but never connected or called
        # in this class — looks like leftover code; confirm before removing.
        def set_rating(value):
            song = player.song
            song["~#rating"] = value
            app.librarian.changed([song])
        self._rating_item = rating = RatingsMenuItem([], app.library)
        quit = MenuItem(_("_Quit"), Icons.APPLICATION_EXIT)
        quit.connect('activate', lambda *x: app.quit())
        if not show_item_bottom and show_item:
            self.append(show_item)
            self.append(SeparatorMenuItem())
        self.append(self._play_item)
        self.append(self._pause_item)
        self.append(previous)
        self.append(next)
        self.append(SeparatorMenuItem())
        self.append(shuffle)
        self.append(repeat)
        self.append(safter)
        self.append(SeparatorMenuItem())
        self.append(rating)
        self.append(self._playlists_item)
        self.append(self._props)
        self.append(self._info)
        self.append(SeparatorMenuItem())
        self.append(browse)
        self.append(SeparatorMenuItem())
        self.append(quit)
        if show_item_bottom and show_item:
            self.append(SeparatorMenuItem())
            self.append(show_item)
        self.show_all()
        self.set_paused(True)
        self.set_song(None)
    def get_action_item(self):
        """Returns the 'Play' or 'Pause' action menu item (used for unity).
        'action-item-changed' gets emitted if this changes.
        """
        return self._action_item
    def set_paused(self, paused):
        """Update the menu based on the player paused state"""
        self._play_item.set_visible(paused)
        self._pause_item.set_visible(not paused)
        self._action_item = self._play_item if paused else self._pause_item
        self.emit("action-item-changed")
    def set_song(self, song):
        """Update the menu based on the passed song. Can be None.
        This should be the persistent song and not a stream/info one.
        """
        self._rating_item.set_sensitive(song is not None)
        self._info.set_sensitive(song is not None)
        self._props.set_sensitive(song is not None)
        self._rating_item.set_songs([song])
        self._new_playlist_submenu_for(song)
    def _new_playlist_submenu_for(self, song):
        # Rebuild the playlists submenu from scratch for the given song.
        submenu = self._playlists_item.get_submenu()
        pl_lib = self._app.library and self._app.library.playlists
        if submenu:
            submenu.destroy()
        playlist_menu = PlaylistMenu([song], pl_lib)
        self._playlists_item.set_submenu(playlist_menu)
        self._playlists_item.set_sensitive(bool(song) and song.can_add)
        self._playlists_item.show_all()
    def _on_play_pause(self, menuitem, player):
        player.playpause()
6,773 | get mfr id | #
# psuutil.py
# Platform-specific PSU status interface for SONiC
#
import os
import sys
from sonic_py_common.general import getstatusoutput_noshell
SENSORS_CMD = ["docker", "exec", "-i", "pmon", "/usr/bin/sensors"]
DOCKER_SENSORS_CMD = "/usr/bin/sensors"
try:
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class PsuUtil(PsuBase):
    """Platform-specific PSUutil class for the Dell E3224F.

    PSU presence/status comes from the dell-e3224f CPLD sysfs nodes, FRU
    data (serial, model, airflow) from the PSU eeproms over i2c, and
    electrical telemetry from lm-sensors output.
    """

    # Maps the 1-based PSU index of the PsuBase API to the 0-based id used
    # in the CPLD sysfs node names.
    _psu_mapping = {
        1 : '0',
        2 : '1'
    }

    def __init__(self):
        PsuBase.__init__(self)

    def isDockerEnv(self):
        """Return True when this process is running inside a docker container."""
        # Context manager ensures the handle is closed (was leaked before).
        with open('/proc/self/cgroup', 'r') as fd:
            return fd.read().count(":/docker") > 0

    def remove_nonnumeric(self, text):
        """Strip every character except digits and '.' from ``text``."""
        digits = '0123456789.'
        return ''.join(c for c in text if c in digits)

    def get_cpld_register(self, reg_name):
        """Read a CPLD register sysfs file; return 'ERR' when unavailable."""
        cpld_dir = "/sys/devices/platform/dell-e3224f-cpld.0/"
        retval = 'ERR'
        reg_file = cpld_dir + '/' + reg_name
        if not os.path.isfile(reg_file):
            return retval
        try:
            with open(reg_file, 'r') as fd:
                retval = fd.read()
        except Exception:
            print("Unable to open ", reg_file, "file !")
        return retval.rstrip('\r\n')

    def get_num_psus(self):
        """
        Retrieves the number of PSUs available on the device
        :return: An integer, the number of PSUs available on the device
        """
        E3224F_MAX_PSUS = 2
        return E3224F_MAX_PSUS

    def get_psu_status(self, index):
        """
        Retrieves the oprational status of power supply unit (PSU) defined
        by index <index>
        :param index: An integer, index of the PSU of which to query status
        :return: Boolean, True if PSU is operating properly, False if PSU is\
        faulty
        """
        status = 0
        psu_status = self.get_cpld_register('psu' + self._psu_mapping[index] + '_status')
        if psu_status != 'ERR':
            status = int(psu_status, 10)
        presence = self.get_psu_presence(index)
        # A PSU is healthy only if it is both present and reporting OK.
        return (status & presence)

    def get_psu_presence(self, index):
        """
        Retrieves the presence status of power supply unit (PSU) defined
        by index <index>
        :param index: An integer, index of the PSU of which to query status
        :return: Boolean, True if PSU is plugged, False if not
        """
        status = 0
        psu_presence = self.get_cpld_register('psu' + self._psu_mapping[index] + '_prs')
        if psu_presence != 'ERR':
            status = int(psu_presence, 10)
        return status

    def get_sensor(self):
        """Return raw `sensors` output, going through the pmon docker when
        running on the host."""
        if not self.isDockerEnv():
            status, cmd_output = getstatusoutput_noshell(SENSORS_CMD)
        else:
            status, cmd_output = getstatusoutput_noshell(DOCKER_SENSORS_CMD)
        if status:
            print('Failed to execute sensors command')
            sys.exit(0)
        return cmd_output

    def _sensor_field(self, index, field):
        """Return the numeric text of ``field`` for PSU ``index`` from the
        sensors output, or None when the field is not found.

        The scan starts at the PSU's dps460 section header and, matching the
        original behaviour, keeps scanning subsequent lines (including later
        sections) until the field is found.
        """
        cmd_output = self.get_sensor()
        sensor_name = 'dps460-i2c-10' if index == 1 else 'dps460-i2c-11'
        found = False
        for line in cmd_output.splitlines():
            if line.startswith(sensor_name):
                found = True
            if found and field in line:
                return self.remove_nonnumeric(line.split()[2])
        return None

    def get_output_current(self, index):
        """Return the PSU output current in amps (0.0 when unavailable)."""
        value = self._sensor_field(index, 'Output Current')
        return float(value) if value is not None else 0.0

    def get_output_voltage(self, index):
        """Return the PSU output voltage in volts (0.0 when unavailable)."""
        value = self._sensor_field(index, 'Output Voltage')
        return float(value) if value is not None else 0.0

    def get_fan_rpm(self, index, fan_index):
        """Return the PSU fan speed; each PSU has a single fan.

        Note: matching the original behaviour, the RPM is returned as the
        numeric *string* from the sensors output when found, and the float
        0.0 otherwise.
        """
        if fan_index > 1:
            return 0.0
        value = self._sensor_field(index, 'Fan RPM')
        return value if value is not None else 0.0

    def get_output_power(self, index):
        """Return the PSU output power in watts (0.0 when unavailable)."""
        value = self._sensor_field(index, 'Output Power')
        return float(value) if value is not None else 0.0

    def _psu_present(self, index):
        """Return the raw presence bit (int) for PSU ``index`` from the CPLD."""
        psuid = '0' if index == 1 else '1'
        sysfs_path = '/sys/devices/platform/dell-e3224f-cpld.0/psu' + psuid + '_prs'
        with open(sysfs_path) as fd:
            return int(fd.read())

    def _read_eeprom(self, index, start, end):
        """Return bytes [start:end) of the PSU FRU eeprom for PSU ``index``."""
        bus_no = '10' if index == 1 else '11'
        sysfs_path = "/sys/bus/i2c/devices/" + bus_no + "-0056/eeprom"
        with open(sysfs_path, "rb") as fd:
            return fd.read()[start:end]

    def get_direction(self, index):
        """Return the PSU airflow direction: 'F2B', 'B2F', or '' if absent."""
        if not self._psu_present(index):
            return ''
        val = self._read_eeprom(index, 0xe1, 0xe8)
        # BUG FIX: the eeprom read yields bytes, and the old comparison
        # 'FORWARD' == val compared str to bytes — always False, so every
        # PSU reported 'B2F'. Compare against a bytes literal instead.
        return 'F2B' if val == b'FORWARD' else 'B2F'

    def get_serial(self, index):
        """Return the PSU serial number as a string ('' when absent)."""
        if not self._psu_present(index):
            return ''
        # Decode so the return type is consistently str (the raw bytes used
        # to leak out of this API, while the absent case returned '').
        return self._read_eeprom(index, 0xc4, 0xd9).decode('ascii', 'ignore')

    def get_model(self, index):
        """Return the PSU model name as a string ('' when absent)."""
        if not self._psu_present(index):
            return ''
        return self._read_eeprom(index, 0x50, 0x62).decode('ascii', 'ignore')

    def METHOD_NAME(self, index):
        """Return the PSU manufacturer id ('DELTA') or '' when absent."""
        return 'DELTA' if self._psu_present(index) else ''
6,774 | test basic example | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.relax.transform import FoldDataflowBlockOutput
from tvm.script.parser import ir as I, relax as R, tir as T
def verify(input, expected):
    """Assert that FoldDataflowBlockOutput rewrites `input` into `expected`."""
    tvm.ir.assert_structural_equal(FoldDataflowBlockOutput()(input), expected)
def METHOD_NAME():
    """A trivial alias binding (`n = y`) of a dataflow output is folded so
    the constant is bound directly to the output var."""
    @tvm.script.ir_module
    class Input:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                y = R.const(1)
                n = y
                R.output(n)
            return n
    @tvm.script.ir_module
    class Expected:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                n = R.const(1)
                R.output(n)
            return n
    verify(Input, Expected)
def test_match_cast():
    """A match_cast of a single-use binding is folded into the output var."""
    @tvm.script.ir_module
    class Input:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                y = R.const(1)
                n = R.match_cast(y, R.Tensor((), "int32"))
                R.output(n)
            return n
    @tvm.script.ir_module
    class Expected:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                n = R.match_cast(R.const(1), R.Tensor((), "int32"))
                R.output(n)
            return n
    verify(Input, Expected)
def test_unable_to_fold():
    """Bindings that are used multiple times, or whose output is not a bare
    var reference, must be left untouched by the pass."""
    @tvm.script.ir_module
    class MultipleUse:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                y = R.const(1)
                # multiple uses -> cannot coalesce
                m = R.add(y, y)
                n = y
                R.output(n)
            return n
    @tvm.script.ir_module
    class ComplexExpr:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                y = R.const(1)
                # y does not appear by itself -> cannot coalesce
                n = R.add(y, y)
                R.output(n)
            return n
    verify(MultipleUse, MultipleUse)
    verify(ComplexExpr, ComplexExpr)
def test_multiple_outputs():
    """Each of several single-use aliases feeding distinct outputs is folded."""
    @tvm.script.ir_module
    class Input:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                x = R.const(1)
                y = R.const(1)
                z = R.const(1)
                l = x
                m = y
                n = z
                R.output(l, m, n)
            return n
    @tvm.script.ir_module
    class Expected:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                l = R.const(1)
                m = R.const(1)
                n = R.const(1)
                R.output(l, m, n)
            return n
    verify(Input, Expected)
def test_multiply_used_in_outputs():
    """A binding consumed by several outputs cannot be folded into any of them."""
    # cannot fold in this case
    @tvm.script.ir_module
    class UsedInMultipleOutputs:
        @R.function
        def main() -> R.Tensor((), "int32"):
            with R.dataflow():
                x = R.const(1)
                l = x
                m = x
                n = x
                R.output(l, m, n)
            return n
    verify(UsedInMultipleOutputs, UsedInMultipleOutputs)
# Allow running this test file directly (delegates to tvm.testing/pytest).
if __name__ == "__main__":
    tvm.testing.main()
6,775 | set up | import time
from os.path import join
from bzt import ToolError, TaurusConfigError
from bzt.utils import EXE_SUFFIX
from bzt.modules.siege import SiegeExecutor, DataLogReader
from bzt.modules.aggregator import ConsolidatingAggregator
from tests.unit import BZTestCase, ExecutorTestCase, RESOURCES_DIR, close_reader_file, ROOT_LOGGER
# Fake platform-specific `siege` binary shipped with the test resources,
# used instead of a real siege installation.
TOOL_NAME = 'siege' + EXE_SUFFIX
TOOL_PATH = join(RESOURCES_DIR, "siege", TOOL_NAME)
class TestSiegeExecutor(ExecutorTestCase):
    """Integration tests for SiegeExecutor using the bundled fake siege tool."""

    EXECUTOR = SiegeExecutor
    def METHOD_NAME(self):
        # Per-test setup: wire a real aggregator and point the executor at
        # the fake siege binary from the test resources.
        super(TestSiegeExecutor, self).METHOD_NAME()
        #self.sniff_log()
        self.obj.engine.aggregator = ConsolidatingAggregator()
        self.obj.engine.aggregator.engine = self.obj.engine
        self.obj.settings.merge({"path": TOOL_PATH})
    def tearDown(self):
        # Release the reader's file handle before the base teardown.
        close_reader_file(self.obj.reader)
        super(TestSiegeExecutor, self).tearDown()
    def test_iter(self):
        # Iteration-based load profile with inline requests.
        self.configure({"execution": {
            "concurrency": 2,
            "iterations": 3,
            "scenario": {
                "think-time": "1s",
                "requests": [
                    "http://blazedemo.com",
                    "http://ya.ru"]}}})
        self.obj.prepare()
        self.obj.get_widget()
        self.obj.startup()
    def test_hold(self):
        # Duration-based profile driven by an external URL file, with
        # headers and variables passed through to siege.
        self.configure({"execution": {
            "concurrency": 2,
            "hold-for": '2s',
            "scenario": {
                "headers": {
                    'h1': 'value1',
                    'h2': 'value2'},
                "variables": {
                    'v1': 1,
                    'v2': 'TWO'},
                "script": join(RESOURCES_DIR, "siege", "url-file")}}})
        self.obj.prepare()
        self.assertNotEqual(len(self.obj.resource_files()), 0)
        self.obj.get_widget()
        self.obj.startup()
    def test_url_exceptions(self):
        # An empty scenario (no requests, no script) must fail preparation.
        self.obj.execution.merge({
            "concurrency": 2,
            "hold-for": '2s',
            "scenario": {}})
        self.assertRaises(TaurusConfigError, self.obj.prepare)
    def test_check_install_exceptions(self):
        # A bogus tool path must be detected during preparation.
        self.obj.settings.merge({"path": '*'})
        self.obj.execution.merge({
            "concurrency": 2,
            "hold-for": '2s',
            "scenario": {}})
        self.assertRaises(ToolError, self.obj.prepare)
    def test_repetition_exceptions(self):
        # ramp-up without iterations/hold-for is not supported by siege.
        self.configure({"execution": {
            "concurrency": 2,
            "ramp-up": "1h",
            "scenario": {
                "requests": [
                    "http://blazedemo.com",
                    "http://ya.ru"]}}})
        self.obj.prepare()
        self.assertEqual(len(self.obj.resource_files()), 0)
        self.assertRaises(TaurusConfigError, self.obj.startup)
    def test_full_execution(self):
        # Full lifecycle: prepare -> startup -> poll until done -> teardown.
        self.configure({"execution": {
            "concurrency": 2,
            "iterations": 3,
            "rc-file": join(RESOURCES_DIR, "siege", "siegerc"),
            "scenario": {
                "requests": [
                    "http://blazedemo.com",
                    "http://ya.ru"]}}})
        self.obj.prepare()
        try:
            self.obj.startup()
            self.obj.engine.aggregator.check()
            while not self.obj.check():
                time.sleep(self.obj.engine.check_interval)
        finally:
            self.obj.shutdown()
        self.obj.post_process()
        self.assertNotEquals(self.obj.process, None)
    def test_diagnostics(self):
        # Error diagnostics must be available after a completed run.
        self.obj.execution.merge({
            "iterations": 1,
            "scenario": {
                "requests": [
                    "http://blazedemo.com"]}})
        self.obj.prepare()
        try:
            self.obj.startup()
            while not self.obj.check():
                time.sleep(self.obj.engine.check_interval)
        finally:
            self.obj.shutdown()
        self.obj.post_process()
        self.assertIsNotNone(self.obj.get_error_diagnostics())
class TestDataLogReader(BZTestCase):
    """Tests for parsing siege's output log into datapoints."""

    def test_read(self):
        log_path = join(RESOURCES_DIR, "siege", "siege.out")
        obj = DataLogReader(log_path, ROOT_LOGGER)
        list_of_values = list(obj.datapoints(True))
        self.assertEqual(len(list_of_values), 8)
        for values in list_of_values:
            # Sanity-check the timestamps fall in a plausible epoch range
            # (roughly the years 2014-2017).
            self.assertTrue(1400000000 < values['ts'] < 1500000000)
6,776 | toner changes | import time
from collections import defaultdict
from datetime import date
from datetime import timedelta
from functools import partial
from typing import Any
from typing import Dict
from typing import List
from django.http import HttpRequest
from django.http import HttpResponse
from django.shortcuts import render
from matplotlib.figure import Figure
from ocflib.lab import stats
from ocflib.printing.printers import PRINTERS
from ocflib.printing.quota import get_connection
from ocflib.printing.quota import SEMESTERLY_QUOTA
from ocfweb.caching import periodic
from ocfweb.component.graph import plot_to_image_bytes
# Every printer that has ever reported stats (kept for historical graphs)
# versus the printers currently in service.
ALL_PRINTERS = ('papercut', 'pagefault', 'logjam', 'logjam-old', 'deforestation')
ACTIVE_PRINTERS = ('papercut', 'pagefault', 'logjam')
def stats_printing(request: HttpRequest) -> HttpResponse:
    """Render the printing statistics page: current printers, toner usage,
    and per-day page counts for the last 30 days."""
    return render(
        request,
        'stats/printing.html',
        {
            'title': 'Printing Statistics',
            'current_printers': PRINTERS,
            'toner_changes': METHOD_NAME(),
            'last_month': [
                date.today() - timedelta(days=i)
                for i in range(30)
            ],
            'pages_per_day': _pages_per_day(),
        },
    )
def semester_histogram(request: HttpRequest) -> HttpResponse:
    """Serve the semester print-balance histogram as an SVG image."""
    return HttpResponse(
        plot_to_image_bytes(_semester_histogram(), format='svg'),
        content_type='image/svg+xml',
    )
@periodic(300)
def _semester_histogram() -> Figure:
    """Build a histogram of users' remaining print balances this semester.

    Cached for 5 minutes via @periodic.
    """
    with get_connection() as c:
        c.execute(
            'SELECT `user`, `semester` FROM `printed` WHERE `semester` > 0',
        )
        # Remaining balance = quota minus the `semester` column (which
        # presumably stores pages used — consistent with the x-axis label).
        users = [SEMESTERLY_QUOTA - int(r['semester']) for r in c]
    fig = Figure(figsize=(10, 5))
    ax = fig.add_subplot(1, 1, 1)
    ax.locator_params(nbins=20)
    ax.hist(users, bins=list(range(0, SEMESTERLY_QUOTA + 5, 5)))
    ax.grid(True)
    # x-axis runs from full quota down to 0 pages remaining.
    ax.set_xlim(SEMESTERLY_QUOTA, 0)
    ax.set_ylabel('Number of users')
    ax.set_xlabel('Remaining balance')
    ax.set_title('Remaining balances this semester')
    return fig
@periodic(3600)
def METHOD_NAME() -> List[Any]:
    """Return (printer, toner-used-this-semester) pairs for each active
    printer. Cached for an hour via @periodic."""
    changes = []
    for printer in ACTIVE_PRINTERS:
        changes.append((printer, _toner_used_by_printer(printer)))
    return changes
def _toner_used_by_printer(printer: str, cutoff: float = .05, since: date = stats.current_semester_start()) -> float:
    """Returns toner used for a printer since a given date (by default it
    returns toner used for this semester).
    Toner numbers can be significantly noisy, including significant diffs
    whenever toner gets taken out and put back in whenever there is a jam.
    Because of this it's hard to determine if a new toner is inserted into a
    printer or if it was the same toner again. To reduce this noise we only
    count diffs that are smaller than a cutoff which empirically seems to be
    more accurate.

    Implementation: number the readings chronologically (ordered1), join the
    table to itself shifted by one row (ordered2) to get consecutive-reading
    diffs of the toner fraction, then sum the diffs below the cutoff.
    NOTE(review): `since` is evaluated once at import time (mutable-default
    style pitfall) — presumably acceptable for a long-running app; confirm.
    """
    with stats.get_connection() as cursor:
        # Assign a sequential position to every reading for this printer
        # since the cutoff date, using a MySQL user variable.
        cursor.execute(
            '''
            CREATE TEMPORARY TABLE ordered1
                (PRIMARY KEY (position))
                AS (
                    SELECT * FROM (
                        SELECT
                            T.*,
                            @rownum := @rownum + 1 AS position
                        FROM (
                            (
                                SELECT * FROM printer_toner_public
                                WHERE printer = %s AND
                                    date > %s
                                ORDER BY date
                            ) AS T,
                            (SELECT @rownum := 0) AS r
                        )
                    ) AS x
                )
        ''', (printer, since.strftime('%Y-%m-%d')),
        )
        # MySQL cannot reopen a temporary table in the same query, so a
        # copy is required for the self-join below.
        cursor.execute('''
            CREATE TEMPORARY TABLE ordered2
                (PRIMARY KEY (position))
                AS (SELECT * FROM ordered1)
        ''')
        # Fractional toner change between each pair of consecutive readings.
        cursor.execute('''
            CREATE TEMPORARY TABLE diffs
            AS (SELECT
                    B.date AS date,
                    A.value/A.max - B.value/B.max as pct_diff
                FROM
                    ordered1 as A,
                    ordered2 as B
                WHERE
                    B.position = A.position + 1)
        ''')
        # Sum only small diffs; large jumps are toner swaps/jams (noise).
        cursor.execute(
            '''
            SELECT SUM(pct_diff) as toner_used
            FROM
                diffs
            WHERE
                ABS(pct_diff)<%s
        ''', (cutoff,),
        )
        result = cursor.fetchone()['toner_used']
    return float(result or 0.0)
@periodic(120)
def _pages_per_day() -> Dict[str, int]:
    """Return {date: {printer: pages printed that day}}.

    Daily counts are differences between consecutive days' maximum
    cumulative page counters. Cached for 2 minutes via @periodic.
    """
    with stats.get_connection() as cursor:
        cursor.execute('''
            SELECT max(value) as value, cast(date as date) as date, printer
                FROM printer_pages_public
                GROUP BY cast(date as date), printer
                ORDER BY date ASC, printer ASC
        ''')
        # Resolves the issue of possible missing dates.
        # defaultdict(lambda: defaultdict(int)) doesn't work due to inability to pickle local objects like lambdas;
        # this effectively does the same thing as that.
        pages_printed: Dict[Any, Any] = defaultdict(partial(defaultdict, int))
        last_seen: Dict[Any, Any] = {}
        for row in cursor:
            # The first reading of a printer only seeds last_seen; from the
            # second day onward the daily count is the counter delta.
            if row['printer'] in last_seen:
                pages_printed.setdefault(row['date'], defaultdict(int))
                pages_printed[row['date']][row['printer']] = (
                    row['value'] - last_seen[row['printer']]
                )
            last_seen[row['printer']] = row['value']
    return pages_printed
def _pages_printed_for_printer(printer: str, resolution: int = 100) -> List[Any]:
    """Return (epoch-millis, cumulative page count) samples for a printer.

    Readings are numbered chronologically and only every `resolution`-th
    one is kept, to bound the size of the chart payload.
    """
    with stats.get_connection() as cursor:
        cursor.execute(
            '''
            SELECT Z.date, Z.value FROM (
                SELECT
                    T.*,
                    @rownum := @rownum + 1 AS position
                FROM (
                    (
                        SELECT * FROM printer_pages_public
                        WHERE printer = %s
                        ORDER BY date
                    ) AS T,
                    (SELECT @rownum := 0) AS r
                )
            ) as Z
            WHERE Z.position mod %s = 0
        ''', (printer, resolution),
        )
        # Milliseconds since the epoch, as expected by JS charting libraries.
        return [
            (time.mktime(row['date'].timetuple()) * 1000, row['value'])
            for row in cursor
        ]
@periodic(3600)
def _pages_printed_data() -> List[Any]:
    """Return one chart series per printer (all printers, including
    retired ones) of cumulative pages printed. Cached hourly via @periodic."""
    series = []
    for printer in ALL_PRINTERS:
        series.append({
            'name': printer,
            'animation': False,
            'data': _pages_printed_for_printer(printer),
        })
    return series
def pages_printed(request: HttpRequest) -> HttpResponse:
    """Render the pages-printed chart page with per-printer series data."""
    return render(
        request,
        'stats/printing/pages-printed.html',
        {
            'title': 'Pages Printed',
            'data': _pages_printed_data(),
        },
    )
6,777 | numpy polymul | """
Implementation of operations involving polynomials.
"""
import numpy as np
from numpy.polynomial import polynomial as poly
from numpy.polynomial import polyutils as pu
from numba import typeof
from numba.core import types, errors
from numba.core.extending import overload
from numba.np.numpy_support import type_can_asarray, as_dtype, from_dtype
@overload(np.roots)
def roots_impl(p):
    """Numba overload of np.roots: roots of a polynomial given its coefficients."""
    # cast int vectors to float cf. numpy, this is a bit dicey as
    # the roots could be complex which will fail anyway
    ty = getattr(p, 'dtype', p)
    if isinstance(ty, types.Integer):
        cast_t = np.float64
    else:
        cast_t = as_dtype(ty)
    def roots_impl(p):
        # impl based on numpy:
        # https://github.com/numpy/numpy/blob/master/numpy/lib/polynomial.py
        if len(p.shape) != 1:
            raise ValueError("Input must be a 1d array.")
        non_zero = np.nonzero(p)[0]
        # all-zero polynomial: no roots
        if len(non_zero) == 0:
            return np.zeros(0, dtype=cast_t)
        # number of trailing zero coefficients == number of roots at x = 0
        tz = len(p) - non_zero[-1] - 1
        # pull out the coeffs selecting between possible zero pads
        p = p[int(non_zero[0]):int(non_zero[-1]) + 1]
        n = len(p)
        if n > 1:
            # construct companion matrix, ensure fortran order
            # to give to eigvals, write to upper diag and then
            # transpose.
            A = np.diag(np.ones((n - 2,), cast_t), 1).T
            A[0, :] = -p[1:] / p[0]  # normalize
            roots = np.linalg.eigvals(A)
        else:
            roots = np.zeros(0, dtype=cast_t)
        # add in additional zeros on the end if needed
        if tz > 0:
            return np.hstack((roots, np.zeros(tz, dtype=cast_t)))
        else:
            return roots
    return roots_impl
@overload(pu.trimseq)
def polyutils_trimseq(seq):
    """Numba overload of polyutils.trimseq: drop trailing zero coefficients."""
    if not type_can_asarray(seq):
        msg = 'The argument "seq" must be array-like'
        raise errors.TypingError(msg)
    if isinstance(seq, types.BaseTuple):
        msg = 'Unsupported type %r for argument "seq"'
        raise errors.TypingError(msg % (seq))
    if np.ndim(seq) > 1:
        msg = 'Coefficient array is not 1-d'
        raise errors.NumbaValueError(msg)
    def impl(seq):
        if len(seq) == 0:
            return seq
        else:
            # Scan backwards for the last non-zero coefficient. If every
            # coefficient is zero, i ends at 0 and one element is kept,
            # matching NumPy's trimseq behaviour.
            for i in range(len(seq) - 1, -1, -1):
                if seq[i] != 0:
                    break
            return seq[:i+1]
    return impl
def _poly_result_dtype(tup):
    """Compute the promoted result dtype for a tuple of poly-function inputs.

    Inputs may be Numba scalar types, tuples of types, or array types; the
    result always promotes against float64, mirroring NumPy's poly routines.
    Returns a Numba dtype (via from_dtype).
    """
    acc = np.float64
    for item in tup:
        if isinstance(item, types.Number):
            candidate = str(as_dtype(item))
        elif isinstance(item, types.Tuple):
            member_dtypes = [as_dtype(t) for t in item.types]
            candidate = str(np.result_type(*member_dtypes))
        else:
            candidate = str(item.dtype)
        acc = np.result_type(acc, candidate)
    return from_dtype(acc)
@overload(poly.polyadd)
def numpy_polyadd(c1, c2):
    """Numba overload of polynomial.polyadd: coefficient-wise sum of two 1-d series."""
    if not type_can_asarray(c1):
        msg = 'The argument "c1" must be array-like'
        raise errors.TypingError(msg)
    if not type_can_asarray(c2):
        msg = 'The argument "c2" must be array-like'
        raise errors.TypingError(msg)
    if np.ndim(c1) > 1 or np.ndim(c2) > 1:
        msg = 'Coefficient array is not 1-d'
        raise errors.NumbaValueError(msg)
    # Result dtype is decided at compile time from both operand types.
    result_dtype = _poly_result_dtype((c1, c2))
    def impl(c1, c2):
        c1 = np.asarray(c1)
        c2 = np.asarray(c2)
        arr1 = np.atleast_1d(c1).astype(result_dtype)
        arr2 = np.atleast_1d(c2).astype(result_dtype)
        # Zero-pad the shorter operand so both arrays have equal length.
        diff = len(arr2) - len(arr1)
        if diff > 0:
            zr = np.zeros(diff)
            arr1 = np.concatenate((arr1, zr))
        if diff < 0:
            zr = np.zeros(-diff)
            arr2 = np.concatenate((arr2, zr))
        val = arr1 + arr2
        # Trim trailing zeros, matching NumPy's behaviour.
        return pu.trimseq(val)
    return impl
@overload(poly.polysub)
def numpy_polysub(c1, c2):
    """Numba overload of polynomial.polysub: coefficient-wise difference c1 - c2."""
    if not type_can_asarray(c1):
        msg = 'The argument "c1" must be array-like'
        raise errors.TypingError(msg)
    if not type_can_asarray(c2):
        msg = 'The argument "c2" must be array-like'
        raise errors.TypingError(msg)
    if np.ndim(c1) > 1 or np.ndim(c2) > 1:
        msg = 'Coefficient array is not 1-d'
        raise errors.NumbaValueError(msg)
    # Result dtype is decided at compile time from both operand types.
    result_dtype = _poly_result_dtype((c1, c2))
    def impl(c1, c2):
        c1 = np.asarray(c1)
        c2 = np.asarray(c2)
        arr1 = np.atleast_1d(c1).astype(result_dtype)
        arr2 = np.atleast_1d(c2).astype(result_dtype)
        # Zero-pad the shorter operand so both arrays have equal length.
        diff = len(arr2) - len(arr1)
        if diff > 0:
            zr = np.zeros(diff)
            arr1 = np.concatenate((arr1, zr))
        if diff < 0:
            zr = np.zeros(-diff)
            arr2 = np.concatenate((arr2, zr))
        val = arr1 - arr2
        # Trim trailing zeros, matching NumPy's behaviour.
        return pu.trimseq(val)
    return impl
@overload(poly.polymul)
def METHOD_NAME(c1, c2):
    """Numba overload of polynomial.polymul: product of two 1-d coefficient series.

    The product is computed by convolution; the result dtype follows NumPy's
    promotion rules and trailing zeros are trimmed.
    """
    if not type_can_asarray(c1):
        msg = 'The argument "c1" must be array-like'
        raise errors.TypingError(msg)
    if not type_can_asarray(c2):
        msg = 'The argument "c2" must be array-like'
        raise errors.TypingError(msg)
    if np.ndim(c1) > 1 or np.ndim(c2) > 1:
        msg = 'Coefficient array is not 1-d'
        raise errors.NumbaValueError(msg)
    result_dtype = _poly_result_dtype((c1, c2))
    def impl(c1, c2):
        c1 = np.asarray(c1)
        c2 = np.asarray(c2)
        arr1 = np.atleast_1d(c1)
        arr2 = np.atleast_1d(c2)
        val = np.convolve(arr1, arr2).astype(result_dtype)
        return pu.trimseq(val)
    # BUG FIX: was `return imp`, a typo raising NameError when the overload
    # was instantiated.
    return impl
6,778 | get refresh access token response | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
from abc import abstractmethod
from typing import Any, List, Mapping, MutableMapping, Optional, Tuple, Union
import backoff
import pendulum
import requests
from airbyte_cdk.models import Level
from airbyte_cdk.sources.http_logger import format_http_message
from airbyte_cdk.sources.message import MessageRepository, NoopMessageRepository
from requests.auth import AuthBase
from ..exceptions import DefaultBackoffException
logger = logging.getLogger("airbyte")
_NOOP_MESSAGE_REPOSITORY = NoopMessageRepository()
class AbstractOauth2Authenticator(AuthBase):
    """
    Abstract class for an OAuth authenticators that implements the OAuth token refresh flow. The authenticator
    is designed to generically perform the refresh flow without regard to how config fields are get/set by
    delegating that behavior to the classes implementing the interface.
    """

    _NO_STREAM_NAME = None

    def __call__(self, request: requests.Request) -> requests.Request:
        """Attach the HTTP headers required to authenticate on the HTTP request"""
        request.headers.update(self.get_auth_header())
        return request

    def get_auth_header(self) -> Mapping[str, Any]:
        """HTTP header to set on the requests"""
        return {"Authorization": f"Bearer {self.get_access_token()}"}

    def get_access_token(self) -> str:
        """Returns the access token, refreshing it first if it has expired."""
        if self.token_has_expired():
            token, expires_in = self.refresh_access_token()
            self.access_token = token
            self.set_token_expiry_date(expires_in)
        return self.access_token

    def token_has_expired(self) -> bool:
        """Returns True if the token is expired"""
        return pendulum.now() > self.get_token_expiry_date()

    def build_refresh_request_body(self) -> Mapping[str, Any]:
        """
        Returns the request body to set on the refresh request

        Override to define additional parameters
        """
        payload: MutableMapping[str, Any] = {
            "grant_type": self.get_grant_type(),
            "client_id": self.get_client_id(),
            "client_secret": self.get_client_secret(),
            "refresh_token": self.get_refresh_token(),
        }

        # BUG FIX: previously this tested the bound method object itself
        # (`if self.get_scopes:`), which is always truthy and could attach
        # "scopes": None to the payload. Call it and test the result instead.
        if self.get_scopes():
            payload["scopes"] = self.get_scopes()

        if self.get_refresh_request_body():
            for key, val in self.get_refresh_request_body().items():
                # We defer to existing oauth constructs over custom configured fields
                if key not in payload:
                    payload[key] = val

        return payload

    @backoff.on_exception(
        backoff.expo,
        DefaultBackoffException,
        on_backoff=lambda details: logger.info(
            f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} seconds then retrying..."
        ),
        max_time=300,
    )
    def METHOD_NAME(self):
        """POST to the token refresh endpoint and return the parsed JSON response.

        HTTP 429 and 5XX responses are raised as DefaultBackoffException so the
        backoff decorator retries them (exponential backoff, up to 300s total);
        any other failure is re-raised immediately.
        """
        try:
            response = requests.request(method="POST", url=self.get_token_refresh_endpoint(), data=self.build_refresh_request_body())
            self._log_response(response)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            # BUG FIX: e.response is None for connection-level failures
            # (timeouts, DNS errors, ...); guard before reading status_code so
            # an AttributeError doesn't mask the original exception.
            if e.response is not None and (e.response.status_code == 429 or e.response.status_code >= 500):
                raise DefaultBackoffException(request=e.response.request, response=e.response)
            raise
        except Exception as e:
            raise Exception(f"Error while refreshing access token: {e}") from e

    def refresh_access_token(self) -> Tuple[str, int]:
        """
        Returns the refresh token and its lifespan in seconds

        :return: a tuple of (access_token, token_lifespan_in_seconds)
        """
        response_json = self.METHOD_NAME()
        return response_json[self.get_access_token_name()], int(response_json[self.get_expires_in_name()])

    @abstractmethod
    def get_token_refresh_endpoint(self) -> str:
        """Returns the endpoint to refresh the access token"""

    @abstractmethod
    def get_client_id(self) -> str:
        """The client id to authenticate"""

    @abstractmethod
    def get_client_secret(self) -> str:
        """The client secret to authenticate"""

    @abstractmethod
    def get_refresh_token(self) -> Optional[str]:
        """The token used to refresh the access token when it expires"""

    @abstractmethod
    def get_scopes(self) -> List[str]:
        """List of requested scopes"""

    @abstractmethod
    def get_token_expiry_date(self) -> pendulum.DateTime:
        """Expiration date of the access token"""

    @abstractmethod
    def set_token_expiry_date(self, value: Union[str, int]):
        """Setter for access token expiration date"""

    @abstractmethod
    def get_access_token_name(self) -> str:
        """Field to extract access token from in the response"""

    @abstractmethod
    def get_expires_in_name(self) -> str:
        """Returns the expires_in field name"""

    @abstractmethod
    def get_refresh_request_body(self) -> Mapping[str, Any]:
        """Returns the request body to set on the refresh request"""

    @abstractmethod
    def get_grant_type(self) -> str:
        """Returns grant_type specified for requesting access_token"""

    @property
    @abstractmethod
    def access_token(self) -> str:
        """Returns the access token"""

    @access_token.setter
    @abstractmethod
    def access_token(self, value: str) -> str:
        """Setter for the access token"""

    @property
    def _message_repository(self) -> Optional[MessageRepository]:
        """
        The implementation can define a message_repository if it wants debugging logs for HTTP requests
        """
        return _NOOP_MESSAGE_REPOSITORY

    def _log_response(self, response: requests.Response):
        """Log the refresh-token HTTP exchange (DEBUG level, marked auxiliary)."""
        self._message_repository.log_message(
            Level.DEBUG,
            lambda: format_http_message(
                response,
                "Refresh token",
                "Obtains access token",
                self._NO_STREAM_NAME,
                is_auxiliary=True,
            ),
        )
6,779 | run interpreter | from collections import ChainMap
import numpy as np
from .LoopIR import LoopIR
from .LoopIR import T
from .prelude import *
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Loop IR Interpreter
def _eshape(typ, env):
    """Resolve a type's shape to concrete ints, looking symbolic dims up in env."""
    dims = []
    for dim in typ.shape():
        dims.append(dim if is_pos_int(dim) else env[dim])
    return tuple(dims)
def METHOD_NAME(proc, kwargs):
    """Interpret `proc` with the given argument buffers (runs for side effects:
    the numpy buffers in `kwargs` are mutated in place)."""
    Interpreter(proc, kwargs)
class Interpreter:
    """Reference interpreter for LoopIR procedures.

    Constructing an Interpreter type-checks the supplied kwargs against the
    procedure's signature and immediately evaluates the procedure body,
    mutating the supplied numpy buffers in place.
    """

    def __init__(self, proc, kwargs, use_randomization=False):
        assert isinstance(proc, LoopIR.proc)

        self.proc = proc
        # ChainMap provides lexical scoping: new_child() pushes a scope and
        # .parents pops one. Both return a *new* ChainMap, so the result must
        # be assigned back to self.env.
        self.env = ChainMap()
        self.use_randomization = use_randomization

        for a in proc.args:
            if not str(a.name) in kwargs:
                raise TypeError(f"expected argument '{a.name}' to be supplied")

            if a.type is T.size:
                if not is_pos_int(kwargs[str(a.name)]):
                    raise TypeError(
                        f"expected size '{a.name}' to have positive integer value"
                    )
                self.env[a.name] = kwargs[str(a.name)]
            elif a.type is T.index:
                # BUG FIX: previously compared the Python type of the value
                # to the LoopIR type object T.index, which can never match,
                # so every index argument raised. Check for int as the error
                # message intends.
                if type(kwargs[str(a.name)]) is not int:
                    raise TypeError(
                        f"expected index variable '{a.name}' to be an integer"
                    )
                self.env[a.name] = kwargs[str(a.name)]
            elif a.type is T.bool:
                if type(kwargs[str(a.name)]) is not bool:
                    raise TypeError(f"expected bool variable '{a.name}' to be a bool")
                self.env[a.name] = kwargs[str(a.name)]
            else:
                assert a.type.is_numeric()
                self.simple_typecheck_buffer(a, kwargs)
                self.env[a.name] = kwargs[str(a.name)]

        # BUG FIX: the return values of new_child()/.parents were previously
        # discarded, so scopes were never actually pushed or popped and all
        # bindings landed in one flat dict (letting inner-scope names clobber
        # outer ones).
        self.env = self.env.new_child()
        self.eval_stmts(proc.body)
        self.env = self.env.parents

    def simple_typecheck_buffer(self, fnarg, kwargs):
        """Validate that a numeric argument is a float ndarray of the expected shape."""
        typ = fnarg.type
        buf = kwargs[str(fnarg.name)]
        nm = fnarg.name
        pre = f"bad argument '{nm}'"
        if not isinstance(buf, np.ndarray):
            raise TypeError(f"{pre}: expected numpy.ndarray")
        elif buf.dtype != float and buf.dtype != np.float32 and buf.dtype != np.float16:
            raise TypeError(
                f"{pre}: expected buffer of floating-point values; "
                f"had '{buf.dtype}' values"
            )
        if typ.is_real_scalar():
            # Scalars are represented as shape-(1,) buffers.
            if tuple(buf.shape) != (1,):
                raise TypeError(
                    f"{pre}: expected buffer of shape (1,), "
                    f"but got shape {tuple(buf.shape)}"
                )
        else:
            shape = self.eval_shape(typ)
            if shape != tuple(buf.shape):
                raise TypeError(
                    f"{pre}: expected buffer of shape {shape}, "
                    f"but got shape {tuple(buf.shape)}"
                )

    def eval_stmts(self, stmts):
        """Evaluate a list of statements in order."""
        for s in stmts:
            self.eval_s(s)

    def eval_s(self, s):
        """Evaluate a single statement node."""
        styp = type(s)

        if styp is LoopIR.Pass:
            pass
        elif styp is LoopIR.Assign or styp is LoopIR.Reduce:
            # lbuf[a0,a1,...] = rhs  (Assign)   or   lbuf[...] += rhs  (Reduce)
            lbuf = self.env[s.name]
            if len(s.idx) == 0:
                # scalars live in shape-(1,) buffers
                idx = (0,)
            else:
                idx = tuple(self.eval_e(a) for a in s.idx)
            rhs = self.eval_e(s.rhs)
            if styp is LoopIR.Assign:
                lbuf[idx] = rhs
            else:
                lbuf[idx] += rhs
        elif styp is LoopIR.If:
            cond = self.eval_e(s.cond)
            # BUG FIX (here and below): push/pop scopes by assigning the
            # ChainMap returned by new_child()/.parents back to self.env.
            if cond:
                self.env = self.env.new_child()
                self.eval_stmts(s.body)
                self.env = self.env.parents
            if s.orelse and not cond:
                self.env = self.env.new_child()
                self.eval_stmts(s.orelse)
                self.env = self.env.parents
        elif styp is LoopIR.Seq:
            lo = self.eval_e(s.lo)
            hi = self.eval_e(s.hi)
            assert self.use_randomization is False, "TODO: Implement Rand"
            # One scope for the whole loop; the iteration variable is rebound
            # each trip.
            self.env = self.env.new_child()
            for itr in range(lo, hi):
                self.env[s.iter] = itr
                self.eval_stmts(s.body)
            self.env = self.env.parents
        elif styp is LoopIR.Alloc:
            if s.type.is_real_scalar():
                self.env[s.name] = np.empty([1])
            else:
                size = self.eval_shape(s.type)
                # TODO: Maybe randomize?
                self.env[s.name] = np.empty(size)
        elif styp is LoopIR.Call:
            argvals = [self.eval_e(a, call_arg=True) for a in s.args]
            argnames = [str(a.name) for a in s.f.args]
            kwargs = {nm: val for nm, val in zip(argnames, argvals)}
            # Sub-procedure calls get their own fresh interpreter/environment.
            Interpreter(s.f, kwargs, use_randomization=self.use_randomization)
        else:
            assert False, "bad case"

    def eval_e(self, e, call_arg=False):
        """Evaluate an expression node to a Python/numpy value.

        call_arg=True passes buffers through unindexed (for sub-procedure calls).
        """
        etyp = type(e)

        if etyp is LoopIR.Read:
            buf = self.env[e.name]
            if call_arg or isinstance(buf, (int, bool)):
                return buf
            else:
                idx = (0,) if len(e.idx) == 0 else tuple(self.eval_e(a) for a in e.idx)
                return buf[idx]
        elif etyp is LoopIR.Const:
            return e.val
        elif etyp is LoopIR.USub:
            return -self.eval_e(e.arg)
        elif etyp is LoopIR.BinOp:
            lhs, rhs = self.eval_e(e.lhs), self.eval_e(e.rhs)
            if e.op == "+":
                return lhs + rhs
            elif e.op == "-":
                return lhs - rhs
            elif e.op == "*":
                return lhs * rhs
            elif e.op == "/":
                # NOTE(review): integer division here is *ceiling* division;
                # confirm this matches LoopIR's intended semantics.
                if isinstance(lhs, int):
                    return (lhs + rhs - 1) // rhs
                else:
                    return lhs / rhs
            elif e.op == "%":
                return lhs % rhs
            elif e.op == "==":
                return lhs == rhs
            elif e.op == "<":
                return lhs < rhs
            elif e.op == ">":
                return lhs > rhs
            elif e.op == "<=":
                return lhs <= rhs
            elif e.op == ">=":
                return lhs >= rhs
            elif e.op == "and":
                return lhs and rhs
            elif e.op == "or":
                return lhs or rhs
        elif etyp is LoopIR.BuiltIn:
            args = [self.eval_e(a) for a in e.args]
            return e.f.interpret(args)
        else:
            assert False, "bad case"

    def eval_shape(self, typ):
        """Evaluate a type's (possibly symbolic) shape to a tuple of ints."""
        return tuple(self.eval_e(s) for s in typ.shape())
6,780 | test ffmpeg hwaccel preset | import unittest
from frigate.config import FFMPEG_INPUT_ARGS_DEFAULT, FrigateConfig
from frigate.ffmpeg_presets import parse_preset_input
class TestFfmpegPresets(unittest.TestCase):
    """Tests that ffmpeg hwaccel/input/output presets are expanded into real
    ffmpeg arguments in the generated camera commands (and that non-preset
    argument strings pass through untouched)."""

    def setUp(self):
        # Minimal valid config with one camera; individual tests mutate this
        # dict before constructing FrigateConfig.
        self.default_ffmpeg = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect", "rtmp"],
                            }
                        ],
                        "output_args": {
                            "detect": "-f rawvideo -pix_fmt yuv420p",
                            "record": "-f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an",
                            "rtmp": "-c copy -f flv",
                        },
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                    "record": {
                        "enabled": True,
                    },
                    "rtmp": {
                        "enabled": True,
                    },
                    "name": "back",
                }
            },
        }

    def test_default_ffmpeg(self):
        """Config without presets round-trips unchanged."""
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)

    def METHOD_NAME(self):
        """hwaccel preset name is replaced by its expanded arguments."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
            "hwaccel_args"
        ] = "preset-rpi-64-h264"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "preset-rpi-64-h264" not in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
        assert "-c:v:1 h264_v4l2m2m" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_hwaccel_not_preset(self):
        """Non-preset hwaccel args pass through verbatim."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
            "hwaccel_args"
        ] = "-other-hwaccel args"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "-other-hwaccel args" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_hwaccel_scale_preset(self):
        """Scaling presets pick up the camera's detect resolution and fps."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
            "hwaccel_args"
        ] = "preset-nvidia-h264"
        self.default_ffmpeg["cameras"]["back"]["detect"] = {
            "height": 1920,
            "width": 2560,
            "fps": 10,
        }
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "preset-nvidia-h264" not in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
        assert (
            "fps=10,scale_cuda=w=2560:h=1920:format=nv12,hwdownload,format=nv12,format=yuv420p"
            in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
        )

    def test_default_ffmpeg_input_arg_preset(self):
        """The generic rtsp input preset matches the implicit default args."""
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
            "input_args"
        ] = "preset-rtsp-generic"
        frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
        assert (
            # Ignore global and user_agent args in comparison
            frigate_preset_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
            == frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
        )

    def test_ffmpeg_input_preset(self):
        """Input preset name expands to parse_preset_input's arguments."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
            "input_args"
        ] = "preset-rtmp-generic"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "preset-rtmp-generic" not in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
        assert (" ".join(parse_preset_input("preset-rtmp-generic", 5))) in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_input_args_as_string(self):
        """Input args given as one string (incl. quoted spaces) are tokenized."""
        # Strip user_agent args here to avoid handling quoting issues
        defaultArgsList = parse_preset_input(FFMPEG_INPUT_ARGS_DEFAULT, 5)[2::]
        argsString = " ".join(defaultArgsList) + ' -some "arg with space"'
        argsList = defaultArgsList + ["-some", "arg with space"]
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = argsString
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert set(argsList).issubset(
            frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
        )

    def test_ffmpeg_input_not_preset(self):
        """Non-preset input args pass through verbatim."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = "-some inputs"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "-some inputs" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_output_record_preset(self):
        """Record output preset expands to concrete codec arguments."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
            "record"
        ] = "preset-record-generic-audio-aac"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "preset-record-generic-audio-aac" not in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
        assert "-c:v copy -c:a aac" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_output_record_not_preset(self):
        """Non-preset record output args pass through verbatim."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
            "record"
        ] = "-some output"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "-some output" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_output_rtmp_preset(self):
        """RTMP output preset expands to concrete codec arguments."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
            "rtmp"
        ] = "preset-rtmp-jpeg"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "preset-rtmp-jpeg" not in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
        assert "-c:v libx264" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )

    def test_ffmpeg_output_rtmp_not_preset(self):
        """Non-preset RTMP output args pass through verbatim."""
        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
            "rtmp"
        ] = "-some output"
        frigate_config = FrigateConfig(**self.default_ffmpeg)
        frigate_config.cameras["back"].create_ffmpeg_cmds()
        assert "-some output" in (
            " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
        )
# Allow running this test module directly (outside the test runner).
if __name__ == "__main__":
    unittest.main(verbosity=2)
6,781 | get sweep data | from typing import Dict, Any, Sequence
from datetime import datetime
import json
import pytest
import numpy as np
import pandas as pd
from ipfx.dataset.ephys_data_interface import EphysDataInterface
from ipfx.stimulus import StimulusOntology
from ipfx.dataset.ephys_data_set import EphysDataSet
def test_voltage_current():
    """In current clamp, _voltage_current maps (stimulus, response) -> (v, i)
    with voltage taken from the response and current from the stimulus."""
    stim = np.arange(5)
    resp = np.arange(5, 10)

    voltage, current = EphysDataSet._voltage_current(
        stim, resp, EphysDataSet.CURRENT_CLAMP
    )

    assert np.allclose(voltage, resp)
    assert np.allclose(current, stim)
def test_voltage_current_unequal():
    """Mismatched stimulus/response lengths must raise ValueError."""
    stim = np.arange(2)
    resp = np.arange(3)
    with pytest.raises(ValueError):
        EphysDataSet._voltage_current(stim, resp, EphysDataSet.VOLTAGE_CLAMP)
# Load the packaged default stimulus ontology once at import time so all
# tests in this module share a single StimulusOntology instance.
with open(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE, "r") as _def_ont:
    _default_ont_data = json.load(_def_ont)
DEFAULT_ONT = StimulusOntology(_default_ont_data)
class EphysDataFixture(EphysDataInterface):
    """In-memory EphysDataInterface backed by the hard-coded SWEEPS table
    below (no NWB file required), for exercising EphysDataSet.
    """

    REC_DATE = datetime.strptime(
        "2020-03-19 10:30:12 +1000", "%Y-%m-%d %H:%M:%S %z")

    # sweep_number -> {"meta": sweep metadata, "data": stimulus/response arrays}.
    # Sweeps 1 and 2 are current clamp; sweep 3 is voltage clamp.
    SWEEPS: Dict[int, Dict[str, Dict[str, Any]]] = {
        1: {
            "meta": {
                "sweep_number": 1,
                "stimulus_units": "amperes",
                "bridge_balance_mohm": "1.0",
                "leak_pa": "0.0",
                "stimulus_scale_factor": "1.0",
                "stimulus_code": "PS_SupraThresh",
                "stimulus_name": "Long Square",
                "clamp_mode": "CurrentClamp"
            },
            "data": {
                "stimulus": np.array([0, 1, 1, 0, 1, 1, 1, 1, 0, 0]),
                "response": np.arange(0, 5, 0.5),
                "sampling_rate": 1.5
            }
        },
        2: {
            "meta": {
                "sweep_number": 2,
                "stimulus_units": "amperes",
                "bridge_balance_mohm": "1.0",
                "leak_pa": "0.0",
                "stimulus_scale_factor": "1.0",
                "stimulus_code": "PS_SupraThresh",
                "stimulus_name": "Long Square",
                "clamp_mode": "CurrentClamp"
            },
            "data": {
                "stimulus": np.array([0, 1, 1, 0, 1, 1, 1, 1, 0, 0]),
                "response": np.arange(10)[::-1],
                "sampling_rate": 1.5
            }
        },
        3: {
            "meta": {
                "sweep_number": 3,
                "stimulus_units": "volts",
                "bridge_balance_mohm": "1.0",
                "leak_pa": "0.0",
                "stimulus_scale_factor": "1.0",
                "stimulus_code": "shortsquaretemp",
                "stimulus_name": "Short Square",
                "clamp_mode": "VoltageClamp"
            },
            "data": {
                "stimulus": np.array([0, 1, 1, 0, 1, 1, 1, 1, 0, 0]),
                "response": np.arange(10),
                "sampling_rate": 1.5
            }
        }
    }

    @property
    def sweep_numbers(self) -> Sequence[int]:
        """All sweep numbers available in the fixture."""
        return list(self.SWEEPS.keys())

    def METHOD_NAME(self, sweep_number: int) -> Dict[str, Any]:
        """Return the stimulus/response/sampling_rate dict for a sweep."""
        return self.SWEEPS[sweep_number]["data"]

    def get_sweep_metadata(self, sweep_number: int) -> Dict[str, Any]:
        """Return the metadata dict for a sweep."""
        return self.SWEEPS[sweep_number]["meta"]

    def get_sweep_attrs(self, sweep_number) -> Dict[str, Any]:
        # Not needed by the tests in this module.
        raise NotImplementedError()

    def get_stimulus_code(self, sweep_number: int) -> str:
        return self.SWEEPS[sweep_number]["meta"]["stimulus_code"]

    def get_full_recording_date(self) -> datetime:
        return self.REC_DATE

    def get_stimulus_unit(self, sweep_number: int) -> str:
        return self.SWEEPS[sweep_number]["meta"]["stimulus_units"]

    def get_clamp_mode(self, sweep_number):
        return self.SWEEPS[sweep_number]["meta"]["clamp_mode"]
@pytest.fixture
def dataset():
    """EphysDataSet wrapping the in-memory fixture with the default ontology."""
    return EphysDataSet(EphysDataFixture(DEFAULT_ONT))

def test_ontology(dataset):
    """The dataset exposes the exact ontology instance it was built with."""
    assert DEFAULT_ONT is dataset.ontology

def test_sweep_table(dataset):
    """sweep_table contains one row per fixture sweep's metadata."""
    expected = pd.DataFrame([
        swp["meta"] for swp in EphysDataFixture.SWEEPS.values()
    ])
    pd.testing.assert_frame_equal(
        expected, dataset.sweep_table, check_like=True
    )

def test_sweep_info_setter(dataset):
    """Assigning sweep_info replaces the sweep table contents."""
    dataset.sweep_info = [{"sweep_number": 2}, {"sweep_number": 3}]
    assert set(dataset.sweep_table["sweep_number"].tolist()) == {2, 3}

def test_filtered_sweep_table(dataset):
    """Filtering by clamp mode keeps only the current-clamp sweeps (1 and 2)."""
    expected = pd.DataFrame([
        swp["meta"]
        for num, swp in EphysDataFixture.SWEEPS.items()
        if num in {1, 2}
    ])
    pd.testing.assert_frame_equal(
        expected,
        dataset.filtered_sweep_table(clamp_mode=EphysDataSet.CURRENT_CLAMP),
        check_like=True
    )

def test_get_sweep_numbers(dataset):
    """Sweeps are selectable by stimulus name."""
    assert np.allclose(
        [1, 2],
        dataset.get_sweep_numbers(stimuli=["PS_SupraThresh"])
    )

def test_sweep(dataset):
    """Voltage-clamp sweep: v comes from the stimulus, i from the response;
    time is sample index over sampling rate."""
    obtained = dataset.sweep(3)
    assert np.allclose([0, 1, 1, 0, 1, 1, 1, 1, 0, 0], obtained.v)
    assert np.allclose(np.arange(10), obtained.i)
    assert np.allclose(np.arange(10) / 1.5, obtained.t)

def test_sweep_set(dataset):
    """sweep_set collects per-sweep voltage traces in request order."""
    sweepset = dataset.sweep_set([1, 3])
    assert np.allclose(
        [np.arange(0, 5, 0.5), np.array([0, 1, 1, 0, 1, 1, 1, 1, 0, 0])],
        sweepset.v
    )

def test_get_recording_date(dataset):
    """Recording date is formatted without the timezone offset."""
    assert dataset.get_recording_date() == "2020-03-19 10:30:12"

def test_get_sweep_data(dataset):
    obtained = dataset.METHOD_NAME(1)
    assert np.allclose(obtained["response"], np.arange(0, 5, 0.5))

def test_get_clamp_mode(dataset):
    assert "VoltageClamp" == dataset.get_clamp_mode(3)

def test_get_stimulus_code(dataset):
    assert "shortsquaretemp" == dataset.get_stimulus_code(3)

def test_get_stimulus_code_ext(dataset):
    """Extended code appends the sweep number in brackets."""
    assert "PS_SupraThresh[2]" == dataset.get_stimulus_code_ext(2)

def test_get_stimulus_units(dataset):
    assert "amperes" == dataset.get_stimulus_units(1)
6,782 | add arguments | import os
from collections import defaultdict
import pandas as pd
from django.core.management.base import BaseCommand
from django.db import transaction
from tqdm import tqdm
from courses.models import Course, Topic
from PennCourses.settings.base import XWALK_S3_BUCKET, XWALK_SRC, S3_client
from review.management.commands.clearcache import clear_cache
def get_crosswalk(cross_walk):
    """
    From a given crosswalk csv path, generate a dict mapping old_full_code to
    a list of the new codes originating from that source.
    """
    frame = pd.read_csv(cross_walk, delimiter="|", encoding="unicode_escape", dtype=str)
    links = defaultdict(list)
    for _, row in frame.iterrows():
        old_code = f"{row['SRS_SUBJ_CODE']}-{row['SRS_COURSE_NUMBER']}"
        new_code = f"{row['NGSS_SUBJECT']}-{row['NGSS_COURSE_NUMBER']}"
        links[old_code].append(new_code)
    return links
def get_crosswalk_s3(verbose=False):
    """
    Download the crosswalk configured in settings/base.py from S3 and generate
    a dict mapping old_full_code to a list of the new codes originating from
    that source.
    """
    # Download to a fixed temp path, parse it, then delete the file.
    fp = "/tmp/" + XWALK_SRC
    if verbose:
        print(f"downloading crosswalk from s3://{XWALK_S3_BUCKET}/{XWALK_SRC}")
    S3_client.download_file(XWALK_S3_BUCKET, XWALK_SRC, fp)
    crosswalk = get_crosswalk(fp)
    # Remove temporary file
    os.remove(fp)
    return crosswalk
def load_crosswalk(print_missing=False, verbose=False):
    """
    Loads the crosswalk from settings/base.py, updating branched_from fields
    and merging Topics as appropriate.
    :param print_missing: If True, prints courses involved in crosswalk links that were
        not found in the database.
    :param verbose: A flag indicating whether this script should print its progress.
    """
    crosswalk = get_crosswalk_s3(verbose=verbose)
    if verbose:
        print("Loading crosswalk.")
    num_merges = 0
    num_branch_updates = 0
    num_missing_roots = 0
    num_missing_children = 0
    with transaction.atomic():
        # Branches are recomputed from scratch on every run.
        Topic.objects.all().update(branched_from=None)
        for root_course_code, children_codes in tqdm(crosswalk.items()):
            # Most recent offering of the old (root) course code.
            root_course = (
                Course.objects.filter(full_code=root_course_code)
                .order_by("-semester")
                .select_related("topic")
                .first()
            )
            if not root_course:
                num_missing_roots += 1
                if print_missing:
                    print(f"Root course {root_course} not found in db")
                continue
            root_topic = root_course.topic
            assert root_topic, f"Root course {root_course} has no topic"
            children = (
                Course.objects.filter(
                    full_code__in=children_codes, semester__gt=root_course.semester
                )
                .order_by("-semester")
                .select_related("topic")
            )
            # Take minimum semester course (after root course semester) matching
            # child code: the descending sort means later (earlier-semester)
            # entries overwrite earlier ones in these dict comprehensions.
            child_to_topic = {child.full_code: child.topic for child in children}
            for child in {child.full_code: child for child in children}.values():
                assert child.topic, f"Child course {child} of root {root_course} has no topic"
            child_topics = set(child_to_topic.values())
            missing_codes = set(children_codes) - set(child_to_topic.keys())
            for child_code in missing_codes:
                num_missing_children += 1
                if print_missing:
                    print(f"Child course {child_code} not found in db")
            if len(child_topics) == 1 and not missing_codes:
                # Unambiguous link (all children share one topic): merge the
                # child topic into the root topic.
                child_topic = child_topics.pop()
                if child_topic.branched_from:
                    child_topic.branched_from = None
                    child_topic.save()
                if root_topic != child_topic:
                    root_topic.merge_with(child_topic)
                    num_merges += 1
            else:
                # Branching link: each child topic records the root topic as
                # its origin instead of being merged.
                for child_topic in child_topics:
                    if root_topic not in [child_topic, child_topic.branched_from]:
                        num_branch_updates += 1
                        child_topic.branched_from = root_topic
                        child_topic.save()
    if verbose:
        print(f"Performed {num_merges} Topic merges.")
        print(f"Added branches, updating the branched_from field of {num_branch_updates} Topics.")
        print(f"{num_missing_roots}/{len(crosswalk)} roots not found in db")
        print(
            f"{num_missing_children}/{sum(len(c) for c in crosswalk.values())} "
            "children not found in db"
        )
class Command(BaseCommand):
    """Django management command wrapping load_crosswalk."""

    help = (
        "This script loads the crosswalk from settings/base.py, updating "
        "branched_from fields and merging Topics as appropriate."
    )

    def METHOD_NAME(self, parser):
        """Register CLI flags (invoked by Django as add_arguments)."""
        parser.add_argument(
            "--print_missing",
            action="store_true",
            help="Print out all missing roots and children.",
        )

    def handle(self, *args, **kwargs):
        """Entry point: load the crosswalk verbosely, then invalidate caches."""
        load_crosswalk(print_missing=kwargs["print_missing"], verbose=True)
        print("Clearing cache")
        del_count = clear_cache()
        print(f"{del_count if del_count >=0 else 'all'} cache entries removed.")
6,783 | current span | """OpenTracing utilities."""
import asyncio
import sys
import typing
from contextvars import ContextVar
from functools import wraps
from typing import Any, Callable, Optional, Tuple
import opentracing
from mode import shortlabel
__all__ = [
"current_span",
"noop_span",
"set_current_span",
"finish_span",
"operation_name_from_fun",
"traced_from_parent_span",
"call_with_trace",
]
if typing.TYPE_CHECKING:
_current_span: ContextVar[opentracing.Span]
_current_span = ContextVar("current_span")
def METHOD_NAME() -> Optional[opentracing.Span]:
    """Get the current span for this context (if any).

    Returns None when no span has been set in the current context.
    """
    return _current_span.get(None)
def set_current_span(span: opentracing.Span) -> None:
    """Set the current span for the current context."""
    # ContextVar.set scopes the value to the current task/thread context.
    _current_span.set(span)
def noop_span() -> opentracing.Span:
    """Return a span that does nothing when traced."""
    # Relies on the private _noop_span attribute of a throwaway Tracer.
    return opentracing.Tracer()._noop_span
def finish_span(
    span: Optional[opentracing.Span], *, error: BaseException = None
) -> None:
    """Finish span, and optionally set error tag."""
    if span is None:
        return
    if error is None:
        span.finish()
    else:
        # __exit__ both records the error on the span and finishes it.
        span.__exit__(type(error), error, error.__traceback__)
def operation_name_from_fun(fun: Any) -> str:
    """Generate opentracing name from function."""
    funlabel = shortlabel(fun)
    obj = getattr(fun, "__self__", None)
    if obj is None:
        # plain function: just its label
        return f"{funlabel}"
    objlabel = shortlabel(obj)
    if funlabel.startswith(objlabel):
        # remove obj name from function label so it isn't repeated
        funlabel = funlabel[len(objlabel):]
    return f"{objlabel}-{funlabel}"
def traced_from_parent_span(
    parent_span: opentracing.Span = None,
    callback: Optional[Callable] = None,
    **extra_context: Any,
) -> Callable:
    """Decorate function to be traced from parent span.

    If parent_span is None, the context's current span at call time is used.
    When no parent is available at all, the function runs untraced. extra_context
    (and any per-wrap more_context) become tags on the child span; callback, if
    given, is invoked with the child span before the function runs.
    """
    def _wrapper(fun: Callable, **more_context: Any) -> Callable:
        operation_name = operation_name_from_fun(fun)
        @wraps(fun)
        def _inner(*args: Any, **kwargs: Any) -> Any:
            parent = parent_span
            if parent is None:
                parent = METHOD_NAME()
            if parent is not None:
                child = parent.tracer.start_span(
                    operation_name=operation_name,
                    child_of=parent,
                    tags={**extra_context, **more_context},
                )
                if callback is not None:
                    callback(child)
                # After the call, restore the parent as the current span
                # (checked against `child` by _restore_span).
                on_exit = (_restore_span, (parent, child))
                set_current_span(child)
                return call_with_trace(child, fun, on_exit, *args, **kwargs)
            # No parent span available: run untraced.
            return fun(*args, **kwargs)
        return _inner
    return _wrapper
def _restore_span(
    span: opentracing.Span, expected_current_span: opentracing.Span
) -> None:
    """Pop back to `span`, asserting the span being replaced is the expected one.

    NOTE(review): `assert` is stripped under python -O; confirm that losing
    this invariant check in optimized runs is acceptable.
    """
    current = METHOD_NAME()
    assert current is expected_current_span
    set_current_span(span)
def call_with_trace(
    span: opentracing.Span,
    fun: Callable,
    callback: Optional[Tuple[Callable, Tuple[Any, ...]]],
    *args: Any,
    **kwargs: Any,
) -> Any:
    """Call function and trace it from parent span.

    *span* is entered before the call and exited when the call -- or, for an
    ``async def`` function, the coroutine it returns -- completes.  *callback*
    is an optional ``(fn, args)`` pair invoked after the span is closed.
    """
    cb: Optional[Callable] = None
    cb_args: Tuple = ()
    if callback:
        cb, cb_args = callback
    span.__enter__()
    try:
        ret = fun(*args, **kwargs)
    except BaseException:
        # Synchronous failure: record the exception on the span and re-raise.
        span.__exit__(*sys.exc_info())
        raise
    else:
        if asyncio.iscoroutine(ret):
            # fun is an async def: keep the span open until the returned
            # coroutine completes, by wrapping it.
            async def corowrapped() -> Any:
                await_ret = None
                try:
                    await_ret = await ret
                except BaseException:
                    span.__exit__(*sys.exc_info())
                    if cb:
                        cb(*cb_args)
                    raise
                else:
                    span.__exit__(None, None, None)
                    if cb:
                        cb(*cb_args)
                    return await_ret
            return corowrapped()
        else:
            # Plain function: the work is already done, just exit the span.
            span.__exit__(None, None, None)
            if cb:
                cb(*cb_args)
            return ret
""" Create aircraft trails on the radar display."""
from math import *
import numpy as np
import bluesky as bs
from bluesky import settings
from bluesky.core import TrafficArrays
class Trails(TrafficArrays):
    """Aircraft trail data for the radar display.

    Trails are stored as individual line pieces (lat0/lon0 -> lat1/lon1)
    together with the simulation time at which each piece was created.  New
    pieces accumulate in a foreground buffer; buffer() moves them to a
    background copy.

    Created by: Jacco M. Hoekstra
    """

    def __init__(self, dttrail=10.):
        super().__init__()
        self.active = False                 # Whether or not to show trails
        self.dt = dttrail                   # Resolution of trail pieces in time
        self.pygame = (bs.gui == 'pygame')  # Trails are different for pygame
        self.tcol0 = 60.                    # After how many seconds old colour

        # Standard colours available for trails (RGB)
        self.colorList = {'BLUE': np.array([0, 0, 255]),
                          'CYAN': np.array([0, 255, 255]),
                          'RED': np.array([255, 0, 0]),
                          'YELLOW': np.array([255, 255, 0])}

        # Set default colour to cyan
        self.defcolor = self.colorList['CYAN']

        # Foreground data on line pieces
        self.lat0 = np.array([])
        self.lon0 = np.array([])
        self.lat1 = np.array([])
        self.lon1 = np.array([])
        self.time = np.array([])
        self.col = []
        self.fcol = np.array([])

        # Background copy of data
        self.bglat0 = np.array([])
        self.bglon0 = np.array([])
        self.bglat1 = np.array([])
        self.bglon1 = np.array([])
        self.bgtime = np.array([])
        self.bgcol = []

        # Per-aircraft state, resized automatically with the traffic arrays
        with self.settrafarrays():
            self.accolor = []
            self.lastlat = np.array([])
            self.lastlon = np.array([])
            self.lasttim = np.array([])

        self.clearnew()
        return

    def create(self, n=1):
        """Register n new aircraft: default colour, trail anchored at current position."""
        super().create(n)
        self.accolor[-1] = self.defcolor
        self.lastlat[-1] = bs.traf.lat[-1]
        self.lastlon[-1] = bs.traf.lon[-1]

    def update(self):
        """Add line pieces for trails based on traffic data."""
        self.acid = bs.traf.id
        if not self.active:
            # Keep per-aircraft anchors current so trails start cleanly
            # from the present position once they are switched on.
            self.lastlat = bs.traf.lat
            self.lastlon = bs.traf.lon
            self.lasttim[:] = bs.sim.simt
            return

        # Use temporary lists for fast append
        lstlat0 = []
        lstlon0 = []
        lstlat1 = []
        lstlon1 = []
        lsttime = []

        # Aircraft whose last piece is older than self.dt get a new piece
        delta = bs.sim.simt - self.lasttim
        idxs = np.where(delta > self.dt)[0]

        for i in idxs:
            # Add to lists
            lstlat0.append(self.lastlat[i])
            lstlon0.append(self.lastlon[i])
            lstlat1.append(bs.traf.lat[i])
            lstlon1.append(bs.traf.lon[i])
            lsttime.append(bs.sim.simt)

            # clearfg() resets self.col to an ndarray; convert back to a
            # list so per-piece colours can be appended.
            if isinstance(self.col, np.ndarray):
                self.col = self.col.tolist()
            self.col.append(self.accolor[i])

            # Update aircraft record
            self.lastlat[i] = bs.traf.lat[i]
            self.lastlon[i] = bs.traf.lon[i]
            self.lasttim[i] = bs.sim.simt

        # Completed pieces are no longer a/c data
        # => move them to the GUI buffer (send or draw)
        if self.pygame:
            # Pygame: send to drawing buffer
            self.lat0 = np.concatenate((self.lat0, np.array(lstlat0)))
            self.lon0 = np.concatenate((self.lon0, np.array(lstlon0)))
            self.lat1 = np.concatenate((self.lat1, np.array(lstlat1)))
            self.lon1 = np.concatenate((self.lon1, np.array(lstlon1)))
            self.time = np.concatenate((self.time, np.array(lsttime)))
        else:
            # QtGL: add to send buffer
            self.newlat0.extend(lstlat0)
            self.newlon0.extend(lstlon0)
            self.newlat1.extend(lstlat1)
            self.newlon1.extend(lstlon1)

        # Fade factor: 1.0 for fresh pieces down to 0.0 after tcol0 seconds
        self.fcol = (1. - np.minimum(self.tcol0, np.abs(bs.sim.simt - self.time)) / self.tcol0)
        return

    def buffer(self):
        """Buffer trails: move current stack to background."""
        self.bglat0 = np.append(self.bglat0, self.lat0)
        self.bglon0 = np.append(self.bglon0, self.lon0)
        self.bglat1 = np.append(self.bglat1, self.lat1)
        self.bglon1 = np.append(self.bglon1, self.lon1)
        self.bgtime = np.append(self.bgtime, self.time)

        # No colour saved: background is always drawn in the 'old' colour
        if isinstance(self.bgcol, np.ndarray):
            self.bgcol = self.bgcol.tolist()
        if isinstance(self.col, np.ndarray):
            self.col = self.col.tolist()

        self.bgcol = self.bgcol + self.col
        self.bgacid = self.bgacid + self.acid

        self.clearfg()  # Clear foreground trails
        return

    def clearnew(self):
        """Clear the new-lines pipeline used for QtGL."""
        self.newlat0 = []
        self.newlon0 = []
        self.newlat1 = []
        self.newlon1 = []

    def clearfg(self):
        """Clear trails foreground."""
        self.lat0 = np.array([])
        self.lon0 = np.array([])
        self.lat1 = np.array([])
        self.lon1 = np.array([])
        self.time = np.array([])
        self.col = np.array([])
        return

    def clearbg(self):
        """Clear trails background."""
        self.bglat0 = np.array([])
        self.bglon0 = np.array([])
        self.bglat1 = np.array([])
        self.bglon1 = np.array([])
        self.bgtime = np.array([])
        self.bgacid = []
        return

    def clear(self):
        """Clear all data, foreground and background."""
        self.lastlon = np.array([])
        self.lastlat = np.array([])
        self.clearfg()
        self.clearbg()
        self.clearnew()
        return

    def setTrails(self, *args):
        """ Set trails on/off, or change trail color of aircraft """
        if len(args) == 0:
            msg = "TRAIL ON/OFF, [dt] / TRAIL acid color\n"
            if self.active:
                msg = msg + "TRAILS ARE ON"
            else:
                msg = msg + "TRAILS ARE OFF"
            return True, msg

        # Switch on/off
        elif type(args[0]) == bool:
            # Set trails on/off
            self.active = args[0]
            if len(args) > 1:
                self.dt = args[1]
            if not self.active:
                self.clear()

        # Change color per acid (pygame only)
        else:
            # Accept every colour we actually know about.  (The previous
            # hard-coded list omitted CYAN even though it is in colorList
            # and is the default trail colour.)
            if len(args) < 2 or args[1] not in self.colorList:
                return False, \
                    "Set aircraft trail color with: TRAIL acid BLUE/CYAN/RED/YELLOW"
            self.METHOD_NAME(args[1], args[0])
        return True

    def METHOD_NAME(self, color, idx):
        """Change color of aircraft trail"""
        self.accolor[idx] = self.colorList[color]
        return

    def reset(self):
        # This ensures that the traffic arrays (which size is dynamic)
        # are all reset as well, so all lat,lon,sdp etc but also objects adsb
        super().reset()
        self.clear()
        self.active = False
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
"""Unit tests for the CapturePostDfsIndexInSpans debugging pass."""
import tvm
import tvm.testing
import numpy as np
def make_const(dtype, shape):
    """Return a Relay constant of *shape* filled with random values of *dtype*."""
    data = np.random.rand(*shape).astype(dtype)
    return tvm.relay.const(data)
def make_consts(dtype, shapes):
    # One random Relay constant per requested shape, all sharing the same dtype.
    return [make_const(dtype, shape) for shape in shapes]
# Metatable consumed by tvm.relay.parse below: list position i backs the
# meta[relay.Constant][i] references in the Relay source text.
metatable = {
    "relay.Constant": make_consts(
        "float16",
        [
            (2304, 768),  # 0
            (2304,),  # 1
            (600, 32, 64),  # 2
        ],
    )
}
def METHOD_NAME():
    """Return the Relay module under test, parsed from source text.

    The meta[relay.Constant][i] references resolve against ``metatable``.
    """
    return tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x0 : Tensor[(1600, 768), float16], %x3 : Tensor[(600, 32, 64), float16]) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) {
          %0 = nn.dense(%x0, meta[relay.Constant][0], units=2304);
          %1 = add(%0, meta[relay.Constant][1]);
          %2 = fn(%y_3_i0: Tensor[(600, 32, 64), float16], %y_3_i1: Tensor[(600, 32, 64), float16],
                  Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] {
            %6 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16], %FunctionVar_0_11: Tensor[(600, 32, 64), float16],
                     PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] {
              nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True)
            };
            %6(%y_3_i0, %y_3_i1)
          };
          %3 = %2(%x3, meta[relay.Constant][2]);
          (%1, %3)
        }
        """,
        "from_string",
        None,
        metatable,
    )
expected_pretty_printed_output_mod = r"""def @main(%x0: Tensor[(1600, 768), float16] /* ty=Tensor[(1600, 768), float16] span=index:0:5 */, %x3: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:1:18 */) -> (Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) {
%0 = nn.dense(%x0, meta[relay.Constant][0] /* ty=Tensor[(2304, 768), float16] span=index:4:5 */, units=2304) /* ty=Tensor[(1600, 2304), float16] span=index:5:7 */;
%2 = fn (%y_3_i0: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:8:15 */, %y_3_i1: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:9:15 */, Inline=1, Compiler="cublas", global_symbol="tvmgen_default_cublas_main_3", Primitive=1) -> Tensor[(600, 32, 32), float16] {
%1 = fn (%FunctionVar_0_01: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:10:13 */, %FunctionVar_0_11: Tensor[(600, 32, 64), float16] /* ty=Tensor[(600, 32, 64), float16] span=index:11:13 */, PartitionedFromPattern="nn.batch_matmul_", Composite="cublas.batch_matmul") -> Tensor[(600, 32, 32), float16] {
nn.batch_matmul(%FunctionVar_0_01, %FunctionVar_0_11, out_dtype="float16", transpose_b=True) /* ty=Tensor[(600, 32, 32), float16] span=index:13:14 */
} /* ty=fn (Tensor[(600, 32, 64), float16], Tensor[(600, 32, 64), float16]) -> Tensor[(600, 32, 32), float16] span=index:14:15 */;
%1(%y_3_i0, %y_3_i1) /* ty=Tensor[(600, 32, 32), float16] span=index:15:16 */
} /* ty=fn (Tensor[(600, 32, 64), float16], Tensor[(600, 32, 64), float16]) -> Tensor[(600, 32, 32), float16] span=index:16:18 */;
%3 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(2304), float16] span=index:6:7 */) /* ty=Tensor[(1600, 2304), float16] span=index:7:19 */;
%4 = %2(%x3, meta[relay.Constant][2] /* ty=Tensor[(600, 32, 64), float16] span=index:17:18 */) /* ty=Tensor[(600, 32, 32), float16] span=index:18:19 */;
(%3, %4) /* ty=(Tensor[(1600, 2304), float16], Tensor[(600, 32, 32), float16]) span=index:19:20 */
}
"""
def test_capture_index_in_spans():
    """The pass should stamp every sub-expression's span with post-dfs indexes."""
    mod_after_pass = tvm.relay.transform.CapturePostDfsIndexInSpans()(METHOD_NAME())
    assert str(mod_after_pass) == expected_pretty_printed_output_mod
if __name__ == "__main__":
tvm.testing.main() |
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""Test the build commands escaping and execution."""
import os
import shutil
import tempfile
import unittest
from codechecker_analyzer.analyzers import analyzer_base
from codechecker_analyzer.buildlog import build_manager
from codechecker_analyzer.buildlog import log_parser
class BuildCmdTest(unittest.TestCase):
    """
    Test the build command escaping and execution.
    """

    @classmethod
    def setup_class(cls):
        """
        Make a temporary directory and generate a source file
        which will be built.
        """
        cls.tmp_dir = tempfile.mkdtemp()
        cls.src_file_name = "main.cpp"
        cls.src_file_path = os.path.join(cls.tmp_dir,
                                         cls.src_file_name)
        cls.compiler = "clang++"
        # The source prints MYPATH, which the tests inject via escaped -D flags.
        with open(cls.src_file_path, "w",
                  encoding="utf-8", errors="ignore") as test_src:
            test_src.write("""
            #include <iostream>
            #ifndef MYPATH
            #define MYPATH "/some/path"
            #endif
            int main(){
            std::cout<< MYPATH << std::endl;
            return 0;
            }""")

    @classmethod
    def teardown_class(cls):
        """
        Clean temporary directory and files.
        """
        dir_to_clean = cls.tmp_dir
        shutil.rmtree(dir_to_clean)

    def __get_cmp_json(self, buildcmd):
        """
        Generate a compile command json file.
        """
        # Single-entry compilation database pointing at the generated main.cpp.
        compile_cmd = {"directory": self.tmp_dir,
                       "command": buildcmd + " -c " + self.src_file_path,
                       "file": self.src_file_path}
        return [compile_cmd]

    def test_buildmgr(self):
        """
        Check some simple command to be executed by
        the build manager.
        """
        cmd = 'cd ' + self.tmp_dir + ' && echo "test"'
        print("Running: " + cmd)
        ret_val = build_manager.execute_buildcmd(cmd)
        self.assertEqual(ret_val, 0)

    def METHOD_NAME(self):
        """
        Test the process execution by the analyzer,
        If the escaping fails the source file will not compile.
        """
        # Double-quoted define embedded in single quotes.
        compile_cmd = self.compiler + \
            ' -DDEBUG \'-DMYPATH="/this/some/path/"\''
        comp_actions, _ = log_parser.\
            parse_unique_log(self.__get_cmp_json(compile_cmd), self.tmp_dir)
        for comp_action in comp_actions:
            cmd = [self.compiler]
            cmd.extend(comp_action.analyzer_options)
            cmd.append(str(comp_action.source))
            cwd = comp_action.directory
            print(cmd)
            print(cwd)
            # A zero exit code means the escaped options round-tripped intact.
            ret_val, stdout, stderr = analyzer_base.SourceAnalyzer \
                .run_proc(cmd, cwd=cwd)
            print(stdout)
            print(stderr)
            self.assertEqual(ret_val, 0)

    def test_analyzer_ansic_double_quote(self):
        """
        Test the process execution by the analyzer with ansi-C like
        escape characters in it \".
        If the escaping fails the source file will not compile.
        """
        compile_cmd = self.compiler + ''' '-DMYPATH=\"/some/other/path\"' '''
        comp_actions, _ = log_parser.\
            parse_unique_log(self.__get_cmp_json(compile_cmd), self.tmp_dir)
        for comp_action in comp_actions:
            cmd = [self.compiler]
            cmd.extend(comp_action.analyzer_options)
            cmd.append(str(comp_action.source))
            cwd = comp_action.directory
            print(cmd)
            print(cwd)
            ret_val, stdout, stderr = analyzer_base.SourceAnalyzer \
                .run_proc(cmd, cwd=cwd)
            print(stdout)
            print(stderr)
            self.assertEqual(ret_val, 0)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import datetime
import json
import logging
import os
import random
from azure.monitor.opentelemetry.exporter._utils import PeriodicTask
logger = logging.getLogger(__name__)
def _fmt(timestamp):
return timestamp.strftime("%Y-%m-%dT%H%M%S.%f")
def _now():
return datetime.datetime.utcnow()
def _seconds(seconds):
return datetime.timedelta(seconds=seconds)
# pylint: disable=broad-except
class LocalFileBlob:
    """A batch of telemetry items persisted as one line-delimited JSON file.

    A ``@<timestamp>.lock`` suffix on the file name marks the blob as leased
    until that timestamp.  All operations deliberately swallow exceptions:
    local persistence is best-effort and must never break the exporter.
    """

    def __init__(self, fullpath):
        # Absolute path of the backing file (possibly carrying a .lock suffix).
        self.fullpath = fullpath

    def delete(self):
        """Remove the backing file; failures are ignored."""
        try:
            os.remove(self.fullpath)
        except Exception:
            pass  # keep silent

    def get(self):
        """Return the stored items as a tuple, or None on any failure."""
        try:
            with open(self.fullpath, "r", encoding="utf-8") as file:
                return tuple(
                    json.loads(line.strip()) for line in file.readlines()
                )
        except Exception:
            pass  # keep silent
        return None

    def put(self, data, lease_period=0):
        """Write *data* (iterable of JSON-serializable items), one per line.

        Writes to a ``.tmp`` file first, then atomically renames it into
        place; with a *lease_period* the final name already carries the lock
        suffix.  Returns self on success, None on any failure.
        """
        try:
            fullpath = self.fullpath + ".tmp"
            with open(fullpath, "w", encoding="utf-8") as file:
                for item in data:
                    file.write(json.dumps(item))
                    # The official Python doc: Do not use os.linesep as a line
                    # terminator when writing files opened in text mode (the
                    # default); use a single '\n' instead, on all platforms.
                    file.write("\n")
            if lease_period:
                # Lease immediately: encode the lease deadline in the name.
                timestamp = _now() + _seconds(lease_period)
                self.fullpath += "@{}.lock".format(_fmt(timestamp))
            os.rename(fullpath, self.fullpath)
            return self
        except Exception:
            pass  # keep silent
        return None

    def lease(self, period):
        """(Re)acquire a lease for *period* seconds by renaming the file.

        Returns self on success, None if the rename failed (e.g. the blob
        was grabbed by another process in the meantime).
        """
        timestamp = _now() + _seconds(period)
        fullpath = self.fullpath
        if fullpath.endswith(".lock"):
            # Strip the previous lease deadline before appending the new one.
            fullpath = fullpath[: fullpath.rindex("@")]
        fullpath += "@{}.lock".format(_fmt(timestamp))
        try:
            os.rename(self.fullpath, fullpath)
        except Exception:
            return None
        self.fullpath = fullpath
        return self
# pylint: disable=broad-except
class LocalFileStorage:
    """Directory-backed queue of :class:`LocalFileBlob` files.

    A periodic background task performs maintenance (expired-lease release,
    retention cleanup); the directory's total size is capped at *max_size*.
    """

    def __init__(
        self,
        path,
        max_size=50 * 1024 * 1024,  # 50MiB
        maintenance_period=60,  # 1 minute
        retention_period=48 * 60 * 60,  # 48 hours
        write_timeout=60,  # 1 minute
        name=None,
        lease_period=60,  # 1 minute
    ):
        self._path = os.path.abspath(path)
        self._max_size = max_size
        self._retention_period = retention_period
        self._write_timeout = write_timeout
        # One maintenance pass up front, then keep a periodic daemon task going.
        self.METHOD_NAME()
        self._maintenance_task = PeriodicTask(
            interval=maintenance_period,
            function=self.METHOD_NAME,
            name=name,
        )
        self._lease_period = lease_period
        self._maintenance_task.daemon = True
        self._maintenance_task.start()

    def close(self):
        """Stop and join the background maintenance task."""
        self._maintenance_task.cancel()
        self._maintenance_task.join()

    def __enter__(self):
        return self

    # pylint: disable=redefined-builtin
    def __exit__(self, type, value, traceback):
        self.close()

    # pylint: disable=unused-variable
    def METHOD_NAME(self):
        """Maintenance pass: draining gets() releases expired leases and
        deletes timed-out / retired files as a side effect of the scan."""
        try:
            # pylint: disable=unused-variable
            for blob in self.gets():
                pass  # keep silent
        except Exception:
            pass  # keep silent

    def gets(self):
        """Yield every available (non-leased) blob, oldest first.

        While scanning, also: delete ``.tmp`` files older than the write
        timeout, rename ``.lock`` files whose lease expired back to blobs,
        and delete ``.blob`` files older than the retention period.
        """
        now = _now()
        lease_deadline = _fmt(now)
        retention_deadline = _fmt(now - _seconds(self._retention_period))
        timeout_deadline = _fmt(now - _seconds(self._write_timeout))
        try:
            # Names start with _fmt() timestamps, so lexical sort order is
            # chronological and string comparison against deadlines works.
            for name in sorted(os.listdir(self._path)):
                path = os.path.join(self._path, name)
                if not os.path.isfile(path):
                    continue  # skip if not a file
                if path.endswith(".tmp"):
                    if name < timeout_deadline:
                        try:
                            os.remove(path)  # TODO: log data loss
                        except Exception:
                            pass  # keep silent
                if path.endswith(".lock"):
                    # Lease deadline is embedded between '@' and '.lock'.
                    if path[path.rindex("@") + 1: -5] > lease_deadline:
                        continue  # under lease
                    new_path = path[: path.rindex("@")]
                    try:
                        os.rename(path, new_path)
                    except Exception:
                        pass  # keep silent
                    path = new_path
                if path.endswith(".blob"):
                    if name < retention_deadline:
                        try:
                            os.remove(path)  # TODO: log data loss
                        except Exception:
                            pass  # keep silent
                    else:
                        yield LocalFileBlob(path)
        except Exception:
            pass  # keep silent

    def get(self):
        """Return the oldest available blob, or None when there is none."""
        cursor = self.gets()
        try:
            return next(cursor)
        except StopIteration:
            pass
        return None

    def put(self, data, lease_period=None):
        """Persist *data* as a new blob; returns the blob, or None on failure."""
        # Create path if it doesn't exist
        try:
            if not os.path.isdir(self._path):
                os.makedirs(self._path, exist_ok=True)
        except Exception:
            pass  # keep silent
        if not self._check_storage_size():
            return None
        blob = LocalFileBlob(
            os.path.join(
                self._path,
                "{}-{}.blob".format(
                    _fmt(_now()),
                    "{:08x}".format(
                        random.getrandbits(32)
                    ),  # thread-safe random
                ),
            )
        )
        if lease_period is None:
            lease_period = self._lease_period
        return blob.put(data, lease_period=lease_period)

    def _check_storage_size(self):
        """Return True while the directory's total size is below max_size."""
        size = 0
        # pylint: disable=unused-variable
        for dirpath, dirnames, filenames in os.walk(self._path):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                # skip if it is symbolic link
                if not os.path.islink(path):
                    try:
                        size += os.path.getsize(path)
                    except OSError:
                        logger.error(
                            "Path %s does not exist or is inaccessible.",
                            path,
                        )
                        continue
                    if size >= self._max_size:
                        # pylint: disable=logging-format-interpolation
                        logger.warning(
                            "Persistent storage max capacity has been "
                            "reached. Currently at {}KB. Telemetry will be "
                            "lost. Please consider increasing the value of "
                            "'storage_max_size' in exporter config.".format(
                                str(size / 1024)
                            )
                        )
                        return False
        return True
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import time
import numpy as np
from pymeasure.adapters import FakeAdapter
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import strict_discrete_set
class FakeInstrument(Instrument):
    """ Provides a fake implementation of the Instrument class
    for testing purposes.
    """

    def __init__(self, adapter=None, name="Fake Instrument", includeSCPI=False, **kwargs):
        # The *adapter* argument is ignored: communication always goes
        # through a FakeAdapter, which echoes written values back on read.
        super().__init__(
            FakeAdapter(**kwargs),
            name,
            includeSCPI=includeSCPI,
            **kwargs
        )

    @staticmethod
    def control(get_command, set_command, docs,
                validator=lambda v, vs: v, values=(), map_values=False,
                get_process=lambda v: v, set_process=lambda v: v,
                check_set_errors=False, check_get_errors=False,
                **kwargs):
        """Fake Instrument.control.

        Strip commands and only store and return values indicated by
        format strings to mimic many simple commands.
        This is analogous how the tests in test_instrument are handled.
        """
        # Regex search to find all printf-style format specifiers in the command
        fmt_spec_pattern = r'(%[\w.#-+ *]*[diouxXeEfFgGcrsa%])'
        match = re.findall(fmt_spec_pattern, set_command)
        if match:
            # Join every specifier so multi-value commands keep all values.
            format_specifier = ','.join(match)
        else:
            format_specifier = ''
        # To preserve as much functionality as possible, call the real
        # control method with modified get_command and set_command.
        return Instrument.control(get_command="",
                                  set_command=format_specifier,
                                  docs=docs,
                                  validator=validator,
                                  values=values,
                                  map_values=map_values,
                                  get_process=get_process,
                                  set_process=set_process,
                                  check_set_errors=check_set_errors,
                                  check_get_errors=check_get_errors,
                                  **kwargs)
class SwissArmyFake(FakeInstrument):
    """Dummy instrument class useful for testing.
    Like a Swiss Army knife, this class provides multi-tool functionality in the form of streams
    of multiple types of fake data. Data streams that can currently be generated by this class
    include 'voltages', sinusoidal 'waveforms', and mono channel 'image data'.
    """

    def __init__(self, name="Mock instrument", wait=.1, **kwargs):
        super().__init__(
            name=name,
            includeSCPI=False,
            **kwargs
        )
        self._wait = wait            # simulated access delay in seconds
        self._tstart = 0             # 0 means the elapsed-time clock is not started
        self._voltage = 10
        self._output_voltage = 0
        self._time = 0
        # NOTE(review): this samples the `wave` property once at construction
        # (which also starts the clock); _wave is not read back anywhere in
        # this class -- confirm intent.
        self._wave = self.wave
        self._units = {'voltage': 'V',
                       'output_voltage': 'V',
                       'time': 's',
                       'wave': 'a.u.'}
        # mock image attributes
        self._w = 1920
        self._h = 1080
        self._frame_format = "mono_8"

    @property
    def time(self):
        """Control the elapsed time."""
        # First read arms the clock; later reads return seconds since then.
        if self._tstart == 0:
            self._tstart = time.time()
        self._time = time.time() - self._tstart
        return self._time

    @time.setter
    def time(self, value):
        # Setting 0 re-arms the clock; any other value busy-waits until the
        # elapsed time reaches it.
        if value == 0:
            self._tstart = 0
        else:
            while self.time < value:
                time.sleep(0.001)

    @property
    def wave(self):
        """Measure a waveform."""
        # Sine of the elapsed time, so repeated reads trace a sinusoid.
        return float(np.sin(self.time))

    @property
    def voltage(self):
        """Measure the voltage."""
        time.sleep(self._wait)
        return self._voltage

    @property
    def output_voltage(self):
        """Control the voltage."""
        return self._output_voltage

    @output_voltage.setter
    def output_voltage(self, value):
        time.sleep(self._wait)
        self._output_voltage = value

    @property
    def frame_width(self):
        """Control frame width in pixels."""
        time.sleep(self._wait)
        return self._w

    @frame_width.setter
    def frame_width(self, w):
        time.sleep(self._wait)
        self._w = w

    @property
    def frame_height(self):
        """Control frame height in pixels."""
        time.sleep(self._wait)
        return self._h

    @frame_height.setter
    def frame_height(self, h):
        time.sleep(self._wait)
        self._h = h

    @property
    def frame_format(self):
        """Control the format for image data returned from the get_frame() method.
        Allowed values are:
        mono_8: single channel 8-bit image.
        mono_16: single channel 16-bit image.
        """
        time.sleep(self._wait)
        return self._frame_format

    @frame_format.setter
    def frame_format(self, form):
        # strict_discrete_set raises on anything outside the allowed formats.
        allowed_formats = ["mono_8", "mono_16"]
        strict_discrete_set(form, allowed_formats)
        self._frame_format = form

    @property
    def METHOD_NAME(self):
        """Get a new image frame."""
        # Random-noise frame scaled to the full range of the current format.
        im_format_maxval_dict = {"8": 255, "16": 65535}
        im_format_type_dict = {"8": np.uint8, "16": np.uint16}
        bit_depth = self.frame_format.split("_")[1]
        time.sleep(self._wait)
        return np.array(
            im_format_maxval_dict[bit_depth] * np.random.rand(self.frame_height, self.frame_width),
            dtype=im_format_type_dict[bit_depth]
        )
import os
import tempfile
from wandb.sdk.launch.github_reference import GitHubReference
def test_parse_bad() -> None:
    """Expected parse failures, None result."""
    for bad_url in ("not a url", "http://github.com"):  # second one is not HTTPS
        assert GitHubReference.parse(bad_url) is None
def test_parse_ssh() -> None:
    """We should be able to parse and reconstruct an SSH reference."""
    ssh_url = "git@github.com:wandb/examples.git"
    parsed = GitHubReference.parse(ssh_url)
    assert parsed.host == "github.com"
    assert parsed.organization == "wandb"
    assert parsed.repo == "examples"
    assert parsed.path is None
    # Round-trip: the SSH form is reconstructed verbatim.
    assert parsed.repo_ssh == ssh_url
def test_parse_organization() -> None:
    """Should parse URLs that only have an organization."""
    org_urls = [
        "https://github.com/wandb",
        # Only half-heartedly parsing non-repo URLs for now - don't support reconstructing this
        "https://github.com/orgs/wandb/people",
    ]
    for org_url in org_urls:
        parsed = GitHubReference.parse(org_url)
        assert parsed.host == "github.com"
        assert parsed.organization == "wandb"
def METHOD_NAME() -> None:
    """Should support non-github.com hosts."""
    enterprise_url = "https://github.foo.bar.com/wandb/examples"
    parsed = GitHubReference.parse(enterprise_url)
    assert parsed.host == "github.foo.bar.com"
    assert parsed.organization == "wandb"
    assert parsed.repo == "examples"
    assert parsed.url == enterprise_url
def test_parse_repo() -> None:
    """Should parse URLs that have an organization and a repo."""
    # Special case: the reconstructed url must not carry the .git extension.
    git_url = "https://github.com/wandb/examples.git"
    parsed = GitHubReference.parse(git_url)
    assert parsed.host == "github.com"
    assert parsed.organization == "wandb"
    assert parsed.repo == "examples"

    repo_urls = [
        "https://github.com/wandb/examples",
        "https://github.com/wandb/examples/pulls",
        "https://github.com/wandb/examples/tree/master/examples/launch/launch-quickstart",
        "https://github.com/wandb/examples/blob/master/examples/launch/launch-quickstart/README.md",
        "https://github.com/wandb/examples/blob/other-branch/examples/launch/launch-quickstart/README.md",
    ]
    for repo_url in repo_urls:
        # Everything after the optional view segment is the reference path.
        path_after_view = "/".join(repo_url.split("/")[6:])
        parsed = GitHubReference.parse(repo_url)
        assert parsed.host == "github.com"
        assert parsed.organization == "wandb"
        assert parsed.repo == "examples"
        assert parsed.url == repo_url
        assert parsed.path == path_after_view
def test_parse_tree() -> None:
    """Should parse a URL for viewing a dir."""
    tree_url = "https://github.com/wandb/examples/tree/master/examples/launch/launch-quickstart"
    parsed = GitHubReference.parse(tree_url)
    assert parsed.host == "github.com"
    assert parsed.organization == "wandb"
    assert parsed.repo == "examples"
    assert parsed.view == "tree"
    assert parsed.path == "master/examples/launch/launch-quickstart"
    assert parsed.url == tree_url
def test_parse_blob() -> None:
    """Should parse a URL for viewing a file."""
    blob_url = "https://github.com/wandb/examples/blob/master/examples/launch/launch-quickstart/README.md"
    parsed = GitHubReference.parse(blob_url)
    assert parsed.host == "github.com"
    assert parsed.organization == "wandb"
    assert parsed.repo == "examples"
    assert parsed.view == "blob"
    assert parsed.path == "master/examples/launch/launch-quickstart/README.md"
    assert parsed.url == blob_url
def test_parse_auth() -> None:
    """Should parse a URL that includes a username/password."""

    def check(auth_url, expected_password):
        parsed = GitHubReference.parse(auth_url)
        assert parsed.username == "username"
        assert parsed.password == expected_password
        assert parsed.host == "github.com"
        assert parsed.organization == "wandb"
        assert parsed.repo == "examples"
        assert parsed.view == "blob"
        assert parsed.path == "commit/path/entry.py"
        # Credentials are preserved when the URL is reconstructed.
        assert parsed.url == auth_url

    # Username only, then username:password.
    check("https://username@github.com/wandb/examples/blob/commit/path/entry.py", None)
    check("https://username:pword@github.com/wandb/examples/blob/commit/path/entry.py", "pword")
def test_update_ref() -> None:
    """Test reference updating."""
    case = "https://github.com/jamie-rasmussen/launch-test-private/blob/main/haspyenv/today.py"
    ref = GitHubReference.parse(case)
    # Simulate parsing refinement after fetch: split path into ref/dir/file.
    ref.path = None
    ref.ref = "main"
    ref.directory = "haspyenv"
    ref.file = "today.py"
    ref.update_ref("jamie/testing-a-branch")
    assert ref.ref_type is None
    assert ref.ref == "jamie/testing-a-branch"
    # The reconstructed URL should embed the new branch in place of "main".
    expected = "https://github.com/jamie-rasmussen/launch-test-private/blob/jamie/testing-a-branch/haspyenv/today.py"
    assert ref.url == expected
def test_get_commit(monkeypatch) -> None:
    """Test getting commit from reference."""

    def mock_fetch_repo(self, dst_dir):
        # mock dumping a file to the local clone of the repo
        os.makedirs(os.path.join(dst_dir, "commit/path/"), exist_ok=True)
        with open(os.path.join(dst_dir, "commit/path/requirements.txt"), "w") as f:
            f.write("wandb\n")
        self.commit_hash = "1234567890"
        self._update_path(dst_dir)

    case = "https://username:pword@github.com/wandb/mock-examples-123/blob/commit/path/requirements.txt"
    ref = GitHubReference.parse(case)
    # Replace the real network fetch with the local mock above.
    monkeypatch.setattr(GitHubReference, "fetch", mock_fetch_repo)
    # confirm basic asserts
    assert ref.repo == "mock-examples-123"
    assert ref.view == "blob"
    assert ref.path == "commit/path/requirements.txt"
    tmpdir = tempfile.TemporaryDirectory()
    ref.fetch(dst_dir=tmpdir.name)
    assert ref.directory == "commit/path/"
    local_dir = os.path.join(tmpdir.name, ref.directory)
    assert os.path.exists(local_dir)
    assert os.path.exists(os.path.join(local_dir, "requirements.txt"))
    assert ref.commit_hash == "1234567890"
    req_path = os.path.join(local_dir, "requirements.txt")
    # Dropping the reference and cleaning up must remove the checkout files.
    del ref
    tmpdir.cleanup()
    assert not os.path.exists(req_path)
import os
import time
from cement.core.controller import CementBaseController, expose
from wo.core.download import WODownload
from wo.core.logging import Log
from wo.core.variables import WOVar
def wo_update_hook(app):
    """Hook placeholder registered with the Cement app; intentionally a no-op."""
    pass
class WOUpdateController(CementBaseController):
    """Cement controller implementing ``wo update``: self-update WordOps by
    downloading and running the upstream install script."""

    class Meta:
        label = 'wo_update'
        stacked_on = 'base'
        aliases = ['update']
        aliases_only = True
        stacked_type = 'nested'
        description = ('update WordOps to latest version')
        arguments = [
            (['--force'],
                dict(help='Force WordOps update', action='store_true')),
            (['--beta'],
                dict(help='Update WordOps to latest mainline release '
                     '(same than --mainline)',
                     action='store_true')),
            (['--mainline'],
                dict(help='Update WordOps to latest mainline release',
                     action='store_true')),
            (['--branch'],
                # The original `action='store' or 'store_const'` always
                # evaluated to 'store'; spell it directly. With nargs='?',
                # `--branch` with no value falls back to const='develop'.
                dict(help="Update WordOps from a specific repository branch ",
                     action='store', const='develop', nargs='?')),
            (['--travis'],
                dict(help='Argument used only for WordOps development',
                     action='store_true')),
        ]
        usage = "wo update [options]"

    @expose(hide=True)
    def default(self):
        """Check for a newer release, confirm with the user, then download
        and execute the install script to perform the update."""
        pargs = self.app.pargs
        filename = "woupdate" + time.strftime("%Y%m%d-%H%M%S")
        install_args = ""
        wo_branch = "master"
        if pargs.mainline or pargs.beta:
            wo_branch = "mainline"
            install_args = install_args + "--mainline "
        elif pargs.branch:
            wo_branch = pargs.branch
            install_args = install_args + "-b {0} ".format(wo_branch)
        if pargs.force:
            install_args = install_args + "--force "
        if pargs.travis:
            install_args = install_args + "--travis "
            wo_branch = "updating-configuration"
        # Latest release tag; fetched lazily so the GitHub API is hit at most
        # once per invocation.
        wo_latest = None
        # check if WordOps already up-to-date
        if ((not pargs.force) and (not pargs.travis) and
                (not pargs.mainline) and (not pargs.beta) and
                (not pargs.branch)):
            wo_current = ("v{0}".format(WOVar.wo_version))
            wo_latest = WODownload.latest_release(self, "WordOps/WordOps")
            if wo_current == wo_latest:
                Log.info(
                    self, "WordOps {0} is already installed"
                    .format(wo_latest))
                self.app.close(0)
        # prompt user before starting upgrade
        if not pargs.force:
            # BUGFIX: wo_latest used to be referenced here without being
            # defined whenever --travis/--mainline/--beta/--branch skipped
            # the up-to-date check above, raising NameError. Fetch it on
            # demand instead.
            if wo_latest is None:
                wo_latest = WODownload.latest_release(self, "WordOps/WordOps")
            Log.info(
                self, "WordOps changelog available on "
                "https://github.com/WordOps/WordOps/releases/tag/{0}"
                .format(wo_latest))
            start_upgrade = input("Do you want to continue:[y/N]")
            if start_upgrade not in ("Y", "y"):
                Log.error(self, "Not starting WordOps update")
        # download the install/update script
        if not os.path.isdir('/var/lib/wo/tmp'):
            os.makedirs('/var/lib/wo/tmp')
        WODownload.download(self, [["https://raw.githubusercontent.com/"
                                    "WordOps/WordOps/{0}/install"
                                    .format(wo_branch),
                                    "/var/lib/wo/tmp/{0}".format(filename),
                                    "update script"]])
        # launch install script
        if os.path.isfile('install'):
            # Running from a source checkout: prefer the local script.
            Log.info(self, "updating WordOps from local install\n")
            try:
                Log.info(self, "updating WordOps, please wait...")
                os.system("/bin/bash install --travis")
            except OSError as e:
                Log.debug(self, str(e))
                Log.error(self, "WordOps update failed !")
        else:
            try:
                Log.info(self, "updating WordOps, please wait...")
                os.system("/bin/bash /var/lib/wo/tmp/{0} "
                          "{1}".format(filename, install_args))
            except OSError as e:
                Log.debug(self, str(e))
                Log.error(self, "WordOps update failed !")
            os.remove("/var/lib/wo/tmp/{0}".format(filename))
def METHOD_NAME(app):
    """Cement plugin entry point: wire this plugin into the application."""
    # register the plugin class.. this only happens if the plugin is enabled
    app.handler.register(WOUpdateController)
    # register a hook (function) to run after arguments are parsed.
    app.hook.register('post_argument_parsing', wo_update_hook)
6,791 | compare poolcapabilities xml | import logging as log
from avocado.utils import process
from virttest import libvirt_vm
from virttest import virsh
from virttest.libvirt_xml import pool_capability_xml
from virttest import libvirt_version
# Using as lower capital is not the best way to do, but this is just a
# workaround to avoid changing the entire file.
logging = log.getLogger('avocado.' + __name__)
def run(test, params, env):
    """
    Test the command virsh pool-capabilities
    (1) Call virsh pool-capabilities
    (2) Call virsh pool-capabilities with an unexpected option
    """
    def METHOD_NAME(source):
        """
        Compare new output of pool-capability with the standard one
        (1) Dict the new pool capability XML
        (2) Compare with the standard XML dict
        """
        cap_xml = pool_capability_xml.PoolcapabilityXML()
        cap_xml.xml = source
        # Check the pool capability xml
        pool_capa = cap_xml.get_pool_capabilities()
        logging.debug(pool_capa)
        pool_type_list = ['dir', 'fs', 'netfs', 'logical', 'disk', 'iscsi', 'iscsi-direct',
                          'scsi', 'mpath', 'rbd', 'sheepdog', 'gluster', 'zfs', 'vstorage']
        # No pool type outside the known list may be reported.
        for pooltype in pool_capa.keys():
            if pooltype not in pool_type_list:
                test.fail("'%s' is not expected in pool-capability" % (pooltype))
        # Reference mapping of pool type -> supported pool/volume formats.
        pool_type_info_dict = {'dir': {'pool_default_format_name': [],
                                       'raw': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc', 'vdi',
                                               'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed', 'vmdk']},
                               'fs': {'auto': ['auto', 'ext2', 'ext3', 'ext4', 'ufs', 'iso9660', 'udf', 'gfs', 'gfs2',
                                               'vfat', 'hfs+', 'xfs', 'ocfs2', 'vmfs'],
                                      'raw': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc', 'vdi',
                                              'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed', 'vmdk']},
                               'netfs': {'auto': ['auto', 'nfs', 'glusterfs', 'cifs'],
                                         'raw': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc', 'vdi',
                                                 'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed', 'vmdk']},
                               'logical': {'lvm2': ['unknown', 'lvm2'], 'vol_default_format_name': []},
                               'disk': {'unknown': ['unknown', 'dos', 'dvh', 'gpt', 'mac', 'bsd', 'pc98', 'sun',
                                                    'lvm2'],
                                        'none': ['none', 'linux', 'fat16', 'fat32', 'linux-swap', 'linux-lvm',
                                                 'linux-raid', 'extended']},
                               'iscsi': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'iscsi-direct': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'scsi': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'mpath': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'rbd': {'pool_default_format_name': []},
                               'sheepdog': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'gluster': {'pool_default_format_name': [],
                                           'raw': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc',
                                                   'vdi', 'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed',
                                                   'vmdk']},
                               'zfs': {'pool_default_format_name': [], 'vol_default_format_name': []},
                               'vstorage': {'pool_default_format_name': [],
                                            'raw': ['none', 'raw', 'dir', 'bochs', 'cloop', 'dmg', 'iso', 'vpc',
                                                    'vdi', 'fat', 'vhd', 'ploop', 'cow', 'qcow', 'qcow2', 'qed',
                                                    'vmdk']}}
        #Check the pool capability information
        if pool_capa != pool_type_info_dict:
            test.fail('Unexpected pool information support occurred,please check the information by manual')
    # Run test case
    option = params.get("virsh_pool_cap_options")
    try:
        output = virsh.pool_capabilities(option, ignore_status=False, debug=True)
        status = 0 # good
    except process.CmdError:
        status = 1 # bad
        output = ''
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            # BUGFIX: the message fragments below lacked a separating space
            # when concatenated ("...%s'doesn't..."), and the last failure
            # message named the wrong command ("virsh capabilities").
            if not libvirt_version.version_compare(5, 0, 0):
                test.fail("Command 'virsh pool-capabilities %s' "
                          "doesn't support in this libvirt version" % option)
            else:
                test.fail("Command 'virsh pool-capabilities %s' "
                          "succeeded (incorrect command)" % option)
    elif status_error == "no":
        METHOD_NAME(output)
        if status != 0:
            test.fail("Command 'virsh pool-capabilities %s' failed "
                      "(correct command)" % option)
6,792 | list runtime versions | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._runtime_versions_operations import build_list_runtime_versions_request
# Generic type of the deserialized model handed to the `cls` callback.
T = TypeVar("T")
# Signature of the optional `cls` response callback accepted by operations:
# (pipeline_response, deserialized_model, response_headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RuntimeVersionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.appplatform.v2022_05_01_preview.aio.AppPlatformManagementClient`'s
        :attr:`runtime_versions` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # NOTE: auto-generated plumbing. The management client injects these
        # positionally; the keyword fallbacks allow direct construction.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
    @distributed_trace_async
    async def METHOD_NAME(self, **kwargs: Any) -> _models.AvailableRuntimeVersions:
        """Lists all of the available runtime versions supported by Microsoft.AppPlatform provider.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailableRuntimeVersions or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2022_05_01_preview.models.AvailableRuntimeVersions
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Explicit kwargs/params override the client default api version.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2022-05-01-preview")
        )
        cls: ClsType[_models.AvailableRuntimeVersions] = kwargs.pop("cls", None)
        request = build_list_runtime_versions_request(
            api_version=api_version,
            template_url=self.METHOD_NAME.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AvailableRuntimeVersions", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed via template_url when building the request above.
    METHOD_NAME.metadata = {"url": "/providers/Microsoft.AppPlatform/runtimeVersions"}
6,793 | transform nullable | from __future__ import annotations
import itertools
import re
from abc import ABC, abstractmethod
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Pattern,
Sequence,
Tuple,
TypedDict,
TypeVar,
)
from snuba.clickhouse.formatter.nodes import FormattedQuery
# Description of a single result column: its name plus the ClickHouse type
# string (e.g. "UInt8" or "Nullable(String)").
Column = TypedDict("Column", {"name": str, "type": str}, total=False)
# A single result row, keyed by column name.
Row = Dict[str, Any]
# Shape of a query result as produced by a Reader implementation. All keys
# are optional (total=False); "totals" is only present for queries run
# WITH TOTALS.
Result = TypedDict(
    "Result",
    {
        "meta": List[Column],
        "data": List[Row],
        "totals": Row,
        "profile": Optional[Dict[str, Any]],
        "trace_output": str,
    },
    total=False,
)
def iterate_rows(result: Result) -> Iterator[Row]:
    """Yield every data row of ``result``, followed by the totals row when
    the result carries one."""
    rows: Iterator[Row] = iter(result["data"])
    if "totals" in result:
        rows = itertools.chain(rows, (result["totals"],))
    return rows
def transform_rows(result: Result, transformer: Callable[[Row], Row]) -> None:
    """Mutate ``result`` in place, replacing every data row (and the totals
    row, when present) with the value returned by ``transformer``."""
    rows = result["data"]
    for idx, current in enumerate(rows):
        rows[idx] = transformer(current)
    if "totals" in result:
        result["totals"] = transformer(result["totals"])
# Matches ClickHouse Nullable(...) wrapper types, capturing the inner type.
NULLABLE_RE = re.compile(r"^Nullable\((.+)\)$")
def unwrap_nullable_type(type: str) -> Tuple[bool, str]:
    """Strip a ``Nullable(...)`` wrapper from a ClickHouse type string,
    returning ``(was_nullable, inner_type)``."""
    match = NULLABLE_RE.match(type)
    if match is None:
        return False, type
    return True, match.group(1)
T = TypeVar("T")
R = TypeVar("R")
def METHOD_NAME(
    function: Callable[[T], R]
) -> Callable[[Optional[T]], Optional[R]]:
    """Lift ``function`` into one that passes ``None`` through untouched,
    applying ``function`` only to non-null values."""
    def _wrapped(value: Optional[T]) -> Optional[R]:
        return None if value is None else function(value)
    return _wrapped
def build_result_transformer(
    column_transformations: Sequence[Tuple[Pattern[str], Callable[[Any], Any]]],
) -> Callable[[Result], None]:
    """Build a function that mutates a ``Result`` in place, converting every
    value of each column whose (unwrapped) type matches one of the supplied
    patterns using the paired transformation function. ``Nullable(...)``
    columns have their transformer lifted so ``None`` passes through."""
    def _find_transformer(type_name: str) -> Optional[Callable[[Any], Any]]:
        # First pattern wins, mirroring the priority order of the input list.
        for pattern, candidate in column_transformations:
            if pattern.match(type_name):
                return candidate
        return None
    def transform_result(result: Result) -> None:
        for column in result["meta"]:
            is_nullable, base_type = unwrap_nullable_type(column["type"])
            transformer = _find_transformer(base_type)
            if transformer is None:
                continue
            if is_nullable:
                transformer = METHOD_NAME(transformer)
            column_name = column["name"]
            for row in iterate_rows(result):
                row[column_name] = transformer(row[column_name])
    return transform_result
class Reader(ABC):
    """Abstract interface for executing a formatted ClickHouse query and
    returning a ``Result``."""
    def __init__(
        self, cache_partition_id: Optional[str], query_settings_prefix: Optional[str]
    ) -> None:
        # Optional knobs exposed through the accessors below; both may be None.
        self.__cache_partition_id = cache_partition_id
        self.__query_settings_prefix = query_settings_prefix
    @abstractmethod
    def execute(
        self,
        query: FormattedQuery,
        settings: Optional[Mapping[str, str]] = None,
        with_totals: bool = False,
        robust: bool = False,
        capture_trace: bool = False,
    ) -> Result:
        """Execute a query."""
        raise NotImplementedError
    @property
    def cache_partition_id(self) -> Optional[str]:
        """
        Return the cache partition if there is one.
        TODO: If we double down on having the cache at Clickhouse query level
        we should move the entire caching infrastructure either here or in
        the cluster.
        If we, instead, move the cache towards the logical level, all this
        should go away.
        """
        return self.__cache_partition_id
    def get_query_settings_prefix(self) -> Optional[str]:
        """
        Return the query settings prefix if there is one.
        """
        return self.__query_settings_prefix
6,794 | fill padding pos | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""util functions for eos normalization."""
from typing import Tuple
from lingvo import compat as tf
from lingvo.core import py_utils
import numpy as np
def METHOD_NAME(ids: tf.Tensor, id_len: tf.Tensor,
                padding_value: int) -> tf.Tensor:
  """Given a batch of sequences, fills the padding pos with `padding_value`.
  Args:
    ids: a [B, max_len] int tensor.
    id_len: a [B, ] int tensor.
    padding_value: an int.
  Returns:
    new_ids: new ids with the property.
    - new_ids[b, :id_len[b]] = ids[b, :id_len[b]]
    - new_ids[b, id_len[b]:] = padding_value
  """
  # SequencePaddings is expected to mark padding positions (index >= id_len)
  # with 1.0 per the contract documented above; after the cast `mask` is True
  # exactly at padding positions. TODO(review): confirm against py_utils.
  mask = py_utils.SequencePaddings(id_len, maxlen=tf.shape(ids)[1])
  mask = tf.cast(mask, dtype=tf.bool)
  # Where mask is True pick `padding_value`, elsewhere keep the original id.
  new_ids = tf.where(mask, tf.fill(tf.shape(ids), padding_value), ids)
  return new_ids
def NormalizeTrailingEos(ids: tf.Tensor,
                         id_len: tf.Tensor,
                         need_trailing_eos: bool = True,
                         eos_id: int = 2) -> Tuple[tf.Tensor, tf.Tensor]:
  """Optionally removes/adds the trailing eos symbol.
  Given ids/id_len, return normalized id_len, and also make sure the padding
  positions are filled with eos.
  Specifically,
  - If need_trailing_eos = True and the last symbol is:
    * eos: id_len_eos_normalized = id_len
    * not eos: id_len_eos_normalized = min(id_len + 1, max_label_len)
  - If need_trailing_eos = False and the last symbol is:
    * eos: id_len_eos_normalized = max(id_len - 1, 0)
    * not eos: id_len_eos_normalized = id_len
  Args:
    ids: a [B, max_label_len] int tensor.
    id_len: a [B,] int tensor. `id_len`-1 is the last symbol's 0-based index.
    need_trailing_eos: bool. if True, then the output id_len includes the last
      eos symbol; otherwise, it does not include the last eos.
    eos_id: int. The index of eos symbol.
  Returns:
    new_ids: a [B, max_label_len] int tensor, and it is guaranteed that:
      * new_ids[b, :min(id_len_eos_normalized[b], id_len[b])] =
        ids[b, :min(id_len_eos_normalized[b], id_len[b])]
      * new_ids[b, id_len_eos_normalized[b]:] = eos_id.
    id_len_eos_normalized: a [B, ] int tensor, which indicates eos normalized
      length.
  """
  # Force every padding position to eos first, so the second guarantee in the
  # docstring holds regardless of how id_len is adjusted below.
  new_ids = METHOD_NAME(ids, id_len, padding_value=eos_id)
  batch_size, max_len = py_utils.GetShape(new_ids, 2)
  # Build [B, 2] coordinates of each sequence's last symbol; the index is
  # clamped at 0 so empty sequences (id_len == 0) don't go negative.
  indices_x = tf.range(batch_size)
  indices_y = tf.maximum(id_len - 1, 0)
  indices = tf.concat([indices_x[:, tf.newaxis], indices_y[:, tf.newaxis]],
                      axis=-1)
  last_token = tf.gather_nd(new_ids, indices)
  last_token_is_eos = tf.equal(last_token, eos_id)
  if need_trailing_eos:
    id_len_eos_normalized = tf.where(last_token_is_eos, id_len, id_len + 1)
    # because we did id_len+1, it is possible that the id_len_eos_normalized
    # is larger than max_label_len, so we need to cap id_len_eos_normalized
    id_len_eos_normalized = tf.minimum(id_len_eos_normalized, max_len)
  else:
    id_len_eos_normalized = tf.where(last_token_is_eos, id_len - 1, id_len)
    # Clamp at 0 for sequences that consisted only of an eos symbol.
    id_len_eos_normalized = tf.maximum(id_len_eos_normalized, 0)
  return new_ids, id_len_eos_normalized
def NumpyNormalizeTrailingEos(ids: np.ndarray,
                              id_len: np.ndarray,
                              need_trailing_eos: bool = True,
                              eos_id: int = 2) -> Tuple[np.ndarray, np.ndarray]:
  """Optionally removes/adds the trailing eos symbol, numpy implementation.
  This is the numpy implementation of `NormalizeTrailingEos`. See more details
  there. As only a reference implementation, it is not optimized but it should
  sever better for readers to understand the logic and for debug purpose
  as well.
  Args:
    ids: a [B, max_label_len] int np.array.
    id_len: a [B,] int np.array. `id_len`-1 is the last symbol's 0-based index.
    need_trailing_eos: bool. if True, then the output id_len includes the last
      eos symbol; otherwise, it does not include the last eos.
    eos_id: int. The index of eos symbol.
  Returns:
    new_ids: a [B, max_label_len] np.array, and it is guaranteed that:
      * new_ids[b, :min(id_len_eos_normalized[b], id_len[b])] =
        ids[b, :min(id_len_eos_normalized[b], id_len[b])]
      * new_ids[b, id_len_eos_normalized[b]:] = eos_id.
    id_len_eos_normalized: a [B, ] np.array, which indicates eos normalized
      length.
  """
  batch_size, max_label_len = ids.shape
  new_ids = np.zeros_like(ids)
  id_len_eos_normalized = np.zeros_like(id_len)
  for row in range(batch_size):
    length = id_len[row]
    if ids[row, length - 1] == eos_id:
      # Sequence already ends in eos: keep or drop it per need_trailing_eos.
      norm_len = length if need_trailing_eos else max(length - 1, 0)
      copy_len = norm_len
    else:
      if need_trailing_eos:
        # Append an eos, capped at the label width.
        norm_len = min(max_label_len, length + 1)
        new_ids[row, norm_len - 1] = eos_id
      else:
        norm_len = length
      copy_len = length
    # Copy the retained prefix from the input, then pad the tail with eos.
    new_ids[row, :copy_len] = ids[row, :copy_len]
    new_ids[row, norm_len:] = eos_id
    id_len_eos_normalized[row] = norm_len
  return new_ids, id_len_eos_normalized
6,795 | get flow | #!/usr/bin/env python
from collections import defaultdict
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from dongtai_common.models.agent import IastAgent
from dongtai_common.models.heartbeat import IastHeartbeat
class AgentSerializer(serializers.ModelSerializer):
    """Read-only serializer for IastAgent list/detail views.

    Several fields are derived from the agent's most recent IastHeartbeat
    row rather than from the agent record itself.
    """

    # NOTE(review): class-level caches shared by every serializer instance
    # for the process lifetime; they are never invalidated, so renamed users
    # or re-addressed servers keep serving stale values.
    USER_MAP = {}
    SERVER_MAP = {}

    system_load = serializers.SerializerMethodField()
    running_status = serializers.SerializerMethodField()
    server = serializers.SerializerMethodField()
    owner = serializers.SerializerMethodField()
    flow = serializers.SerializerMethodField()
    report_queue = serializers.SerializerMethodField()
    method_queue = serializers.SerializerMethodField()
    replay_queue = serializers.SerializerMethodField()
    alias = serializers.SerializerMethodField()
    register_time = serializers.SerializerMethodField()
    latest_time = serializers.SerializerMethodField()

    class Meta:
        model = IastAgent
        fields = [
            "id",
            "token",
            "server",
            "running_status",
            "system_load",
            "owner",
            "latest_time",
            "project_name",
            "is_core_running",
            "language",
            "flow",
            "is_control",
            "report_queue",
            "method_queue",
            "replay_queue",
            "alias",
            "register_time",
            "startup_time",
            "is_audit",
        ]

    def get_latest_heartbeat(self, obj):
        # Memoize the newest heartbeat on the model instance so several
        # fields can share a single query.
        try:
            latest_heartbeat = obj.latest_heartbeat
        except Exception:
            latest_heartbeat = obj.heartbeats.values("dt", "cpu").order_by("-dt").first()
        obj.latest_heartbeat = latest_heartbeat
        return latest_heartbeat

    def get_running_status(self, obj):
        # Map online flag to a translated label; unknown values become "".
        mapping = defaultdict(str)
        mapping.update({1: _("Online"), 0: _("Offline")})
        return mapping[obj.online]

    def get_system_load(self, obj):
        """Return the CPU load from the latest heartbeat, or a placeholder."""
        heartbeat = self.get_latest_heartbeat(obj)
        if heartbeat:
            return heartbeat["cpu"]
        return _("Load data is not uploaded")

    def get_server(self, obj):
        # "ip:port" of the agent's server, cached per server id; a missing or
        # zero port means no traffic has been observed yet.
        def get_server_addr():
            if obj.server_id not in self.SERVER_MAP:
                if obj.server.ip and obj.server.port and obj.server.port != 0:
                    self.SERVER_MAP[obj.server_id] = f"{obj.server.ip}:{obj.server.port}"
                else:
                    return _("No flow is detected by the probe")
            return self.SERVER_MAP[obj.server_id]

        if obj.server_id:
            return get_server_addr()
        return _("No flow is detected by the probe")

    def get_user(self, obj):
        # Username cached per user id (see USER_MAP caveat above).
        if obj.user_id not in self.USER_MAP:
            self.USER_MAP[obj.user_id] = obj.user.get_username()
        return self.USER_MAP[obj.user_id]

    def get_owner(self, obj):
        return self.get_user(obj)

    def METHOD_NAME(self, obj):
        # Request count ("flow") from the agent's latest heartbeat.
        # BUGFIX: order by -dt like the other heartbeat getters below;
        # .first() without an explicit ordering returns an arbitrary row,
        # not the most recent one.
        heartbeat = IastHeartbeat.objects.values("req_count").filter(agent=obj).order_by("-dt").first()
        return heartbeat["req_count"] if heartbeat else 0

    def get_method_queue(self, obj):
        heartbeat = IastHeartbeat.objects.values("method_queue").filter(agent_id=obj.id).order_by("-dt").first()
        return heartbeat["method_queue"] if heartbeat is not None else 0

    def get_report_queue(self, obj):
        heartbeat = IastHeartbeat.objects.values("report_queue").filter(agent_id=obj.id).order_by("-dt").first()
        return heartbeat["report_queue"] if heartbeat is not None else 0

    def get_replay_queue(self, obj):
        heartbeat = IastHeartbeat.objects.values("replay_queue").filter(agent_id=obj.id).order_by("-dt").first()
        return heartbeat["replay_queue"] if heartbeat is not None else 0

    def get_register_time(self, obj):
        # Older agents recorded no register time (0); fall back to last-seen.
        if obj.register_time == 0:
            return obj.latest_time
        return obj.register_time

    def get_alias(self, obj):
        # Fall back to the token when no alias has been set.
        if obj.alias == "":
            return obj.token
        return obj.alias

    def get_latest_time(self, obj):
        # Prefer the timestamp of the newest heartbeat over the stored field.
        latest_heartbeat = obj.heartbeats.values_list("dt", flat=True).order_by("-dt").first()
        if latest_heartbeat:
            return latest_heartbeat
        return obj.latest_time
class ProjectEngineSerializer(serializers.ModelSerializer):
    """Minimal agent representation used for project engine listings."""
    class Meta:
        model = IastAgent
        fields = ["id", "token", "is_core_running"]
class AgentToggleArgsSerializer(serializers.Serializer):
    """Arguments for toggling agents: a single ``id`` or comma-separated ``ids``."""
    id = serializers.IntegerField(help_text=_("The id corresponding to the agent."))
    ids = serializers.CharField(help_text=_('The id corresponding to the agent, use"," for segmentation.'))
class AgentInstallArgsSerializer(serializers.Serializer):
    """Arguments for triggering an agent install by agent id."""
    id = serializers.IntegerField(help_text=_("The id corresponding to the agent."))
6,796 | test make position full | from datetime import datetime
from zavod.context import Context
from zavod.meta import Dataset, get_catalog
from zavod.helpers.positions import make_position, make_occupancy
def test_make_position(testdataset1: Dataset):
    """Positions with the same name get distinct IDs when any other
    identity-relevant field (country, inception date) differs."""
    context = Context(testdataset1)
    title = "Minister of finance"
    germany = make_position(context, name=title, country="de")
    germany_dated = make_position(
        context, name=title, country="de", inception_date="2021-01-01"
    )
    britain = make_position(context, name=title, country="uk")
    # The inception date and the country both feed the ID.
    assert germany.id != germany_dated.id
    assert germany.id != britain.id
    # The name property itself is identical across countries.
    assert germany.get("name") == britain.get("name")
def METHOD_NAME(testdataset1: Dataset):
    """Exercise every optional field of make_position and verify each lands
    on the expected property; a wikidata_id becomes the entity ID."""
    context = Context(testdataset1)
    org = context.make("Organization")
    org.id = "myorg"
    one_with_everything = make_position(
        context,
        name="boss",
        country="de",
        description="desc",
        summary="sum",
        subnational_area="subnat",
        organization=org,
        inception_date="2021-01-01",
        dissolution_date="2021-01-02",
        number_of_seats="5",
        wikidata_id="Q123",
        source_url="http://example.com",
        lang="en",
    )
    # The wikidata QID overrides the derived osv- hash ID.
    assert one_with_everything.id == "Q123"
    assert one_with_everything.get("name") == ["boss"]
    assert one_with_everything.get("country") == ["de"]
    assert one_with_everything.get("description") == ["desc"]
    assert one_with_everything.get("summary") == ["sum"]
    assert one_with_everything.get("subnationalArea") == ["subnat"]
    assert one_with_everything.get("organization") == ["myorg"]
    assert one_with_everything.get("inceptionDate") == ["2021-01-01"]
    assert one_with_everything.get("dissolutionDate") == ["2021-01-02"]
    assert one_with_everything.get("numberOfSeats") == ["5"]
    assert one_with_everything.get("wikidataId") == ["Q123"]
    # Note: URL is normalized with a trailing slash on storage.
    assert one_with_everything.get("sourceUrl") == ["http://example.com/"]
def test_make_occupancy(testdataset1: Dataset):
    """Occupancy links person and position and enriches the person."""
    context = Context(testdataset1)
    position = make_position(context, name="A position", country="ls")
    person = context.make("Person")
    person.id = "thabo"
    # Build an occupancy with every supported field populated.
    occupancy = make_occupancy(
        context,
        person=person,
        position=position,
        no_end_implies_current=True,
        current_time=datetime(2021, 1, 3),
        start_date="2021-01-01",
        end_date="2021-01-02",
    )
    # The ID is a deterministic hash of holder/post/dates.
    assert occupancy.id == "osv-0675000c8483d6a9163a48e4eb222fd5e4a2a886"
    assert occupancy.get("holder") == ["thabo"]
    assert occupancy.get("post") == ["osv-40a302b7f09ea065880a3c840855681b18ead5a4"]
    assert occupancy.get("startDate") == ["2021-01-01"]
    assert occupancy.get("endDate") == ["2021-01-02"]
    # The end date precedes current_time, so the status is ended.
    assert occupancy.get("status") == ["ended"]
    # The person inherits country and PEP topic from the position.
    assert person.get("country") == ["ls"]
    assert person.get("topics") == ["role.pep"]
def test_occupancy_not_same_start_end_id(testdataset1: Dataset):
    """Test that an occupancy with the same start but no end, and one with the
    same end but no start, don't end up with the same ID. This occurs in the wild
    when a source has an unknown start date, ends a term, then starts the next
    term."""
    context = Context(testdataset1)
    pos = make_position(context, name="A position", country="ls")
    person = context.make("Person")
    person.id = "thabo"
    # Shorthand: only the start/end dates vary between the two occupancies.
    def make(implies, start, end):
        return make_occupancy(
            context, person, pos, implies, datetime(2021, 1, 1), start, end
        )
    current_no_end = make(True, "2020-01-01", None)
    assert current_no_end.get("status") == ["current"]
    ended_no_start = make(True, None, "2020-01-01")
    assert ended_no_start.get("status") == ["ended"]
    # Same date value in different slots must not collide.
    assert ended_no_start.id != current_no_end.id
def test_occupancy_dataset_coverage():
    # If coverage end is in the future, we trust the future end date
    dataset1 = Dataset(get_catalog(), {
        "name": "dataset1",
        "title": "Dataset 1",
        "coverage": {"end": "2021-01-04"}
    })
    context1 = Context(dataset1)
    pos = make_position(context1, name="A position", country="ls")
    person = context1.make("Person")
    person.id = "thabo"
    occupancy1 = make_occupancy(
        context1,
        person=person,
        position=pos,
        current_time=datetime(2021, 1, 3),
        start_date="2021-01-01",
        end_date="2021-01-05",
    )
    assert occupancy1.get("status") == ["current"]
    # If coverage end date has passed, we don't trust the future end date
    dataset2 = Dataset(get_catalog(), {
        "name": "dataset2",
        "title": "Dataset 2",
        "coverage": {"end": "2021-01-02"}
    })
    context2 = Context(dataset2)
    pos2 = make_position(context2, name="A position", country="ls")
    person2 = context2.make("Person")
    person2.id = "thabo"
    # Same dates as above, but the dataset's coverage has already ended.
    occupancy2 = make_occupancy(
        context2,
        person=person2,
        position=pos2,
        current_time=datetime(2021, 1, 3),
        start_date="2021-01-01",
        end_date="2021-01-05",
    )
    assert occupancy2.get("status") == ["unknown"]
6,797 | get gcc like platform | #
# File : gcc.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2018-05-22 Bernard The first version
import os
import re
import platform
def GetGCCRoot(rtconfig):
    """Return the toolchain's target root directory for *rtconfig*.

    For a system compiler (EXEC_PATH == /usr/bin) this lives under
    /usr/lib/<target>; for a standalone toolchain it is <exec>/../<target>.
    """
    target = rtconfig.PREFIX
    # The prefix usually ends in '-' (e.g. "arm-none-eabi-"); strip it once.
    if target.endswith('-'):
        target = target[:-1]
    if rtconfig.EXEC_PATH == '/usr/bin':
        return os.path.join('/usr/lib', target)
    return os.path.join(rtconfig.EXEC_PATH, '..', target)
def CheckHeader(rtconfig, filename):
    """Return True if *filename* exists in one of the toolchain include dirs."""
    root = GetGCCRoot(rtconfig)
    target = rtconfig.PREFIX
    if target.endswith('-'):
        target = target[:-1]
    # Usually the cross compiling gcc toolchain has directory as:
    #
    # bin
    # lib
    # share
    # arm-none-eabi
    #    bin
    #    include
    #    lib
    #    share
    # so check the flat include dir first, then the nested <target>/include.
    candidates = (
        os.path.join(root, 'include', filename),
        os.path.join(root, target, 'include', filename),
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return True
    return False
# GCC like means the toolchains which are compatible with GCC
def METHOD_NAME():
    """Return the platform names whose CLI is compatible with GCC."""
    return ['gcc', 'armclang', 'llvm-arm']
def GetNewLibVersion(rtconfig):
    """Return the newlib version string bundled with the toolchain, or None.

    Looks for _NEWLIB_VERSION in _newlib_version.h (newer newlib) first,
    then falls back to newlib.h.
    """
    root = GetGCCRoot(rtconfig)
    # Prefer _newlib_version.h over newlib.h, matching newlib's own layout.
    if CheckHeader(rtconfig, '_newlib_version.h'):
        header = os.path.join(root, 'include', '_newlib_version.h')
    elif CheckHeader(rtconfig, 'newlib.h'):
        header = os.path.join(root, 'include', 'newlib.h')
    else:
        return None
    version = None
    # BUGFIX: the file handle was previously never closed (and the `if f:`
    # truthiness check was pointless -- open() raises on failure). Use a
    # context manager so the handle is always released.
    with open(header, 'r') as f:
        for line in f:
            # Last quoted _NEWLIB_VERSION definition wins, as before.
            if line.find('_NEWLIB_VERSION') != -1 and line.find('"') != -1:
                version = re.search(r'\"([^"]+)\"', line).groups()[0]
    return version
# FIXME: there is no musl version or musl macros can be found officially
def GetMuslVersion(rtconfig):
    """Return 'unknown' when the toolchain targets musl, else None.

    musl publishes no official version macro, so detection is limited to
    checking the toolchain prefix.
    """
    return 'unknown' if 'musl' in rtconfig.PREFIX else None
def GCCResult(rtconfig, str):
    """Preprocess the C snippet *str* with the configured cross compiler and
    derive a block of '#define HAVE_... 1' feature-test lines (plus version,
    STDC and pthread information) from the preprocessor output.
    """
    import subprocess

    result = ''
    gcc_cmd = os.path.join(rtconfig.EXEC_PATH, rtconfig.CC)

    # use temp file to get more information
    with open('__tmp.c', 'w') as f:
        f.write(str)

    have_fdset = 0
    have_sigaction = 0
    have_sigevent = 0
    have_siginfo = 0
    have_sigval = 0
    version = None
    stdc = '1989'
    posix_thread = 0

    try:
        # '-fdirectives-only',
        if platform.system() == 'Windows':
            child = subprocess.Popen([gcc_cmd, '-E', '-P', '__tmp.c'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, shell=True)
        else:
            child = subprocess.Popen(gcc_cmd + ' -E -P __tmp.c',
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, shell=True)
        stdout, stderr = child.communicate()
        # print(stdout)
        if stderr:
            print(stderr)

        for line in stdout.split(b'\n'):
            line = line.decode()
            if re.search('fd_set', line):
                have_fdset = 1
            # check for signal-related type declarations
            # BUGFIX: regexes are now raw strings; '\*' and '\d' in plain
            # strings are invalid escape sequences (SyntaxWarning on
            # modern Python).
            if re.search(r'struct[ \t]+sigaction', line):
                have_sigaction = 1
            if re.search(r'struct[ \t]+sigevent', line):
                have_sigevent = 1
            if re.search('siginfo_t', line):
                have_siginfo = 1
            if re.search(r'union[ \t]+sigval', line):
                have_sigval = 1
            if re.search(r'char\* version', line):
                version = re.search(r'\"([^"]+)\"', line).groups()[0]
            if re.findall(r'iso_c_visible = [\d]+', line):
                stdc = re.findall(r'[\d]+', line)[0]
            if re.findall('pthread_create', line):
                posix_thread = 1
    finally:
        # BUGFIX: previously the temp file leaked when the compiler
        # invocation raised; always clean it up.
        os.remove('__tmp.c')

    if have_fdset:
        result += '#define HAVE_FDSET 1\n'
    if have_sigaction:
        result += '#define HAVE_SIGACTION 1\n'
    if have_sigevent:
        result += '#define HAVE_SIGEVENT 1\n'
    if have_siginfo:
        result += '#define HAVE_SIGINFO 1\n'
    if have_sigval:
        result += '#define HAVE_SIGVAL 1\n'
    if version:
        result += '#define GCC_VERSION_STR "%s"\n' % version
    result += '#define STDC "%s"\n' % stdc
    if posix_thread:
        result += '#define LIBC_POSIX_THREADS 1\n'
    return result
6,798 | run client | #!//usr/bin/python3
# To monitor traffic (confirm encryption over the wire)
# sudo tcpdump -A -i lo port 8765
# As an alternate to using this script with the run_client argument
# curl -v -k --cacert ./certs/selfsigned.crt --key ./certs/private.key https://127.0.0.1:8765/hey/you
#
# To run this script
# test/manual/testssl.py create_certs
# ./tcpserver 9109
# LD_PRELOAD=lib/linux/libscope.so SCOPE_EVENT_HTTP=true test/manual/testssl.py start_server
# LD_PRELOAD=lib/linux/libscope.so SCOPE_EVENT_HTTP=true test/manual/testssl.py run_client
import os
import socket
import ssl
CERT_DIR = './certs'
CERT_FILE = CERT_DIR + '/selfsigned.crt'
KEY_FILE = CERT_DIR + '/private.key'
PORT = 8765
def run_main():
    """Dispatch to the operation named by the single command-line argument."""
    print("Running {}...".format(script))
    # map each legal argument to its handler
    handlers = {
        "help": print_help,
        "create_certs": create_certs,
        "delete_certs": delete_certs,
        "start_server": start_server,
        "run_client": METHOD_NAME,
    }
    handler = handlers.get(arg1)
    if handler is not None:
        handler()
    else:
        print('{} exiting with unknown argument {}...\n'.format(script, arg1))
        print_help()
    exit('{} exiting successfully.'.format(script))
def create_certs():
    """Generate a self-signed X.509 certificate and RSA private key under CERT_DIR.

    Writes PEM files to CERT_FILE and KEY_FILE for use by start_server()
    and the client.
    """
    print("create_certs")
    # was os.system('mkdir -p ...'): no need to shell out for directory creation
    os.makedirs(CERT_DIR, exist_ok=True)
    # https://stackoverflow.com/questions/27164354/create-a-self-signed-x509-certificate-in-python
    from OpenSSL import crypto
    from socket import gethostname
    # create a key pair
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 1024)
    # create a self-signed cert
    cert = crypto.X509()
    cert.get_subject().C = "US"
    cert.get_subject().ST = "CA"
    cert.get_subject().L = "SanFrancisco"
    cert.get_subject().O = "CRIBL"
    cert.get_subject().OU = "CRIBL"
    cert.get_subject().CN = gethostname()
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(10*365*24*60*60)  # valid ~10 years
    cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject
    cert.set_pubkey(k)
    # NOTE(review): sha1 + 1024-bit RSA are weak; kept unchanged so existing
    # certs/clients in this manual test setup keep working.
    cert.sign(k, 'sha1')
    with open(CERT_FILE, "wt") as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))
    with open(KEY_FILE, "wt") as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))
def delete_certs():
    """Remove CERT_DIR and everything in it (no-op if it does not exist)."""
    print("delete_certs")
    import shutil  # local import; replaces os.system('rm -rf ...') shell-out
    # ignore_errors=True matches rm -rf: silent success on a missing directory
    shutil.rmtree(CERT_DIR, ignore_errors=True)
def start_server():
    """Accept one TLS connection on PORT, print the request, reply 200 OK."""
    print("start_server")
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(CERT_FILE, KEY_FILE)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
        # BUGFIX: SO_REUSEADDR must be set *before* bind() to have any effect;
        # the original set it after bind, so quick restarts could still fail
        # with EADDRINUSE.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('0.0.0.0', PORT))
        sock.listen(5)
        with context.wrap_socket(sock, server_side=True) as ssock:
            conn, addr = ssock.accept()
            print("trying to receive something!")
            data = conn.recv(1024).decode('utf-8')
            print("received {}".format(data))
            conn.send("HTTP/1.1 200 OK\r\n\r\n".encode('utf-8'))
            print("sent reply")
            conn.close()
def METHOD_NAME():
    """Connect to the local TLS server, send one HTTP GET, print the reply."""
    print("run_client")
    # PROTOCOL_TLS_CLIENT requires valid cert chain and hostname
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_verify_locations(CERT_FILE)
    request = ("GET /hey/you HTTP/1.1\r\n"
               "Host: 127.0.0.1:8765\r\n"
               "User-Agent: me\r\n"
               "Accept: */*\r\n"
               "\r\n")
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as raw_sock:
        with ctx.wrap_socket(raw_sock, server_hostname=socket.gethostname()) as tls_sock:
            print("trying to send something!")
            tls_sock.connect(('127.0.0.1', PORT))
            tls_sock.send(request.encode('utf-8'))
            reply = tls_sock.recv(4096)
            print("received: " + reply.decode('utf-8'))
            tls_sock.close()
def print_help():
    """List the operations this script accepts on the command line."""
    print(" Legal operations:")
    for operation in ("create_certs", "delete_certs", "start_server", "run_client"):
        print(" {} {}".format(script, operation))
from sys import argv

# Expect exactly two argv entries: the script name and one operation.
try:
    script, arg1 = argv
except ValueError:  # was a bare `except:`; only the unpack failure is expected here
    script = argv[0]
    if len(argv) == 1:
        print('{} exiting with missing argument...\n'.format(script))
    else:
        print('{} exiting with too many arguments...\n'.format(script))
    print_help()
    exit()

run_main()
6,799 | flatten metadata | import base64
import collections.abc
from datetime import datetime, timedelta
import hashlib
import hmac
import json
import logging
import pytz
import requests
from zentral.core.events import event_from_event_d
from zentral.core.stores.backends.base import BaseEventStore
logger = logging.getLogger('zentral.core.stores.backends.azure_log_analytics')
def datetime_to_iso8601z_truncated_to_milliseconds(dt):
    """Serialize *dt* as ISO 8601 with millisecond precision and a 'Z' suffix.

    Aware datetimes are converted to UTC first; naive ones are assumed UTC
    and passed through unchanged.
    """
    microseconds = dt.microsecond
    if microseconds:
        milliseconds = round(microseconds / 1000)
        if milliseconds == 1000:
            # rounding spilled over into the next second
            dt = dt.replace(microsecond=0) + timedelta(seconds=1)
        else:
            dt = dt.replace(microsecond=milliseconds * 1000)
    # convert to UTC only when the datetime is aware
    if dt.utcoffset() is not None:
        dt = dt.astimezone(pytz.utc)
    # drop the tzinfo so isoformat() emits no offset, then serialize
    iso = dt.replace(tzinfo=None).isoformat()
    # isoformat() gives 6 fractional digits; keep only the milliseconds
    if "." in iso:
        iso = iso[:-3]
    # append the pseudo time zone marker
    return "{}Z".format(iso)
class EventStore(BaseEventStore):
    """Ship Zentral events to an Azure Log Analytics workspace.

    Uses the HTTP Data Collector API: each request is authenticated with an
    HMAC-SHA256 signature over a canonical request string.
    """

    log_type = "ZentralEvent"
    content_type = "application/json"
    resource = "/api/logs"
    url_template = "https://{customer_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01"

    def __init__(self, config_d):
        super().__init__(config_d)
        # The customer ID to your Log Analytics workspace ID
        self.customer_id = config_d["customer_id"]
        # For the shared key, use either the primary or the secondary Connected Sources client authentication key
        self._shared_key = config_d["shared_key"]
        self._decoded_shared_key = base64.b64decode(self._shared_key)
        # reusable HTTP session with the static headers pre-set
        session = requests.Session()
        session.headers.update({
            "Content-Type": self.content_type,
            "Log-Type": self.log_type,
            "time-generated-field": "CreatedAt",  # ISO 8601
        })
        self._session = session
        self._url = self.url_template.format(customer_id=self.customer_id)

    def METHOD_NAME(self, metadata, parent_key=''):
        """Flatten nested metadata into a single dict with CamelCase keys."""
        flattened = {}
        for key, value in metadata.items():
            # snake_case -> CamelCase for the exported field name
            camel_key = "".join(part.title() for part in key.split("_"))
            full_key = parent_key + camel_key if parent_key else camel_key
            if isinstance(value, collections.abc.MutableMapping):
                # recurse into nested mappings, prefixing with the parent key
                flattened.update(self.METHOD_NAME(value, full_key))
            else:
                flattened[full_key] = value
        return flattened

    def _prepare_event(self, event):
        """Serialize *event* into the single-element list the API expects."""
        event_d = event if isinstance(event, dict) else event.serialize()
        metadata = event_d.pop("_zentral")
        # fix created_at format for use as TimeGenerated field via the time-generated-field header
        metadata["created_at"] = datetime_to_iso8601z_truncated_to_milliseconds(event.metadata.created_at)
        # flattened metadata becomes the top-level fields
        azure_event = self.METHOD_NAME(metadata)
        # the rest of the event payload rides along under Properties
        azure_event["Properties"] = event_d
        return [azure_event]

    def _build_signature(self, rfc1123_date, content_length):
        """Return the base64 HMAC-SHA256 signature for one POST request."""
        # the order of the fields below is mandated by the Data Collector API
        canonical = "\n".join([
            "POST",
            str(content_length),
            self.content_type,
            'x-ms-date:' + rfc1123_date,
            self.resource
        ])
        digest = hmac.new(
            self._decoded_shared_key,
            canonical.encode("utf-8"),
            digestmod=hashlib.sha256
        ).digest()
        return base64.b64encode(digest)

    def store(self, event):
        """Build and send one signed POST request carrying *event*."""
        if isinstance(event, dict):
            event = event_from_event_d(event)
        payload = json.dumps(self._prepare_event(event)).encode("utf-8")
        rfc1123_date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        signature = self._build_signature(rfc1123_date, len(payload))
        self._session.headers.update({
            'Authorization': "SharedKey {}:{}".format(self.customer_id, signature.decode("utf-8")),
            'x-ms-date': rfc1123_date,
        })
        response = self._session.post(self._url, data=payload)
        response.raise_for_status()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.