Dataset schema (one row per source file; for string and list columns, Min/Max refer to lengths; categorical columns report their number of distinct classes):

| Column | Type | Min | Max |
|---|---|---|---|
| blob_id | string (length) | 40 | 40 |
| directory_id | string (length) | 40 | 40 |
| path | string (length) | 4 | 721 |
| content_id | string (length) | 40 | 40 |
| detected_licenses | list (length) | 0 | 57 |
| license_type | string (2 classes) | | |
| repo_name | string (length) | 5 | 91 |
| snapshot_id | string (length) | 40 | 40 |
| revision_id | string (length) | 40 | 40 |
| branch_name | string (321 classes) | | |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 | 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 | 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 | 2023-09-06 06:22:19 |
| github_id | int64 | 426 | 681M |
| star_events_count | int64 | 101 | 243k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (23 classes) | | |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 | 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 | 2023-08-10 11:14:58 |
| gha_language | string (147 classes) | | |
| src_encoding | string (26 classes) | | |
| language | string (2 classes) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 6 | 10.2M |
| extension | string (115 classes) | | |
| filename | string (length) | 3 | 113 |
| content | string (length) | 6 | 10.2M |
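A minimal sketch of how rows with this schema might be filtered once loaded as plain Python dicts. The `rows` sample reuses values from the first record below; the helper name and the size threshold are illustrative assumptions, not part of the dataset:

```python
# Hypothetical filter over rows shaped like the schema above.
def keep(row: dict) -> bool:
    """Keep permissive, human-written Python files under 100 kB."""
    return (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 100_000
    )

rows = [{
    "repo_name": "google/tf-quant-finance",
    "license_type": "permissive",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 7656,
}]
print([r["repo_name"] for r in rows if keep(r)])  # ['google/tf-quant-finance']
```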
---
blob_id: 463f6076099191b156d838db5d1ee88674c7defd
directory_id: 812045c3ec6587827aeb18bde666237dfffc21ae
path: /tf_quant_finance/math/jacobian_test.py
content_id: 1938c180a9ec208e7b3293a8637f86e07206c349
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"]
license_type: permissive
repo_name: google/tf-quant-finance
snapshot_id: 2062082c85e8679b71e69bbeb579fe338c1b0288
revision_id: 0d3a2193c0f2d320b65e602cf01d7a617da484df
branch_name: refs/heads/master
visit_date: 2023-08-31T01:58:15.415811
revision_date: 2023-08-15T07:37:46
committer_date: 2023-08-15T07:38:22
github_id: 198,669,252
star_events_count: 4,165
fork_events_count: 557
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-04T19:25:55
gha_created_at: 2019-07-24T16:09:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,656
extension: py
filename: jacobian_test.py
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for math.jacobian.py."""
import functools
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
def quadratic(p, x):
"""Quadratic function."""
a = tf.expand_dims(p[..., 0], axis=-1)
b = tf.expand_dims(p[..., 1], axis=-1)
c = tf.expand_dims(p[..., 2], axis=-1)
return a * x**2 + b * x + c
def multivariate_quadratic(p, x, y):
"""Multivariate quadratic function."""
a = tf.expand_dims(p[..., 0], axis=-1)
b = tf.expand_dims(p[..., 1], axis=-1)
c = tf.expand_dims(p[..., 2], axis=-1)
d = tf.expand_dims(p[..., 3], axis=-1)
return a * x**2 + b * x * y + c * y**2 + d
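# The expected Jacobians asserted below follow analytically: for `quadratic`,
# df/d[a, b, c] = [x**2, x, 1], so at x = [1, 2] the rows are [1, 1, 1] and
# [4, 2, 1]; for `multivariate_quadratic`, df/d[a, b, c, d] =
# [x**2, x*y, y**2, 1], so at x = [1, 2], y = [-3, -2] the rows are
# [1, -3, 9, 1] and [4, -4, 4, 1].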
@test_util.run_all_in_graph_and_eager_modes
class JacobianTest(parameterized.TestCase, tf.test.TestCase):
"""Jacobian test cases."""
@parameterized.named_parameters(
{
"testcase_name": "SinglePrecision",
"dtype": tf.float32
}, {
"testcase_name": "DoublePrecision",
"dtype": tf.float64
},
)
def test_jacobian(self, dtype):
"""Test function jacobian."""
# Shape [2]
x = tf.range(1, 3, dtype=dtype)
func = functools.partial(quadratic, x=x)
with self.subTest("Quadratic"):
with self.subTest("SingleTensor"):
# Shape [3]
ps = tf.constant([1.0, 2.0, -1.0], dtype=dtype)
# Shape [2, 3]
expected_jacobian = [[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]]
jacobian = self.evaluate(tff.math.jacobian(func, ps))
self.assertEqual(jacobian.shape, (2, 3))
np.testing.assert_allclose(jacobian, expected_jacobian)
with self.subTest("BatchedTensor"):
# Shape [2, 3]
ps = tf.constant([[1.0, 2.0, -1.0], [2.0, 0.0, 0.0]], dtype=dtype)
# Shape [2, 2, 3]
expected_jacobian = [
[[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]],
[[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]],
]
jacobian = self.evaluate(tff.math.jacobian(func, ps))
self.assertEqual(jacobian.shape, (2, 2, 3))
np.testing.assert_allclose(jacobian, expected_jacobian)
x = tf.range(1, 3, dtype=dtype)
y = tf.range(-3, -1, dtype=dtype)
func = functools.partial(multivariate_quadratic, x=x, y=y)
with self.subTest("MultivariateQuadratic"):
with self.subTest("SingleTensor"):
# Shape [4]
ps = tf.constant([1.0, 2.0, -1.0, 0.0], dtype=dtype)
# Shape [2, 4]
expected_jacobian = [[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]]
jacobian = self.evaluate(tff.math.jacobian(func, ps))
self.assertEqual(jacobian.shape, (2, 4))
np.testing.assert_allclose(jacobian, expected_jacobian)
with self.subTest("BatchedTensor"):
# Shape [2, 4]
ps = tf.constant(
[[1.0, 2.0, -1.0, 0.0], [1.0, 2.0, -1.0, 0.0]], dtype=dtype
)
# Shape [2, 2, 4]
expected_jacobian = [
[[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]],
[[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]],
]
jacobian = self.evaluate(tff.math.jacobian(func, ps))
self.assertEqual(jacobian.shape, (2, 2, 4))
np.testing.assert_allclose(jacobian, expected_jacobian)
@parameterized.named_parameters(
{
"testcase_name": "SinglePrecision",
"dtype": tf.float32
}, {
"testcase_name": "DoublePrecision",
"dtype": tf.float64
},
)
def test_value_and_jacobian(self, dtype):
"""Test function value_and_jacobian."""
# Shape [2]
x = tf.range(1, 3, dtype=dtype)
func = functools.partial(quadratic, x=x)
with self.subTest("Quadratic"):
with self.subTest("SingleTensor"):
func = functools.partial(quadratic, x=x)
# Shape [3]
ps = tf.constant([1.0, 2.0, -1.0], dtype=dtype)
values, jacobian = self.evaluate(
tff.math.value_and_jacobian(func, ps))
with self.subTest("Values"):
# Shape [2]
expected_values = [2.0, 7.0]
self.assertEqual(values.shape, (2,))
np.testing.assert_allclose(values, expected_values)
with self.subTest("Jacobian"):
# Shape [2, 3]
expected_jacobian = [[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]]
self.assertEqual(jacobian.shape, (2, 3,))
np.testing.assert_allclose(jacobian, expected_jacobian)
with self.subTest("BatchedTensor"):
func = functools.partial(quadratic, x=x)
# Shape [2, 3]
ps = tf.constant([[1.0, 2.0, -1.0], [2.0, 0.0, 0.0]], dtype=dtype)
values, jacobian = self.evaluate(
tff.math.value_and_jacobian(func, ps))
with self.subTest("Values"):
# Shape [2, 2]
expected_values = [[2.0, 7.0], [2.0, 8.0]]
self.assertEqual(values.shape, (2, 2,))
np.testing.assert_allclose(values, expected_values)
with self.subTest("Jacobian"):
# Shape [2, 2, 3]
expected_jacobian = [
[[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]],
[[1.0, 1.0, 1.0], [4.0, 2.0, 1.0]],
]
self.assertEqual(jacobian.shape, (2, 2, 3))
np.testing.assert_allclose(jacobian, expected_jacobian)
x = tf.range(1, 3, dtype=dtype)
y = tf.range(-3, -1, dtype=dtype)
func = functools.partial(multivariate_quadratic, x=x, y=y)
with self.subTest("MultivariateQuadratic"):
with self.subTest("SingleTensor"):
# Shape [4]
ps = tf.constant([1.0, 2.0, -1.0, 0.0], dtype=dtype)
values, jacobian = self.evaluate(
tff.math.value_and_jacobian(func, ps)
)
with self.subTest("Values"):
# Shape [2]
expected_values = [-14.0, -8.0]
self.assertEqual(values.shape, (2,))
np.testing.assert_allclose(values, expected_values)
with self.subTest("Jacobian"):
# Shape [2, 4]
expected_jacobian = [[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]]
self.assertEqual(jacobian.shape, (2, 4))
np.testing.assert_allclose(jacobian, expected_jacobian)
with self.subTest("BatchedTensor"):
# Shape [2, 4]
ps = tf.constant(
[[1.0, 2.0, -1.0, 0.0], [0.0, 1.0, -2.0, 1.0]], dtype=dtype
)
values, jacobian = self.evaluate(
tff.math.value_and_jacobian(func, ps)
)
with self.subTest("Values"):
# Shape [2, 2]
expected_values = [[-14.0, -8.0], [-20.0, -11.0]]
self.assertEqual(values.shape, (2, 2,))
np.testing.assert_allclose(values, expected_values)
with self.subTest("Jacobian"):
# Shape [2, 2, 4]
expected_jacobian = [
[[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]],
[[1.0, -3.0, 9.0, 1.0], [4.0, -4.0, 4.0, 1.0]],
]
self.assertEqual(jacobian.shape, (2, 2, 4))
np.testing.assert_allclose(jacobian, expected_jacobian)
if __name__ == "__main__":
tf.test.main()
---
blob_id: c7584775f51dbbdaf7e4140f8e0376910e08c43a
directory_id: bf3d811c3e4b783a377d4c3bdf2bebcffb26d37a
path: /custom_components/custom_updater/__init__.py
content_id: 4992797b04c48bd57b06eae1df8c03a2e2e09000
detected_licenses: ["MIT"]
license_type: permissive
repo_name: custom-components/custom_updater
snapshot_id: 332258778c3c8e7b7eefacfcd7b4df09abec2703
revision_id: 63f4d5c98ef2f8945e9aaf31dc52a1f107182793
branch_name: refs/heads/master
visit_date: 2021-06-22T01:42:43.755494
revision_date: 2021-01-18T14:15:06
committer_date: 2021-01-18T14:15:06
github_id: 141,934,343
star_events_count: 222
fork_events_count: 94
gha_license_id: MIT
gha_event_created_at: 2019-05-22T13:09:12
gha_created_at: 2018-07-22T22:11:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 11,859
extension: py
filename: __init__.py
content:
"""
A component which allows you to update your custom cards and components.
For more details about this component, please refer to the documentation at
https://github.com/custom-components/custom_updater
"""
import logging
import os.path
from datetime import timedelta
import voluptuous as vol
from aiohttp import web
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.helpers.config_validation as cv
from homeassistant.components.http import HomeAssistantView
from homeassistant.util import sanitize_path
from homeassistant.helpers.event import async_track_time_interval
VERSION = '6.0.1'
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pyupdate==1.4.0']
CONF_TRACK = 'track'
CONF_HIDE_SENSOR = 'hide_sensor'
CONF_SHOW_INSTALLABLE = 'show_installable'
CONF_CARD_CONFIG_URLS = 'card_urls'
CONF_COMPONENT_CONFIG_URLS = 'component_urls'
CONF_PYTHON_SCRIPT_CONFIG_URLS = 'python_script_urls'
DOMAIN = 'custom_updater'
INTERVAL = timedelta(days=1)
ATTR_CARD = 'card'
ATTR_COMPONENT = 'component'
ATTR_ELEMENT = 'element'
DEFAULT_TRACK = ['components', 'cards']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_TRACK, default=DEFAULT_TRACK):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HIDE_SENSOR, default=False): cv.boolean,
vol.Optional(CONF_SHOW_INSTALLABLE, default=False): cv.boolean,
vol.Optional(CONF_CARD_CONFIG_URLS, default=[]):
vol.All(cv.ensure_list, [cv.url]),
vol.Optional(CONF_COMPONENT_CONFIG_URLS, default=[]):
vol.All(cv.ensure_list, [cv.url]),
vol.Optional(CONF_PYTHON_SCRIPT_CONFIG_URLS, default=[]):
vol.All(cv.ensure_list, [cv.url]),
})
}, extra=vol.ALLOW_EXTRA)
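# A hedged configuration.yaml sketch matching CONFIG_SCHEMA above
# (the URL is a placeholder, not a real card registry):
#
#   custom_updater:
#     track:
#       - cards
#       - components
#     hide_sensor: false
#     card_urls:
#       - https://example.com/custom_cards.json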
async def async_setup(hass, config):
"""Set up this component."""
conf_mode = config.get('lovelace', {}).get('mode', 'storage')
conf_track = config[DOMAIN][CONF_TRACK]
conf_hide_sensor = config[DOMAIN][CONF_HIDE_SENSOR]
conf_card_urls = config[DOMAIN][CONF_CARD_CONFIG_URLS]
conf_component_urls = config[DOMAIN][CONF_COMPONENT_CONFIG_URLS]
conf_py_script_urls = config[DOMAIN][CONF_PYTHON_SCRIPT_CONFIG_URLS]
_LOGGER.debug('Version %s', VERSION)
_LOGGER.debug('Mode %s', conf_mode)
_LOGGER.error("This integration is deprecated, and is no longer maintained."
"As an alternative have a look at HACS https://hacs.xyz")
hass.http.register_view(CustomCardsView(str(hass.config.path())))
if conf_mode == 'yaml':
if not os.path.exists("{}/ui-lovelace.yaml".format(str(hass.config.path()))):
_LOGGER.warning(
"Configured to run with yaml mode but ui-lovelace.yaml does not exist, assuming storage is used")
conf_mode = 'storage'
if 'cards' in conf_track:
card_controller = CustomCards(
hass, conf_hide_sensor, conf_card_urls, conf_mode)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, card_controller.extra_init())
async_track_time_interval(
hass, card_controller.force_reload, INTERVAL)
if 'components' in conf_track:
components_controller = CustomComponents(
hass, conf_hide_sensor, conf_component_urls)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, components_controller.extra_init())
async_track_time_interval(
hass, components_controller.cache_versions, INTERVAL)
if 'python_scripts' in conf_track:
python_scripts_controller = CustomPythonScripts(
hass, conf_hide_sensor, conf_py_script_urls)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, python_scripts_controller.extra_init())
async_track_time_interval(
hass, python_scripts_controller.cache_versions, INTERVAL)
async def check_all_service(call):
"""Set up service for manual trigger."""
if 'cards' in conf_track:
await card_controller.force_reload()
if 'components' in conf_track:
await components_controller.cache_versions()
if 'python_scripts' in conf_track:
await python_scripts_controller.cache_versions()
async def update_all_service(call):
"""Set up service for manual trigger."""
if 'cards' in conf_track:
await card_controller.update_all()
if 'components' in conf_track:
await components_controller.update_all()
if 'python_scripts' in conf_track:
await python_scripts_controller.update_all()
async def install_service(call):
"""Install single component/card."""
element = call.data.get(ATTR_ELEMENT)
_LOGGER.debug('Installing %s', element)
if 'cards' in conf_track:
await card_controller.install(element)
if 'components' in conf_track:
await components_controller.install(element)
if 'python_scripts' in conf_track:
await python_scripts_controller.install(element)
hass.services.async_register(DOMAIN, 'check_all', check_all_service)
hass.services.async_register(DOMAIN, 'update_all', update_all_service)
hass.services.async_register(DOMAIN, 'install', install_service)
return True
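# Illustrative service calls for the registrations above (YAML; the element
# name is a placeholder):
#
#   service: custom_updater.check_all
#
#   service: custom_updater.install
#   data:
#     element: example-card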
class CustomCards():
"""Custom cards controller."""
# pylint: disable=too-many-instance-attributes
def __init__(self, hass, conf_hide_sensor, conf_card_urls,
conf_mode):
"""Initialize."""
_LOGGER.debug('CustomCards - __init__')
from pyupdate.ha_custom.custom_cards import CustomCards as Cards
self.hass = hass
self.ha_conf_dir = str(hass.config.path())
self.hidden = conf_hide_sensor
self.pyupdate = Cards(self.ha_conf_dir, conf_mode, '', conf_card_urls)
async def extra_init(self):
"""Additional init."""
_LOGGER.debug('CustomCards - extra_init')
await self.pyupdate.init_local_data()
await self.cache_versions()
async def force_reload(self, now=None):
"""Force data refresh"""
_LOGGER.debug('CustomCards - force_reload')
await self.pyupdate.force_reload()
await self.cache_versions()
async def cache_versions(self, now=None):
"""Cache."""
_LOGGER.debug('CustomCards - cache_versions')
information = await self.pyupdate.get_sensor_data()
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_card_tracker', state, attributes)
async def update_all(self):
"""Update all cards."""
_LOGGER.debug('CustomCards - update_all')
await self.pyupdate.update_all()
information = await self.pyupdate.get_sensor_data()
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_card_tracker', state, attributes)
async def install(self, element):
"""Install single card."""
_LOGGER.debug('CustomCards - install')
await self.pyupdate.install(element)
class CustomComponents():
"""Custom components controller."""
# pylint: disable=too-many-instance-attributes
def __init__(self, hass, conf_hide_sensor, conf_component_urls):
"""Initialize."""
_LOGGER.debug('CustomComponents - __init__')
from pyupdate.ha_custom.custom_components import (
CustomComponents as Components)
self.hass = hass
self.ha_conf_dir = str(hass.config.path())
self.hidden = conf_hide_sensor
self.pyupdate = Components(self.ha_conf_dir, conf_component_urls)
async def extra_init(self):
"""Additional init."""
_LOGGER.debug('CustomComponents - extra_init')
await self.cache_versions()
async def cache_versions(self, now=None):
"""Cache."""
_LOGGER.debug('CustomComponents - cache_versions')
information = await self.pyupdate.get_sensor_data(True)
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_component_tracker', state, attributes)
async def update_all(self):
"""Update all components."""
_LOGGER.debug('CustomComponents - update_all')
await self.pyupdate.update_all()
information = await self.pyupdate.get_sensor_data()
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_component_tracker', state, attributes)
async def install(self, element):
"""Install single component."""
_LOGGER.debug('CustomComponents - install')
await self.pyupdate.install(element)
class CustomPythonScripts():
"""Custom python_scripts controller."""
# pylint: disable=too-many-instance-attributes
def __init__(self, hass, conf_hide_sensor, conf_python_script_urls):
"""Initialize."""
_LOGGER.debug('CustomPythonScripts - __init__')
from pyupdate.ha_custom.python_scripts import PythonScripts
self.hass = hass
self.ha_conf_dir = str(hass.config.path())
self.hidden = conf_hide_sensor
self.pyupdate = PythonScripts(
self.ha_conf_dir, conf_python_script_urls)
async def extra_init(self):
"""Additional init."""
_LOGGER.debug('CustomPythonScripts - extra_init')
await self.cache_versions()
async def cache_versions(self, now=None):
"""Cache."""
_LOGGER.debug('CustomPythonScripts - cache_versions')
information = await self.pyupdate.get_sensor_data(True)
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_python_script_tracker', state, attributes)
async def update_all(self):
"""Update all python_scripts."""
_LOGGER.debug('CustomPythonScripts - update_all')
await self.pyupdate.update_all()
information = await self.pyupdate.get_sensor_data()
state = int(information[1])
attributes = information[0]
attributes['hidden'] = self.hidden
self.hass.states.async_set(
'sensor.custom_python_script_tracker', state, attributes)
async def install(self, element):
"""Install single python_script."""
_LOGGER.debug('CustomPythonScripts - install')
await self.pyupdate.install(element)
class CustomCardsView(HomeAssistantView):
"""View to return a custom_card file."""
requires_auth = False
url = r"/customcards/{path:.+}"
name = "customcards:path"
def __init__(self, hadir):
"""Initialize custom_card view."""
self.hadir = hadir
async def get(self, request, path):
"""Retrieve custom_card."""
_LOGGER.error("This integration is deprecated, and is no longer maintained."
"As an alternative have a look at HACS https://hacs.xyz")
if path != sanitize_path(path):
raise web.HTTPBadRequest
if '?' in path:
path = path.split('?')[0]
file = "{}/www/{}".format(self.hadir, path)
if os.path.exists(file):
msg = "Serving /customcards/{path} from /www/{path}".format(
path=path)
_LOGGER.debug(msg)
resp = web.FileResponse(file)
resp.headers["Cache-Control"] = "max-age=0, must-revalidate"
return resp
else:
_LOGGER.error("Tried to serve up '%s' but it does not exist", file)
return None
---
blob_id: 303698454b14429c4a5777c1a31aec08ef84c2b3
directory_id: b049a961f100444dde14599bab06a0a4224d869b
path: /sdk/python/pulumi_azure_native/dataprotection/v20221101preview/outputs.py
content_id: 5b17c0d855b51904d4e100067aeccca06280e6b7
detected_licenses: ["BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: pulumi/pulumi-azure-native
snapshot_id: b390c88beef8381f9a71ab2bed5571e0dd848e65
revision_id: 4c499abe17ec6696ce28477dde1157372896364e
branch_name: refs/heads/master
visit_date: 2023-08-30T08:19:41.564780
revision_date: 2023-08-28T19:29:04
committer_date: 2023-08-28T19:29:04
github_id: 172,386,632
star_events_count: 107
fork_events_count: 29
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:17:00
gha_created_at: 2019-02-24T20:30:21
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 12,563
extension: py
filename: outputs.py
content:
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'DppIdentityDetailsResponse',
'ResourceGuardOperationResponse',
'ResourceGuardResponse',
'SystemDataResponse',
]
@pulumi.output_type
class DppIdentityDetailsResponse(dict):
"""
Identity details
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DppIdentityDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DppIdentityDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DppIdentityDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
principal_id: str,
tenant_id: str,
type: Optional[str] = None):
"""
Identity details
:param str principal_id: The object ID of the service principal object for the managed identity that is used to grant role-based access to an Azure resource.
:param str tenant_id: A Globally Unique Identifier (GUID) that represents the Azure AD tenant where the resource is now a member.
:param str type: The identityType which can be either SystemAssigned or None
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The object ID of the service principal object for the managed identity that is used to grant role-based access to an Azure resource.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
A Globally Unique Identifier (GUID) that represents the Azure AD tenant where the resource is now a member.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The identityType which can be either SystemAssigned or None
"""
return pulumi.get(self, "type")
@pulumi.output_type
class ResourceGuardOperationResponse(dict):
"""
This class contains all the details about a critical operation.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "requestResourceType":
suggest = "request_resource_type"
elif key == "vaultCriticalOperation":
suggest = "vault_critical_operation"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceGuardOperationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceGuardOperationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceGuardOperationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
request_resource_type: str,
vault_critical_operation: str):
"""
This class contains all the details about a critical operation.
:param str request_resource_type: Type of resource request.
:param str vault_critical_operation: Name of the critical operation.
"""
pulumi.set(__self__, "request_resource_type", request_resource_type)
pulumi.set(__self__, "vault_critical_operation", vault_critical_operation)
@property
@pulumi.getter(name="requestResourceType")
def request_resource_type(self) -> str:
"""
Type of resource request.
"""
return pulumi.get(self, "request_resource_type")
@property
@pulumi.getter(name="vaultCriticalOperation")
def vault_critical_operation(self) -> str:
"""
Name of the critical operation.
"""
return pulumi.get(self, "vault_critical_operation")
@pulumi.output_type
class ResourceGuardResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowAutoApprovals":
suggest = "allow_auto_approvals"
elif key == "provisioningState":
suggest = "provisioning_state"
elif key == "resourceGuardOperations":
suggest = "resource_guard_operations"
elif key == "vaultCriticalOperationExclusionList":
suggest = "vault_critical_operation_exclusion_list"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceGuardResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceGuardResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceGuardResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_auto_approvals: bool,
description: str,
provisioning_state: str,
resource_guard_operations: Sequence['outputs.ResourceGuardOperationResponse'],
vault_critical_operation_exclusion_list: Optional[Sequence[str]] = None):
"""
:param bool allow_auto_approvals: This flag indicates whether auto approval is allowed or not.
:param str description: Description about the pre-req steps to perform all the critical operations.
:param str provisioning_state: Provisioning state of the BackupVault resource
:param Sequence['ResourceGuardOperationResponse'] resource_guard_operations: {readonly} List of operation details those are protected by the ResourceGuard resource
:param Sequence[str] vault_critical_operation_exclusion_list: List of critical operations which are not protected by this resourceGuard
"""
pulumi.set(__self__, "allow_auto_approvals", allow_auto_approvals)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "resource_guard_operations", resource_guard_operations)
if vault_critical_operation_exclusion_list is not None:
pulumi.set(__self__, "vault_critical_operation_exclusion_list", vault_critical_operation_exclusion_list)
@property
@pulumi.getter(name="allowAutoApprovals")
def allow_auto_approvals(self) -> bool:
"""
This flag indicates whether auto approval is allowed or not.
"""
return pulumi.get(self, "allow_auto_approvals")
@property
@pulumi.getter
def description(self) -> str:
"""
Description about the pre-req steps to perform all the critical operations.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the BackupVault resource
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuardOperations")
def resource_guard_operations(self) -> Sequence['outputs.ResourceGuardOperationResponse']:
"""
{readonly} List of operation details those are protected by the ResourceGuard resource
"""
return pulumi.get(self, "resource_guard_operations")
@property
@pulumi.getter(name="vaultCriticalOperationExclusionList")
def vault_critical_operation_exclusion_list(self) -> Optional[Sequence[str]]:
"""
List of critical operations which are not protected by this resourceGuard
"""
return pulumi.get(self, "vault_critical_operation_exclusion_list")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC).
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC).
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
---
blob_id: 3a708f5698d4cfc3eeb43b91b2941bdafaf80e2d
directory_id: 2d0bada349646b801a69c542407279cc7bc25013
path: /src/vai_runtime/vart/trace/vaitrace/writer/parser/mcHeaderGen.py
content_id: 99977f82c4ccbf39936865bb1156ada6f303e735
detected_licenses: [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
]
license_type: permissive
repo_name: Xilinx/Vitis-AI
snapshot_id: 31e664f7adff0958bb7d149883ab9c231efb3541
revision_id: f74ddc6ed086ba949b791626638717e21505dba2
branch_name: refs/heads/master
visit_date: 2023-08-31T02:44:51.029166
revision_date: 2023-07-27T06:50:28
committer_date: 2023-07-27T06:50:28
github_id: 215,649,623
star_events_count: 1,283
fork_events_count: 683
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-17T09:24:55
gha_created_at: 2019-10-16T21:41:54
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,935
extension: py
filename: mcHeaderGen.py
content:
#!/usr/bin/env python
# Copyright 2022-2023 Advanced Micro Devices Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os.path
import xml.etree.ElementTree as ET
from python import Root
from functools import reduce
class mcInst:
def __init__(self, inst):
self.inst = inst
def code_gen(self):
inst = self.inst
words = []
reserved_id = 1
reserved_name = "redvered"
for w in inst.word_list:
# uint32_t bank_addr : 14, bank_id : 6, dpby : 4, dpdon : 4, opcode : 4;
f_code = []
for f in w.field_list[::-1]:
field_name = f.name
if field_name == "reserved":
field_name = "%s_%d" % (reserved_name, reserved_id)
reserved_id += 1
field_len = f.len
f_code.append("{} : {}".format(field_name, field_len))
f_code_str = reduce(lambda _x, _y: "{}, {}".format(_x, _y), f_code)
words.append("uint32_t {};\n".format(f_code_str))
code = "struct {} {{{}}};\n\n".format(
inst.name, reduce(lambda _x, _y: _x + _y, words))
return code
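# Illustrative output (the struct name 'load' is hypothetical): for a one-word
# instruction whose fields match the layout comment above (opcode:4, dpdon:4,
# dpby:4, bank_id:6, bank_addr:14), code_gen() emits a C bit-field struct:
#   struct load {uint32_t bank_addr : 14, bank_id : 6, dpby : 4, dpdon : 4, opcode : 4;
#   };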
class InstTable:
def __init__(self, insts):
self.inst_table = insts
def code_gen(self):
code = "std::vector<class inst_desc> inst_table = {{{}}};\n"
tmp = []
for i in self.inst_table:
ii = {"inst_name": i.name.upper(), "inst_opcode": i.opcode_str,
"inst_len": i.word_num}
tmp.append(
"create_inst_desc ({inst_name}, {inst_opcode}, {inst_len})".format(**ii))
code = code.format(reduce(lambda _x, _y: "{},{}".format(_x, _y), tmp))
return code
def gen_mc_header(root_list, dir="./"):
for dpuInstance in root_list:
h_f = "%s.h" % dpuInstance.version
h = open(os.path.join(dir, h_f), "w+t")
i_code = ""
for i in dpuInstance.inst_list:
i_code = mcInst(i).code_gen()
h.write(i_code)
t_code = InstTable(dpuInstance.inst_list).code_gen()
h.write(t_code)
def main(wrk_dir="./"):
xml_list = []
for roots, dirs, files in os.walk("./xml"):
for f in files:
xml_list.append(os.path.join(roots, f))
root_list = [Root.Root(ET.parse(x).getroot()) for x in xml_list]
gen_mc_header(root_list)
if __name__ == "__main__":
main()
---
blob_id: ebdc73da8266f7e2f13d40f93ff2948aa885ccd4
directory_id: 93cec528029c65106368508fcb25b60002118349
path: /typesystem/base.py
content_id: a80cfec45a81d7456b4f5a01d69c8d508d88e921
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: encode/typesystem
snapshot_id: 5517438dd1960d160b18a00ff7ead79c039c007f
revision_id: 7decd48f40de916edf18148a16516ad0a0483c4b
branch_name: refs/heads/master
visit_date: 2023-08-17T02:15:41.387805
revision_date: 2022-02-25T12:11:38
committer_date: 2022-02-25T12:11:38
github_id: 97,711,273
star_events_count: 583
fork_events_count: 63
gha_license_id: BSD-3-Clause
gha_event_created_at: 2022-07-07T02:22:51
gha_created_at: 2017-07-19T12:00:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,489
extension: py
filename: base.py
content:
import typing
from collections.abc import Mapping
class Position:
def __init__(self, line_no: int, column_no: int, char_index: int):
self.line_no = line_no
self.column_no = column_no
self.char_index = char_index
def __eq__(self, other: typing.Any) -> bool:
return (
isinstance(other, Position)
and self.line_no == other.line_no
and self.column_no == other.column_no
and self.char_index == other.char_index
)
def __repr__(self) -> str:
class_name = self.__class__.__name__
return (
f"{class_name}(line_no={self.line_no}, column_no={self.column_no},"
f" char_index={self.char_index})"
)
class Message:
"""
An individual error message, within a ValidationError.
"""
def __init__(
self,
*,
text: str,
code: str = None,
key: typing.Union[int, str] = None,
index: typing.List[typing.Union[int, str]] = None,
position: Position = None,
start_position: Position = None,
end_position: Position = None,
):
"""
text - The error message. 'May not have more than 100 characters'
code - An optional error code, eg. 'max_length'
key - An optional key of the message within a single parent. eg. 'username'
index - The index of the message
within a nested object. eg. ['users', 3, 'username']
Optionally either:
position - The start and end position of the error message
within the raw content.
Or:
start_position - The start position of the error message within the raw content.
end_position - The end position of the error message within the raw content.
"""
self.text = text
self.code = "custom" if code is None else code
if key is not None:
assert index is None
self.index = [key]
else:
self.index = [] if index is None else index
if position is None:
self.start_position = start_position
self.end_position = end_position
else:
assert start_position is None
assert end_position is None
self.start_position = position
self.end_position = position
def __eq__(self, other: typing.Any) -> bool:
return isinstance(other, Message) and (
self.text == other.text
and self.code == other.code
and self.index == other.index
and self.start_position == other.start_position
and self.end_position == other.end_position
)
def __hash__(self) -> int:
ident = (self.code, tuple(self.index))
return hash(ident)
def __repr__(self) -> str:
class_name = self.__class__.__name__
index_str = f", index={self.index!r}" if self.index else ""
if self.start_position is None:
position_str = ""
elif self.start_position == self.end_position:
position_str = f", position={self.start_position!r}"
else:
position_str = (
f", start_position={self.start_position!r},"
f" end_position={self.end_position!r}"
)
return (
f"{class_name}(text={self.text!r},"
f" code={self.code!r}{index_str}{position_str})"
)
class BaseError(Mapping, Exception):
"""
A validation or parse error, containing one or more error messages.
Error information is accessible either by accessing as a dict-like object,
eg. `dict(error)` or by returning the list of messages with `error.messages()`.
ValidationError is either raised, in the `validate()` usage:
value = MySchema.validate(data)
Or returned in the `validate_or_error()` usage:
value, error = MySchema.validate_or_error(data)
"""
def __init__(
self,
*,
text: str = None,
code: str = None,
key: typing.Union[int, str] = None,
position: Position = None,
messages: typing.List[Message] = None,
):
"""
Either instantiated with a single message, like so:
text - The error message. 'May not have more than 100 characters'
code - An optional error code, eg. 'max_length'
key - An optional key of the message within a single parent. eg. 'username'
Or instantiated with a list of error messages:
messages - A list of all the messages in the error.
"""
if messages is None:
# Instantiated as a ValidationError with a single error message.
assert text is not None
messages = [Message(text=text, code=code, key=key, position=position)]
else:
# Instantiated as a ValidationError with multiple error messages.
assert text is None
assert code is None
assert key is None
assert position is None
assert len(messages)
self._messages = messages
self._message_dict: typing.Dict[
typing.Union[int, str], typing.Union[str, dict]
] = {}
# Populate 'self._message_dict'
for message in messages:
insert_into = self._message_dict
for key in message.index[:-1]:
insert_into = insert_into.setdefault(key, {}) # type: ignore
insert_key = message.index[-1] if message.index else ""
insert_into[insert_key] = message.text
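# For example (illustrative values from the Message docstring): a message
# with index ['users', 3, 'username'] and text 'May not have more than 100
# characters' is stored as
# {'users': {3: {'username': 'May not have more than 100 characters'}}};
# a message with an empty index is stored under the '' key.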
def messages(
self, *, add_prefix: typing.Union[str, int] = None
) -> typing.List[Message]:
"""
Return a list of all the messages.
add_prefix - An optional key to add to the index of all returned messages.
Useful in nested objects when validation needs to accumulate
all the child messages for each item in the parent object.
"""
if add_prefix is not None:
return [
Message(
text=message.text,
code=message.code,
index=[add_prefix] + message.index,
)
for message in self._messages
]
return list(self._messages)
def __iter__(self) -> typing.Iterator:
return iter(self._message_dict)
def __len__(self) -> int:
return len(self._message_dict)
def __getitem__(self, key: typing.Any) -> typing.Union[str, dict]:
return self._message_dict[key]
def __eq__(self, other: typing.Any) -> bool:
return isinstance(other, ValidationError) and self._messages == other._messages
def __hash__(self) -> int:
ident = tuple(hash(m) for m in self._messages)
return hash(ident)
def __repr__(self) -> str:
class_name = self.__class__.__name__
if len(self._messages) == 1 and not self._messages[0].index:
message = self._messages[0]
return f"{class_name}(text={message.text!r}, code={message.code!r})"
return f"{class_name}({self._messages!r})"
def __str__(self) -> str:
if len(self._messages) == 1 and not self._messages[0].index:
return self._messages[0].text
return str(dict(self))
class ParseError(BaseError):
"""
Raised by `typesystem.tokenize_json()` and `typesystem.tokenize_yaml()`.
"""
class ValidationError(BaseError):
"""
Raised by `.validate()` or returned by `.validate_or_error()`.
"""
class ValidationResult:
"""
A pair providing the validated data or validation error.
Typically unpacked like so:
value, error = MySchema.validate_or_error(data)
"""
def __init__(
self, *, value: typing.Any = None, error: ValidationError = None
) -> None:
"""
Either:
value - The validated data.
Or:
error - The validation error.
"""
assert value is None or error is None
self.value = value
self.error = error
def __iter__(self) -> typing.Iterator:
yield self.value
yield self.error
def __bool__(self) -> bool:
return self.error is None
def __repr__(self) -> str:
class_name = self.__class__.__name__
if self.error is not None:
return f"{class_name}(error={self.error!r})"
return f"{class_name}(value={self.value!r})"
---
blob_id: 79c1e35fa05599d1f78e387f12d43909cdca9c39
directory_id: 727213382e3dbd831fa6168ae5dfeed26e22f153
path: /tools/cmake/bazel_to_cmake/golden_test.py
content_id: f564816987dab4e3f06912818c52d98e68dc2789
detected_licenses: ["BSD-3-Clause", "MIT", "Apache-2.0", "BSD-2-Clause", "LicenseRef-scancode-warranty-disclaimer"]
license_type: permissive
repo_name: google/tensorstore
snapshot_id: e3c41c9de41bdd56f26bbe31307c09d4ddd0278d
revision_id: d38958e88130e7922f2c5d856a12114546ac9b73
branch_name: refs/heads/master
visit_date: 2023-08-29T07:15:28.642462
revision_date: 2023-08-29T06:42:04
committer_date: 2023-08-29T06:43:13
github_id: 251,312,202
star_events_count: 1,190
fork_events_count: 88
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-05T07:40:51
gha_created_at: 2020-03-30T13:24:59
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 11,324
extension: py
filename: golden_test.py
content:
# Copyright 2022 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level,wildcard-import
import json
import os
import pathlib
import shutil
import sys
from typing import Any, Dict, List, Tuple, Union
import pytest
from . import native_rules # pylint: disable=unused-import
from . import native_rules_alias # pylint: disable=unused-import
from . import native_rules_cc # pylint: disable=unused-import
from . import native_rules_cc_proto # pylint: disable=unused-import
from . import native_rules_genrule # pylint: disable=unused-import
from . import native_rules_proto # pylint: disable=unused-import
from .cmake_repository import CMakeRepository
from .cmake_repository import make_repo_mapping
from .cmake_target import CMakePackage
from .cmake_target import CMakeTarget
from .cmake_target import CMakeTargetPair
from .evaluation import EvaluationState
from .platforms import add_platform_constraints
from .starlark import rule # pylint: disable=unused-import
from .starlark.bazel_target import RepositoryId
from .starlark.bazel_target import TargetId
from .workspace import Repository
from .workspace import Workspace
# NOTE: Consider adding failure tests as well as the success tests.
# To update, run:
# UPDATE_GOLDENS=1 python3 -m pytest bazel_to_cmake/golden_test.py
#
UPDATE_GOLDENS = os.getenv('UPDATE_GOLDENS') == '1'
CMAKE_VARS = {
'CMAKE_CXX_COMPILER_ID': 'Clang',
'CMAKE_SYSTEM_NAME': 'Linux',
'CMAKE_SYSTEM_PROCESSOR': 'AMD64',
'CMAKE_COMMAND': 'cmake',
'PROJECT_IS_TOP_LEVEL': 'YES',
'CMAKE_FIND_PACKAGE_REDIRECTS_DIR': '_find_pkg_redirects_',
'CMAKE_MESSAGE_LOG_LEVEL': 'TRACE',
}
def parameters() -> List[Tuple[str, Dict[str, Any]]]:
"""Returns config tuples by reading config.json from the 'testdata' subdir."""
if UPDATE_GOLDENS:
testdata = pathlib.Path(__file__).resolve().with_name('testdata')
else:
testdata = pathlib.Path(__file__).with_name('testdata').resolve()
result: List[Tuple[str, Dict[str, Any]]] = []
for x in testdata.iterdir():
if '__' in str(x):
continue
try:
with (x / 'config.json').open('r') as f:
config: Dict[str, Any] = json.load(f)
except FileNotFoundError as e:
raise FileNotFoundError(f'Failed to read {str(x)}/config.json') from e
config['source_directory'] = str(x)
result.append((x.name, config))
return result
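# A hedged sketch of a testdata config.json, using only the keys this test
# actually reads (all values are placeholders):
#   {
#     "repo_mapping": [["@foo", "@local_foo"]],
#     "modules": [],
#     "build_files": ["BUILD.bazel"],
#     "targets": null,
#     "excludes": []
#   }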
def get_files_list(source_directory: str) -> List[pathlib.Path]:
"""Returns non-golden files under source directory."""
files: List[pathlib.Path] = []
try:
include_goldens = 'golden' in source_directory
p = pathlib.Path(source_directory)
for x in sorted(p.glob('**/*')):
if not x.is_file():
continue
if 'golden/' in str(x) and not include_goldens:
continue
files.append(x.relative_to(p))
except FileNotFoundError as e:
print(f'Failure to read {source_directory}: {e}')
return files
def copy_tree(source_dir: str, source_files: List[str], dest_dir: str):
"""Copies source_files from source_dir to dest_dir."""
for x in source_files:
dest_path = os.path.join(dest_dir, x)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
shutil.copy(os.path.join(source_dir, x), dest_path)
def compare_files(golden, generated):
with pathlib.Path(golden).open('r') as right:
with pathlib.Path(generated).open('r') as left:
assert list(left) == list(right)
def add_repositories(workspace: Workspace):
workspace.add_cmake_repository(
CMakeRepository(
RepositoryId('com_google_protobuf'),
CMakePackage('Protobuf'),
pathlib.PurePosixPath('protobuf_src'),
pathlib.PurePosixPath('protobuf_build'),
repo_mapping={},
persisted_canonical_name={},
)
)
workspace.add_cmake_repository(
CMakeRepository(
RepositoryId('com_github_grpc_grpc'),
CMakePackage('gRPC'),
pathlib.PurePosixPath('grpc_src'),
pathlib.PurePosixPath('grpc_build'),
repo_mapping={},
persisted_canonical_name={},
)
)
workspace.add_cmake_repository(
CMakeRepository(
RepositoryId('com_google_protobuf_upb'),
CMakePackage('upb'),
pathlib.PurePosixPath('upb_src'),
pathlib.PurePosixPath('upb_build'),
repo_mapping={},
persisted_canonical_name={},
)
)
def persist_cmake_name(
target: Union[str, TargetId],
cmake_alias: CMakeTarget,
):
if not isinstance(target, TargetId):
target = workspace.root_repository.repository_id.parse_target(str(target))
assert isinstance(target, TargetId)
assert target.repository_id in workspace.all_repositories
repo = workspace.all_repositories[target.repository_id]
cmake_target_pair: CMakeTargetPair = repo.get_cmake_target_pair(
target
).with_alias(cmake_alias)
repo.set_persisted_canonical_name(target, cmake_target_pair)
# Add default mappings used in proto code.
persist_cmake_name(
'@com_google_protobuf//:protoc',
CMakeTarget('protobuf::protoc'),
)
persist_cmake_name(
'@com_google_protobuf//:protobuf',
CMakeTarget('protobuf::libprotobuf'),
)
persist_cmake_name(
'@com_google_protobuf//:protobuf_lite',
CMakeTarget('protobuf::libprotobuf_lite'),
)
persist_cmake_name(
'@com_google_protobuf//:any_protoc',
CMakeTarget('protobuf::any_proto'),
)
# gRPC
persist_cmake_name(
'@com_github_grpc_grpc//:grpc++_codegen_proto',
CMakeTarget('gRPC::gRPC_codegen'),
)
persist_cmake_name(
'@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin',
CMakeTarget('gRPC::grpc_cpp_plugin'),
)
# upb
persist_cmake_name(
'@com_google_protobuf_upb//upbc:protoc-gen-upbdefs',
CMakeTarget('upb::protoc-gen-upbdefs'),
)
persist_cmake_name(
'@com_google_protobuf_upb//upbc:protoc-gen-upb',
CMakeTarget('protobuf::protoc-gen-upb'),
)
persist_cmake_name(
'@com_google_protobuf_upb//upbc:protoc-gen-upb_stage0',
CMakeTarget('protobuf::protoc-gen-upb_stage0'),
)
persist_cmake_name(
'@com_google_protobuf_upb//upbc:protoc-gen-upb_stage1',
CMakeTarget('protobuf::protoc-gen-upb_stage1'),
)
persist_cmake_name(
'@com_google_protobuf_upb//:generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me',
CMakeTarget(
'upb::generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
),
)
persist_cmake_name(
'@com_google_protobuf_upb//:generated_reflection_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me',
CMakeTarget(
'upb::generated_reflection_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
),
)
persist_cmake_name(
'@com_google_protobuf_upb//:mini_table',
CMakeTarget('upb::mini_table'),
)
@pytest.mark.parametrize('test_name,config', parameters())
def test_golden(test_name: str, config: Dict[str, Any], tmpdir):
# Start with the list of source files.
source_directory = config['source_directory']
del config['source_directory']
input_files = [str(x) for x in get_files_list(source_directory)]
# Create the working directory as a snapshot of the source directory.
directory = str(tmpdir)
os.chdir(directory)
copy_tree(source_directory, input_files, directory)
os.makedirs(CMAKE_VARS['CMAKE_FIND_PACKAGE_REDIRECTS_DIR'], exist_ok=True)
repository_id = RepositoryId(f'{test_name}_test_repo')
root_repository = CMakeRepository(
repository_id=repository_id,
cmake_project_name=CMakePackage('CMakeProject'),
source_directory=pathlib.PurePath(directory),
cmake_binary_dir=pathlib.PurePath('_cmake_binary_dir_'),
repo_mapping=make_repo_mapping(
repository_id, config.get('repo_mapping', [])
),
persisted_canonical_name={},
)
# Setup repo mapping.
for x in config.get('repo_mapping', []):
root_repository.repo_mapping[RepositoryId(x[0])] = RepositoryId(x[1])
# Workspace setup
workspace = Workspace(root_repository, CMAKE_VARS)
workspace.save_workspace = '_workspace.pickle'
workspace.host_platform_name = 'linux'
workspace._verbose = 3
add_platform_constraints(workspace)
add_repositories(workspace)
# Load specified modules.
for x in config.get('modules', []):
workspace.add_module(x)
workspace.load_modules()
# load bazelrc
bazelrc_path = os.path.join(directory, '.bazelrc')
if pathlib.Path(bazelrc_path).exists():
workspace.load_bazelrc(bazelrc_path)
# Setup active repository
active_repo = Repository(
workspace=workspace,
repository=root_repository,
bindings={},
top_level=True,
)
# Evaluate the WORKSPACE and BUILD files
state = EvaluationState(active_repo)
state.process_workspace()
for build_file in config.get('build_files', ['BUILD.bazel']):
state.process_build_file(
root_repository.source_directory.joinpath(build_file)
)
# Analyze
if config.get('targets') is None:
targets_to_analyze = state.targets_to_analyze
else:
targets_to_analyze = sorted(
[
active_repo.repository_id.parse_target(t)
for t in config.get('targets')
]
)
state.analyze(targets_to_analyze)
# Write generated file
pathlib.Path('build_rules.cmake').write_text(state.builder.as_text())
# Collect the output files, excluding the input files,
# and normalize the contents.
excludes = config.get('excludes', [])
files = []
for x in get_files_list('.'):
if str(x) in input_files:
continue
if str(x) in excludes:
continue
txt = x.read_text()
txt = txt.replace(os.path.abspath(sys.argv[0]), 'run_bazel_to_cmake.py')
txt = txt.replace(directory, '${TEST_DIRECTORY}')
txt = txt.replace(os.path.dirname(__file__), '${SCRIPT_DIRECTORY}')
x.write_text(txt)
files.append(str(x))
golden_directory = os.path.join(source_directory, 'golden')
if UPDATE_GOLDENS:
print(f'Updating goldens for {test_name}')
try:
shutil.rmtree(golden_directory)
except FileNotFoundError:
pass
for x in files:
dest_path = os.path.join(golden_directory, x)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
shutil.copyfile(x, dest_path)
# Assert files exist.
golden_files = get_files_list(golden_directory)
assert len(golden_files) > 0 # pylint: disable=g-explicit-length-test
for x in golden_files:
# Assert on file contents.
expected_file = os.path.join(golden_directory, str(x))
actual_file = os.path.join(directory, str(x))
compare_files(expected_file, actual_file)
---
blob_id: 1fd0895d078b1c2200612a33bea0eb859b2f7e15
directory_id: 99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa
path: /src/pytezos/rpc/docs.py
content_id: fd0680dee6401b5f2f16e8405bf5e3a7e8571c5f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: baking-bad/pytezos
snapshot_id: c4248bde49a5b05521b8cc51eeca588b1a721660
revision_id: 19747e3acec2141f06e812025673f497fc07e2d4
branch_name: refs/heads/master
visit_date: 2023-07-06T21:57:09.572985
revision_date: 2023-07-05T11:45:27
committer_date: 2023-07-05T11:45:27
github_id: 169,243,460
star_events_count: 115
fork_events_count: 43
gha_license_id: MIT
gha_event_created_at: 2023-07-04T16:28:09
gha_created_at: 2019-02-05T13:12:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 133,621
extension: py
filename: docs.py
content:
rpc_docs = {
"/": {
"props": [
"chains",
"config",
"errors",
"fetch_protocol",
"injection",
"monitor",
"network",
"private",
"protocols",
"stats",
"version",
"workers"
]
},
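# Structure of this mapping (summarized from the entries themselves): keys
# are RPC path templates with '{}' placeholders; each value may carry HTTP
# verbs (e.g. "GET") mapping to {"descr", "args", "ret"}, a "props" list of
# sub-paths, and/or an "item" describing the path parameter. For example:
#   rpc_docs["/chains/{}/blocks"]["GET"]["descr"]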
"/chains": {
"item": {
"name": "chain_id",
"descr": "A chain identifier. This is either a chain hash in Base58Check notation or a one the predefined aliases: 'main', 'test'."
}
},
"/chains/{}": {
"PATCH": {
"descr": "Forcefully set the bootstrapped flag of the node",
"args": [],
"ret": "Object"
},
"props": [
"blocks",
"chain_id",
"checkpoint",
"invalid_blocks",
"is_bootstrapped",
"levels",
"mempool"
]
},
"/chains/{}/blocks": {
"GET": {
"descr": "Lists block hashes from '<chain>', up to the last checkpoint, sorted with decreasing fitness. Without arguments it returns the head of the chain. Optional arguments allow to return the list of predecessors of a given block or of a set of blocks.",
"args": [
{
"name": "length",
"descr": "The requested number of predecessors to return (per request; see next argument)."
},
{
"name": "head",
"descr": "An empty argument requests blocks starting with the current head. A non empty list allows to request one or more specific fragments of the chain."
},
{
"name": "min_date",
"descr": "When `min_date` is provided, blocks with a timestamp before `min_date` are filtered out. However, if the `length` parameter is also provided, then up to that number of predecessors will be returned regardless of their date."
}
],
"ret": "Array"
},
"item": {
"name": "block_id",
"descr": "A block identifier. This is either a block hash in Base58Check notation, one the predefined aliases: 'genesis', 'head' or a block level (index in the chain). One might also use 'head~N' or '<hash>~N' where N is an integer to denote the Nth predecessor of the designated block.Also, '<hash>+N' denotes the Nth successor of a block."
}
},
"/chains/{}/chain_id": {
"GET": {
"descr": "The chain unique identifier.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/checkpoint": {
"GET": {
"descr": "DEPRECATED: use `../levels/{checkpoint, savepoint, caboose, history_mode}` instead. The current checkpoint for this chain.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/invalid_blocks": {
"GET": {
"descr": "Lists blocks that have been declared invalid along with the errors that led to them being declared invalid.",
"args": [],
"ret": "Array"
},
"item": {
"name": "block_hash",
"descr": "block_hash (Base58Check-encoded)"
}
},
"/chains/{}/invalid_blocks/{}": {
"GET": {
"descr": "The errors that appears during the block (in)validation.",
"args": [],
"ret": "Object"
},
"DELETE": {
"descr": "Remove an invalid block for the tezos storage",
"args": [],
"ret": "Object"
}
},
"/chains/{}/is_bootstrapped": {
"GET": {
"descr": "The bootstrap status of a chain",
"args": [],
"ret": "Object"
}
},
"/chains/{}/levels": {
"props": [
"caboose",
"checkpoint",
"savepoint"
]
},
"/chains/{}/levels/caboose": {
"GET": {
"descr": "The current caboose for this chain.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/levels/checkpoint": {
"GET": {
"descr": "The current checkpoint for this chain.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/levels/savepoint": {
"GET": {
"descr": "The current savepoint for this chain.",
"args": [],
"ret": "Object"
}
},
"/config": {
"GET": {
"descr": "Return the runtime node configuration (this takes into account the command-line arguments and the on-disk configuration file)",
"args": [],
"ret": "Object"
},
"props": [
"history_mode",
"logging",
"network"
]
},
"/config/history_mode": {
"GET": {
"descr": "Returns the history mode of the node's underlying storage.",
"args": [],
"ret": "Object"
}
},
"/config/logging": {
"PUT": {
"descr": "Replace the logging configuration of the node.",
"args": [],
"ret": "Object"
}
},
"/config/network": {
"props": [
"user_activated_protocol_overrides",
"user_activated_upgrades"
]
},
"/config/network/user_activated_protocol_overrides": {
"GET": {
"descr": "List of protocols which replace other protocols",
"args": [],
"ret": "Array"
}
},
"/config/network/user_activated_upgrades": {
"GET": {
"descr": "List of protocols to switch to at given levels",
"args": [],
"ret": "Array"
}
},
"/errors": {
"GET": {
"descr": "Schema for all the RPC errors from the shell",
"args": [],
"ret": "Object"
}
},
"/fetch_protocol": {
"item": {
"name": "Protocol_hash",
"descr": "Protocol_hash (Base58Check-encoded)"
}
},
"/fetch_protocol/{}": {
"GET": {
"descr": "Fetch a protocol from the network.",
"args": [],
"ret": "Object"
}
},
"/injection": {
"props": [
"block",
"operation",
"protocol"
]
},
"/injection/block": {
"POST": {
"descr": "Inject a block in the node and broadcast it. The `operations` embedded in `blockHeader` might be pre-validated using a contextual RPCs from the latest block (e.g. '/blocks/head/context/preapply'). Returns the ID of the block. By default, the RPC will wait for the block to be validated before answering. If ?async is true, the function returns immediately. Otherwise, the block will be validated before the result is returned. If ?force is true, it will be injected even on non strictly increasing fitness. An optional ?chain parameter can be used to specify whether to inject on the test chain or the main chain.",
"args": [
{
"name": "async",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "force",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "chain",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/injection/operation": {
"POST": {
"descr": "Inject an operation in node and broadcast it. Returns the ID of the operation. The `signedOperationContents` should be constructed using contextual RPCs from the latest block and signed by the client. The injection of the operation will apply it on the current mempool context. This context may change at each operation injection or operation reception from peers. By default, the RPC will wait for the operation to be (pre-)validated before returning. However, if ?async is true, the function returns immediately. The optional ?chain parameter can be used to specify whether to inject on the test chain or the main chain.",
"args": [
{
"name": "async",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "chain",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/injection/protocol": {
"POST": {
"descr": "Inject a protocol in node. Returns the ID of the protocol. If ?async is true, the function returns immediately. Otherwise, the protocol will be validated before the result is returned.",
"args": [
{
"name": "async",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/monitor": {
"props": [
"active_chains",
"bootstrapped",
"commit_hash",
"heads",
"protocols",
"valid_blocks"
]
},
"/monitor/active_chains": {
"GET": {
"descr": "Monitor every chain creation and destruction. Currently active chains will be given as first elements",
"args": [],
"ret": "Array"
}
},
"/monitor/bootstrapped": {
"GET": {
"descr": "Wait for the node to have synchronized its chain with a few peers (configured by the node's administrator), streaming head updates that happen during the bootstrapping process, and closing the stream at the end. If the node was already bootstrapped, returns the current head immediately.",
"args": [],
"ret": "Object"
}
},
"/monitor/commit_hash": {
"GET": {
"descr": "DEPRECATED: use `version` instead.",
"args": [],
"ret": "Object"
}
},
"/monitor/heads": {
"item": {
"name": "chain_id",
"descr": "A chain identifier. This is either a chain hash in Base58Check notation or a one the predefined aliases: 'main', 'test'."
}
},
"/monitor/heads/{}": {
"GET": {
"descr": "Monitor all blocks that are successfully validated by the node and selected as the new head of the given chain.",
"args": [
{
"name": "next_protocol",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/monitor/protocols": {
"GET": {
"descr": "Monitor all economic protocols that are retrieved and successfully loaded and compiled by the node.",
"args": [],
"ret": "Object"
}
},
"/monitor/valid_blocks": {
"GET": {
"descr": "Monitor all blocks that are successfully validated by the node, disregarding whether they were selected as the new head or not.",
"args": [
{
"name": "protocol",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "next_protocol",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "chain",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/network": {
"props": [
"connections",
"greylist",
"log",
"peers",
"points",
"self",
"stat",
"version",
"versions"
]
},
"/network/connections": {
"GET": {
"descr": "List the running P2P connection.",
"args": [],
"ret": "Array"
},
"item": {
"name": "peer_id",
"descr": "A cryptographic node identity (Base58Check-encoded)"
}
},
"/network/connections/{}": {
"GET": {
"descr": "Details about the current P2P connection to the given peer.",
"args": [],
"ret": "Object"
},
"DELETE": {
"descr": "Forced close of the current P2P connection to the given peer.",
"args": [
{
"name": "wait",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/network/greylist": {
"DELETE": {
"descr": "Clear all greylists tables. This will unban all addresses and peers automatically greylisted by the system.",
"args": [],
"ret": "Object"
},
"props": [
"clear",
"ips",
"peers"
]
},
"/network/greylist/clear": {
"GET": {
"descr": "DEPRECATED: Clear all greylists tables. This will unban all addresses and peers automatically greylisted by the system. Use DELETE `/network/greylist` instead",
"args": [],
"ret": "Object"
}
},
"/network/greylist/ips": {
"GET": {
"descr": "Returns an object that contains a list of IP and the field \"not_reliable_since\".\n If the field \"not_reliable_since\" is None then the list contains the currently greylisted IP addresses.\n If the field \"not_reliable_since\" Contains a date, this means that the greylist has been overflowed and it is no more possible to obtain the exact list of greylisted IPs. Since the greylist of IP addresses has been design to work whatever his size, there is no security issue related to this overflow.\n Reinitialize the ACL structure by calling \"delete /network/greylist\" to get back this list reliable.",
"args": [],
"ret": "Object"
}
},
"/network/greylist/peers": {
"GET": {
"descr": "List of the last greylisted peers.",
"args": [],
"ret": "Array"
}
},
"/network/log": {
"GET": {
"descr": "Stream of all network events",
"args": [],
"ret": "Object"
}
},
"/network/peers": {
"GET": {
"descr": "List the peers the node ever met.",
"args": [
{
"name": "filter",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "peer_id",
"descr": "A cryptographic node identity (Base58Check-encoded)"
}
},
"/network/peers/{}": {
"GET": {
"descr": "Details about a given peer.",
"args": [],
"ret": "Object"
},
"PATCH": {
"descr": "Change the permissions of a given peer. With `{acl: ban}`: blacklist the given peer and remove it from the whitelist if present. With `{acl: open}`: removes the peer from the blacklist and whitelist. With `{acl: trust}`: trust the given peer permanently and remove it from the blacklist if present. The peer cannot be blocked (but its host IP still can).",
"args": [],
"ret": "Object"
},
"props": [
"ban",
"banned",
"log",
"trust",
"unban",
"untrust"
]
},
"/network/peers/{}/ban": {
"GET": {
"descr": "DEPRECATED: Blacklist the given peer and remove it from the whitelist if present. Use PATCH `network/peers/<peer_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/peers/{}/banned": {
"GET": {
"descr": "Check if a given peer is blacklisted or greylisted.",
"args": [],
"ret": "Boolean"
}
},
"/network/peers/{}/log": {
"GET": {
"descr": "Monitor network events related to a given peer.",
"args": [
{
"name": "monitor",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/network/peers/{}/trust": {
"GET": {
"descr": "DEPRECATED: Whitelist a given peer permanently and remove it from the blacklist if present. The peer cannot be blocked (but its host IP still can). Use PATCH `network/peers/<peer_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/peers/{}/unban": {
"GET": {
"descr": "DEPRECATED: Remove the given peer from the blacklist. Use PATCH `network/peers/<peer_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/peers/{}/untrust": {
"GET": {
"descr": "DEPRECATED: Remove a given peer from the whitelist. Use PATCH `network/peers/<peer_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/points": {
"GET": {
"descr": "List the pool of known `IP:port` used for establishing P2P connections.",
"args": [
{
"name": "filter",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "point",
"descr": "A network point (ipv4:port or [ipv6]:port)."
}
},
"/network/points/{}": {
"GET": {
"descr": "Details about a given `IP:addr`.",
"args": [],
"ret": "Object"
},
"PUT": {
"descr": "Connect to a peer",
"args": [
{
"name": "timeout",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"PATCH": {
"descr": "Change the connectivity state of a given `IP:addr`. With `{acl : ban}`: blacklist the given address and remove it from the whitelist if present. With `{acl: open}`: removes an address from the blacklist and whitelist. With `{acl: trust}`: trust a given address permanently and remove it from the blacklist if present. With `{peer_id: <id>}` set the peerId of the point. Connections from this address can still be closed on authentication if the peer is greylisted. ",
"args": [],
"ret": "Object"
},
"props": [
"ban",
"banned",
"log",
"trust",
"unban",
"untrust"
]
},
"/network/points/{}/ban": {
"GET": {
"descr": "DEPRECATED: Blacklist the given address and remove it from the whitelist if present. Use PATCH `/network/point/<point_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/points/{}/banned": {
"GET": {
"descr": "Check if a given address is blacklisted or greylisted. Port component is unused.",
"args": [],
"ret": "Boolean"
}
},
"/network/points/{}/log": {
"GET": {
"descr": "Monitor network events related to an `IP:addr`.",
"args": [
{
"name": "monitor",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/network/points/{}/trust": {
"GET": {
"descr": "DEPRECATED: Trust a given address permanently and remove it from the blacklist if present. Connections from this address can still be closed on authentication if the peer is greylisted. Use PATCH`/network/point/<point_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/points/{}/unban": {
"GET": {
"descr": "DEPRECATED: Remove an address from the blacklist. Use PATCH `/network/point/<point_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/points/{}/untrust": {
"GET": {
"descr": "DEPRECATED: Remove an address from the whitelist. Use PATCH `/network/point/<point_id>` instead.",
"args": [],
"ret": "Object"
}
},
"/network/self": {
"GET": {
"descr": "Return the node's peer id",
"args": [],
"ret": "Object"
}
},
"/network/stat": {
"GET": {
"descr": "Global network bandwidth statistics in B/s.",
"args": [],
"ret": "Object"
}
},
"/network/version": {
"GET": {
"descr": "DEPRECATED: use `version` instead.",
"args": [],
"ret": "Object"
}
},
"/network/versions": {
"GET": {
"descr": "DEPRECATED: use `version` instead.",
"args": [],
"ret": "Array"
}
},
"/private": {
"props": [
"injection"
]
},
"/private/injection": {
"props": [
"operation",
"operations"
]
},
"/private/injection/operation": {
"POST": {
"descr": "Inject an operation in node and broadcast it. Returns the ID of the operation. The `signedOperationContents` should be constructed using contextual RPCs from the latest block and signed by the client. The injection of the operation will apply it on the current mempool context. This context may change at each operation injection or operation reception from peers. By default, the RPC will wait for the operation to be (pre-)validated before returning. However, if ?async is true, the function returns immediately. The optional ?chain parameter can be used to specify whether to inject on the test chain or the main chain.",
"args": [
{
"name": "async",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "chain",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/private/injection/operations": {
"POST": {
"descr": "Inject a list of operations in a node. If [force] is [true] then the operations are immediatly injected. The injection will succeed, but it does not mean the operations are (all) valid. In any case, the injection will be quick, hence [async] will be taken into account but should have almost no impact. If [async] is [true], all the promises returned by injecting an operation will be dropped. Each injection is done independently, and does not depend on the other injected operations result. Otherwise ([async]=[force]=[false]), for each operation, we record a list of promises. If all the injections succeed, the result is the list of operation hashes injected, otherwise an error (\"injection_operations_error\") is returned. This error is followed by markers for each operation: \"injection_operation_succeed\" for success and \"injection_operation_error\" for failure (followed by the errors specific to this injection).",
"args": [
{
"name": "async",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "force",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "chain",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/protocols": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [],
"ret": "Array"
},
"item": {
"name": "Protocol_hash",
"descr": "Protocol_hash (Base58Check-encoded)"
}
},
"/protocols/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [],
"ret": "Object"
},
"props": [
"environment"
]
},
"/protocols/{}/environment": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [],
"ret": "Integer"
}
},
"/stats": {
"props": [
"gc",
"memory"
]
},
"/stats/gc": {
"GET": {
"descr": "Gets stats from the OCaml Garbage Collector",
"args": [],
"ret": "Object"
}
},
"/stats/memory": {
"GET": {
"descr": "Gets memory usage stats",
"args": [],
"ret": "Object"
}
},
"/version": {
"GET": {
"descr": "Get information on the node version",
"args": [],
"ret": "Object"
}
},
"/workers": {
"props": [
"block_validator",
"chain_validators",
"prevalidators"
]
},
"/workers/block_validator": {
"GET": {
"descr": "Introspect the state of the block_validator worker.",
"args": [],
"ret": "Object"
}
},
"/workers/chain_validators": {
"GET": {
"descr": "Lists the chain validator workers and their status.",
"args": [],
"ret": "Array"
},
"item": {
"name": "chain_id",
"descr": "A chain identifier. This is either a chain hash in Base58Check notation or a one the predefined aliases: 'main', 'test'."
}
},
"/workers/chain_validators/{}": {
"GET": {
"descr": "Introspect the state of a chain validator worker.",
"args": [],
"ret": "Object"
},
"props": [
"ddb",
"peers_validators"
]
},
"/workers/chain_validators/{}/ddb": {
"GET": {
"descr": "Introspect the state of the DDB attached to a chain validator worker.",
"args": [],
"ret": "Object"
}
},
"/workers/chain_validators/{}/peers_validators": {
"GET": {
"descr": "Lists the peer validator workers and their status.",
"args": [],
"ret": "Array"
},
"item": {
"name": "peer_id",
"descr": "A cryptographic node identity (Base58Check-encoded)"
}
},
"/workers/chain_validators/{}/peers_validators/{}": {
"GET": {
"descr": "Introspect the state of a peer validator worker.",
"args": [],
"ret": "Object"
}
},
"/workers/prevalidators": {
"GET": {
"descr": "Lists the Prevalidator workers and their status.",
"args": [],
"ret": "Array"
},
"item": {
"name": "chain_id",
"descr": "A chain identifier. This is either a chain hash in Base58Check notation or a one the predefined aliases: 'main', 'test'."
}
},
"/workers/prevalidators/{}": {
"GET": {
"descr": "Introspect the state of prevalidator workers.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/mempool": {
"props": [
"ban_operation",
"filter",
"monitor_operations",
"pending_operations",
"request_operations",
"unban_all_operations",
"unban_operation"
]
},
"/chains/{}/mempool/ban_operation": {
"POST": {
"descr": "Remove an operation from the mempool if present, reverting its effect if it was applied. Add it to the set of banned operations to prevent it from being fetched/processed/injected in the future. Note: If the baker has already received the operation, then it's necessary to restart it to flush the operation from it.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/mempool/filter": {
"GET": {
"descr": "Get the configuration of the mempool filter. The minimal_fees are in mutez. Each field minimal_nanotez_per_xxx is a rational number given as a numerator and a denominator, e.g. \"minimal_nanotez_per_gas_unit\": [ \"100\", \"1\" ].",
"args": [
{
"name": "include_default",
"descr": "Show fields equal to their default value (set by default)"
}
],
"ret": "Object"
},
"POST": {
"descr": "Set the configuration of the mempool filter. **If any of the fields is absent from the input JSON, then it is set to the default value for this field (i.e. its value in the default configuration), even if it previously had a different value.** If the input JSON does not describe a valid configuration, then the configuration is left unchanged. Also return the new configuration (which may differ from the input if it had omitted fields or was invalid). You may call [./octez-client rpc get '/chains/main/mempool/filter?include_default=true'] to see an example of JSON describing a valid configuration.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/mempool/monitor_operations": {
"GET": {
"descr": "Monitor the mempool operations.",
"args": [
{
"name": "applied",
"descr": "Include applied operations (set by default)"
},
{
"name": "refused",
"descr": "Include refused operations"
},
{
"name": "outdated",
"descr": "Include outdated operations"
},
{
"name": "branch_refused",
"descr": "Include branch refused operations"
},
{
"name": "branch_delayed",
"descr": "Include branch delayed operations (set by default)"
}
],
"ret": "Array"
}
},
"/chains/{}/mempool/pending_operations": {
"GET": {
"descr": "List the prevalidated operations.",
"args": [
{
"name": "version",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "applied",
"descr": "Include applied operations (true by default)"
},
{
"name": "refused",
"descr": "Include refused operations (true by default)"
},
{
"name": "outdated",
"descr": "Include outdated operations (true by default)"
},
{
"name": "branch_refused",
"descr": "Include branch refused operations (true by default)"
},
{
"name": "branch_delayed",
"descr": "Include branch delayed operations (true by default)"
}
],
"ret": "Object"
}
},
"/chains/{}/mempool/request_operations": {
"POST": {
"descr": "Request the operations of our peers or a specific peer if specified via a query parameter.",
"args": [
{
"name": "peer_id",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/mempool/unban_all_operations": {
"POST": {
"descr": "Clear the set of banned operations.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/mempool/unban_operation": {
"POST": {
"descr": "Remove an operation from the set of banned operations (nothing happens if it was not banned).",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}": {
"GET": {
"descr": "All the information about a block. The associated metadata may not be present depending on the history mode and block's distance from the head.",
"args": [
{
"name": "force_metadata",
"descr": "DEPRECATED: Forces to recompute the operations metadata if it was considered as too large."
},
{
"name": "metadata",
"descr": "Specifies whether or not if the operations metadata should be returned. To get the metadata, even if it is needed to recompute them, use \"always\". To avoid getting the metadata, use \"never\". By default, the metadata will be returned depending on the node's metadata size limit policy."
}
],
"ret": "Object"
},
"props": [
"context",
"hash",
"header",
"helpers",
"live_blocks",
"metadata",
"metadata_hash",
"operation_hashes",
"operation_metadata_hashes",
"operations",
"operations_metadata_hash",
"protocols",
"votes"
]
},
"/chains/{}/blocks/{}/context": {
"props": [
"big_maps",
"cache",
"constants",
"contracts",
"dal",
"delegates",
"liquidity_baking",
"merkle_tree",
"merkle_tree_v2",
"nonces",
"raw",
"sapling",
"sc_rollup",
"seed",
"seed_computation",
"selected_snapshot",
"tx_rollup"
]
},
"/chains/{}/blocks/{}/context/big_maps": {
"item": {
"name": "big_map_id",
"descr": "A big map identifier"
}
},
"/chains/{}/blocks/{}/context/big_maps/{}": {
"GET": {
"descr": "Get the (optionally paginated) list of values in a big map. Order of values is unspecified, but is guaranteed to be consistent.",
"args": [
{
"name": "offset",
"descr": "Skip the first [offset] values. Useful in combination with [length] for pagination."
},
{
"name": "length",
"descr": "Only retrieve [length] values. Useful in combination with [offset] for pagination."
}
],
"ret": "Array"
},
"item": {
"name": "script_expr",
"descr": "script_expr (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/big_maps/{}/{}": {
"GET": {
"descr": "Access the value associated with a key in a big map.",
"args": [],
"ret": "Object"
},
"props": [
"normalized"
]
},
"/chains/{}/blocks/{}/context/big_maps/{}/{}/normalized": {
"POST": {
"descr": "Access the value associated with a key in a big map, normalize the output using the requested unparsing mode.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/cache": {
"props": [
"contracts"
]
},
"/chains/{}/blocks/{}/context/cache/contracts": {
"props": [
"all",
"rank",
"size",
"size_limit"
]
},
"/chains/{}/blocks/{}/context/cache/contracts/all": {
"GET": {
"descr": "Return the list of cached contracts",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/cache/contracts/rank": {
"POST": {
"descr": "Return the number of cached contracts older than the provided contract",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/cache/contracts/size": {
"GET": {
"descr": "Return the size of the contract cache",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/cache/contracts/size_limit": {
"GET": {
"descr": "Return the size limit of the contract cache",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/constants": {
"GET": {
"descr": "All constants",
"args": [],
"ret": "Object"
},
"props": [
"errors",
"parametric"
]
},
"/chains/{}/blocks/{}/context/constants/errors": {
"GET": {
"descr": "Schema for all the RPC errors from this protocol version",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/constants/parametric": {
"GET": {
"descr": "Parametric constants",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts": {
"GET": {
"descr": "All existing contracts (excluding empty implicit contracts).",
"args": [],
"ret": "Array"
},
"item": {
"name": "contract_id",
"descr": "A contract identifier encoded in b58check."
}
},
"/chains/{}/blocks/{}/context/contracts/{}": {
"GET": {
"descr": "Access the complete status of a contract.",
"args": [
{
"name": "normalize_types",
"descr": "Whether types should be normalized (annotations removed, combs flattened) or kept as they appeared in the original script."
}
],
"ret": "Object"
},
"props": [
"balance",
"balance_and_frozen_bonds",
"big_map_get",
"counter",
"delegate",
"entrypoints",
"frozen_bonds",
"manager_key",
"script",
"single_sapling_get_diff",
"storage"
]
},
"/chains/{}/blocks/{}/context/contracts/{}/balance": {
"GET": {
"descr": "Access the spendable balance of a contract, excluding frozen bonds.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/balance_and_frozen_bonds": {
"GET": {
"descr": "Access the sum of the spendable balance and frozen bonds of a contract. This sum is part of the contract's stake, and it is exactly the contract's stake if the contract is not a delegate.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/big_map_get": {
"POST": {
"descr": "Access the value associated with a key in a big map of the contract (deprecated).",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/counter": {
"GET": {
"descr": "Access the counter of a contract, if any.",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/delegate": {
"GET": {
"descr": "Access the delegate of a contract, if any.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/entrypoints": {
"GET": {
"descr": "Return the list of entrypoints of the contract",
"args": [
{
"name": "normalize_types",
"descr": "Whether types should be normalized (annotations removed, combs flattened) or kept as they appeared in the original script."
}
],
"ret": "Object"
},
"item": {
"name": "entrypoint",
"descr": "A Michelson entrypoint (string of length < 32)"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/entrypoints/{}": {
"GET": {
"descr": "Return the type of the given entrypoint of the contract",
"args": [
{
"name": "normalize_types",
"descr": "Whether types should be normalized (annotations removed, combs flattened) or kept as they appeared in the original script."
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/frozen_bonds": {
"GET": {
"descr": "Access the frozen bonds of a contract.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/manager_key": {
"GET": {
"descr": "Access the manager of an implicit contract.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/script": {
"GET": {
"descr": "Access the code and data of the contract.",
"args": [],
"ret": "Object"
},
"props": [
"normalized"
]
},
"/chains/{}/blocks/{}/context/contracts/{}/script/normalized": {
"POST": {
"descr": "Access the script of the contract and normalize it using the requested unparsing mode.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/single_sapling_get_diff": {
"GET": {
"descr": "Returns the root and a diff of a state starting from an optional offset which is zero by default.",
"args": [
{
"name": "offset_commitment",
"descr": "Commitments and ciphertexts are returned from the specified offset up to the most recent."
},
{
"name": "offset_nullifier",
"descr": "Nullifiers are returned from the specified offset up to the most recent."
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/storage": {
"GET": {
"descr": "Access the data of the contract.",
"args": [],
"ret": "Object"
},
"props": [
"normalized",
"paid_space",
"used_space"
]
},
"/chains/{}/blocks/{}/context/contracts/{}/storage/normalized": {
"POST": {
"descr": "Access the data of the contract and normalize it using the requested unparsing mode.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/storage/paid_space": {
"GET": {
"descr": "Access the paid storage space of the contract.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/contracts/{}/storage/used_space": {
"GET": {
"descr": "Access the used storage space of the contract.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/dal": {
"props": [
"confirmed_slots_history"
]
},
"/chains/{}/blocks/{}/context/dal/confirmed_slots_history": {
"GET": {
"descr": "Returns the value of the DAL confirmed slots history skip list if DAL is enabled, or [None] otherwise.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates": {
"GET": {
"descr": "Lists all registered delegates by default. The arguments `active`, `inactive`, `with_minimal_stake`, and `without_minimal_stake` allow to enumerate only the delegates that are active, inactive, have at least a minimal stake to participate in consensus and in governance, or do not have such a minimal stake, respectively. Note, setting these arguments to false has no effect.",
"args": [
{
"name": "active",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "inactive",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "with_minimal_stake",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "without_minimal_stake",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/delegates/{}": {
"GET": {
"descr": "Everything about a delegate.",
"args": [],
"ret": "Object"
},
"props": [
"consensus_key",
"current_frozen_deposits",
"deactivated",
"delegated_balance",
"delegated_contracts",
"frozen_deposits",
"frozen_deposits_limit",
"full_balance",
"grace_period",
"participation",
"staking_balance",
"voting_info",
"voting_power"
]
},
"/chains/{}/blocks/{}/context/delegates/{}/consensus_key": {
"GET": {
"descr": "The active consensus key for a given delegate and the pending consensus keys.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/current_frozen_deposits": {
"GET": {
"descr": "Returns the current amount of the frozen deposits (in mutez).",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/deactivated": {
"GET": {
"descr": "Tells whether the delegate is currently tagged as deactivated or not.",
"args": [],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/delegated_balance": {
"GET": {
"descr": "Returns the sum (in mutez) of all balances of all the contracts that delegate to a given delegate. This excludes the delegate's own balance, its frozen deposits and its frozen bonds.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/delegated_contracts": {
"GET": {
"descr": "Returns the list of contracts that delegate to a given delegate.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/frozen_deposits": {
"GET": {
"descr": "Returns the initial amount (that is, at the beginning of a cycle) of the frozen deposits (in mutez). This amount is the same as the current amount of the frozen deposits, unless the delegate has been punished.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/frozen_deposits_limit": {
"GET": {
"descr": "Returns the frozen deposits limit for the given delegate or none if no limit is set.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/full_balance": {
"GET": {
"descr": "Returns the full balance (in mutez) of a given delegate, including the frozen deposits and the frozen bonds. It does not include its delegated balance.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/grace_period": {
"GET": {
"descr": "Returns the cycle by the end of which the delegate might be deactivated if she fails to execute any delegate action. A deactivated delegate might be reactivated (without loosing any stake) by simply re-registering as a delegate. For deactivated delegates, this value contains the cycle at which they were deactivated.",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/participation": {
"GET": {
"descr": "Returns cycle and level participation information. In particular this indicates, in the field 'expected_cycle_activity', the number of slots the delegate is expected to have in the cycle based on its active stake. The field 'minimal_cycle_activity' indicates the minimal endorsing slots in the cycle required to get endorsing rewards. It is computed based on 'expected_cycle_activity. The fields 'missed_slots' and 'missed_levels' indicate the number of missed endorsing slots and missed levels (for endorsing) in the cycle so far. 'missed_slots' indicates the number of missed endorsing slots in the cycle so far. The field 'remaining_allowed_missed_slots' indicates the remaining amount of endorsing slots that can be missed in the cycle before forfeiting the rewards. Finally, 'expected_endorsing_rewards' indicates the endorsing rewards that will be distributed at the end of the cycle if activity at that point will be greater than the minimal required; if the activity is already known to be below the required minimum, then the rewards are zero.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/staking_balance": {
"GET": {
"descr": "Returns the total amount of tokens (in mutez) delegated to a given delegate. This includes the balances of all the contracts that delegate to it, but also the balance of the delegate itself, its frozen deposits, and its frozen bonds.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/voting_info": {
"GET": {
"descr": "Returns the delegate info (e.g. voting power) found in the listings of the current voting period.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/delegates/{}/voting_power": {
"GET": {
"descr": "The voting power in the vote listings for a given delegate.",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/liquidity_baking": {
"props": [
"cpmm_address"
]
},
"/chains/{}/blocks/{}/context/liquidity_baking/cpmm_address": {
"GET": {
"descr": "Liquidity baking CPMM address",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/merkle_tree": {
"GET": {
"descr": "Returns the merkle tree of a piece of context.",
"args": [
{
"name": "holey",
"descr": "Send only hashes, omit data of key"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/merkle_tree_v2": {
"GET": {
"descr": "Returns the Irmin merkle tree of a piece of context.",
"args": [
{
"name": "holey",
"descr": "Send only hashes, omit data of key"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/nonces": {
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/nonces/{}": {
"GET": {
"descr": "Info about the nonce of a previous block.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw": {
"props": [
"bytes",
"json"
]
},
"/chains/{}/blocks/{}/context/raw/bytes": {
"GET": {
"descr": "Returns the raw context.",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sapling": {
"item": {
"name": "sapling_state_id",
"descr": "A sapling state identifier"
}
},
"/chains/{}/blocks/{}/context/sapling/{}": {
"props": [
"get_diff"
]
},
"/chains/{}/blocks/{}/context/sapling/{}/get_diff": {
"GET": {
"descr": "Returns the root and a diff of a state starting from an optional offset which is zero by default.",
"args": [
{
"name": "offset_commitment",
"descr": "Commitments and ciphertexts are returned from the specified offset up to the most recent."
},
{
"name": "offset_nullifier",
"descr": "Nullifiers are returned from the specified offset up to the most recent."
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup": {
"GET": {
"descr": "List of all originated smart contract rollups",
"args": [],
"ret": "Array"
},
"item": {
"name": "Sc_rollup_hash",
"descr": "Sc_rollup_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}": {
"props": [
"boot_sector",
"can_be_cemented",
"commitment",
"conflicts",
"dal_slot_subscriptions",
"game",
"genesis_info",
"inbox",
"initial_pvm_state_hash",
"kind",
"last_cemented_commitment_hash_with_level",
"staker",
"timeout",
"timeout_reached"
]
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/boot_sector": {
"GET": {
"descr": "Boot sector of smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/can_be_cemented": {
"GET": {
"descr": "Returns true if and only if the provided commitment can be cemented.",
"args": [
{
"name": "commitment",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/commitment": {
"item": {
"name": "commitment_hash",
"descr": "commitment_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/commitment/{}": {
"GET": {
"descr": "Commitment for a smart contract rollup from its hash",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/conflicts": {
"GET": {
"descr": "List of stakers in conflict with the given staker",
"args": [
{
"name": "staker",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/dal_slot_subscriptions": {
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/dal_slot_subscriptions/{}": {
"GET": {
"descr": "List of slot indices to which a rollup is subscribed to at a given level",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/game": {
"GET": {
"descr": "Ongoing refufation game for a given staker",
"args": [
{
"name": "staker",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/genesis_info": {
"GET": {
"descr": "Genesis information (level and commitment hash) for a smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/inbox": {
"GET": {
"descr": "Inbox for a smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/initial_pvm_state_hash": {
"GET": {
"descr": "Initial PVM state hash of smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/kind": {
"GET": {
"descr": "Kind of smart-contract rollup",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/last_cemented_commitment_hash_with_level": {
"GET": {
"descr": "Level and hash of the last cemented commitment for a smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/staker": {
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/staker/{}": {
"props": [
"staked_on_commitment"
]
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/staker/{}/staked_on_commitment": {
"GET": {
"descr": "The hash of the commitment on which the operator has staked on for a smart-contract rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/timeout": {
"GET": {
"descr": "Returns the timeout of players.",
"args": [
{
"name": "staker1",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "staker2",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/sc_rollup/{}/timeout_reached": {
"GET": {
"descr": "Returns whether the timeout creates a result for the game.",
"args": [
{
"name": "staker1",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "staker2",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/seed": {
"POST": {
"descr": "Seed of the cycle to which the block belongs.",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/seed_computation": {
"GET": {
"descr": "Seed computation status",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/selected_snapshot": {
"GET": {
"descr": "Returns the index of the selected snapshot for the current cycle or for the specific `cycle` passed as argument, if any.",
"args": [
{
"name": "cycle",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/tx_rollup": {
"item": {
"name": "tx_rollup_id",
"descr": "A tx rollup identifier encoded in b58check."
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}": {
"props": [
"commitment",
"has_bond",
"inbox",
"pending_bonded_commitments",
"state"
]
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/commitment": {
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/commitment/{}": {
"GET": {
"descr": "Return the commitment for a level, if any",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/has_bond": {
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/has_bond/{}": {
"GET": {
"descr": "Returns true if the public key hash already deposited a bond for the given rollup",
"args": [],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/inbox": {
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/inbox/{}": {
"GET": {
"descr": "Get the inbox of a transaction rollup",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/pending_bonded_commitments": {
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/pending_bonded_commitments/{}": {
"GET": {
"descr": "Get the number of pending bonded commitments for a pkh on a rollup",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/tx_rollup/{}/state": {
"GET": {
"descr": "Access the state of a rollup.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/hash": {
"GET": {
"descr": "The block's hash, its unique identifier.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/header": {
"GET": {
"descr": "The whole block header.",
"args": [],
"ret": "Object"
},
"props": [
"protocol_data",
"raw",
"shell"
]
},
"/chains/{}/blocks/{}/header/protocol_data": {
"GET": {
"descr": "The version-specific fragment of the block header.",
"args": [],
"ret": "Object"
},
"props": [
"raw"
]
},
"/chains/{}/blocks/{}/header/protocol_data/raw": {
"GET": {
"descr": "The version-specific fragment of the block header (unparsed).",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/header/raw": {
"GET": {
"descr": "The whole block header (unparsed).",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/header/shell": {
"GET": {
"descr": "The shell-specific fragment of the block header.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers": {
"props": [
"baking_rights",
"complete",
"current_level",
"endorsing_rights",
"forge",
"forge_block_header",
"levels_in_current_cycle",
"parse",
"preapply",
"round",
"scripts",
"validators"
]
},
"/chains/{}/blocks/{}/helpers/baking_rights": {
"GET": {
"descr": "Retrieves the list of delegates allowed to bake a block.\nBy default, it gives the best baking opportunities (in terms of rounds) for bakers that have at least one opportunity below the 64th round for the next block.\nParameters `level` and `cycle` can be used to specify the (valid) level(s) in the past or future at which the baking rights have to be returned.\nParameter `delegate` can be used to restrict the results to the given delegates. Parameter `consensus_key` can be used to restrict the results to the given consensus_keys. If parameter `all` is set, all the baking opportunities for each baker at each level are returned, instead of just the first one.\nReturns the list of baking opportunities up to round 64. Also returns the minimal timestamps that correspond to these opportunities. The timestamps are omitted for levels in the past, and are only estimates for levels higher that the next block's, based on the hypothesis that all predecessor blocks were baked at the first round.",
"args": [
{
"name": "level",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "cycle",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "delegate",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "consensus_key",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "max_round",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "all",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/helpers/complete": {
"item": {
"name": "prefix",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
},
"/chains/{}/blocks/{}/helpers/complete/{}": {
"GET": {
"descr": "Try to complete a prefix of a Base58Check-encoded data. This RPC is actually able to complete hashes of block, operations, public_keys and contracts.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/helpers/current_level": {
"GET": {
"descr": "Returns the level of the interrogated block, or the one of a block located `offset` blocks after it in the chain. For instance, the next block if `offset` is 1. The offset cannot be negative.",
"args": [
{
"name": "offset",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/endorsing_rights": {
"GET": {
"descr": "Retrieves the delegates allowed to endorse a block.\nBy default, it gives the endorsing power for delegates that have at least one endorsing slot for the next block.\nParameters `level` and `cycle` can be used to specify the (valid) level(s) in the past or future at which the endorsing rights have to be returned. Parameter `delegate` can be used to restrict the results to the given delegates.\nParameter `consensus_key` can be used to restrict the results to the given consensus_keys. \nReturns the smallest endorsing slots and the endorsing power. Also returns the minimal timestamp that corresponds to endorsing at the given level. The timestamps are omitted for levels in the past, and are only estimates for levels higher that the next block's, based on the hypothesis that all predecessor blocks were baked at the first round.",
"args": [
{
"name": "level",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "cycle",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "delegate",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "consensus_key",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/helpers/forge": {
"props": [
"operations",
"protocol_data",
"tx_rollup"
]
},
"/chains/{}/blocks/{}/helpers/forge/operations": {
"POST": {
"descr": "Forge an operation",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/helpers/forge/protocol_data": {
"POST": {
"descr": "Forge the protocol-specific part of a block header",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup": {
"props": [
"commitment",
"inbox",
"withdraw"
]
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/commitment": {
"props": [
"merkle_tree_hash",
"merkle_tree_path",
"message_result_hash"
]
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/commitment/merkle_tree_hash": {
"POST": {
"descr": "Compute the merkle tree hash of a commitment",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/commitment/merkle_tree_path": {
"POST": {
"descr": "Compute a path of a message result hash in the commitment merkle tree",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/commitment/message_result_hash": {
"POST": {
"descr": "Compute the message result hash",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/inbox": {
"props": [
"merkle_tree_hash",
"merkle_tree_path",
"message_hash"
]
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/inbox/merkle_tree_hash": {
"POST": {
"descr": "Compute the merkle tree hash of an inbox",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/inbox/merkle_tree_path": {
"POST": {
"descr": "Compute a path of an inbox message in a merkle tree",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/inbox/message_hash": {
"POST": {
"descr": "Compute the hash of a message",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/withdraw": {
"props": [
"withdraw_list_hash"
]
},
"/chains/{}/blocks/{}/helpers/forge/tx_rollup/withdraw/withdraw_list_hash": {
"POST": {
"descr": "Compute the hash of a withdraw list",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/forge_block_header": {
"POST": {
"descr": "Forge a block header",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/levels_in_current_cycle": {
"GET": {
"descr": "Levels of a cycle",
"args": [
{
"name": "offset",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/parse": {
"props": [
"block",
"operations"
]
},
"/chains/{}/blocks/{}/helpers/parse/block": {
"POST": {
"descr": "Parse a block",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/parse/operations": {
"POST": {
"descr": "Parse operations",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/helpers/preapply": {
"props": [
"block",
"operations"
]
},
"/chains/{}/blocks/{}/helpers/preapply/block": {
"POST": {
"descr": "Simulate the validation of a block that would contain the given operations and return the resulting fitness and context hash.",
"args": [
{
"name": "sort",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "timestamp",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/preapply/operations": {
"POST": {
"descr": "Simulate the application of the operations with the context of the given block and return the result of each operation application.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/helpers/round": {
"GET": {
"descr": "Returns the round of the interrogated block, or the one of a block located `offset` blocks after in the chain (or before when negative). For instance, the next block if `offset` is 1.",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/helpers/scripts": {
"props": [
"entrypoint",
"entrypoints",
"normalize_data",
"normalize_script",
"normalize_type",
"pack_data",
"run_code",
"run_operation",
"run_script_view",
"run_view",
"script_size",
"simulate_operation",
"trace_code",
"typecheck_code",
"typecheck_data"
]
},
"/chains/{}/blocks/{}/helpers/scripts/entrypoint": {
"POST": {
"descr": "Return the type of the given entrypoint",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/entrypoints": {
"POST": {
"descr": "Return the list of entrypoints of the given script",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/normalize_data": {
"POST": {
"descr": "Normalizes some data expression using the requested unparsing mode",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/normalize_script": {
"POST": {
"descr": "Normalizes a Michelson script using the requested unparsing mode",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/normalize_type": {
"POST": {
"descr": "Normalizes some Michelson type by expanding `pair a b c` as `pair a (pair b c)",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/pack_data": {
"POST": {
"descr": "Computes the serialized version of some data expression using the same algorithm as script instruction PACK",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/run_code": {
"POST": {
"descr": "Run a piece of code in the current context",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/run_operation": {
"POST": {
"descr": "Run an operation with the context of the given block and without signature checks. Return the operation application result, including the consumed gas. This RPC does not support consensus operations.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/run_script_view": {
"POST": {
"descr": "Simulate a call to a michelson view",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/run_view": {
"POST": {
"descr": "Simulate a call to a view following the TZIP-4 standard. See https://gitlab.com/tezos/tzip/-/blob/master/proposals/tzip-4/tzip-4.md#view-entrypoints.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/script_size": {
"POST": {
"descr": "Compute the size of a script in the current context",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/simulate_operation": {
"POST": {
"descr": "Simulate running an operation at some future moment (based on the number of blocks given in the `latency` argument), and return the operation application result. The result is the same as run_operation except for the consumed gas, which depends on the contents of the cache at that future moment. This RPC estimates future gas consumption by trying to predict the state of the cache using some heuristics.",
"args": [
{
"name": "successor_level",
"descr": "If true, the simulation is done on the successor level of the current context."
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/trace_code": {
"POST": {
"descr": "Run a piece of code in the current context, keeping a trace",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/typecheck_code": {
"POST": {
"descr": "Typecheck a piece of code in the current context",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/scripts/typecheck_data": {
"POST": {
"descr": "Check that some data expression is well formed and of a given type in the current context",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/helpers/validators": {
"GET": {
"descr": "Retrieves the level, the endorsement slots and the public key hash of each delegate allowed to endorse a block.\nBy default, it provides this information for the next level.\nParameter `level` can be used to specify the (valid) level(s) in the past or future at which the endorsement rights have to be returned. Parameter `delegate` can be used to restrict the results results to the given delegates. Parameter `consensus_key` can be used to restrict the results to the given consensus_keys.\n",
"args": [
{
"name": "level",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "delegate",
"descr": "\u00af\\_(\u30c4)_/\u00af"
},
{
"name": "consensus_key",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/live_blocks": {
"GET": {
"descr": "List the ancestors of the given block which, if referred to as the branch in an operation header, are recent enough for that operation to be included in the current block.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/metadata": {
"GET": {
"descr": "All the metadata associated to the block.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/metadata_hash": {
"GET": {
"descr": "Hash of the metadata associated to the block. This is only set on blocks starting from environment V1.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/operation_hashes": {
"GET": {
"descr": "The hashes of all the operations included in the block.",
"args": [],
"ret": "Array"
},
"item": {
"name": "list_offset",
"descr": "Index `n` of the requested validation pass."
}
},
"/chains/{}/blocks/{}/operation_hashes/{}": {
"GET": {
"descr": "All the operations included in `n-th` validation pass of the block.",
"args": [],
"ret": "Array"
},
"item": {
"name": "operation_offset",
"descr": "Index `m` of the requested operation in its validation pass."
}
},
"/chains/{}/blocks/{}/operation_hashes/{}/{}": {
"GET": {
"descr": "The hash of then `m-th` operation in the `n-th` validation pass of the block.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/operation_metadata_hashes": {
"GET": {
"descr": "The hashes of all the operation metadata included in the block. This is only set on blocks starting from environment V1.",
"args": [],
"ret": "Array"
},
"item": {
"name": "list_offset",
"descr": "Index `n` of the requested validation pass."
}
},
"/chains/{}/blocks/{}/operation_metadata_hashes/{}": {
"GET": {
"descr": "All the operation metadata included in `n-th` validation pass of the block. This is only set on blocks starting from environment V1.",
"args": [],
"ret": "Array"
},
"item": {
"name": "operation_offset",
"descr": "Index `m` of the requested operation in its validation pass."
}
},
"/chains/{}/blocks/{}/operation_metadata_hashes/{}/{}": {
"GET": {
"descr": "The hash of then `m-th` operation metadata in the `n-th` validation pass of the block. This is only set on blocks starting from environment V1.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/operations": {
"GET": {
"descr": "All the operations included in the block.",
"args": [
{
"name": "force_metadata",
"descr": "DEPRECATED: Forces to recompute the operations metadata if it was considered as too large."
},
{
"name": "metadata",
"descr": "Specifies whether or not if the operations metadata should be returned. To get the metadata, even if it is needed to recompute them, use \"always\". To avoid getting the metadata, use \"never\". By default, the metadata will be returned depending on the node's metadata size limit policy."
}
],
"ret": "Array"
},
"item": {
"name": "list_offset",
"descr": "Index `n` of the requested validation pass."
}
},
"/chains/{}/blocks/{}/operations/{}": {
"GET": {
"descr": "All the operations included in `n-th` validation pass of the block.",
"args": [
{
"name": "force_metadata",
"descr": "DEPRECATED: Forces to recompute the operations metadata if it was considered as too large."
},
{
"name": "metadata",
"descr": "Specifies whether or not if the operations metadata should be returned. To get the metadata, even if it is needed to recompute them, use \"always\". To avoid getting the metadata, use \"never\". By default, the metadata will be returned depending on the node's metadata size limit policy."
}
],
"ret": "Array"
},
"item": {
"name": "operation_offset",
"descr": "Index `m` of the requested operation in its validation pass."
}
},
"/chains/{}/blocks/{}/operations/{}/{}": {
"GET": {
"descr": "The `m-th` operation in the `n-th` validation pass of the block.",
"args": [
{
"name": "force_metadata",
"descr": "DEPRECATED: Forces to recompute the operations metadata if it was considered as too large."
},
{
"name": "metadata",
"descr": "Specifies whether or not if the operations metadata should be returned. To get the metadata, even if it is needed to recompute them, use \"always\". To avoid getting the metadata, use \"never\". By default, the metadata will be returned depending on the node's metadata size limit policy."
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/operations_metadata_hash": {
"GET": {
"descr": "The root hash of the operations metadata from the block. This is only set on blocks starting from environment V1.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/protocols": {
"GET": {
"descr": "Current and next protocol.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/votes": {
"props": [
"ballot_list",
"ballots",
"current_period",
"current_proposal",
"current_quorum",
"listings",
"proposals",
"successor_period",
"total_voting_power"
]
},
"/chains/{}/blocks/{}/votes/ballot_list": {
"GET": {
"descr": "Ballots casted so far during a voting period.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/votes/ballots": {
"GET": {
"descr": "Sum of ballots casted so far during a voting period.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/votes/current_period": {
"GET": {
"descr": "Returns the voting period (index, kind, starting position) and related information (position, remaining) of the interrogated block.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/votes/current_proposal": {
"GET": {
"descr": "Current proposal under evaluation.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/votes/current_quorum": {
"GET": {
"descr": "Current expected quorum.",
"args": [],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/votes/listings": {
"GET": {
"descr": "List of delegates with their voting power.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/votes/proposals": {
"GET": {
"descr": "List of proposals with number of supporters.",
"args": [],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/votes/successor_period": {
"GET": {
"descr": "Returns the voting period (index, kind, starting position) and related information (position, remaining) of the next block.Useful to craft operations that will be valid in the next block.",
"args": [],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/votes/total_voting_power": {
"GET": {
"descr": "Total voting power in the voting listings.",
"args": [],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"active_delegate_with_one_roll",
"big_maps",
"block_round",
"commitments",
"consensus_keys",
"contracts",
"cycle",
"dal",
"delegates",
"endorsement_branch",
"first_level_of_protocol",
"global_constant",
"grand_parent_branch",
"last_snapshot",
"liquidity_baking_cpmm_address",
"liquidity_baking_escape_ema",
"pending_migration_balance_updates",
"pending_migration_operation_results",
"ramp_up",
"sapling",
"sc_rollup",
"seed_status",
"staking_balance",
"ticket_balance",
"tx_rollup",
"vdf_challenge",
"votes",
"zk_rollup"
]
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"current",
"snapshot"
]
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll/current": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll/current/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll/snapshot": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "int",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll/snapshot/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/active_delegate_with_one_roll/snapshot/{}/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"index",
"next"
]
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "big_map_id",
"descr": "A big map identifier"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"contents",
"key_type",
"total_bytes",
"value_type"
]
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}/contents": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "script_expr",
"descr": "script_expr (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}/contents/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}/key_type": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}/total_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/index/{}/value_type": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/big_maps/next": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/block_round": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/commitments": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "Blinded public key hash",
"descr": "Blinded public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/commitments/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/consensus_keys": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/consensus_keys/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"global_counter",
"index"
]
},
"/chains/{}/blocks/{}/context/raw/json/contracts/global_counter": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "contract_id",
"descr": "A contract identifier encoded in b58check."
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"balance",
"bond_id_index",
"code",
"consensus_key",
"counter",
"delegate",
"delegate_desactivation",
"delegated",
"frozen_deposits",
"frozen_deposits_limit",
"inactive_delegate",
"manager",
"missed_endorsements",
"paid_bytes",
"storage",
"total_frozen_bonds",
"used_bytes"
]
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/balance": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/bond_id_index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "bond_id",
"descr": "A bond identifier."
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/bond_id_index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"frozen_bonds"
]
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/bond_id_index/{}/frozen_bonds": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/code": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/consensus_key": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"active",
"pendings"
]
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/consensus_key/active": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/consensus_key/pendings": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_cycle",
"descr": "A cycle integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/consensus_key/pendings/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/counter": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/delegate": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/delegate_desactivation": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/delegated": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "contract_id",
"descr": "A contract identifier encoded in b58check."
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/delegated/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/frozen_deposits": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/frozen_deposits_limit": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/inactive_delegate": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/manager": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/missed_endorsements": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/paid_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/storage": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/total_frozen_bonds": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/contracts/index/{}/used_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_cycle",
"descr": "A cycle integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"delegate_sampler_state",
"nonces",
"random_seed",
"selected_stake_distribution",
"slashed_deposits",
"total_active_stake"
]
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/delegate_sampler_state": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/nonces": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/nonces/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/random_seed": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/selected_stake_distribution": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/slashed_deposits": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/slashed_deposits/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/slashed_deposits/{}/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/cycle/{}/total_active_stake": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/dal": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"level",
"slots_history"
]
},
"/chains/{}/blocks/{}/context/raw/json/dal/level": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/dal/level/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"slots"
]
},
"/chains/{}/blocks/{}/context/raw/json/dal/level/{}/slots": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/raw/json/dal/slots_history": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/delegates": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/delegates/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/endorsement_branch": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/first_level_of_protocol": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/global_constant": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "script_expr",
"descr": "script_expr (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/global_constant/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/grand_parent_branch": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/last_snapshot": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/liquidity_baking_cpmm_address": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/liquidity_baking_escape_ema": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/pending_migration_balance_updates": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/raw/json/pending_migration_operation_results": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/raw/json/ramp_up": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"rewards"
]
},
"/chains/{}/blocks/{}/context/raw/json/ramp_up/rewards": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_cycle",
"descr": "A cycle integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/ramp_up/rewards/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"index",
"next"
]
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_state_id",
"descr": "A sapling state identifier"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"ciphertexts",
"commitments",
"commitments_size",
"memo_size",
"nullifiers_hashed",
"nullifiers_ordered",
"nullifiers_size",
"roots",
"roots_level",
"roots_pos",
"total_bytes"
]
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/ciphertexts": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_ciphertext_position",
"descr": "The position of a sapling ciphertext"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/ciphertexts/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/commitments": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_node_position",
"descr": "The position of a node in a sapling commitment tree"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/commitments/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/commitments_size": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/memo_size": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/nullifiers_hashed": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_nullifier",
"descr": "A sapling nullifier"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/nullifiers_hashed/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/nullifiers_ordered": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_nullifier_position",
"descr": "A sapling nullifier position"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/nullifiers_ordered/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/nullifiers_size": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/roots": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sapling_root",
"descr": "A sapling root"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/roots/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/roots_level": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/roots_pos": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/index/{}/total_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sapling/next": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"index"
]
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "sc_rollup_address",
"descr": "A smart contract rollup address."
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"boot_sector",
"commitment_added",
"commitment_stake_count",
"commitments",
"dal",
"game",
"game_timeout",
"genesis_info",
"inbox",
"kind",
"last_cemented_commitment",
"level_index",
"opponent",
"parameters_type",
"staker_count",
"stakers"
]
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/boot_sector": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitment_added": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "commitment_hash",
"descr": "commitment_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitment_added/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitment_stake_count": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "commitment_hash",
"descr": "commitment_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitment_stake_count/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitments": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "commitment_hash",
"descr": "commitment_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/commitments/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/dal": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"level"
]
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/dal/level": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/dal/level/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"slot_subscriptions"
]
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/dal/level/{}/slot_subscriptions": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/game": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "game_index",
"descr": "A pair of stakers that index a smart contract rollup refutation game."
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/game/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/game_timeout": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "game_index",
"descr": "A pair of stakers that index a smart contract rollup refutation game."
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/game_timeout/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/genesis_info": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/inbox": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/kind": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/last_cemented_commitment": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/level_index": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "level_index",
"descr": "The level index for applied outbox message records"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/level_index/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"applied_outbox_messages"
]
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/level_index/{}/applied_outbox_messages": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/opponent": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/opponent/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/parameters_type": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/staker_count": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/stakers": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/sc_rollup/index/{}/stakers/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/seed_status": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"current",
"snapshot"
]
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance/current": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance/current/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance/snapshot": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "int",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance/snapshot/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/staking_balance/snapshot/{}/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/ticket_balance": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"paid_bytes",
"table",
"used_bytes"
]
},
"/chains/{}/blocks/{}/context/raw/json/ticket_balance/paid_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/ticket_balance/table": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "script_expr",
"descr": "script_expr (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/ticket_balance/table/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/ticket_balance/used_bytes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "tx_rollup_id",
"descr": "A tx rollup identifier encoded in b58check."
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"bond",
"state",
"tx_level"
]
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/bond": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/bond/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"commitment"
]
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/bond/{}/commitment": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/state": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/tx_level": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "block_level",
"descr": "A level integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/tx_level/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"commitment",
"inbox",
"withdrawals"
]
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/tx_level/{}/commitment": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/tx_level/{}/inbox": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/tx_rollup/{}/tx_level/{}/withdrawals": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/vdf_challenge": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"ballots",
"current_period",
"current_proposal",
"listings",
"participation_ema",
"pred_period_kind",
"proposals",
"proposals_count",
"voting_power_in_listings"
]
},
"/chains/{}/blocks/{}/context/raw/json/votes/ballots": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/ballots/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/current_period": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/current_proposal": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/listings": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/listings/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/participation_ema": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/pred_period_kind": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/proposals": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "Protocol_hash",
"descr": "Protocol_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/proposals/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/proposals/{}/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Boolean"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/proposals_count": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "pkh",
"descr": "A Secp256k1 of a Ed25519 public key hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/proposals_count/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Integer"
}
},
"/chains/{}/blocks/{}/context/raw/json/votes/voting_power_in_listings": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "String"
}
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "Zk_rollup_hash",
"descr": "Zk_rollup_hash (Base58Check-encoded)"
}
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
},
"props": [
"account",
"pending_list",
"pending_operations"
]
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup/{}/account": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup/{}/pending_list": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Object"
}
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup/{}/pending_operations": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
},
"item": {
"name": "zkru_pending_op_position",
"descr": "The position of an operation in a pending operations list"
}
},
"/chains/{}/blocks/{}/context/raw/json/zk_rollup/{}/pending_operations/{}": {
"GET": {
"descr": "\u00af\\_(\u30c4)_/\u00af",
"args": [
{
"name": "depth",
"descr": "\u00af\\_(\u30c4)_/\u00af"
}
],
"ret": "Array"
}
}
}
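The endpoints above are path templates: each `{}` stands for a concrete segment such as a chain id (`main`), a block id (`head`), or a hash. As an illustrative sketch only (the node address, port, chain and block ids below are assumptions, not part of the listing), a `GET` endpoint such as `/chains/{}/blocks/{}/votes/ballots` can be queried over plain HTTP:

import requests  # assumes a node exposing the RPC on localhost:8732 (a common default)

resp = requests.get("http://localhost:8732/chains/main/blocks/head/votes/ballots")
resp.raise_for_status()
print(resp.json())  # the "Array" return type documented above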
|
793ac074c1492d0b3d19270ab888655093533219
|
5eeff8b2ab480b2354f3b134f60ded9ccb97e088
|
/train_util.py
|
1f24e280ddc5b1631dd61c7935845cb248519782
|
[
"MIT"
] |
permissive
|
fuy34/superpixel_fcn
|
5e7104ecc05a0ee48509d6a655ddca29d3dabf67
|
ae81e171a64dc9ed26a039a1a52b87b5fe744cf1
|
refs/heads/master
| 2022-05-25T23:47:37.326265
| 2022-05-04T18:32:20
| 2022-05-04T18:32:20
| 248,400,279
| 358
| 87
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,161
|
py
|
train_util.py
|
import torch
import torch.nn.functional as F
import numpy as np
from skimage.segmentation import mark_boundaries
import cv2
import sys
sys.path.append('./third_party/cython')
from connectivity import enforce_connectivity
def init_spixel_grid(args, b_train=True):
if b_train:
img_height, img_width = args.train_img_height, args.train_img_width
else:
img_height, img_width = args.input_img_height, args.input_img_width
# get spixel id for the final assignment
n_spixl_h = int(np.floor(img_height/args.downsize))
n_spixl_w = int(np.floor(img_width/args.downsize))
spixel_height = int(img_height / (1. * n_spixl_h))
spixel_width = int(img_width / (1. * n_spixl_w))
spix_values = np.int32(np.arange(0, n_spixl_w * n_spixl_h).reshape((n_spixl_h, n_spixl_w)))
spix_idx_tensor_ = shift9pos(spix_values)
spix_idx_tensor = np.repeat(
np.repeat(spix_idx_tensor_, spixel_height,axis=1), spixel_width, axis=2)
torch_spix_idx_tensor = torch.from_numpy(
np.tile(spix_idx_tensor, (args.batch_size, 1, 1, 1))).type(torch.float).cuda()
curr_img_height = int(np.floor(img_height))
curr_img_width = int(np.floor(img_width))
# pixel coord
all_h_coords = np.arange(0, curr_img_height, 1)
all_w_coords = np.arange(0, curr_img_width, 1)
curr_pxl_coord = np.array(np.meshgrid(all_h_coords, all_w_coords, indexing='ij'))
coord_tensor = np.concatenate([curr_pxl_coord[1:2, :, :], curr_pxl_coord[:1, :, :]])
all_XY_feat = (torch.from_numpy(
np.tile(coord_tensor, (args.batch_size, 1, 1, 1)).astype(np.float32)).cuda())
return torch_spix_idx_tensor, all_XY_feat
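# Illustrative usage (added note; the `args` fields are only those the function itself reads):
#   spixel_ids, xy_feat = init_spixel_grid(args, b_train=True)
#   # spixel_ids: (batch_size, 9, train_img_height, train_img_width) candidate superpixel ids per pixel
#   # xy_feat:    (batch_size, 2, train_img_height, train_img_width) per-pixel (x, y) coordinates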
#===================== pooling and upsampling feature ==========================================
def shift9pos(input, h_shift_unit=1, w_shift_unit=1):
# input should be padded as (c, 1+height+1, 1+width+1)
input_pd = np.pad(input, ((h_shift_unit, h_shift_unit), (w_shift_unit, w_shift_unit)), mode='edge')
input_pd = np.expand_dims(input_pd, axis=0)
# assign to ...
top = input_pd[:, :-2 * h_shift_unit, w_shift_unit:-w_shift_unit]
bottom = input_pd[:, 2 * h_shift_unit:, w_shift_unit:-w_shift_unit]
left = input_pd[:, h_shift_unit:-h_shift_unit, :-2 * w_shift_unit]
right = input_pd[:, h_shift_unit:-h_shift_unit, 2 * w_shift_unit:]
center = input_pd[:,h_shift_unit:-h_shift_unit,w_shift_unit:-w_shift_unit]
bottom_right = input_pd[:, 2 * h_shift_unit:, 2 * w_shift_unit:]
bottom_left = input_pd[:, 2 * h_shift_unit:, :-2 * w_shift_unit]
top_right = input_pd[:, :-2 * h_shift_unit, 2 * w_shift_unit:]
top_left = input_pd[:, :-2 * h_shift_unit, :-2 * w_shift_unit]
shift_tensor = np.concatenate([ top_left, top, top_right,
left, center, right,
bottom_left, bottom, bottom_right], axis=0)
return shift_tensor
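# Example (illustrative): for a 2x3 id map the 9 edge-padded shifts are stacked
# along a new leading axis, so shift9pos(np.arange(6).reshape(2, 3)).shape == (9, 2, 3);
# channel 4 is the unshifted center map, the other 8 channels are its neighbours.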
def poolfeat(input, prob, sp_h=2, sp_w=2):
def feat_prob_sum(feat_sum, prob_sum, shift_feat):
feat_sum += shift_feat[:, :-1, :, :]
prob_sum += shift_feat[:, -1:, :, :]
return feat_sum, prob_sum
b, _, h, w = input.shape
h_shift_unit = 1
w_shift_unit = 1
p2d = (w_shift_unit, w_shift_unit, h_shift_unit, h_shift_unit)
feat_ = torch.cat([input, torch.ones([b, 1, h, w]).cuda()], dim=1) # b* (n+1) *h*w
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 0, 1), kernel_size=(sp_h, sp_w),stride=(sp_h, sp_w)) # b * (n+1) * h* w
send_to_top_left = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, 2 * h_shift_unit:, 2 * w_shift_unit:]
feat_sum = send_to_top_left[:, :-1, :, :].clone()
prob_sum = send_to_top_left[:, -1:, :, :].clone()
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 1, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
top = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, 2 * h_shift_unit:, w_shift_unit:-w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum,prob_sum,top )
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 2, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
top_right = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, 2 * h_shift_unit:, :-2 * w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, top_right)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 3, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
left = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, h_shift_unit:-h_shift_unit, 2 * w_shift_unit:]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, left)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 4, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
center = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, h_shift_unit:-h_shift_unit, w_shift_unit:-w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, center)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 5, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
right = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, h_shift_unit:-h_shift_unit, :-2 * w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, right)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 6, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
bottom_left = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, :-2 * h_shift_unit, 2 * w_shift_unit:]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, bottom_left)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 7, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
bottom = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, :-2 * h_shift_unit, w_shift_unit:-w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, bottom)
prob_feat = F.avg_pool2d(feat_ * prob.narrow(1, 8, 1), kernel_size=(sp_h, sp_w), stride=(sp_h, sp_w)) # b * (n+1) * h* w
bottom_right = F.pad(prob_feat, p2d, mode='constant', value=0)[:, :, :-2 * h_shift_unit, :-2 * w_shift_unit]
feat_sum, prob_sum = feat_prob_sum(feat_sum, prob_sum, bottom_right)
pooled_feat = feat_sum / (prob_sum + 1e-8)
return pooled_feat
def upfeat(input, prob, up_h=2, up_w=2):
# input b*n*H*W downsampled
# prob b*9*h*w
b, c, h, w = input.shape
h_shift = 1
w_shift = 1
p2d = (w_shift, w_shift, h_shift, h_shift)
feat_pd = F.pad(input, p2d, mode='constant', value=0)
gt_frm_top_left = F.interpolate(feat_pd[:, :, :-2 * h_shift, :-2 * w_shift], size=(h * up_h, w * up_w),mode='nearest')
feat_sum = gt_frm_top_left * prob.narrow(1,0,1)
top = F.interpolate(feat_pd[:, :, :-2 * h_shift, w_shift:-w_shift], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += top * prob.narrow(1, 1, 1)
top_right = F.interpolate(feat_pd[:, :, :-2 * h_shift, 2 * w_shift:], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += top_right * prob.narrow(1,2,1)
left = F.interpolate(feat_pd[:, :, h_shift:-w_shift, :-2 * w_shift], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += left * prob.narrow(1, 3, 1)
center = F.interpolate(input, (h * up_h, w * up_w), mode='nearest')
feat_sum += center * prob.narrow(1, 4, 1)
right = F.interpolate(feat_pd[:, :, h_shift:-w_shift, 2 * w_shift:], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += right * prob.narrow(1, 5, 1)
bottom_left = F.interpolate(feat_pd[:, :, 2 * h_shift:, :-2 * w_shift], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += bottom_left * prob.narrow(1, 6, 1)
bottom = F.interpolate(feat_pd[:, :, 2 * h_shift:, w_shift:-w_shift], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += bottom * prob.narrow(1, 7, 1)
bottom_right = F.interpolate(feat_pd[:, :, 2 * h_shift:, 2 * w_shift:], size=(h * up_h, w * up_w), mode='nearest')
feat_sum += bottom_right * prob.narrow(1, 8, 1)
return feat_sum
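# Note (added for clarity): poolfeat and upfeat are approximate inverses built on the
# same 9-way association map `prob` of shape b x 9 x H x W. Shapes only, illustrative:
#   pooled = poolfeat(pix_feat, prob, sp_h=16, sp_w=16)  # b x C x H/16 x W/16 superpixel features
#   recon  = upfeat(pooled, prob, up_h=16, up_w=16)      # b x C x H x W reconstructed pixel features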
# ================= - spixel related -=============
def assign2uint8(assign):
#red up, green mid, blue down, for debug only
b,c,h,w = assign.shape
red = torch.cat([torch.ones(size=assign.shape), torch.zeros(size=[b,2,h,w])],dim=1).cuda()
green = torch.cat([ torch.zeros(size=[b,1,h,w]),
torch.ones(size=assign.shape),
torch.zeros(size=[b,1,h,w])],dim=1).cuda()
blue = torch.cat([torch.zeros(size=[b,2,h,w]),
torch.ones(size=assign.shape)],dim=1).cuda()
black = torch.zeros(size=[b,3,h,w]).cuda()
white = torch.ones(size=[b,3,h,w]).cuda()
# up probability
mat_vis = torch.where(assign.type(torch.float) < 0. , white, black)
mat_vis = torch.where(assign.type(torch.float) >= 0. , red* (assign.type(torch.float)+1)/3, mat_vis)
mat_vis = torch.where(assign.type(torch.float) >= 3., green*(assign.type(torch.float)-2)/3, mat_vis)
mat_vis = torch.where(assign.type(torch.float) >= 6., blue * (assign.type(torch.float) - 5.) / 3, mat_vis)
return (mat_vis * 255.).type(torch.uint8)
def val2uint8(mat,maxVal):
maxVal_mat = torch.ones(mat.shape).cuda() * maxVal
mat_vis = torch.where(mat > maxVal_mat, maxVal_mat, mat)
return (mat_vis * 255. / maxVal).type(torch.uint8)
def update_spixl_map (spixl_map_idx_in, assig_map_in):
assig_map = assig_map_in.clone()
b,_,h,w = assig_map.shape
_, _, id_h, id_w = spixl_map_idx_in.shape
if (id_h == h) and (id_w == w):
spixl_map_idx = spixl_map_idx_in
else:
spixl_map_idx = F.interpolate(spixl_map_idx_in, size=(h,w), mode='nearest')
assig_max,_ = torch.max(assig_map, dim=1, keepdim= True)
assignment_ = torch.where(assig_map == assig_max, torch.ones(assig_map.shape).cuda(),torch.zeros(assig_map.shape).cuda())
new_spixl_map_ = spixl_map_idx * assignment_ # winner take all
new_spixl_map = torch.sum(new_spixl_map_,dim=1,keepdim=True).type(torch.int)
return new_spixl_map
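# Example (illustrative): with assig_map_in of shape b x 9 x H x W, every pixel takes the
# superpixel id of its arg-max association among the 9 candidate grid cells listed in
# spixl_map_idx_in, producing an integer map of shape b x 1 x H x W.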
def get_spixel_image(given_img, spix_index, n_spixels = 600, b_enforce_connect = False):
if not isinstance(given_img, np.ndarray):
given_img_np_ = given_img.detach().cpu().numpy().transpose(1,2,0)
else: # for cvt lab to rgb case
given_img_np_ = given_img
if not isinstance(spix_index, np.ndarray):
spix_index_np = spix_index.detach().cpu().numpy().transpose(0,1)
else:
spix_index_np = spix_index
h, w = spix_index_np.shape
given_img_np = cv2.resize(given_img_np_, dsize=(w, h), interpolation=cv2.INTER_CUBIC)
if b_enforce_connect:
spix_index_np = spix_index_np.astype(np.int64)
segment_size = (given_img_np_.shape[0] * given_img_np_.shape[1]) / (int(n_spixels) * 1.0)
min_size = int(0.06 * segment_size)
max_size = int(3 * segment_size)
spix_index_np = enforce_connectivity(spix_index_np[None, :, :], min_size, max_size)[0]
cur_max = np.max(given_img_np)
spixel_bd_image = mark_boundaries(given_img_np/cur_max, spix_index_np.astype(int), color = (0,1,1)) # cyan
return (cur_max*spixel_bd_image).astype(np.float32).transpose(2,0,1), spix_index_np #
# ============ accumulate Q =============================
def spixlIdx(args, b_train = False):
# code modified from ssn
if b_train:
n_spixl_h = int(np.floor(args.train_img_height / args.downsize))
n_spixl_w = int(np.floor(args.train_img_width / args.downsize))
else:
n_spixl_h = int(np.floor(args.input_img_height / args.downsize))
n_spixl_w = int(np.floor(args.input_img_width / args.downsize))
spix_values = np.int32(np.arange(0, n_spixl_w * n_spixl_h).reshape((n_spixl_h, n_spixl_w)))
spix_idx_tensor = shift9pos(spix_values)
torch_spix_idx_tensor = torch.from_numpy(
np.tile(spix_idx_tensor, (args.batch_size, 1, 1, 1))).type(torch.float).cuda()
return torch_spix_idx_tensor
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return '{:.3f} ({:.3f})'.format(self.val, self.avg)
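# Usage sketch (added; not part of the original file):
#   losses = AverageMeter()
#   losses.update(loss.item(), n=batch_size)
#   print(losses)  # e.g. "0.413 (0.527)" -> latest value (running average)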
def batch2img(img):
b,_,h,w = img.shape
tmp = img.permute(0,2,3,1)
    for i in range(b):
        if i == 0:
            tmp_stack = tmp[i, :, :, :]
        else:
            tmp_stack = torch.cat([tmp_stack, tmp[i, :, :, :]], dim=-2)
return tmp_stack
def build_LABXY_feat(label_in, XY_feat):
img_lab = label_in.clone().type(torch.float)
b, _, curr_img_height, curr_img_width = XY_feat.shape
scale_img = F.interpolate(img_lab, size=(curr_img_height,curr_img_width), mode='nearest')
LABXY_feat = torch.cat([scale_img, XY_feat],dim=1)
return LABXY_feat
def rgb2Lab_torch(img_in, mean_values=None):
    # self-implemented conversion of an RGB image to CIE LAB
    # input image intensities should be in [0, 1], float, shape b*3*h*w
    assert img_in.min() >= 0 and img_in.max() <= 1
    img = (img_in.clone() + mean_values.cuda()).clamp(0, 1)
mask = img > 0.04045
img[mask] = torch.pow((img[mask] + 0.055) / 1.055, 2.4)
img[~mask] /= 12.92
xyz_from_rgb = torch.tensor([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]]).cuda()
rgb = img.permute(0,2,3,1)
xyz_img = torch.matmul(rgb, xyz_from_rgb.transpose_(0,1))
xyz_ref_white = torch.tensor([0.95047, 1., 1.08883]).cuda()
# scale by CIE XYZ tristimulus values of the reference white point
lab = xyz_img / xyz_ref_white
# Nonlinear distortion and linear transformation
mask = lab > 0.008856
lab[mask] = torch.pow(lab[mask], 1. / 3.)
lab[~mask] = 7.787 * lab[~mask] + 16. / 116.
x, y, z = lab[..., 0:1], lab[..., 1:2], lab[..., 2:3]
# Vector scaling
L = (116. * y) - 16.
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return torch.cat([L, a, b], dim=-1).permute(0,3,1,2)
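# Hedged usage sketch for rgb2Lab_torch (values are illustrative; a CUDA device
# is assumed because the function hard-codes `.cuda()`):
#   rgb = torch.rand(2, 3, 64, 64).cuda()  # b*3*h*w, intensities in [0, 1]
#   lab = rgb2Lab_torch(rgb, mean_values=torch.zeros(1, 3, 1, 1))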
def label2one_hot_torch(labels, C=14):
    # adapted from http://jacobkimmel.github.io/pytorch_onehot/
'''
Converts an integer label torch.autograd.Variable to a one-hot Variable.
Parameters
----------
labels : torch.autograd.Variable of torch.cuda.LongTensor
N x 1 x H x W, where N is batch size.
Each value is an integer representing correct classification.
C : integer.
number of classes in labels.
Returns
-------
target : torch.cuda.FloatTensor
N x C x H x W, where C is class number. One-hot encoded.
'''
b,_, h, w = labels.shape
one_hot = torch.zeros(b, C, h, w, dtype=torch.long).cuda()
target = one_hot.scatter_(1, labels.type(torch.long).data, 1) #require long type
return target.type(torch.float32)
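# Hedged usage sketch for label2one_hot_torch (the label tensor is an
# illustration; a CUDA device is assumed since the helper allocates on GPU):
#   labels = torch.randint(0, 14, (2, 1, 32, 32)).cuda()  # N x 1 x H x W
#   one_hot = label2one_hot_torch(labels, C=14)           # N x 14 x H x W
#   assert one_hot.sum(dim=1).eq(1).all()                 # one class per pixel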
|
0512db73167e0b9cae6b7477f2f570d1829f8083
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/imap/hooks/imap.py
|
1ff2c7154b7f892e237d62dac731109f7336c9ba
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 15,276
|
py
|
imap.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides everything needed to search mail for a specific attachment and download it.
It uses the imaplib library that ships with Python 3.
"""
from __future__ import annotations
import email
import imaplib
import os
import re
import ssl
from typing import TYPE_CHECKING, Any, Iterable
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.models.connection import Connection
class ImapHook(BaseHook):
"""
This hook connects to a mail server by using the imap protocol.
    .. note:: Please call this Hook as a context manager via `with`
to automatically open and close the connection to the mail server.
:param imap_conn_id: The :ref:`imap connection id <howto/connection:imap>`
that contains the information used to authenticate the client.
"""
conn_name_attr = "imap_conn_id"
default_conn_name = "imap_default"
conn_type = "imap"
hook_name = "IMAP"
def __init__(self, imap_conn_id: str = default_conn_name) -> None:
super().__init__()
self.imap_conn_id = imap_conn_id
self.mail_client: imaplib.IMAP4_SSL | imaplib.IMAP4 | None = None
def __enter__(self) -> ImapHook:
return self.get_conn()
def __exit__(self, exc_type, exc_val, exc_tb):
self.mail_client.logout()
def get_conn(self) -> ImapHook:
"""
Login to the mail server.
        .. note:: Please call this Hook as a context manager via `with`
to automatically open and close the connection to the mail server.
:return: an authorized ImapHook object.
"""
if not self.mail_client:
conn = self.get_connection(self.imap_conn_id)
self.mail_client = self._build_client(conn)
self.mail_client.login(conn.login, conn.password)
return self
def _build_client(self, conn: Connection) -> imaplib.IMAP4_SSL | imaplib.IMAP4:
mail_client: imaplib.IMAP4_SSL | imaplib.IMAP4
use_ssl = conn.extra_dejson.get("use_ssl", True)
if use_ssl:
from airflow.configuration import conf
extra_ssl_context = conn.extra_dejson.get("ssl_context", None)
if extra_ssl_context:
ssl_context_string = extra_ssl_context
else:
ssl_context_string = conf.get("imap", "SSL_CONTEXT", fallback=None)
if ssl_context_string is None:
ssl_context_string = conf.get("email", "SSL_CONTEXT", fallback=None)
if ssl_context_string is None:
ssl_context_string = "default"
if ssl_context_string == "default":
ssl_context = ssl.create_default_context()
elif ssl_context_string == "none":
ssl_context = None
else:
                raise RuntimeError(
                    f"The imap.ssl_context or email.ssl_context configuration variable must "
                    f"be set to 'default' or 'none' and is '{ssl_context_string}'."
                )
if conn.port:
mail_client = imaplib.IMAP4_SSL(conn.host, conn.port, ssl_context=ssl_context)
else:
mail_client = imaplib.IMAP4_SSL(conn.host, ssl_context=ssl_context)
else:
if conn.port:
mail_client = imaplib.IMAP4(conn.host, conn.port)
else:
mail_client = imaplib.IMAP4(conn.host)
return mail_client
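    # Hedged configuration sketch for the SSL-context lookup above (the values
    # are illustrative): with no "ssl_context" key in the connection extras and
    # an airflow.cfg containing
    #
    #   [imap]
    #   ssl_context = default
    #
    # _build_client() creates an IMAP4_SSL client using
    # ssl.create_default_context().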
def has_mail_attachment(
self, name: str, *, check_regex: bool = False, mail_folder: str = "INBOX", mail_filter: str = "All"
) -> bool:
"""
Checks the mail folder for mails containing attachments with the given name.
:param name: The name of the attachment that will be searched for.
:param check_regex: Checks the name for a regular expression.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:returns: True if there is an attachment with the given name and False if not.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, True, mail_folder, mail_filter
)
return bool(mail_attachments)
def retrieve_mail_attachments(
self,
name: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> list[tuple]:
"""
Retrieves mail's attachments in the mail folder by its name.
:param name: The name of the attachment that will be downloaded.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only retrieve the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:returns: a list of tuple each containing the attachment filename and its payload.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments
def download_mail_attachments(
self,
name: str,
local_output_directory: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> None:
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only download the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory)
def _handle_not_found_mode(self, not_found_mode: str) -> None:
if not_found_mode not in ("raise", "warn", "ignore"):
self.log.error('Invalid "not_found_mode" %s', not_found_mode)
elif not_found_mode == "raise":
raise AirflowException("No mail attachments found!")
elif not_found_mode == "warn":
self.log.warning("No mail attachments found!")
def _retrieve_mails_attachments_by_name(
self, name: str, check_regex: bool, latest_only: bool, mail_folder: str, mail_filter: str
) -> list:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
all_matching_attachments = []
self.mail_client.select(mail_folder)
for mail_id in self._list_mail_ids_desc(mail_filter):
response_mail_body = self._fetch_mail_body(mail_id)
matching_attachments = self._check_mail_body(response_mail_body, name, check_regex, latest_only)
if matching_attachments:
all_matching_attachments.extend(matching_attachments)
if latest_only:
break
self.mail_client.close()
return all_matching_attachments
def _list_mail_ids_desc(self, mail_filter: str) -> Iterable[str]:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
_, data = self.mail_client.search(None, mail_filter)
mail_ids = data[0].split()
return reversed(mail_ids)
def _fetch_mail_body(self, mail_id: str) -> str:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
_, data = self.mail_client.fetch(mail_id, "(RFC822)")
mail_body = data[0][1] # type: ignore # The mail body is always in this specific location
mail_body_str = mail_body.decode("utf-8") # type: ignore
return mail_body_str
def _check_mail_body(
self, response_mail_body: str, name: str, check_regex: bool, latest_only: bool
) -> list[tuple[Any, Any]]:
mail = Mail(response_mail_body)
if mail.has_attachments():
return mail.get_attachments_by_name(name, check_regex, find_first=latest_only)
return []
def _create_files(self, mail_attachments: list, local_output_directory: str) -> None:
for name, payload in mail_attachments:
if self._is_symlink(name):
self.log.error("Can not create file because it is a symlink!")
elif self._is_escaping_current_directory(name):
self.log.error("Can not create file because it is escaping the current directory!")
else:
self._create_file(name, payload, local_output_directory)
def _is_symlink(self, name: str) -> bool:
# IMPORTANT NOTE: os.path.islink is not working for windows symlinks
# See: https://stackoverflow.com/a/11068434
return os.path.islink(name)
def _is_escaping_current_directory(self, name: str) -> bool:
return "../" in name
def _correct_path(self, name: str, local_output_directory: str) -> str:
return (
local_output_directory + name
if local_output_directory.endswith("/")
else local_output_directory + "/" + name
)
def _create_file(self, name: str, payload: Any, local_output_directory: str) -> None:
file_path = self._correct_path(name, local_output_directory)
with open(file_path, "wb") as file:
file.write(payload)
class Mail(LoggingMixin):
"""
This class simplifies working with mails returned by the imaplib client.
:param mail_body: The mail body of a mail received from imaplib client.
"""
def __init__(self, mail_body: str) -> None:
super().__init__()
self.mail = email.message_from_string(mail_body)
def has_attachments(self) -> bool:
"""
        Checks the mail for attachments.
:returns: True if it has attachments and False if not.
"""
return self.mail.get_content_maintype() == "multipart"
def get_attachments_by_name(
self, name: str, check_regex: bool, find_first: bool = False
) -> list[tuple[Any, Any]]:
"""
Gets all attachments by name for the mail.
:param name: The name of the attachment to look for.
:param check_regex: Checks the name for a regular expression.
:param find_first: If set to True it will only find the first match and then quit.
:returns: a list of tuples each containing name and payload
where the attachments name matches the given name.
"""
attachments = []
for attachment in self._iterate_attachments():
found_attachment = (
attachment.has_matching_name(name) if check_regex else attachment.has_equal_name(name)
)
if found_attachment:
file_name, file_payload = attachment.get_file()
self.log.info("Found attachment: %s", file_name)
attachments.append((file_name, file_payload))
if find_first:
break
return attachments
def _iterate_attachments(self) -> Iterable[MailPart]:
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
yield mail_part
class MailPart:
"""
This class is a wrapper for a Mail object's part and gives it more features.
:param part: The mail part in a Mail object.
"""
def __init__(self, part: Any) -> None:
self.part = part
def is_attachment(self) -> bool:
"""
Checks if the part is a valid mail attachment.
:returns: True if it is an attachment and False if not.
"""
return self.part.get_content_maintype() != "multipart" and self.part.get("Content-Disposition")
    def has_matching_name(self, name: str) -> re.Match | None:
        """
        Checks if the given name matches the part's name.
        :param name: The regular expression to match the name against.
        :returns: a match object if the part's name matches the pattern, otherwise None.
        """
return re.match(name, self.part.get_filename()) # type: ignore
def has_equal_name(self, name: str) -> bool:
"""
Checks if the given name is equal to the part's name.
:param name: The name to look for.
:returns: True if it is equal to the given name.
"""
return self.part.get_filename() == name
def get_file(self) -> tuple:
"""
Gets the file including name and payload.
:returns: the part's name and payload.
"""
return self.part.get_filename(), self.part.get_payload(decode=True)
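# Hedged usage sketch (not part of the provider): the connection id, attachment
# name and output directory below are made-up example values.
#
#   with ImapHook(imap_conn_id="imap_default") as imap_hook:
#       if imap_hook.has_mail_attachment(name="report.csv"):
#           imap_hook.download_mail_attachments(
#               name="report.csv",
#               local_output_directory="/tmp",
#               latest_only=True,
#           )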
|
2ad793d9d004fd69ed23f18d69839cea60e68c1d
|
d05c946e345baa67e7894ee33ca21e24b8d26028
|
/machine-learning/stock-prediction/stock_prediction.py
|
e800be7ff004f05c64bc50243fef995d77a86623
|
[
"MIT"
] |
permissive
|
x4nth055/pythoncode-tutorials
|
327255550812f84149841d56f2d13eaa84efd42e
|
d6ba5d672f7060ba88384db5910efab1768c7230
|
refs/heads/master
| 2023-09-01T02:36:58.442748
| 2023-08-19T14:04:34
| 2023-08-19T14:04:34
| 199,449,624
| 1,858
| 2,055
|
MIT
| 2023-08-25T20:41:56
| 2019-07-29T12:35:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,171
|
py
|
stock_prediction.py
|
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Bidirectional
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from yahoo_fin import stock_info as si
from collections import deque
import numpy as np
import pandas as pd
import random
# set seed, so we can get the same results after rerunning several times
np.random.seed(314)
tf.random.set_seed(314)
random.seed(314)
def shuffle_in_unison(a, b):
# shuffle two arrays in the same way
state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(state)
np.random.shuffle(b)
def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1, split_by_date=True,
test_size=0.2, feature_columns=['adjclose', 'volume', 'open', 'high', 'low']):
"""
    Loads data from the Yahoo Finance source, with optional scaling, shuffling and train/test splitting.
Params:
        ticker (str/pd.DataFrame): the ticker you want to load, examples include AAPL, TSLA, etc.
        n_steps (int): the historical sequence length (i.e. window size) used to predict, default is 50
scale (bool): whether to scale prices from 0 to 1, default is True
shuffle (bool): whether to shuffle the dataset (both training & testing), default is True
lookup_step (int): the future lookup step to predict, default is 1 (e.g next day)
split_by_date (bool): whether we split the dataset into training/testing by date, setting it
to False will split datasets in a random way
test_size (float): ratio for test data, default is 0.2 (20% testing data)
feature_columns (list): the list of features to use to feed into the model, default is everything grabbed from yahoo_fin
"""
# see if ticker is already a loaded stock from yahoo finance
if isinstance(ticker, str):
# load it from yahoo_fin library
df = si.get_data(ticker)
elif isinstance(ticker, pd.DataFrame):
# already loaded, use it directly
df = ticker
else:
        raise TypeError("ticker can be either a str or a `pd.DataFrame` instance")
# this will contain all the elements we want to return from this function
result = {}
# we will also return the original dataframe itself
result['df'] = df.copy()
# make sure that the passed feature_columns exist in the dataframe
for col in feature_columns:
assert col in df.columns, f"'{col}' does not exist in the dataframe."
# add date as a column
if "date" not in df.columns:
df["date"] = df.index
if scale:
column_scaler = {}
# scale the data (prices) from 0 to 1
for column in feature_columns:
scaler = preprocessing.MinMaxScaler()
df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
column_scaler[column] = scaler
# add the MinMaxScaler instances to the result returned
result["column_scaler"] = column_scaler
# add the target column (label) by shifting by `lookup_step`
df['future'] = df['adjclose'].shift(-lookup_step)
    # the last `lookup_step` rows contain NaN in the 'future' column
    # grab them before dropping NaNs
last_sequence = np.array(df[feature_columns].tail(lookup_step))
# drop NaNs
df.dropna(inplace=True)
sequence_data = []
sequences = deque(maxlen=n_steps)
for entry, target in zip(df[feature_columns + ["date"]].values, df['future'].values):
sequences.append(entry)
if len(sequences) == n_steps:
sequence_data.append([np.array(sequences), target])
    # build the last sequence by appending the final `n_steps` sequence with the `lookup_step` rows
    # for instance, if n_steps=50 and lookup_step=10, last_sequence will have length 60 (that is, 50+10)
# this last_sequence will be used to predict future stock prices that are not available in the dataset
last_sequence = list([s[:len(feature_columns)] for s in sequences]) + list(last_sequence)
last_sequence = np.array(last_sequence).astype(np.float32)
# add to result
result['last_sequence'] = last_sequence
# construct the X's and y's
X, y = [], []
for seq, target in sequence_data:
X.append(seq)
y.append(target)
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
if split_by_date:
# split the dataset into training & testing sets by date (not randomly splitting)
train_samples = int((1 - test_size) * len(X))
result["X_train"] = X[:train_samples]
result["y_train"] = y[:train_samples]
result["X_test"] = X[train_samples:]
result["y_test"] = y[train_samples:]
if shuffle:
# shuffle the datasets for training (if shuffle parameter is set)
shuffle_in_unison(result["X_train"], result["y_train"])
shuffle_in_unison(result["X_test"], result["y_test"])
else:
# split the dataset randomly
result["X_train"], result["X_test"], result["y_train"], result["y_test"] = train_test_split(X, y,
test_size=test_size, shuffle=shuffle)
# get the list of test set dates
dates = result["X_test"][:, -1, -1]
# retrieve test features from the original dataframe
result["test_df"] = result["df"].loc[dates]
# remove duplicated dates in the testing dataframe
result["test_df"] = result["test_df"][~result["test_df"].index.duplicated(keep='first')]
# remove dates from the training/testing sets & convert to float32
result["X_train"] = result["X_train"][:, :, :len(feature_columns)].astype(np.float32)
result["X_test"] = result["X_test"][:, :, :len(feature_columns)].astype(np.float32)
return result
def create_model(sequence_length, n_features, units=256, cell=LSTM, n_layers=2, dropout=0.3,
loss="mean_absolute_error", optimizer="rmsprop", bidirectional=False):
model = Sequential()
for i in range(n_layers):
if i == 0:
# first layer
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=True), batch_input_shape=(None, sequence_length, n_features)))
else:
model.add(cell(units, return_sequences=True, batch_input_shape=(None, sequence_length, n_features)))
elif i == n_layers - 1:
# last layer
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=False)))
else:
model.add(cell(units, return_sequences=False))
else:
# hidden layers
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=True)))
else:
model.add(cell(units, return_sequences=True))
# add dropout after each layer
model.add(Dropout(dropout))
model.add(Dense(1, activation="linear"))
model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
return model
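# Hedged usage sketch: the hyperparameters below are illustrative, not values
# prescribed by the tutorial, and running this needs internet access so that
# yahoo_fin can fetch the price history.
if __name__ == "__main__":
    data = load_data("AAPL", n_steps=50, lookup_step=15, test_size=0.2)
    # n_features matches the five default feature_columns of load_data
    model = create_model(sequence_length=50, n_features=5, units=256, n_layers=2, dropout=0.4)
    model.fit(data["X_train"], data["y_train"], batch_size=64, epochs=10,
              validation_data=(data["X_test"], data["y_test"]))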
|
d38a31f251adca7c1dc576b06df66f1c725f7f1e
|
99199db3f78a344e72b281c71c690518ae07375a
|
/octavia/controller/worker/v2/flows/flow_utils.py
|
f4de5cc58d12df4855fb7893f7de106c2f5a9481
|
[
"Apache-2.0"
] |
permissive
|
openstack/octavia
|
3faf2afe2ade5bd3978bb3a0558d2eeefc648ba2
|
0426285a41464a5015494584f109eed35a0d44db
|
refs/heads/master
| 2023-09-01T20:12:48.272344
| 2023-08-31T17:24:04
| 2023-08-31T17:24:04
| 21,018,188
| 147
| 180
|
Apache-2.0
| 2021-03-30T12:34:30
| 2014-06-19T22:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,409
|
py
|
flow_utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia.api.drivers import utils as provider_utils
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import health_monitor_flows
from octavia.controller.worker.v2.flows import l7policy_flows
from octavia.controller.worker.v2.flows import l7rule_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import load_balancer_flows
from octavia.controller.worker.v2.flows import member_flows
from octavia.controller.worker.v2.flows import pool_flows
LB_FLOWS = load_balancer_flows.LoadBalancerFlows()
AMP_FLOWS = amphora_flows.AmphoraFlows()
HM_FLOWS = health_monitor_flows.HealthMonitorFlows()
L7_POLICY_FLOWS = l7policy_flows.L7PolicyFlows()
L7_RULES_FLOWS = l7rule_flows.L7RuleFlows()
LISTENER_FLOWS = listener_flows.ListenerFlows()
M_FLOWS = member_flows.MemberFlows()
P_FLOWS = pool_flows.PoolFlows()
def get_create_load_balancer_flow(topology, listeners=None):
return LB_FLOWS.get_create_load_balancer_flow(topology,
listeners=listeners)
def get_delete_load_balancer_flow(lb):
return LB_FLOWS.get_delete_load_balancer_flow(lb)
def get_listeners_on_lb(db_lb):
"""Get a list of the listeners on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format listeners.
"""
listener_dicts = []
for listener in db_lb.listeners:
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
listener_dicts.append(prov_listener.to_dict())
return listener_dicts
def get_pools_on_lb(db_lb):
"""Get a list of the pools on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format pools.
"""
pool_dicts = []
for pool in db_lb.pools:
prov_pool = provider_utils.db_pool_to_provider_pool(pool)
pool_dicts.append(prov_pool.to_dict())
return pool_dicts
def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()):
return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners,
pools)
def get_update_load_balancer_flow():
return LB_FLOWS.get_update_load_balancer_flow()
def get_create_amphora_flow():
return AMP_FLOWS.get_create_amphora_flow()
def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None):
return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts,
retry_interval)
def get_failover_LB_flow(amps, lb):
return LB_FLOWS.get_failover_LB_flow(amps, lb)
def get_failover_amphora_flow(amphora_dict, lb_amp_count):
return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count)
def cert_rotate_amphora_flow():
return AMP_FLOWS.cert_rotate_amphora_flow()
def update_amphora_config_flow():
return AMP_FLOWS.update_amphora_config_flow()
def get_create_health_monitor_flow():
return HM_FLOWS.get_create_health_monitor_flow()
def get_delete_health_monitor_flow():
return HM_FLOWS.get_delete_health_monitor_flow()
def get_update_health_monitor_flow():
return HM_FLOWS.get_update_health_monitor_flow()
def get_create_l7policy_flow():
return L7_POLICY_FLOWS.get_create_l7policy_flow()
def get_delete_l7policy_flow():
return L7_POLICY_FLOWS.get_delete_l7policy_flow()
def get_update_l7policy_flow():
return L7_POLICY_FLOWS.get_update_l7policy_flow()
def get_create_l7rule_flow():
return L7_RULES_FLOWS.get_create_l7rule_flow()
def get_delete_l7rule_flow():
return L7_RULES_FLOWS.get_delete_l7rule_flow()
def get_update_l7rule_flow():
return L7_RULES_FLOWS.get_update_l7rule_flow()
def get_create_listener_flow():
return LISTENER_FLOWS.get_create_listener_flow()
def get_create_all_listeners_flow():
return LISTENER_FLOWS.get_create_all_listeners_flow()
def get_delete_listener_flow():
return LISTENER_FLOWS.get_delete_listener_flow()
def get_update_listener_flow():
return LISTENER_FLOWS.get_update_listener_flow()
def get_create_member_flow():
return M_FLOWS.get_create_member_flow()
def get_delete_member_flow():
return M_FLOWS.get_delete_member_flow()
def get_update_member_flow():
return M_FLOWS.get_update_member_flow()
def get_batch_update_members_flow(old_members, new_members, updated_members):
return M_FLOWS.get_batch_update_members_flow(old_members, new_members,
updated_members)
def get_create_pool_flow():
return P_FLOWS.get_create_pool_flow()
def get_delete_pool_flow():
return P_FLOWS.get_delete_pool_flow()
def get_update_pool_flow():
return P_FLOWS.get_update_pool_flow()
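# Hedged usage sketch (illustrative only; how the returned taskflow flow is
# executed depends on the caller and is outside this module):
#
#   flow = get_create_pool_flow()
#   # e.g. hand `flow` to a taskflow engine/conductor with the required store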
|
e89f9b6169626e6281ea1a8f1e4113de684c8baa
|
fa3f6d4e9169fb95f828013d179d03accdff381b
|
/grr/server/grr_response_server/gui/selenium_tests/forms_test.py
|
2c049bf41d7bfa7c99f090d8b3577bb9f62ed4ca
|
[
"Apache-2.0"
] |
permissive
|
google/grr
|
c51a2bd251ed2f7adae538541990a2cc01fdcc8c
|
44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6
|
refs/heads/master
| 2023-09-05T20:02:36.823914
| 2023-07-26T09:34:09
| 2023-07-26T09:34:09
| 14,909,673
| 4,683
| 927
|
Apache-2.0
| 2023-07-26T09:34:10
| 2013-12-04T00:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 10,166
|
py
|
forms_test.py
|
#!/usr/bin/env python
"""Tests for the UI forms."""
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import tests_pb2
from grr_response_server import flow_base
from grr_response_server.flows.general import file_finder as flows_file_finder
from grr_response_server.gui import api_call_context
from grr_response_server.gui import gui_test_lib
from grr_response_server.gui.api_plugins import user as user_plugin
from grr.test_lib import test_lib
class DefaultArgsTestFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.DefaultArgsTestFlowArgs
class DefaultArgsTestFlow(flow_base.FlowBase):
args_type = DefaultArgsTestFlowArgs
category = "/Tests/"
behaviours = flow_base.BEHAVIOUR_BASIC
class TestForms(gui_test_lib.GRRSeleniumTest):
"""Tests basic forms rendering."""
def testControlsWithoutDefaultValuesAreCorrectlyDisplayed(self):
# Open the "new hunt" form and select the DefaultArgsTestFlow.
self.Open("/legacy#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
self.Click("css=#_Tests > i.jstree-icon")
self.Click("link=DefaultArgsTestFlow")
self.WaitUntil(self.IsTextPresent, "String value")
# Check that shown default values of the controls are just default
# values of the corresponding types.
self.WaitUntilEqual(
"", self.GetValue, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('String value')) input")
self.WaitUntilEqual(
"0", self.GetValue, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Int value')) input")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Bool value')) input:not(:checked)")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Enum value')) select "
"option:selected(label='OPTION_1 (default)')")
def testControlsWithDefaultValuesAreCorrectlyDisplayed(self):
# Open the "new hunt" form and select the DefaultArgsTestFlow.
self.Open("/legacy#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
self.Click("css=#_Tests > i.jstree-icon")
self.Click("link=DefaultArgsTestFlow")
self.WaitUntil(self.IsTextPresent, "String value")
# Check that shown default values of the controls are the default values
# that we specified in the RDFValue definition.
self.WaitUntilEqual(
"default string", self.GetValue, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('String value with default')) input")
self.WaitUntilEqual(
"42", self.GetValue, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Int value with default')) input")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Bool value with default')) "
"input:checked")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form "
".form-group:has(label:contains('Enum value with default')) select "
"option:selected(label='OPTION_2 (default)')")
def testFileFinderArgsPathsDocHintIsDisplayed(self):
self.Open("/legacy#/hunts")
self.Click("css=button[name=NewHunt]")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=" + flows_file_finder.FileFinder.friendly_name)
self.WaitUntil(
self.IsElementPresent, "css=label:contains(Paths) "
"a[href*='help/investigating-with-grr/flows/"
"specifying-file-paths.html']")
def testFileFinderArgsHasOnePathAddedByDefault(self):
self.Open("/legacy#/hunts")
self.Click("css=button[name=NewHunt]")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=" + flows_file_finder.FileFinder.friendly_name)
self.WaitUntil(self.IsElementPresent,
"css=input[placeholder*='Type %% for autocompletion']")
def testApproverInputShowsAutocompletion(self):
self.CreateUser("sanchezrick")
    # add a decoy user to ensure that autocompletion results are based on the
    # query
self.CreateUser("aaa")
client_id = self.SetupClient(0)
self.Open("/legacy#/clients/%s/host-info" % client_id)
# We do not have an approval, so we need to request one.
self.Click("css=button[name=requestApproval]")
input_selector = "css=grr-approver-input input"
self.Type(input_selector, "sanchez")
self.WaitUntil(self.IsElementPresent,
"css=[uib-typeahead-popup]:contains(sanchezrick)")
self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
self.WaitUntilEqual("sanchezrick, ", self.GetValue,
input_selector + ":text")
self.Type("css=grr-request-approval-dialog input[name=acl_reason]", "Test")
self.Click(
"css=grr-request-approval-dialog button[name=Proceed]:not([disabled])")
# "Request Approval" dialog should go away
self.WaitUntilNot(self.IsVisible, "css=.modal-open")
handler = user_plugin.ApiListClientApprovalsHandler()
args = user_plugin.ApiListClientApprovalsArgs(client_id=client_id)
res = handler.Handle(
args=args, context=api_call_context.ApiCallContext(self.test_username))
self.assertLen(res.items, 1)
self.assertLen(res.items[0].notified_users, 1)
self.assertEqual(res.items[0].notified_users[0], "sanchezrick")
def testSuggestedReasonIsPropagatedFromHostInfoToApproval(self):
client_id = self.SetupClient(0)
self.Open("/legacy#/clients/{}/host-info?reason=t123".format(client_id))
self.Click("css=button[name=requestApproval]")
self.WaitUntilEqual("t123", self.GetValue, "css=input[name=acl_reason]")
class TestFormsValidation(gui_test_lib.GRRSeleniumTest):
"""Tests forms validation in different workflows ."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
self.RequestAndGrantClientApproval(self.client_id)
def testLaunchFlowButtonIsDisabledIfFlowArgumentsInvalid(self):
self.Open("/legacy#/clients/%s/launch-flow" % self.client_id)
self.Click("css=#_Filesystem a")
self.Click("link=" + flows_file_finder.FileFinder.friendly_name)
    # FileFinder's literal match condition has a bytes field that should
    # be validated: it shouldn't contain Unicode characters.
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", u"昨夜")
self.WaitUntil(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Launch'):disabled")
self.Type("css=label:contains('Literal') ~ * input", "something safe")
self.WaitUntilNot(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Launch'):not(:disabled)")
def testLaunchButtonInCopyFlowIsDisabledIfArgumentsInvalid(self):
self.Open("/legacy#/clients/%s/launch-flow" % self.client_id)
# Launch the flow.
self.Click("css=#_Filesystem a")
self.Click("link=" + flows_file_finder.FileFinder.friendly_name)
self.Type("css=grr-form-proto-repeated-field:contains('Paths') input",
"foo/bar")
self.Click("css=button:contains('Launch')")
# Open the copy dialog.
self.Open("/legacy#/clients/%s/flows" % self.client_id)
self.Click("css=tr:contains('%s')" % flows_file_finder.FileFinder.__name__)
self.Click("css=button[name=copy_flow]")
    # FileFinder's literal match condition has a bytes field that should
    # be validated: it shouldn't contain Unicode characters.
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", u"昨夜")
self.WaitUntil(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Launch'):disabled")
self.Type("css=label:contains('Literal') ~ * input", "something safe")
self.WaitUntilNot(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Launch'):not(:disabled)")
  def testNextButtonInHuntWizardIsDisabledIfArgumentsInvalid(self):
self.Open("/legacy#/hunts")
self.Click("css=button[name=NewHunt]")
self.Click("css=#_Filesystem a")
self.Click("link=" + flows_file_finder.FileFinder.friendly_name)
    # FileFinder's literal match condition has a bytes field that should
    # be validated: it shouldn't contain Unicode characters.
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", u"昨夜")
self.WaitUntil(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Next'):disabled")
self.Type("css=label:contains('Literal') ~ * input", "something safe")
self.WaitUntilNot(
self.IsElementPresent,
"css=.text-danger:contains('Unicode characters are not "
"allowed in a byte string')")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Next'):not(:disabled)")
if __name__ == "__main__":
app.run(test_lib.main)
|
05b7936b877afa9ba7c15daac09e79824556620a
|
d793e2bbd0960b15725e7752f98e70a1f24c7c77
|
/dataflows/helpers/rows_processor.py
|
2cfea56b9b8e5e3e7f961c972047569a0191a0a4
|
[
"MIT"
] |
permissive
|
datahq/dataflows
|
f932a4d1dad12468428313ac636d5b704e9b49ff
|
9e604805accb6147cf2b1772d848d1a5e02de616
|
refs/heads/master
| 2023-07-21T04:14:09.523089
| 2023-07-19T16:45:30
| 2023-07-19T16:45:30
| 135,416,665
| 172
| 46
|
MIT
| 2023-07-19T16:45:32
| 2018-05-30T09:01:58
|
Python
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
rows_processor.py
|
from .. import DataStreamProcessor
class rows_processor(DataStreamProcessor):
def __init__(self, rows_processor_func):
super(rows_processor, self).__init__()
self.func = rows_processor_func
def process_resource(self, resource):
yield from self.func(resource)
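# Hedged usage sketch (the `upper_names` generator and the surrounding Flow are
# illustrative, not part of this module):
#
#   def upper_names(rows):
#       for row in rows:
#           row["name"] = row["name"].upper()
#           yield row
#
#   # typically wired into a dataflows pipeline, e.g.
#   #   Flow(load("data.csv"), rows_processor(upper_names)).process()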
|
b1c33a4a91d07950d28b027d4335962f60ae320b
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/pvpc_hourly_pricing/test_config_flow.py
|
6560c81ebbb86a9b995b8cc8403c810a433f249b
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,622
|
py
|
test_config_flow.py
|
"""Tests for the pvpc_hourly_pricing config_flow."""
from datetime import datetime, timedelta
from freezegun.api import FrozenDateTimeFactory
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.pvpc_hourly_pricing import (
ATTR_POWER,
ATTR_POWER_P3,
ATTR_TARIFF,
DOMAIN,
TARIFFS,
)
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt as dt_util
from .conftest import check_valid_state
from tests.common import async_fire_time_changed
from tests.test_util.aiohttp import AiohttpClientMocker
_MOCK_TIME_VALID_RESPONSES = datetime(2023, 1, 6, 12, 0, tzinfo=dt_util.UTC)
async def test_config_flow(
hass: HomeAssistant,
freezer: FrozenDateTimeFactory,
pvpc_aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config flow for pvpc_hourly_pricing.
- Create a new entry with tariff "2.0TD (Ceuta/Melilla)"
- Check state and attributes
    - Check abort when trying to configure another with the same tariff
- Check removal and add again to check state restoration
- Configure options to change power and tariff to "2.0TD"
"""
freezer.move_to(_MOCK_TIME_VALID_RESPONSES)
hass.config.set_time_zone("Europe/Madrid")
tst_config = {
CONF_NAME: "test",
ATTR_TARIFF: TARIFFS[1],
ATTR_POWER: 4.6,
ATTR_POWER_P3: 5.75,
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], tst_config
)
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
await hass.async_block_till_done()
state = hass.states.get("sensor.esios_pvpc")
check_valid_state(state, tariff=TARIFFS[1])
assert pvpc_aioclient_mock.call_count == 1
# Check abort when configuring another with same tariff
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], tst_config
)
assert result["type"] == data_entry_flow.FlowResultType.ABORT
assert pvpc_aioclient_mock.call_count == 1
# Check removal
registry = er.async_get(hass)
registry_entity = registry.async_get("sensor.esios_pvpc")
assert await hass.config_entries.async_remove(registry_entity.config_entry_id)
# and add it again with UI
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], tst_config
)
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
await hass.async_block_till_done()
state = hass.states.get("sensor.esios_pvpc")
check_valid_state(state, tariff=TARIFFS[1])
assert pvpc_aioclient_mock.call_count == 2
assert state.attributes["period"] == "P3"
assert state.attributes["next_period"] == "P2"
assert state.attributes["available_power"] == 5750
# check options flow
current_entries = hass.config_entries.async_entries(DOMAIN)
assert len(current_entries) == 1
config_entry = current_entries[0]
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.FlowResultType.FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={ATTR_POWER: 3.0, ATTR_POWER_P3: 4.6},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.esios_pvpc")
check_valid_state(state, tariff=TARIFFS[1])
assert pvpc_aioclient_mock.call_count == 3
assert state.attributes["period"] == "P3"
assert state.attributes["next_period"] == "P2"
assert state.attributes["available_power"] == 4600
# check update failed
freezer.tick(timedelta(days=1))
async_fire_time_changed(hass)
await hass.async_block_till_done()
state = hass.states.get("sensor.esios_pvpc")
check_valid_state(state, tariff=TARIFFS[0], value="unavailable")
assert "period" not in state.attributes
assert pvpc_aioclient_mock.call_count == 4
|
3c6ecee9c908655bf42e29ef725e41c426e83589
|
9da4adae4c389e84097a0da9bfce40f9132eef96
|
/test/_utils.py
|
9fef35e46ac49c346168a406bde6c4c56638d1c3
|
[
"MIT"
] |
permissive
|
ppizarror/pygame-menu
|
f8fd2ff3acefad25b07e19499a2dfebd50507403
|
bcfaccbb11d4a6ecba588eec2851932dc46c2337
|
refs/heads/master
| 2023-07-07T10:38:09.651797
| 2023-06-28T18:00:25
| 2023-06-28T18:00:25
| 89,940,842
| 570
| 207
|
NOASSERTION
| 2023-08-19T19:17:59
| 2017-05-01T16:26:50
|
Python
|
UTF-8
|
Python
| false
| false
| 23,973
|
py
|
_utils.py
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
UTILS
Test suite utility functions and classes.
"""
__all__ = [
# Globals
'PYGAME_V2',
'SYS_PLATFORM_OSX',
'TEST_THEME',
'THEME_NON_FIXED_TITLE',
'WIDGET_MOUSEOVER',
'WIDGET_TOP_CURSOR',
'WINDOW_SIZE',
# Methods
'reset_widgets_over',
'sleep',
'surface',
'test_reset_surface',
# Class utils
'BaseTest',
'PygameEventUtils',
'MenuUtils'
]
import random
import sys
import unittest
from time import sleep
import pygame
import pygame_menu
from pygame_menu.font import FONT_EXAMPLES
from pygame_menu.locals import FINGERDOWN, FINGERMOTION, FINGERUP
from pygame_menu.utils import assert_vector, PYGAME_V2
from pygame_menu.widgets.core.widget import check_widget_mouseleave
# noinspection PyProtectedMember
from pygame_menu._types import NumberType, Union, List, Tuple, Optional, EventType, \
Tuple2IntType, MenuColumnMaxWidthType, MenuColumnMinWidthType, Any, MenuRowsType, \
Tuple2NumberType, VectorIntType, VectorInstance, NumberInstance
EventListType = Union[EventType, List[EventType]]
# Constants
WINDOW_SIZE = (600, 600) # Width, height
# Init pygame
pygame.init()
surface = pygame.display.set_mode(WINDOW_SIZE)
TEST_THEME = pygame_menu.themes.THEME_DEFAULT.copy()
TEST_THEME.title_fixed = False
TEST_THEME.widget_margin = (0, 10)
TEST_THEME.widget_padding = 0
TEST_THEME.widget_selection_effect = pygame_menu.widgets.HighlightSelection()
THEME_NON_FIXED_TITLE = pygame_menu.themes.THEME_DEFAULT.copy()
THEME_NON_FIXED_TITLE.title_fixed = False
WIDGET_MOUSEOVER = pygame_menu.widgets.core.widget.WIDGET_MOUSEOVER
WIDGET_TOP_CURSOR = pygame_menu.widgets.core.widget.WIDGET_TOP_CURSOR
SYS_PLATFORM_OSX = sys.platform == 'darwin'
def reset_widgets_over() -> None:
"""
Reset widget over.
"""
check_widget_mouseleave(force=True)
def test_reset_surface() -> None:
"""
Reset test surface.
"""
global surface
surface = pygame.display.set_mode(WINDOW_SIZE)
class BaseTest(unittest.TestCase):
"""
Base test class.
"""
def setUp(self) -> None:
"""
Reset the surface.
"""
test_reset_surface()
def tearDown(self) -> None:
"""
Reset the surface.
"""
test_reset_surface()
class PygameEventUtils(object):
"""
Event utils.
"""
@staticmethod
def joy_motion(
x: NumberType = 0,
y: NumberType = 0,
inlist: bool = True,
testmode: bool = True
) -> EventListType:
"""
Create a pygame joy controller motion event.
:param x: X-axis movement
:param y: Y-axis movement
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
if x != 0 and y != 0:
return [PygameEventUtils.joy_motion(x=x, inlist=False, testmode=testmode),
PygameEventUtils.joy_motion(y=y, inlist=False, testmode=testmode)]
event_obj = None
if x != 0:
event_obj = pygame.event.Event(pygame.JOYAXISMOTION,
{
'value': x,
'axis': pygame_menu.controls.JOY_AXIS_X,
'test': testmode
})
if y != 0:
event_obj = pygame.event.Event(pygame.JOYAXISMOTION,
{
'value': y,
'axis': pygame_menu.controls.JOY_AXIS_Y,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def joy_center(
testmode: bool = True,
inlist: bool = True
) -> EventListType:
"""
Centers the joy.
:param testmode: Event is in test mode
:param inlist: Event is within a list
:return: Center joy event
"""
event_obj = pygame.event.Event(pygame.JOYAXISMOTION,
{
'value': 0,
'axis': pygame_menu.controls.JOY_AXIS_Y,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def joy_hat_motion(
key: Tuple[int, int],
inlist: bool = True,
testmode: bool = True
) -> EventListType:
"""
Create a pygame joy controller key event.
:param key: Key to press
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
event_obj = pygame.event.Event(pygame.JOYHATMOTION,
{
'value': key,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def joy_button(
button: int,
evtype: int = pygame.JOYBUTTONDOWN,
inlist: bool = True,
testmode: bool = True
) -> EventListType:
"""
Create a pygame joy controller key event.
:param button: Button to press
:param evtype: Event type
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
event_obj = pygame.event.Event(evtype,
{
'button': button,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def test_widget_key_press(
widget: 'pygame_menu.widgets.Widget',
testmode: bool = True
) -> None:
"""
Test keypress widget.
:param widget: Widget object
:param testmode: Event is in test mode
"""
widget.update(PygameEventUtils.key(pygame.K_BACKSPACE, keydown=True, testmode=testmode))
widget.update(PygameEventUtils.key(pygame.K_DELETE, keydown=True, testmode=testmode))
widget.update(PygameEventUtils.key(pygame.K_LEFT, keydown=True, testmode=testmode))
widget.update(PygameEventUtils.key(pygame.K_RIGHT, keydown=True, testmode=testmode))
widget.update(PygameEventUtils.key(pygame.K_END, keydown=True, testmode=testmode))
widget.update(PygameEventUtils.key(pygame.K_HOME, keydown=True, testmode=testmode))
@staticmethod
def keydown_mod_ctrl(
key: int,
inlist: bool = True,
testmode: bool = True
) -> EventListType:
"""
Create a mod ctrl keydown event (Ctrl+Key).
:param key: Key to press
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
# noinspection PyArgumentList
pygame.key.set_mods(pygame.KMOD_CTRL)
event_obj = pygame.event.Event(pygame.KEYDOWN,
{
'key': key,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def release_key_mod() -> None:
"""
Release pygame key mods.
"""
# noinspection PyArgumentList
pygame.key.set_mods(pygame.KMOD_NONE)
@staticmethod
def keydown_mod_alt(
key: int,
inlist: bool = True,
testmode: bool = True
) -> EventListType:
"""
Create a mod alt keydown event (Alt+Key).
:param key: Key to press
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
# noinspection PyArgumentList
pygame.key.set_mods(pygame.KMOD_ALT)
event_obj = pygame.event.Event(pygame.KEYDOWN,
{
'key': key,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def keydown(
key: Union[int, VectorIntType],
testmode: bool = True,
inlist: bool = True
) -> EventListType:
"""
Keydown list.
:param key: Key to press
:param testmode: Event is in test mode
:param inlist: Return event in a list
:return: Event list
"""
if isinstance(key, int):
key = [key]
ev = []
for k in key:
assert isinstance(k, int)
ev.append(PygameEventUtils.key(k, keydown=True, inlist=False, testmode=testmode))
if not inlist:
assert len(ev) == 1
return ev[0]
return ev
@staticmethod
def key(
key: int,
char: str = ' ',
inlist: bool = True,
keydown: bool = False,
keyup: bool = False,
testmode: bool = True
) -> EventListType:
"""
Create a keyboard event.
:param key: Key to press
:param char: Char representing the key
:param inlist: Return event in a list
:param keydown: Event is keydown
:param keyup: Event is keyup
:param testmode: Event is in test mode
:return: Event
"""
if keyup and keydown:
raise ValueError('keyup and keydown cannot be active at the same time')
if keydown == keyup and not keydown:
raise ValueError('keyup and keydown cannot be false at the same time')
event = -1
if keydown:
event = pygame.KEYDOWN
if keyup:
event = pygame.KEYUP
event_obj = pygame.event.Event(event,
{
'key': key,
'test': testmode
})
if len(char) == 1:
event_obj.dict['unicode'] = char
if inlist:
event_obj = [event_obj]
return event_obj
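    # Hedged usage sketch for key() (the `widget` variable is an assumption,
    # any pygame-menu widget works): feed a synthetic "a" key press to it:
    #
    #   events = PygameEventUtils.key(pygame.K_a, char='a', keydown=True)
    #   widget.update(events)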
@staticmethod
def enter_window(inlist: bool = True, testmode: bool = True) -> EventListType:
"""
Enter window event.
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 1, 'test': testmode})
if inlist:
ev = [ev]
return ev
@staticmethod
def leave_window(inlist: bool = True, testmode: bool = True) -> EventListType:
"""
Leave window event.
:param inlist: Return event in a list
:param testmode: Event is in test mode
:return: Event
"""
ev = pygame.event.Event(pygame.ACTIVEEVENT, {'gain': 0, 'test': testmode})
if inlist:
ev = [ev]
return ev
@staticmethod
def mouse_click(
x: NumberType,
y: NumberType,
inlist: bool = True,
evtype: int = pygame.MOUSEBUTTONUP,
rel: Tuple2IntType = (0, 0),
button: int = 3,
testmode: bool = True,
update_mouse: bool = False
) -> EventListType:
"""
Generate a mouse click event.
:param x: X coordinate in px
:param y: Y coordinate in px
:param inlist: Return event in a list
:param evtype: event type, it can be MOUSEBUTTONUP or MOUSEBUTTONDOWN
:param rel: Rel position (relative movement)
        :param button: Which button is pressed; ``1`` to ``3`` are the main buttons, while ``4`` and ``5`` are the wheel
:param testmode: Event is in test mode
:param update_mouse: If ``True`` updates the mouse position
:return: Event
"""
assert isinstance(button, int) and button > 0
assert isinstance(x, NumberInstance)
assert isinstance(y, NumberInstance)
assert_vector(rel, 2, int)
x = int(x)
y = int(y)
event_obj = pygame.event.Event(evtype,
{
'button': button,
'pos': (x, y),
'rel': rel,
'test': testmode
})
if update_mouse:
# print('set mouse position', (x, y))
pygame.mouse.set_pos((x, y))
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def touch_click(
x: NumberType,
y: NumberType,
inlist: bool = True,
evtype: int = FINGERUP,
rel: Tuple2IntType = (0, 0),
normalize: bool = True,
menu: Union['pygame_menu.Menu', None] = None,
testmode: bool = True
) -> EventListType:
"""
        Generate a touch click event.
:param x: X coordinate
:param y: Y coordinate
:param inlist: Return event in a list
:param evtype: Event type, it can be FINGERUP, FINGERDOWN or FINGERMOTION
:param rel: Rel position (relative movement)
:param normalize: Normalize event position
:param menu: Menu reference
:param testmode: Event is in test mode
:return: Event
"""
assert isinstance(x, NumberInstance)
assert isinstance(y, NumberInstance)
assert_vector(rel, 2, int)
if normalize:
assert menu is not None, \
'menu reference must be provided if normalize is used (related to touch events)'
display_size = menu.get_window_size()
x /= display_size[0]
y /= display_size[1]
event_obj = pygame.event.Event(evtype,
{
'x': x,
'y': y,
'rel': rel,
'test': testmode
})
if inlist:
event_obj = [event_obj]
return event_obj
@staticmethod
def topleft_rect_mouse_motion(
rect: Union['pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType],
inlist: bool = True,
delta: Tuple2IntType = (0, 0),
testmode: bool = True,
update_mouse: bool = False
) -> EventListType:
"""
Mouse motion event.
:param rect: Widget, Rect object, or Tuple
:param inlist: If ``True`` return the event within a list
:param delta: Add tuple to rect position
:param testmode: Event is in test mode
:param update_mouse: If ``True`` updates the mouse position
:return: Event
"""
if isinstance(rect, pygame_menu.widgets.Widget):
x, y = rect.get_rect(to_real_position=True, render=True).topleft
elif isinstance(rect, pygame.Rect):
x, y = rect.topleft
elif isinstance(rect, VectorInstance):
x, y = rect[0], rect[1]
else:
raise ValueError('unknown rect type')
return PygameEventUtils.middle_rect_click(
rect=(x, y),
evtype=pygame.MOUSEMOTION,
inlist=inlist,
delta=delta,
testmode=testmode,
update_mouse=update_mouse
)
@staticmethod
def mouse_motion(
rect: Union['pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType],
inlist: bool = True,
rel: Tuple2IntType = (0, 0),
delta: Tuple2IntType = (0, 0),
testmode: bool = True,
update_mouse: bool = False
) -> EventListType:
"""
Mouse motion event.
:param rect: Widget, Rect object, or Tuple
:param inlist: If ``True`` return the event within a list
:param rel: Rel position (relative movement)
:param delta: Add tuple to rect position
:param testmode: Event is in test mode
:param update_mouse: If ``True`` updates the mouse position
:return: Event
"""
return PygameEventUtils.middle_rect_click(
rect=rect,
evtype=pygame.MOUSEMOTION,
rel=rel,
inlist=inlist,
delta=delta,
testmode=testmode,
update_mouse=update_mouse
)
@staticmethod
def middle_rect_click(
rect: Union['pygame_menu.widgets.Widget', 'pygame.Rect', Tuple2NumberType],
menu: Optional['pygame_menu.Menu'] = None,
evtype: int = pygame.MOUSEBUTTONUP,
inlist: bool = True,
rel: Tuple2IntType = (0, 0),
button: int = 3,
delta: Tuple2IntType = (0, 0),
testmode: bool = True,
update_mouse: bool = False
) -> EventListType:
"""
Return event clicking the middle of a given rect.
:param rect: Widget, Rect object, or Tuple
:param menu: Menu object
:param evtype: event type, it can be MOUSEBUTTONUP, MOUSEBUTTONDOWN, MOUSEMOTION, FINGERUP, FINGERDOWN, FINGERMOTION
:param inlist: If ``True`` return the event within a list
:param rel: Rel position (relative movement)
        :param button: Which button is pressed; ``1`` to ``3`` are the main buttons, while ``4`` and ``5`` are the wheel
:param delta: Add tuple to rect position
:param testmode: Event is in test mode
:param update_mouse: If ``True`` updates the mouse position
:return: Event
"""
assert isinstance(button, int) and button > 0
assert_vector(rel, 2, int)
assert_vector(delta, 2, int)
if isinstance(rect, pygame_menu.widgets.Widget):
x, y = rect.get_rect(to_real_position=True, render=True, apply_padding=False).center
menu = rect.get_menu()
elif isinstance(rect, pygame.Rect):
x, y = rect.center
elif isinstance(rect, VectorInstance):
x, y = rect[0], rect[1]
else:
raise ValueError('unknown rect type')
if evtype == FINGERDOWN or evtype == FINGERUP or evtype == FINGERMOTION:
assert menu is not None, \
'menu cannot be none if FINGERDOWN, FINGERUP, or FINGERMOTION'
display = menu.get_window_size()
evt = pygame.event.Event(evtype,
{
'button': button,
'rel': rel,
'test': testmode,
'x': (x + delta[0]) / display[0],
'y': (y + delta[1]) / display[1]
})
if inlist:
evt = [evt]
return evt
return PygameEventUtils.mouse_click(
x=x + delta[0],
y=y + delta[1],
inlist=inlist,
evtype=evtype,
rel=rel,
button=button,
testmode=testmode,
update_mouse=update_mouse
)
class MenuUtils(object):
"""
Static class for utility pygame-menu methods.
"""
@staticmethod
def get_font(name: str, size: int) -> 'pygame.font.Font':
"""
Returns a font.
:param name: Font name
:param size: Font size
:return: Font
"""
return pygame_menu.font.get_font(name, size)
@staticmethod
def random_font() -> str:
"""
Return a random font from the library.
:return: Font file
"""
opt = random.randrange(0, len(FONT_EXAMPLES))
return FONT_EXAMPLES[opt]
@staticmethod
def load_font(font: str, size: int) -> 'pygame.font.Font':
"""
Load font from file.
:param font: Font name
:param size: Font size
:return: Font object
"""
return pygame_menu.font.get_font(font, size)
@staticmethod
def random_system_font() -> str:
"""
Return random system font.
:return: System font name
"""
fonts = pygame.font.get_fonts()
fonts.sort()
fonts.pop(0)
return fonts[int(random.randrange(0, len(fonts)))]
@staticmethod
def generic_menu(
center_content: bool = True,
column_max_width: MenuColumnMaxWidthType = None,
column_min_width: MenuColumnMinWidthType = 0,
columns: int = 1,
enabled: bool = True,
height: NumberType = 400,
mouse_visible: bool = True,
mouse_motion_selection: bool = False,
onclose: Any = None,
onreset: Any = None,
position_x: NumberType = 50,
position_y: NumberType = 50,
rows: MenuRowsType = None,
theme: 'pygame_menu.themes.Theme' = pygame_menu.themes.THEME_DEFAULT,
title: str = '',
width: NumberType = 600,
*args,
**kwargs
) -> 'pygame_menu.Menu':
"""
Generate a generic test menu.
:param center_content: Center menu content
:param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400, 500)`` (first column 400px, second 500). If ``0`` is given, the menu width is used. This method does not resize the widgets, only determines the dynamic width of the column layout
:param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_min_width=(400, 500)`` (first column 400px, second 500). By default, it's ``0``. Negative values are not accepted
:param columns: Number of columns
:param enabled: Menu is enabled. If ``False`` Menu cannot be drawn
:param height: Menu height in px
:param mouse_visible: Set mouse visible on Menu
:param mouse_motion_selection: Select widgets using mouse motion. If ``True`` menu draws a ``focus`` on the selected widget
:param onclose: Event or function applied when closing the Menu
:param onreset: Function executed when resetting the Menu
:param position_x: X position of the menu
:param position_y: Y position of the menu
:param rows: Number of rows
:param theme: Menu theme
:param title: Menu title
:param width: Menu width in px
:param args: Additional args
:param kwargs: Optional keyword arguments
:return: Menu
"""
return pygame_menu.Menu(
center_content=center_content,
column_max_width=column_max_width,
column_min_width=column_min_width,
columns=columns,
enabled=enabled,
height=height,
mouse_visible=mouse_visible,
mouse_motion_selection=mouse_motion_selection,
onclose=onclose,
onreset=onreset,
position=(position_x, position_y),
rows=rows,
theme=theme,
title=title,
width=width,
*args,
**kwargs
)
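# Hedged usage sketch (not part of the original helpers): how the utilities above
# are typically combined in a test. Assumes pygame/pygame-menu are installed and a
# display can be created; the menu title and button label are illustrative only.
if __name__ == '__main__':
    pygame.init()
    surface = pygame.display.set_mode((600, 400))
    menu = MenuUtils.generic_menu(title='example')
    btn = menu.add.button('ok', lambda: None)
    menu.render()
    # Build a synthetic left-click release at the button center and feed it to the menu
    menu.update(PygameEventUtils.middle_rect_click(btn, button=1))
    menu.draw(surface)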
|
847f7e1f88cc0cd658247b9a0b4c0996abc6dddb
|
d51b2e9657d0ea1d8bcb8ed8da11a3f9dac72192
|
/test/core/linter/linter_test.py
|
5603a3b4e75aae4fbec54d34b02661ac9d3e87e8
|
[
"MIT"
] |
permissive
|
sqlfluff/sqlfluff
|
dae8294814471165582e12ea75ab8142e75f8e62
|
a66da908907ee1eaf09d88a731025da29e7fca07
|
refs/heads/main
| 2023-08-28T20:07:59.624519
| 2023-08-27T22:17:24
| 2023-08-27T22:17:24
| 155,790,228
| 5,931
| 545
|
MIT
| 2023-09-14T18:05:19
| 2018-11-01T23:56:04
|
Python
|
UTF-8
|
Python
| false
| false
| 18,875
|
py
|
linter_test.py
|
"""Tests for the Linter class and LintingResult class."""
import os
import logging
from unittest.mock import patch
import pytest
from sqlfluff.core import Linter, FluffConfig
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.linter import runner
from sqlfluff.core.errors import (
SQLFluffSkipFile,
SQLLexError,
SQLBaseError,
SQLLintError,
SQLFluffUserError,
)
from sqlfluff.cli.formatters import OutputStreamFormatter
from sqlfluff.cli.outputstream import make_output_stream
from sqlfluff.core.linter import LintingResult
from sqlfluff.core.linter.runner import get_runner
from sqlfluff.core.parser import GreedyUntil, Ref
from sqlfluff.utils.testing.logging import fluff_log_catcher
class DummyLintError(SQLBaseError):
"""Fake lint error used by tests, similar to SQLLintError."""
def __init__(self, line_no: int, code: str = "LT01"):
self._code = code
super().__init__(line_no=line_no)
def normalise_paths(paths):
"""Test normalising paths.
NB Paths on difference platforms might look different, so this
makes them comparable.
"""
return {pth.replace("/", ".").replace("\\", ".") for pth in paths}
def test__linter__path_from_paths__dir():
"""Test extracting paths from directories."""
lntr = Linter()
paths = lntr.paths_from_path("test/fixtures/lexer")
assert normalise_paths(paths) == {
"test.fixtures.lexer.block_comment.sql",
"test.fixtures.lexer.inline_comment.sql",
"test.fixtures.lexer.basic.sql",
}
def test__linter__path_from_paths__default():
"""Test .sql files are found by default."""
lntr = Linter()
paths = normalise_paths(lntr.paths_from_path("test/fixtures/linter"))
assert "test.fixtures.linter.passing.sql" in paths
assert "test.fixtures.linter.passing_cap_extension.SQL" in paths
assert "test.fixtures.linter.discovery_file.txt" not in paths
def test__linter__path_from_paths__exts():
"""Test configuration of file discovery."""
lntr = Linter(
config=FluffConfig(overrides={"sql_file_exts": ".txt", "dialect": "ansi"})
)
paths = normalise_paths(lntr.paths_from_path("test/fixtures/linter"))
assert "test.fixtures.linter.passing.sql" not in paths
assert "test.fixtures.linter.passing_cap_extension.SQL" not in paths
assert "test.fixtures.linter.discovery_file.txt" in paths
def test__linter__path_from_paths__file():
"""Test extracting paths from a file path."""
lntr = Linter()
paths = lntr.paths_from_path("test/fixtures/linter/indentation_errors.sql")
assert normalise_paths(paths) == {"test.fixtures.linter.indentation_errors.sql"}
@pytest.mark.parametrize("filesize,raises_skip", [(0, False), (5, True), (2000, False)])
def test__linter__skip_large_bytes(filesize, raises_skip):
"""Test extracting paths from a file path."""
config = FluffConfig(
overrides={"large_file_skip_byte_limit": filesize, "dialect": "ansi"}
)
# First check the function directly
if raises_skip:
with pytest.raises(SQLFluffSkipFile) as excinfo:
Linter.load_raw_file_and_config(
"test/fixtures/linter/indentation_errors.sql", config
)
assert "Skipping" in str(excinfo.value)
assert f"over the limit of {filesize}" in str(excinfo.value)
# If it is not supposed to raise but does, the error propagates and the test fails.
# Then check that it either is or isn't linted appropriately via lint_paths.
lntr = Linter(config)
result = lntr.lint_paths(
("test/fixtures/linter/indentation_errors.sql",),
)
if raises_skip:
assert not result.get_violations()
else:
assert result.get_violations()
# Same again via parse_path, which is the other entry point.
result = list(
lntr.parse_path(
"test/fixtures/linter/indentation_errors.sql",
)
)
if raises_skip:
assert not result
else:
assert result
def test__linter__path_from_paths__not_exist():
"""Test that the right errors are raise when a file doesn't exist."""
lntr = Linter()
with pytest.raises(SQLFluffUserError):
lntr.paths_from_path("asflekjfhsakuefhse")
def test__linter__path_from_paths__not_exist_ignore():
"""Test extracting paths from a file path."""
lntr = Linter()
paths = lntr.paths_from_path("asflekjfhsakuefhse", ignore_non_existent_files=True)
assert len(paths) == 0
def test__linter__path_from_paths__explicit_ignore():
"""Test ignoring files that were passed explicitly."""
lntr = Linter()
paths = lntr.paths_from_path(
"test/fixtures/linter/sqlfluffignore/path_a/query_a.sql",
ignore_non_existent_files=True,
ignore_files=True,
working_path="test/fixtures/linter/sqlfluffignore/",
)
assert len(paths) == 0
def test__linter__path_from_paths__sqlfluffignore_current_directory():
"""Test that .sqlfluffignore in the current directory is read when dir given."""
oldcwd = os.getcwd()
try:
os.chdir("test/fixtures/linter/sqlfluffignore")
lntr = Linter()
paths = lntr.paths_from_path(
"path_a/",
ignore_non_existent_files=True,
ignore_files=True,
working_path="test/fixtures/linter/sqlfluffignore/",
)
assert len(paths) == 0
finally:
os.chdir(oldcwd)
def test__linter__path_from_paths__dot():
"""Test extracting paths from a dot."""
lntr = Linter()
paths = lntr.paths_from_path(".")
# Use set theory to check that we get AT LEAST these files
assert normalise_paths(paths) >= {
"test.fixtures.lexer.block_comment.sql",
"test.fixtures.lexer.inline_comment.sql",
"test.fixtures.lexer.basic.sql",
}
@pytest.mark.parametrize(
"path",
[
"test/fixtures/linter/sqlfluffignore",
"test/fixtures/linter/sqlfluffignore/",
"test/fixtures/linter/sqlfluffignore/.",
],
)
def test__linter__path_from_paths__ignore(path):
"""Test extracting paths from a dot."""
lntr = Linter()
paths = lntr.paths_from_path(path)
# We should only get query_b, because of the sqlfluffignore files.
assert normalise_paths(paths) == {
"test.fixtures.linter.sqlfluffignore.path_b.query_b.sql"
}
@pytest.mark.parametrize(
"path",
[
"test/fixtures/linter/indentation_errors.sql",
"test/fixtures/linter/whitespace_errors.sql",
],
)
def test__linter__lint_string_vs_file(path):
"""Test the linter finds the same things on strings and files."""
with open(path) as f:
sql_str = f.read()
lntr = Linter(dialect="ansi")
assert (
lntr.lint_string(sql_str).check_tuples() == lntr.lint_path(path).check_tuples()
)
@pytest.mark.parametrize(
"rules,num_violations", [(None, 6), ("CP01", 2), (("LT01", "LT12"), 1)]
)
def test__linter__get_violations_filter_rules(rules, num_violations):
"""Test filtering violations by which rules were violated."""
lntr = Linter(dialect="ansi")
lint_result = lntr.lint_string("select a, b FROM tbl c order BY d")
assert len(lint_result.get_violations(rules=rules)) == num_violations
def test__linter__linting_result__sum_dicts():
"""Test the summing of dictionaries in the linter."""
lr = LintingResult()
i = {}
a = dict(a=3, b=123, f=876.321)
b = dict(a=19, b=321.0, g=23478)
r = dict(a=22, b=444.0, f=876.321, g=23478)
assert lr.sum_dicts(a, b) == r
# Check the identity too
assert lr.sum_dicts(r, i) == r
def test__linter__linting_result__combine_dicts():
"""Test the combination of dictionaries in the linter."""
lr = LintingResult()
a = dict(a=3, b=123, f=876.321)
b = dict(h=19, i=321.0, j=23478)
r = dict(z=22)
assert lr.combine_dicts(a, b, r) == dict(
a=3, b=123, f=876.321, h=19, i=321.0, j=23478, z=22
)
@pytest.mark.parametrize("by_path,result_type", [(False, list), (True, dict)])
def test__linter__linting_result_check_tuples_by_path(by_path, result_type):
"""Test that a LintingResult can partition violations by the source files."""
lntr = Linter()
result = lntr.lint_paths(
[
"test/fixtures/linter/comma_errors.sql",
"test/fixtures/linter/whitespace_errors.sql",
]
)
check_tuples = result.check_tuples(by_path=by_path)
assert isinstance(check_tuples, result_type)
@pytest.mark.parametrize("processes", [1, 2])
def test__linter__linting_result_get_violations(processes):
"""Test that we can get violations from a LintingResult."""
lntr = Linter()
result = lntr.lint_paths(
(
"test/fixtures/linter/comma_errors.sql",
"test/fixtures/linter/whitespace_errors.sql",
),
processes=processes,
)
assert all(isinstance(v, SQLLintError) for v in result.get_violations())
@pytest.mark.parametrize("force_error", [False, True])
def test__linter__linting_parallel_thread(force_error, monkeypatch):
"""Run linter in parallel mode using threads.
Similar to test__linter__linting_result_get_violations but uses a thread
pool of 1 worker to test parallel mode without subprocesses. This lets the
tests capture code coverage information for the backend parts of parallel
execution without having to jump through hoops.
"""
if not force_error:
monkeypatch.setattr(Linter, "allow_process_parallelism", False)
else:
def _create_pool(*args, **kwargs):
class ErrorPool:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def imap_unordered(self, *args, **kwargs):
yield runner.DelayedException(ValueError())
return ErrorPool()
monkeypatch.setattr(runner.MultiProcessRunner, "_create_pool", _create_pool)
config = FluffConfig(overrides={"dialect": "ansi"})
output_stream = make_output_stream(config, None, os.devnull)
lntr = Linter(
formatter=OutputStreamFormatter(output_stream, False, verbosity=0),
dialect="ansi",
)
result = lntr.lint_paths(
# NOTE: Lint more than one file to make sure we enabled the multithreaded
# code path.
(
"test/fixtures/linter/comma_errors.sql",
"test/fixtures/linter/whitespace_errors.sql",
),
processes=2,
)
assert all(isinstance(v, SQLLintError) for v in result.get_violations())
@patch("sqlfluff.core.linter.Linter.lint_rendered")
def test_lint_path_parallel_wrapper_exception(patched_lint):
"""Tests the error catching behavior of _lint_path_parallel_wrapper().
Test on MultiThread runner because otherwise we have pickling issues.
"""
patched_lint.side_effect = ValueError("Something unexpected happened")
for result in runner.MultiThreadRunner(
Linter(), FluffConfig(overrides={"dialect": "ansi"}), processes=1
).run(
["test/fixtures/linter/passing.sql"],
fix=False,
):
assert isinstance(result, runner.DelayedException)
with pytest.raises(ValueError):
result.reraise()
@pytest.mark.parametrize(
"mock_cpu,in_processes,exp_processes",
[
# Make the mocked cpu count a really high value which is
# unlikely to collide with the real value. We can then
# test all the different combos.
(512, 1, 1),
(512, 0, 512),
(512, -12, 500),
(512, 5, 5),
# Check that we can't go lower than 1 in a 1 cpu case
(1, -1, 1),
],
)
@patch("multiprocessing.cpu_count")
def test__linter__get_runner_processes(
patched_cpu_count, mock_cpu, in_processes, exp_processes
):
"""Test that get_runner handles processes correctly."""
# Make the mocked cpu count a really high value which is
# unlikely to collide with the real value.
patched_cpu_count.return_value = mock_cpu
_, return_processes = get_runner(
linter=Linter(),
config=FluffConfig(overrides={"dialect": "ansi"}),
processes=in_processes,
)
assert return_processes == exp_processes
@patch("sqlfluff.core.linter.runner.linter_logger")
@patch("sqlfluff.core.linter.Linter.lint_rendered")
def test__linter__linting_unexpected_error_handled_gracefully(
patched_lint, patched_logger
):
"""Test that an unexpected internal error returns the issue-surfacing file."""
patched_lint.side_effect = Exception("Something unexpected happened")
lntr = Linter()
lntr.lint_paths(("test/fixtures/linter/passing.sql",))
assert (
"Unable to lint test/fixtures/linter/passing.sql due to an internal error."
# NB: Replace is to handle windows-style paths.
in patched_logger.warning.call_args[0][0].replace("\\", "/")
and "Exception: Something unexpected happened"
in patched_logger.warning.call_args[0][0]
)
def test__linter__empty_file():
"""Test linter behaves nicely with an empty string."""
lntr = Linter(dialect="ansi")
# Make sure no exceptions raised and no violations found in empty file.
parsed = lntr.parse_string("")
assert not parsed.violations
@pytest.mark.parametrize(
"ignore_templated_areas,check_tuples",
[
(True, [("LT01", 3, 39), ("LT01", 3, 40)]),
(
False,
[
# there are still two of each because LT01 checks
# for both *before* and *after* the operator.
# The deduplication filter makes sure there aren't 4.
("LT01", 3, 16),
("LT01", 3, 16),
("LT01", 3, 39),
("LT01", 3, 40),
],
),
],
)
def test__linter__mask_templated_violations(ignore_templated_areas, check_tuples):
"""Test linter masks files properly around templated content.
NOTE: this also tests deduplication of fixes which have the same
source position. i.e. `LintedFile.deduplicate_in_source_space()`.
"""
lntr = Linter(
config=FluffConfig(
overrides={
"rules": "L006",
"ignore_templated_areas": ignore_templated_areas,
"dialect": "ansi",
}
)
)
linted = lntr.lint_path(path="test/fixtures/templater/jinja_h_macros/jinja.sql")
assert linted.check_tuples() == check_tuples
@pytest.mark.parametrize(
"fname,config_encoding,lexerror",
[
(
"test/fixtures/linter/encoding-utf-8.sql",
"autodetect",
False,
),
(
"test/fixtures/linter/encoding-utf-8-sig.sql",
"autodetect",
False,
),
(
"test/fixtures/linter/encoding-utf-8.sql",
"utf-8",
False,
),
(
"test/fixtures/linter/encoding-utf-8-sig.sql",
"utf-8",
True,
),
(
"test/fixtures/linter/encoding-utf-8.sql",
"utf-8-sig",
False,
),
(
"test/fixtures/linter/encoding-utf-8-sig.sql",
"utf-8-sig",
False,
),
],
)
def test__linter__encoding(fname, config_encoding, lexerror):
"""Test linter deals with files with different encoding."""
lntr = Linter(
config=FluffConfig(
overrides={
"rules": "LT01",
"encoding": config_encoding,
"dialect": "ansi",
}
)
)
result = lntr.lint_paths([fname])
assert lexerror == (SQLLexError in [type(v) for v in result.get_violations()])
def test_delayed_exception():
"""Test that DelayedException stores and reraises a stored exception."""
ve = ValueError()
de = runner.DelayedException(ve)
with pytest.raises(ValueError):
de.reraise()
def test__attempt_to_change_templater_warning():
"""Test warning when changing templater in .sqlfluff file in subdirectory."""
initial_config = FluffConfig(
configs={"core": {"templater": "jinja", "dialect": "ansi"}}
)
lntr = Linter(config=initial_config)
updated_config = FluffConfig(
configs={"core": {"templater": "python", "dialect": "ansi"}}
)
with fluff_log_catcher(logging.WARNING, "sqlfluff.linter") as caplog:
lntr.render_string(
in_str="select * from table",
fname="test.sql",
config=updated_config,
encoding="utf-8",
)
assert "Attempt to set templater to " in caplog.text
def test_advanced_api_methods():
"""Test advanced API methods on segments."""
# These aren't used by the simple API, which returns
# a simple JSON representation of the parse tree, but
# are available for advanced API usage and within rules.
sql = """
WITH cte AS (
SELECT * FROM tab_a
)
SELECT
cte.col_a,
tab_b.col_b
FROM cte
INNER JOIN tab_b;
"""
linter = Linter(dialect="ansi")
parsed = linter.parse_string(sql)
# CTEDefinitionSegment.get_identifier
cte_segment = next(parsed.tree.recursive_crawl("common_table_expression"))
assert cte_segment.get_identifier().raw == "cte"
# BaseFileSegment.get_table_references & StatementSegment.get_table_references
assert parsed.tree.get_table_references() == {"tab_a", "tab_b"}
def test_normalise_newlines():
"""Test normalising newlines to unix-style line endings."""
in_str = "SELECT\r\n foo\n FROM \r \n\r bar;"
out_str = "SELECT\n foo\n FROM \n \n\n bar;"
assert out_str == Linter._normalise_newlines(in_str)
def test_require_match_parse_grammar():
"""Tests a segment validation check in Dialect.replace().
If a segment class defines both match_grammar and parse_grammar, replacing
it requires a segment that defines BOTH or NEITHER of them.
"""
ansi_dialect = load_raw_dialect("ansi")
# Try to register a segment that defines match_grammar but not
# parse_grammar.
class StatementSegment(ansi_dialect.get_segment("StatementSegment")):
match_grammar = GreedyUntil(Ref("DelimiterSegment"))
with pytest.raises(ValueError) as e:
ansi_dialect.replace(StatementSegment=StatementSegment)
assert "needs to define 'parse_grammar'" in str(e.value)
# Now try to register a segment that defines parse_grammar but not
# match_grammar.
class StatementSegment(ansi_dialect.get_segment("StatementSegment")):
parse_grammar = GreedyUntil(Ref("DelimiterSegment"))
with pytest.raises(ValueError) as e:
ansi_dialect.replace(StatementSegment=StatementSegment)
assert "needs to define 'match_grammar'" in str(e.value)
|
324da8ad555eac971ec57ed126bd8613e31899cc
|
e7b4786bd94ccb718c3cdee805a55737b1c64c14
|
/aizynthfinder/aizynthfinder.py
|
09e2c2a22969c08f284b41ef6fdcdb9f52afc92a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MolecularAI/aizynthfinder
|
8a68e648f84bb75485d502d119ef834f0368c968
|
82f19c4c7d7bb3410cad1859b0324331f0c1ab62
|
refs/heads/master
| 2023-08-29T14:18:42.650117
| 2023-06-01T14:14:18
| 2023-06-01T14:14:18
| 271,546,468
| 428
| 125
|
MIT
| 2023-08-14T23:18:14
| 2020-06-11T12:57:07
|
Python
|
UTF-8
|
Python
| false
| false
| 12,121
|
py
|
aizynthfinder.py
|
""" Module containing a class that is the main interface the retrosynthesis tool.
"""
from __future__ import annotations
import time
from collections import defaultdict
from typing import TYPE_CHECKING
from tqdm import tqdm
from aizynthfinder.analysis import (
RouteCollection,
RouteSelectionArguments,
TreeAnalysis,
)
from aizynthfinder.chem import FixedRetroReaction, Molecule, TreeMolecule
from aizynthfinder.context.config import Configuration
from aizynthfinder.reactiontree import ReactionTreeFromExpansion
from aizynthfinder.search.andor_trees import AndOrSearchTreeBase
from aizynthfinder.search.mcts import MctsSearchTree
from aizynthfinder.utils.exceptions import MoleculeException
from aizynthfinder.utils.loading import load_dynamic_class
# This must be imported first to setup logging for rdkit, tensorflow etc
from aizynthfinder.utils.logging import logger
if TYPE_CHECKING:
from aizynthfinder.chem import RetroReaction
from aizynthfinder.utils.type_utils import (
Callable,
Dict,
List,
Optional,
StrDict,
Tuple,
Union,
)
class AiZynthFinder:
"""
Public API to the aizynthfinder tool
If instantiated with the path to a yaml file or a dictionary of settings,
the stocks and policy networks are loaded directly.
Otherwise, the user is responsible for loading them prior to
executing the tree search.
:ivar config: the configuration of the search
:ivar expansion_policy: the expansion policy model
:ivar filter_policy: the filter policy model
:ivar stock: the stock
:ivar scorers: the loaded scores
:ivar tree: the search tree
:ivar analysis: the tree analysis
:ivar routes: the top-ranked routes
:ivar search_stats: statistics of the latest search
:param configfile: the path to yaml file with configuration (has priority over configdict), defaults to None
:param configdict: the config as a dictionary source, defaults to None
"""
def __init__(self, configfile: str = None, configdict: StrDict = None) -> None:
self._logger = logger()
if configfile:
self.config = Configuration.from_file(configfile)
elif configdict:
self.config = Configuration.from_dict(configdict)
else:
self.config = Configuration()
self.expansion_policy = self.config.expansion_policy
self.filter_policy = self.config.filter_policy
self.stock = self.config.stock
self.scorers = self.config.scorers
self.tree: Optional[Union[MctsSearchTree, AndOrSearchTreeBase]] = None
self._target_mol: Optional[Molecule] = None
self.search_stats: StrDict = dict()
self.routes = RouteCollection([])
self.analysis: Optional[TreeAnalysis] = None
@property
def target_smiles(self) -> str:
"""The SMILES representation of the molecule to predict routes on."""
if not self._target_mol:
return ""
return self._target_mol.smiles
@target_smiles.setter
def target_smiles(self, smiles: str) -> None:
self.target_mol = Molecule(smiles=smiles)
@property
def target_mol(self) -> Optional[Molecule]:
"""The molecule to predict routes on"""
return self._target_mol
@target_mol.setter
def target_mol(self, mol: Molecule) -> None:
self.tree = None
self._target_mol = mol
def build_routes(
self, selection: RouteSelectionArguments = None, scorer: str = "state score"
) -> None:
"""
Build reaction routes
This is necessary to call after the tree search has completed in order
to extract results from the tree search.
:param selection: the selection criteria for the routes
:param scorer: a reference to the object used to score the nodes
:raises ValueError: if the search tree is not initialized
"""
if not self.tree:
raise ValueError("Search tree not initialized")
self.analysis = TreeAnalysis(self.tree, scorer=self.scorers[scorer])
config_selection = RouteSelectionArguments(
nmin=self.config.post_processing.min_routes,
nmax=self.config.post_processing.max_routes,
return_all=self.config.post_processing.all_routes,
)
self.routes = RouteCollection.from_analysis(
self.analysis, selection or config_selection
)
def extract_statistics(self) -> StrDict:
"""Extracts tree statistics as a dictionary"""
if not self.analysis:
return {}
stats = {
"target": self.target_smiles,
"search_time": self.search_stats["time"],
"first_solution_time": self.search_stats.get("first_solution_time", 0),
"first_solution_iteration": self.search_stats.get(
"first_solution_iteration", 0
),
}
stats.update(self.analysis.tree_statistics())
return stats
def prepare_tree(self) -> None:
"""
Set up the tree for searching
:raises ValueError: if the target molecule was not set
"""
if not self.target_mol:
raise ValueError("No target molecule set")
try:
self.target_mol.sanitize()
except MoleculeException:
raise ValueError("Target molecule unsanitizable")
self.stock.reset_exclusion_list()
if self.config.exclude_target_from_stock and self.target_mol in self.stock:
self.stock.exclude(self.target_mol)
self._logger.debug("Excluding the target compound from the stock")
self._setup_search_tree()
self.analysis = None
self.routes = RouteCollection([])
def stock_info(self) -> StrDict:
"""
Return the stock availability for all leaf nodes in all collected reaction trees
The key of the return dictionary will be the SMILES string of the leaves,
and the value will be the stock availability
:return: the collected stock information.
"""
if not self.analysis:
return {}
_stock_info = {}
for tree in self.routes.reaction_trees:
for leaf in tree.leafs():
if leaf.smiles not in _stock_info:
_stock_info[leaf.smiles] = self.stock.availability_list(leaf)
return _stock_info
def tree_search(self, show_progress: bool = False) -> float:
"""
Perform the actual tree search
:param show_progress: if True, shows a progress bar
:return: the elapsed time in seconds
"""
if not self.tree:
self.prepare_tree()
# This is for type checking, prepare_tree is creating it.
assert self.tree is not None
self.search_stats = {"returned_first": False, "iterations": 0}
time0 = time.time()
i = 1
self._logger.debug("Starting search")
time_past = time.time() - time0
if show_progress:
pbar = tqdm(total=self.config.iteration_limit, leave=False)
while time_past < self.config.time_limit and i <= self.config.iteration_limit:
if show_progress:
pbar.update(1)
self.search_stats["iterations"] += 1
try:
is_solved = self.tree.one_iteration()
except StopIteration:
break
if is_solved and "first_solution_time" not in self.search_stats:
self.search_stats["first_solution_time"] = time.time() - time0
self.search_stats["first_solution_iteration"] = i
if self.config.return_first and is_solved:
self._logger.debug("Found first solved route")
self.search_stats["returned_first"] = True
break
i = i + 1
time_past = time.time() - time0
if show_progress:
pbar.close()
time_past = time.time() - time0
self._logger.debug("Search completed")
self.search_stats["time"] = time_past
return time_past
def _setup_search_tree(self) -> None:
self._logger.debug("Defining tree root: %s" % self.target_smiles)
if self.config.search_algorithm.lower() == "mcts":
self.tree = MctsSearchTree(
root_smiles=self.target_smiles, config=self.config
)
else:
cls = load_dynamic_class(self.config.search_algorithm)
self.tree = cls(root_smiles=self.target_smiles, config=self.config)
class AiZynthExpander:
"""
Public API to the AiZynthFinder expansion and filter policies
If instantiated with the path to a yaml file or a dictionary of settings,
the stocks and policy networks are loaded directly.
Otherwise, the user is responsible for loading them prior to
executing the tree search.
:ivar config: the configuration of the search
:ivar expansion_policy: the expansion policy model
:ivar filter_policy: the filter policy model
:param configfile: the path to yaml file with configuration (has priority over configdict), defaults to None
:param configdict: the config as a dictionary source, defaults to None
"""
def __init__(self, configfile: str = None, configdict: StrDict = None) -> None:
self._logger = logger()
if configfile:
self.config = Configuration.from_file(configfile)
elif configdict:
self.config = Configuration.from_dict(configdict)
else:
self.config = Configuration()
self.expansion_policy = self.config.expansion_policy
self.filter_policy = self.config.filter_policy
self.stats: StrDict = {}
def do_expansion(
self,
smiles: str,
return_n: int = 5,
filter_func: Callable[[RetroReaction], bool] = None,
) -> List[Tuple[FixedRetroReaction, ...]]:
"""
Do the expansion of the given molecule returning a list of
reaction tuples. Each tuple in the list contains reactions
producing the same reactants. Hence, the nested structure of the
return value is a way of grouping reactions.
If a filter policy is set up, the feasibility probability of each
reaction is added as metadata to the reaction.
The additional filter function makes it possible to do customized
filtering. The callable should take a `RetroReaction` object as its only
argument and return True if the reaction should be kept or False if it
should be removed.
:param smiles: the SMILES string of the target molecule
:param return_n: the length of the return list
:param filter_func: an additional filter function
:return: the grouped reactions
"""
self.stats = {"non-applicable": 0}
mol = TreeMolecule(parent=None, smiles=smiles)
actions, _ = self.expansion_policy.get_actions([mol])
results: Dict[Tuple[str, ...], List[FixedRetroReaction]] = defaultdict(list)
for action in actions:
reactants = action.reactants
if not reactants:
self.stats["non-applicable"] += 1
continue
if filter_func and not filter_func(action):
continue
for name in self.filter_policy.selection or []:
if hasattr(self.filter_policy[name], "feasibility"):
_, feasibility_prob = self.filter_policy[name].feasibility(action)
action.metadata["feasibility"] = float(feasibility_prob)
break
action.metadata["expansion_rank"] = len(results) + 1
unique_key = tuple(sorted(mol.inchi_key for mol in reactants[0]))
if unique_key not in results and len(results) >= return_n:
continue
rxn = next(ReactionTreeFromExpansion(action).tree.reactions()) # type: ignore
results[unique_key].append(rxn)
return [tuple(reactions) for reactions in results.values()]
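# Hedged usage sketch (not part of the original module): the typical end-to-end
# workflow of the public API above. The config path and SMILES are hypothetical;
# a real run needs trained policies and a stock selected via the configuration.
if __name__ == "__main__":
    finder = AiZynthFinder(configfile="config.yml")  # hypothetical config file
    finder.target_smiles = "CCO"  # illustrative target molecule
    finder.tree_search(show_progress=True)
    finder.build_routes()
    print(finder.extract_statistics())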
|
e4aa73f5a2e627ead5f02ccfc5840b75be755ffb
|
6a67a0e47046a35ff5aa18ed1519517479857fe9
|
/g-wizzy/keys.py
|
ac277b7ab94237179cdefd55f75ad8fc9ca6c14b
|
[] |
no_license
|
qtile/qtile-examples
|
9ddc178b2833e71bde44e8751af9632c82b77123
|
4a7804e91df8d117d37da34ae95218882030558c
|
refs/heads/master
| 2023-08-15T00:45:30.598349
| 2023-02-16T19:13:56
| 2023-02-16T19:13:56
| 3,598,839
| 556
| 180
| null | 2023-07-14T10:44:17
| 2012-03-02T03:25:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,095
|
py
|
keys.py
|
from libqtile.config import EzKey as Key, EzDrag as Drag, EzClick as Click
from libqtile.lazy import lazy
from datetime import datetime as time
import subprocess
# BSP resizing taken from https://github.com/qtile/qtile/issues/1402
def resize(qtile, direction):
layout = qtile.current_layout
child = layout.current
parent = child.parent
while parent:
if child in parent.children:
layout_all = False
if (direction == "left" and parent.split_horizontal) or (direction == "up" and not parent.split_horizontal):
parent.split_ratio = max(5, parent.split_ratio - layout.grow_amount)
layout_all = True
elif (direction == "right" and parent.split_horizontal) or (direction == "down" and not parent.split_horizontal):
parent.split_ratio = min(95, parent.split_ratio + layout.grow_amount)
layout_all = True
if layout_all:
layout.group.layout_all()
break
child = parent
parent = child.parent
@lazy.function
def resize_left(qtile):
resize(qtile, "left")
@lazy.function
def resize_right(qtile):
resize(qtile, "right")
@lazy.function
def resize_up(qtile):
resize(qtile, "up")
@lazy.function
def resize_down(qtile):
resize(qtile, "down")
@lazy.function
def float_to_front(qtile):
for group in qtile.groups:
for window in group.windows:
if window.floating:
window.cmd_bring_to_front()
def screenshot(to_clip = False, rect_select = False):
def f(qtile):
command = []
if to_clip:
# Requires a one-line `maim_to_clip` helper script available in $PATH
command += ["maim_to_clip"]
else:
command += ["maim", f"/home/pierre/Pictures/{time.now().isoformat()}.png"]
if rect_select:
command += ["-s"]
subprocess.run(command)
return f
keys = [
# Layout change
Key("M-<Tab>", lazy.next_layout()),
## BSP Layout
# Change focus
Key("M-j", lazy.layout.down()),
Key("M-k", lazy.layout.up()),
Key("M-h", lazy.layout.left()),
Key("M-l", lazy.layout.right()),
# Move window
Key("M-S-j", lazy.layout.shuffle_down()),
Key("M-S-k", lazy.layout.shuffle_up()),
Key("M-S-h", lazy.layout.shuffle_left()),
Key("M-S-l", lazy.layout.shuffle_right()),
# Flip window
Key("M-A-j", lazy.layout.flip_down()),
Key("M-A-k", lazy.layout.flip_up()),
Key("M-A-h", lazy.layout.flip_left()),
Key("M-A-l", lazy.layout.flip_right()),
# Resize window
Key("M-C-j", resize_down),
Key("M-C-k", resize_up),
Key("M-C-h", resize_left),
Key("M-C-l", resize_right),
# Reset
Key("M-S-n", lazy.layout.normalize()),
# Toggle split
Key("M-<space>", lazy.layout.toggle_split()),
# Programs shortcuts
Key("M-<Return>", lazy.spawn("kitty")),
Key("M-e", lazy.spawn("nautilus")),
Key("M-r", lazy.spawn("albert show")),
Key("A-<Tab>", lazy.spawn("rofi -show window")),
Key("M-f", lazy.spawn("firefox")),
Key("M-S-f", lazy.spawn("firefox --private-window")),
Key("<XF86Calculator>", lazy.spawn("gnome-calculator")),
# Screen capture (Shift => selection, Ctrl => to clipboard)
Key("<Print>", lazy.function(screenshot())),
Key("C-<Print>", lazy.function(screenshot(to_clip = True))),
Key("S-<Print>", lazy.function(screenshot(rect_select = True))),
Key("C-S-<Print>", lazy.function(screenshot(to_clip = True, rect_select = True))),
Key("M-w", lazy.window.kill()),
Key("M-C-r", lazy.restart()),
Key("M-C-q", lazy.shutdown()),
Key("M-S-C-q", lazy.spawn("shutdown 0")),
Key("M-S-C-l", lazy.spawn("gnome-screensaver-command -l")),
# Volume (hold shift for lighter adjustments)
Key("<XF86AudioLowerVolume>", lazy.spawn("amixer -c 0 -q set Master 5%-")),
Key("S-<XF86AudioLowerVolume>", lazy.spawn("amixer -c 0 -q set Master 1%-")),
Key("<XF86AudioRaiseVolume>", lazy.spawn("amixer -c 0 -q set Master 5%+")),
Key("S-<XF86AudioRaiseVolume>", lazy.spawn("amixer -c 0 -q set Master 1%+")),
Key("<XF86AudioMute>", lazy.spawn("amixer -D pulse set Master 1+ toggle")),
# Brightness (hold shift for lighter adjustments)
Key("<XF86MonBrightnessUp>", lazy.spawn("light -A 5")),
Key("S-<XF86MonBrightnessUp>", lazy.spawn("light -A 1")),
Key("<XF86MonBrightnessDown>", lazy.spawn("light -U 5")),
Key("S-<XF86MonBrightnessDown>", lazy.spawn("light -U 1")),
# Multi-screen test (not very convincing)
Key("M-<Escape>", lazy.next_screen()),
Key("M-p", lazy.spawn("sh -c ~/scripts/monitor_layout.sh")),
Key("M-S-p", lazy.spawn("sh -c ~/scripts/rotate_secondary_display.sh")),
]
mouse = [
Drag("M-1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag("M-3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click("M-2", lazy.window.bring_to_front()),
Click("M-S-1", lazy.window.toggle_floating()),
]
|
8ec6cca713d8b2e1b98d52e4760ffc9859aaf07a
|
66b860c732de05ee8f01c16c6bd59f9c60c39e38
|
/src_backend_python/tabloo/backend.py
|
ffcbe60b5a593b016b8edcde3a9832e1057ad5a8
|
[
"MIT"
] |
permissive
|
bluenote10/tabloo
|
9262d79903cdd19bc195b6f21fb85a944c1c42aa
|
14923dde60d2bddfb87ac666b58299b990d6c85d
|
refs/heads/master
| 2023-03-16T21:54:39.352100
| 2022-12-09T10:56:38
| 2022-12-09T11:30:45
| 183,777,503
| 158
| 16
|
MIT
| 2023-03-14T18:09:47
| 2019-04-27T13:38:57
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
backend.py
|
import json
import math
import traceback
import numpy as np
import pandas as pd
def to_json(data):
"""
Accompanying function to get_data for generic json encoding.
"""
def converter(x):
if isinstance(x, np.ndarray):
return list(x)
elif isinstance(x, pd.Series):
return x.to_dict()
elif isinstance(x, pd.DataFrame):
return x.to_dict(orient="list")
else:
try:
return list(x)
except Exception:
pass
try:
return dict(x)
except Exception:
pass
return str(x)
# TODO: use recursion here so that conversion errors only affect inner data
# elements; a failed conversion would then degrade individual values (or at
# worst entire columns) instead of emptying the whole payload.
try:
data_string = json.dumps(data, default=converter, allow_nan=False, ensure_ascii=False)
except Exception:
traceback.print_exc()
data_string = ""
return data_string
def apply_filter(df, filter):
if filter is not None and len(filter.strip()) > 0:
try:
df = df.query(filter)
return df
except pd.core.computation.ops.UndefinedVariableError as e:
# TODO: We should be able to pass errors/messages from the backend to the frontend
print("UndefinedVariableError: {}".format(e.message))
return df
except Exception as e:
# TODO: We should be able to pass errors/messages from the backend to the frontend
print("Illegal query:")
print(traceback.format_exc())
return df
else:
return df
def convert_column(col):
# TODO: Add more tests...
# TODO: How to deeply convert nested nans/infs for json conversion?
# We cannot use col.replace({np.nan: None}) because of
# https://github.com/pandas-dev/pandas/issues/29813
# And we have to convert to object columns early because float columns
# have special treatments for None. In particular setting c[is_null] = None
# has no effect on a float column, because they get immediately converted
# back to np.nan, which is what we want to replace...
c = col.copy().astype(object)
is_null = c.isnull()
c[is_null] = None
try:
# Note that the c == +np.inf checks can fail with
# 'ValueError: The truth value of an array with more than one element is ambiguous.'
# in case the elements themselves are vectors/matrices.
is_pos_inf = c == +np.inf
is_neg_inf = c == -np.inf
c[is_pos_inf] = "inf"
c[is_neg_inf] = "-inf"
except Exception:
pass
return list(c)
class Backend(object):
def __init__(self, df):
self.df = df
def get_columns(self):
return list(self.df.columns)
def get_num_pages(self, pagination_size, filter):
if pagination_size < 1:
pagination_size = 1
df = self.df
df = apply_filter(df, filter)
return int(math.ceil(len(df) / pagination_size))
def get_data(self, filter, sort_column, sort_kind, page, pagination_size):
df = self.df
df = apply_filter(df, filter)
if sort_column is not None:
asc = sort_kind > 0
df.sort_values(sort_column, inplace=True, ascending=asc)
if sort_kind == 0:
df.sort_index(inplace=True)
if page is not None and pagination_size is not None:
i = pagination_size * page
j = pagination_size * (page + 1)
df = df.iloc[i:j, :]
data = [
{
"columnName": columnName,
"values": convert_column(df[columnName]),
"sortKind": 0 if columnName != sort_column else sort_kind,
}
for columnName in df.columns
]
return data
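# Hedged usage sketch (not part of the original module): exercising the Backend
# with a tiny DataFrame. Column names and the filter expression are illustrative.
if __name__ == "__main__":
    frame = pd.DataFrame({"a": [3.0, 1.0, np.nan], "b": ["x", "y", "z"]})
    backend = Backend(frame)
    print(backend.get_columns())                      # ['a', 'b']
    print(backend.get_num_pages(2, filter="a >= 1"))  # 1
    print(to_json(backend.get_data(filter=None, sort_column="a", sort_kind=1,
                                   page=0, pagination_size=10)))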
|
99252ab15397c46162a38153805944dbc37b04e9
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-docs/src/booklets/v2_2015/source/DeepLearning_Vignette_code_examples/deeplearning_gridsearch_random.py
|
2a87adf72939ee4c83bf24bbacd6b252b40e04ab
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 679
|
py
|
deeplearning_gridsearch_random.py
|
hidden_opt = [[17,32],[8,19],[32,16,8],[100],[10,10,10,10]]
l1_opt = [s/1e6 for s in range(1,1001)]
hyper_parameters = {"hidden":hidden_opt, "l1":l1_opt}
search_criteria = {"strategy":"RandomDiscrete",
"max_models":10, "max_runtime_secs":100,
"seed":123456}
# Import added so the snippet is self-contained; x, y, train and test are
# assumed to be defined earlier in the vignette.
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.grid.grid_search import H2OGridSearch
model_grid = H2OGridSearch(H2ODeepLearningEstimator,
hyper_params=hyper_parameters,
search_criteria=search_criteria)
model_grid.train(x=x, y=y,
distribution="multinomial", epochs=1000,
training_frame=train, validation_frame=test,
score_interval=2, stopping_rounds=3,
stopping_tolerance=0.05,
stopping_metric="misclassification")
|
ffc496d11b2c1af2fba5e1063e086ba02c240f62
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/colorimetry/tests/test_generation.py
|
ae78c0fd4cff35d1410a384811b9595890492a25
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 6,778
|
py
|
test_generation.py
|
#!/usr/bin/env python
"""Define the unit tests for the :mod:`colour.colorimetry.generation` module."""
import numpy as np
import unittest
from colour.colorimetry.generation import (
sd_constant,
sd_zeros,
sd_ones,
msds_constant,
msds_zeros,
msds_ones,
sd_gaussian_normal,
sd_gaussian_fwhm,
sd_single_led_Ohno2005,
sd_multi_leds_Ohno2005,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestSdConstant",
"TestSdZeros",
"TestSdOnes",
"TestMsdsConstant",
"TestMsdsZeros",
"TestMsdsOnes",
"TestSdGaussianNormal",
"TestSdGaussianFwhm",
"TestSdSingleLedOhno2005",
"TestSdMultiLedsOhno2005",
]
class TestSdConstant(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_constant` definition unit
tests methods.
"""
def test_sd_constant(self):
"""Test :func:`colour.colorimetry.generation.sd_constant` definition."""
sd = sd_constant(np.pi)
self.assertAlmostEqual(sd[360], np.pi, places=7)
self.assertAlmostEqual(sd[555], np.pi, places=7)
self.assertAlmostEqual(sd[780], np.pi, places=7)
class TestSdZeros(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_zeros` definition unit
tests methods.
"""
def test_sd_zeros(self):
"""
Test :func:`colour.colorimetry.generation.sd_zeros`
definition.
"""
sd = sd_zeros()
self.assertEqual(sd[360], 0)
self.assertEqual(sd[555], 0)
self.assertEqual(sd[780], 0)
class TestSdOnes(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_ones` definition unit
tests methods.
"""
def test_sd_ones(self):
"""Test :func:`colour.colorimetry.generation.sd_ones` definition."""
sd = sd_ones()
self.assertEqual(sd[360], 1)
self.assertEqual(sd[555], 1)
self.assertEqual(sd[780], 1)
class TestMsdsConstant(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.msds_constant` definition unit
tests methods.
"""
def test_msds_constant(self):
"""Test :func:`colour.colorimetry.generation.msds_constant` definition."""
msds = msds_constant(np.pi, labels=["a", "b", "c"])
np.testing.assert_array_almost_equal(
msds[360], np.array([np.pi, np.pi, np.pi]), decimal=7
)
np.testing.assert_array_almost_equal(
msds[555], np.array([np.pi, np.pi, np.pi]), decimal=7
)
np.testing.assert_array_almost_equal(
msds[780], np.array([np.pi, np.pi, np.pi]), decimal=7
)
class TestMsdsZeros(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.msds_zeros` definition unit
tests methods.
"""
def test_msds_zeros(self):
"""
Test :func:`colour.colorimetry.generation.msds_zeros`
definition.
"""
msds = msds_zeros(labels=["a", "b", "c"])
np.testing.assert_equal(msds[360], np.array([0, 0, 0]))
np.testing.assert_equal(msds[555], np.array([0, 0, 0]))
np.testing.assert_equal(msds[780], np.array([0, 0, 0]))
class TestMsdsOnes(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.msds_ones` definition unit
tests methods.
"""
def test_msds_ones(self):
"""Test :func:`colour.colorimetry.generation.msds_ones` definition."""
msds = msds_ones(labels=["a", "b", "c"])
np.testing.assert_equal(msds[360], np.array([1, 1, 1]))
np.testing.assert_equal(msds[555], np.array([1, 1, 1]))
np.testing.assert_equal(msds[780], np.array([1, 1, 1]))
class TestSdGaussianNormal(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_gaussian_normal`
definition unit tests methods.
"""
def test_sd_gaussian_normal(self):
"""
Test :func:`colour.colorimetry.generation.sd_gaussian_normal`
definition.
"""
sd = sd_gaussian_normal(555, 25)
self.assertAlmostEqual(sd[530], 0.606530659712633, places=7)
self.assertAlmostEqual(sd[555], 1, places=7)
self.assertAlmostEqual(sd[580], 0.606530659712633, places=7)
class TestSdGaussianFwhm(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_gaussian_fwhm` definition
unit tests methods.
"""
def test_sd_gaussian_fwhm(self):
"""
Test :func:`colour.colorimetry.generation.sd_gaussian_fwhm` definition.
"""
sd = sd_gaussian_fwhm(555, 25)
self.assertAlmostEqual(sd[530], 0.0625, places=7)
self.assertAlmostEqual(sd[555], 1, places=7)
self.assertAlmostEqual(sd[580], 0.062499999999999, places=7)
self.assertAlmostEqual(sd[555 - 25 / 2], 0.5, places=7)
class TestSdSingleLedOhno2005(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_single_led_Ohno2005`
definition unit tests methods.
"""
def test_sd_single_led_Ohno2005(self):
"""
Test :func:`colour.colorimetry.generation.sd_single_led_Ohno2005`
definition.
"""
sd = sd_single_led_Ohno2005(555, 25)
self.assertAlmostEqual(sd[530], 0.127118445056538, places=7)
self.assertAlmostEqual(sd[555], 1, places=7)
self.assertAlmostEqual(sd[580], 0.127118445056538, places=7)
class TestSdMultiLedsOhno2005(unittest.TestCase):
"""
Define :func:`colour.colorimetry.generation.sd_multi_leds_Ohno2005`
definition unit tests methods.
"""
def test_sd_multi_leds_Ohno2005(self):
"""
Test :func:`colour.colorimetry.generation.sd_multi_leds_Ohno2005`
definition.
"""
sd = sd_multi_leds_Ohno2005(
np.array([457, 530, 615]),
np.array([20, 30, 20]),
np.array([0.731, 1.000, 1.660]),
)
self.assertAlmostEqual(sd[500], 0.129513248576116, places=7)
self.assertAlmostEqual(sd[570], 0.059932156222703, places=7)
self.assertAlmostEqual(sd[640], 0.116433257970624, places=7)
sd = sd_multi_leds_Ohno2005(
np.array([457, 530, 615]),
np.array([20, 30, 20]),
)
self.assertAlmostEqual(sd[500], 0.130394510062799, places=7)
self.assertAlmostEqual(sd[570], 0.058539618824187, places=7)
self.assertAlmostEqual(sd[640], 0.070140708922879, places=7)
if __name__ == "__main__":
unittest.main()
|
696c90bd75ae841ba5238d1e86e3195149e6ebc6
|
529e713a78e82de2ae5d44cfb8ef209e0894d72a
|
/python-microservices-with-grpc/marketplace/marketplace.py
|
d6522e26785ca8013502417eb95d308325a229fb
|
[
"MIT"
] |
permissive
|
realpython/materials
|
cd2f548276be2c82f134ca03eadb1cd279e0f26e
|
d2d62756d3854f54a12a767f2bf9470486c0ceef
|
refs/heads/master
| 2023-09-05T22:12:29.806738
| 2023-08-31T20:56:28
| 2023-08-31T20:56:28
| 132,374,697
| 4,678
| 6,482
|
MIT
| 2023-09-12T22:22:06
| 2018-05-06T20:46:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
marketplace.py
|
import os
from flask import Flask, render_template
import grpc
from recommendations_pb2 import BookCategory, RecommendationRequest
from recommendations_pb2_grpc import RecommendationsStub
app = Flask(__name__)
recommendations_host = os.getenv("RECOMMENDATIONS_HOST", "localhost")
with open("client.key", "rb") as fp:
client_key = fp.read()
with open("client.pem", "rb") as fp:
client_cert = fp.read()
with open("ca.pem", "rb") as fp:
ca_cert = fp.read()
creds = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert)
recommendations_channel = grpc.secure_channel(
f"{recommendations_host}:443", creds
)
recommendations_client = RecommendationsStub(recommendations_channel)
@app.route("/")
def render_homepage():
recommendations_request = RecommendationRequest(
user_id=1, category=BookCategory.MYSTERY, max_results=3
)
recommendations_response = recommendations_client.Recommend(
recommendations_request
)
return render_template(
"homepage.html",
recommendations=recommendations_response.recommendations,
)
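# Hedged addition (not part of the original tutorial file): run the marketplace
# directly with `python marketplace.py`; the tutorial may instead use `flask run`
# or a production WSGI server.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)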
|
678b485ef715bd68ae46d8cc8e47bb29e6067e68
|
53a83642c01a8828e3d7bd0b18e33c3b694c2b84
|
/Python/GeeksforGeeks/minimum-element-in-a-sorted-and-rotated-array.py
|
614abf5d50fd8e7910e6e5abbde9d5a328cb687f
|
[] |
no_license
|
anantkaushik/Competitive_Programming
|
1dcd60a28b5b951c23024d6090942be081ad249f
|
6dba38fd7aa4e71b5196d01d64e81f9336d08b13
|
refs/heads/master
| 2022-03-06T15:36:23.797340
| 2022-02-21T12:00:37
| 2022-02-21T12:00:37
| 82,700,948
| 271
| 95
| null | 2020-10-27T17:34:39
| 2017-02-21T16:18:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
minimum-element-in-a-sorted-and-rotated-array.py
|
"""
Problem Link: https://practice.geeksforgeeks.org/problems/minimum-element-in-a-sorted-and-rotated-array/0
A sorted array A[ ] with distinct elements is rotated at some unknown point; the task is to find the minimum element in it.
Expected Time Complexity: O(Log n)
Input:
The first line of input contains a single integer T denoting the number of test cases.
Then T test cases follow. Each test case consist of two lines. The first line of each test case consists of an integer N,
where N is the size of array.
The second line of each test case contains N space separated integers denoting array elements.
Output:
Corresponding to each test case, in a new line, print the minimum element in the array.
Constraints:
1 ≤ T ≤ 200
1 ≤ N ≤ 500
1 ≤ A[i] ≤ 1000
Example:
Input
1
5
4 5 1 2 3
Output
1
"""
def minElement(arr):
if len(arr) == 1:
return arr[0]
if arr[0] < arr[-1]:
return arr[0]
start = 0
end = len(arr) - 1
while start < end:
mid = (start+end)//2
if arr[mid] < arr[mid-1]:
return arr[mid]
if arr[mid] > arr[mid+1]:
return arr[mid+1]
elif arr[mid] > arr[end]:
start = mid + 1
else:
end = end - 1
for _ in range(int(input())):
n = int(input())
arr = list(map(int,input().split()))
print(minElement(arr))
|
f89eac410e2b9b433fdd58eed478c2fba2c6e1d2
|
675ecac2016ba9f8db611f6688a46ac4d2095447
|
/Interview Questions solutions/kth smallest element in a bst/index.py
|
3760a3789f8acb852f05e83cecfc039f9e54ec94
|
[] |
no_license
|
BitPunchZ/Leetcode-in-python-50-Algorithms-Coding-Interview-Questions
|
87a9619ba011aa8c30fe33e5e94037fea3144d1a
|
86a0ceefa9c8416c17010fe90eb372daf82256db
|
refs/heads/master
| 2023-08-17T05:37:59.467046
| 2023-08-09T08:59:01
| 2023-08-09T08:59:01
| 247,604,188
| 144
| 125
| null | 2021-04-19T03:04:00
| 2020-03-16T03:42:10
|
Python
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
index.py
|
# Standard LeetCode binary-tree scaffold (normally provided by the judge), added
# here so the solution below is runnable on its own.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def kthSmallest(self, root: TreeNode, k: int) -> int:
self.k = k
self.res = None
self.helper(root)
return self.res
def helper(self, root):
if not root:
return
self.helper(root.left)
self.k -= 1
if self.k == 0:
self.res = root.val
return
self.helper(root.right)
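# Hedged usage sketch (not part of the original solution): build a small BST
#     3
#    / \
#   1   4
#    \
#     2
# whose in-order traversal is 1, 2, 3, 4, then query the k-th smallest value.
if __name__ == "__main__":
    root = TreeNode(3, TreeNode(1, None, TreeNode(2)), TreeNode(4))
    print(Solution().kthSmallest(root, 1))  # 1
    print(Solution().kthSmallest(root, 3))  # 3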
|
8e510d9f44fa25c202ab9c6b2ca0b8fa0a9519f6
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/pythran/tests/rosetta/perf.py
|
80756a19d7387e885b2ac845850368bdade2e726
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
perf.py
|
#from http://rosettacode.org/wiki/Perfect_numbers#Python
#pythran export perf(int)
#pythran export perf_(int)
#runas list(map(perf, range(20)))
#runas list(map(perf_, range(20)))
def perf(n):
sum = 0
for i in range(1, n):
if n % i == 0:
sum += i
return sum == n
def perf_(n):
return n == sum(i for i in range(1, n) if n % i == 0)
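# Hedged usage sketch (not part of the original Rosetta snippet): 6 and 28 are the
# first two perfect numbers, so both implementations should agree on them.
if __name__ == "__main__":
    print(perf(6), perf_(6))    # True True
    print(perf(28), perf_(28))  # True True
    print(perf(12), perf_(12))  # False False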
|
feeda619a6c5be633607399e2312e020569649f0
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/utils/splitjson.py
|
aaa7988ece3bbb87f81c6a8c58318d1972c874f9
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
splitjson.py
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import json
import logging
import os
import sys
import numpy as np
from espnet.utils.cli_utils import get_commandline_args
def get_parser():
parser = argparse.ArgumentParser(
description="split a json file for parallel processing",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("json", type=str, help="json file")
parser.add_argument(
"--parts", "-p", type=int, help="Number of subparts to be prepared", default=0
)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
# logging info
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.info(get_commandline_args())
# check directory
filename = os.path.basename(args.json).split(".")[0]
dirname = os.path.dirname(args.json)
dirname = "{}/split{}utt".format(dirname, args.parts)
if not os.path.exists(dirname):
os.makedirs(dirname)
# load json and split keys
j = json.load(codecs.open(args.json, "r", encoding="utf-8"))
utt_ids = sorted(list(j["utts"].keys()))
logging.info("number of utterances = %d" % len(utt_ids))
if len(utt_ids) < args.parts:
logging.error("#utterances < #splits. Use smaller split number.")
sys.exit(1)
utt_id_lists = np.array_split(utt_ids, args.parts)
utt_id_lists = [utt_id_list.tolist() for utt_id_list in utt_id_lists]
for i, utt_id_list in enumerate(utt_id_lists):
new_dic = dict()
for utt_id in utt_id_list:
new_dic[utt_id] = j["utts"][utt_id]
jsonstring = json.dumps(
{"utts": new_dic},
indent=4,
ensure_ascii=False,
sort_keys=True,
separators=(",", ": "),
)
fl = "{}/{}.{}.json".format(dirname, filename, i + 1)
sys.stdout = codecs.open(fl, "w+", encoding="utf-8")
print(jsonstring)
sys.stdout.close()
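# Hedged usage note (not part of the original script; file names are illustrative):
# the script is invoked from the command line, e.g.
#
#   python splitjson.py data/train/data.json -p 4
#
# which writes data/train/split4utt/data.1.json ... data.4.json, each holding
# roughly one quarter of the utterances.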
|
defcb224f99b77ba478bcbc3b9d9098fb867c4dd
|
f9be237f3a7e03318636de79f6d96f40583c7cb1
|
/obsei/preprocessor/text_splitter.py
|
5d07af6d8bc943bdf1bac898d6eaa9b728c860c7
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
obsei/obsei
|
7173d32b90ee6ab09089a9ab001ee5cb0cdc8d8e
|
80b7782ba488583faf63951d20abad73eda98e5b
|
refs/heads/master
| 2023-09-01T00:32:07.593921
| 2023-08-25T17:08:56
| 2023-08-25T17:08:56
| 307,191,665
| 773
| 100
|
Apache-2.0
| 2023-09-13T03:30:59
| 2020-10-25T21:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,306
|
py
|
text_splitter.py
|
import logging
from typing import List, Optional, Any
import uuid
import nltk
from nltk import sent_tokenize
from pydantic import BaseModel
from obsei.payload import TextPayload
from obsei.preprocessor.base_preprocessor import (
BaseTextPreprocessor,
BaseTextProcessorConfig,
)
logger = logging.getLogger(__name__)
class TextSplitterPayload(BaseModel):
phrase: str
chunk_id: int
chunk_length: int
document_id: str
total_chunks: Optional[int]
class TextSplitterConfig(BaseTextProcessorConfig):
max_split_length: int = 512
split_stride: int = 0 # overlap length
document_id_key: Optional[str] # document_id in meta
enable_sentence_split: bool = False
honor_paragraph_boundary: bool = False
paragraph_marker: str = '\n\n'
sentence_tokenizer: str = 'tokenizers/punkt/PY3/english.pickle'
def __init__(self, **data: Any):
super().__init__(**data)
if self.enable_sentence_split:
nltk.download('punkt')
class TextSplitter(BaseTextPreprocessor):
def preprocess_input( # type: ignore[override]
self, input_list: List[TextPayload], config: TextSplitterConfig, **kwargs: Any
) -> List[TextPayload]:
text_splits: List[TextPayload] = []
for idx, input_data in enumerate(input_list):
if (
config.document_id_key
and input_data.meta
and config.document_id_key in input_data.meta
):
document_id = str(input_data.meta.get(config.document_id_key))
else:
document_id = uuid.uuid4().hex
if config.honor_paragraph_boundary:
paragraphs = input_data.processed_text.split(config.paragraph_marker)
else:
paragraphs = [input_data.processed_text]
atomic_texts: List[str] = []
for paragraph in paragraphs:
if config.enable_sentence_split:
atomic_texts.extend(sent_tokenize(paragraph))
else:
atomic_texts.append(paragraph)
split_id = 0
document_splits: List[TextSplitterPayload] = []
for text in atomic_texts:
text_length = len(text)
if text_length == 0:
continue
start_idx = 0
while start_idx < text_length:
if config.split_stride > 0 and start_idx > 0:
start_idx = (
self._valid_index(
text, start_idx - config.split_stride
)
+ 1
)
end_idx = self._valid_index(
text,
min(start_idx + config.max_split_length, text_length),
)
phrase = text[start_idx:end_idx]
document_splits.append(
TextSplitterPayload(
phrase=phrase,
chunk_id=split_id,
chunk_length=len(phrase),
document_id=document_id,
)
)
start_idx = end_idx + 1
split_id += 1
total_splits = len(document_splits)
for split in document_splits:
split.total_chunks = total_splits
payload = TextPayload(
processed_text=split.phrase,
source_name=input_data.source_name,
segmented_data=input_data.segmented_data,
meta={**input_data.meta, **{"splitter": split}}
if input_data.meta
else {"splitter": split},
)
text_splits.append(payload)
return text_splits
@staticmethod
def _valid_index(document: str, idx: int) -> int:
if idx <= 0:
return 0
if idx >= len(document):
return len(document)
new_idx = idx
while new_idx > 0:
if document[new_idx] in [" ", "\n", "\t"]:
break
new_idx -= 1
return new_idx
|
7d866fd4fb1c0070fc03d6968bc2883a46470ea4
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/issue-0048/check.py
|
065d36a34245928044a0d3e7e2d0ecfc1520d03c
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
check.py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import profile
import unittest
class TestIssue0048 (unittest.TestCase):
def testProfile (self):
amap = profile.AbstractFeatureBaseType._AttributeMap
self.assertEqual(1, len(amap))
if __name__ == '__main__':
unittest.main()
|
ca90f29850c8d68b745f29120850e562c71d5909
|
e7aad0b1c5d8907dbb52000c482c396d1b801751
|
/test/functional/tests/security/test_compilation_flags.py
|
e51913425099dde6eb3508788516f9adff8ad63a
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Open-CAS/open-cas-linux
|
c57d60f860702d7bc380c5d85cf502c0bf5e1bae
|
93334b4675afee8815f8ea12bb7297e0fd2a4195
|
refs/heads/master
| 2023-07-12T10:32:26.921455
| 2023-07-03T12:24:47
| 2023-07-03T12:24:47
| 178,356,155
| 202
| 84
|
BSD-3-Clause
| 2023-07-03T12:24:49
| 2019-03-29T07:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
test_compilation_flags.py
|
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import re
import pytest
from core.test_run import TestRun
from test_tools.fs_utils import Permissions, PermissionsUsers, PermissionSign
from test_utils.filesystem.fs_item import FsItem
@pytest.mark.os_dependent
def test_checksec():
"""
    title: Checking that defense-enabling compilation flags were used.
    description: |
        Check if the Open CAS executable file was compiled with defense-enabling compilation flags.
    pass_criteria:
      - For the casadm binary checksec returns:
RELRO STACK CANARY NX PIE RPATH RUNPATH FILE
Full RELRO Canary found NX enabled PIE enabled No RPATH No RUNPATH /sbin/casadm.
"""
with TestRun.step("Prepare checksec script"):
checksec_path = os.path.join(
TestRun.usr.working_dir,
"test/functional/test-framework/test_tools/checksec.sh"
)
checksec = FsItem(checksec_path)
checksec.chmod(Permissions.x, PermissionsUsers.u, PermissionSign.add)
with TestRun.step("Check casadm compilation flags"):
casadm_binary = "/sbin/casadm"
header_expected = ["RELRO", "STACK CANARY", "NX", "PIE", "RPATH", "RUNPATH", "FILE"]
binary_expected = ["Full RELRO", "Canary found", "NX enabled", "PIE enabled", "No RPATH",
"No RUNPATH", casadm_binary]
result_lines = TestRun.executor.run_expect_success(
f'{checksec_path} --file {casadm_binary}').stdout.splitlines()
header_found = False
for line in result_lines:
if not header_found:
if line.startswith("RELRO"):
header_found = True
header = line
continue
# remove formatting from output
result = re.sub(r'\x1B\[[0-9;]*m', '', line)
break
header = [i.strip() for i in header.split(" ") if i != '']
if header != header_expected:
TestRun.LOGGER.error(
'Incorrect header detected!\n'
f'Expected: {" ".join(header_expected)},\n'
f'Actual: {" ".join(header)}')
result = [i.strip() for i in result.split(" ") if i != '']
if result != binary_expected:
TestRun.LOGGER.error(
'Incorrect compilation flags!\n'
f'Expected: {" ".join(binary_expected)},\n'
f'Actual: {" ".join(result)}')
|
82f9beb404bb875eaa0866d33872dd9fd84ba6db
|
88a39b8ec20b386400bd8b1d5fc1d5ad3314681d
|
/tests/unit/worker/docker_utils_test.py
|
67b64a78494e26dbe75760e796f1aeb4d714ec9f
|
[
"Apache-2.0"
] |
permissive
|
codalab/codalab-worksheets
|
bb35681454a0d74903aaa7468e17303986793464
|
5be8cb3fa4b43c9e7e8f0a3b217644a7f0a39628
|
refs/heads/master
| 2023-08-18T10:16:01.766541
| 2023-08-06T20:02:30
| 2023-08-06T20:02:30
| 27,352,490
| 126
| 65
|
NOASSERTION
| 2023-09-14T14:54:07
| 2014-11-30T22:33:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,151
|
py
|
docker_utils_test.py
|
from docker.errors import APIError
import unittest
from codalab.worker.docker_utils import (
DockerUserErrorException,
DockerException,
wrap_exception,
parse_image_progress,
)
class ParseImageProgressTest(unittest.TestCase):
def test_parse_image_progress_expected(self):
image_info = {
'progressDetail': {'current': 20320000, 'total': 28540000},
'progress': '[===============> ] 20.32MB/28.54MB',
}
progress = parse_image_progress(image_info)
self.assertEqual(progress, '20.32MB/28.54MB (71% done)')
def test_parse_image_progress_missing_detail(self):
progress = parse_image_progress({})
self.assertEqual(progress, '')
def test_parse_image_progress_missing_progress(self):
image_info = {
'progressDetail': {'current': 20320000, 'total': 28540000},
}
progress = parse_image_progress(image_info)
self.assertEqual(progress, '(71% done)')
def test_parse_image_progress_partial_progress(self):
image_info = {
'progressDetail': {'current': 20320000, 'total': 28540000},
'progress': ' 20.32MB/28.54MB',
}
progress = parse_image_progress(image_info)
self.assertEqual(progress, '20.32MB/28.54MB (71% done)')
class WrapExceptionTest(unittest.TestCase):
def test_wrap_exception(self):
error = (
'Cannot start Docker container: Unable to start Docker container: 500 Server '
'Error: Internal Server Error "OCI runtime create failed: some other error"'
)
@wrap_exception('Should throw DockerException')
def throw_error():
raise APIError(error)
try:
throw_error()
except Exception as e:
self.assertEqual(str(e), 'Should throw DockerException: ' + error)
self.assertIsInstance(e, DockerException)
def test_wrap_exception_with_cuda_error(self):
error = (
'Cannot start Docker container: Unable to start Docker container: 500 Server '
'Error: Internal Server Error ("OCI runtime create failed: container_linux.go:'
'345: starting container process caused "process_linux.go:430: container init '
'caused "process_linux.go:413: running prestart hook 1 caused "error '
'running hook: exit status 1, stdout: , stderr: nvidia-container-cli: mount '
'error: file creation failed: /mnt/scratch/docker/overlay2/678d6b'
'19396c4ccd341786b21393f3f/merged/usr/bin/nvidia-smi'
)
@wrap_exception('Should throw DockerUserErrorException')
def throw_cuda_error():
raise APIError(error)
try:
throw_cuda_error()
except Exception as e:
self.assertEqual(str(e), 'Should throw DockerUserErrorException: ' + error)
self.assertIsInstance(e, DockerUserErrorException)
def test_wrap_exception_with_memory_limit_error(self):
error = (
'Unable to start Docker container: 500 Server Error: Internal Server Error '
'("OCI runtime create failed: container_linux.go:349: starting container process '
'caused "process_linux.go:449: container init caused \"process_linux.go:415: '
'setting cgroup config for procHooks process caused \\\"failed to write\\\\\\\"8388608'
'\\\\\\\" to \\\\\\\"/sys/fs/cgroup/memory/docker/a5475e95e98bbb534870dfdf290e91251f54'
'e5c13be07a7b6819619a2dba48ef/memory.limit_in_bytes\\\\\\\":write /sys/fs/cgroup/memory'
'/docker/a5475e95e98bbb534870dfdf290e91251f54e5c13be07a7b6819619a2dba48ef/'
'memory.limit_in_bytes: device or resource busy\\\"\"": unknown'
)
@wrap_exception('Should throw DockerUserErrorException')
def throw_memory_error():
raise APIError(error)
try:
throw_memory_error()
except Exception as e:
self.assertEqual(str(e), 'Should throw DockerUserErrorException: ' + error)
self.assertIsInstance(e, DockerUserErrorException)
|
441c7a693b6f726e2b9f7f154529b6037e89f8c0
|
472226be9c4422e7d84aab95330b20513ab61277
|
/deepobs/tensorflow/config.py
|
b41bef6a4b52ae62d9d3bb813772888de1441228
|
[
"MIT"
] |
permissive
|
fsschneider/DeepOBS
|
a601a6dd2548107e7e1fbae7d8b5357c79bf17d6
|
b41aafe8896223ba8dc8e61449656e82d42be1d8
|
refs/heads/master
| 2023-04-14T15:52:07.937617
| 2022-01-31T12:26:36
| 2022-01-31T12:26:36
| 150,460,781
| 105
| 34
|
MIT
| 2023-03-24T22:45:25
| 2018-09-26T16:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
config.py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
DATA_DIR = "data_deepobs"
BASELINE_DIR = "baselines_deepobs"
TF_FLOAT_DTYPE = tf.float32
def get_data_dir():
return DATA_DIR
def set_data_dir(data_dir):
global DATA_DIR
DATA_DIR = data_dir
def get_baseline_dir():
return BASELINE_DIR
def set_baseline_dir(baseline_dir):
global BASELINE_DIR
BASELINE_DIR = baseline_dir
def get_float_dtype():
return TF_FLOAT_DTYPE
def set_float_dtype(dtype):
global TF_FLOAT_DTYPE
TF_FLOAT_DTYPE = dtype
|
83684f817f751b0cba8b90db44c240d72d7dd569
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/test/programytest/oob/defaults/test_sms.py
|
d4a8eaf5d8570744adb4b9da6710047b7bb6f069
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
test_sms.py
|
import unittest
import unittest.mock
import xml.etree.ElementTree as ET
from programy.oob.callmom.sms import SMSOutOfBandProcessor
from programytest.client import TestClient
class SMSOutOfBandProcessorTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = client.create_client_context("testid")
def test_processor_xml_parsing(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(None))
oob = []
self.assertFalse(oob_processor.parse_oob_xml(oob))
oob = []
oob.append(unittest.mock.Mock())
oob[0].tag = "recipient"
oob[0].text = "077777777"
oob.append(unittest.mock.Mock())
oob[1].tag = "message"
oob[1].text = "Hello!"
self.assertTrue(oob_processor.parse_oob_xml(oob))
def test_processor(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
oob_content = ET.fromstring("<sms><recipient>077777777</recipient><message>Hello!</message></sms>")
self.assertEqual("SMS", oob_processor.process_out_of_bounds(self._client_context, oob_content))
def test_processor_none(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(None))
def test_processor_missing_no_oob(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(ET.fromstring("<oob></oob>")))
def test_processor_missing_recipient(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(ET.fromstring("<oob><message>Hello</message></oob>")))
def test_processor_missing_message(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(ET.fromstring("<oob><recipient>07771597630</recipient></oob>")))
def test_processor_missing_recipient_and_message(self):
oob_processor = SMSOutOfBandProcessor()
self.assertIsNotNone(oob_processor)
self.assertFalse(oob_processor.parse_oob_xml(ET.fromstring("<oob><other>Something</other></oob>")))
|
00b5df6706300a83bb7ae0814f6bf6182f33f02f
|
8cc3498e311d15c9a4394aaa341ef489b482dbe6
|
/test/language/templates/python/StructTemplatedTemplateArgumentTest.py
|
b9398135e514c5924795f77daf4a0c7af93735de
|
[
"BSD-3-Clause"
] |
permissive
|
ndsev/zserio
|
3e55c064f72e86219a6da297f116d3dbb565a9a9
|
c540c4a97fee4e08bfc6669a2cec0d2b8282d8f6
|
refs/heads/master
| 2023-08-24T14:56:10.750155
| 2023-08-11T19:36:54
| 2023-08-11T19:36:54
| 141,550,444
| 113
| 23
|
BSD-3-Clause
| 2023-08-30T11:14:47
| 2018-07-19T08:44:23
|
Java
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
StructTemplatedTemplateArgumentTest.py
|
import unittest
import zserio
from testutils import getZserioApi
class StructTemplatedTemplateArgumentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "templates.zs").struct_templated_template_argument
def testReadWrite(self):
structTemplatedTemplateArgument = self.api.StructTemplatedTemplateArgument(
self.api.Field_Compound_uint32(self.api.Compound_uint32(42))
)
writer = zserio.BitStreamWriter()
structTemplatedTemplateArgument.write(writer)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
readStructTemplatedTemplateArgument = self.api.StructTemplatedTemplateArgument()
readStructTemplatedTemplateArgument.read(reader)
self.assertEqual(structTemplatedTemplateArgument, readStructTemplatedTemplateArgument)
|
adce6173474e6f4d87fe1b9817a3aa33aef64bdf
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/perf/test_ozone_ar_speed_order_900.py
|
e4b033cdaa90078ec04992e0e02a6cb6677b3787
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 70
|
py
|
test_ozone_ar_speed_order_900.py
|
import tests.perf.test_ozone_ar_speed_many as gen
gen.run_test(900)
|
06981f0963abe1b037be9463f592817e7b85c713
|
4e36f3ce0f7bfd1959b97a20fb9dab6db815c2d7
|
/fastchat/serve/launch_all_serve.py
|
7847f00645d8dfd7d3aaf4032479e70c7ee3555d
|
[
"Apache-2.0"
] |
permissive
|
lm-sys/FastChat
|
040d6a40c16d0a67f5c6955ea7a491e192f81e4e
|
106670d28793963b0ff99811da996d1d5415f1b9
|
refs/heads/main
| 2023-08-28T19:07:58.676054
| 2023-08-28T11:38:37
| 2023-08-28T11:38:37
| 615,882,673
| 25,954
| 3,105
|
Apache-2.0
| 2023-09-14T03:47:50
| 2023-03-19T00:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,441
|
py
|
launch_all_serve.py
|
"""
Usage: python launch_all_serve_by_shell.py --model-path-address "THUDM/chatglm2-6b@localhost@2021" "huggyllama/llama-7b@localhost@2022"
Workers are listed in the format `model-path`@`host`@`port`.
The key mechanism behind this script is:
1. execute shell commands to launch the controller/worker/openai-api-server;
2. check the logs of the controller/worker/openai-api-server to make sure each server is launched properly.
Note that a few non-critical `fastchat.serve` cmd options are not supported currently.
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import subprocess
import re
import argparse
LOGDIR = "./logs/"
if not os.path.exists(LOGDIR):
os.makedirs(LOGDIR)
parser = argparse.ArgumentParser()
# ------multi worker-----------------
parser.add_argument(
"--model-path-address",
default="THUDM/chatglm2-6b@localhost@20002",
nargs="+",
type=str,
help="model path, host, and port, formatted as model-path@host@port",
)
# ---------------controller-------------------------
parser.add_argument("--controller-host", type=str, default="localhost")
parser.add_argument("--controller-port", type=int, default=21001)
parser.add_argument(
"--dispatch-method",
type=str,
choices=["lottery", "shortest_queue"],
default="shortest_queue",
)
controller_args = ["controller-host", "controller-port", "dispatch-method"]
# ----------------------worker------------------------------------------
parser.add_argument("--worker-host", type=str, default="localhost")
parser.add_argument("--worker-port", type=int, default=21002)
# parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
# parser.add_argument(
# "--controller-address", type=str, default="http://localhost:21001"
# )
parser.add_argument(
"--model-path",
type=str,
default="lmsys/vicuna-7b-v1.3",
help="The path to the weights. This can be a local folder or a Hugging Face repo ID.",
)
parser.add_argument(
"--revision",
type=str,
default="main",
help="Hugging Face Hub model revision identifier",
)
parser.add_argument(
"--device",
type=str,
choices=["cpu", "cuda", "mps", "xpu"],
default="cuda",
help="The device type",
)
parser.add_argument(
"--gpus",
type=str,
default="0",
help="A single GPU like 1 or multiple GPUs like 0,2",
)
parser.add_argument("--num-gpus", type=int, default=1)
parser.add_argument(
"--max-gpu-memory",
type=str,
help="The maximum memory per gpu. Use a string like '13Gib'",
)
parser.add_argument("--load-8bit", action="store_true", help="Use 8-bit quantization")
parser.add_argument(
"--cpu-offloading",
action="store_true",
help="Only when using 8-bit quantization: Offload excess weights to the CPU that don't fit on the GPU",
)
parser.add_argument(
"--gptq-ckpt",
type=str,
default=None,
help="Load quantized model. The path to the local GPTQ checkpoint.",
)
parser.add_argument(
"--gptq-wbits",
type=int,
default=16,
choices=[2, 3, 4, 8, 16],
help="#bits to use for quantization",
)
parser.add_argument(
"--gptq-groupsize",
type=int,
default=-1,
help="Groupsize to use for quantization; default uses full row.",
)
parser.add_argument(
"--gptq-act-order",
action="store_true",
help="Whether to apply the activation order GPTQ heuristic",
)
parser.add_argument(
"--model-names",
type=lambda s: s.split(","),
help="Optional display comma separated names",
)
parser.add_argument(
"--limit-worker-concurrency",
type=int,
default=5,
help="Limit the model concurrency to prevent OOM.",
)
parser.add_argument("--stream-interval", type=int, default=2)
parser.add_argument("--no-register", action="store_true")
worker_args = [
"worker-host",
"worker-port",
"model-path",
"revision",
"device",
"gpus",
"num-gpus",
"max-gpu-memory",
"load-8bit",
"cpu-offloading",
"gptq-ckpt",
"gptq-wbits",
"gptq-groupsize",
"gptq-act-order",
"model-names",
"limit-worker-concurrency",
"stream-interval",
"no-register",
"controller-address",
]
# -----------------openai server---------------------------
parser.add_argument("--server-host", type=str, default="localhost", help="host name")
parser.add_argument("--server-port", type=int, default=8001, help="port number")
parser.add_argument(
"--allow-credentials", action="store_true", help="allow credentials"
)
# parser.add_argument(
# "--allowed-origins", type=json.loads, default=["*"], help="allowed origins"
# )
# parser.add_argument(
# "--allowed-methods", type=json.loads, default=["*"], help="allowed methods"
# )
# parser.add_argument(
# "--allowed-headers", type=json.loads, default=["*"], help="allowed headers"
# )
parser.add_argument(
"--api-keys",
type=lambda s: s.split(","),
help="Optional list of comma separated API keys",
)
server_args = [
"server-host",
"server-port",
"allow-credentials",
"api-keys",
"controller-address",
]
args = parser.parse_args()
args = argparse.Namespace(
**vars(args),
**{"controller-address": f"http://{args.controller_host}:{args.controller_port}"},
)
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
# 0,controller, model_worker, openai_api_server
# 1, cmd options
# 2,LOGDIR
# 3, log file name
base_launch_sh = "nohup python3 -m fastchat.serve.{0} {1} >{2}/{3}.log 2>&1 &"
# 0 LOGDIR
#! 1 log file name
# 2 controller, worker, openai_api_server
base_check_sh = """while [ `grep -c "Uvicorn running on" {0}/{1}.log` -eq '0' ];do
sleep 1s;
echo "wait {2} running"
done
echo '{2} running' """
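# Illustrative sketch (hypothetical values): string_args() below turns the parsed
# namespace into a CLI fragment, e.g. with worker_host="localhost",
# worker_port=21002 and load_8bit=True it yields roughly
# " --host localhost  --port 21002  --load-8bit " for the worker_args list.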
def string_args(args, args_list):
args_str = ""
for key, value in args._get_kwargs():
key = key.replace("_", "-")
if key not in args_list:
continue
key = key.split("-")[-1] if re.search("port|host", key) else key
if not value:
pass
        # isinstance check is needed because 1 == True in Python; only genuine booleans become flags
        elif isinstance(value, bool) and value:
args_str += f" --{key} "
elif (
isinstance(value, list)
or isinstance(value, tuple)
or isinstance(value, set)
):
value = " ".join(value)
args_str += f" --{key} {value} "
else:
args_str += f" --{key} {value} "
return args_str
def launch_worker(item):
log_name = (
item.split("/")[-1]
.split("\\")[-1]
.replace("-", "_")
.replace("@", "_")
.replace(".", "_")
)
args.model_path, args.worker_host, args.worker_port = item.split("@")
print("*" * 80)
worker_str_args = string_args(args, worker_args)
print(worker_str_args)
worker_sh = base_launch_sh.format(
"model_worker", worker_str_args, LOGDIR, f"worker_{log_name}"
)
worker_check_sh = base_check_sh.format(LOGDIR, f"worker_{log_name}", "model_worker")
subprocess.run(worker_sh, shell=True, check=True)
subprocess.run(worker_check_sh, shell=True, check=True)
def launch_all():
controller_str_args = string_args(args, controller_args)
controller_sh = base_launch_sh.format(
"controller", controller_str_args, LOGDIR, "controller"
)
controller_check_sh = base_check_sh.format(LOGDIR, "controller", "controller")
subprocess.run(controller_sh, shell=True, check=True)
subprocess.run(controller_check_sh, shell=True, check=True)
if isinstance(args.model_path_address, str):
launch_worker(args.model_path_address)
else:
for idx, item in enumerate(args.model_path_address):
print(f"loading {idx}th model:{item}")
launch_worker(item)
server_str_args = string_args(args, server_args)
server_sh = base_launch_sh.format(
"openai_api_server", server_str_args, LOGDIR, "openai_api_server"
)
server_check_sh = base_check_sh.format(
LOGDIR, "openai_api_server", "openai_api_server"
)
subprocess.run(server_sh, shell=True, check=True)
subprocess.run(server_check_sh, shell=True, check=True)
if __name__ == "__main__":
launch_all()
|
7a4843e63c0e4b088bbee1d7d827630ec1dd20bd
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/modules/file/test_file_basics.py
|
cee60da2fab4547e72b3d146619aafd5bd889b49
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 8,055
|
py
|
test_file_basics.py
|
import logging
import os
import shutil
import pytest
import salt.config
import salt.loader
import salt.modules.cmdmod as cmdmod
import salt.modules.config as configmod
import salt.modules.file as filemod
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from tests.support.mock import MagicMock, call, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {
filemod: {
"__salt__": {
"config.manage_mode": configmod.manage_mode,
"cmd.run": cmdmod.run,
"cmd.run_all": cmdmod.run_all,
},
"__opts__": {
"test": False,
"file_roots": {"base": "tmp"},
"pillar_roots": {"base": "tmp"},
"cachedir": "tmp",
"grains": {},
},
"__grains__": {"kernel": "Linux"},
}
}
@pytest.fixture
def tmp_sub_dir(tmp_path):
directory = tmp_path / "file-basics-test-dir"
directory.mkdir()
yield directory
shutil.rmtree(str(directory))
@pytest.fixture
def tfile(tmp_sub_dir):
filename = str(tmp_sub_dir / "file-basics-test-file")
with salt.utils.files.fopen(filename, "w+") as fp:
fp.write("Hi hello! I am a file.")
yield filename
os.remove(filename)
@pytest.fixture
def myfile(tmp_sub_dir):
filename = str(tmp_sub_dir / "myfile")
with salt.utils.files.fopen(filename, "w+") as fp:
fp.write(salt.utils.stringutils.to_str("Hello\n"))
yield filename
os.remove(filename)
@pytest.fixture
def a_link(tmp_sub_dir):
path = tmp_sub_dir / "a_link"
linkname = str(path)
yield linkname
if path.exists():
os.remove(linkname)
@pytest.fixture
def a_hardlink(tmp_sub_dir):
path = tmp_sub_dir / "a_hardlink"
linkname = str(path)
yield linkname
if path.exists():
os.remove(linkname)
@pytest.mark.skip_on_windows(reason="os.symlink is not available on Windows")
def test_symlink_already_in_desired_state(tfile, a_link):
os.symlink(tfile, a_link)
result = filemod.symlink(tfile, a_link)
assert result
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_sanity(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
assert result
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_numlinks(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
name_i = os.stat(tfile).st_nlink
assert name_i > 1
@pytest.mark.skip_on_windows(reason="os.link is not available on Windows")
def test_hardlink_working(tfile, a_hardlink):
target = a_hardlink
result = filemod.link(tfile, target)
name_i = os.stat(tfile).st_ino
target_i = os.stat(target).st_ino
assert name_i == target_i
def test_source_list_for_list_returns_file_from_dict_via_http():
with patch("salt.modules.file.os.remove") as remove:
remove.return_value = None
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
"cp.cache_file": MagicMock(return_value="/tmp/http.conf"),
},
):
with patch("salt.utils.http.query") as http_query:
http_query.return_value = {}
ret = filemod.source_list(
[{"http://t.est.com/http/httpd.conf": "filehash"}], "", "base"
)
assert list(ret) == ["http://t.est.com/http/httpd.conf", "filehash"]
def test_source_list_use_requests():
with patch("salt.modules.file.os.remove") as remove:
remove.return_value = None
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
"cp.cache_file": MagicMock(return_value="/tmp/http.conf"),
},
):
expected_call = call(
"http://t.est.com/http/file1",
decode_body=False,
method="HEAD",
)
with patch(
"salt.utils.http.query", MagicMock(return_value={})
) as http_query:
ret = filemod.source_list(
[{"http://t.est.com/http/file1": "filehash"}], "", "base"
)
assert list(ret) == ["http://t.est.com/http/file1", "filehash"]
assert expected_call in http_query.mock_calls
def test_source_list_for_list_returns_existing_file():
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf.fallback"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["salt://http/httpd.conf", "salt://http/httpd.conf.fallback"],
"filehash",
"base",
)
assert list(ret) == ["salt://http/httpd.conf.fallback", "filehash"]
def test_source_list_for_list_returns_file_from_other_env():
def list_master(env):
dct = {"base": [], "dev": ["http/httpd.conf"]}
return dct[env]
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(side_effect=list_master),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
[
"salt://http/httpd.conf?saltenv=dev",
"salt://http/httpd.conf.fallback",
],
"filehash",
"base",
)
assert list(ret) == ["salt://http/httpd.conf?saltenv=dev", "filehash"]
def test_source_list_for_list_returns_file_from_dict():
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=["http/httpd.conf"]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{"salt://http/httpd.conf": ""}], "filehash", "base")
assert list(ret) == ["salt://http/httpd.conf", "filehash"]
def test_source_list_for_list_returns_existing_local_file_slash(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([myfile + "-foo", myfile], "filehash", "base")
assert list(ret) == [myfile, "filehash"]
def test_source_list_for_list_returns_existing_local_file_proto(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list(
["file://" + myfile + "-foo", "file://" + myfile],
"filehash",
"base",
)
assert list(ret) == ["file://" + myfile, "filehash"]
def test_source_list_for_list_returns_local_file_slash_from_dict(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{myfile: ""}], "filehash", "base")
assert list(ret) == [myfile, "filehash"]
def test_source_list_for_list_returns_local_file_proto_from_dict(myfile):
with patch.dict(
filemod.__salt__,
{
"cp.list_master": MagicMock(return_value=[]),
"cp.list_master_dirs": MagicMock(return_value=[]),
},
):
ret = filemod.source_list([{"file://" + myfile: ""}], "filehash", "base")
assert list(ret) == ["file://" + myfile, "filehash"]
|
eaf805988688498450ba96b21931558e5bd05433
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Filters/Modeling/Testing/Python/HyperScalarBar.py
|
f135cdd6346d3c9574573c0a90d4e1195664ab9d
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 5,670
|
py
|
HyperScalarBar.py
|
#!/usr/bin/env python
from vtkmodules.vtkFiltersGeneral import vtkHyperStreamline
from vtkmodules.vtkFiltersGeometry import vtkImageDataGeometryFilter
from vtkmodules.vtkFiltersModeling import vtkOutlineFilter
from vtkmodules.vtkFiltersSources import vtkConeSource
from vtkmodules.vtkImagingHybrid import vtkPointLoad
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkCamera,
vtkLogLookupTable,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer,
)
from vtkmodules.vtkRenderingAnnotation import vtkScalarBarActor
import vtkmodules.vtkInteractionStyle
import vtkmodules.vtkRenderingFreeType
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test the scalar bar actor using a logarithmic lookup table
#
VTK_INTEGRATE_BOTH_DIRECTIONS = 2
#
# generate tensors
ptLoad = vtkPointLoad()
ptLoad.SetLoadValue(100.0)
ptLoad.SetSampleDimensions(20,20,20)
ptLoad.ComputeEffectiveStressOn()
ptLoad.SetModelBounds(-10,10,-10,10,-10,10)
# Generate hyperstreamlines
s1 = vtkHyperStreamline()
s1.SetInputConnection(ptLoad.GetOutputPort())
s1.SetStartPosition(9,9,-9)
s1.IntegrateMinorEigenvector()
s1.SetMaximumPropagationDistance(18.0)
s1.SetIntegrationStepLength(0.1)
s1.SetStepLength(0.01)
s1.SetRadius(0.25)
s1.SetNumberOfSides(18)
s1.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s1.Update()
# Map hyperstreamlines
lut = vtkLogLookupTable()
lut.SetHueRange(.6667,0.0)
scalarBar = vtkScalarBarActor()
scalarBar.SetLookupTable(lut)
scalarBar.SetTitle("Stress")
scalarBar.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
scalarBar.GetPositionCoordinate().SetValue(0.1,0.05)
scalarBar.SetOrientationToVertical()
scalarBar.SetWidth(0.1)
scalarBar.SetHeight(0.9)
scalarBar.SetPosition(0.01,0.1)
scalarBar.SetLabelFormat("%-#6.3f")
scalarBar.GetLabelTextProperty().SetColor(1,0,0)
scalarBar.GetTitleTextProperty().SetColor(1,0,0)
s1Mapper = vtkPolyDataMapper()
s1Mapper.SetInputConnection(s1.GetOutputPort())
s1Mapper.SetLookupTable(lut)
ptLoad.Update()
#force update for scalar range
s1Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())
s1Actor = vtkActor()
s1Actor.SetMapper(s1Mapper)
s2 = vtkHyperStreamline()
s2.SetInputConnection(ptLoad.GetOutputPort())
s2.SetStartPosition(-9,-9,-9)
s2.IntegrateMinorEigenvector()
s2.SetMaximumPropagationDistance(18.0)
s2.SetIntegrationStepLength(0.1)
s2.SetStepLength(0.01)
s2.SetRadius(0.25)
s2.SetNumberOfSides(18)
s2.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s2.Update()
s2Mapper = vtkPolyDataMapper()
s2Mapper.SetInputConnection(s2.GetOutputPort())
s2Mapper.SetLookupTable(lut)
s2Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())
s2Actor = vtkActor()
s2Actor.SetMapper(s2Mapper)
s3 = vtkHyperStreamline()
s3.SetInputConnection(ptLoad.GetOutputPort())
s3.SetStartPosition(9,-9,-9)
s3.IntegrateMinorEigenvector()
s3.SetMaximumPropagationDistance(18.0)
s3.SetIntegrationStepLength(0.1)
s3.SetStepLength(0.01)
s3.SetRadius(0.25)
s3.SetNumberOfSides(18)
s3.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s3.Update()
s3Mapper = vtkPolyDataMapper()
s3Mapper.SetInputConnection(s3.GetOutputPort())
s3Mapper.SetLookupTable(lut)
s3Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())
s3Actor = vtkActor()
s3Actor.SetMapper(s3Mapper)
s4 = vtkHyperStreamline()
s4.SetInputConnection(ptLoad.GetOutputPort())
s4.SetStartPosition(-9,9,-9)
s4.IntegrateMinorEigenvector()
s4.SetMaximumPropagationDistance(18.0)
s4.SetIntegrationStepLength(0.1)
s4.SetStepLength(0.01)
s4.SetRadius(0.25)
s4.SetNumberOfSides(18)
s4.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)
s4.Update()
s4Mapper = vtkPolyDataMapper()
s4Mapper.SetInputConnection(s4.GetOutputPort())
s4Mapper.SetLookupTable(lut)
s4Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())
s4Actor = vtkActor()
s4Actor.SetMapper(s4Mapper)
# plane for context
#
g = vtkImageDataGeometryFilter()
g.SetInputConnection(ptLoad.GetOutputPort())
g.SetExtent(0,100,0,100,0,0)
g.Update()
#for scalar range
gm = vtkPolyDataMapper()
gm.SetInputConnection(g.GetOutputPort())
gm.SetScalarRange(g.GetOutput().GetScalarRange())
ga = vtkActor()
ga.SetMapper(gm)
# Create outline around data
#
outline = vtkOutlineFilter()
outline.SetInputConnection(ptLoad.GetOutputPort())
outlineMapper = vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# Create cone indicating application of load
#
coneSrc = vtkConeSource()
coneSrc.SetRadius(.5)
coneSrc.SetHeight(2)
coneMap = vtkPolyDataMapper()
coneMap.SetInputConnection(coneSrc.GetOutputPort())
coneActor = vtkActor()
coneActor.SetMapper(coneMap)
coneActor.SetPosition(0,0,11)
coneActor.RotateY(90)
coneActor.GetProperty().SetColor(1,0,0)
# Create the rendering infrastructure
#
ren1 = vtkRenderer()
renWin = vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
camera = vtkCamera()
camera.SetFocalPoint(0.113766,-1.13665,-1.01919)
camera.SetPosition(-29.4886,-63.1488,26.5807)
camera.SetViewAngle(24.4617)
camera.SetViewUp(0.17138,0.331163,0.927879)
camera.SetClippingRange(1,100)
ren1.AddActor2D(scalarBar)
ren1.AddActor(s1Actor)
ren1.AddActor(s2Actor)
ren1.AddActor(s3Actor)
ren1.AddActor(s4Actor)
ren1.AddActor(outlineActor)
ren1.AddActor(coneActor)
ren1.AddActor(ga)
ren1.SetBackground(1.0,1.0,1.0)
ren1.SetActiveCamera(camera)
renWin.SetSize(300,300)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
b8dbf3f72e2cefa36228b734f0a998181b8c417a
|
fe131d9715049e3339d1ab14f3e9a0c97a47c5db
|
/sale_order_add_bom/wizard/sale_add_phantom_bom.py
|
c4f4186df957391d9cc89f775ca3a85d64b18d71
|
[] |
no_license
|
akretion/odoo-usability
|
12f5412a27dda2de2436a282d36222f596d219bc
|
5f731d18f1d9016a2faf6ef439be6caf2597aa16
|
refs/heads/16.0
| 2023-07-21T14:41:54.002291
| 2023-07-15T13:40:48
| 2023-07-15T13:40:48
| 28,053,269
| 115
| 183
| null | 2023-08-31T14:04:39
| 2014-12-15T19:51:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
sale_add_phantom_bom.py
|
# Copyright 2016-2022 Akretion France (http://www.akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.tools import float_is_zero
class SaleAddPhantomBom(models.TransientModel):
_name = 'sale.add.phantom.bom'
_description = 'Add Kit to Quotation'
@api.model
def default_get(self, fields_list):
res = super().default_get(fields_list)
if self._context.get('active_model') == 'sale.order':
res['sale_id'] = self._context['active_id']
sale = self.env['sale.order'].browse(res['sale_id'])
res['company_id'] = sale.company_id.id
elif self._context.get('active_model') == 'stock.picking':
res['picking_id'] = self._context['active_id']
picking = self.env['stock.picking'].browse(res['picking_id'])
res['company_id'] = picking.company_id.id
else:
raise UserError(_(
"The wizard can only be started from a sale order or a picking."))
return res
bom_id = fields.Many2one(
'mrp.bom', 'Kit', required=True,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id), ('type', '=', 'phantom'), ('sale_ok', '=', True)]")
company_id = fields.Many2one('res.company', string='Company', required=True)
qty = fields.Integer(
string='Number of Kits to Add', default=1, required=True)
sale_id = fields.Many2one(
'sale.order', string='Quotation')
picking_id = fields.Many2one(
'stock.picking', string='Picking')
@api.model
def _prepare_sale_order_line(self, bom_line, sale_order, wizard_qty):
qty_in_product_uom = bom_line.product_uom_id._compute_quantity(
bom_line.product_qty,
bom_line.product_id.uom_id)
vals = {
'product_id': bom_line.product_id.id,
'product_uom_qty': qty_in_product_uom * wizard_qty,
'order_id': sale_order.id,
}
# on sale.order.line, company_id is a related field
return vals
@api.model
def _prepare_stock_move(self, bom_line, picking, wizard_qty):
product = bom_line.product_id
qty_in_product_uom = bom_line.product_uom_id._compute_quantity(
bom_line.product_qty, product.uom_id)
vals = {
'product_id': product.id,
'product_uom_qty': qty_in_product_uom * wizard_qty,
'product_uom': product.uom_id.id,
'picking_id': picking.id,
'company_id': picking.company_id.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'name': product.partner_ref,
}
return vals
def add(self):
self.ensure_one()
assert self.sale_id or self.picking_id, 'No related sale_id or picking_id'
if self.qty < 1:
raise UserError(_(
"The number of kits to add must be 1 or superior"))
assert self.bom_id.type == 'phantom', 'The BOM is not a kit'
if not self.bom_id.bom_line_ids:
raise UserError(_("The selected kit is empty !"))
prec = self.env['decimal.precision'].precision_get(
'Product Unit of Measure')
solo = self.env['sale.order.line']
smo = self.env['stock.move']
for line in self.bom_id.bom_line_ids:
if float_is_zero(line.product_qty, precision_digits=prec):
continue
# The onchange is played in the inherit of the create()
# of sale order line in the 'sale' module
# TODO: if needed, we could increment existing order lines
# with the same product instead of always creating new lines
if self.sale_id:
vals = self._prepare_sale_order_line(line, self.sale_id, self.qty)
solo.create(vals)
elif self.picking_id:
vals = self._prepare_stock_move(line, self.picking_id, self.qty)
smo.create(vals)
|
2752c136d44cfb40654cec6c8f4a0f6f0c79a99b
|
fe41ede15b4cb24fc15b6b1eb7e9a393ec6bb778
|
/monobit/formats/mac/__init__.py
|
4569c1fb853fedbf37626e9ec35a3a48d03b9293
|
[
"MIT"
] |
permissive
|
robhagemans/monobit
|
424bcb2253c22d8c00d287204f04e9b69867ac48
|
3d19d930344f18080253b4046bb711aaea5620ba
|
refs/heads/master
| 2023-08-29T08:32:43.276030
| 2023-08-12T09:23:20
| 2023-08-12T09:23:20
| 188,114,254
| 154
| 11
|
MIT
| 2023-06-03T12:50:25
| 2019-05-22T21:10:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,964
|
py
|
__init__.py
|
"""
monobit.formats.mac - Mac OS fonts
(c) 2019--2023 Rob Hagemans
licence: https://opensource.org/licenses/MIT
"""
import logging
from ...storage import loaders, savers
from .dfont import parse_resource_fork, save_dfont
from .nfnt import extract_nfnt, convert_nfnt, create_nfnt
from .lisa import _load_lisa
from .iigs import _load_iigs, _save_iigs
@loaders.register(
name='mac',
# the magic is optional - a 'maybe magic'
magic=(b'\0\0\1\0\0',),
patterns=('*.dfont', '*.suit', '*.rsrc',),
)
def load_mac_dfont(instream):
"""Load font from MacOS resource fork or data-fork resource."""
data = instream.read()
return parse_resource_fork(data)
@savers.register(linked=load_mac_dfont)
def save_mac_dfont(fonts, outstream, resource_type:str='NFNT', family_id:int=None):
"""Save font to MacOS resource fork or data-fork resource.
resource_type: type of resource to store font in. One of `sfnt`, `NFNT`.
"""
save_dfont(fonts, outstream, resource_type)
@loaders.register(
name='nfnt',
# \x90\0 is not a formal signature, but the most common set of FONT_TYPE flags
# the \x80 sigs are LISA compressed NFNTs
magic=(b'\x90\0', b'\xb0\0', b'\x90\x80', b'\xb0\x80'),
patterns=('*.f',),
)
def load_nfnt(instream, offset:int=0):
"""
Load font from a bare FONT/NFNT resource.
offset: starting offset in bytes of the NFNT record in the file (default 0)
"""
instream.seek(offset)
data = instream.read()
fontdata = extract_nfnt(data, 0)
return convert_nfnt({}, **fontdata)
@loaders.register(name='lisa')
def load_lisa(instream):
"""Load a LISA font library."""
return _load_lisa(instream)
@loaders.register(
name='iigs',
patterns=('*.fon',),
)
def load_iigs(instream):
"""Load a IIgs font."""
return _load_iigs(instream)
@savers.register(linked=load_iigs)
def save_iigs(fonts, outstream, version:int=None):
"""
Write font to a IIgs font file.
version: IIgs font format version (0x101, 0x105). Default: 0x101 unless needed for bitmap size.
"""
if len(fonts) > 1:
logging.warning('IIgs font file can only store one font.')
font = fonts[0]
_save_iigs(outstream, font, version=version)
@savers.register(linked=load_nfnt)
def save_nfnt(
fonts, outstream,
create_width_table:bool=True,
create_height_table:bool=False,
):
"""
Write font to a bare FONT/NFNT resource.
create_width_table: include a fractional glyph-width table in the resource (default: True)
create_height_table: include an image-height table in the resource (default: False)
"""
if len(fonts) > 1:
logging.warning('NFNT resource can only store one font.')
font = fonts[0]
data, _, _ = create_nfnt(
font, endian='big', ndescent_is_high=True,
create_width_table=create_width_table,
create_height_table=create_height_table,
)
outstream.write(data)
|
0088a3a261696fe793472965c9d1b27c278d077f
|
fa959d7480c83556618b964204687922be2ec464
|
/scripts/profiling.py
|
3435fbde5c8af0df91b8244db1372ac4d1ed5dfd
|
[
"MIT"
] |
permissive
|
luinardi/hypermapper
|
b268f4a176cb9e62ed9d92c89019e14faa5aae12
|
3dfa8a7010792546e2d688b6113569b0cf36958e
|
refs/heads/master
| 2023-09-01T07:44:19.191875
| 2023-03-21T10:02:58
| 2023-03-21T10:02:58
| 229,329,674
| 139
| 24
|
MIT
| 2023-09-06T09:57:33
| 2019-12-20T20:10:54
|
C++
|
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
profiling.py
|
import json
import sys
import os
import math
from collections import OrderedDict
import pandas as pd
import datetime
from utility_functions import *
from jsonschema import Draft4Validator, validators, exceptions
class Profiler:
def __init__(self, config):
"""
        :param config: the HyperMapper run configuration dict; the keys used here are
            "optimization_iterations", "profiling_file", "append_profiles" and "application_name"
"""
self.optimization_iterations = config["optimization_iterations"]
self.profiling_file = config["profiling_file"]
if config["append_profiles"] and os.path.exists(self.profiling_file):
self.append_profiles = True
else:
self.append_profiles = False
self.name = config["application_name"]
self.average_results = {self.name: {}}
self.results = {}
self.start_time = None
def run(self):
self.start_time = datetime.datetime.now()
def stop(self):
runtime = (datetime.datetime.now() - self.start_time).total_seconds()
for key, value in self.results.items():
self.average_results[self.name][key] = sum(value) / float(runtime)
self.average_results[self.name]["Runtime per iteration (sec)"] = (
runtime / self.optimization_iterations
)
if self.append_profiles:
old_profile = pd.read_csv(self.profiling_file, index_col=0).to_dict()
if self.has_same_keys(old_profile, self.average_results):
for key, value in self.average_results.items():
identical_run_counter = 0
naming_key = key
while naming_key in old_profile.keys():
identical_run_counter += 1
naming_key = key + str(identical_run_counter)
old_profile[naming_key] = value
profile_df = pd.DataFrame(old_profile)
profile_df.to_csv(self.profiling_file)
else:
print(
"Could not extend current profile to old one due to header conflicts. Rename the output file or disable append_profiles"
)
print("The current profiling run will not have an output.")
return
else:
profile_df = pd.DataFrame(self.average_results)
profile_df.to_csv(self.profiling_file)
def add(self, message, time):
"""
adds a profiling checkpoint and the associated time, and stores it in self.results
        :param message: the name of the checkpoint in the .csv output file, along the lines of "Acquisition function eval. time"
        :param time: the time in seconds that the relevant section has taken to complete
"""
        if message not in self.results:
            self.results[message] = []
        self.results[message].append(time)
    # check that the old and new profiles have the same measurement headers to enable putting them in the same file
def has_same_keys(self, old_profile, new_profile):
for result_old, result_new in zip(old_profile.values(), new_profile.values()):
return set(result_old) == set(result_new)
|
7d28c271ec979a5d7b1affeeb81cc0c03bc93ab3
|
dcfc88503e3a8df5d9083b512178d254727d1a31
|
/axelrod/strategies/verybad.py
|
747d15d618e6050ffaf056b21909d68887ac73f9
|
[
"MIT"
] |
permissive
|
Axelrod-Python/Axelrod
|
b8502822da103fbf1a56ffbc090453b95bf9f2d8
|
fa748627cd4f0333bb2dbfcb1454372a78a9098a
|
refs/heads/dev
| 2023-09-04T06:41:55.216809
| 2023-07-10T19:42:54
| 2023-07-14T02:37:16
| 30,959,449
| 673
| 289
|
NOASSERTION
| 2023-07-14T02:37:18
| 2015-02-18T09:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
verybad.py
|
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class VeryBad(Player):
"""
It cooperates in the first three rounds, and uses probability
(it implements a memory, which stores the opponent’s moves) to decide for
cooperating or defecting.
Due to a lack of information as to what that probability refers to in this
context, probability(P(X)) refers to (Count(X)/Total_Moves) in this
implementation
P(C) = Cooperations / Total_Moves
P(D) = Defections / Total_Moves = 1 - P(C)
Names:
- VeryBad: [Andre2013]_
"""
name = "VeryBad"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
@staticmethod
def strategy(opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
total_moves = len(opponent.history)
if total_moves < 3:
return C
cooperations = opponent.cooperations
cooperation_probability = cooperations / total_moves
if cooperation_probability > 0.5:
return C
elif cooperation_probability < 0.5:
return D
else:
return opponent.history[-1]
|
d7f509318a12c3b51d3966adcc81faa8a3543b02
|
7c63d456e50fc3f43c6ec6e3c9e3683743d986c4
|
/effdet/config/__init__.py
|
0dd74ec181acca5c0aa089fc573e3c43690ad64b
|
[
"CC-BY-2.0",
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
rwightman/efficientdet-pytorch
|
df2e0fd48f1b6a5f7268059fd35132066c5d4fb9
|
d43c9e34cd62d22b4205831bb735f6dd83b8e881
|
refs/heads/master
| 2023-08-23T20:20:50.900670
| 2023-05-22T19:46:38
| 2023-05-22T19:46:38
| 250,391,956
| 1,590
| 344
|
Apache-2.0
| 2023-07-25T16:33:10
| 2020-03-26T23:09:20
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
__init__.py
|
from .config_utils import set_config_readonly, set_config_writeable
from .fpn_config import get_fpn_config
from .model_config import get_efficientdet_config, default_detection_model_configs
from .train_config import default_detection_train_config
|
56ad584abf91721f06b3784f254dc2b34a3bbbc4
|
55c5ebdfa89ba924fe81323de79002d597d5f4c1
|
/arraytool/src/arraytool/circular.py
|
b14cc56307e9d3bfaf57cbb0e6b60aaba616b36a
|
[
"BSD-3-Clause"
] |
permissive
|
zinka/arraytool
|
4ce31dc17a25e457974e7220e4024f47f167a051
|
9acc5a90a1771750e70590a3d508e2c24a55be87
|
refs/heads/master
| 2021-01-18T15:18:56.202570
| 2016-10-22T05:42:27
| 2016-10-22T05:42:27
| 871,685
| 116
| 30
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,455
|
py
|
circular.py
|
#! /usr/bin/env python
# Author: Srinivasa Rao Zinka (srinivas . zinka [at] gmail . com)
# Copyright (c) 2014 Srinivasa Rao Zinka
# License: New BSD License.
from __future__ import division
import numpy as np
from scipy import integrate, special
import matplotlib.pyplot as plt
import planar as pl
import Zolotarev as zl
from mayavi import mlab
import warnings
# adjusting "matplotlib" label fonts
from matplotlib import rc
rc('text', usetex=True)
def ip_format_c(N, radius, A="uniform", starts_at_zero=True, plot_type="2D",
color='b', linewidth=1, linestyle='-', alpha=1, show=True,
stem=False, stemline='g--', stemmarker='ro', fgcolor=(1, 1, 1),
bgcolor=(0.5, 0.5, 0.5), mayavi_app=False):
r"""
Function to generate the 'Arraytool' input format for circular ring arrays.
:param N: number of elements in the uniformly spaced circular ring array
:param radius: radius of the circular ring in wavelengths
:param A: a 'column matrix' specifying the excitation values of the
circular ring array; by default it will be uniform excitation
:param plot_type: can be '2D'/'3D' ... if False, nothing happens
:param stem: if True, the array excitation is plotted as 'stem plot'
:param mayavi_app: if True, the 3D plot will be opened in the MayaVi application
:param starts_at_zero: 'True' if array starts at beta=0
All other parameters are nothing but the 'Matplotlib/Mayavi' parameters.
These should be familiar to 'Matlab' or 'Matplotlib/Mayavi' users.
:rtype: array_ip, a Numpy array of size (Number of elements(A),4)
"""
# Creating Arraytool input form 'array_ip' for the circular ring array
if (A == "uniform"):
A = np.ones((N, 1))
if (starts_at_zero):
position_beta = (np.linspace(1, N, num=N) - 1) * (2 * np.pi / N)
else:
position_beta = (np.linspace(1, N, num=N) - 0.5) * (2 * np.pi / N)
position_beta = np.reshape(position_beta, (N, -1))
position_x = radius * np.cos(position_beta)
position_y = radius * np.sin(position_beta)
position_z = np.zeros_like(position_x)
array_ip = np.hstack((position_x, position_y, position_z, A))
# Plotting 2D/3D plots
if (plot_type):
# checking whether 'A' has any imaginary values
if((abs(A.imag) > 1e-10).sum()):
A_plt = abs(A) # if A.imag are significant, then '|A|' will be plotted
            warnings.warn('Since the given excitation "A" has significant imaginary parts, a stem plot of abs(A) is plotted')
        else:
            A_plt = A.real # if A.imag are negligible, then 'A' will be plotted
            warnings.warn('Since the given excitation "A" has only negligible imaginary parts, a stem plot of "A.real" is plotted')
if (plot_type == "2D"): # plot 2D plot in Matplotlib
plt.plot(position_beta, A_plt, color=color, linewidth=linewidth,
linestyle=linestyle, alpha=alpha)
if(stem): plt.stem(position_beta, A_plt, linefmt=stemline, markerfmt=stemmarker)
plt.axis('tight'); plt.grid(True)
plt.xlabel(r'$y$', fontsize=16); plt.ylabel(r'$\left|A_{n}\right|$', fontsize=16)
if(show): plt.title(r'$\mathrm{Array}\ \mathrm{Excitation}$', fontsize=18); plt.show()
else:
if (mayavi_app): # this option opens the 3D plot in MayaVi Application
mlab.options.backend = 'envisage'
mlab.figure(fgcolor=fgcolor, bgcolor=bgcolor)
s1 = mlab.quiver3d(position_x, position_y, position_z, position_z, position_z, A_plt) # stem3D representation
ranges1 = [position_x.min(), position_x.max(), position_y.min(), position_y.max(), A_plt.min(), A_plt.max()]
mlab.axes(xlabel="x", ylabel="y", zlabel="A", ranges=ranges1, nb_labels=3)
mlab.colorbar(orientation="vertical", nb_labels=5)
s1.scene.isometric_view()
if(show): mlab.show()
return array_ip
def FS(fun_str_re, fun_str_im='0', T0=2 * np.pi, m_start= -5, m_stop=5, err_lim=1e-8):
"""Function to generate a finite number of Fourier series coefficients of
a periodic function."""
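    # As implemented below, each coefficient is
    #     FS[m] = (1/T0) * integral_{0}^{2*pi} f(x) * exp(-1j*m*w0*x) dx,  w0 = 2*pi/T0,
    # with the real and imaginary parts of the integrand evaluated separately by
    # scipy.integrate.quad (fun_re / fun_img).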
N = m_stop - m_start + 1
FS = np.zeros((N, 1), dtype='complex')
m_index = range(m_start, m_stop + 1)
w0 = 2 * np.pi / T0
for m in m_index:
fun_re = lambda x: (eval(fun_str_re)) * np.cos(m * w0 * x) + (eval(fun_str_im)) * np.sin(m * w0 * x)
fun_img = lambda x:-(eval(fun_str_re)) * np.sin(m * w0 * x) + (eval(fun_str_im)) * np.cos(m * w0 * x)
FS_re = integrate.quad(fun_re, 0, 2 * np.pi)
FS_img = integrate.quad(fun_img, 0, 2 * np.pi)
if ((FS_re[1] + FS_img[1]) < err_lim):
FS[m - m_start] = (1 / T0) * (FS_re[0] + 1j * FS_img[0])
else:
print "Absolute error of the integration is not less than 1e-10 while calculating Fourier series"
print "error(FS_re): ", FS_re[1]
print "error(FS_img): ", FS_img[1]
m_index = np.array(m_index) * (2 * np.pi / T0)
m_index = np.reshape(m_index, (m_index.size, -1))
return m_index, FS
def IFS(FS, T0=2 * np.pi, m_start= -4, m_stop=4, x_min=0, x_max=2 * np.pi, x_num=10):
"""Function to reconstruct (or check) the periodic function from the
obtained Fourier coefficients"""
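    # Reconstruction implemented below: f(x) ~ sum_m FS[m] * exp(1j*m*w0*x) with
    # w0 = 2*pi/T0, evaluated at x_num sample points through the matrix product
    # exp(1j*M*w0*X) . FS.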
m = np.arange(m_start, m_stop + 1)
m = np.reshape(m, (-1, m.size))
M = np.tile(m, (x_num, 1))
x = np.linspace(x_min, x_max, num=x_num)
x = np.reshape(x, (x.size, -1))
X = np.tile(x, (1, m.size))
FS = np.reshape(FS, (FS.size,-1))
# Evaluating the inverse of the Fourier series
IFS = np.dot(np.exp(1j * M * (2 * np.pi / T0) * X), FS)
return x, IFS
def eval_Taylor(P, R, mbar, alpha_x, x):
"""My function description here."""
if(P%2==0):
T0 = 2*np.pi
else:
T0=1*np.pi
A = pl.dist(1, P + 1, R, dist_type_x='Taylor', mbar=mbar, alpha_x=alpha_x)
x, result = IFS(A, T0, m_start= -P / 2, m_stop=P / 2, x_min=x, x_max=x, x_num=1)
# if(P%2==0):
# A = pl.dist(1, P + 1, R, dist_type_x='Taylor', mbar=mbar, alpha_x=alpha_x)
# x, result = IFS(A, T0=2*np.pi, m_start= -P / 2, m_stop=P / 2, x_min=x, x_max=x, x_num=1)
# else:
# A = pl.dist(1, 2*P + 1, R, dist_type_x='Taylor', mbar=mbar, alpha_x=alpha_x)
# x, result = IFS(A, T0=1*np.pi, m_start= -P, m_stop=P, x_min=x, x_max=x, x_num=1)
result = result[0,0].real
return result
def eval_Bayliss(P, R, mbar, alpha_x, x):
"""My function description here."""
if(P%2==0):
print "Order needs to be an ODD number for null patterns"
else:
T0=1*np.pi
A = pl.dist(1, P + 1, R, dist_type_x='Bayliss', mbar=mbar, alpha_x=alpha_x)
x, result = IFS(A, T0, m_start= -P / 2, m_stop=P / 2, x_min=x, x_max=x, x_num=1)
result = result[0,0].imag
return result
def FS_Taylor(N, R, mbar, alpha_x, x_min, x_max, x_num, plot_far=False, dB_limit= -40):
"""Function to evaluate Fourier series coefficients of Chebyshev far-field
pattern"""
R = str(R)
mbar = str(mbar)
alpha_x = str(alpha_x)
if(N % 2 == 0):
m_start = int(-N / 2)
m_stop = int(N / 2)
N = str(N)
fun_str_re = 'eval_Taylor(' + N + ',' + R + ',' + mbar + ',' + alpha_x + ',' + 'x' +')'
print fun_str_re
m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
else:
m_start = -N
m_stop = N
N = str(N)
fun_str_re = 'eval_Taylor(' + N + ',' + R + ',' + mbar + ',' + alpha_x + ',' + 'x' +')'
print fun_str_re
m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
if(plot_far):
x, AF = IFS(zm, 2 * np.pi, m_start, m_stop, x_min, x_max, x_num)
AF = 20 * np.log10(abs(AF))
AF = pl.cutoff(AF, dB_limit)
plt.plot(x * (180 / np.pi), AF); plt.axis('tight'); plt.grid(True)
plt.title('Far-field Pattern')
plt.xlabel(r'$\phi$')
plt.ylabel('AF')
plt.show()
return m_index, zm
def FS_Bayliss(N, R, mbar, alpha_x, x_min, x_max, x_num, plot_far=False, dB_limit= -40):
"""Function to evaluate Fourier series coefficients of Chebyshev far-field
pattern"""
R = str(R)
mbar = str(mbar)
alpha_x = str(alpha_x)
if(N % 2 == 0):
print "Order needs to be an ODD number for null patterns"
else:
m_start = -N
m_stop = N
N = str(N)
fun_str_re = 'eval_Bayliss(' + N + ',' + R + ',' + mbar + ',' + alpha_x + ',' + 'x' +')'
print fun_str_re
m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
if(plot_far):
x, AF = IFS(zm, 2 * np.pi, m_start, m_stop, x_min, x_max, x_num)
AF = 20 * np.log10(abs(AF))
AF = pl.cutoff(AF, dB_limit)
plt.plot(x * (180 / np.pi), AF); plt.axis('tight'); plt.grid(True)
plt.title('Far-field Pattern')
plt.xlabel(r'$\phi$')
plt.ylabel('AF')
plt.show()
return m_index, zm
def FS_Chebyshev(N, R, x_min, x_max, x_num, plot_far=False, dB_limit=-40):
"""Function to evaluate Fourier series coefficients of Chebyshev far-field
pattern"""
c = np.cosh(np.arccosh(R) / (N))
c = str(c)
    if (N % 2 == 0):
        m_start = -N // 2
        m_stop = N // 2
N = str(N)
fun_str_re = 'special.eval_chebyt(' + N + ',' + c + '*np.cos(x/2))'
m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
else:
m_start = -N # make this (2*P+1) ... and take fourier for only half period
m_stop = N
N = str(N)
fun_str_re = 'special.eval_chebyt(' + N + ',' + c + '*np.cos(x))'
m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
if(plot_far):
x, AF = IFS(zm, 2 * np.pi, m_start, m_stop, x_min, x_max, x_num)
AF = 20 * np.log10(abs(AF))
AF = pl.cutoff(AF, dB_limit)
plt.plot(x * (180 / np.pi), AF); plt.axis('tight'); plt.grid(True)
plt.title('Far-field Pattern')
plt.xlabel(r'$\phi$')
plt.ylabel('AF')
plt.show()
return m_index, zm
def FS_Zolotarev(N, R, x_min, x_max, x_num, plot_far=False, dB_limit=-40):
    """Function to evaluate Fourier series coefficients of the Zolotarev far-field
    pattern"""
    if (N % 2 == 0):
        raise ValueError("Order needs to be an ODD number for null patterns")
    m_start = -N  # make this (2*P+1) ... and take fourier for only half period
    m_stop = N
    m = zl.z_m_frm_R(N, R, a=0.1, b=0.9999999999999)
    m = str(m)
    N = str(N)
    fun_str_re = 'zl.z_Zolotarev(' + N + ',' + 'np.sin(x)' + ',' + m + ')'
    m_index, zm = FS(fun_str_re, m_start=m_start, m_stop=m_stop, err_lim=1e-5)
if(plot_far):
x, AF = IFS(zm, 2 * np.pi, m_start, m_stop, x_min, x_max, x_num)
AF = 20 * np.log10(abs(AF))
AF = pl.cutoff(AF, dB_limit)
plt.plot(x * (180 / np.pi), AF); plt.axis('tight'); plt.grid(True)
plt.title('Far-field Pattern')
plt.xlabel(r'$\phi$')
plt.ylabel('AF')
plt.show()
return m_index, zm
def dist_c_az(P, N, radius, R, mbar=5, alpha_x=0, starts_at_zero=True,
              dist_type=None, plot_far=False, plot_modes=False, scan=False):
    r"""
    This function gives array excitation coefficients corresponding to various
    'circular' array distribution types (for 'azimuthal' patterns) such as
    Chebyshev, Zolotarev, Taylor, Bayliss, etc.
    :param P:          order of the 'continuous' distribution (be careful! it
                       has nothing to do with the number of elements 'N')
    :param N:          number of elements in the uniformly spaced circular ring
                       array
    :param radius:     radius of the circular ring in wavelengths
    :param R:          side-lobe ratio in linear scale
    :param dist_type:  type of the distribution, e.g., 'Chebyshev', 'Zolotarev', etc.
    :param mbar:       transition index for dilation
    :param alpha_x:    Taylor's asymptotic tapering parameter
    :param scan:       scan angle (in radians) used to steer the main beam
    :rtype: (A, cm), where A is a Numpy array of element excitations of size
            (N, 1) and cm holds the near-field mode coefficients
    """
if(dist_type == 'Chebyshev'):
m, zm = FS_Chebyshev(P, R, x_min=0, x_max=2 * np.pi, x_num=500,
plot_far=plot_far, dB_limit= -40)
elif(dist_type == 'Taylor'):
m, zm = FS_Taylor(P, R, mbar, alpha_x, x_min=0, x_max=2*np.pi,
x_num=500, plot_far=plot_far, dB_limit= -40)
elif(dist_type == 'Zolotarev'):
m, zm = FS_Zolotarev(P, R, x_min=0, x_max=2 * np.pi, x_num=500,
plot_far=plot_far, dB_limit= -40)
elif(dist_type == 'Bayliss'):
m, zm = FS_Bayliss(P, R, mbar, alpha_x, x_min=0, x_max=2*np.pi,
x_num=500, plot_far=plot_far, dB_limit= -40)
cm = zm / ((1j ** m) * special.jn(m, 2 * np.pi * radius))
# Plotting the normalized absolute values of zm and cm
if(plot_modes):
plt.plot(m, abs(cm) / abs(cm).max(), 'r--', label=r"$\mathrm{a_m}$")
plt.plot(m, abs(zm) / abs(zm).max(), label=r"$\mathrm{f_m}$")
plt.axis('tight'); plt.grid(True)
plt.title(r'$\mathrm{Far\ \&\ Near-field\ \ Modes \ (f_m\ \& \ a_m)}$')
plt.xlabel(r'$\mathrm{m\ (Mode\ Number)}$')
plt.ylabel(r'$\mathrm{Mode\ Amplitude\ (abs)}$')
plt.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=1)
plt.show()
# Finally, discretizing the continuous distribution
if (starts_at_zero):
beta = (np.linspace(1, N, num=N) - 1) * (2 * np.pi / N)
else:
beta = (np.linspace(1, N, num=N) - 0.5) * (2 * np.pi / N)
beta = np.reshape(beta, (N, -1))
beta_tile = np.tile(beta, cm.size)
m_tile = np.tile(m.T, (N, 1))
A = np.dot(np.exp(1j * m_tile * (beta_tile - scan)), cm)
return A, cm
if __name__ == '__main__':
#==============================================================================
# Circular Chebyshev related script
#==============================================================================
SLR = 25 # side-lobe ratio in dB
R = 10 ** (SLR / 20) # converting SLR from dB scale to linear scale
P = 21
N = 150
radius = 2 / np.pi
A, cm = dist_c_az(P, N, radius, R, mbar=5, alpha_x=0, starts_at_zero=True,
dist_type='Chebyshev',plot_far=False, plot_modes=False, scan=False)
array_ip = ip_format_c(N, radius, A, starts_at_zero=True, plot_type="3d", stem=False)
phi, F = pl.pattern_t(array_ip, tht_scan=(0) * np.pi, phi_scan=(0) * np.pi, tht=0.5 * np.pi,
phi_min=0, phi_max=2 * np.pi, phi_num=500, scale="dB",
dB_limit= -200, factor="NF", plot_type='rect')
#==============================================================================
# Circular Taylor related script
#==============================================================================
# SLR = 25 # side-lobe ratio in dB
# R = 10 ** (SLR / 20) # converting SLR from dB scale to linear scale
# P = 9
# N = 150
# radius = 2 / np.pi
# mbar = 3
# alpha_x =0
#
## A = pl.dist(1, P + 1, R, dist_type_x='Taylor', mbar=mbar, alpha_x=alpha_x)
## array_ip = pl.ip_format(1, 0, A, plot=True, stem=True, mayavi_app=False)
## pl.pattern_u(array_ip, u_scan=0, u_min= 0, u_max=1, u_num=700, scale="dB",
## dB_limit= -40, factor="AF", plot_type="rect", lattice=True)
#
## print eval_Bayliss(P, R, mbar, alpha_x, x=0)
## m_index, zm = FS_Bayliss(P, R, mbar, alpha_x, x_min=0, x_max=2*np.pi, x_num=500, plot_far=True, dB_limit= -40)
#
# A, cm = dist_c_az(P, N, radius, R, mbar, alpha_x, starts_at_zero=True,
# dist_type='Zolotarev',plot_far=True, plot_modes=False, scan=False)
# array_ip = ip_format_c(N, radius, A, starts_at_zero=True, plot_type="3d", stem=False)
#
# phi, F = pl.pattern_t(array_ip, tht_scan=(0) * np.pi, phi_scan=(0) * np.pi, tht=0.5 * np.pi,
# phi_min=0, phi_max=2 * np.pi, phi_num=500, scale="dB",
# dB_limit= -120, factor="NF", plot_type='polar')
#==============================================================================
# Notes to myself
#==============================================================================
# streamline Fourier coefficient evaluation ... i.e., FS_Chebyshev, etc
# ===== File: /mouse_teleop/mouse_teleop/mouse_teleop.py (ros-teleop/teleop_tools, BSD-2-Clause) =====
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Enrique Fernandez
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Enrique Fernandez nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# Authors:
# * Enrique Fernandez
# * Jeremie Deray (artivis)
import signal
import tkinter
from geometry_msgs.msg import Twist, Vector3
import numpy
import rclpy
from rclpy.node import Node
class MouseTeleop(Node):
def __init__(self):
super().__init__('mouse_teleop')
# Retrieve params:
self._frequency = self.declare_parameter('frequency', 0.0).value
self._scale = self.declare_parameter('scale', 1.0).value
self._holonomic = self.declare_parameter('holonomic', False).value
# Create twist publisher:
self._pub_cmd = self.create_publisher(Twist, 'mouse_vel', 10)
# Initialize twist components to zero:
self._v_x = 0.0
self._v_y = 0.0
self._w = 0.0
# Initialize mouse position (x, y) to None (unknown); it's initialized
# when the mouse button is pressed on the _start callback that handles
# that event:
self._x = None
self._y = None
# Create window:
self._root = tkinter.Tk()
self._root.title('Mouse Teleop')
# Make window non-resizable:
self._root.resizable(0, 0)
# Create canvas:
self._canvas = tkinter.Canvas(self._root, bg='white')
# Create canvas objects:
self._canvas.create_arc(0, 0, 0, 0, fill='red', outline='red',
width=1, style=tkinter.PIESLICE, start=90.0, tag='w')
self._canvas.create_line(0, 0, 0, 0, fill='blue', width=4, tag='v_x')
if self._holonomic:
self._canvas.create_line(0, 0, 0, 0, fill='blue', width=4, tag='v_y')
# Create canvas text objects:
self._text_v_x = tkinter.StringVar()
if self._holonomic:
self._text_v_y = tkinter.StringVar()
self._text_w = tkinter.StringVar()
self._label_v_x = tkinter.Label(self._root, anchor=tkinter.W, textvariable=self._text_v_x)
if self._holonomic:
self._label_v_y = tkinter.Label(
self._root, anchor=tkinter.W, textvariable=self._text_v_y)
self._label_w = tkinter.Label(self._root, anchor=tkinter.W, textvariable=self._text_w)
if self._holonomic:
self._text_v_x.set('v_x = %0.2f m/s' % self._v_x)
self._text_v_y.set('v_y = %0.2f m/s' % self._v_y)
self._text_w.set('w = %0.2f deg/s' % self._w)
else:
self._text_v_x.set('v = %0.2f m/s' % self._v_x)
self._text_w.set('w = %0.2f deg/s' % self._w)
self._label_v_x.pack()
if self._holonomic:
self._label_v_y.pack()
self._label_w.pack()
# Bind event handlers:
self._canvas.bind('<Button-1>', self._start)
self._canvas.bind('<ButtonRelease-1>', self._release)
self._canvas.bind('<Configure>', self._configure)
if self._holonomic:
self._canvas.bind('<B1-Motion>', self._mouse_motion_linear)
self._canvas.bind('<Shift-B1-Motion>', self._mouse_motion_angular)
self._root.bind('<Shift_L>', self._change_to_motion_angular)
self._root.bind('<KeyRelease-Shift_L>', self._change_to_motion_linear)
else:
self._canvas.bind('<B1-Motion>', self._mouse_motion_angular)
self._canvas.pack()
# If frequency is positive, use synchronous publishing mode:
if self._frequency > 0.0:
# Create timer for the given frequency to publish the twist:
period = 1.0 / self._frequency
self._timer = self.create_timer(period, self._publish_twist)
# Handle ctrl+c on the window
self._root.bind('<Control-c>', self._quit)
# Nasty polling-trick to handle ctrl+c in terminal
self._root.after(50, self._check)
        signal.signal(signal.SIGINT, self._handle_signal)
# Start window event manager main loop:
self._root.mainloop()
def _quit(self, ev):
self._root.quit()
def __del__(self):
self._root.quit()
def _check(self):
self._root.after(50, self._check)
def _handle_signal(self, signum, frame):
self._quit(None)
def _start(self, event):
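        # Note: event x/y are deliberately swapped here (and in _configure and
        # the motion handlers) so that vertical mouse motion maps to the
        # forward velocity v_x.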
self._x, self._y = event.y, event.x
self._y_linear = self._y_angular = 0
self._v_x = self._v_y = self._w = 0.0
def _release(self, event):
self._v_x = self._v_y = self._w = 0.0
self._send_motion()
def _configure(self, event):
self._width, self._height = event.height, event.width
self._c_x = self._height / 2.0
self._c_y = self._width / 2.0
self._r = min(self._height, self._width) * 0.25
def _mouse_motion_linear(self, event):
self._v_x, self._v_y = self._relative_motion(event.y, event.x)
self._send_motion()
def _mouse_motion_angular(self, event):
self._v_x, self._w = self._relative_motion(event.y, event.x)
self._send_motion()
def _update_coords(self, tag, x0, y0, x1, y1):
x0 += self._c_x
y0 += self._c_y
x1 += self._c_x
y1 += self._c_y
self._canvas.coords(tag, (x0, y0, x1, y1))
def _draw_v_x(self, v):
x = -v * float(self._width)
self._update_coords('v_x', 0, 0, 0, x)
def _draw_v_y(self, v):
y = -v * float(self._height)
self._update_coords('v_y', 0, 0, y, 0)
def _draw_w(self, w):
x0 = y0 = -self._r
x1 = y1 = self._r
self._update_coords('w', x0, y0, x1, y1)
yaw = w * numpy.rad2deg(self._scale)
self._canvas.itemconfig('w', extent=yaw)
def _send_motion(self):
self._draw_v_x(self._v_x)
if self._holonomic:
self._draw_v_y(self._v_y)
self._draw_w(self._w)
if self._holonomic:
self._text_v_x.set('v_x = %0.2f m/s' % self._v_x)
self._text_v_y.set('v_y = %0.2f m/s' % self._v_y)
self._text_w.set('w = %0.2f deg/s' % numpy.rad2deg(self._w))
else:
self._text_v_x.set('v = %0.2f m/s' % self._v_x)
self._text_w.set('w = %0.2f deg/s' % numpy.rad2deg(self._w))
v_x = self._v_x * self._scale
v_y = self._v_y * self._scale
w = self._w * self._scale
lin = Vector3(x=v_x, y=v_y, z=0.0)
ang = Vector3(x=0.0, y=0.0, z=w)
twist = Twist(linear=lin, angular=ang)
self._pub_cmd.publish(twist)
def _publish_twist(self):
self._send_motion()
def _relative_motion(self, x, y):
dx = self._x - x
dy = self._y - y
dx /= float(self._width)
dy /= float(self._height)
dx = max(-1.0, min(dx, 1.0))
dy = max(-1.0, min(dy, 1.0))
return dx, dy
def _change_to_motion_linear(self, event):
if self._y is not None:
y = event.x
self._y_angular = self._y - y
self._y = self._y_linear + y
def _change_to_motion_angular(self, event):
if self._y is not None:
y = event.x
self._y_linear = self._y - y
self._y = self._y_angular + y
def main():
try:
rclpy.init()
node = MouseTeleop()
node.destroy_node()
rclpy.shutdown()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
# ===== File: /yt_dlp/extractor/la7.py (yt-dlp/yt-dlp, Unlicense and others) =====
import re
from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import float_or_none, int_or_none, parse_duration, unified_strdate
class LA7IE(InfoExtractor):
IE_NAME = 'la7.it'
_VALID_URL = r'''(?x)https?://(?:
(?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video|news)/|
tg\.la7\.it/repliche-tgla7\?id=
)(?P<id>.+)'''
_TESTS = [{
# single quality video
'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722',
'md5': '8b613ffc0c4bf9b9e377169fc19c214c',
'info_dict': {
'id': 'inccool8-02-10-2015-163722',
'ext': 'mp4',
'title': 'Inc.Cool8',
'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico',
'thumbnail': 're:^https?://.*',
'upload_date': '20151002',
'formats': 'count:4',
},
}, {
# multiple quality video
'url': 'https://www.la7.it/calcio-femminile/news/il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736',
'md5': 'd2370e78f75e8d1238cb3a0db9a2eda3',
'info_dict': {
'id': 'il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736',
'ext': 'mp4',
'title': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile',
'description': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile',
'thumbnail': 're:^https?://.*',
'upload_date': '20221126',
'formats': 'count:8',
},
}, {
'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077',
'only_matching': True,
}]
_HOST = 'https://awsvodpkg.iltrovatore.it'
def _generate_mp4_url(self, quality, m3u8_formats):
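        # For an HLS video format whose URL contains the requested quality,
        # probe the corresponding progressive .mp4 URL with a HEAD request;
        # if it exists, clone the format entry as an 'https-' variant with
        # the reported Content-Length as its approximate filesize.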
for f in m3u8_formats:
if f['vcodec'] != 'none' and quality in f['url']:
http_url = f'{self._HOST}{quality}.mp4'
urlh = self._request_webpage(
HEADRequest(http_url), quality,
note='Check filesize', fatal=False)
if urlh:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': http_f['format_id'].replace('hls-', 'https-'),
'url': http_url,
'protocol': 'https',
'filesize_approx': int_or_none(urlh.headers.get('Content-Length', None)),
})
return http_f
return None
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if re.search(r'(?i)(drmsupport\s*:\s*true)\s*', webpage):
self.report_drm(video_id)
video_path = self._search_regex(
r'(/content/[\w/,]+?)\.mp4(?:\.csmil)?/master\.m3u8', webpage, 'video_path')
formats = self._extract_mpd_formats(
f'{self._HOST}/local/dash/,{video_path}.mp4.urlset/manifest.mpd',
video_id, mpd_id='dash', fatal=False)
m3u8_formats = self._extract_m3u8_formats(
f'{self._HOST}/local/hls/,{video_path}.mp4.urlset/master.m3u8',
video_id, 'mp4', m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
for q in filter(None, video_path.split(',')):
http_f = self._generate_mp4_url(q, m3u8_formats)
if http_f:
formats.append(http_f)
return {
'id': video_id,
'title': self._og_search_title(webpage, default=None),
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'formats': formats,
'upload_date': unified_strdate(self._search_regex(r'datetime="(.+?)"', webpage, 'upload_date', fatal=False))
}
class LA7PodcastEpisodeIE(InfoExtractor):
IE_NAME = 'la7.it:pod:episode'
_VALID_URL = r'https?://(?:www\.)?la7\.it/[^/]+/podcast/([^/]+-)?(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.la7.it/voicetown/podcast/la-carezza-delle-memoria-di-carlo-verdone-23-03-2021-371497',
'md5': '7737d4d79b3c1a34b3de3e16297119ed',
'info_dict': {
'id': '371497',
'ext': 'mp3',
'title': '"La carezza delle memoria" di Carlo Verdone',
'description': 'md5:5abf07c3c551a687db80af3f9ceb7d52',
'thumbnail': 'https://www.la7.it/sites/default/files/podcast/371497.jpg',
'upload_date': '20210323',
},
}, {
# embed url
'url': 'https://www.la7.it/embed/podcast/371497',
'only_matching': True,
}, {
# date already in the title
'url': 'https://www.la7.it/propagandalive/podcast/lintervista-di-diego-bianchi-ad-annalisa-cuzzocrea-puntata-del-1932021-20-03-2021-371130',
'only_matching': True,
}, {
# title same as show_title
'url': 'https://www.la7.it/otto-e-mezzo/podcast/otto-e-mezzo-26-03-2021-372340',
'only_matching': True,
}]
def _extract_info(self, webpage, video_id=None, ppn=None):
if not video_id:
video_id = self._search_regex(
r'data-nid=([\'"])(?P<vid>\d+)\1',
webpage, 'video_id', group='vid')
media_url = self._search_regex(
(r'src\s*:\s*([\'"])(?P<url>\S+?mp3.+?)\1',
r'data-podcast\s*=\s*([\'"])(?P<url>\S+?mp3.+?)\1'),
webpage, 'media_url', group='url')
formats = [{
'url': media_url,
'format_id': 'http-mp3',
'ext': 'mp3',
'acodec': 'mp3',
'vcodec': 'none',
}]
title = self._html_search_regex(
(r'<div class="title">(?P<title>.+?)</',
r'<title>(?P<title>[^<]+)</title>',
r'title:\s*([\'"])(?P<title>.+?)\1'),
webpage, 'title', group='title')
description = (
self._html_search_regex(
(r'<div class="description">(.+?)</div>',
r'<div class="description-mobile">(.+?)</div>',
r'<div class="box-txt">([^<]+?)</div>',
r'<div class="field-content"><p>(.+?)</p></div>'),
webpage, 'description', default=None)
or self._html_search_meta('description', webpage))
thumb = self._html_search_regex(
(r'<div class="podcast-image"><img src="(.+?)"></div>',
r'<div class="container-embed"[^<]+url\((.+?)\);">',
r'<div class="field-content"><img src="(.+?)"'),
webpage, 'thumbnail', fatal=False, default=None)
duration = parse_duration(self._html_search_regex(
r'<span class="(?:durata|duration)">([\d:]+)</span>',
webpage, 'duration', fatal=False, default=None))
date = self._html_search_regex(
r'class="data">\s*(?:<span>)?([\d\.]+)\s*</',
webpage, 'date', default=None)
date_alt = self._search_regex(
r'(\d+[\./]\d+[\./]\d+)', title, 'date_alt', default=None)
ppn = ppn or self._search_regex(
r'ppN:\s*([\'"])(?P<ppn>.+?)\1',
webpage, 'ppn', group='ppn', default=None)
# if the date is not in the title
# and title is the same as the show_title
# add the date to the title
if date and not date_alt and ppn and ppn.lower() == title.lower():
title = f'{title} del {date}'
return {
'id': video_id,
'title': title,
'description': description,
'duration': float_or_none(duration),
'formats': formats,
'thumbnail': thumb,
'upload_date': unified_strdate(date),
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
return self._extract_info(webpage, video_id)
class LA7PodcastIE(LA7PodcastEpisodeIE): # XXX: Do not subclass from concrete IE
IE_NAME = 'la7.it:podcast'
_VALID_URL = r'https?://(?:www\.)?la7\.it/(?P<id>[^/]+)/podcast/?(?:$|[#?])'
_TESTS = [{
'url': 'https://www.la7.it/propagandalive/podcast',
'info_dict': {
'id': 'propagandalive',
'title': "Propaganda Live",
},
'playlist_count_min': 10,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = (
self._html_search_regex(
r'<h1.*?>(.+?)</h1>', webpage, 'title', fatal=False, default=None)
or self._og_search_title(webpage))
ppn = self._search_regex(
r'window\.ppN\s*=\s*([\'"])(?P<ppn>.+?)\1',
webpage, 'ppn', group='ppn', default=None)
entries = []
for episode in re.finditer(
r'<div class="container-podcast-property">([\s\S]+?)(?:</div>\s*){3}',
webpage):
entries.append(self._extract_info(episode.group(1), ppn=ppn))
return self.playlist_result(entries, playlist_id, title)
# ===== File: /pycaret/internal/pycaret_experiment/non_ts_supervised_experiment.py (pycaret/pycaret, MIT) =====
import pandas as pd
from pycaret.internal.pycaret_experiment.supervised_experiment import (
_SupervisedExperiment,
)
class _NonTSSupervisedExperiment(_SupervisedExperiment):
def __init__(self) -> None:
super().__init__()
@property
def test(self):
"""Test set."""
return self.dataset.loc[self.idx[1], :]
@property
def X(self):
"""Feature set."""
return self.dataset.drop(self.target_param, axis=1)
@property
def X_train(self):
"""Feature set of the training set."""
return self.train.drop(self.target_param, axis=1)
@property
def X_test(self):
"""Feature set of the test set."""
return self.test.drop(self.target_param, axis=1)
@property
def dataset_transformed(self):
"""Transformed dataset."""
return pd.concat([self.train_transformed, self.test_transformed])
@property
def train_transformed(self):
"""Transformed training set."""
return pd.concat(
[
*self.pipeline.transform(
X=self.X_train,
y=self.y_train,
filter_train_only=False,
)
],
axis=1,
)
@property
def test_transformed(self):
"""Transformed test set."""
return pd.concat(
[
*self.pipeline.transform(
X=self.X_test,
y=self.y_test,
)
],
axis=1,
)
@property
def X_transformed(self):
"""Transformed feature set."""
return pd.concat([self.X_train_transformed, self.X_test_transformed])
@property
def y_transformed(self):
"""Transformed target column."""
return pd.concat([self.y_train_transformed, self.y_test_transformed])
@property
def X_train_transformed(self):
"""Transformed feature set of the training set."""
return self.train_transformed.drop(self.target_param, axis=1)
@property
def y_train_transformed(self):
"""Transformed target column of the training set."""
return self.train_transformed[self.target_param]
@property
def X_test_transformed(self):
"""Transformed feature set of the test set."""
return self.test_transformed.drop(self.target_param, axis=1)
@property
def y_test_transformed(self):
"""Transformed target column of the test set."""
return self.test_transformed[self.target_param]
def _create_model_get_train_X_y(self, X_train, y_train):
"""Return appropriate training X and y values depending on whether
        X_train and y_train are passed or not. If X_train and y_train are not
        passed, the internal self.X_train and self.y_train are returned. If they
        are passed, then a copy of them is returned.
"""
        if X_train is not None:
            data_X = X_train.copy()
        elif self.X_train is None:
            data_X = None
        else:
            data_X = self.X_train
data_y = self.y_train if y_train is None else y_train.copy()
return data_X, data_y
# ===== File: /io/tools/build_from_doxygen.py (siconos/siconos, Apache-2.0) =====
#!/usr/bin/env python3
import os, os.path, sys
from glob import glob
import lxml.etree
import re
from builder_common import *
def get_classes_conditional(doxy_xml_files, cond):
"""Get classes and members from a list of Doxygen XML files that
meet the given condition."""
found = {}
for xmlfile in doxy_xml_files:
xml = lxml.etree.parse(xmlfile)
classes = xml.xpath('.//compounddef[@kind="class" or @kind="struct"]')
for cl in classes:
if cond(cl):
classname = cl.find('./compoundname')
baseclasses = cl.xpath('./basecompoundref')
membervars = cl.xpath('.//memberdef[@kind="variable"]/name')
                # An exception: members get attached to Graph classes
                # through this macro, which is not understood by
                # Doxygen, so we have to parse it ourselves.
graphvars = cl.xpath('.//memberdef[@kind="function"]/name'
+'[text()="INSTALL_GRAPH_PROPERTIES"]')
graphmems = []
if len(graphvars)>0:
                    r = re.compile(r'\(\(\w+,\s*[\w: ]+,\s*(\w+)\)\)')
for g in graphvars:
for a in g.xpath('../argsstring'):
graphmems += r.findall(a.text)
# The INSTALL_GRAPH_PROPERTIES macro also adds a
# bool called "dummy"
graphmems.append('dummy')
location = cl.find('./location')
found[classname.text] = (
{'name': classname.text,
'bases': [base.text for base in baseclasses],
'members': [mem.text for mem in membervars] + graphmems,
'filepath': location.attrib['file'],
'line': int(location.attrib['line']),
'abstract': cl.xpath('@abstract="yes"'),
})
return found
def classes_from_build_path(build_path, targets):
"""Get classes and members from all Doxygen XML files found on the
provided build path."""
doxy_xml_path = os.path.join(build_path,'docs/build/doxygen/xml4rst')
if not os.path.exists(doxy_xml_path):
print('%s: Error, path "%s" does not exist.'%(sys.argv[0], doxy_xml_path))
sys.exit(1)
doxy_xml_files = []
for component in targets:
doxypath = os.path.join(doxy_xml_path, component)
doxy_xml_files += glob(os.path.join(doxypath, 'class*.xml'))
doxy_xml_files += glob(os.path.join(doxypath, 'struct*.xml'))
# We want only classes that contain calls to the
# ACCEPT_SERIALIZATION macro.
serializable = './/memberdef/name[text()="ACCEPT_SERIALIZATION"]'
def pred(x):
return len(x.xpath(serializable)) > 0
return get_classes_conditional(doxy_xml_files, pred)
def assign_targets(classes, source_dir):
"""For each class, figure out which target it is associated with based
on the file path."""
for cl in classes.values():
cl['target'] = get_target(source_dir, cl['filepath'])
def assign_priorities(classes, source_dir):
"""For each class, get its priority to help in ordering the
declarations in the generated file."""
for cl in classes.values():
cl['priority'] = get_priority(cl['name'], source_dir,
cl['filepath'], cl['line'])
def resolve_base_classes(classes):
"""For each class, find those base classes which are also in the list.
This is to exclude STL and BOOST base classes,
e.g. enable_shared_from_this<>. In practice we only want to
list base classes which are serializable."""
for cl in classes.values():
resolved = []
for base in cl['bases']:
if base in classes:
resolved.append(base)
cl['resolved_bases'] = resolved
def remove_unwanted_resolved(classes):
"""This is a bit of an ugly hack: For some classes, they are not in
"unwanted" because we want them to resolve as base classes, but
they are covered in SiconosFull.hpp, so we don't want them to
appear in generated headers."""
unwanted_resolved = ['_DynamicalSystemsGraph', '_InteractionsGraph']
for u in unwanted_resolved:
if u in classes:
del classes[u]
def classes_from_headers(all_headers, include_paths):
"""Use compiler preprocessor to find an approximate list of classes
referenced by a set of headers. May return some words which
are not classes."""
import os, os.path, tempfile, shutil
classes = []
try:
d = tempfile.mkdtemp()
hpp = os.path.join(d, 'headers.hpp')
compiled = os.path.join(d, 'out.cpp')
with open(hpp,'w') as h:
[print('#include "%s"'%i, file=h) for i in all_headers]
cxx = 'g++'
if 'CXX' in os.environ:
cxx = os.environ['CXX']
cmd = [cxx, '-o', compiled, '-E']
for i in include_paths:
cmd += ['-I', i]
cmd.append(hpp)
# print(' '.join(cmd))
os.system(' '.join(cmd))
with open(compiled, 'r') as out:
for line in out:
words = line.split()
if len(words)>=2 and (words[0]=='class' or words[0]=='struct'):
classes.append(words[1].strip(':'))
return classes
finally:
shutil.rmtree(d)
if __name__=='__main__':
(include_paths,
siconos_namespace,
targets,
generated_file,
source_dir,
generated_header,
build_path) = parse_args(need_build_path=True)
all_headers = get_headers(targets)
doxygen_classes = classes_from_build_path(build_path, targets)
header_classes = classes_from_headers(all_headers, include_paths)
# Allow for inner classes
def in_maybe_inner(k,h):
if '::' in k:
# (a bit loose but we can't forward-declare them so match
# individual elements instead)
return all([c in h for c in k.split('::')])
return k in h
# Find the join of the two lists
classes = {k: v for k,v in doxygen_classes.items()
if in_maybe_inner(k, header_classes) and not unwanted(k)}
print('{:} classes found.'.format(len(classes)))
if len(classes) < 10:
print('%s: Error, not enough classes found.'%sys.argv[0])
sys.exit(1)
assign_targets(classes, source_dir)
assign_priorities(classes, source_dir)
resolve_base_classes(classes)
remove_unwanted_resolved(classes)
with open(generated_file, 'w') as dest_file:
write_header(dest_file, ' '.join(sys.argv), generated_header)
write_includes(dest_file, all_headers)
sorted_classes = sorted(classes.values(),
key = lambda k: (k['priority'], k['name']))
class_list = [
(cl['name'],
cl['resolved_bases'],
[m for m in cl['members'] if not unwanted(m)]
)
for cl in sorted_classes]
write_classes(dest_file, class_list)
with_base = [(cl['name'],
cl['priority'],
cl['target'])
for cl in sorted_classes
if not cl['abstract']]
# Note: This was the condition before, but noticed that
# builder.py did not check the number of bases before adding
# to with_base!
# if len(cl['bases'])>0 and not cl['abstract']]
# filtering is not correct at this point
# some unwanted classes are necessary
# (the ones in SiconosFull.hpp) others not (xml)
# some leads to compilation errors.
write_register_with_bases(dest_file, with_base)
write_footer(dest_file)
# ===== File: /cords/utils/data/dataloader/SSL/adaptive/adaptivedataloader.py (decile-team/cords, MIT) =====
import logging, torch
from abc import abstractmethod
from torch.utils.data import DataLoader
from ..dssdataloader import DSSDataLoader
from cords.utils.data.datasets.SSL.utils import InfiniteSampler
from cords.utils.data.data_utils import WeightedSubset
class AdaptiveDSSDataLoader(DSSDataLoader):
"""
Implementation of AdaptiveDSSDataLoader class which serves as base class for dataloaders of other
adaptive subset selection strategies for semi-supervised learning framework.
Parameters
-----------
train_loader: torch.utils.data.DataLoader class
Dataloader of the training dataset
val_loader: torch.utils.data.DataLoader class
Dataloader of the validation dataset
dss_args: dict
Data subset selection arguments dictionary
logger: class
Logger for logging the information
"""
def __init__(self, train_loader, val_loader, dss_args,
logger, *args, **kwargs):
"""
Constructor function
"""
# Arguments assertion check
assert "select_every" in dss_args.keys(), "'select_every' is a compulsory argument. Include it as a key in dss_args"
assert "device" in dss_args.keys(), "'device' is a compulsory argument. Include it as a key in dss_args"
assert "kappa" in dss_args.keys(), "'kappa' is a compulsory argument. Include it as a key in dss_args"
assert "num_iters" in dss_args.keys(), "'num_iters' is a compulsory argument. Include it as a key in dss_args"
assert "batch_size" in kwargs.keys(), "'batch_size' is a compulsory argument. Include it as a key in kwargs"
assert "sampler" not in kwargs.keys(), "'sampler' is a prohibited argument. Do not include it as a key in kwargs"
assert "shuffle" not in kwargs.keys(), "'shuffle' is a prohibited argument. Do not include it as a key in kwargs"
self.select_every = dss_args.select_every
self.sel_iteration = int((self.select_every * len(train_loader.dataset) * dss_args.fraction) // (kwargs['batch_size']))
self.device = dss_args.device
self.kappa = dss_args.kappa
self.num_iters = dss_args.num_iters
if dss_args.kappa > 0:
assert "num_iters" in dss_args.keys(), "'num_iters' is a compulsory argument when warm starting the model(i.e., kappa > 0). Include it as a key in dss_args"
self.select_after = int(self.kappa * self.num_iters)
else:
self.select_after = 0
super(AdaptiveDSSDataLoader, self).__init__(train_loader.dataset, dss_args,
logger, *args, **kwargs)
self.train_loader = train_loader
self.val_loader = val_loader
self.wtdataloader = DataLoader(self.wt_trainset,
sampler=InfiniteSampler(len(self.wt_trainset), self.select_after * kwargs['batch_size']),
*self.loader_args, **self.loader_kwargs)
self.initialized = False
def _init_subset_loader(self):
# All strategies start with random selection
"""
Function that initializes the data subset loader
"""
self.subset_indices = self._init_subset_indices()
self.subset_weights = torch.ones(self.budget)
self._refresh_subset_loader()
def _refresh_subset_loader(self):
"""
Function that initializes the subset indices
"""
data_sub = WeightedSubset(self.dataset, self.subset_indices, self.subset_weights)
self.subset_loader = DataLoader(data_sub, sampler=InfiniteSampler(len(data_sub),
self.sel_iteration * self.loader_kwargs['batch_size']),
*self.loader_args, **self.loader_kwargs)
self.batch_wise_indices = list(self.subset_loader.batch_sampler)
if self.kappa > 0:
self.curr_loader = DataLoader(self.wt_trainset, sampler=InfiniteSampler(len(self.wt_trainset),
self.select_after * self.loader_kwargs['batch_size']),
*self.loader_args, **self.loader_kwargs)
else:
self.curr_loader = self.subset_loader
def __iter__(self):
"""
Iter function that returns the iterator of full data loader or data subset loader or empty loader based on the
warmstart kappa value.
"""
self.initialized = True
if self.cur_iter <= self.select_after:
self.logger.debug('Iteration: {0:d}, reading full dataloader... '.format(self.cur_iter))
self.curr_loader = self.wtdataloader
self.logger.debug('Iteration: {0:d}, finished reading full dataloader. '.format(self.cur_iter))
else:
self.logger.debug('Iteration: {0:d}, reading subset dataloader... '.format(self.cur_iter))
if self.cur_iter > 1:
self.resample()
self.curr_loader = self.subset_loader
self.logger.debug('Iteration: {0:d}, finished reading dataloader. '.format(self.cur_iter))
self.cur_iter += len(list(self.curr_loader.batch_sampler))
return self.curr_loader.__iter__()
def __len__(self) -> int:
"""
Returns the length of the current data loader
"""
if self.cur_iter <= self.select_after:
self.logger.debug('Iteration: {0:d}, reading full dataloader... '.format(self.cur_iter))
loader = self.wtdataloader
#self.logger.debug('Epoch: {0:d}, finished reading dataloader. '.format(self.cur_epoch))
return len(loader)
else:
self.logger.debug('Iteration: {0:d}, reading subset dataloader... '.format(self.cur_iter))
loader = self.subset_loader
return len(loader)
def resample(self):
"""
Function that resamples the subset indices and recalculates the subset weights
"""
self.subset_indices, self.subset_weights = self._resample_subset_indices()
self.logger.debug("Subset indices length: %d", len(self.subset_indices))
self._refresh_subset_loader()
self.logger.debug("Subset loader initiated, args: %s, kwargs: %s", self.loader_args, self.loader_kwargs)
self.logger.debug('Subset selection finished, Training data size: %d, Subset size: %d',
self.len_full, len(self.subset_loader.dataset))
@abstractmethod
def _resample_subset_indices(self):
        raise NotImplementedError("Subclasses must implement _resample_subset_indices.")
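# A minimal sketch (not part of the library) of what a concrete subclass could
# look like, using plain random re-selection; real strategies replace the
# random choice with gradient- or loss-based scoring:
#
#   import numpy as np
#
#   class RandomDSSDataLoader(AdaptiveDSSDataLoader):
#       def _resample_subset_indices(self):
#           indices = np.random.choice(self.len_full, size=self.budget, replace=False)
#           weights = torch.ones(self.budget)
#           return indices, weights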
# ===== File: /packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py (apple/swift-lldb, Apache-2.0) =====
from __future__ import print_function
import gdbremote_testcase
import textwrap
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
def _extract_register_value(reg_info, reg_bank, byte_order, bytes_per_entry=8):
reg_offset = int(reg_info["offset"])*2
reg_byte_size = int(2 * int(reg_info["bitsize"]) / 8)
# Create slice with the contents of the register.
reg_slice = reg_bank[reg_offset:reg_offset+reg_byte_size]
reg_value = []
# Wrap slice according to bytes_per_entry.
for entry in textwrap.wrap(reg_slice, 2 * bytes_per_entry):
# Invert the bytes order if target uses little-endian.
if byte_order == lldb.eByteOrderLittle:
entry = "".join(reversed([entry[i:i+2] for i in range(0,
len(entry),2)]))
reg_value.append("0x" + entry)
return reg_value
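# Example: for a little-endian 64-bit register at offset 8, the value occupies
# reg_bank[16:32] (two hex digits per byte); reversing the byte order yields a
# printable value such as '0x0102030405060708'.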
class TestGdbRemoteGPacket(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def run_test_g_packet(self):
self.build()
self.prep_debug_monitor_and_inferior()
self.test_sequence.add_log_lines(
["read packet: $g#67",
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
"capture": {1: "register_bank"}}],
True)
self.connect_to_debug_monitor()
context = self.expect_gdbremote_sequence()
register_bank = context.get("register_bank")
self.assertTrue(register_bank[0] != 'E')
self.test_sequence.add_log_lines(
["read packet: $G" + register_bank + "#00",
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
"capture": {1: "G_reply"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertTrue(context.get("G_reply")[0] != 'E')
@skipIfOutOfTreeDebugserver
@debugserver_test
@skipIfDarwinEmbedded
def test_g_packet_debugserver(self):
self.init_debugserver_test()
self.run_test_g_packet()
@skipIf(archs=no_match(["x86_64"]))
def g_returns_correct_data(self, with_suffix):
procs = self.prep_debug_monitor_and_inferior()
self.add_register_info_collection_packets()
if with_suffix:
self.add_thread_suffix_request_packets()
self.add_threadinfo_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather register info.
reg_infos = self.parse_register_info_packets(context)
self.assertIsNotNone(reg_infos)
self.add_lldb_register_index(reg_infos)
# Index register info entries by name.
reg_infos = {info['name']: info for info in reg_infos}
# Gather thread info.
if with_suffix:
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
thread_id = threads[0]
self.assertIsNotNone(thread_id)
else:
thread_id = None
# Send vCont packet to resume the inferior.
self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
"capture": {1: "hex_exit_code"}},
],
True)
# Send g packet to retrieve the register bank
if thread_id:
g_request = "read packet: $g;thread:{:x}#00".format(thread_id)
else:
g_request = "read packet: $g#00"
self.test_sequence.add_log_lines(
[g_request,
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
"capture": {1: "register_bank"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
reg_bank = context.get("register_bank")
self.assertTrue(reg_bank[0] != 'E')
byte_order = self.get_target_byte_order()
get_reg_value = lambda reg_name : _extract_register_value(
reg_infos[reg_name], reg_bank, byte_order)
self.assertEqual(['0x0102030405060708'], get_reg_value('r8'))
self.assertEqual(['0x1112131415161718'], get_reg_value('r9'))
self.assertEqual(['0x2122232425262728'], get_reg_value('r10'))
self.assertEqual(['0x3132333435363738'], get_reg_value('r11'))
self.assertEqual(['0x4142434445464748'], get_reg_value('r12'))
self.assertEqual(['0x5152535455565758'], get_reg_value('r13'))
self.assertEqual(['0x6162636465666768'], get_reg_value('r14'))
self.assertEqual(['0x7172737475767778'], get_reg_value('r15'))
self.assertEqual(
['0x020406080a0c0e01', '0x030507090b0d0f00'], get_reg_value('xmm8'))
self.assertEqual(
['0x121416181a1c1e11', '0x131517191b1d1f10'], get_reg_value('xmm9'))
self.assertEqual(
['0x222426282a2c2e21', '0x232527292b2d2f20'], get_reg_value('xmm10'))
self.assertEqual(
['0x323436383a3c3e31', '0x333537393b3d3f30'], get_reg_value('xmm11'))
self.assertEqual(
['0x424446484a4c4e41', '0x434547494b4d4f40'], get_reg_value('xmm12'))
self.assertEqual(
['0x525456585a5c5e51', '0x535557595b5d5f50'], get_reg_value('xmm13'))
self.assertEqual(
['0x626466686a6c6e61', '0x636567696b6d6f60'], get_reg_value('xmm14'))
self.assertEqual(
['0x727476787a7c7e71', '0x737577797b7d7f70'], get_reg_value('xmm15'))
@llgs_test
def test_g_returns_correct_data_with_suffix_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.g_returns_correct_data(True)
@llgs_test
def test_g_returns_correct_data_no_suffix_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.g_returns_correct_data(False)
# ===== File: /mono-example/module.py (cloudius-systems/osv-apps, no license) =====
from osv.modules import api
default = api.run("--env=MONO_DISABLE_SHARED_AREA=true /run_mono hello.exe")
# ===== File: /kolibri/core/auth/utils/users.py (learningequality/kolibri, MIT) =====
import requests
from django.core.management.base import CommandError
from requests.exceptions import ConnectionError
from requests.exceptions import HTTPError
from rest_framework.exceptions import AuthenticationFailed
from kolibri.core import error_constants
from kolibri.core.auth.backends import FACILITY_CREDENTIAL_KEY
from kolibri.core.auth.constants.demographics import NOT_SPECIFIED
from kolibri.core.auth.models import AdHocGroup
from kolibri.core.auth.models import Membership
from kolibri.core.utils.urls import reverse_remote
def create_adhoc_group_for_learners(classroom, learners):
adhoc_group = AdHocGroup.objects.create(name="Ad hoc", parent=classroom)
for learner in learners:
Membership.objects.create(user=learner, collection=adhoc_group)
return adhoc_group
def get_remote_users_info(baseurl, facility_id, username, password):
"""
    Using basic auth, returns info about the requested username.
    If the requested username has admin rights it will also return
    the list of users of the facility.
:param baseurl: First part of the url of the server that's going to be requested
:param facility_id: Id of the facility to authenticate and get the list of users
:param username: Username of the user that's going to authenticate
:param password: Password of the user that's going to authenticate
:return: Dict with two keys: 'user' containing info of the user that authenticated and
'users' containing the list of users of the facility if the user had rights.
"""
user_info_url = reverse_remote(baseurl, "kolibri:core:publicuser-list")
params = {"facility_id": facility_id}
try:
response = requests.get(
user_info_url,
params=params,
auth=(
"username={}&{}={}".format(
username, FACILITY_CREDENTIAL_KEY, facility_id
),
password,
),
)
response.raise_for_status()
except (CommandError, HTTPError, ConnectionError) as e:
detail = []
if password == NOT_SPECIFIED or not password:
detail = [{"id": error_constants.MISSING_PASSWORD}]
raise AuthenticationFailed(detail)
else:
detail = [{"id": error_constants.AUTHENTICATION_FAILED, "message": e}]
raise AuthenticationFailed(detail)
auth_info = response.json()
if len(auth_info) > 1:
        user_info = [u for u in auth_info if u["username"] == username][0]
else:
user_info = auth_info[0]
facility_info = {"user": user_info, "users": auth_info}
return facility_info
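# Example usage (hypothetical values):
#
#   info = get_remote_users_info(
#       "http://kolibri.example.com:8080", facility_id, "admin", "secret")
#   print(info["user"]["username"], "can see", len(info["users"]), "users")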
# ===== File: /tests/integration/test_models/test_full_battery_models/test_lead_acid/test_side_reactions/test_loqs_side_reactions.py (pybamm-team/PyBaMM, BSD-3-Clause) =====
#
# Tests for the lead-acid LOQS model
#
from tests import TestCase
import pybamm
import tests
import unittest
class TestLeadAcidLOQSWithSideReactions(TestCase):
def test_discharge_differential(self):
options = {"surface form": "differential", "hydrolysis": "true"}
model = pybamm.lead_acid.LOQS(options)
modeltest = tests.StandardModelTest(model)
modeltest.test_all()
def test_discharge_differential_varying_surface_area(self):
options = {
"surface form": "differential",
"hydrolysis": "true",
}
model = pybamm.lead_acid.LOQS(options)
modeltest = tests.StandardModelTest(model)
modeltest.test_all()
def test_discharge_algebraic(self):
options = {"surface form": "algebraic", "hydrolysis": "true"}
model = pybamm.lead_acid.LOQS(options)
modeltest = tests.StandardModelTest(model)
modeltest.test_all(skip_output_tests=True)
def test_charge(self):
options = {"surface form": "differential", "hydrolysis": "true"}
model = pybamm.lead_acid.LOQS(options)
parameter_values = model.default_parameter_values
parameter_values.update(
{"Current function [A]": -1, "Initial State of Charge": 0.5}
)
modeltest = tests.StandardModelTest(model, parameter_values=parameter_values)
modeltest.test_all(skip_output_tests=True)
def test_zero_current(self):
options = {"surface form": "differential", "hydrolysis": "true"}
model = pybamm.lead_acid.LOQS(options)
parameter_values = model.default_parameter_values
parameter_values.update({"Current function [A]": 0})
modeltest = tests.StandardModelTest(model, parameter_values=parameter_values)
modeltest.test_all(skip_output_tests=True)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
unittest.main()
# ===== File: /colour_demosaicing/bayer/masks.py (colour-science/colour-demosaicing, BSD-3-Clause) =====
"""
Bayer CFA Masks
===============
*Bayer* CFA (Colour Filter Array) masks generation.
"""
from __future__ import annotations
import numpy as np
from colour.hints import Literal, NDArray, Tuple
from colour.utilities import validate_method
__author__ = "Colour Developers"
__copyright__ = "Copyright 2015 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"masks_CFA_Bayer",
]
def masks_CFA_Bayer(
shape: int | Tuple[int, ...],
pattern: Literal["RGGB", "BGGR", "GRBG", "GBRG"] | str = "RGGB",
) -> Tuple[NDArray, ...]:
"""
Return the *Bayer* CFA red, green and blue masks for given pattern.
Parameters
----------
shape
Dimensions of the *Bayer* CFA.
pattern
Arrangement of the colour filters on the pixel array.
Returns
-------
:class:`tuple`
*Bayer* CFA red, green and blue masks.
Examples
--------
>>> from pprint import pprint
>>> shape = (3, 3)
>>> pprint(masks_CFA_Bayer(shape))
(array([[ True, False, True],
[False, False, False],
[ True, False, True]], dtype=bool),
array([[False, True, False],
[ True, False, True],
[False, True, False]], dtype=bool),
array([[False, False, False],
[False, True, False],
[False, False, False]], dtype=bool))
>>> pprint(masks_CFA_Bayer(shape, "BGGR"))
(array([[False, False, False],
[False, True, False],
[False, False, False]], dtype=bool),
array([[False, True, False],
[ True, False, True],
[False, True, False]], dtype=bool),
array([[ True, False, True],
[False, False, False],
[ True, False, True]], dtype=bool))
"""
pattern = validate_method(
pattern,
("RGGB", "BGGR", "GRBG", "GBRG"),
'"{0}" CFA pattern is invalid, it must be one of {1}!',
).upper()
channels = {channel: np.zeros(shape, dtype="bool") for channel in "RGB"}
for channel, (y, x) in zip(pattern, [(0, 0), (0, 1), (1, 0), (1, 1)]):
channels[channel][y::2, x::2] = 1
return tuple(channels.values())
# ===== File: /kolibri/core/notifications/kolibri_plugin.py (learningequality/kolibri, MIT) =====
|
from kolibri.core.auth.hooks import FacilityDataSyncHook
from kolibri.core.auth.sync_event_hook_utils import get_other_side_kolibri_version
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.notifications.api import batch_process_attemptlogs
from kolibri.core.notifications.api import batch_process_examlogs
from kolibri.core.notifications.api import batch_process_masterylogs_for_quizzes
from kolibri.core.notifications.api import batch_process_summarylogs
from kolibri.core.upgrade import matches_version
from kolibri.plugins.hooks import register_hook
from kolibri.utils.version import truncate_version
@register_hook
class NotificationsSyncHook(FacilityDataSyncHook):
def post_transfer(
self,
dataset_id,
local_is_single_user,
remote_is_single_user,
single_user_id,
context,
):
"""
Generates notifications at cleanup stage (the end) of a transfer, if our instance was a
"receiver" meaning we have received data
"""
        # if we've just received data and this is not a single-user device,
        # generate notifications for the received logs
if context.is_receiver and not local_is_single_user:
batch_process_summarylogs(
context.transfer_session.get_touched_record_ids_for_model(
ContentSummaryLog
)
)
batch_process_attemptlogs(
context.transfer_session.get_touched_record_ids_for_model(AttemptLog)
)
# exam logs are deprecated beyond 0.15.0, but process them if syncing with version
# pre-0.15.0
remote_version = get_other_side_kolibri_version(context)
if remote_version is None or matches_version(
truncate_version(remote_version), "<0.15.0"
):
batch_process_examlogs(
context.transfer_session.get_touched_record_ids_for_model(ExamLog),
context.transfer_session.get_touched_record_ids_for_model(
ExamAttemptLog
),
)
else:
batch_process_masterylogs_for_quizzes(
context.transfer_session.get_touched_record_ids_for_model(
MasteryLog
),
context.transfer_session.get_touched_record_ids_for_model(
AttemptLog
),
)
# ===== File: /cd4ml/problems/houses/readers/zip_lookup.py (ThoughtWorksInc/CD4ML-Scenarios, MIT) =====
from csv import DictReader
from cd4ml.filenames import get_problem_files
def get_zip_lookup(problem_name):
file_names = get_problem_files(problem_name)
filename = file_names['house_data_zip_lookup']
    # Use a context manager so the file handle is closed deterministically.
    with open(filename, 'r') as file_in:
        return {row['zipcode']: row for row in DictReader(file_in)}
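# Example (hypothetical): fetch the lookup table and query a single zipcode
#
#   zip_lookup = get_zip_lookup('houses')
#   row = zip_lookup.get('98101')  # None if the zipcode is absent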
# ===== File: /tests/test_corruption.py (henu/bigjson, MIT) =====
|
from io import BytesIO
from unittest import TestCase
import bigjson
MISSING_OPEN_QUOTE_JSON_FILE = b"""
{
"object": {
"x": y"
}
}
"""
CORRUPT_BACKSLASH_ENCODING_JSON_FILE = b"""
{
"string": "\qblah"
}
"""
MISSING_DIGIT_AFTER_DOT_JSON_FILE = b"""
{
"number": 14.
}
"""
class TestCorruption(TestCase):
def test_missing_open_quote(self):
file = BytesIO(MISSING_OPEN_QUOTE_JSON_FILE)
data = bigjson.load(file)
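        # bigjson parses lazily: load() itself succeeds, and the corruption
        # only surfaces once the document is traversed (here via len()).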
with self.assertRaises(Exception) as e:
_ = len(data)
self.assertEqual(e.exception.args[0], "Unexpected bytes! Value 'y' Position 32")
def test_corrupt_backslash_encoding(self):
file = BytesIO(CORRUPT_BACKSLASH_ENCODING_JSON_FILE)
data = bigjson.load(file)
with self.assertRaises(Exception) as e:
_ = len(data)
self.assertEqual(e.exception.args[0], "Unexpected \\q in backslash encoding! Position 19")
def test_missing_digit_after_dot(self):
file = BytesIO(MISSING_DIGIT_AFTER_DOT_JSON_FILE)
data = bigjson.load(file)
with self.assertRaises(Exception) as e:
_ = len(data)
self.assertEqual(e.exception.args[0], "Expected digit after dot! Position 21")
|
261aa262140fb0c167d8c728740a1eea927e01b2
|
6dd5027d9f02b2c40c96fdea9796a4fba6ee7e46
|
/tests/test_reduce.py
|
d48739ac0a5463df9b690f23274ca90f86e75721
|
[
"Apache-2.0"
] |
permissive
|
cornell-zhang/heterocl
|
fb4fd3c9cdbb7c7ccbdb2a8a09f47b436200c8f6
|
b794409e68e326cafa6c3eaec2e3560ff066e129
|
refs/heads/main
| 2023-07-22T16:33:57.900104
| 2023-07-19T19:58:13
| 2023-07-19T19:58:13
| 114,906,951
| 312
| 111
|
Apache-2.0
| 2023-07-19T19:58:15
| 2017-12-20T16:13:52
|
Python
|
UTF-8
|
Python
| false
| false
| 8,723
|
py
|
test_reduce.py
|
# Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import heterocl as hcl
import numpy as np
def test_reduce_basic():
hcl.init()
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r], axis=r))
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A)
assert ret_B[0] == golden_B
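# The same pattern recurs throughout this file: hcl.reducer(init, fn) builds a custom
# reduction operator, hcl.reduce_axis(lo, hi) declares the axis to reduce over, and
# applying the reducer inside hcl.compute collapses that axis (here, a plain sum).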
def test_reduce_cond():
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r], axis=r, where=A[r] > 5))
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A[np.where(np_A > 5)])
assert ret_B[0] == golden_B
def test_reduce_dtype():
def kernel(A):
# my_sum will perform integer reduction
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute(
(1,), lambda x: my_sum(A[r], axis=r, dtype=hcl.Float()), dtype=hcl.Float()
)
A = hcl.placeholder((10,), dtype=hcl.Float())
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.rand(10)
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A, dtype=hcl.Float())
hcl_B = hcl.asarray(np_B, dtype=hcl.Float())
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A)
assert np.isclose(ret_B[0], golden_B)
def test_reduce_dtype_2():
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r], axis=r, dtype=hcl.UInt(2)))
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A)
assert ret_B[0] == golden_B % 4
def test_reduce_different_init():
def kernel(a, A):
my_sum = hcl.reducer(a.v, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r], axis=r))
a = hcl.placeholder(())
A = hcl.placeholder((10,))
s = hcl.create_schedule([a, A], kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(10, hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A)
assert ret_B[0] == golden_B + 10
def test_reduce_2D():
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((10,), lambda x: my_sum(A[r, x], axis=r))
A = hcl.placeholder((10, 10))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10, 10))
np_B = np.zeros(10)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A, axis=0)
assert np.array_equal(ret_B, golden_B)
def test_reduce_2D_2():
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1, 10), lambda x, y: my_sum(A[r, y], axis=r))
A = hcl.placeholder((10, 10))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10, 10))
np_B = np.zeros((1, 10))
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A, axis=0)
assert np.array_equal(ret_B[0], golden_B)
def test_reduce_multi_axes():
def kernel(A):
my_sum = hcl.reducer(0, lambda x, y: x + y)
r1 = hcl.reduce_axis(0, 10)
r2 = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r1, r2], axis=[r1, r2]))
A = hcl.placeholder((10, 10))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10, 10))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = np.sum(np_A)
assert ret_B[0] == golden_B
def test_reduce_complex_reducer():
def kernel(A):
def reducer_body(x, y):
res = hcl.scalar(0, "res")
with hcl.if_(x > 5):
res.v = y + 1
with hcl.else_():
res.v = y + 2
return res.v
my_sum = hcl.reducer(0, reducer_body)
r = hcl.reduce_axis(0, 10)
return hcl.compute((1,), lambda x: my_sum(A[r], axis=r))
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(1)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B, dtype=hcl.Int(32))
f(hcl_A, hcl_B)
ret_B = hcl_B.asnumpy()
golden_B = 10 + len(np_A[np.where(np_A <= 5)])
assert ret_B[0] == golden_B
def _test_maxpool(in_shape, out_shape, stride, kernel, test_dtype):
def max_pool(data):
h = hcl.reduce_axis(0, kernel)
w = hcl.reduce_axis(0, kernel)
return hcl.compute(
out_shape,
lambda hh, ww: hcl.max(
data[stride * hh + h, stride * ww + w], axis=[h, w], dtype=test_dtype
),
name="max_pool",
dtype=test_dtype,
)
A = hcl.placeholder(in_shape, "A", dtype=test_dtype)
s = hcl.create_schedule([A], max_pool)
f = hcl.build(s)
np_a = np.random.randint(0, 10, size=in_shape)
a = hcl.asarray(np_a, dtype=test_dtype)
b = hcl.asarray(np.zeros(out_shape), dtype=test_dtype)
f(a, b)
np_b = b.asnumpy()
b_golden = np.zeros(out_shape)
for i in range(out_shape[0]):
for j in range(out_shape[1]):
b_golden[i, j] = np.max(
np_a[i * stride : i * stride + kernel, j * stride : j * stride + kernel]
)
assert np.allclose(np_b, b_golden)
def test_maxpool():
in_shape = (2, 8)
out_shape = (1, 4)
stride = 2
kernel = 2
test_dtypes = [hcl.Int(10), hcl.Int(8), hcl.Int(6), hcl.Fixed(12, 6)]
for test_dtype in test_dtypes:
_test_maxpool(in_shape, out_shape, stride, kernel, test_dtype)
def _test_meanpool(in_shape, out_shape, stride, kernel, test_dtype):
def mean_pool(data):
h = hcl.reduce_axis(0, kernel)
w = hcl.reduce_axis(0, kernel)
return hcl.compute(
out_shape,
lambda hh, ww: (
hcl.sum(
data[stride * hh + h, stride * ww + w],
axis=[h, w],
dtype=test_dtype,
)
)
/ (kernel * kernel),
name="mean_pool",
dtype=test_dtype,
)
# 0,0 0,1 0,2 0,3 0,4 0,5 0,6 0,7
# 1,0 1,1 1,2 1,3 1,4 1,5 1,6 1,7
A = hcl.placeholder(in_shape, "A", dtype=test_dtype) # (2,8)
B = hcl.placeholder(out_shape, "B", dtype=test_dtype) # (1,4)
s = hcl.create_schedule([A], mean_pool)
f = hcl.build(s)
    np_a = np.random.randint(0, 10, size=in_shape)  # random integers in [0, 10)
    print(np_a)
    a = hcl.asarray(np_a, dtype=test_dtype)  # input array
    b = hcl.asarray(np.zeros(out_shape), dtype=test_dtype)  # output array
    f(a, b)
    np_b = b.asnumpy()  # convert the output back to a NumPy array
    b_golden = np.zeros(out_shape)  # reference result computed with NumPy
for i in range(out_shape[0]):
for j in range(out_shape[1]):
b_golden[i, j] = np.mean(
np_a[i * stride : i * stride + kernel, j * stride : j * stride + kernel]
)
assert np.allclose(np_b, b_golden)
def test_meanpool():
in_shape = (2, 8)
out_shape = (1, 4)
stride = 2
kernel = 2
test_dtypes = [hcl.Float(32)]
for test_dtype in test_dtypes:
_test_meanpool(in_shape, out_shape, stride, kernel, test_dtype)
|
29e47afbe5ea03685b2c012efa1b76139a1b3c07
|
0cb8fcfab73b1e79749e22f5003b73c5f4c82efa
|
/test/PerfTests/Python/ManyInstances/__init__.py
|
2be56791845e8aede461c6cf1a02daf2224ae218
|
[
"MIT"
] |
permissive
|
Azure/azure-functions-durable-extension
|
6f9736b0c161840b074539ed7bad692bdae7038f
|
dd47436ac6b835023b30fb3138dcd14c71e45360
|
refs/heads/dev
| 2023-09-04T00:44:46.271075
| 2023-08-31T22:30:16
| 2023-08-31T22:30:16
| 93,190,658
| 699
| 318
|
MIT
| 2023-09-14T17:14:33
| 2017-06-02T17:51:30
|
C#
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
__init__.py
|
import logging
import azure.functions as func
import azure.durable_functions as df
from shared_utils.parse_and_validate_input import parse_and_validate_input
import asyncio
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
client = df.DurableOrchestrationClient(starter)
num_instances = parse_and_validate_input(req.get_body())
tasks = map(lambda _: client.start_new("SequentialOrchestrator"), range(num_instances))
await gather(tasks=tasks, max_concurrency=200)
return ""
async def gather(tasks, max_concurrency: int):
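    # A semaphore bounds how many of the supplied coroutines run at once, so at most
    # max_concurrency orchestrations are being started concurrently.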
semaphore = asyncio.Semaphore(max_concurrency)
async def sem_task(task):
async with semaphore:
return await task
return await asyncio.gather(*(sem_task(task) for task in tasks))
|
414c01c103260ee8d8b261a5d63a31ceb18a1b2c
|
fc160694094b89ab09e5c9a0f03db80437eabc93
|
/owl-bot-postprocessor/synthtool/gcp/pregenerated.py
|
d6cdb2aaaacadabb07a6f9b0eac8b2b9eae6f4f9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
googleapis/google-cloud-java
|
4f4d97a145e0310db142ecbc3340ce3a2a444e5e
|
6e23c3a406e19af410a1a1dd0d0487329875040e
|
refs/heads/main
| 2023-09-04T09:09:02.481897
| 2023-08-31T20:45:11
| 2023-08-31T20:45:11
| 26,181,278
| 1,122
| 685
|
Apache-2.0
| 2023-09-13T21:21:23
| 2014-11-04T17:57:16
|
Java
|
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
pregenerated.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import os
import shutil
import tempfile
from synthtool.log import logger
from synthtool.sources import git
class Pregenerated:
"""A synthtool component that copies pregenerated bazel code."""
def __init__(self):
local_clone = os.environ.get("SYNTHTOOL_GOOGLEAPIS_GEN")
if local_clone:
self._googleapis_gen = Path(local_clone).expanduser()
logger.debug(f"Using local googleapis-gen at {self._googleapis_gen}")
else:
logger.debug("Cloning googleapis-gen.")
self._googleapis_gen = git.clone(
git.make_repo_clone_url("googleapis/googleapis-gen")
)
def generate(self, path: str) -> Path:
# shutil.copytree(dirs_exist_ok=True) does not exist until python 3.8
tempdir = Path(tempfile.mkdtemp()) / "code"
# make a copy of the code at the provided path because autosynth
# may not reset the source git repository
shutil.copytree(self._googleapis_gen / path, tempdir)
return tempdir
|
402e1f8319d27a01f0329d6d47dff4addfc44f38
|
c6b13016ff1c07fd612e27a4cf33dc4cba4146d5
|
/gokart/utils.py
|
56f8e2c647c9c4ebecd622aa39ca660646e160d0
|
[
"MIT"
] |
permissive
|
m3dev/gokart
|
64bd6c40197b002587d013ac9e2b61315451812a
|
f5a368304f9cb8db8d392f15f0f9c0ac956cd999
|
refs/heads/master
| 2023-08-04T09:10:53.567582
| 2023-08-02T20:38:11
| 2023-08-02T20:38:11
| 162,871,731
| 308
| 59
|
MIT
| 2023-08-02T20:38:13
| 2018-12-23T07:40:27
|
Python
|
UTF-8
|
Python
| false
| false
| 199
|
py
|
utils.py
|
import os
import luigi
def add_config(file_path: str):
_, ext = os.path.splitext(file_path)
luigi.configuration.core.parser = ext
assert luigi.configuration.add_config_path(file_path)
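# Minimal usage sketch (hypothetical path): register an extra config file with luigi,
# letting its extension select the parser, e.g.
#   add_config('./conf/param.ini')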
|
7bbf16a4323720baae80bbda2ffb4e1d07c81f87
|
8de79ab1818c535dcd8ad6e0c92b5c9642ffb82a
|
/sphinx/environment/adapters/indexentries.py
|
6fdbea6fa5deba1d8dfb7107d350288c28bc0fce
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sphinx-doc/sphinx
|
632d75bfc7bef14904f3d847e6de6d37594a13a5
|
eab54533a56119c5badd5aac647c595a9adae720
|
refs/heads/master
| 2023-08-16T18:21:54.073511
| 2023-08-15T17:36:47
| 2023-08-15T17:36:47
| 28,710,753
| 6,138
| 2,587
|
NOASSERTION
| 2023-09-14T14:22:28
| 2015-01-02T10:53:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,589
|
py
|
indexentries.py
|
"""Index entries adapters for sphinx.environment."""
from __future__ import annotations
import re
import unicodedata
from itertools import groupby
from typing import TYPE_CHECKING, Any, Literal
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.index_entries import _split_into
if TYPE_CHECKING:
from sphinx.builders import Builder
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
class IndexEntries:
def __init__(self, env: BuildEnvironment) -> None:
self.env = env
self.builder: Builder
def create_index(self, builder: Builder, group_entries: bool = True,
_fixre: re.Pattern = re.compile(r'(.*) ([(][^()]*[)])'),
) -> list[tuple[str, list[tuple[str, Any]]]]:
"""Create the real index from the collected index entries."""
new: dict[str, list] = {}
rel_uri: str | Literal[False]
index_domain = self.env.domains['index']
for docname, entries in index_domain.entries.items():
try:
rel_uri = builder.get_relative_uri('genindex', docname)
except NoUri:
rel_uri = False
# new entry types must be listed in directives/other.py!
for entry_type, value, target_id, main, category_key in entries:
uri = rel_uri is not False and f'{rel_uri}#{target_id}'
try:
if entry_type == 'single':
try:
entry, sub_entry = _split_into(2, 'single', value)
except ValueError:
entry, = _split_into(1, 'single', value)
sub_entry = ''
_add_entry(entry, sub_entry, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'pair':
first, second = _split_into(2, 'pair', value)
_add_entry(first, second, main,
dic=new, link=uri, key=category_key)
_add_entry(second, first, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'triple':
first, second, third = _split_into(3, 'triple', value)
_add_entry(first, second + ' ' + third, main,
dic=new, link=uri, key=category_key)
_add_entry(second, third + ', ' + first, main,
dic=new, link=uri, key=category_key)
_add_entry(third, first + ' ' + second, main,
dic=new, link=uri, key=category_key)
elif entry_type == 'see':
first, second = _split_into(2, 'see', value)
_add_entry(first, _('see %s') % second, None,
dic=new, link=False, key=category_key)
elif entry_type == 'seealso':
first, second = _split_into(2, 'see', value)
_add_entry(first, _('see also %s') % second, None,
dic=new, link=False, key=category_key)
else:
logger.warning(__('unknown index entry type %r'), entry_type,
location=docname)
except ValueError as err:
logger.warning(str(err), location=docname)
for (targets, sub_items, _category_key) in new.values():
targets.sort(key=_key_func_0)
for (sub_targets, _0, _sub_category_key) in sub_items.values():
sub_targets.sort(key=_key_func_0)
new_list = sorted(new.items(), key=_key_func_1)
if group_entries:
# fixup entries: transform
# func() (in module foo)
# func() (in module bar)
# into
# func()
# (in module foo)
# (in module bar)
old_key = ''
old_sub_items: dict[str, list] = {}
i = 0
while i < len(new_list):
key, (targets, sub_items, category_key) = new_list[i]
# cannot move if it has sub_items; structure gets too complex
if not sub_items:
m = _fixre.match(key)
if m:
if old_key == m.group(1):
# prefixes match: add entry as subitem of the
# previous entry
old_sub_items.setdefault(
m.group(2), [[], {}, category_key])[0].extend(targets)
del new_list[i]
continue
old_key = m.group(1)
else:
old_key = key
old_sub_items = sub_items
i += 1
return [(key_, list(group))
for (key_, group) in groupby(new_list, _key_func_3)]
def _add_entry(word: str, subword: str, main: str | None, *,
dic: dict[str, list], link: str | Literal[False], key: str | None) -> None:
entry = dic.setdefault(word, [[], {}, key])
if subword:
entry = entry[1].setdefault(subword, [[], {}, key])
if link:
entry[0].append((main, link))
def _key_func_0(entry: tuple[str, str]) -> tuple[bool, str]:
"""sort the index entries for same keyword."""
main, uri = entry
return not main, uri # show main entries at first
def _key_func_1(entry: tuple[str, list]) -> tuple[tuple[int, str], str]:
"""Sort the index entries"""
key, (_targets, _sub_items, category_key) = entry
if category_key:
# using the specified category key to sort
key = category_key
lc_key = unicodedata.normalize('NFD', key.lower())
if lc_key.startswith('\N{RIGHT-TO-LEFT MARK}'):
lc_key = lc_key[1:]
if not lc_key[0:1].isalpha() and not lc_key.startswith('_'):
# put symbols at the front of the index (0)
group = 0
else:
# put non-symbol characters at the following group (1)
group = 1
# ensure a deterministic order *within* letters by also sorting on
# the entry itself
return (group, lc_key), entry[0]
def _key_func_2(entry: tuple[str, list]) -> str:
"""sort the sub-index entries"""
key = unicodedata.normalize('NFD', entry[0].lower())
if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
key = key[1:]
if key[0:1].isalpha() or key.startswith('_'):
key = chr(127) + key
return key
def _key_func_3(entry: tuple[str, list]) -> str:
"""Group the entries by letter"""
key, (targets, sub_items, category_key) = entry
# hack: mutating the sub_items dicts to a list in the key_func
entry[1][1] = sorted(((sub_key, sub_targets)
for (sub_key, (sub_targets, _0, _sub_category_key))
in sub_items.items()), key=_key_func_2)
if category_key is not None:
return category_key
# now calculate the key
if key.startswith('\N{RIGHT-TO-LEFT MARK}'):
key = key[1:]
letter = unicodedata.normalize('NFD', key[0])[0].upper()
if letter.isalpha() or letter == '_':
return letter
# get all other symbols under one heading
return _('Symbols')
|
57cfefd97e00b9329d1b8953c5c40624a4bac4ca
|
e75a40843a8738b84bd529a549c45776d09e70d9
|
/samples/openapi3/server/petstore/python-flask/openapi_server/models/upload_form.py
|
3e566f6837aa3d93cba7e3e705105ed57bb026d2
|
[
"Apache-2.0"
] |
permissive
|
OpenAPITools/openapi-generator
|
3478dbf8e8319977269e2e84e0bf9960233146e3
|
8c2de11ac2f268836ac9bf0906b8bb6b4013c92d
|
refs/heads/master
| 2023-09-02T11:26:28.189499
| 2023-09-02T02:21:04
| 2023-09-02T02:21:04
| 133,134,007
| 17,729
| 6,577
|
Apache-2.0
| 2023-09-14T19:45:32
| 2018-05-12T09:57:56
|
Java
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
upload_form.py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class UploadForm(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, additional_metadata=None, file=None): # noqa: E501
"""UploadForm - a model defined in OpenAPI
:param additional_metadata: The additional_metadata of this UploadForm. # noqa: E501
:type additional_metadata: str
:param file: The file of this UploadForm. # noqa: E501
:type file: file
"""
self.openapi_types = {
'additional_metadata': str,
'file': file
}
self.attribute_map = {
'additional_metadata': 'additionalMetadata',
'file': 'file'
}
self._additional_metadata = additional_metadata
self._file = file
@classmethod
def from_dict(cls, dikt) -> 'UploadForm':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UploadForm of this UploadForm. # noqa: E501
:rtype: UploadForm
"""
return util.deserialize_model(dikt, cls)
@property
def additional_metadata(self):
"""Gets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:return: The additional_metadata of this UploadForm.
:rtype: str
"""
return self._additional_metadata
@additional_metadata.setter
def additional_metadata(self, additional_metadata):
"""Sets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:param additional_metadata: The additional_metadata of this UploadForm.
:type additional_metadata: str
"""
self._additional_metadata = additional_metadata
@property
def file(self):
"""Gets the file of this UploadForm.
file to upload # noqa: E501
:return: The file of this UploadForm.
:rtype: file
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this UploadForm.
file to upload # noqa: E501
:param file: The file of this UploadForm.
:type file: file
"""
if file is None:
raise ValueError("Invalid value for `file`, must not be `None`") # noqa: E501
self._file = file
|
5772af839f3eb2212ab6ecbccda25d96587e9b6b
|
6deafbf6257a5c30f084c3678712235c2c31a686
|
/Toolz/sqlmap/plugins/dbms/oracle/enumeration.py
|
981543516190687f0e44da259694749cb8c1369c
|
[
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
thezakman/CTF-Heaven
|
53fcb4a72afa821ad05d8cc3b309fb388f958163
|
4b52a2178922f1502ab00fa8fc156d35e1dc653f
|
refs/heads/master
| 2023-04-05T18:20:54.680378
| 2023-03-21T13:47:45
| 2023-03-21T13:47:45
| 167,290,879
| 182
| 24
|
Unlicense
| 2022-11-29T21:41:30
| 2019-01-24T02:44:24
|
Python
|
UTF-8
|
Python
| false
| false
| 5,972
|
py
|
enumeration.py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.common import getLimitRange
from lib.core.common import isAdminFromPrivileges
from lib.core.common import isInferenceAvailable
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.request import inject
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
def getRoles(self, query2=False):
infoMsg = "fetching database users roles"
rootQuery = queries[DBMS.ORACLE].roles
if conf.user == "CU":
infoMsg += " for current user"
conf.user = self.getCurrentUser()
logger.info(infoMsg)
# Set containing the list of DBMS administrators
areAdmins = set()
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if query2:
query = rootQuery.inband.query2
condition = rootQuery.inband.condition2
else:
query = rootQuery.inband.query
condition = rootQuery.inband.condition
if conf.user:
users = conf.user.split(',')
query += " WHERE "
query += " OR ".join("%s = '%s'" % (condition, user) for user in sorted(users))
values = inject.getValue(query, blind=False, time=False)
if not values and not query2:
infoMsg = "trying with table USER_ROLE_PRIVS"
logger.info(infoMsg)
return self.getRoles(query2=True)
if not isNoneValue(values):
for value in values:
user = None
roles = set()
for count in xrange(0, len(value or [])):
# The first column is always the username
if count == 0:
user = value[count]
# The other columns are the roles
else:
role = value[count]
# In Oracle we get the list of roles as string
roles.add(role)
if user in kb.data.cachedUsersRoles:
kb.data.cachedUsersRoles[user] = list(roles.union(kb.data.cachedUsersRoles[user]))
else:
kb.data.cachedUsersRoles[user] = list(roles)
if not kb.data.cachedUsersRoles and isInferenceAvailable() and not conf.direct:
if conf.user:
users = conf.user.split(',')
else:
if not len(kb.data.cachedUsers):
users = self.getUsers()
else:
users = kb.data.cachedUsers
retrievedUsers = set()
for user in users:
unescapedUser = None
if user in retrievedUsers:
continue
infoMsg = "fetching number of roles "
infoMsg += "for user '%s'" % user
logger.info(infoMsg)
if unescapedUser:
queryUser = unescapedUser
else:
queryUser = user
if query2:
query = rootQuery.blind.count2 % queryUser
else:
query = rootQuery.blind.count % queryUser
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
if count != 0 and not query2:
                        infoMsg = "trying with table USER_ROLE_PRIVS"
                        logger.info(infoMsg)
                        return self.getRoles(query2=True)
warnMsg = "unable to retrieve the number of "
warnMsg += "roles for user '%s'" % user
logger.warn(warnMsg)
continue
infoMsg = "fetching roles for user '%s'" % user
logger.info(infoMsg)
roles = set()
indexRange = getLimitRange(count, plusOne=True)
for index in indexRange:
if query2:
query = rootQuery.blind.query2 % (queryUser, index)
else:
query = rootQuery.blind.query % (queryUser, index)
role = inject.getValue(query, union=False, error=False)
# In Oracle we get the list of roles as string
roles.add(role)
if roles:
kb.data.cachedUsersRoles[user] = list(roles)
else:
warnMsg = "unable to retrieve the roles "
warnMsg += "for user '%s'" % user
logger.warn(warnMsg)
retrievedUsers.add(user)
if not kb.data.cachedUsersRoles:
errMsg = "unable to retrieve the roles "
errMsg += "for the database users"
raise SqlmapNoneDataException(errMsg)
for user, privileges in kb.data.cachedUsersRoles.items():
if isAdminFromPrivileges(privileges):
areAdmins.add(user)
return kb.data.cachedUsersRoles, areAdmins
|
d51452775744d289b27a7b4f3af5cae075e290f7
|
98a0afba7ba4e2bfcbb4882a20d38255034686a2
|
/registration.py
|
b1d6569fb7fd7ff11b19b47e0d4e195d15801cf0
|
[
"MIT"
] |
permissive
|
Grim-es/material-combiner-addon
|
ecf05cfc46a241c5a15bf5244162076d5c84ffe5
|
55a235dcd93455994aec1226bbbfd78092ff6463
|
refs/heads/master
| 2023-07-24T04:16:00.992944
| 2023-07-23T22:09:35
| 2023-07-23T22:09:35
| 145,512,509
| 380
| 36
|
MIT
| 2023-07-23T22:09:37
| 2018-08-21T05:44:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
registration.py
|
from typing import Dict
from typing import Union
import bpy
from . import addon_updater_ops
from . import extend_lists
from . import extend_types
from . import globs
from . import operators
from . import ui
from .icons import initialize_smc_icons
from .icons import unload_smc_icons
from .type_annotations import BlClasses
__bl_classes = [
ui.credits_menu.CreditsMenu,
ui.main_menu.MaterialMenu,
ui.property_menu.PropertyMenu,
ui.update_menu.UpdateMenu,
operators.combiner.Combiner,
operators.combine_list.RefreshObData,
operators.combine_list.CombineSwitch,
operators.multicombine_list.MultiCombineColor,
operators.multicombine_list.MultiCombineImageAdd,
operators.multicombine_list.MultiCombineImageMove,
operators.multicombine_list.MultiCombineImagePath,
operators.multicombine_list.MultiCombineImageReset,
operators.multicombine_list.MultiCombineImageRemove,
operators.browser.OpenBrowser,
operators.get_pillow.InstallPIL,
extend_types.CombineList,
extend_types.UpdatePreferences,
extend_lists.SMC_UL_Combine_List,
]
def register_all(bl_info: Dict[str, Union[str, tuple]]) -> None:
_register_classes()
initialize_smc_icons()
addon_updater_ops.register(bl_info)
addon_updater_ops.check_for_update_background()
extend_types.register()
def unregister_all() -> None:
_unregister_classes()
unload_smc_icons()
addon_updater_ops.unregister()
extend_types.unregister()
def _register_classes() -> None:
count = 0
for cls in __bl_classes:
make_annotations(cls)
try:
bpy.utils.register_class(cls)
count += 1
except ValueError as e:
print('Error:', cls, e)
print('Registered', count, 'Material Combiner classes.')
if count < len(__bl_classes):
print('Skipped', len(__bl_classes) - count, 'Material Combiner classes.')
def _unregister_classes() -> None:
count = 0
for cls in reversed(__bl_classes):
try:
bpy.utils.unregister_class(cls)
count += 1
except (ValueError, RuntimeError) as e:
print('Error:', cls, e)
print('Unregistered', count, 'Material Combiner classes.')
def make_annotations(cls: BlClasses) -> BlClasses:
if globs.is_blender_2_79_or_older:
return cls
if bpy.app.version >= (2, 93, 0):
bl_props = {k: v for k, v in cls.__dict__.items() if isinstance(v, bpy.props._PropertyDeferred)}
else:
bl_props = {k: v for k, v in cls.__dict__.items() if isinstance(v, tuple)}
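    # Blender 2.80+ expects class properties to be declared as annotations rather than
    # plain class attributes; the block below moves any property definitions found in
    # the class body into __annotations__ and removes the original attributes.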
if bl_props:
if '__annotations__' not in cls.__dict__:
setattr(cls, '__annotations__', {})
annotations = cls.__dict__['__annotations__']
for k, v in bl_props.items():
annotations[k] = v
delattr(cls, k)
return cls
|
6907da6a0cca2ea40fe0bae656d0325b02fadb48
|
23eafaa60b66769190227c80ac1b087697c72ed8
|
/web/tests/test_whoosh_index.py
|
011eb6676f6f08286ac72f5206485519fbfddc01
|
[
"MIT"
] |
permissive
|
devpi/devpi
|
fe4d76f2ab57da3a766a9e13c51cd4f229598083
|
56c266744ddfd182d46ca480b787ab44a6ee4692
|
refs/heads/main
| 2023-09-04T02:57:22.075986
| 2023-08-06T10:32:46
| 2023-08-06T10:32:46
| 86,787,680
| 760
| 148
| null | 2023-09-09T19:53:10
| 2017-03-31T06:51:39
|
Python
|
UTF-8
|
Python
| false
| false
| 17,960
|
py
|
test_whoosh_index.py
|
from devpi_web.indexing import ProjectIndexingInfo
import pytest
@pytest.mark.parametrize("input, expected", [
("Foo", [(0, 0, 3, "Foo")]),
("Foo Bar", [(0, 0, 3, "Foo"), (1, 4, 7, "Bar")]),
("Foo-Bar", [(0, 0, 3, "Foo"), (1, 4, 7, "Bar")]),
("Foo_Bar", [(0, 0, 3, "Foo"), (1, 4, 7, "Bar")]),
("1Foo Bar", [(0, 0, 4, "1Foo"), (1, 5, 8, "Bar")]),
("1Foo-Bar", [(0, 0, 4, "1Foo"), (1, 5, 8, "Bar")]),
("1Foo_Bar", [(0, 0, 4, "1Foo"), (1, 5, 8, "Bar")]),
("Foo 1Bar", [(0, 0, 3, "Foo"), (1, 4, 8, "1Bar")]),
("Foo-1Bar", [(0, 0, 3, "Foo"), (1, 4, 8, "1Bar")]),
("Foo_1Bar", [(0, 0, 3, "Foo"), (1, 4, 8, "1Bar")]),
("URLBar", [(0, 0, 6, "URLBar")]),
("BarURL", [(0, 0, 3, "Bar"), (1, 3, 6, "URL")]),
("FooBar", [(0, 0, 3, "Foo"), (1, 3, 6, "Bar")])])
def test_projectnametokenizer(input, expected):
from devpi_web.whoosh_index import ProjectNameTokenizer
tokenizer = ProjectNameTokenizer()
assert [
(x.pos, x.startchar, x.endchar, x.text)
for x in tokenizer(input, positions=True, chars=True)] == expected
@pytest.mark.parametrize("input, expected", [
(["devpi"], [
"de", "dev", "devp",
"ev", "evp", "evpi",
"vp", "vpi", "pi"]),
(["farglebargle"], [
"fa", "far", "farg",
"ar", "arg", "argl",
"rg", "rgl", "rgle",
"gl", "gle", "gleb",
"le", "leb", "leba",
"eb", "eba", "ebar",
"ba", "bar", "barg",
"ar", "arg", "argl",
"rg", "rgl", "rgle",
"gl", "gle", "le"]),
(["Hello", "World"], [
"He", "Hel", "Hell",
"el", "ell", "ello",
"ll", "llo", "lo",
"Wo", "Wor", "Worl",
"or", "orl", "orld",
"rl", "rld", "ld"])])
def test_ngramfilter(input, expected):
from devpi_web.whoosh_index import NgramFilter, Token
nf = NgramFilter()
token = Token()
def tokens():
for text in input:
token.text = text
yield token
result = [(len(x.text), x.text, x.boost) for x in nf(tokens())]
# three consecutive elements belong to the same position with different sizes
# use a modified recipe from itertools docs for grouping
size_groups = zip(*[iter(result)] * 3)
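    # e.g. [a, b, c, d, e, f] -> [(a, b, c), (d, e, f)]; each triple holds the ngrams
    # produced for a single starting position.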
for size_group in size_groups:
# add the reverse index as second tuple item
size_group = [
(x[0], i, x[1], x[2])
for i, x in zip(reversed(range(len(size_group))), size_group)]
# now if we sort, longer and further to the beginning a ngram is, the
# higher the boost
size_group = sorted(size_group)
ngrams = [x[2] for x in size_group]
boosts = [x[3] for x in size_group]
assert boosts[0] < boosts[1]
assert boosts[1] < boosts[2]
assert len(ngrams[0]) <= len(ngrams[1])
assert len(ngrams[1]) <= len(ngrams[2])
assert [x[1] for x in result] == expected
@pytest.mark.with_notifier
def test_search_after_register(mapp, testapp):
from devpi_web.main import get_indexer
indexer_thread = get_indexer(mapp.xom).indexer_thread
mapp.xom.thread_pool.start_one(indexer_thread)
api = mapp.create_and_use()
mapp.set_versiondata({
"name": "pkg1",
"version": "2.6",
"description": "foo"}, waithooks=True)
indexer_thread.wait()
r = testapp.get('/+search?query=foo', expect_errors=False)
links = r.html.select('.searchresults a')
assert [(l.text.strip(), l.attrs['href']) for l in links] == [
("pkg1-2.6", "http://localhost/%s/pkg1/2.6" % api.stagename),
("Description", "http://localhost/%s/pkg1/2.6#description" % api.stagename)]
mapp.set_versiondata({
"name": "pkg1",
"version": "2.7",
"description": "foo"}, waithooks=True)
indexer_thread.wait()
r = testapp.get('/+search?query=foo', expect_errors=False)
links = r.html.select('.searchresults a')
assert [(l.text.strip(), l.attrs['href']) for l in links] == [
("pkg1-2.7", "http://localhost/%s/pkg1/2.7" % api.stagename),
("Description", "http://localhost/%s/pkg1/2.7#description" % api.stagename)]
r = testapp.xget(200, '/+search?query=foo')
links = r.html.select('.searchresults a')
assert [(l.text.strip(), l.attrs['href']) for l in links] == [
("pkg1-2.7", "http://localhost/%s/pkg1/2.7" % api.stagename),
("Description", "http://localhost/%s/pkg1/2.7#description" % api.stagename)]
def test_indexer_relative_path():
from devpi_server.config import parseoptions, get_pluginmanager
from devpi_server.main import Fatal
from devpi_web.main import get_indexer_from_config
options = ("--indexer-backend", "whoosh:path=ham")
config = parseoptions(get_pluginmanager(), ("devpi-server",) + options)
with pytest.raises(Fatal, match="must be absolute"):
get_indexer_from_config(config)
@pytest.mark.nomocking
def test_dont_index_deleted_mirror(mapp, monkeypatch, simpypi, testapp):
from devpi_web.main import get_indexer
xom = mapp.xom
indexer = get_indexer(xom)
indexer_thread = indexer.indexer_thread
calls = []
monkeypatch.setattr(
indexer, "_update_project",
lambda *a, **kw: calls.append("update"))
monkeypatch.setattr(
indexer, "_delete_project",
lambda *a, **kw: calls.append("delete"))
simpypi.add_release("pkg", pkgver="pkg-1.0.zip")
mapp.login("root", "")
api = mapp.use("root/pypi")
mapp.modify_index(
"root/pypi",
indexconfig=dict(type="mirror", mirror_url=simpypi.simpleurl, volatile=True))
# fetching the index json triggers project names loading patched above
r = testapp.get_json(api.index)
assert r.status_code == 200
assert r.json["result"]["type"] == "mirror"
assert r.json["result"]["projects"] == ["pkg"]
(link,) = mapp.getreleaseslist("pkg")
assert "pkg-1.0.zip" in link
r = testapp.delete(api.index)
assert r.status_code == 201
r = testapp.get_json(api.index)
assert r.status_code == 404
# so far no events should have run
assert xom.keyfs.notifier.read_event_serial() == -1
# now start the event handler thread
xom.thread_pool.start_one(xom.keyfs.notifier)
serial = xom.keyfs.get_current_serial()
xom.keyfs.notifier.wait_event_serial(serial)
assert xom.keyfs.notifier.read_event_serial() == serial
# now start the indexer
xom.thread_pool.start_one(indexer_thread)
indexer_thread.wait()
assert calls == ["delete"]
class FakeStage(object):
def __init__(self, index_type):
self.ixconfig = dict(type=index_type)
self.name = index_type
self.serials = {}
self.serial = -1
def get_last_project_change_serial_perstage(self, project, at_serial=None):
return self.serials.get(project, self.serial)
class TestIndexingSharedData:
@pytest.fixture
def shared_data(self):
from devpi_web.whoosh_index import IndexingSharedData
return IndexingSharedData()
def test_mirror_priority(self, shared_data):
mirror = FakeStage('mirror')
stage = FakeStage('stage')
mirror_prj = ProjectIndexingInfo(stage=mirror, name='mirror_prj')
stage_prj = ProjectIndexingInfo(stage=stage, name='stage_prj')
result = []
def handler(is_from_mirror, serial, indexname, names):
(name,) = names
result.append(name)
# Regardless of the serial or add order, the stage should come first
cases = [
((mirror_prj, 0), (stage_prj, 0)),
((mirror_prj, 1), (stage_prj, 0)),
((mirror_prj, 0), (stage_prj, 1)),
((stage_prj, 0), (mirror_prj, 0)),
((stage_prj, 1), (mirror_prj, 0)),
((stage_prj, 0), (mirror_prj, 1))]
for (prj1, serial1), (prj2, serial2) in cases:
shared_data.add(prj1, serial1)
shared_data.add(prj2, serial2)
assert shared_data.queue.qsize() == 2
shared_data.process_next(handler)
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert result == ['stage_prj', 'mirror_prj']
result.clear()
@pytest.mark.parametrize("index_type", ["mirror", "stage"])
def test_serial_priority(self, index_type, shared_data):
stage = FakeStage(index_type)
prj = ProjectIndexingInfo(stage=stage, name='prj')
result = []
def handler(is_from_mirror, serial, indexname, names):
result.append(serial)
# Later serials come first
shared_data.add(prj, 1)
shared_data.add(prj, 100)
shared_data.add(prj, 10)
assert shared_data.queue.qsize() == 3
shared_data.process_next(handler)
shared_data.process_next(handler)
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert result == [100, 10, 1]
def test_error_queued(self, shared_data):
stage = FakeStage('stage')
prj = ProjectIndexingInfo(stage=stage, name='prj')
next_ts_result = []
handler_result = []
orig_next_ts = shared_data.next_ts
def next_ts(delay):
next_ts_result.append(delay)
return orig_next_ts(delay)
shared_data.next_ts = next_ts
def handler(is_from_mirror, serial, indexname, names):
(name,) = names
handler_result.append(name)
raise ValueError
# No waiting on empty queues
shared_data.QUEUE_TIMEOUT = 0
shared_data.add(prj, 0)
assert shared_data.queue.qsize() == 1
assert shared_data.error_queue.qsize() == 0
assert next_ts_result == []
assert handler_result == []
# An exception puts the info into the error queue
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert shared_data.error_queue.qsize() == 1
assert next_ts_result == [11]
assert handler_result == ['prj']
# Calling again doesn't change anything,
# because there is a delay on errors
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert shared_data.error_queue.qsize() == 1
assert next_ts_result == [11]
assert handler_result == ['prj']
# When removing the delay check, the handler is called again and the
# info re-queued with a longer delay
shared_data.is_in_future = lambda ts: False
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert shared_data.error_queue.qsize() == 1
assert next_ts_result == [11, 11 * shared_data.ERROR_QUEUE_DELAY_MULTIPLIER]
assert handler_result == ['prj', 'prj']
while 1:
# The delay is increased until reaching a maximum
shared_data.process_next(handler)
delay = next_ts_result[-1]
if delay >= shared_data.ERROR_QUEUE_MAX_DELAY:
break
# then it will stay there
shared_data.process_next(handler)
delay = next_ts_result[-1]
assert delay == shared_data.ERROR_QUEUE_MAX_DELAY
# The number of retries should be reasonable.
# Needs adjustment in case the ERROR_QUEUE_DELAY_MULTIPLIER
# or ERROR_QUEUE_MAX_DELAY is changed
assert len(next_ts_result) == 17
assert len(handler_result) == 17
def test_extend_differing_stage(self, shared_data):
mirror = FakeStage('mirror')
stage = FakeStage('stage')
mirror_prj = ProjectIndexingInfo(stage=mirror, name='mirror_prj')
stage_prj = ProjectIndexingInfo(stage=stage, name='stage_prj')
with pytest.raises(ValueError, match="Project isn't from same index"):
shared_data.extend([mirror_prj, stage_prj], 0)
def test_extend_max_names(self, shared_data):
shared_data.QUEUE_MAX_NAMES = 3
mirror = FakeStage('mirror')
prjs = []
for i in range(10):
prjs.append(ProjectIndexingInfo(stage=mirror, name='prj%d' % i))
result = []
def handler(is_from_mirror, serial, indexname, names):
result.append(names)
shared_data.extend(prjs, 0)
assert shared_data.queue.qsize() == 4
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 3
assert result == [
['prj0', 'prj1', 'prj2']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 2
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 1
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5'],
['prj6', 'prj7', 'prj8']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5'],
['prj6', 'prj7', 'prj8'],
['prj9']]
assert shared_data.error_queue.qsize() == 0
def test_queue_projects_max_names(self, shared_data):
shared_data.QUEUE_MAX_NAMES = 3
mirror = FakeStage('mirror')
mirror.serial = 0
prjs = []
for i in range(10):
prjs.append(ProjectIndexingInfo(stage=mirror, name='prj%d' % i))
result = []
def handler(is_from_mirror, serial, indexname, names):
result.append(names)
class FakeSearcher:
def document_number(self, path):
return None
shared_data.queue_projects(prjs, 0, FakeSearcher())
assert shared_data.queue.qsize() == 4
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 3
assert result == [
['prj0', 'prj1', 'prj2']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 2
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 1
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5'],
['prj6', 'prj7', 'prj8']]
shared_data.process_next(handler)
assert shared_data.queue.qsize() == 0
assert result == [
['prj0', 'prj1', 'prj2'],
['prj3', 'prj4', 'prj5'],
['prj6', 'prj7', 'prj8'],
['prj9']]
assert shared_data.error_queue.qsize() == 0
def test_queue_projects_skip_existing(self, shared_data):
""" For projects from mirrors the existing serial from the index
is checked to skip reindexing projects which are already up to
date.
There was a bug where the used serial was overwritten during that
check causing wrong entries in the queue.
"""
class FakeSearcher:
index = {}
def document_number(self, path):
if path in self.index:
return path
def stored_fields(self, path):
return {'serial': self.index[path]}
searcher = FakeSearcher()
result = []
def handler(is_from_mirror, serial, indexname, names):
if is_from_mirror and indexname == 'mirror':
for project in names:
searcher.index['/%s/%s' % (indexname, project)] = serial
result.append((is_from_mirror, serial, indexname, names))
mirror = FakeStage('mirror')
stage = FakeStage('stage')
# add one project on the mirror at serial 0
mirror.serials['mirror1'] = 0
shared_data.queue_projects(
[
ProjectIndexingInfo(stage=mirror, name='mirror1')],
0, searcher)
assert shared_data.queue.qsize() == 1
while shared_data.queue.qsize():
shared_data.process_next(handler)
assert result == [
(True, 0, 'mirror', ['mirror1'])]
result.clear()
# add another project on the mirror at serial 1 and re-add first project
mirror.serials['mirror2'] = 1
shared_data.queue_projects(
[
ProjectIndexingInfo(stage=mirror, name='mirror1'),
ProjectIndexingInfo(stage=mirror, name='mirror2')],
1, searcher)
assert shared_data.queue.qsize() == 1
while shared_data.queue.qsize():
shared_data.process_next(handler)
assert result == [
(True, 1, 'mirror', ['mirror2'])]
result.clear()
# add a project on the stage at serial 2 and re-add mirror projects
stage.serials['prj'] = 2
shared_data.queue_projects(
[
ProjectIndexingInfo(stage=mirror, name='mirror1'),
ProjectIndexingInfo(stage=mirror, name='mirror2'),
ProjectIndexingInfo(stage=stage, name='prj')],
2, searcher)
assert shared_data.queue.qsize() == 1
while shared_data.queue.qsize():
shared_data.process_next(handler)
assert result == [
(False, 2, 'stage', ('prj',))]
result.clear()
# now re-add everything at a later serial
shared_data.queue_projects(
[
ProjectIndexingInfo(stage=mirror, name='mirror1'),
ProjectIndexingInfo(stage=mirror, name='mirror2'),
ProjectIndexingInfo(stage=stage, name='prj')],
3, searcher)
assert shared_data.queue.qsize() == 1
while shared_data.queue.qsize():
shared_data.process_next(handler)
assert result == [
(False, 3, 'stage', ('prj',))]
result.clear()
|
9bfb51062ddfd9942106f46ea80e48a2cbd51d6b
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/devel/py-grpcio-tools/files/patch-setup.py
|
3f2434e25c609497ecdbd466cabac54f72de373a
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
patch-setup.py
|
--- setup.py.orig 2023-08-08 21:49:44 UTC
+++ setup.py
@@ -160,7 +160,7 @@ if EXTRA_ENV_COMPILE_ARGS is None:
# We need to statically link the C++ Runtime, only the C runtime is
# available dynamically
EXTRA_ENV_COMPILE_ARGS += " /MT"
- elif "linux" in sys.platform or "darwin" in sys.platform:
+ elif "linux" in sys.platform or "darwin" in sys.platform or "freebsd" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += " -fno-wrapv -frtti"
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ""
@@ -187,7 +187,7 @@ if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS += " -Wl,-exported_symbol,_{}".format(
_EXT_INIT_SYMBOL
)
- if "linux" in sys.platform or "darwin" in sys.platform:
+ if "linux" in sys.platform or "darwin" in sys.platform or "freebsd" in sys.platform:
EXTRA_ENV_LINK_ARGS += " -lpthread"
if check_linker_need_libatomic():
EXTRA_ENV_LINK_ARGS += " -latomic"
@@ -225,7 +225,7 @@ if "win32" in sys.platform:
)
if "64bit" in platform.architecture()[0]:
DEFINE_MACROS += (("MS_WIN64", 1),)
-elif "linux" in sys.platform or "darwin" in sys.platform:
+elif "linux" in sys.platform or "darwin" in sys.platform or "freebsd" in sys.platform:
DEFINE_MACROS += (("HAVE_PTHREAD", 1),)
# By default, Python3 distutils enforces compatibility of
|
82c9c1acf9dab03ec02c287c06eeb8438e33f641
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-core/PyObjCTest/__init__.py
|
851ba8982ccf223e8856827729791c97d0fd3706
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
__init__.py
|
"""
PyObjCTest package - unittests for the pyobjc core
"""
|
5beabdb758ebef799d27dfdf647155150180e045
|
0ba9681b235b377b3f57d52532ab7212d4d4cd8a
|
/saw-remote-api/python/tests/saw-in-progress/HMAC/spec/HMAC.py
|
e977e3c55610ffacd035a497608d3af747d82f66
|
[
"BSD-3-Clause"
] |
permissive
|
GaloisInc/saw-script
|
d9a3eb7b05c1bcbcc319987223cd53b903b55b5d
|
79ddd800bec59528958ed6d7593304e2b17b7dfb
|
refs/heads/master
| 2023-09-01T09:47:31.415255
| 2023-08-30T11:26:08
| 2023-08-30T11:26:08
| 34,082,065
| 458
| 82
|
BSD-3-Clause
| 2023-09-14T16:23:09
| 2015-04-16T21:39:32
|
Haskell
|
UTF-8
|
Python
| false
| false
| 17,450
|
py
|
HMAC.py
|
import os
import os.path
from typing import Any, Optional, Tuple
from cryptol.cryptoltypes import to_cryptol
from saw_client import *
from saw_client.llvm import *
from env_server import *
# N.B., transliteration from HMAC.saw
dir_path = os.path.dirname(os.path.realpath(__file__))
c = env_connect()
# import "HMAC_iterative.cry";
cryptol_load_file(os.path.join(dir_path, 'HMAC_iterative.cry'))
# import "Hashing.cry";
cryptol_load_file(os.path.join(dir_path, 'Hashing.cry'))
################################
# Generic Utilities.
# let ptr_to_fresh n ty = do {
# x <- crucible_fresh_var n ty;
# p <- alloc_init ty (crucible_term x);
# return (x, p);
# };
def ptr_to_fresh(c : Contract, ty : LLVMType, name : Optional[str] = None, *, read_only : bool = False) -> Tuple[FreshVar, SetupVal]:
    """Add to ``Contract`` ``c`` an allocation of a pointer of type ``ty`` initialized to an unknown fresh value.
    :returns A fresh variable bound to the pointer's initial value and the newly allocated pointer. (The fresh
    variable will be assigned ``name`` if provided/available.)"""
var = c.fresh_var(ty, name)
ptr = c.alloc(ty, points_to=var, read_only=read_only)
return (var, ptr)
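# Illustrative call (``contract`` stands for any Contract instance, as used in
# setup_hash_state below):
#   (x, px) = ptr_to_fresh(contract, i64, "x")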
# TODO(AMK) Um... what is this:
# let z3_hash_unint =
# w4_unint_z3 [ "hash_init_c_state"
# , "hash_update_c_state"
# , "hash_update_c_state_unbounded"
# , "hash_digest_c_state"
# ];
# ////////////////////////////////////////////////////////////////
# // Hash.
# //
# let setup_hash_state pstate = do {
# alg0 <- crucible_fresh_var "alg" (llvm_int 32);
# h0 <- crucible_fresh_var "h" (llvm_array 8 (llvm_int 64));
# Nl0 <- crucible_fresh_var "Nl" (llvm_int 64);
# Nh0 <- crucible_fresh_var "Nh" (llvm_int 64);
# u0 <- crucible_fresh_var "u" (llvm_array 16 (llvm_int 64));
# num0 <- crucible_fresh_var "num" (llvm_int 32);
# is_ready_for_input0 <- crucible_fresh_var "is_ready_for_input" (llvm_int 8);
# currently_in_hash0 <- crucible_fresh_var "currently_in_hash" (llvm_int 64);
# md_len0 <- crucible_fresh_var "md_len" (llvm_int 32);
# (_, pimpl) <- ptr_to_fresh_readonly "impl" (llvm_struct "struct.s2n_hash");
# crucible_points_to pstate
# (crucible_struct
# [ pimpl
# , crucible_term alg0
# , crucible_term is_ready_for_input0
# , crucible_term currently_in_hash0
# , crucible_struct
# [ crucible_struct
# [ crucible_struct
# [ crucible_term h0
# , crucible_term Nl0
# , crucible_term Nh0
# , crucible_struct [ crucible_term u0 ]
# , crucible_term num0
# , crucible_term md_len0
# ]
# ]
# ]
# ]);
# let st = {{
# { h = h0
# , Nl = Nl0
# , Nh = Nh0
# , u = u0
# , num = num0
# , md_len = md_len0
# }
# }};
# return (st, currently_in_hash0);
# };
def setup_hash_state(c : Contract, pstate : SetupVal) -> Tuple[Any, FreshVar]:
alg0 = c.fresh_var(i32, "alg")
h0 = c.fresh_var(array_ty(8, i64), "h0")
Nl0 = c.fresh_var(i64, "Nl")
Nh0 = c.fresh_var(i64, "Nh")
u0 = c.fresh_var(array_ty(16, i64), "u")
    num0 = c.fresh_var(i32, "num")
is_ready_for_input0 = c.fresh_var(i8, "is_ready_for_input")
currently_in_hash0 = c.fresh_var(i64, "currently_in_hash")
md_len0 = c.fresh_var(i32, "md_len")
(_, pimpl) = ptr_to_fresh(c, alias_ty('struct.s2n_hash'), "impl", read_only=True)
c.points_to(pstate,
struct(
pimpl,
alg0,
is_ready_for_input0,
currently_in_hash0,
struct(struct(struct(h0, Nl0, Nh0, struct(u0), num0, md_len0))
)))
# BOOKMARK
# let st = {{
# { h = h0
# , Nl = Nl0
# , Nh = Nh0
# , u = u0
# , num = num0
# , md_len = md_len0
# }
# }};
# return (st, currently_in_hash0);
# let update_hash_state pstate st = do {
# alg <- crucible_fresh_var "alg" (llvm_int 32);
# is_ready_for_input <- crucible_fresh_var "is_ready_for_input" (llvm_int 8);
# currently_in_hash <- crucible_fresh_var "currently_in_hash" (llvm_int 64);
# (_, pimpl) <- ptr_to_fresh_readonly "impl" (llvm_struct "struct.s2n_hash");
# crucible_points_to pstate
# (crucible_struct
# [ pimpl
# , crucible_term alg
# , crucible_term is_ready_for_input
# , crucible_term currently_in_hash
# , crucible_struct
# [ crucible_struct
# [ crucible_struct
# [ crucible_term {{ st.h }}
# , crucible_term {{ st.Nl }}
# , crucible_term {{ st.Nh }}
# , crucible_struct [ crucible_term {{ st.u }} ]
# , crucible_term {{ st.num }}
# , crucible_term {{ st.md_len }}
# ]
# ]
# ]
# ]);
# };
# let hash_init_spec = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (st0, _) <- setup_hash_state pstate;
# alg <- crucible_fresh_var "alg" (llvm_int 32);
# crucible_execute_func [pstate, crucible_term alg];
# // We need to pass in the starting state since many of the bits in
# // the union are unused by many of the hash algorithms.
# let st1 = {{ hash_init_c_state st0 }};
# update_hash_state pstate st1;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_reset_spec = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (st0, _) <- setup_hash_state pstate;
# crucible_execute_func [pstate];
# let st1 = {{ hash_init_c_state st0 }};
# update_hash_state pstate st1;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_copy_spec = do {
# pstate1 <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# pstate2 <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (st1, _) <- setup_hash_state pstate1;
# (st2, _) <- setup_hash_state pstate2;
# crucible_execute_func [pstate1, pstate2];
# update_hash_state pstate1 st2;
# update_hash_state pstate2 st2;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_update_spec msg_size = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (msg, pmsg) <- ptr_to_fresh_readonly "msg" (llvm_array msg_size (llvm_int 8));
# (st0, _) <- setup_hash_state pstate;
# let size = crucible_term {{ `msg_size : [32] }};
# crucible_execute_func [pstate, pmsg, size];
# let st1 = {{ hash_update_c_state`{msg_size=msg_size} st0 msg }};
# update_hash_state pstate st1;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_update_unbounded_spec = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (st0, _) <- setup_hash_state pstate;
# size <- crucible_fresh_var "size" (llvm_int 32);
# pmsg <- crucible_symbolic_alloc true 1 {{ (0 # size) : [64] }};
# msg <- crucible_fresh_cryptol_var "msg" {| ByteArray |};
# crucible_points_to_array_prefix pmsg msg {{ (0 # size) : [64] }};
# crucible_execute_func [pstate, pmsg, (crucible_term size)];
# let st1 = {{ hash_update_c_state_unbounded st0 msg size }};
# update_hash_state pstate st1;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_digest_spec digest_size = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# (dgst, pdgst) <- ptr_to_fresh "out" (llvm_array digest_size (llvm_int 8));
# (st0, _) <- setup_hash_state pstate;
# size <- crucible_fresh_var "size" (llvm_int 32);
# crucible_execute_func [pstate, pdgst, crucible_term size];
# update_hash_state pstate st0;
# let out1 = {{ hash_digest_c_state`{digest_size=digest_size} st0 }};
# crucible_points_to pdgst (crucible_term out1);
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hash_get_currently_in_hash_total_spec = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hash_state");
# pout <- crucible_alloc (llvm_int 64);
# (st0, currently_in_hash) <- setup_hash_state pstate;
# crucible_execute_func [pstate, pout];
# update_hash_state pstate st0;
# crucible_points_to pout (crucible_term {{zero: [64]}} );
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# ////////////////////////////////////////////////////////////////
# // HMAC.
# let setup_hmac_state alg0 hash_block_size0 block_size0 digest_size0 = do {
# pstate <- crucible_alloc (llvm_struct "struct.s2n_hmac_state");
# currently_in_hash_block0 <- crucible_fresh_var "currently_in_hash_block" (llvm_int 32);
# xor_pad0 <- crucible_fresh_var "xor_pad" (llvm_array 128 (llvm_int 8));
# let digest_size = eval_size {| SHA512_DIGEST_LENGTH |};
# digest_pad0 <- crucible_fresh_var "digest_pad" (llvm_array digest_size (llvm_int 8));
# crucible_points_to (crucible_field pstate "alg") (crucible_term alg0);
# crucible_points_to (crucible_field pstate "hash_block_size") (crucible_term hash_block_size0);
# crucible_points_to (crucible_field pstate "currently_in_hash_block") (crucible_term currently_in_hash_block0);
# crucible_points_to (crucible_field pstate "xor_pad_size") (crucible_term block_size0);
# crucible_points_to (crucible_field pstate "digest_size") (crucible_term digest_size0);
# (inner0, _) <- setup_hash_state (crucible_field pstate "inner");
# (inner_just_key0, _) <- setup_hash_state (crucible_field pstate "inner_just_key");
# (outer_just_key0, _) <- setup_hash_state (crucible_field pstate "outer_just_key");
# (outer0, _) <- setup_hash_state (crucible_field pstate "outer");
# crucible_points_to (crucible_field pstate "xor_pad") (crucible_term xor_pad0);
# crucible_points_to (crucible_field pstate "digest_pad") (crucible_term digest_pad0);
# let st0 = {{
# { alg = alg0
# , hash_block_size = hash_block_size0
# , currently_in_hash_block = currently_in_hash_block0
# , block_size = block_size0
# , digest_size = digest_size0
# , inner = inner0
# , inner_just_key = inner_just_key0
# , outer = outer0
# , outer_just_key = outer_just_key0
# , xor_pad = xor_pad0
# , digest_pad = digest_pad0
# }
# }};
# return (pstate, st0);
# };
# let check_hmac_state pstate st = do {
# crucible_points_to (crucible_field pstate "alg") (crucible_term {{ st.alg }});
# crucible_points_to (crucible_field pstate "hash_block_size") (crucible_term {{ st.hash_block_size }});
# crucible_points_to (crucible_field pstate "currently_in_hash_block") (crucible_term {{ st.currently_in_hash_block }});
# crucible_points_to (crucible_field pstate "xor_pad_size") (crucible_term {{ st.block_size }});
# crucible_points_to (crucible_field pstate "digest_size") (crucible_term {{ st.digest_size }});
# update_hash_state (crucible_field pstate "inner") {{ st.inner }};
# update_hash_state (crucible_field pstate "inner_just_key") {{ st.inner_just_key }};
# // XXX: Don't care about 'outer' because it gets overwritten by
# // 's2n_hash_reset' before use in 's2n_hmac_digest'.
# //
# //update_hash_state (crucible_elem pstate 7) {{ st.outer }};
# update_hash_state (crucible_field pstate "outer_just_key") ({{ st.outer_just_key }});
# crucible_points_to (crucible_field pstate "xor_pad") (crucible_term {{ st.xor_pad }});
# // Don't care about 'digest_pad', because it gets overwritten
# // using 's2n_hash_digest' before use in 's2n_hmac_digest'.
# //
# // However, if we leave it in, the proof still goes through
# // (since we model exactly what happens).
# //
# //crucible_points_to (crucible_elem pstate 9) (crucible_term {{ st.digest_pad }});
# };
# let hmac_invariants
# st
# (cfg : { name : String
# , hmac_alg : Term
# , digest_size : Int
# , block_size : Int
# , hash_block_size : Int
# }) = do {
# // Specify the HMAC algorithm.
# crucible_equal (crucible_term {{ st.alg }}) (crucible_term cfg.hmac_alg);
# // Specify sizes
# let hash_block_size = cfg.hash_block_size;
# let block_size = cfg.block_size;
# let digest_size = cfg.digest_size;
# crucible_equal (crucible_term {{ st.hash_block_size }}) (crucible_term {{ `hash_block_size : [16] }});
# crucible_equal (crucible_term {{ st.block_size }}) (crucible_term {{ `block_size : [16] }});
# crucible_equal (crucible_term {{ st.digest_size }}) (crucible_term {{ `digest_size : [8] }});
# };
# ////////////////////////////////////////////////////////////////
# let hmac_init_spec
# (cfg : { name : String
# , hmac_alg : Term
# , digest_size : Int
# , block_size : Int
# , hash_block_size : Int
# }) = do {
# alg0 <- crucible_fresh_var "alg" (llvm_int 32);
# hash_block_size0 <- crucible_fresh_var "hash_block_size" (llvm_int 16);
# block_size0 <- crucible_fresh_var "block_size" (llvm_int 16);
# digest_size0 <- crucible_fresh_var "digest_size" (llvm_int 8);
# (pstate, st0) <- setup_hmac_state alg0 hash_block_size0 block_size0 digest_size0;
# klen <- crucible_fresh_var "klen" (llvm_int 32);
# pkey <- crucible_symbolic_alloc true 1 {{ (0 # klen) : [64] }};
# key <- crucible_fresh_cryptol_var "key" {| ByteArray |};
# crucible_points_to_array_prefix pkey key {{ (0 # klen) : [64] }};
# crucible_execute_func [pstate, crucible_term (cfg.hmac_alg), pkey, (crucible_term klen)];
# let block_size = cfg.block_size;
# let hash_block_size = cfg.hash_block_size;
# let digest_size = cfg.digest_size;
# let alg0 = cfg.hmac_alg;
# let st1 = {{
# hmac_init_c_state_unbounded
# `{block_size=block_size
# ,hash_block_size=hash_block_size
# ,digest_size=digest_size}
# st0 alg0 key klen
# }};
# check_hmac_state pstate st1;
# hmac_invariants st1 cfg;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hmac_update_spec
# (cfg : { name : String
# , hmac_alg : Term
# , digest_size : Int
# , block_size : Int
# , hash_block_size : Int
# }) = do {
# let digest_size = cfg.digest_size;
# let block_size = cfg.block_size;
# let hash_block_size = cfg.hash_block_size;
# (pstate, st0) <- setup_hmac_state
# cfg.hmac_alg
# {{ `hash_block_size : [16] }}
# {{ `block_size : [16] }}
# {{ `digest_size : [8] }};
# hmac_invariants st0 cfg;
# size <- crucible_fresh_var "size" (llvm_int 32);
# pmsg <- crucible_symbolic_alloc true 1 {{ (0 # size) : [64] }};
# msg <- crucible_fresh_cryptol_var "msg" {| ByteArray |};
# crucible_points_to_array_prefix pmsg msg {{ (0 # size) : [64] }};
# crucible_execute_func [pstate, pmsg, (crucible_term size)];
# let st1 = {{ hmac_update_c_state_unbounded st0 msg size }};
# check_hmac_state pstate st1;
# hmac_invariants st1 cfg;
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hmac_digest_spec
# (cfg : { name : String
# , hmac_alg : Term
# , digest_size : Int
# , block_size : Int
# , hash_block_size : Int
# }) = do {
# (out, pout) <- ptr_to_fresh "out" (llvm_array cfg.digest_size (llvm_int 8));
# let digest_size = cfg.digest_size;
# let block_size = cfg.block_size;
# let hash_block_size = cfg.hash_block_size;
# (pstate, st0) <- setup_hmac_state
# cfg.hmac_alg
# {{ `hash_block_size : [16] }}
# {{ `block_size : [16] }}
# {{ `digest_size : [8] }};
# hmac_invariants st0 cfg;
# let hash_block_size = cfg.hash_block_size;
# let block_size = cfg.block_size;
# let digest_size = cfg.digest_size;
# let size = {{ `digest_size : [32] }};
# crucible_execute_func [pstate, pout, crucible_term size];
# let st1_digest = {{
# hmac_digest_c_state`{block_size=block_size,digest_size=digest_size} st0
# }};
# let st1 = {{ st1_digest.0 }};
# let digest = {{ split (st1_digest.1) : [digest_size][8] }};
# crucible_points_to pout (crucible_term digest);
# crucible_return (crucible_term {{ 0 : [32] }});
# };
# let hmac_digest_size_spec
# (cfg : { name : String
# , hmac_alg : Term
# , digest_size : Int
# , block_size : Int
# , hash_block_size : Int
# }) = do {
# psize <- crucible_alloc (llvm_int 8);
# crucible_execute_func [crucible_term cfg.hmac_alg, psize];
# let digest_size = cfg.digest_size;
# crucible_points_to psize (crucible_term {{ `digest_size : [8] }});
# crucible_return (crucible_term {{ 0 : [32] }});
# };
|
040063bc7a45777bf089f07213bb279322d649f2
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-PHASE/PyObjCTest/test_phasegroup.py
|
61f7ed42557fb44a42294a54e48560391a2806a3
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
test_phasegroup.py
|
from PyObjCTools.TestSupport import TestCase
import PHASE
class TestPHASEGroup(TestCase):
def test_methods(self):
self.assertResultIsBOOL(PHASE.PHASEGroup.isMuted)
self.assertResultIsBOOL(PHASE.PHASEGroup.isSoloed)
|
c492f4aad578deaa5b39a4a39ebbe0937519e9d9
|
2ac03b8c24df220ea32ea525e1d65aeb294cd1a4
|
/custom_components/waste_collection_schedule/waste_collection_schedule/source/scheibbs_umweltverbaende_at.py
|
3d84b1e824033e940aaadc2f1803ef7c5d8afe62
|
[
"MIT"
] |
permissive
|
mampfes/hacs_waste_collection_schedule
|
a7b98319a7483dedc8cf78b724f93932934c1702
|
1dc9476efef9963a141b9ac987e2708224b9eaaf
|
refs/heads/master
| 2023-08-16T21:14:46.088962
| 2023-08-16T10:05:24
| 2023-08-16T10:05:24
| 254,347,436
| 495
| 428
|
MIT
| 2023-09-12T18:59:07
| 2020-04-09T11:02:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,067
|
py
|
scheibbs_umweltverbaende_at.py
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from waste_collection_schedule import Collection
TITLE = "GVU Scheibbs"
DESCRIPTION = "Source for waste collection services Association of Municipalities in the District of Scheibbs"
URL = "https://scheibbs.umweltverbaende.at/"
TEST_CASES = {
"Test_001": {"region": "Gaming"},
"Test_002": {"region": "Sankt Anton an der Jeßnitz"},
"Test_003": {"region": "Göstling an der Ybbs"},
"Test_004": {"region": "Wieselburg"},
}
ICON_MAP = {
"Restmüll": "mdi:trash-can",
"Gelber Sack": "mdi:sack",
"Altpapier": "mdi:package-variant",
"Biotonne": "mdi:leaf",
}
class Source:
def __init__(self, region):
self._region = region
def fetch(self):
s = requests.Session()
# get list of regions and weblinks
r0 = s.get("https://scheibbs.umweltverbaende.at/?kat=32")
soup = BeautifulSoup(r0.text, "html.parser")
table = soup.find_all("div", {"class": "col-sm-9"})
entries = []
for item in table:
weblinks = item.find_all("a", {"class": "weblink"})
for item in weblinks:
# match weblink with region to get collection schedule
if self._region in item.text:
r1 = s.get(f"https://scheibbs.umweltverbaende.at/{item['href']}")
soup = BeautifulSoup(r1.text, "html.parser")
schedule = soup.find_all("div", {"class": "tunterlegt"})
for day in schedule:
                        txt = day.text.strip().split(
                            " \u00a0 "  # space, U+00A0 no-break space, space; written explicitly
                        )
entries.append(
Collection(
date=datetime.strptime(txt[1], "%d.%m.%Y").date(),
t=txt[2],
icon=ICON_MAP.get(txt[2]),
)
)
return entries
|
591a77e278328ad85945739e5ba9ddfd8b09eb6c
|
09a7fa80d420634848b5e6af7b59353afd8c726b
|
/src/main/resources/resource/GoogleCloud/GoogleCloud.py
|
dff75cada9dedcd426fde4ddbb363643b655c18e
|
[
"Apache-2.0",
"CC-BY-2.5"
] |
permissive
|
MyRobotLab/myrobotlab
|
cf789956d9f97a98eead44faf7a8b61f70348dc3
|
0ecdc681b4928ab65649404779c095d352dd96b1
|
refs/heads/develop
| 2023-09-04T10:57:19.041683
| 2023-08-30T14:04:44
| 2023-08-30T14:04:44
| 18,051,302
| 213
| 114
|
Apache-2.0
| 2023-09-07T14:14:58
| 2014-03-24T03:59:27
|
Java
|
UTF-8
|
Python
| false
| false
| 888
|
py
|
GoogleCloud.py
|
#########################################
# GoogleCloud.py
# description: google api client service
# categories: [google, vision, cloud]
# possibly more info @: http://myrobotlab.org/service/GoogleCloud
#########################################
# start the service
googlecloud = runtime.start("googlecloud","GoogleCloud")
# connect to the google cloud back end with the vision api
# this authorization api json file needs to be created
if googlecloud.connect("../API Project-c90c3d12e7d3.json"):
faces = googlecloud.detectFaces("faces.jpg")
print("Found ", faces.size(), " faces")
print("Writing to file ", "facesOutput.jpg")
googlecloud.writeWithFaces("faces.jpg", "facesOutput.jpg", faces)
print(googlecloud.getLabels("kitchen.jpg"))
print(googlecloud.getLabels("plumbing.jpg"))
print(googlecloud.getLabels("ship.jpg"))
print(googlecloud.getLabels("greenball.jpg"))
|
58a50f5bae492b1df9c714b93fcf1069c3f89236
|
114bcbcf18d772db12c8aac3e4a10bcbde5d9d80
|
/tools/iqm_export.py
|
0e27fb333ec5be18908197175067dd6f62365b38
|
[
"BSL-1.0"
] |
permissive
|
gecko0307/dagon
|
e7c6f82cc6abd506d358c66c92c949854bcb964d
|
98556587bf578e83715167aeaba94a73fac7df0c
|
refs/heads/master
| 2023-09-01T00:27:53.845159
| 2023-08-30T21:09:18
| 2023-08-30T21:09:18
| 70,403,001
| 307
| 34
|
NOASSERTION
| 2022-06-25T10:09:02
| 2016-10-09T12:57:05
|
D
|
UTF-8
|
Python
| false
| false
| 47,247
|
py
|
iqm_export.py
|
# This script is licensed as public domain.
bl_info = {
"name": "Export Inter-Quake Model (.iqm/.iqe)",
"author": "Lee Salzman",
"version": (2016, 2, 9),
"blender": (2, 74, 0),
"location": "File > Export > Inter-Quake Model",
"description": "Export to the Inter-Quake Model format (.iqm/.iqe)",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"}
import os, struct, math
import mathutils
import bpy
import bpy_extras.io_utils
IQM_POSITION = 0
IQM_TEXCOORD = 1
IQM_NORMAL = 2
IQM_TANGENT = 3
IQM_BLENDINDEXES = 4
IQM_BLENDWEIGHTS = 5
IQM_COLOR = 6
IQM_CUSTOM = 0x10
IQM_BYTE = 0
IQM_UBYTE = 1
IQM_SHORT = 2
IQM_USHORT = 3
IQM_INT = 4
IQM_UINT = 5
IQM_HALF = 6
IQM_FLOAT = 7
IQM_DOUBLE = 8
IQM_LOOP = 1
IQM_HEADER = struct.Struct('<16s27I')
IQM_MESH = struct.Struct('<6I')
IQM_TRIANGLE = struct.Struct('<3I')
IQM_JOINT = struct.Struct('<Ii10f')
IQM_POSE = struct.Struct('<iI20f')
IQM_ANIMATION = struct.Struct('<3IfI')
IQM_VERTEXARRAY = struct.Struct('<5I')
IQM_BOUNDS = struct.Struct('<8f')
MAXVCACHE = 32
class Vertex:
def __init__(self, index, coord, normal, uv, weights, color):
self.index = index
self.coord = coord
self.normal = normal
self.uv = uv
self.weights = weights
self.color = color
def normalizeWeights(self):
# renormalizes all weights such that they add up to 255
# the list is chopped/padded to exactly 4 weights if necessary
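        # e.g. [(0.6, 1), (0.4, 2)] becomes [(153, 1), (102, 2), (0, 2), (0, 2)]:
        # 0.6 and 0.4 scale to 153 + 102 = 255, then the list is padded to four
        # entries by repeating the last bone index with a zero weight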
if not self.weights:
self.weights = [ (0, 0), (0, 0), (0, 0), (0, 0) ]
return
self.weights.sort(key = lambda weight: weight[0], reverse=True)
if len(self.weights) > 4:
del self.weights[4:]
totalweight = sum([ weight for (weight, bone) in self.weights])
if totalweight > 0:
self.weights = [ (int(round(weight * 255.0 / totalweight)), bone) for (weight, bone) in self.weights]
while len(self.weights) > 1 and self.weights[-1][0] <= 0:
self.weights.pop()
else:
totalweight = len(self.weights)
self.weights = [ (int(round(255.0 / totalweight)), bone) for (weight, bone) in self.weights]
totalweight = sum([ weight for (weight, bone) in self.weights])
while totalweight != 255:
for i, (weight, bone) in enumerate(self.weights):
if totalweight > 255 and weight > 0:
self.weights[i] = (weight - 1, bone)
totalweight -= 1
elif totalweight < 255 and weight < 255:
self.weights[i] = (weight + 1, bone)
totalweight += 1
while len(self.weights) < 4:
self.weights.append((0, self.weights[-1][1]))
def calcScore(self):
if self.uses:
self.score = 2.0 * pow(len(self.uses), -0.5)
if self.cacherank >= 3:
self.score += pow(1.0 - float(self.cacherank - 3)/MAXVCACHE, 1.5)
elif self.cacherank >= 0:
self.score += 0.75
else:
self.score = -1.0
def neighborKey(self, other):
if self.coord < other.coord:
return (self.coord.x, self.coord.y, self.coord.z, other.coord.x, other.coord.y, other.coord.z, tuple(self.weights), tuple(other.weights))
else:
return (other.coord.x, other.coord.y, other.coord.z, self.coord.x, self.coord.y, self.coord.z, tuple(other.weights), tuple(self.weights))
def __hash__(self):
return self.index
def __eq__(self, v):
return self.coord == v.coord and self.normal == v.normal and self.uv == v.uv and self.weights == v.weights and self.color == v.color
class Mesh:
def __init__(self, name, material, verts):
self.name = name
self.material = material
self.verts = [ None for v in verts ]
self.vertmap = {}
self.tris = []
def calcTangents(self):
# See "Tangent Space Calculation" at http://www.terathon.com/code/tangent.html
for v in self.verts:
v.tangent = mathutils.Vector((0.0, 0.0, 0.0))
v.bitangent = mathutils.Vector((0.0, 0.0, 0.0))
for (v0, v1, v2) in self.tris:
dco1 = v1.coord - v0.coord
dco2 = v2.coord - v0.coord
duv1 = v1.uv - v0.uv
duv2 = v2.uv - v0.uv
tangent = dco2*duv1.y - dco1*duv2.y
bitangent = dco2*duv1.x - dco1*duv2.x
if dco2.cross(dco1).dot(bitangent.cross(tangent)) < 0:
tangent.negate()
bitangent.negate()
v0.tangent += tangent
v1.tangent += tangent
v2.tangent += tangent
v0.bitangent += bitangent
v1.bitangent += bitangent
v2.bitangent += bitangent
for v in self.verts:
v.tangent = v.tangent - v.normal*v.tangent.dot(v.normal)
v.tangent.normalize()
if v.normal.cross(v.tangent).dot(v.bitangent) < 0:
v.bitangent = -1.0
else:
v.bitangent = 1.0
def optimize(self):
# Linear-speed vertex cache optimization algorithm by Tom Forsyth
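        # Each vertex gets a score combining a valence term (2 * uses^-0.5, so verts
        # referenced by few remaining triangles are scheduled early) and a cache bonus:
        # a flat 0.75 for the three most recently used verts, decaying with LRU position
        # otherwise. Triangles are emitted greedily by the sum of their vertex scores.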
for v in self.verts:
if v:
v.index = -1
v.uses = []
v.cacherank = -1
for i, (v0, v1, v2) in enumerate(self.tris):
v0.uses.append(i)
v1.uses.append(i)
v2.uses.append(i)
for v in self.verts:
if v:
v.calcScore()
besttri = -1
bestscore = -42.0
scores = []
for i, (v0, v1, v2) in enumerate(self.tris):
scores.append(v0.score + v1.score + v2.score)
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
vertloads = 0 # debug info
vertschedule = []
trischedule = []
vcache = []
while besttri >= 0:
tri = self.tris[besttri]
scores[besttri] = -666.0
trischedule.append(tri)
for v in tri:
if v.cacherank < 0: # debug info
vertloads += 1 # debug info
if v.index < 0:
v.index = len(vertschedule)
vertschedule.append(v)
v.uses.remove(besttri)
v.cacherank = -1
v.score = -1.0
vcache = [ v for v in tri if v.uses ] + [ v for v in vcache if v.cacherank >= 0 ]
for i, v in enumerate(vcache):
v.cacherank = i
v.calcScore()
besttri = -1
bestscore = -42.0
for v in vcache:
for i in v.uses:
v0, v1, v2 = self.tris[i]
scores[i] = v0.score + v1.score + v2.score
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
while len(vcache) > MAXVCACHE:
vcache.pop().cacherank = -1
if besttri < 0:
for i, score in enumerate(scores):
if score > bestscore:
besttri = i
bestscore = score
print('%s: %d verts optimized to %d/%d loads for %d entry LRU cache' % (self.name, len(self.verts), vertloads, len(vertschedule), MAXVCACHE))
#print('%s: %d verts scheduled to %d' % (self.name, len(self.verts), len(vertschedule)))
self.verts = vertschedule
# print('%s: %d tris scheduled to %d' % (self.name, len(self.tris), len(trischedule)))
self.tris = trischedule
def meshData(self, iqm):
return [ iqm.addText(self.name), iqm.addText(self.material), self.firstvert, len(self.verts), self.firsttri, len(self.tris) ]
class Bone:
def __init__(self, name, origname, index, parent, matrix):
self.name = name
self.origname = origname
self.index = index
self.parent = parent
self.matrix = matrix
self.localmatrix = matrix
if self.parent:
self.localmatrix = parent.matrix.inverted() * self.localmatrix
self.numchannels = 0
self.channelmask = 0
self.channeloffsets = [ 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10 ]
self.channelscales = [ -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10 ]
def jointData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
pos = self.localmatrix.to_translation()
orient = self.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = self.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
return [ iqm.addText(self.name), parent, pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z ]
def poseData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
return [ parent, self.channelmask ] + self.channeloffsets + self.channelscales
def calcChannelMask(self):
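        # After calcFrameLimits, channeloffsets hold each channel's minimum and
        # channelscales its maximum. Here the scale becomes the range (max - min);
        # channels with a negligible range are dropped from the mask as constant,
        # and animated channels are quantized to 16 bits (range / 0xFFFF per step).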
for i in range(0, 10):
self.channelscales[i] -= self.channeloffsets[i]
if self.channelscales[i] >= 1.0e-10:
self.numchannels += 1
self.channelmask |= 1 << i
self.channelscales[i] /= 0xFFFF
else:
self.channelscales[i] = 0.0
return self.numchannels
class Animation:
def __init__(self, name, frames, fps = 0.0, flags = 0):
self.name = name
self.frames = frames
self.fps = fps
self.flags = flags
def calcFrameLimits(self, bones):
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
bone.channeloffsets[0] = min(bone.channeloffsets[0], loc.x)
bone.channeloffsets[1] = min(bone.channeloffsets[1], loc.y)
bone.channeloffsets[2] = min(bone.channeloffsets[2], loc.z)
bone.channeloffsets[3] = min(bone.channeloffsets[3], quat.x)
bone.channeloffsets[4] = min(bone.channeloffsets[4], quat.y)
bone.channeloffsets[5] = min(bone.channeloffsets[5], quat.z)
bone.channeloffsets[6] = min(bone.channeloffsets[6], quat.w)
bone.channeloffsets[7] = min(bone.channeloffsets[7], scale.x)
bone.channeloffsets[8] = min(bone.channeloffsets[8], scale.y)
bone.channeloffsets[9] = min(bone.channeloffsets[9], scale.z)
bone.channelscales[0] = max(bone.channelscales[0], loc.x)
bone.channelscales[1] = max(bone.channelscales[1], loc.y)
bone.channelscales[2] = max(bone.channelscales[2], loc.z)
bone.channelscales[3] = max(bone.channelscales[3], quat.x)
bone.channelscales[4] = max(bone.channelscales[4], quat.y)
bone.channelscales[5] = max(bone.channelscales[5], quat.z)
bone.channelscales[6] = max(bone.channelscales[6], quat.w)
bone.channelscales[7] = max(bone.channelscales[7], scale.x)
bone.channelscales[8] = max(bone.channelscales[8], scale.y)
bone.channelscales[9] = max(bone.channelscales[9], scale.z)
def animData(self, iqm):
return [ iqm.addText(self.name), self.firstframe, len(self.frames), self.fps, self.flags ]
def frameData(self, bones):
data = b''
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if (bone.channelmask&0x7F) == 0x7F:
lx = int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0]))
ly = int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1]))
lz = int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2]))
qx = int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3]))
qy = int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4]))
qz = int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5]))
qw = int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6]))
data += struct.pack('<7H', lx, ly, lz, qx, qy, qz, qw)
else:
if bone.channelmask & 1:
data += struct.pack('<H', int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0])))
if bone.channelmask & 2:
data += struct.pack('<H', int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1])))
if bone.channelmask & 4:
data += struct.pack('<H', int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2])))
if bone.channelmask & 8:
data += struct.pack('<H', int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3])))
if bone.channelmask & 16:
data += struct.pack('<H', int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4])))
if bone.channelmask & 32:
data += struct.pack('<H', int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5])))
if bone.channelmask & 64:
data += struct.pack('<H', int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6])))
if bone.channelmask & 128:
data += struct.pack('<H', int(round((scale.x - bone.channeloffsets[7]) / bone.channelscales[7])))
if bone.channelmask & 256:
data += struct.pack('<H', int(round((scale.y - bone.channeloffsets[8]) / bone.channelscales[8])))
if bone.channelmask & 512:
data += struct.pack('<H', int(round((scale.z - bone.channeloffsets[9]) / bone.channelscales[9])))
return data
def frameBoundsData(self, bones, meshes, frame, invbase):
bbmin = bbmax = None
xyradius = 0.0
radius = 0.0
transforms = []
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if bone.parent:
mat = transforms[bone.parent.index] * mat
transforms.append(mat)
for i, mat in enumerate(transforms):
transforms[i] = mat * invbase[i]
for mesh in meshes:
for v in mesh.verts:
pos = mathutils.Vector((0.0, 0.0, 0.0))
for (weight, bone) in v.weights:
if weight > 0:
pos += (transforms[bone] * v.coord) * (weight / 255.0)
if bbmin:
bbmin.x = min(bbmin.x, pos.x)
bbmin.y = min(bbmin.y, pos.y)
bbmin.z = min(bbmin.z, pos.z)
bbmax.x = max(bbmax.x, pos.x)
bbmax.y = max(bbmax.y, pos.y)
bbmax.z = max(bbmax.z, pos.z)
else:
bbmin = pos.copy()
bbmax = pos.copy()
pradius = pos.x*pos.x + pos.y*pos.y
if pradius > xyradius:
xyradius = pradius
pradius += pos.z*pos.z
if pradius > radius:
radius = pradius
if bbmin:
xyradius = math.sqrt(xyradius)
radius = math.sqrt(radius)
else:
bbmin = bbmax = mathutils.Vector((0.0, 0.0, 0.0))
return IQM_BOUNDS.pack(bbmin.x, bbmin.y, bbmin.z, bbmax.x, bbmax.y, bbmax.z, xyradius, radius)
def boundsData(self, bones, meshes):
invbase = []
for bone in bones:
invbase.append(bone.matrix.inverted())
data = b''
for i, frame in enumerate(self.frames):
print('Calculating bounding box for %s:%d' % (self.name, i))
data += self.frameBoundsData(bones, meshes, frame, invbase)
return data
class IQMFile:
def __init__(self):
self.textoffsets = {}
self.textdata = b''
self.meshes = []
self.meshdata = []
self.numverts = 0
self.numtris = 0
self.joints = []
self.jointdata = []
self.numframes = 0
self.framesize = 0
self.anims = []
self.posedata = []
self.animdata = []
self.framedata = []
self.vertdata = []
def addText(self, str):
if not self.textdata:
self.textdata += b'\x00'
self.textoffsets[''] = 0
try:
return self.textoffsets[str]
except:
offset = len(self.textdata)
self.textoffsets[str] = offset
self.textdata += bytes(str, encoding="utf8") + b'\x00'
return offset
def addJoints(self, bones):
for bone in bones:
self.joints.append(bone)
if self.meshes:
self.jointdata.append(bone.jointData(self))
def addMeshes(self, meshes):
self.meshes += meshes
for mesh in meshes:
mesh.firstvert = self.numverts
mesh.firsttri = self.numtris
self.meshdata.append(mesh.meshData(self))
self.numverts += len(mesh.verts)
self.numtris += len(mesh.tris)
def addAnims(self, anims):
self.anims += anims
for anim in anims:
anim.firstframe = self.numframes
self.animdata.append(anim.animData(self))
self.numframes += len(anim.frames)
def calcFrameSize(self):
for anim in self.anims:
anim.calcFrameLimits(self.joints)
self.framesize = 0
for joint in self.joints:
self.framesize += joint.calcChannelMask()
for joint in self.joints:
if self.anims:
self.posedata.append(joint.poseData(self))
print('Exporting %d frames of size %d' % (self.numframes, self.framesize))
def writeVerts(self, file, offset):
if self.numverts <= 0:
return
file.write(IQM_VERTEXARRAY.pack(IQM_POSITION, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TEXCOORD, 0, IQM_FLOAT, 2, offset))
offset += self.numverts * struct.calcsize('<2f')
file.write(IQM_VERTEXARRAY.pack(IQM_NORMAL, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TANGENT, 0, IQM_FLOAT, 4, offset))
offset += self.numverts * struct.calcsize('<4f')
if self.joints:
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDINDEXES, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDWEIGHTS, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
file.write(IQM_VERTEXARRAY.pack(IQM_COLOR, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.coord))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<2f', *v.uv))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.normal))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4f', v.tangent.x, v.tangent.y, v.tangent.z, v.bitangent))
if self.joints:
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][1], v.weights[1][1], v.weights[2][1], v.weights[3][1]))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][0], v.weights[1][0], v.weights[2][0], v.weights[3][0]))
if hascolors:
for mesh in self.meshes:
for v in mesh.verts:
if v.color:
file.write(struct.pack('<4B', v.color[0], v.color[1], v.color[2], v.color[3]))
else:
file.write(struct.pack('<4B', 0, 0, 0, 255))
def calcNeighbors(self):
edges = {}
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = v0.neighborKey(v1)
e1 = v1.neighborKey(v2)
e2 = v2.neighborKey(v0)
tri = mesh.firsttri + i
try: edges[e0].append(tri)
except: edges[e0] = [tri]
try: edges[e1].append(tri)
except: edges[e1] = [tri]
try: edges[e2].append(tri)
except: edges[e2] = [tri]
neighbors = []
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = edges[v0.neighborKey(v1)]
e1 = edges[v1.neighborKey(v2)]
e2 = edges[v2.neighborKey(v0)]
tri = mesh.firsttri + i
match0 = match1 = match2 = -1
if len(e0) == 2: match0 = e0[e0.index(tri)^1]
if len(e1) == 2: match1 = e1[e1.index(tri)^1]
if len(e2) == 2: match2 = e2[e2.index(tri)^1]
neighbors.append((match0, match1, match2))
self.neighbors = neighbors
def writeTris(self, file):
for mesh in self.meshes:
for (v0, v1, v2) in mesh.tris:
file.write(struct.pack('<3I', v0.index + mesh.firstvert, v1.index + mesh.firstvert, v2.index + mesh.firstvert))
for (n0, n1, n2) in self.neighbors:
if n0 < 0: n0 = 0xFFFFFFFF
if n1 < 0: n1 = 0xFFFFFFFF
if n2 < 0: n2 = 0xFFFFFFFF
file.write(struct.pack('<3I', n0, n1, n2))
def export(self, file, usebbox = True):
self.filesize = IQM_HEADER.size
if self.textdata:
while len(self.textdata) % 4:
self.textdata += b'\x00'
ofs_text = self.filesize
self.filesize += len(self.textdata)
else:
ofs_text = 0
if self.meshdata:
ofs_meshes = self.filesize
self.filesize += len(self.meshdata) * IQM_MESH.size
else:
ofs_meshes = 0
if self.numverts > 0:
ofs_vertexarrays = self.filesize
num_vertexarrays = 4
if self.joints:
num_vertexarrays += 2
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
num_vertexarrays += 1
self.filesize += num_vertexarrays * IQM_VERTEXARRAY.size
ofs_vdata = self.filesize
self.filesize += self.numverts * struct.calcsize('<3f2f3f4f')
if self.joints:
self.filesize += self.numverts * struct.calcsize('<4B4B')
if hascolors:
self.filesize += self.numverts * struct.calcsize('<4B')
else:
ofs_vertexarrays = 0
num_vertexarrays = 0
ofs_vdata = 0
if self.numtris > 0:
ofs_triangles = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
ofs_neighbors = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
else:
ofs_triangles = 0
ofs_neighbors = 0
if self.jointdata:
ofs_joints = self.filesize
self.filesize += len(self.jointdata) * IQM_JOINT.size
else:
ofs_joints = 0
if self.posedata:
ofs_poses = self.filesize
self.filesize += len(self.posedata) * IQM_POSE.size
else:
ofs_poses = 0
if self.animdata:
ofs_anims = self.filesize
self.filesize += len(self.animdata) * IQM_ANIMATION.size
else:
ofs_anims = 0
falign = 0
if self.framesize * self.numframes > 0:
ofs_frames = self.filesize
self.filesize += self.framesize * self.numframes * struct.calcsize('<H')
falign = (4 - (self.filesize % 4)) % 4
self.filesize += falign
else:
ofs_frames = 0
if usebbox and self.numverts > 0 and self.numframes > 0:
ofs_bounds = self.filesize
self.filesize += self.numframes * IQM_BOUNDS.size
else:
ofs_bounds = 0
file.write(IQM_HEADER.pack('INTERQUAKEMODEL'.encode('ascii'), 2, self.filesize, 0, len(self.textdata), ofs_text, len(self.meshdata), ofs_meshes, num_vertexarrays, self.numverts, ofs_vertexarrays, self.numtris, ofs_triangles, ofs_neighbors, len(self.jointdata), ofs_joints, len(self.posedata), ofs_poses, len(self.animdata), ofs_anims, self.numframes, self.framesize, ofs_frames, ofs_bounds, 0, 0, 0, 0))
file.write(self.textdata)
for mesh in self.meshdata:
file.write(IQM_MESH.pack(*mesh))
self.writeVerts(file, ofs_vdata)
self.writeTris(file)
for joint in self.jointdata:
file.write(IQM_JOINT.pack(*joint))
for pose in self.posedata:
file.write(IQM_POSE.pack(*pose))
for anim in self.animdata:
file.write(IQM_ANIMATION.pack(*anim))
for anim in self.anims:
file.write(anim.frameData(self.joints))
file.write(b'\x00' * falign)
if usebbox and self.numverts > 0 and self.numframes > 0:
for anim in self.anims:
file.write(anim.boundsData(self.joints, self.meshes))
def findArmature(context):
armature = None
for obj in context.selected_objects:
if obj.type == 'ARMATURE':
armature = obj
break
if not armature:
for obj in context.selected_objects:
if obj.type == 'MESH':
armature = obj.find_armature()
if armature:
break
return armature
def derigifyBones(context, armature, scale):
data = armature.data
defnames = []
orgbones = {}
defbones = {}
org2defs = {}
def2org = {}
defparent = {}
defchildren = {}
for bone in data.bones.values():
if bone.name.startswith('ORG-'):
orgbones[bone.name[4:]] = bone
org2defs[bone.name[4:]] = []
elif bone.name.startswith('DEF-'):
defnames.append(bone.name[4:])
defbones[bone.name[4:]] = bone
defchildren[bone.name[4:]] = []
for name, bone in defbones.items():
orgname = name
orgbone = orgbones.get(orgname)
splitname = -1
if not orgbone:
splitname = name.rfind('.')
suffix = ''
if splitname >= 0 and name[splitname+1:] in [ 'l', 'r', 'L', 'R' ]:
suffix = name[splitname:]
splitname = name.rfind('.', 0, splitname)
if splitname >= 0 and name[splitname+1:splitname+2].isdigit():
orgname = name[:splitname] + suffix
orgbone = orgbones.get(orgname)
org2defs[orgname].append(name)
def2org[name] = orgname
for defs in org2defs.values():
defs.sort()
for name in defnames:
bone = defbones[name]
orgname = def2org[name]
orgbone = orgbones.get(orgname)
defs = org2defs[orgname]
if orgbone:
i = defs.index(name)
if i == 0:
orgparent = orgbone.parent
if orgparent and orgparent.name.startswith('ORG-'):
orgpname = orgparent.name[4:]
defparent[name] = org2defs[orgpname][-1]
else:
defparent[name] = defs[i-1]
if name in defparent:
defchildren[defparent[name]].append(name)
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in defnames if bone not in defparent ]
for index, bname in enumerate(worklist):
bone = defbones[bname]
bonematrix = worldmatrix * bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bname, bone.name, index, bname in defparent and bones.get(defbones[defparent[bname]].name), bonematrix)
worklist.extend(defchildren[bname])
print('De-rigified %d bones' % len(worklist))
return bones
def collectBones(context, armature, scale):
data = armature.data
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in data.bones.values() if not bone.parent ]
for index, bone in enumerate(worklist):
bonematrix = worldmatrix * bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bone.name, bone.name, index, bone.parent and bones.get(bone.parent.name), bonematrix)
for child in bone.children:
if child not in worklist:
worklist.append(child)
print('Collected %d bones' % len(worklist))
return bones
def collectAnim(context, armature, scale, bones, action, startframe = None, endframe = None):
if not startframe or not endframe:
startframe, endframe = action.frame_range
startframe = int(startframe)
endframe = int(endframe)
print('Exporting action "%s" frames %d-%d' % (action.name, startframe, endframe))
scene = context.scene
worldmatrix = armature.matrix_world
armature.animation_data.action = action
outdata = []
for time in range(startframe, endframe+1):
scene.frame_set(time)
pose = armature.pose
outframe = []
for bone in bones:
posematrix = pose.bones[bone.origname].matrix
if bone.parent:
posematrix = pose.bones[bone.parent.origname].matrix.inverted() * posematrix
else:
posematrix = worldmatrix * posematrix
if scale != 1.0:
posematrix.translation *= scale
loc = posematrix.to_translation()
quat = posematrix.to_quaternion()
quat.normalize()
if quat.w > 0:
quat.negate()
pscale = posematrix.to_scale()
pscale.x = round(pscale.x*0x10000)/0x10000
pscale.y = round(pscale.y*0x10000)/0x10000
pscale.z = round(pscale.z*0x10000)/0x10000
outframe.append((loc, quat, pscale, posematrix))
outdata.append(outframe)
return outdata
def collectAnims(context, armature, scale, bones, animspecs):
if not armature.animation_data:
print('Armature has no animation data')
return []
actions = bpy.data.actions
animspecs = [ spec.strip() for spec in animspecs.split(',') ]
anims = []
scene = context.scene
oldaction = armature.animation_data.action
oldframe = scene.frame_current
for animspec in animspecs:
animspec = [ arg.strip() for arg in animspec.split(':') ]
animname = animspec[0]
if animname not in actions:
print('Action "%s" not found in current armature' % animname)
continue
try:
startframe = int(animspec[1])
except:
startframe = None
try:
endframe = int(animspec[2])
except:
endframe = None
try:
fps = float(animspec[3])
except:
fps = float(scene.render.fps)
try:
flags = int(animspec[4])
except:
flags = 0
framedata = collectAnim(context, armature, scale, bones, actions[animname], startframe, endframe)
anims.append(Animation(animname, framedata, fps, flags))
armature.animation_data.action = oldaction
scene.frame_set(oldframe)
return anims
def collectMeshes(context, bones, scale, matfun, useskel = True, usecol = False, filetype = 'IQM'):
vertwarn = []
objs = context.selected_objects #context.scene.objects
meshes = []
for obj in objs:
if obj.type == 'MESH':
data = obj.to_mesh(context.scene, False, 'PREVIEW')
if not data.polygons:
continue
data.calc_normals_split()
coordmatrix = obj.matrix_world
normalmatrix = coordmatrix.inverted().transposed()
if scale != 1.0:
coordmatrix = mathutils.Matrix.Scale(scale, 4) * coordmatrix
materials = {}
groups = obj.vertex_groups
uvfaces = data.uv_textures.active and data.uv_textures.active.data
uvlayer = data.uv_layers.active and data.uv_layers.active.data
colors = None
alpha = None
if usecol:
if data.vertex_colors.active:
if data.vertex_colors.active.name.startswith('alpha'):
alpha = data.vertex_colors.active.data
else:
colors = data.vertex_colors.active.data
for layer in data.vertex_colors:
if layer.name.startswith('alpha'):
if not alpha:
alpha = layer.data
elif not colors:
colors = layer.data
for face in data.polygons:
if len(face.vertices) < 3:
continue
if all([ data.vertices[i].co == data.vertices[face.vertices[0]].co for i in face.vertices[1:] ]):
continue
uvface = uvfaces and uvfaces[face.index]
material = os.path.basename(uvface.image.filepath) if uvface and uvface.image else ''
matindex = face.material_index
try:
mesh = materials[obj.name, matindex, material]
except:
try:
matprefix = (data.materials and data.materials[matindex].name) or ''
except:
matprefix = ''
mesh = Mesh(obj.name, matfun(matprefix, material), data.vertices)
meshes.append(mesh)
materials[obj.name, matindex, material] = mesh
verts = mesh.verts
vertmap = mesh.vertmap
faceverts = []
for loopidx in face.loop_indices:
loop = data.loops[loopidx]
v = data.vertices[loop.vertex_index]
vertco = coordmatrix * v.co
if not face.use_smooth:
vertno = mathutils.Vector(face.normal)
else:
vertno = mathutils.Vector(loop.normal)
vertno = normalmatrix * vertno
vertno.normalize()
# flip V axis of texture space
if uvlayer:
uv = uvlayer[loopidx].uv
vertuv = mathutils.Vector((uv[0], 1.0 - uv[1]))
else:
vertuv = mathutils.Vector((0.0, 0.0))
if colors:
vertcol = colors[loopidx].color
vertcol = (int(round(vertcol[0] * 255.0)), int(round(vertcol[1] * 255.0)), int(round(vertcol[2] * 255.0)), 255)
else:
vertcol = None
if alpha:
vertalpha = alpha[loopidx].color
if vertcol:
vertcol = (vertcol[0], vertcol[1], vertcol[2], int(round(vertalpha[0] * 255.0)))
else:
vertcol = (255, 255, 255, int(round(vertalpha[0] * 255.0)))
vertweights = []
if useskel:
for g in v.groups:
try:
vertweights.append((g.weight, bones[groups[g.group].name].index))
except:
if (groups[g.group].name, mesh.name) not in vertwarn:
vertwarn.append((groups[g.group].name, mesh.name))
print('Vertex depends on non-existent bone: %s in mesh: %s' % (groups[g.group].name, mesh.name))
if not face.use_smooth:
vertindex = len(verts)
vertkey = Vertex(vertindex, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
mesh.verts.append(vertkey)
faceverts.append(vertkey)
continue
vertkey = Vertex(v.index, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
if not verts[v.index]:
verts[v.index] = vertkey
faceverts.append(vertkey)
elif verts[v.index] == vertkey:
faceverts.append(verts[v.index])
else:
try:
vertindex = vertmap[vertkey]
faceverts.append(verts[vertindex])
except:
vertindex = len(verts)
vertmap[vertkey] = vertindex
verts.append(vertkey)
faceverts.append(vertkey)
# Quake winding is reversed
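                # fan-triangulate the polygon as (v0, v[i], v[i-1]) instead of
                # (v0, v[i-1], v[i]) so the winding order comes out flipped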
for i in range(2, len(faceverts)):
mesh.tris.append((faceverts[0], faceverts[i], faceverts[i-1]))
for mesh in meshes:
mesh.optimize()
if filetype == 'IQM':
mesh.calcTangents()
print('%s %s: generated %d triangles' % (mesh.name, mesh.material, len(mesh.tris)))
return meshes
def exportIQE(file, meshes, bones, anims):
file.write('# Inter-Quake Export\n\n')
for bone in bones:
if bone.parent:
parent = bone.parent.index
else:
parent = -1
file.write('joint "%s" %d\n' % (bone.name, parent))
if meshes:
pos = bone.localmatrix.to_translation()
orient = bone.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = bone.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in meshes)
for mesh in meshes:
file.write('\nmesh "%s"\n\tmaterial "%s"\n\n' % (mesh.name, mesh.material))
for v in mesh.verts:
file.write('vp %.8f %.8f %.8f\n\tvt %.8f %.8f\n\tvn %.8f %.8f %.8f\n' % (v.coord.x, v.coord.y, v.coord.z, v.uv.x, v.uv.y, v.normal.x, v.normal.y, v.normal.z))
if bones:
weights = '\tvb'
for weight in v.weights:
weights += ' %d %.8f' % (weight[1], weight[0])
file.write(weights + '\n')
if hascolors:
if v.color:
file.write('\tvc %.8f %.8f %.8f %.8f\n' % (v.color[0] / 255.0, v.color[1] / 255.0, v.color[2] / 255.0, v.color[3] / 255.0))
else:
file.write('\tvc 0 0 0 1\n')
file.write('\n')
for (v0, v1, v2) in mesh.tris:
file.write('fm %d %d %d\n' % (v0.index, v1.index, v2.index))
for anim in anims:
file.write('\nanimation "%s"\n\tframerate %.8f\n' % (anim.name, anim.fps))
if anim.flags&IQM_LOOP:
file.write('\tloop\n')
for frame in anim.frames:
file.write('\nframe\n')
for (pos, orient, scale, mat) in frame:
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
file.write('\n')
def exportIQM(context, filename, usemesh = True, useskel = True, usebbox = True, usecol = False, scale = 1.0, animspecs = None, matfun = (lambda prefix, image: image), derigify = False, boneorder = None):
armature = findArmature(context)
if useskel and not armature:
print('No armature selected')
return
if filename.lower().endswith('.iqm'):
filetype = 'IQM'
elif filename.lower().endswith('.iqe'):
filetype = 'IQE'
else:
print('Unknown file type: %s' % filename)
return
if useskel:
if derigify:
bones = derigifyBones(context, armature, scale)
else:
bones = collectBones(context, armature, scale)
else:
bones = {}
if boneorder:
try:
f = open(bpy_extras.io_utils.path_reference(boneorder, os.path.dirname(bpy.data.filepath), os.path.dirname(filename)), "r", encoding = "utf-8")
names = [line.strip() for line in f.readlines()]
f.close()
names = [name for name in names if name in [bone.name for bone in bones.values()]]
if len(names) != len(bones):
print('Bone order (%d) does not match skeleton (%d)' % (len(names), len(bones)))
return
print('Reordering bones')
for bone in bones.values():
bone.index = names.index(bone.name)
except:
print('Failed opening bone order: %s' % boneorder)
return
bonelist = sorted(bones.values(), key = lambda bone: bone.index)
if usemesh:
meshes = collectMeshes(context, bones, scale, matfun, useskel, usecol, filetype)
else:
meshes = []
if useskel and animspecs:
anims = collectAnims(context, armature, scale, bonelist, animspecs)
else:
anims = []
if filetype == 'IQM':
iqm = IQMFile()
iqm.addMeshes(meshes)
iqm.addJoints(bonelist)
iqm.addAnims(anims)
iqm.calcFrameSize()
iqm.calcNeighbors()
if filename:
try:
if filetype == 'IQM':
file = open(filename, 'wb')
else:
file = open(filename, 'w')
except:
print ('Failed writing to %s' % (filename))
return
if filetype == 'IQM':
iqm.export(file, usebbox)
elif filetype == 'IQE':
exportIQE(file, meshes, bonelist, anims)
file.close()
print('Saved %s file to %s' % (filetype, filename))
else:
print('No %s file was generated' % (filetype))
class ExportIQM(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
'''Export an Inter-Quake Model IQM or IQE file'''
bl_idname = "export.iqm"
bl_label = 'Export IQM'
filename_ext = ".iqm"
animspec = bpy.props.StringProperty(name="Animations", description="Animations to export", maxlen=1024, default="")
usemesh = bpy.props.BoolProperty(name="Meshes", description="Generate meshes", default=True)
useskel = bpy.props.BoolProperty(name="Skeleton", description="Generate skeleton", default=True)
usebbox = bpy.props.BoolProperty(name="Bounding boxes", description="Generate bounding boxes", default=True)
usecol = bpy.props.BoolProperty(name="Vertex colors", description="Export vertex colors", default=False)
usescale = bpy.props.FloatProperty(name="Scale", description="Scale of exported model", default=1.0, min=0.0, step=50, precision=2)
#usetrans = bpy.props.FloatVectorProperty(name="Translate", description="Translate position of exported model", step=50, precision=2, size=3)
matfmt = bpy.props.EnumProperty(name="Materials", description="Material name format", items=[("m+i-e", "material+image-ext", ""), ("m", "material", ""), ("i", "image", "")], default="m+i-e")
derigify = bpy.props.BoolProperty(name="De-rigify", description="Export only deformation bones from rigify", default=False)
boneorder = bpy.props.StringProperty(name="Bone order", description="Override ordering of bones", subtype="FILE_NAME", default="")
def execute(self, context):
if self.properties.matfmt == "m+i-e":
matfun = lambda prefix, image: prefix + os.path.splitext(image)[0]
elif self.properties.matfmt == "m":
matfun = lambda prefix, image: prefix
else:
matfun = lambda prefix, image: image
exportIQM(context, self.properties.filepath, self.properties.usemesh, self.properties.useskel, self.properties.usebbox, self.properties.usecol, self.properties.usescale, self.properties.animspec, matfun, self.properties.derigify, self.properties.boneorder)
return {'FINISHED'}
def check(self, context):
filepath = bpy.path.ensure_ext(self.filepath, '.iqm')
filepathalt = bpy.path.ensure_ext(self.filepath, '.iqe')
if filepath != self.filepath and filepathalt != self.filepath:
self.filepath = filepath
return True
return False
def menu_func(self, context):
self.layout.operator(ExportIQM.bl_idname, text="Inter-Quake Model (.iqm, .iqe)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
|
b6abf7532563384840be83081b3b6e943994add8
|
4434dd8bf1d177c155300860d77fb77c3d6c65dd
|
/watchman/integration/test_trigger_chdir.py
|
f0043269c1cb5efefab6f48b80a44ca5af4f6910
|
[
"MIT"
] |
permissive
|
facebook/watchman
|
528fc282642d29ea4effcc9f7bb9cd04547e1248
|
0f6c285a58df680c5586593b87c779907aa4a0ac
|
refs/heads/main
| 2023-08-31T21:32:14.180520
| 2023-08-31T01:07:08
| 2023-08-31T01:07:08
| 6,930,489
| 10,583
| 1,063
|
MIT
| 2023-08-16T18:35:09
| 2012-11-29T23:35:52
|
C++
|
UTF-8
|
Python
| false
| false
| 9,252
|
py
|
test_trigger_chdir.py
|
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import os.path
import sys
import time
from watchman.integration.lib import HELPER_ROOT, WatchmanTestCase
TRIG_CWD = os.path.join(HELPER_ROOT, "trig-cwd.py")
CAT_PY = os.path.join(HELPER_ROOT, "cat.py")
@WatchmanTestCase.expand_matrix
class TestTrigger(WatchmanTestCase.WatchmanTestCase):
def fileContains(self, file_name, thing) -> bool:
if not os.path.exists(file_name):
return False
thing = thing + "\n"
with open(file_name, "r") as f:
return thing in f
def fileHasValidJson(self, file_name) -> bool:
if not os.path.exists(file_name):
return False
try:
with open(file_name, "r") as f:
json.load(f)
return True
except Exception:
return False
def checkOSApplicability(self) -> None:
if os.name == "nt":
self.skipTest("no append on Windows")
def test_triggerChdir(self) -> None:
root = self.mkdtemp()
os.mkdir(os.path.join(root, "sub"))
self.watchmanCommand("watch", root)
self.watchmanCommand(
"trigger",
root,
{
"name": "cap",
"command": [sys.executable, TRIG_CWD],
"stdout": ">%s" % os.path.join(root, "trig.log"),
"expression": ["suffix", "txt"],
"stdin": "/dev/null",
"chdir": "sub",
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(
lambda: self.fileContains(
os.path.join(root, "trig.log"), "PWD=" + os.path.join(root, "sub")
)
)
self.assertWaitFor(
lambda: self.fileContains(
os.path.join(root, "trig.log"), "WATCHMAN_EMPTY_ENV_VAR="
)
)
def test_triggerChdirRelativeRoot(self) -> None:
root = self.mkdtemp()
os.mkdir(os.path.join(root, "sub1"))
os.mkdir(os.path.join(root, "sub1", "sub2"))
self.watchmanCommand("watch", root)
self.watchmanCommand(
"trigger",
root,
{
"name": "cap",
"command": [sys.executable, TRIG_CWD],
"stdout": ">%s" % os.path.join(root, "trig.log"),
"expression": ["suffix", "txt"],
"relative_root": "sub1",
"stdin": "/dev/null",
"chdir": "sub2",
},
)
self.touchRelative(root, "sub1", "A.txt")
self.assertWaitFor(
lambda: self.fileContains(
os.path.join(root, "trig.log"),
"PWD=" + os.path.join(root, "sub1", "sub2"),
)
)
self.assertWaitFor(
lambda: self.fileContains(
os.path.join(root, "trig.log"), "WATCHMAN_ROOT=" + root
)
)
self.assertWaitFor(
lambda: self.fileContains(
os.path.join(root, "trig.log"),
"WATCHMAN_RELATIVE_ROOT=" + os.path.join(root, "sub1"),
)
)
def test_triggerMaxFiles(self) -> None:
root = self.mkdtemp()
with open(os.path.join(root, ".watchmanconfig"), "w") as f:
f.write(json.dumps({"settle": 200}))
self.watchmanCommand("watch", root)
self.watchmanCommand(
"trigger",
root,
{
"name": "cap",
"command": [sys.executable, TRIG_CWD],
"stdout": ">>%s" % os.path.join(root, "trig.log"),
"expression": ["suffix", "txt"],
"stdin": ["name"],
"max_files_stdin": 2,
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(
lambda: self.fileContains(os.path.join(root, "trig.log"), "PWD=" + root)
)
self.assertTrue(
not self.fileContains(
os.path.join(root, "trig.log"), "WATCHMAN_FILES_OVERFLOW=true"
),
msg="No overflow for a single file",
)
deadline = time.time() + 5
overflown = False
while time.time() < deadline:
os.unlink(os.path.join(root, "trig.log"))
self.touchRelative(root, "B.txt")
self.touchRelative(root, "A.txt")
self.touchRelative(root, "C.txt")
self.touchRelative(root, "D.txt")
self.assertWaitFor(
lambda: self.fileContains(os.path.join(root, "trig.log"), "PWD=" + root)
)
if self.fileContains(
os.path.join(root, "trig.log"), "WATCHMAN_FILES_OVERFLOW=true"
):
overflown = True
break
self.assertTrue(overflown, "Observed WATCHMAN_FILES_OVERFLOW")
def test_triggerNamePerLine(self) -> None:
root = self.mkdtemp()
self.watchmanCommand("watch", root)
log_file = os.path.join(root, "trig.log")
self.watchmanCommand(
"trigger",
root,
{
"name": "cat",
"command": [sys.executable, CAT_PY],
"stdout": ">%s" % log_file,
"expression": ["suffix", "txt"],
"stdin": "NAME_PER_LINE",
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(lambda: self.fileContains(log_file, "A.txt"))
self.touchRelative(root, "B.txt")
self.touchRelative(root, "A.txt")
self.assertWaitFor(
lambda: self.fileContains(log_file, "A.txt")
and self.fileContains(log_file, "B.txt")
)
with open(log_file, "r") as f:
self.assertEqual(["A.txt\n", "B.txt\n"], sorted(f.readlines()))
def test_triggerNamePerLineRelativeRoot(self) -> None:
root = self.mkdtemp()
os.mkdir(os.path.join(root, "subdir"))
self.watchmanCommand("watch", root)
log_file = os.path.join(root, "trig.log")
self.watchmanCommand(
"trigger",
root,
{
"name": "cat",
"command": [sys.executable, CAT_PY],
"relative_root": "subdir",
"stdout": ">%s" % log_file,
"expression": ["suffix", "txt"],
"stdin": "NAME_PER_LINE",
},
)
self.touchRelative(root, "A.txt")
self.touchRelative(root, "subdir", "B.txt")
self.assertWaitFor(lambda: self.fileContains(log_file, "B.txt"))
def test_triggerNamePerLineAppend(self) -> None:
root = self.mkdtemp()
self.watchmanCommand("watch", root)
log_file = os.path.join(root, "trig.log")
self.watchmanCommand(
"trigger",
root,
{
"name": "cat",
"command": [sys.executable, CAT_PY],
"stdout": ">>%s" % log_file,
"expression": ["suffix", "txt"],
"stdin": "NAME_PER_LINE",
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(lambda: self.fileContains(log_file, "A.txt"))
self.touchRelative(root, "B.txt")
self.assertWaitFor(
lambda: self.fileContains(log_file, "A.txt")
and self.fileContains(log_file, "B.txt")
)
with open(log_file, "r") as f:
self.assertEqual(["A.txt\n", "B.txt\n"], sorted(f.readlines()))
def test_triggerJsonNameOnly(self) -> None:
root = self.mkdtemp()
self.watchmanCommand("watch", root)
log_file = os.path.join(root, "trig.log")
self.watchmanCommand(
"trigger",
root,
{
"name": "cat",
"command": [sys.executable, CAT_PY],
"stdout": ">%s" % log_file,
"expression": ["suffix", "txt"],
"stdin": ["name"],
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(lambda: self.fileHasValidJson(log_file))
with open(log_file, "r") as f:
data = json.load(f)
self.assertEqual(["A.txt"], data)
def test_triggerJsonNameAndSize(self) -> None:
root = self.mkdtemp()
self.watchmanCommand("watch", root)
log_file = os.path.join(root, "trig.log")
self.watchmanCommand(
"trigger",
root,
{
"name": "cat",
"command": [sys.executable, CAT_PY],
"stdout": ">%s" % log_file,
"expression": ["suffix", "txt"],
"stdin": ["name", "size"],
},
)
self.touchRelative(root, "A.txt")
self.assertWaitFor(lambda: self.fileHasValidJson(log_file))
with open(log_file, "r") as f:
data = json.load(f)
self.assertEqual("A.txt", data[0]["name"])
self.assertEqual(0, data[0]["size"])
|
1664a97f255ce9311480d397832fa5933697f5e8
|
21116da0aa0a14af210f34d6dd237d60a0d9c863
|
/test/mock_rcon.py
|
c5f7c359833e8c1ea026dd31054e5a08e5331a56
|
[
"MIT"
] |
permissive
|
nicolaschan/minecraft-backup
|
7d2edf36d165d8789fc8b2287ab41c2d20b00710
|
8a04318f93331ef2488db06d48e8e32d83f8575f
|
refs/heads/master
| 2023-02-07T00:43:49.523622
| 2021-07-30T06:58:25
| 2021-07-30T06:58:25
| 67,576,926
| 207
| 52
|
MIT
| 2023-02-05T10:37:30
| 2016-09-07T06:01:14
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
mock_rcon.py
|
# Reference: https://docs.python.org/3/library/socketserver.html
import codecs
import socketserver
import sys
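# Each RCON packet is framed as a 4-byte little-endian length, a 4-byte request id
# (ignored here), a 4-byte type, and a NUL-terminated body plus a trailing NUL.
# Type 3 is an auth request: if the body does not match the password in sys.argv[2],
# a canned failure packet (request id 0xFFFFFFFF) is returned. Type 2 is a command,
# which is echoed to stdout; anything that is not a failed auth gets a canned empty reply.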
class Handler(socketserver.BaseRequestHandler):
def handle(self):
while True:
length = int.from_bytes(self.request.recv(4), 'little')
if not length:
continue
self.request.recv(4)
type_ = int.from_bytes(self.request.recv(4), 'little')
self.data = self.request.recv(length - 8)[:-2].decode('utf-8')
if self.data:
if type_ == 2:
print(self.data)
sys.stdout.flush()
try:
if type_ == 3 and self.data != sys.argv[2]:
self.request.sendall(codecs.decode('0a000000ffffffff020000000000', 'hex'))
else:
self.request.sendall(codecs.decode('0a00000010000000020000000000', 'hex'))
except:
break
if __name__ == "__main__":
HOST, PORT = "localhost", int(sys.argv[1])
with socketserver.ThreadingTCPServer((HOST, PORT), Handler) as server:
server.serve_forever()
|
e613f28974ee7a5c2d4220b52e4d3c812fb2aa47
|
2307bfaa6faf94297d57ad4d146f3c5ad10ebcd5
|
/tests/ble_driver_test.py
|
2b9143cd29bc3677f2fb6e8fba985beb871bc3b8
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NordicSemiconductor/pc-ble-driver-py
|
9e979e246015c457486df8060e0c529e6b1559fc
|
a6b36d3a8924b809fe86267ba3f8079dc173fb27
|
refs/heads/master
| 2023-09-01T12:05:07.829964
| 2022-07-08T05:13:59
| 2022-07-08T05:13:59
| 60,178,516
| 131
| 119
|
NOASSERTION
| 2023-07-01T05:24:08
| 2016-06-01T13:18:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
ble_driver_test.py
|
import unittest
from pc_ble_driver_py import config
config.__conn_ic_id__ = 'NRF52'
from pc_ble_driver_py.ble_driver import Flasher
class FlasherParserTestCase(unittest.TestCase):
raw_data = [
'17', 'A5', 'D8', '46', # magic number
'02', # struct version
'FF', 'FF', 'FF', # (reserved for future use)
'00', '00', '00', '00', # revision hash
'04', '01', '02', # major, minor, patch
'FF', # (reserved for future use)
'05', # softdevice ble api number
'01', # transport type
'FF', 'FF', # (reserved for future use)
'40', '42', '0F', '00' # baud rate
]
raw_data_wrong_format = raw_data[::2]
def test_invalid_parse(self):
with self.assertRaises(IndexError): Flasher.parse_fw_struct(self.raw_data_wrong_format)
def test_valid_parse(self):
self.assertEqual(
Flasher.parse_fw_struct(self.raw_data),
{
'len': 24,
'magic_number': ['17', 'A5', 'D8', '46'],
'version': '4.1.4',
'baud_rate': 1000000,
'api_version': 5
}
)
class FlasherMagicNumberTestCase(unittest.TestCase):
def test_invalid_number(self):
self.assertFalse(Flasher.is_valid_magic_number(['17', 'A5', 'D8', '45']))
def test_valid_number(self):
self.assertTrue(Flasher.is_valid_magic_number(['17', 'A5', 'D8', '46']))
class FlasherVersionTestCase(unittest.TestCase):
def test_invalid_version(self):
self.assertFalse(Flasher.is_valid_version('4.0.0'))
def test_valid_version(self):
self.assertTrue(Flasher.is_valid_version('4.1.4'))
class FlasherBaudRateTestCase(unittest.TestCase):
def test_invalid_baud_rate(self):
self.assertFalse(Flasher.is_valid_baud_rate(115200))
def test_valid_baud_rate(self):
self.assertTrue(Flasher.is_valid_baud_rate(1000000))
if __name__ == '__main__':
unittest.main()
|
4d944c1801d8d4013911aa4e6f879e0a118e177d
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/graph__networkx__d3__dot_graphviz/graphviz__d3/generate_hello_world.py
|
029820d4e6f897fc61a9aae72da42805269d1f86
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
generate_hello_world.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
from common import generate
# pip install graphviz
from graphviz import Digraph
def get_graph():
g = Digraph("G")
g.edge("Hello", "World")
return g
FILE_NAME = "hello_world.html"
if __name__ == "__main__":
with open(FILE_NAME, "w", encoding="utf-8") as f:
g = get_graph()
text = generate(g)
f.write(text)
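# Editor note: for reference, the Digraph built in get_graph() serialises
# (via g.source) to DOT text along the lines of:
#   digraph G {
#       Hello -> World
#   }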
|
fef4d298c553353c4d92adc4fad1b2e28628dc28
|
611e0544ff871e5df58f13a5f2898102f521ec8e
|
/tests/kafkatest/services/connect.py
|
da47b90d4241c3c6213f11385e12f90dad7cd1c2
|
[
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"W3C",
"CC0-1.0",
"GPL-1.0-or-later",
"CPL-1.0",
"GPL-2.0-or-later",
"LicenseRef-scancode-generic-export-compliance",
"LicenseRef-scancode-other-permissive",
"CC-PDDC",
"BSD-3-Clause",
"APSL-2.0",
"LicenseRef-scancode-free-unknown",
"EPL-2.0",
"CDDL-1.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"EPL-1.0",
"Classpath-exception-2.0",
"CDDL-1.1",
"BSD-2-Clause",
"WTFPL"
] |
permissive
|
confluentinc/kafka
|
3b0830c0afd81bc84ff409fa9eff61418636d697
|
cae0baef40b0d5d97af32256800492cb9d6471df
|
refs/heads/master
| 2023-09-03T12:54:24.118935
| 2023-08-31T18:05:22
| 2023-08-31T18:05:22
| 37,555,321
| 216
| 235
|
Apache-2.0
| 2023-09-14T12:05:20
| 2015-06-16T20:48:28
|
Java
|
UTF-8
|
Python
| false
| false
| 26,910
|
py
|
connect.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
import random
import signal
import time
import requests
from ducktape.errors import DucktapeError
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka.util import fix_opts_for_new_jvm
class ConnectServiceBase(KafkaPathResolverMixin, Service):
"""Base class for Kafka Connect services providing some common settings and functionality"""
PERSISTENT_ROOT = "/mnt/connect"
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect.properties")
# The log file contains normal log4j logs written using a file appender. stdout and stderr are handled separately
# so they can be used for other output, e.g. verifiable source & sink.
LOG_FILE = os.path.join(PERSISTENT_ROOT, "connect.log")
STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "connect.stdout")
STDERR_FILE = os.path.join(PERSISTENT_ROOT, "connect.stderr")
LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect-log4j.properties")
PID_FILE = os.path.join(PERSISTENT_ROOT, "connect.pid")
EXTERNAL_CONFIGS_FILE = os.path.join(PERSISTENT_ROOT, "connect-external-configs.properties")
CONNECT_REST_PORT = 8083
HEAP_DUMP_FILE = os.path.join(PERSISTENT_ROOT, "connect_heap_dump.bin")
# Currently the Connect worker supports waiting on four modes:
STARTUP_MODE_INSTANT = 'INSTANT'
"""STARTUP_MODE_INSTANT: Start Connect worker and return immediately"""
STARTUP_MODE_LOAD = 'LOAD'
"""STARTUP_MODE_LOAD: Start Connect worker and return after discovering and loading plugins"""
STARTUP_MODE_LISTEN = 'LISTEN'
"""STARTUP_MODE_LISTEN: Start Connect worker and return after opening the REST port."""
STARTUP_MODE_JOIN = 'JOIN'
"""STARTUP_MODE_JOIN: Start Connect worker and return after joining the group."""
logs = {
"connect_log": {
"path": LOG_FILE,
"collect_default": True},
"connect_stdout": {
"path": STDOUT_FILE,
"collect_default": False},
"connect_stderr": {
"path": STDERR_FILE,
"collect_default": True},
"connect_heap_dump_file": {
"path": HEAP_DUMP_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, kafka, files, startup_timeout_sec=60,
include_filestream_connectors=False):
super(ConnectServiceBase, self).__init__(context, num_nodes)
self.kafka = kafka
self.security_config = kafka.security_config.client_config()
self.files = files
self.startup_mode = self.STARTUP_MODE_LISTEN
self.startup_timeout_sec = startup_timeout_sec
self.environment = {}
self.external_config_template_func = None
self.include_filestream_connectors = include_filestream_connectors
self.logger.debug("include_filestream_connectors % s", include_filestream_connectors)
def pids(self, node):
"""Return process ids for Kafka Connect processes."""
try:
return [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=int)]
except:
return []
def set_configs(self, config_template_func, connector_config_templates=None):
"""
Set configurations for the worker and the connector to run on
it. These are not provided in the constructor because the worker
config generally needs access to ZK/Kafka services to
create the configuration.
"""
self.config_template_func = config_template_func
self.connector_config_templates = connector_config_templates if connector_config_templates else []
def set_external_configs(self, external_config_template_func):
"""
Set the properties that will be written in the external file properties
as used by the org.apache.kafka.common.config.provider.FileConfigProvider.
When this is used, the worker configuration must also enable the FileConfigProvider.
This is not provided in the constructor in case the worker
config generally needs access to ZK/Kafka services to
create the configuration.
"""
self.external_config_template_func = external_config_template_func
def listening(self, node):
try:
self.list_connectors(node)
self.logger.debug("Connect worker started serving REST at: '%s:%s')", node.account.hostname,
self.CONNECT_REST_PORT)
return True
except requests.exceptions.ConnectionError:
self.logger.debug("REST resources are not loaded yet")
return False
def start(self, mode=None, **kwargs):
if mode:
self.startup_mode = mode
super(ConnectServiceBase, self).start(**kwargs)
def start_and_return_immediately(self, node, worker_type, remote_connector_configs):
cmd = self.start_cmd(node, remote_connector_configs)
self.logger.debug("Connect %s command: %s", worker_type, cmd)
node.account.ssh(cmd)
def start_and_wait_to_load_plugins(self, node, worker_type, remote_connector_configs):
with node.account.monitor_log(self.LOG_FILE) as monitor:
self.start_and_return_immediately(node, worker_type, remote_connector_configs)
monitor.wait_until('Kafka version', timeout_sec=self.startup_timeout_sec,
err_msg="Never saw message indicating Kafka Connect finished startup on node: " +
"%s in condition mode: %s" % (str(node.account), self.startup_mode))
def start_and_wait_to_start_listening(self, node, worker_type, remote_connector_configs):
self.start_and_return_immediately(node, worker_type, remote_connector_configs)
wait_until(lambda: self.listening(node), timeout_sec=self.startup_timeout_sec,
err_msg="Kafka Connect failed to start on node: %s in condition mode: %s" %
(str(node.account), self.startup_mode))
def start_and_wait_to_join_group(self, node, worker_type, remote_connector_configs):
if worker_type != 'distributed':
raise RuntimeError("Cannot wait for joined group message for %s" % worker_type)
with node.account.monitor_log(self.LOG_FILE) as monitor:
self.start_and_return_immediately(node, worker_type, remote_connector_configs)
monitor.wait_until('Joined group', timeout_sec=self.startup_timeout_sec,
err_msg="Never saw message indicating Kafka Connect joined group on node: " +
"%s in condition mode: %s" % (str(node.account), self.startup_mode))
def stop_node(self, node, clean_shutdown=True, await_shutdown=None):
if await_shutdown is None:
await_shutdown = clean_shutdown
self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Kafka Connect on " + str(node.account) \
+ " and " + ("" if await_shutdown else "not ") + "awaiting shutdown")
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=True)
if await_shutdown:
for pid in pids:
wait_until(lambda: not node.account.alive(pid), timeout_sec=self.startup_timeout_sec, err_msg="Kafka Connect process on " + str(
node.account) + " took too long to exit")
node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)
def restart(self, clean_shutdown=True):
# We don't want to do any clean up here, just restart the process.
for node in self.nodes:
self.logger.info("Restarting Kafka Connect on " + str(node.account))
self.restart_node(node, clean_shutdown)
def restart_node(self, node, clean_shutdown=True):
self.stop_node(node, clean_shutdown)
self.start_node(node)
def clean_node(self, node):
node.account.kill_process("connect", clean_shutdown=False, allow_fail=True)
self.security_config.clean_node(node)
other_files = " ".join(self.config_filenames() + self.files)
node.account.ssh("rm -rf -- %s %s" % (ConnectServiceBase.PERSISTENT_ROOT, other_files), allow_fail=False)
def config_filenames(self):
return [os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties") for idx, template in enumerate(self.connector_config_templates or [])]
def list_connectors(self, node=None, **kwargs):
return self._rest_with_retry('/connectors', node=node, **kwargs)
def create_connector(self, config, node=None, **kwargs):
create_request = {
'name': config['name'],
'config': config
}
return self._rest_with_retry('/connectors', create_request, node=node, method="POST", **kwargs)
def get_connector(self, name, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name, node=node, **kwargs)
def get_connector_config(self, name, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name + '/config', node=node, **kwargs)
def set_connector_config(self, name, config, node=None, **kwargs):
# Unlike many other calls, a 409 when setting a connector config is expected if the connector already exists.
# However, we also might see 409s for other reasons (e.g. rebalancing). So we still perform retries at the cost
# of tests possibly taking longer to ultimately fail. Tests that care about this can explicitly override the
# number of retries.
return self._rest_with_retry('/connectors/' + name + '/config', config, node=node, method="PUT", **kwargs)
def get_connector_tasks(self, name, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name + '/tasks', node=node, **kwargs)
def delete_connector(self, name, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name, node=node, method="DELETE", **kwargs)
def get_connector_status(self, name, node=None):
return self._rest('/connectors/' + name + '/status', node=node)
def restart_connector(self, name, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name + '/restart', node=node, method="POST", **kwargs)
def restart_connector_and_tasks(self, name, only_failed, include_tasks, node=None, **kwargs):
return self._rest_with_retry('/connectors/' + name + '/restart?onlyFailed=' + only_failed + '&includeTasks=' + include_tasks, node=node, method="POST", **kwargs)
def restart_task(self, connector_name, task_id, node=None):
return self._rest('/connectors/' + connector_name + '/tasks/' + str(task_id) + '/restart', node=node, method="POST")
def pause_connector(self, name, node=None):
return self._rest('/connectors/' + name + '/pause', node=node, method="PUT")
def resume_connector(self, name, node=None):
return self._rest('/connectors/' + name + '/resume', node=node, method="PUT")
def list_connector_plugins(self, node=None):
return self._rest('/connector-plugins/', node=node)
def validate_config(self, connector_type, validate_request, node=None):
return self._rest('/connector-plugins/' + connector_type + '/config/validate', validate_request, node=node, method="PUT")
def _rest(self, path, body=None, node=None, method="GET"):
if node is None:
node = random.choice(self.nodes)
meth = getattr(requests, method.lower())
url = self._base_url(node) + path
self.logger.debug("Kafka Connect REST request: %s %s %s %s", node.account.hostname, url, method, body)
resp = meth(url, json=body)
self.logger.debug("%s %s response: %d", url, method, resp.status_code)
if resp.status_code > 400:
self.logger.debug("Connect REST API error for %s: %d %s", resp.url, resp.status_code, resp.text)
raise ConnectRestError(resp.status_code, resp.text, resp.url)
if resp.status_code == 204 or resp.status_code == 202:
return None
else:
return resp.json()
def _rest_with_retry(self, path, body=None, node=None, method="GET", retries=40, retry_backoff=.25):
"""
Invokes a REST API with retries for errors that may occur during normal operation (notably 409 CONFLICT
responses that can occur due to rebalancing or 404 when the connect resources are not initialized yet).
"""
exception_to_throw = None
for i in range(0, retries + 1):
try:
return self._rest(path, body, node, method)
except ConnectRestError as e:
exception_to_throw = e
if e.status != 409 and e.status != 404:
break
time.sleep(retry_backoff)
raise exception_to_throw
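    # Example (editor note): callers can tune the retry budget per request, e.g.
    #   self._rest_with_retry('/connectors', node=node, retries=5, retry_backoff=0.5)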
def _base_url(self, node):
return 'http://' + node.account.externally_routable_ip + ':' + str(self.CONNECT_REST_PORT)
def append_to_environment_variable(self, envvar, value):
env_opts = self.environment[envvar]
if env_opts is None:
env_opts = "\"%s\"" % value
else:
env_opts = "\"%s %s\"" % (env_opts.strip('\"'), value)
self.environment[envvar] = env_opts
def maybe_append_filestream_connectors_to_classpath(self):
if self.include_filestream_connectors:
return self.append_module_to_classpath("file")
else:
self.logger.info("Starting Connect without filestream connectors in the CLASSPATH")
return ""
def append_test_plugins_to_classpath(self):
return self.append_module_to_classpath("test-plugins")
def append_module_to_classpath(self, module):
cwd = os.getcwd()
relative_path = "/connect/" + module + "/build/libs/"
local_dir = cwd + relative_path
lib_dir = self.path.home() + relative_path
for pwd, dirs, files in os.walk(local_dir):
for file in files:
if file.endswith(".jar"):
# Use the expected directory on the node instead of the path in the driver node
file_path = lib_dir + file
self.logger.info("Appending %s to Connect worker's CLASSPATH" % file_path)
return "export CLASSPATH=${CLASSPATH}:%s; " % file_path
self.logger.info("Jar not found within %s" % local_dir)
return ""
class ConnectStandaloneService(ConnectServiceBase):
"""Runs Kafka Connect in standalone mode."""
def __init__(self, context, kafka, files, startup_timeout_sec=60, include_filestream_connectors=False):
super(ConnectStandaloneService, self).__init__(context, 1, kafka, files, startup_timeout_sec,
include_filestream_connectors)
# For convenience since this service only makes sense with a single node
@property
def node(self):
return self.nodes[0]
def start_cmd(self, node, connector_configs):
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE
heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
self.logs["connect_heap_dump_file"]["path"]
other_kafka_opts = self.security_config.kafka_opts.strip('\"')
cmd += fix_opts_for_new_jvm(node)
cmd += "export KAFKA_OPTS=\"%s %s\"; " % (heap_kafka_opts, other_kafka_opts)
cmd += self.append_test_plugins_to_classpath()
cmd += self.maybe_append_filestream_connectors_to_classpath()
for envvar in self.environment:
cmd += "export %s=%s; " % (envvar, str(self.environment[envvar]))
cmd += "%s %s " % (self.path.script("connect-standalone.sh", node), self.CONFIG_FILE)
cmd += " ".join(connector_configs)
cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, self.STDERR_FILE, self.PID_FILE)
return cmd
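    # Shape of the assembled command (editor sketch, paths abbreviated):
    #   ( export KAFKA_LOG4J_OPTS=...; export KAFKA_OPTS="<heap opts> <security opts>";
    #     <classpath exports> connect-standalone.sh /mnt/connect/connect.properties
    #     <connector configs> & echo $! >&3 ) 1>> connect.stdout 2>> connect.stderr 3> connect.pid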
def start_node(self, node, **kwargs):
node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)
self.security_config.setup_node(node)
if self.external_config_template_func:
node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node))
node.account.create_file(self.CONFIG_FILE, self.config_template_func(node))
node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE))
remote_connector_configs = []
for idx, template in enumerate(self.connector_config_templates):
target_file = os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties")
node.account.create_file(target_file, template)
remote_connector_configs.append(target_file)
self.logger.info("Starting Kafka Connect standalone process on " + str(node.account))
if self.startup_mode == self.STARTUP_MODE_LOAD:
self.start_and_wait_to_load_plugins(node, 'standalone', remote_connector_configs)
elif self.startup_mode == self.STARTUP_MODE_INSTANT:
self.start_and_return_immediately(node, 'standalone', remote_connector_configs)
elif self.startup_mode == self.STARTUP_MODE_JOIN:
self.start_and_wait_to_join_group(node, 'standalone', remote_connector_configs)
else:
# The default mode is to wait until the complete startup of the worker
self.start_and_wait_to_start_listening(node, 'standalone', remote_connector_configs)
if not self.pids(node):
raise RuntimeError("No process ids recorded")
class ConnectDistributedService(ConnectServiceBase):
"""Runs Kafka Connect in distributed mode."""
def __init__(self, context, num_nodes, kafka, files, offsets_topic="connect-offsets",
configs_topic="connect-configs", status_topic="connect-status", startup_timeout_sec=60,
include_filestream_connectors=False):
super(ConnectDistributedService, self).__init__(context, num_nodes, kafka, files, startup_timeout_sec, include_filestream_connectors)
self.startup_mode = self.STARTUP_MODE_JOIN
self.offsets_topic = offsets_topic
self.configs_topic = configs_topic
self.status_topic = status_topic
# connector_configs argument is intentionally ignored in distributed service.
def start_cmd(self, node, connector_configs):
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE
heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
self.logs["connect_heap_dump_file"]["path"]
other_kafka_opts = self.security_config.kafka_opts.strip('\"')
cmd += "export KAFKA_OPTS=\"%s %s\"; " % (heap_kafka_opts, other_kafka_opts)
for envvar in self.environment:
cmd += "export %s=%s; " % (envvar, str(self.environment[envvar]))
cmd += self.maybe_append_filestream_connectors_to_classpath()
cmd += self.append_test_plugins_to_classpath()
cmd += "%s %s " % (self.path.script("connect-distributed.sh", node), self.CONFIG_FILE)
cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, self.STDERR_FILE, self.PID_FILE)
return cmd
def start_node(self, node, **kwargs):
node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)
self.security_config.setup_node(node)
if self.external_config_template_func:
node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node))
node.account.create_file(self.CONFIG_FILE, self.config_template_func(node))
node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE))
if self.connector_config_templates:
raise DucktapeError("Config files are not valid in distributed mode, submit connectors via the REST API")
self.logger.info("Starting Kafka Connect distributed process on " + str(node.account))
if self.startup_mode == self.STARTUP_MODE_LOAD:
self.start_and_wait_to_load_plugins(node, 'distributed', '')
elif self.startup_mode == self.STARTUP_MODE_INSTANT:
self.start_and_return_immediately(node, 'distributed', '')
elif self.startup_mode == self.STARTUP_MODE_LISTEN:
self.start_and_wait_to_start_listening(node, 'distributed', '')
else:
# The default mode is to wait until the complete startup of the worker
self.start_and_wait_to_join_group(node, 'distributed', '')
if not self.pids(node):
raise RuntimeError("No process ids recorded")
class ErrorTolerance(object):
ALL = "all"
NONE = "none"
class ConnectRestError(RuntimeError):
def __init__(self, status, msg, url):
self.status = status
self.message = msg
self.url = url
def __unicode__(self):
return "Kafka Connect REST call failed: returned " + self.status + " for " + self.url + ". Response: " + self.message
class VerifiableConnector(object):
def messages(self):
"""
Collect and parse the logs from Kafka Connect nodes. Return a list containing all parsed JSON messages generated by
this source.
"""
self.logger.info("Collecting messages from log of %s %s", type(self).__name__, self.name)
records = []
for node in self.cc.nodes:
for line in node.account.ssh_capture('cat ' + self.cc.STDOUT_FILE):
try:
data = json.loads(line)
except ValueError:
self.logger.debug("Ignoring unparseable line: %s", line)
continue
# Filter to only ones matching our name to support multiple verifiable producers
if data['name'] != self.name:
continue
data['node'] = node
records.append(data)
return records
def stop(self):
self.logger.info("Destroying connector %s %s", type(self).__name__, self.name)
self.cc.delete_connector(self.name)
class VerifiableSource(VerifiableConnector):
"""
Helper class for running a verifiable source connector on a Kafka Connect cluster and analyzing the output.
"""
def __init__(self, cc, name="verifiable-source", tasks=1, topic="verifiable", throughput=1000, complete_records=False):
self.cc = cc
self.logger = self.cc.logger
self.name = name
self.tasks = tasks
self.topic = topic
self.throughput = throughput
self.complete_records = complete_records
def committed_messages(self):
return list(filter(lambda m: 'committed' in m and m['committed'], self.messages()))
def sent_messages(self):
return list(filter(lambda m: 'committed' not in m or not m['committed'], self.messages()))
def start(self):
self.logger.info("Creating connector VerifiableSourceConnector %s", self.name)
self.cc.create_connector({
'name': self.name,
'connector.class': 'org.apache.kafka.connect.tools.VerifiableSourceConnector',
'tasks.max': self.tasks,
'topic': self.topic,
'throughput': self.throughput,
'complete.record.data': self.complete_records
})
class VerifiableSink(VerifiableConnector):
"""
Helper class for running a verifiable sink connector on a Kafka Connect cluster and analyzing the output.
"""
def __init__(self, cc, name="verifiable-sink", tasks=1, topics=["verifiable"]):
self.cc = cc
self.logger = self.cc.logger
self.name = name
self.tasks = tasks
self.topics = topics
def flushed_messages(self):
return list(filter(lambda m: 'flushed' in m and m['flushed'], self.messages()))
def received_messages(self):
return list(filter(lambda m: 'flushed' not in m or not m['flushed'], self.messages()))
def start(self):
self.logger.info("Creating connector VerifiableSinkConnector %s", self.name)
self.cc.create_connector({
'name': self.name,
'connector.class': 'org.apache.kafka.connect.tools.VerifiableSinkConnector',
'tasks.max': self.tasks,
'topics': ",".join(self.topics)
})
class MockSink(object):
def __init__(self, cc, topics, mode=None, delay_sec=10, name="mock-sink"):
self.cc = cc
self.logger = self.cc.logger
self.name = name
self.mode = mode
self.delay_sec = delay_sec
self.topics = topics
def start(self):
self.logger.info("Creating connector MockSinkConnector %s", self.name)
self.cc.create_connector({
'name': self.name,
'connector.class': 'org.apache.kafka.connect.tools.MockSinkConnector',
'tasks.max': 1,
'topics': ",".join(self.topics),
'mock_mode': self.mode,
'delay_ms': self.delay_sec * 1000
})
class MockSource(object):
def __init__(self, cc, mode=None, delay_sec=10, name="mock-source"):
self.cc = cc
self.logger = self.cc.logger
self.name = name
self.mode = mode
self.delay_sec = delay_sec
def start(self):
self.logger.info("Creating connector MockSourceConnector %s", self.name)
self.cc.create_connector({
'name': self.name,
'connector.class': 'org.apache.kafka.connect.tools.MockSourceConnector',
'tasks.max': 1,
'mock_mode': self.mode,
'delay_ms': self.delay_sec * 1000
})
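# --- Editor sketch (not from this file) ----------------------------------------
# Typical wiring of these services inside a ducktape test; the template name,
# topic, and counts are illustrative:
#
#   cc = ConnectDistributedService(test_context, num_nodes=3, kafka=kafka, files=[])
#   cc.set_configs(lambda node: render_template('connect-distributed.properties', node=node))
#   cc.start()
#   source = VerifiableSource(cc, topic='test-topic', throughput=500)
#   source.start()
#   wait_until(lambda: len(source.sent_messages()) > 0, timeout_sec=60,
#              err_msg="source connector never produced")
#   source.stop()
#   cc.stop()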
|
2d5db89a117b668a8f8b2e9fdab524b311b1803c
|
312839d6a1fe98ebf6f55a11ff3136c0d1fc4895
|
/python3/vimspector/debug_session.py
|
28d0a397cbe3fac46987479eb29c65aaf56b79b1
|
[
"Apache-2.0"
] |
permissive
|
puremourning/vimspector
|
e6e6e647919b68d740f42fcb9a4c8df635afebff
|
4e49a1782e34433410f96602640a05c9ec00a65f
|
refs/heads/master
| 2023-09-01T13:25:41.271070
| 2023-08-18T18:54:11
| 2023-08-18T18:54:11
| 134,156,823
| 4,156
| 235
|
Apache-2.0
| 2023-09-10T18:57:07
| 2018-05-20T14:19:41
|
Vim Script
|
UTF-8
|
Python
| false
| false
| 76,753
|
py
|
debug_session.py
|
# vimspector - A multi-language debugging system for Vim
# Copyright 2018 Ben Jackson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import shlex
import subprocess
import functools
import vim
import importlib
import typing
from vimspector import ( breakpoints,
code,
core_utils,
debug_adapter_connection,
disassembly,
install,
output,
stack_trace,
utils,
variables,
settings,
terminal,
installer )
from vimspector.vendor.json_minify import minify
# We cache this once, and don't allow it to change (FIXME?)
VIMSPECTOR_HOME = utils.GetVimspectorBase()
# cache of what the user entered for any option we ask them
USER_CHOICES = {}
class DebugSession( object ):
child_sessions: typing.List[ "DebugSession" ]
def CurrentSession():
def decorator( fct ):
@functools.wraps( fct )
def wrapper( self: "DebugSession", *args, **kwargs ):
active_session = self
if self._stackTraceView:
active_session = self._stackTraceView.GetCurrentSession()
if active_session is not None:
return fct( active_session, *args, **kwargs )
return fct( self, *args, **kwargs )
return wrapper
return decorator
def ParentOnly( otherwise=None ):
def decorator( fct ):
@functools.wraps( fct )
def wrapper( self: "DebugSession", *args, **kwargs ):
if self.parent_session:
return otherwise
return fct( self, *args, **kwargs )
return wrapper
return decorator
def IfConnected( otherwise=None ):
def decorator( fct ):
"""Decorator, call fct if self._connected else echo warning"""
@functools.wraps( fct )
def wrapper( self: "DebugSession", *args, **kwargs ):
if not self._connection:
utils.UserMessage(
'Vimspector not connected, start a debug session first',
persist=False,
error=True )
return otherwise
return fct( self, *args, **kwargs )
return wrapper
return decorator
def RequiresUI( otherwise=None ):
"""Decorator, call fct if self._connected else echo warning"""
def decorator( fct ):
@functools.wraps( fct )
def wrapper( self, *args, **kwargs ):
if not self.HasUI():
utils.UserMessage(
'Vimspector is not active',
persist=False,
error=True )
return otherwise
return fct( self, *args, **kwargs )
return wrapper
return decorator
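  # Usage pattern (editor note): the decorators above stack, as the methods further
  # down illustrate, e.g.
  #   @CurrentSession()
  #   @IfConnected( otherwise=[] )
  #   def GetCompletionsSync( self, ... ): ...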
def __init__( self,
session_id,
session_manager,
api_prefix,
session_name = None,
parent_session: "DebugSession" = None ):
self.session_id = session_id
self.manager = session_manager
self.name = session_name
self.parent_session = parent_session
self.child_sessions = []
if parent_session:
parent_session.child_sessions.append( self )
self._logger = logging.getLogger( __name__ + '.' + str( session_id ) )
utils.SetUpLogging( self._logger, session_id )
self._api_prefix = api_prefix
self._render_emitter = utils.EventEmitter()
self._logger.info( "**** INITIALISING NEW VIMSPECTOR SESSION FOR ID "
f"{session_id } ****" )
self._logger.info( "API is: {}".format( api_prefix ) )
self._logger.info( 'VIMSPECTOR_HOME = %s', VIMSPECTOR_HOME )
self._logger.info( 'gadgetDir = %s',
install.GetGadgetDir( VIMSPECTOR_HOME ) )
self._uiTab = None
self._logView: output.OutputView = None
self._stackTraceView: stack_trace.StackTraceView = None
self._variablesView: variables.VariablesView = None
self._outputView: output.DAPOutputView = None
self._codeView: code.CodeView = None
self._disassemblyView: disassembly.DisassemblyView = None
if parent_session:
self._breakpoints = parent_session._breakpoints
else:
self._breakpoints = breakpoints.ProjectBreakpoints(
session_id,
self._render_emitter,
self._IsPCPresentAt,
self._disassemblyView )
utils.SetSessionWindows( {} )
self._saved_variables_data = None
self._splash_screen = None
self._remote_term = None
self._adapter_term = None
self._run_on_server_exit = None
self._configuration = None
self._adapter = None
self._launch_config = None
self._ResetServerState()
def _ResetServerState( self ):
self._connection = None
self._init_complete = False
self._launch_complete = False
self._on_init_complete_handlers = []
self._server_capabilities = {}
self._breakpoints.ClearTemporaryBreakpoints()
def GetConfigurations( self, adapters ):
current_file = utils.GetBufferFilepath( vim.current.buffer )
filetypes = utils.GetBufferFiletypes( vim.current.buffer )
configurations = settings.Dict( 'configurations' )
for launch_config_file in PathsToAllConfigFiles( VIMSPECTOR_HOME,
current_file,
filetypes ):
self._logger.debug( f'Reading configurations from: {launch_config_file}' )
if not launch_config_file or not os.path.exists( launch_config_file ):
continue
with open( launch_config_file, 'r' ) as f:
database = json.loads( minify( f.read() ) )
configurations.update( database.get( 'configurations' ) or {} )
adapters.update( database.get( 'adapters' ) or {} )
filetype_configurations = configurations
if filetypes:
# filter out any configurations that have a 'filetypes' list set and it
# doesn't contain one of the current filetypes
filetype_configurations = {
k: c for k, c in configurations.items() if 'filetypes' not in c or any(
ft in c[ 'filetypes' ] for ft in filetypes
)
}
return launch_config_file, filetype_configurations, configurations
def Name( self ):
return self.name if self.name else "Unnamed-" + str( self.session_id )
def DisplayName( self ):
return self.Name() + ' (' + str( self.session_id ) + ')'
@ParentOnly()
def Start( self,
force_choose = False,
launch_variables = None,
adhoc_configurations = None ):
# We mutate launch_variables, so don't mutate the default argument.
# https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
if launch_variables is None:
launch_variables = {}
self._logger.info( "User requested start debug session with %s",
launch_variables )
current_file = utils.GetBufferFilepath( vim.current.buffer )
adapters = settings.Dict( 'adapters' )
launch_config_file = None
configurations = None
if adhoc_configurations:
configurations = adhoc_configurations
else:
( launch_config_file,
configurations,
all_configurations ) = self.GetConfigurations( adapters )
if not configurations:
utils.UserMessage( 'Unable to find any debug configurations. '
'You need to tell vimspector how to launch your '
'application.' )
return
glob.glob( install.GetGadgetDir( VIMSPECTOR_HOME ) )
for gadget_config_file in PathsToAllGadgetConfigs( VIMSPECTOR_HOME,
current_file ):
self._logger.debug( f'Reading gadget config: {gadget_config_file}' )
if not gadget_config_file or not os.path.exists( gadget_config_file ):
continue
with open( gadget_config_file, 'r' ) as f:
a = json.loads( minify( f.read() ) ).get( 'adapters' ) or {}
adapters.update( a )
if 'configuration' in launch_variables:
configuration_name = launch_variables.pop( 'configuration' )
elif force_choose:
# Always display the menu
configuration_name = utils.SelectFromList(
'Which launch configuration?',
sorted( configurations.keys() ) )
elif ( len( configurations ) == 1 and
next( iter( configurations.values() ) ).get( "autoselect", True ) ):
configuration_name = next( iter( configurations.keys() ) )
else:
# Find a single configuration with 'default' True and autoselect not False
defaults = { n: c for n, c in configurations.items()
if c.get( 'default', False )
and c.get( 'autoselect', True ) }
if len( defaults ) == 1:
configuration_name = next( iter( defaults.keys() ) )
else:
configuration_name = utils.SelectFromList(
'Which launch configuration?',
sorted( configurations.keys() ) )
if not configuration_name or configuration_name not in configurations:
return
if self.name is None:
self.name = configuration_name
if launch_config_file:
self._workspace_root = os.path.dirname( launch_config_file )
else:
self._workspace_root = os.path.dirname( current_file )
try:
configuration = configurations[ configuration_name ]
except KeyError:
# Maybe the specified one by name that's not for this filetype? Let's try
# that one...
configuration = all_configurations[ configuration_name ]
current_configuration_name = configuration_name
while 'extends' in configuration:
base_configuration_name = configuration.pop( 'extends' )
base_configuration = all_configurations.get( base_configuration_name )
if base_configuration is None:
raise RuntimeError( f"The adapter { current_configuration_name } "
f"extends configuration { base_configuration_name }"
", but this does not exist" )
core_utils.override( base_configuration, configuration )
current_configuration_name = base_configuration_name
configuration = base_configuration
adapter = configuration.get( 'adapter' )
if isinstance( adapter, str ):
adapter_dict = adapters.get( adapter )
if adapter_dict is None:
suggested_gadgets = installer.FindGadgetForAdapter( adapter )
if suggested_gadgets:
response = utils.AskForInput(
f"The specified adapter '{adapter}' is not "
"installed. Would you like to install the following gadgets? ",
' '.join( suggested_gadgets ) )
if response:
new_launch_variables = dict( launch_variables )
new_launch_variables[ 'configuration' ] = configuration_name
installer.RunInstaller(
self._api_prefix,
False, # Don't leave open
*shlex.split( response ),
              then = lambda: self.Start( launch_variables = new_launch_variables ) )
return
elif response is None:
return
utils.UserMessage( f"The specified adapter '{adapter}' is not "
"available. Did you forget to run "
"'VimspectorInstall'?",
persist = True,
error = True )
return
adapter = adapter_dict
if not adapter:
utils.UserMessage( 'No adapter configured for {}'.format(
configuration_name ),
persist=True )
return
# Pull in anything from the base(s)
# FIXME: this is copypasta from above, but sharing the code is a little icky
# due to the way it returns from this method (maybe use an exception?)
while 'extends' in adapter:
base_adapter_name = adapter.pop( 'extends' )
base_adapter = adapters.get( base_adapter_name )
if base_adapter is None:
suggested_gadgets = installer.FindGadgetForAdapter( base_adapter_name )
if suggested_gadgets:
response = utils.AskForInput(
f"The specified base adapter '{base_adapter_name}' is not "
"installed. Would you like to install the following gadgets? ",
' '.join( suggested_gadgets ) )
if response:
new_launch_variables = dict( launch_variables )
new_launch_variables[ 'configuration' ] = configuration_name
installer.RunInstaller(
self._api_prefix,
False, # Don't leave open
*shlex.split( response ),
              then = lambda: self.Start( launch_variables = new_launch_variables ) )
return
elif response is None:
return
utils.UserMessage( f"The specified base adapter '{base_adapter_name}' "
"is not available. Did you forget to run "
"'VimspectorInstall'?",
persist = True,
error = True )
return
core_utils.override( base_adapter, adapter )
adapter = base_adapter
# Additional vars as defined by VSCode:
#
# ${workspaceFolder} - the path of the folder opened in VS Code
# ${workspaceFolderBasename} - the name of the folder opened in VS Code
# without any slashes (/)
# ${file} - the current opened file
# ${relativeFile} - the current opened file relative to workspaceFolder
# ${fileBasename} - the current opened file's basename
# ${fileBasenameNoExtension} - the current opened file's basename with no
# file extension
# ${fileDirname} - the current opened file's dirname
# ${fileExtname} - the current opened file's extension
# ${cwd} - the task runner's current working directory on startup
# ${lineNumber} - the current selected line number in the active file
# ${selectedText} - the current selected text in the active file
# ${execPath} - the path to the running VS Code executable
def relpath( p, relative_to ):
if not p:
return ''
return os.path.relpath( p, relative_to )
def splitext( p ):
if not p:
return [ '', '' ]
return os.path.splitext( p )
variables = {
      'dollar': '$', # HACK. Note: '$$' also works.
'workspaceRoot': self._workspace_root,
'workspaceFolder': self._workspace_root,
'gadgetDir': install.GetGadgetDir( VIMSPECTOR_HOME ),
'file': current_file,
}
calculus = {
'relativeFileDirname': lambda: os.path.dirname( relpath( current_file,
self._workspace_root ) ),
'relativeFile': lambda: relpath( current_file,
self._workspace_root ),
'fileBasename': lambda: os.path.basename( current_file ),
'fileBasenameNoExtension':
lambda: splitext( os.path.basename( current_file ) )[ 0 ],
'fileDirname': lambda: os.path.dirname( current_file ),
'fileExtname': lambda: splitext( os.path.basename( current_file ) )[ 1 ],
# NOTE: this is the window-local cwd for the current window, *not* Vim's
# working directory.
'cwd': os.getcwd,
'unusedLocalPort': utils.GetUnusedLocalPort,
# The following, starting with uppercase letters, are 'functions' taking
# arguments.
'SelectProcess': _SelectProcess,
'PickProcess': _SelectProcess,
}
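    # Illustration (editor note; the adapter name and keys are examples only):
    # a .vimspector.json configuration referencing these substitutions might be:
    #   {
    #     "configurations": {
    #       "run-current-file": {
    #         "adapter": "debugpy",
    #         "configuration": {
    #           "request": "launch",
    #           "program": "${file}",
    #           "cwd": "${workspaceRoot}"
    #         }
    #       }
    #     }
    #   }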
# Pretend that vars passed to the launch command were typed in by the user
# (they may have been in theory)
USER_CHOICES.update( launch_variables )
variables.update( launch_variables )
try:
variables.update(
utils.ParseVariables( adapter.pop( 'variables', {} ),
variables,
calculus,
USER_CHOICES ) )
variables.update(
utils.ParseVariables( configuration.pop( 'variables', {} ),
variables,
calculus,
USER_CHOICES ) )
utils.ExpandReferencesInDict( configuration,
variables,
calculus,
USER_CHOICES )
utils.ExpandReferencesInDict( adapter,
variables,
calculus,
USER_CHOICES )
except KeyboardInterrupt:
self._Reset()
return
self._StartWithConfiguration( configuration, adapter )
def _StartWithConfiguration( self, configuration, adapter ):
def start():
self._configuration = configuration
self._adapter = adapter
self._launch_config = None
self._logger.info( 'Configuration: %s',
json.dumps( self._configuration ) )
self._logger.info( 'Adapter: %s',
json.dumps( self._adapter ) )
if self.parent_session:
# use the parent session's stuff
self._uiTab = self.parent_session._uiTab
self._stackTraceView = self.parent_session._stackTraceView
self._variablesView = self.parent_session._variablesView
self._outputView = self.parent_session._outputView
self._disassemblyView = self.parent_session._disassemblyView
self._codeView = self.parent_session._codeView
elif not self._uiTab:
self._SetUpUI()
else:
with utils.NoAutocommands():
vim.current.tabpage = self._uiTab
self._stackTraceView.AddSession( self )
self._Prepare()
if not self._StartDebugAdapter():
self._logger.info( "Failed to launch or attach to the debug adapter" )
return
self._Initialise()
if self._saved_variables_data:
self._variablesView.Load( self._saved_variables_data )
if self._connection:
self._logger.debug( "Stop debug adapter with callback: start" )
self.StopAllSessions( interactive = False, then = start )
return
start()
@ParentOnly()
def Restart( self ):
if self._configuration is None or self._adapter is None:
return self.Start()
self._StartWithConfiguration( self._configuration, self._adapter )
def Connection( self ):
return self._connection
def HasUI( self ):
return self._uiTab and self._uiTab.valid
def IsUITab( self, tab_number ):
return self.HasUI() and self._uiTab.number == tab_number
@ParentOnly()
def SwitchTo( self ):
if self.HasUI():
vim.current.tabpage = self._uiTab
self._breakpoints.UpdateUI()
@ParentOnly()
def SwitchFrom( self ):
self._breakpoints.ClearUI()
def OnChannelData( self, data ):
if self._connection is None:
# Should _not_ happen, but maybe possible due to races or vim bufs?
return
self._connection.OnData( data )
def OnServerStderr( self, data ):
if self._outputView:
self._outputView.Print( 'server', data )
def OnRequestTimeout( self, timer_id ):
self._connection.OnRequestTimeout( timer_id )
def OnChannelClosed( self ):
# TODO: Not called
self._connection = None
def StopAllSessions( self, interactive = False, then = None ):
def Next():
if self.child_sessions:
c = self.child_sessions.pop()
c.StopAllSessions( interactive = interactive, then = Next )
elif self._connection:
self._StopDebugAdapter( interactive = interactive, callback = then )
else:
then()
Next()
@ParentOnly()
@IfConnected()
def Stop( self, interactive = False ):
self._logger.debug( "Stop debug adapter with no callback" )
    self.StopAllSessions( interactive = interactive )
@ParentOnly()
def Destroy( self ):
"""Call when the vimspector session will be removed and never used again"""
if self._connection is not None:
raise RuntimeError( "Can't destroy a session with a live connection" )
if self.HasUI():
raise RuntimeError( "Can't destroy a session with an active UI" )
self.ClearBreakpoints()
self._ResetUI()
@ParentOnly()
def Reset( self, interactive = False ):
# We reset all of the child sessions in turn
self._logger.debug( "Stop debug adapter with callback: _Reset" )
self.StopAllSessions( interactive, self._Reset )
def _IsPCPresentAt( self, file_path, line ):
return self._codeView and self._codeView.IsPCPresentAt( file_path, line )
def _ResetUI( self ):
if not self.parent_session:
if self._stackTraceView:
self._stackTraceView.Reset()
if self._variablesView:
self._variablesView.Reset()
if self._outputView:
self._outputView.Reset()
if self._logView:
self._logView.Reset()
if self._codeView:
self._codeView.Reset()
if self._disassemblyView:
self._disassemblyView.Reset()
self._breakpoints.RemoveConnection( self._connection )
self._stackTraceView = None
self._variablesView = None
self._outputView = None
self._codeView = None
self._disassemblyView = None
self._remote_term = None
self._uiTab = None
if self.parent_session:
self.manager.DestroySession( self )
def _Reset( self ):
if self.parent_session:
self._ResetUI()
return
vim.vars[ 'vimspector_resetting' ] = 1
self._logger.info( "Debugging complete." )
if self.HasUI():
self._logger.debug( "Clearing down UI" )
with utils.NoAutocommands():
vim.current.tabpage = self._uiTab
self._splash_screen = utils.HideSplash( self._api_prefix,
self._splash_screen )
self._ResetUI()
vim.command( 'tabclose!' )
else:
self._ResetUI()
self._breakpoints.SetDisassemblyManager( None )
utils.SetSessionWindows( {
'breakpoints': vim.vars[ 'vimspector_session_windows' ].get(
'breakpoints' )
} )
vim.command( 'doautocmd <nomodeline> User VimspectorDebugEnded' )
vim.vars[ 'vimspector_resetting' ] = 0
# make sure that we're displaying signs in any still-open buffers
self._breakpoints.UpdateUI()
@ParentOnly( False )
def ReadSessionFile( self, session_file: str = None ):
if session_file is None:
session_file = self._DetectSessionFile( invent_one_if_not_found = False )
if session_file is None:
utils.UserMessage( f"No { settings.Get( 'session_file_name' ) } file "
"found. Specify a file with :VimspectorLoadSession "
"<filename>",
persist = True,
error = True )
return False
try:
with open( session_file, 'r' ) as f:
session_data = json.load( f )
USER_CHOICES.update(
session_data.get( 'session', {} ).get( 'user_choices', {} ) )
self._breakpoints.Load( session_data.get( 'breakpoints' ) )
# We might not _have_ a self._variablesView yet so we need a
# mechanism where we save this for later and reload when it's ready
variables_data = session_data.get( 'variables', {} )
if self._variablesView:
self._variablesView.Load( variables_data )
else:
self._saved_variables_data = variables_data
utils.UserMessage( f"Loaded session file { session_file }",
persist=True )
return True
except OSError:
self._logger.exception( f"Invalid session file { session_file }" )
utils.UserMessage( f"Session file { session_file } not found",
persist=True,
error=True )
return False
except json.JSONDecodeError:
self._logger.exception( f"Invalid session file { session_file }" )
utils.UserMessage( "The session file could not be read",
persist = True,
error = True )
return False
@ParentOnly( False )
def WriteSessionFile( self, session_file: str = None ):
if session_file is None:
session_file = self._DetectSessionFile( invent_one_if_not_found = True )
elif os.path.isdir( session_file ):
session_file = self._DetectSessionFile( invent_one_if_not_found = True,
in_directory = session_file )
try:
with open( session_file, 'w' ) as f:
f.write( json.dumps( {
'breakpoints': self._breakpoints.Save(),
'session': {
'user_choices': USER_CHOICES,
},
'variables': self._variablesView.Save() if self._variablesView else {}
} ) )
utils.UserMessage( f"Wrote { session_file }" )
return True
except OSError:
self._logger.exception( f"Unable to write session file { session_file }" )
utils.UserMessage( "The session file could not be read",
persist = True,
error = True )
return False
def _DetectSessionFile( self,
invent_one_if_not_found: bool,
in_directory: str = None ):
session_file_name = settings.Get( 'session_file_name' )
if in_directory:
# If a dir was supplied, read from there
write_directory = in_directory
file_path = os.path.join( in_directory, session_file_name )
if not os.path.exists( file_path ):
file_path = None
else:
# Otherwise, search based on the current file, and write based on CWD
current_file = utils.GetBufferFilepath( vim.current.buffer )
write_directory = os.getcwd()
# Search from the path of the file we're editing. But note that if we
# invent a file, we always use CWD as that's more like what would be
# expected.
file_path = utils.PathToConfigFile( session_file_name,
os.path.dirname( current_file ) )
if file_path:
return file_path
if invent_one_if_not_found:
return os.path.join( write_directory, session_file_name )
return None
@CurrentSession()
@IfConnected()
def StepOver( self, **kwargs ):
if self._stackTraceView.GetCurrentThreadId() is None:
return
arguments = {
'threadId': self._stackTraceView.GetCurrentThreadId(),
'granularity': self._CurrentSteppingGranularity(),
}
arguments.update( kwargs )
if not self._server_capabilities.get( 'supportsSteppingGranularity' ):
arguments.pop( 'granularity' )
self._connection.DoRequest( None, {
'command': 'next',
'arguments': arguments,
} )
    # TODO: Why is this different from StepInto and StepOut
self._stackTraceView.OnContinued( self )
self.ClearCurrentPC()
@CurrentSession()
@IfConnected()
def StepInto( self, **kwargs ):
threadId = self._stackTraceView.GetCurrentThreadId()
if threadId is None:
return
def handler( *_ ):
self._stackTraceView.OnContinued( self, { 'threadId': threadId } )
self.ClearCurrentPC()
arguments = {
'threadId': threadId,
'granularity': self._CurrentSteppingGranularity(),
}
arguments.update( kwargs )
self._connection.DoRequest( handler, {
'command': 'stepIn',
'arguments': arguments,
} )
@CurrentSession()
@IfConnected()
def StepOut( self, **kwargs ):
threadId = self._stackTraceView.GetCurrentThreadId()
if threadId is None:
return
def handler( *_ ):
self._stackTraceView.OnContinued( self, { 'threadId': threadId } )
self.ClearCurrentPC()
arguments = {
'threadId': threadId,
'granularity': self._CurrentSteppingGranularity(),
}
arguments.update( kwargs )
self._connection.DoRequest( handler, {
'command': 'stepOut',
'arguments': arguments,
} )
def _CurrentSteppingGranularity( self ):
if self._disassemblyView and self._disassemblyView.IsCurrent():
return 'instruction'
return 'statement'
@CurrentSession()
def Continue( self ):
if not self._connection:
self.Start()
return
threadId = self._stackTraceView.GetCurrentThreadId()
if threadId is None:
utils.UserMessage( 'No current thread', persist = True )
return
def handler( msg ):
self._stackTraceView.OnContinued( self, {
'threadId': threadId,
'allThreadsContinued': ( msg.get( 'body' ) or {} ).get(
'allThreadsContinued',
True )
} )
self.ClearCurrentPC()
self._connection.DoRequest( handler, {
'command': 'continue',
'arguments': {
'threadId': threadId,
},
} )
@CurrentSession()
@IfConnected()
def Pause( self ):
if self._stackTraceView.GetCurrentThreadId() is None:
utils.UserMessage( 'No current thread', persist = True )
return
self._connection.DoRequest( None, {
'command': 'pause',
'arguments': {
'threadId': self._stackTraceView.GetCurrentThreadId(),
},
} )
@IfConnected()
def PauseContinueThread( self ):
self._stackTraceView.PauseContinueThread()
@CurrentSession()
@IfConnected()
def SetCurrentThread( self ):
self._stackTraceView.SetCurrentThread()
@CurrentSession()
@IfConnected()
def ExpandVariable( self, buf = None, line_num = None ):
self._variablesView.ExpandVariable( buf, line_num )
@CurrentSession()
@IfConnected()
def SetVariableValue( self, new_value = None, buf = None, line_num = None ):
if not self._server_capabilities.get( 'supportsSetVariable' ):
return
self._variablesView.SetVariableValue( new_value, buf, line_num )
@ParentOnly()
def ReadMemory( self, length = None, offset = None ):
# We use the parent session because the actual connection is returned from
# the variables view (and might not be our self._connection) at least in
# theory.
if not self._server_capabilities.get( 'supportsReadMemoryRequest' ):
utils.UserMessage( "Server does not support memory request",
error = True )
return
connection: debug_adapter_connection.DebugAdapterConnection
connection, memoryReference = self._variablesView.GetMemoryReference()
if memoryReference is None or connection is None:
utils.UserMessage( "Cannot find memory reference for that",
error = True )
return
if length is None:
length = utils.AskForInput( 'How much data to display? ',
default_value = '1024' )
try:
length = int( length )
except ValueError:
return
if offset is None:
offset = utils.AskForInput( 'Location offset? ',
default_value = '0' )
try:
offset = int( offset )
except ValueError:
return
def handler( msg ):
self._codeView.ShowMemory( connection.GetSessionId(),
memoryReference,
length,
offset,
msg )
connection.DoRequest( handler, {
'command': 'readMemory',
'arguments': {
'memoryReference': memoryReference,
'count': int( length ),
'offset': int( offset )
}
} )
@CurrentSession()
@IfConnected()
@RequiresUI()
def ShowDisassembly( self ):
if self._disassemblyView and self._disassemblyView.WindowIsValid():
return
if not self._codeView or not self._codeView._window.valid:
return
if not self._stackTraceView:
return
if not self._server_capabilities.get( 'supportsDisassembleRequest', False ):
utils.UserMessage( "Sorry, server doesn't support that" )
return
with utils.LetCurrentWindow( self._codeView._window ):
vim.command( f'rightbelow { settings.Int( "disassembly_height" ) }new' )
self._disassemblyView = disassembly.DisassemblyView(
vim.current.window,
self._api_prefix,
self._render_emitter )
self._breakpoints.SetDisassemblyManager( self._disassemblyView )
utils.UpdateSessionWindows( {
'disassembly': utils.WindowID( vim.current.window, self._uiTab )
} )
self._disassemblyView.SetCurrentFrame(
self._connection,
self._stackTraceView.GetCurrentFrame(),
True )
def OnDisassemblyWindowScrolled( self, win_id ):
if self._disassemblyView:
self._disassemblyView.OnWindowScrolled( win_id )
@CurrentSession()
@IfConnected()
def AddWatch( self, expression ):
self._variablesView.AddWatch( self._connection,
self._stackTraceView.GetCurrentFrame(),
expression )
@CurrentSession()
@IfConnected()
def EvaluateConsole( self, expression, verbose ):
self._outputView.Evaluate( self._connection,
self._stackTraceView.GetCurrentFrame(),
expression,
verbose )
@CurrentSession()
@IfConnected()
def DeleteWatch( self ):
self._variablesView.DeleteWatch()
@CurrentSession()
@IfConnected()
def HoverEvalTooltip( self, winnr, bufnr, lnum, expression, is_hover ):
frame = self._stackTraceView.GetCurrentFrame()
# Check if RIP is in a frame
if frame is None:
self._logger.debug( 'Tooltip: Not in a stack frame' )
return ''
# Check if cursor in code window
if winnr == int( self._codeView._window.number ):
return self._variablesView.HoverEvalTooltip( self._connection,
frame,
expression,
is_hover )
return self._variablesView.HoverVarWinTooltip( bufnr,
lnum,
is_hover )
# Return variable aware function
@CurrentSession()
def CleanUpTooltip( self ):
return self._variablesView.CleanUpTooltip()
@IfConnected()
def ExpandFrameOrThread( self ):
self._stackTraceView.ExpandFrameOrThread()
@IfConnected()
def UpFrame( self ):
self._stackTraceView.UpFrame()
@IfConnected()
def DownFrame( self ):
self._stackTraceView.DownFrame()
def ToggleLog( self ):
if self.HasUI():
return self.ShowOutput( 'Vimspector' )
if self._logView and self._logView.WindowIsValid():
self._logView.Reset()
self._logView = None
return
if self._logView:
self._logView.Reset()
# TODO: The UI code is too scattered. Re-organise into a UI class that
    # just deals with these things like window layout and customisation.
vim.command( f'botright { settings.Int( "bottombar_height" ) }new' )
win = vim.current.window
self._logView = output.OutputView( win, self._api_prefix )
self._logView.AddLogFileView()
self._logView.ShowOutput( 'Vimspector' )
@RequiresUI()
def ShowOutput( self, category ):
if not self._outputView.WindowIsValid():
# TODO: The UI code is too scattered. Re-organise into a UI class that
      # just deals with these things like window layout and customisation.
# currently, this class and the CodeView share some responsibility for
# this and poking into each View class to check its window is valid also
# feels wrong.
with utils.LetCurrentTabpage( self._uiTab ):
vim.command( f'botright { settings.Int( "bottombar_height" ) }new' )
self._outputView.UseWindow( vim.current.window )
utils.UpdateSessionWindows( {
'output': utils.WindowID( vim.current.window, self._uiTab )
} )
self._outputView.ShowOutput( category )
@RequiresUI( otherwise=[] )
def GetOutputBuffers( self ):
return self._outputView.GetCategories()
@CurrentSession()
@IfConnected( otherwise=[] )
def GetCompletionsSync( self, text_line, column_in_bytes ):
if not self._server_capabilities.get( 'supportsCompletionsRequest' ):
return []
response = self._connection.DoRequestSync( {
'command': 'completions',
'arguments': {
'frameId': self._stackTraceView.GetCurrentFrame()[ 'id' ],
# TODO: encoding ? bytes/codepoints
'text': text_line,
'column': column_in_bytes
}
} )
# TODO:
# - start / length
# - sortText
return response[ 'body' ][ 'targets' ]
@CurrentSession()
@IfConnected( otherwise=[] )
def GetCommandLineCompletions( self, ArgLead, prev_non_keyword_char ):
items = []
for candidate in self.GetCompletionsSync( ArgLead, prev_non_keyword_char ):
label = candidate.get( 'text', candidate[ 'label' ] )
start = prev_non_keyword_char - 1
if 'start' in candidate and 'length' in candidate:
start = candidate[ 'start' ]
items.append( ArgLead[ 0 : start ] + label )
return items
@ParentOnly()
def RefreshSigns( self ):
if self._connection:
self._codeView.Refresh()
self._breakpoints.Refresh()
@ParentOnly()
def _SetUpUI( self ):
vim.command( '$tab split' )
# Switch to this session now that we've made it visible. Note that the
# TabEnter autocmd does trigger when the above is run, but that's before the
# following line assigns the tab to this session, so when we try to find
# this session by tab number, it's not found. So we have to manually switch
# to it when creating a new tab.
utils.Call( 'vimspector#internal#state#SwitchToSession',
self.session_id )
self._uiTab = vim.current.tabpage
mode = settings.Get( 'ui_mode' )
if mode == 'auto':
# Go vertical if there isn't enough horizontal space for at least:
# the left bar width
# + the code min width
# + the terminal min width
# + enough space for a sign column and number column?
min_width = ( settings.Int( 'sidebar_width' )
+ 1 + 2 + 3
+ settings.Int( 'code_minwidth' )
+ 1 + settings.Int( 'terminal_minwidth' ) )
min_height = ( settings.Int( 'code_minheight' ) + 1 +
settings.Int( 'topbar_height' ) + 1 +
settings.Int( 'bottombar_height' ) + 1 +
2 )
mode = ( 'vertical'
if vim.options[ 'columns' ] < min_width
else 'horizontal' )
if vim.options[ 'lines' ] < min_height:
mode = 'horizontal'
self._logger.debug( 'min_width/height: %s/%s, actual: %s/%s - result: %s',
min_width,
min_height,
vim.options[ 'columns' ],
vim.options[ 'lines' ],
mode )
if mode == 'vertical':
self._SetUpUIVertical()
else:
self._SetUpUIHorizontal()
def _SetUpUIHorizontal( self ):
# Code window
code_window = vim.current.window
self._codeView = code.CodeView( self.session_id,
code_window,
self._api_prefix,
self._render_emitter,
self._breakpoints.IsBreakpointPresentAt )
# Call stack
vim.command(
f'topleft vertical { settings.Int( "sidebar_width" ) }new' )
stack_trace_window = vim.current.window
one_third = int( vim.eval( 'winheight( 0 )' ) ) / 3
self._stackTraceView = stack_trace.StackTraceView( self.session_id,
stack_trace_window )
# Watches
vim.command( 'leftabove new' )
watch_window = vim.current.window
# Variables
vim.command( 'leftabove new' )
vars_window = vim.current.window
with utils.LetCurrentWindow( vars_window ):
vim.command( f'{ one_third }wincmd _' )
with utils.LetCurrentWindow( watch_window ):
vim.command( f'{ one_third }wincmd _' )
with utils.LetCurrentWindow( stack_trace_window ):
vim.command( f'{ one_third }wincmd _' )
self._variablesView = variables.VariablesView( self.session_id,
vars_window,
watch_window )
# Output/logging
vim.current.window = code_window
vim.command( f'rightbelow { settings.Int( "bottombar_height" ) }new' )
output_window = vim.current.window
self._outputView = output.DAPOutputView( output_window,
self._api_prefix,
session_id = self.session_id )
utils.SetSessionWindows( {
'mode': 'horizontal',
'tabpage': self._uiTab.number,
'code': utils.WindowID( code_window, self._uiTab ),
'stack_trace': utils.WindowID( stack_trace_window, self._uiTab ),
'variables': utils.WindowID( vars_window, self._uiTab ),
'watches': utils.WindowID( watch_window, self._uiTab ),
'output': utils.WindowID( output_window, self._uiTab ),
'eval': None, # updated every time eval popup is opened
'breakpoints': vim.vars[ 'vimspector_session_windows' ].get(
'breakpoints' ) # same as above, but for breakpoints
} )
with utils.RestoreCursorPosition():
with utils.RestoreCurrentWindow():
with utils.RestoreCurrentBuffer( vim.current.window ):
vim.command( 'doautocmd User VimspectorUICreated' )
def _SetUpUIVertical( self ):
# Code window
code_window = vim.current.window
self._codeView = code.CodeView( self.session_id,
code_window,
self._api_prefix,
self._render_emitter,
self._breakpoints.IsBreakpointPresentAt )
# Call stack
vim.command(
f'topleft { settings.Int( "topbar_height" ) }new' )
stack_trace_window = vim.current.window
one_third = int( vim.eval( 'winwidth( 0 )' ) ) / 3
self._stackTraceView = stack_trace.StackTraceView( self.session_id,
stack_trace_window )
# Watches
vim.command( 'leftabove vertical new' )
watch_window = vim.current.window
# Variables
vim.command( 'leftabove vertical new' )
vars_window = vim.current.window
with utils.LetCurrentWindow( vars_window ):
vim.command( f'{ one_third }wincmd |' )
with utils.LetCurrentWindow( watch_window ):
vim.command( f'{ one_third }wincmd |' )
with utils.LetCurrentWindow( stack_trace_window ):
vim.command( f'{ one_third }wincmd |' )
self._variablesView = variables.VariablesView( self.session_id,
vars_window,
watch_window )
# Output/logging
vim.current.window = code_window
vim.command( f'rightbelow { settings.Int( "bottombar_height" ) }new' )
output_window = vim.current.window
self._outputView = output.DAPOutputView( output_window,
self._api_prefix,
session_id = self.session_id )
utils.SetSessionWindows( {
'mode': 'vertical',
'tabpage': self._uiTab.number,
'code': utils.WindowID( code_window, self._uiTab ),
'stack_trace': utils.WindowID( stack_trace_window, self._uiTab ),
'variables': utils.WindowID( vars_window, self._uiTab ),
'watches': utils.WindowID( watch_window, self._uiTab ),
'output': utils.WindowID( output_window, self._uiTab ),
'eval': None, # updated every time eval popup is opened
'breakpoints': vim.vars[ 'vimspector_session_windows' ].get(
'breakpoints' ) # same as above, but for breakpoints
} )
with utils.RestoreCursorPosition():
with utils.RestoreCurrentWindow():
with utils.RestoreCurrentBuffer( vim.current.window ):
vim.command( 'doautocmd User VimspectorUICreated' )
@RequiresUI()
def ClearCurrentFrame( self ):
self.SetCurrentFrame( None )
def ClearCurrentPC( self ):
self._codeView.SetCurrentFrame( None, False )
if self._disassemblyView:
self._disassemblyView.SetCurrentFrame( None, None, False )
@RequiresUI()
def SetCurrentFrame( self, frame, reason = '' ):
if not frame:
self._variablesView.Clear()
target = self._codeView
if self._disassemblyView and self._disassemblyView.IsCurrent():
target = self._disassemblyView
if not self._codeView.SetCurrentFrame( frame,
target == self._codeView ):
return False
if self._disassemblyView:
self._disassemblyView.SetCurrentFrame( self._connection,
frame,
target == self._disassemblyView )
# the codeView.SetCurrentFrame already checked the frame was valid and
# contained a valid source
assert frame
if self._codeView.current_syntax not in ( 'ON', 'OFF' ):
self._variablesView.SetSyntax( self._codeView.current_syntax )
self._stackTraceView.SetSyntax( self._codeView.current_syntax )
else:
self._variablesView.SetSyntax( None )
self._stackTraceView.SetSyntax( None )
self._variablesView.LoadScopes( self._connection, frame )
self._variablesView.EvaluateWatches( self._connection, frame )
if reason == 'stopped':
self._breakpoints.ClearTemporaryBreakpoint( frame[ 'source' ][ 'path' ],
frame[ 'line' ] )
return True
def _StartDebugAdapter( self ):
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
f"Starting debug adapter for session {self.DisplayName()}..." )
if self._connection:
utils.UserMessage( 'The connection is already created. Please try again',
persist = True )
return False
self._logger.info( 'Starting debug adapter with: %s',
json.dumps( self._adapter ) )
self._init_complete = False
self._launch_complete = False
self._run_on_server_exit = None
self._connection_type = 'job'
if 'port' in self._adapter:
self._connection_type = 'channel'
if self._adapter[ 'port' ] == 'ask':
port = utils.AskForInput( 'Enter port to connect to: ' )
if port is None:
self._Reset()
return False
self._adapter[ 'port' ] = port
self._connection_type = self._api_prefix + self._connection_type
self._logger.debug( f"Connection Type: { self._connection_type }" )
self._adapter[ 'env' ] = self._adapter.get( 'env', {} )
if 'cwd' in self._configuration:
self._adapter[ 'cwd' ] = self._configuration[ 'cwd' ]
elif 'cwd' not in self._adapter:
self._adapter[ 'cwd' ] = os.getcwd()
vim.vars[ '_vimspector_adapter_spec' ] = self._adapter
# if the debug adapter is lame and requires a terminal or has any
# input/output on stdio, then launch it that way
if self._adapter.get( 'tty', False ):
if 'port' not in self._adapter:
utils.UserMessage( "Invalid adapter configuration. When using a tty, "
"communication must use socket. Add the 'port' to "
"the adapter config." )
return False
if 'command' not in self._adapter:
utils.UserMessage( "Invalid adapter configuration. When using a tty, "
"a command must be supplied. Add the 'command' to "
"the adapter config." )
return False
command = self._adapter[ 'command' ]
if isinstance( command, str ):
command = shlex.split( command )
self._adapter_term = terminal.LaunchTerminal(
self._api_prefix,
{
'args': command,
'cwd': self._adapter[ 'cwd' ],
'env': self._adapter[ 'env' ],
},
self._codeView._window,
self._adapter_term )
if not vim.eval( "vimspector#internal#{}#StartDebugSession( "
" {},"
" g:_vimspector_adapter_spec "
")".format( self._connection_type,
self.session_id ) ):
self._logger.error( "Unable to start debug server" )
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
[
"Unable to start or connect to debug adapter",
"",
"Check :messages and :VimspectorToggleLog for more information.",
"",
":VimspectorReset to close down vimspector",
] )
return False
else:
handlers = [ self ]
if 'custom_handler' in self._adapter:
spec = self._adapter[ 'custom_handler' ]
if isinstance( spec, dict ):
module = spec[ 'module' ]
cls = spec[ 'class' ]
else:
module, cls = spec.rsplit( '.', 1 )
try:
CustomHandler = getattr( importlib.import_module( module ), cls )
handlers = [ CustomHandler( self ), self ]
except ImportError:
self._logger.exception( "Unable to load custom adapter %s",
spec )
self._connection = debug_adapter_connection.DebugAdapterConnection(
handlers = handlers,
session_id = self.session_id,
send_func = lambda msg: utils.Call(
"vimspector#internal#{}#Send".format( self._connection_type ),
self.session_id,
msg ),
sync_timeout = self._adapter.get( 'sync_timeout' ),
async_timeout = self._adapter.get( 'async_timeout' ) )
self._logger.info( 'Debug Adapter Started' )
return True
def _StopDebugAdapter( self, interactive = False, callback = None ):
arguments = {}
def disconnect():
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
f"Shutting down debug adapter for session {self.DisplayName()}..." )
def handler( *args ):
self._splash_screen = utils.HideSplash( self._api_prefix,
self._splash_screen )
if callback:
self._logger.debug( "Setting server exit handler before disconnect" )
assert not self._run_on_server_exit
self._run_on_server_exit = callback
vim.eval( 'vimspector#internal#{}#StopDebugSession( {} )'.format(
self._connection_type,
self.session_id ) )
self._connection.DoRequest(
handler,
{
'command': 'disconnect',
'arguments': arguments,
},
failure_handler = handler,
timeout = self._connection.sync_timeout )
if not interactive:
disconnect()
elif not self._server_capabilities.get( 'supportTerminateDebuggee' ):
disconnect()
elif not self._stackTraceView.AnyThreadsRunning():
disconnect()
else:
def handle_choice( choice ):
if choice == 1:
# yes
arguments[ 'terminateDebuggee' ] = True
elif choice == 2:
# no
arguments[ 'terminateDebuggee' ] = False
elif choice <= 0:
# Abort
return
# Else, use server default
disconnect()
utils.Confirm( self._api_prefix,
"Terminate debuggee?",
handle_choice,
default_value = 3,
options = [ '(Y)es', '(N)o', '(D)efault' ],
keys = [ 'y', 'n', 'd' ] )
def _PrepareAttach( self, adapter_config, launch_config ):
attach_config = adapter_config.get( 'attach' )
if not attach_config:
return
if 'remote' in attach_config:
# FIXME: We almost want this to feed-back variables to be expanded later,
# e.g. expand variables when we use them, not all at once. This would
# remove the whole %PID% hack.
remote = attach_config[ 'remote' ]
remote_exec_cmd = self._GetRemoteExecCommand( remote )
# FIXME: Why does this not use self._GetCommands ?
pid_cmd = remote_exec_cmd + remote[ 'pidCommand' ]
self._logger.debug( 'Getting PID: %s', pid_cmd )
pid = subprocess.check_output( pid_cmd ).decode( 'utf-8' ).strip()
self._logger.debug( 'Got PID: %s', pid )
if not pid:
# FIXME: We should raise an exception here or something
utils.UserMessage( 'Unable to get PID', persist = True )
return
if 'initCompleteCommand' in remote:
initcmd = remote_exec_cmd + remote[ 'initCompleteCommand' ][ : ]
for index, item in enumerate( initcmd ):
initcmd[ index ] = item.replace( '%PID%', pid )
self._on_init_complete_handlers.append(
lambda: subprocess.check_call( initcmd ) )
commands = self._GetCommands( remote, 'attach' )
for command in commands:
cmd = remote_exec_cmd + command
for index, item in enumerate( cmd ):
cmd[ index ] = item.replace( '%PID%', pid )
self._logger.debug( 'Running remote app: %s', cmd )
self._remote_term = terminal.LaunchTerminal(
self._api_prefix,
{
'args': cmd,
'cwd': os.getcwd()
},
self._codeView._window,
self._remote_term )
else:
if attach_config[ 'pidSelect' ] == 'ask':
prop = attach_config[ 'pidProperty' ]
if prop not in launch_config:
# NOTE: We require that any custom picker process handles no-arguments
# as well as any arguments supplied in the config.
pid = _SelectProcess()
if pid is None:
return
launch_config[ prop ] = pid
return
elif attach_config[ 'pidSelect' ] == 'none':
return
raise ValueError( 'Unrecognised pidSelect {0}'.format(
attach_config[ 'pidSelect' ] ) )
if 'delay' in attach_config:
utils.UserMessage( f"Waiting ( { attach_config[ 'delay' ] } )..." )
vim.command( f'sleep { attach_config[ "delay" ] }' )
def _PrepareLaunch( self, command_line, adapter_config, launch_config ):
run_config = adapter_config.get( 'launch', {} )
if 'remote' in run_config:
remote = run_config[ 'remote' ]
remote_exec_cmd = self._GetRemoteExecCommand( remote )
commands = self._GetCommands( remote, 'run' )
for index, command in enumerate( commands ):
cmd = remote_exec_cmd + command[ : ]
full_cmd = []
for item in cmd:
if isinstance( command_line, list ):
if item == '%CMD%':
full_cmd.extend( command_line )
else:
full_cmd.append( item )
else:
full_cmd.append( item.replace( '%CMD%', command_line ) )
self._logger.debug( 'Running remote app: %s', full_cmd )
self._remote_term = terminal.LaunchTerminal(
self._api_prefix,
{
'args': full_cmd,
'cwd': os.getcwd()
},
self._codeView._window,
self._remote_term )
if 'delay' in run_config:
utils.UserMessage( f"Waiting ( {run_config[ 'delay' ]} )..." )
vim.command( f'sleep { run_config[ "delay" ] }' )
def _GetSSHCommand( self, remote ):
ssh = [ 'ssh' ] + remote.get( 'ssh', {} ).get( 'args', [] )
if 'account' in remote:
ssh.append( remote[ 'account' ] + '@' + remote[ 'host' ] )
else:
ssh.append( remote[ 'host' ] )
return ssh
def _GetShellCommand( self ):
return []
def _GetDockerCommand( self, remote ):
docker = [ 'docker', 'exec', '-t' ]
docker.append( remote[ 'container' ] )
return docker
def _GetRemoteExecCommand( self, remote ):
is_ssh_cmd = any( key in remote for key in [ 'ssh',
'host',
'account', ] )
is_docker_cmd = 'container' in remote
if is_ssh_cmd:
return self._GetSSHCommand( remote )
elif is_docker_cmd:
return self._GetDockerCommand( remote )
else:
# if it's neither docker nor ssh, run locally
return self._GetShellCommand()
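# Illustrative mapping from hypothetical 'remote' blocks to the exec prefix
# built above (values are examples only, not taken from any real config):
#   { "host": "devbox", "account": "me" }  -> [ 'ssh', 'me@devbox' ]
#   { "host": "devbox" }                   -> [ 'ssh', 'devbox' ]
#   { "container": "my_app" }              -> [ 'docker', 'exec', '-t', 'my_app' ]
#   { }                                    -> []  (run the command locally)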
def _GetCommands( self, remote, pfx ):
commands = remote.get( pfx + 'Commands', None )
if isinstance( commands, list ):
return commands
elif commands is not None:
raise ValueError( "Invalid commands; must be list" )
command = remote[ pfx + 'Command' ]
if isinstance( command, str ):
command = shlex.split( command )
if not isinstance( command, list ):
raise ValueError( "Invalid command; must be list/string" )
if not command:
raise ValueError( 'Could not determine commands for ' + pfx )
return [ command ]
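# Illustrative config shapes accepted above, shown for pfx == 'run' with
# hypothetical values: either a list of commands, or a single command given
# as a list or a string:
#   "runCommands": [ [ "gdbserver", ":1234", "%CMD%" ] ]
#   "runCommand":  [ "gdbserver", ":1234", "%CMD%" ]
#   "runCommand":  "gdbserver :1234 %CMD%"   (split with shlex)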
def _Initialise( self ):
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
f"Initializing debug session {self.DisplayName()}..." )
# For a good explanation as to why this sequence is the way it is, see
# https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522
#
# In short, we do what VSCode does:
# 1. Send the initialize request and wait for the reply
# 2a. When we receive the initialize reply, send the launch/attach request
# 2b. When we receive the initialized notification, send the breakpoints
# - if supportsConfigurationDoneRequest, send it
# - else, send the empty exception breakpoints request
# 3. When we have received both the launch/attach reply *and*
# the configuration done reply (or, if we didn't send one, a response to
# the empty exception breakpoints request), we request threads
# 4. The threads response triggers things like scopes and triggers setting
# the current frame.
#
def handle_initialize_response( msg ):
self._server_capabilities = msg.get( 'body' ) or {}
# TODO/FIXME: We assume that the capabilities are the same for all
# connections. We should fix this when we split the server bp
# representation out?
if not self.parent_session:
self._breakpoints.SetServerCapabilities( self._server_capabilities )
self._Launch()
self._connection.DoRequest( handle_initialize_response, {
'command': 'initialize',
'arguments': {
'adapterID': self._adapter.get( 'name', 'adapter' ),
'clientID': 'vimspector',
'clientName': 'vimspector',
'linesStartAt1': True,
'columnsStartAt1': True,
'locale': 'en_GB',
'pathFormat': 'path',
'supportsVariableType': True,
'supportsVariablePaging': False,
'supportsRunInTerminalRequest': True,
'supportsMemoryReferences': True,
'supportsStartDebuggingRequest': True
},
} )
def OnFailure( self, reason, request, message ):
msg = "Request for '{}' failed: {}\nResponse: {}".format( request,
reason,
message )
self._outputView.Print( 'server', msg )
def _Prepare( self ):
self._on_init_complete_handlers = []
self._logger.debug( "LAUNCH!" )
if self._launch_config is None:
self._launch_config = {}
# TODO: Should we use core_utils.override for this? That would strictly be
# a change in behaviour as dicts in the specific configuration would merge
# with dicts in the adapter, where before they would overlay
self._launch_config.update( self._adapter.get( 'configuration', {} ) )
self._launch_config.update( self._configuration[ 'configuration' ] )
request = self._configuration.get(
'remote-request',
self._launch_config.get( 'request', 'launch' ) )
if request == "attach":
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
f"Attaching to debuggee {self.DisplayName()}..." )
self._PrepareAttach( self._adapter, self._launch_config )
elif request == "launch":
self._splash_screen = utils.DisplaySplash(
self._api_prefix,
self._splash_screen,
f"Launching debuggee {self.DisplayName()}..." )
# FIXME: This cmdLine hack is not fun.
self._PrepareLaunch( self._configuration.get( 'remote-cmdLine', [] ),
self._adapter,
self._launch_config )
# FIXME: name is mandatory. Forcefully add it (we should really use the
# _actual_ name, but that isn't actually remembered at this point)
if 'name' not in self._launch_config:
self._launch_config[ 'name' ] = 'test'
def _Launch( self ):
def failure_handler( reason, msg ):
text = [
f'Initialize for session {self.DisplayName()} Failed',
'' ] + reason.splitlines() + [
'', 'Use :VimspectorReset to close' ]
self._logger.info( "Launch failed: %s", '\n'.join( text ) )
self._splash_screen = utils.DisplaySplash( self._api_prefix,
self._splash_screen,
text )
self._connection.DoRequest(
lambda msg: self._OnLaunchComplete(),
{
'command': self._launch_config[ 'request' ],
'arguments': self._launch_config
},
failure_handler )
def _OnLaunchComplete( self ):
self._launch_complete = True
self._LoadThreadsIfReady()
def _OnInitializeComplete( self ):
self._init_complete = True
self._LoadThreadsIfReady()
def _LoadThreadsIfReady( self ):
# NOTE: You might think we should only load threads on a stopped event,
# but the spec is clear:
#
# After a successful launch or attach the development tool requests the
# baseline of currently existing threads with the threads request and
# then starts to listen for thread events to detect new or terminated
# threads.
#
# Of course, specs are basically guidelines. MS's own cpptools simply
# doesn't respond to the threads request when attaching via gdbserver. At
# least it would appear that way.
#
# As it turns out this is due to a bug in gdbserver which means that
# attachment doesn't work due to sending the signal to the process group
# leader rather than the process. The workaround is to manually SIGTRAP the
# PID.
#
if self._launch_complete and self._init_complete:
self._splash_screen = utils.HideSplash( self._api_prefix,
self._splash_screen )
for h in self._on_init_complete_handlers:
h()
self._on_init_complete_handlers = []
self._stackTraceView.LoadThreads( self, True )
@CurrentSession()
@IfConnected()
@RequiresUI()
def PrintDebugInfo( self ):
def Line():
return ( "--------------------------------------------------------------"
"------------------" )
def Pretty( obj ):
if obj is None:
return [ "None" ]
return [ Line() ] + json.dumps( obj, indent=2 ).splitlines() + [ Line() ]
debugInfo = [
"Vimspector Debug Info",
Line(),
f"ConnectionType: { self._connection_type }",
"Adapter: " ] + Pretty( self._adapter ) + [
"Configuration: " ] + Pretty( self._configuration ) + [
f"API Prefix: { self._api_prefix }",
f"Launch/Init: { self._launch_complete } / { self._init_complete }",
f"Workspace Root: { self._workspace_root }",
"Launch Config: " ] + Pretty( self._launch_config ) + [
"Server Capabilities: " ] + Pretty( self._server_capabilities ) + [
"Line Breakpoints: " ] + Pretty( self._breakpoints._line_breakpoints ) + [
"Func Breakpoints: " ] + Pretty( self._breakpoints._func_breakpoints ) + [
"Ex Breakpoints: " ] + Pretty( self._breakpoints._exception_breakpoints )
self._outputView.ClearCategory( 'DebugInfo' )
self._outputView.Print( "DebugInfo", debugInfo )
self.ShowOutput( "DebugInfo" )
def OnEvent_loadedSource( self, msg ):
pass
def OnEvent_capabilities( self, msg ):
self._server_capabilities.update(
( msg.get( 'body' ) or {} ).get( 'capabilities' ) or {} )
def OnEvent_initialized( self, message ):
def OnBreakpointsDone():
self._breakpoints.Refresh()
if self._server_capabilities.get( 'supportsConfigurationDoneRequest' ):
self._connection.DoRequest(
lambda msg: self._OnInitializeComplete(),
{
'command': 'configurationDone',
}
)
else:
self._OnInitializeComplete()
self._breakpoints.SetConfiguredBreakpoints(
self._configuration.get( 'breakpoints', {} ) )
self._breakpoints.AddConnection( self._connection )
self._breakpoints.UpdateUI( OnBreakpointsDone )
def OnEvent_thread( self, message ):
self._stackTraceView.OnThreadEvent( self, message[ 'body' ] )
def OnEvent_breakpoint( self, message ):
reason = message[ 'body' ][ 'reason' ]
bp = message[ 'body' ][ 'breakpoint' ]
if reason == 'changed':
self._breakpoints.UpdatePostedBreakpoint( self._connection, bp )
elif reason == 'new':
self._breakpoints.AddPostedBreakpoint( self._connection, bp )
elif reason == 'removed':
self._breakpoints.DeletePostedBreakpoint( self._connection, bp )
else:
utils.UserMessage(
'Unrecognised breakpoint event (undocumented): {0}'.format( reason ),
persist = True )
def OnRequest_runInTerminal( self, message ):
params = message[ 'arguments' ]
if not params.get( 'cwd' ) :
params[ 'cwd' ] = self._workspace_root
self._logger.debug( 'Defaulting working directory to %s',
params[ 'cwd' ] )
term_id = self._codeView.LaunchTerminal( params )
response = {
'processId': int( utils.Call(
'vimspector#internal#{}term#GetPID'.format( self._api_prefix ),
term_id ) )
}
self._connection.DoResponse( message, None, response )
def OnEvent_terminated( self, message ):
# The debugging _session_ has terminated. This does not mean that the
# debuggee has terminated (that's the exited event).
#
# We will handle this when the server actually exits.
#
# FIXME we should always wait for this event before disconnecting or closing
# any socket connection
# self._stackTraceView.OnTerminated( self )
self.SetCurrentFrame( None )
def OnEvent_exited( self, message ):
utils.UserMessage( 'The debuggee exited with status code: {}'.format(
message[ 'body' ][ 'exitCode' ] ) )
self._stackTraceView.OnExited( self, message )
self.ClearCurrentPC()
def OnRequest_startDebugging( self, message ):
self._DoStartDebuggingRequest( message,
message[ 'arguments' ][ 'request' ],
message[ 'arguments' ][ 'configuration' ],
self._adapter )
def _DoStartDebuggingRequest( self,
message,
request_type,
launch_arguments,
adapter,
session_name = None ):
session = self.manager.NewSession(
session_name = session_name or launch_arguments.get( 'name' ),
parent_session = self )
# Inject the launch config (HACK!). This will actually mean that the
# configuration passed below is ignored.
session._launch_config = launch_arguments
session._launch_config[ 'request' ] = request_type
# FIXME: We probably do need to add a StartWithLaunchArguments and somehow
# tell the new session that it should not support "Restart" requests ?
#
# In fact, what even would Reset do... ?
session._StartWithConfiguration( { 'configuration': launch_arguments },
adapter )
self._connection.DoResponse( message, None, {} )
def OnEvent_process( self, message ):
utils.UserMessage( 'debuggee was started: {}'.format(
message[ 'body' ][ 'name' ] ) )
def OnEvent_module( self, message ):
pass
def OnEvent_continued( self, message ):
self._stackTraceView.OnContinued( self, message[ 'body' ] )
self.ClearCurrentPC()
@ParentOnly()
def Clear( self ):
self._codeView.Clear()
if self._disassemblyView:
self._disassemblyView.Clear()
self._stackTraceView.Clear()
self._variablesView.Clear()
def OnServerExit( self, status ):
self._logger.info( "The server has terminated with status %s",
status )
if self._connection is not None:
# Can be None if the server dies _before_ StartDebugSession vim function
# returns
self._connection.Reset()
self._stackTraceView.ConnectionClosed( self )
self._breakpoints.ConnectionClosed( self._connection )
self._variablesView.ConnectionClosed( self._connection )
if self._disassemblyView:
self._disassemblyView.ConnectionClosed( self._connection )
self.Clear()
self._ResetServerState()
if self._run_on_server_exit:
self._logger.debug( "Running server exit handler" )
callback = self._run_on_server_exit
self._run_on_server_exit = None
callback()
else:
self._logger.debug( "No server exit handler" )
def OnEvent_output( self, message ):
if self._outputView:
self._outputView.OnOutput( message[ 'body' ] )
def OnEvent_stopped( self, message ):
event = message[ 'body' ]
reason = event.get( 'reason' ) or '<protocol error>'
description = event.get( 'description' )
text = event.get( 'text' )
if description:
explanation = description + '(' + reason + ')'
else:
explanation = reason
if text:
explanation += ': ' + text
msg = 'Paused in thread {0} due to {1}'.format(
event.get( 'threadId', '<unknown>' ),
explanation )
utils.UserMessage( msg )
if self._outputView:
self._outputView.Print( 'server', msg )
self._stackTraceView.OnStopped( self, event )
def BreakpointsAsQuickFix( self ):
return self._breakpoints.BreakpointsAsQuickFix()
def ListBreakpoints( self ):
self._breakpoints.ToggleBreakpointsView()
def ToggleBreakpointViewBreakpoint( self ):
self._breakpoints.ToggleBreakpointViewBreakpoint()
def ToggleAllBreakpointsViewBreakpoint( self ):
self._breakpoints.ToggleAllBreakpointsViewBreakpoint()
def DeleteBreakpointViewBreakpoint( self ):
self._breakpoints.ClearBreakpointViewBreakpoint()
def JumpToBreakpointViewBreakpoint( self ):
self._breakpoints.JumpToBreakpointViewBreakpoint()
def EditBreakpointOptionsViewBreakpoint( self ):
self._breakpoints.EditBreakpointOptionsViewBreakpoint()
def JumpToNextBreakpoint( self ):
self._breakpoints.JumpToNextBreakpoint()
def JumpToPreviousBreakpoint( self ):
self._breakpoints.JumpToPreviousBreakpoint()
def JumpToProgramCounter( self ):
self._stackTraceView.JumpToProgramCounter()
def ToggleBreakpoint( self, options ):
return self._breakpoints.ToggleBreakpoint( options )
def RunTo( self, file_name, line ):
self._breakpoints.ClearTemporaryBreakpoints()
self._breakpoints.AddTemporaryLineBreakpoint( file_name,
line,
{ 'temporary': True },
lambda: self.Continue() )
@CurrentSession()
@IfConnected()
def GoTo( self, file_name, line ):
def failure_handler( reason, *args ):
utils.UserMessage( f"Can't jump to location: {reason}", error=True )
def handle_targets( msg ):
targets = msg.get( 'body', {} ).get( 'targets', [] )
if not targets:
failure_handler( "No targets" )
return
if len( targets ) == 1:
target_selected = 0
else:
target_selected = utils.SelectFromList( "Which target?", [
t[ 'label' ] for t in targets
], ret = 'index' )
if target_selected is None:
return
self._connection.DoRequest( None, {
'command': 'goto',
'arguments': {
'threadId': self._stackTraceView.GetCurrentThreadId(),
'targetId': targets[ target_selected ][ 'id' ]
},
}, failure_handler )
if not self._server_capabilities.get( 'supportsGotoTargetsRequest', False ):
failure_handler( "Server doesn't support it" )
return
self._connection.DoRequest( handle_targets, {
'command': 'gotoTargets',
'arguments': {
'source': {
'path': utils.NormalizePath( file_name )
},
'line': line
},
}, failure_handler )
def SetLineBreakpoint( self, file_name, line_num, options, then = None ):
return self._breakpoints.SetLineBreakpoint( file_name,
line_num,
options,
then )
def ClearLineBreakpoint( self, file_name, line_num ):
return self._breakpoints.ClearLineBreakpoint( file_name, line_num )
def ClearBreakpoints( self ):
return self._breakpoints.ClearBreakpoints()
def ResetExceptionBreakpoints( self ):
return self._breakpoints.ResetExceptionBreakpoints()
def AddFunctionBreakpoint( self, function, options ):
return self._breakpoints.AddFunctionBreakpoint( function, options )
def PathsToAllGadgetConfigs( vimspector_base, current_file ):
yield install.GetGadgetConfigFile( vimspector_base )
for p in sorted( glob.glob(
os.path.join( install.GetGadgetConfigDir( vimspector_base ),
'*.json' ) ) ):
yield p
yield utils.PathToConfigFile( '.gadgets.json',
os.path.dirname( current_file ) )
def PathsToAllConfigFiles( vimspector_base, current_file, filetypes ):
for ft in filetypes + [ '_all' ]:
for p in sorted( glob.glob(
os.path.join( install.GetConfigDirForFiletype( vimspector_base, ft ),
'*.json' ) ) ):
yield p
for ft in filetypes:
yield utils.PathToConfigFile( f'.vimspector.{ft}.json',
os.path.dirname( current_file ) )
yield utils.PathToConfigFile( '.vimspector.json',
os.path.dirname( current_file ) )
def _SelectProcess( *args ):
value = 0
custom_picker = settings.Get( 'custom_process_picker_func' )
if custom_picker:
try:
value = utils.Call( custom_picker, *args )
except vim.error:
pass
else:
# Use the built-in one
vimspector_process_list: str = None
try:
try:
vimspector_process_list = installer.FindExecutable(
'vimspector_process_list' )
except installer.MissingExecutable:
vimspector_process_list = installer.FindExecutable(
'vimspector_process_list',
[ os.path.join( install.GetSupportDir(),
'vimspector_process_list' ) ] )
except installer.MissingExecutable:
pass
default_pid = None
if vimspector_process_list:
output = subprocess.check_output(
( vimspector_process_list, ) + args ).decode( 'utf-8' )
# if there's only one entry, use it as the default value for input.
lines = output.splitlines()
if len( lines ) == 2:
default_pid = lines[ -1 ].split()[ 0 ]
utils.UserMessage( lines )
value = utils.AskForInput( 'Enter Process ID: ',
default_value = default_pid )
if value:
try:
return int( value )
except ValueError:
return 0
return 0
| repo_name: neuml/txtai | path: /src/python/txtai/api/cluster.py | detected_licenses: Apache-2.0, LicenseRef-scancode-proprietary-license | license_type: permissive | language: Python | length_bytes: 6,714 | filename: cluster.py |
"""
Cluster module
"""
import asyncio
import urllib.parse
import zlib
import aiohttp
from ..database.sql import Aggregate
class Cluster:
"""
Aggregates multiple embeddings shards into a single logical embeddings instance.
"""
# pylint: disable = W0231
def __init__(self, config=None):
"""
Creates a new Cluster.
Args:
config: cluster configuration
"""
# Configuration
self.config = config
# Embeddings shard urls
self.shards = None
if "shards" in self.config:
self.shards = self.config["shards"]
# Query aggregator
self.aggregate = Aggregate()
def search(self, query, limit=None):
"""
Finds documents in the embeddings cluster most similar to the input query. Returns
a list of {id: value, score: value} sorted by highest score, where id is the
document id in the embeddings model.
Args:
query: query text
limit: maximum results
Returns:
list of {id: value, score: value}
"""
# Build URL
action = f"search?query={urllib.parse.quote_plus(query)}"
if limit:
action += f"&limit={limit}"
# Run query and flatten results into single results list
results = []
for result in self.execute("get", action):
results.extend(result)
# Combine aggregate functions and sort
results = self.aggregate(query, results)
# Limit results
return results[: (limit if limit else 10)]
def batchsearch(self, queries, limit=None):
"""
Finds documents in the embeddings cluster most similar to the input queries. Returns
a list of {id: value, score: value} sorted by highest score per query, where id is
the document id in the embeddings model.
Args:
queries: queries text
limit: maximum results
Returns:
list of {id: value, score: value} per query
"""
# POST parameters
params = {"queries": queries}
if limit:
params["limit"] = limit
# Run query
batch = self.execute("post", "batchsearch", [params] * len(self.shards))
# Combine results per query
results = []
for x, query in enumerate(queries):
result = []
for section in batch:
result.extend(section[x])
# Aggregate, sort and limit results
results.append(self.aggregate(query, result)[: (limit if limit else 10)])
return results
def add(self, documents):
"""
Adds a batch of documents for indexing.
Args:
documents: list of {id: value, text: value}
"""
self.execute("post", "add", self.shard(documents))
def index(self):
"""
Builds an embeddings index for previously batched documents.
"""
self.execute("get", "index")
def upsert(self):
"""
Runs an embeddings upsert operation for previously batched documents.
"""
self.execute("get", "upsert")
def delete(self, ids):
"""
Deletes from an embeddings cluster. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
return [uid for ids in self.execute("post", "delete", self.shard(ids)) for uid in ids]
def count(self):
"""
Total number of elements in this embeddings cluster.
Returns:
number of elements in embeddings cluster
"""
return sum(self.execute("get", "count"))
def shard(self, documents):
"""
Splits documents into equal sized shards.
Args:
documents: input documents
Returns:
list of shards, one per configured shard url, with each document routed by id (hashed for string ids) modulo the shard count
"""
shards = [[] for _ in range(len(self.shards))]
for document in documents:
uid = document["id"] if isinstance(document, dict) else document
if isinstance(uid, str):
# Quick int hash of string to help derive shard id
uid = zlib.adler32(uid.encode("utf-8"))
shards[uid % len(self.shards)].append(document)
return shards
def execute(self, method, action, data=None):
"""
Executes a HTTP action asynchronously.
Args:
method: get or post
action: url action to perform
data: post parameters
Returns:
json results if any
"""
# Get urls
urls = [f"{shard}/{action}" for shard in self.shards]
close = False
# Use existing loop if available, otherwise create one
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
close = True
try:
return loop.run_until_complete(self.run(urls, method, data))
finally:
# Close loop if it was created in this method
if close:
loop.close()
async def run(self, urls, method, data):
"""
Runs an async action.
Args:
urls: run against this list of urls
method: get or post
data: list of data for each url or None
Returns:
json results if any
"""
async with aiohttp.ClientSession(raise_for_status=True) as session:
tasks = []
for x, url in enumerate(urls):
if method == "post":
if not data or data[x]:
tasks.append(asyncio.ensure_future(self.post(session, url, data[x] if data else None)))
else:
tasks.append(asyncio.ensure_future(self.get(session, url)))
return await asyncio.gather(*tasks)
async def get(self, session, url):
"""
Runs an async HTTP GET request.
Args:
session: ClientSession
url: url
Returns:
json results if any
"""
async with session.get(url) as resp:
return await resp.json()
async def post(self, session, url, data):
"""
Runs an async HTTP POST request.
Args:
session: ClientSession
url: url
data: data to POST
Returns:
json results if any
"""
async with session.post(url, json=data) as resp:
return await resp.json()
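# A minimal usage sketch for the Cluster class above. The shard urls are
# hypothetical; it assumes two embeddings API instances are already running
# and reachable at those addresses:
if __name__ == "__main__":
    cluster = Cluster({"shards": ["http://127.0.0.1:8001", "http://127.0.0.1:8002"]})

    # Stage documents (split across shards by id), then build the index
    cluster.add([{"id": 0, "text": "first document"}, {"id": 1, "text": "second document"}])
    cluster.index()

    # Query all shards and print the aggregated, score-sorted results
    print(cluster.count())
    print(cluster.search("first", limit=1))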
| repo_name: davidmcclure/open-syllabus-project | path: /osp/test/common/config/test_is_test.py | detected_licenses: Apache-2.0 | license_type: permissive | language: Python | length_bytes: 298 | filename: test_is_test.py |
import pytest
from .conftest import get_config
@pytest.mark.parametrize('fixture,is_test', [
('true', True),
('no-key', False),
('false', False),
])
def test_is_test(fixture, is_test):
config = get_config('is_test/{0}'.format(fixture))
assert config.is_test() == is_test
| repo_name: franklingu/leetcode-solutions | path: /questions/complement-of-base-10-integer/Solution.py | detected_licenses: MIT | license_type: permissive | language: Python | length_bytes: 1,282 | filename: Solution.py |
"""
Every non-negative integer N has a binary representation. For example, 5 can be represented as "101" in binary, 11 as "1011" in binary, and so on. Note that except for N = 0, there are no leading zeroes in any binary representation.
The complement of a binary representation is the number in binary you get when changing every 1 to a 0 and 0 to a 1. For example, the complement of "101" in binary is "010" in binary.
For a given number N in base-10, return the complement of its binary representation as a base-10 integer.
Example 1:
Input: 5
Output: 2
Explanation: 5 is "101" in binary, with complement "010" in binary, which is 2 in base-10.
Example 2:
Input: 7
Output: 0
Explanation: 7 is "111" in binary, with complement "000" in binary, which is 0 in base-10.
Example 3:
Input: 10
Output: 5
Explanation: 10 is "1010" in binary, with complement "0101" in binary, which is 5 in base-10.
Note:
0 <= N < 10^9
This question is the same as 476: https://leetcode.com/problems/number-complement/
"""
class Solution:
def bitwiseComplement(self, num: int) -> int:
b = bin(num)[2:]
t = []
for c in b:
if c == '1':
t.append('0')
else:
t.append('1')
return int(''.join(t), 2)
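# An equivalent arithmetic approach (a sketch, not part of the original
# solution): XOR the number with an all-ones mask of the same bit length.
# max(..., 1) keeps the num == 0 case correct (the complement of "0" is "1").
class SolutionBitMask:
    def bitwiseComplement(self, num: int) -> int:
        mask = (1 << max(num.bit_length(), 1)) - 1
        return num ^ mask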
| repo_name: gil9red/SimplePyScripts | path: /qt__pyqt__pyside__pyqode/pyqt5__exchange_rate_and_weather__use_QThread/main.py | detected_licenses: CC-BY-4.0 | license_type: permissive | language: Python | length_bytes: 3,494 | filename: main.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import sys
import traceback
import time
from PyQt5 import Qt
from utils import exchange_rate, get_weather
def log_uncaught_exceptions(ex_cls, ex, tb):
text = f"{ex_cls.__name__}: {ex}:\n"
text += "".join(traceback.format_tb(tb))
print(text)
Qt.QMessageBox.critical(None, "Error", text)
sys.exit(1)
sys.excepthook = log_uncaught_exceptions
class ThreadExchangeRate(Qt.QThread):
about_exchange_rate = Qt.pyqtSignal(str)
def __init__(self, parent, currency):
super().__init__(parent)
self.currency = currency
def run(self):
while True:
print("Start ThreadExchangeRate.currency: " + self.currency)
value = exchange_rate(self.currency)
print(f" ThreadExchangeRate.{self.currency} value: {value}")
self.about_exchange_rate.emit(value)
time.sleep(60)
class ThreadGetWeather(Qt.QThread):
about_weather = Qt.pyqtSignal(str)
def __init__(self, parent, city):
super().__init__(parent)
self.city = city
def run(self):
while True:
print("Start ThreadGetWeather.city: " + self.city)
weather = get_weather(self.city)
print(f" ThreadGetWeather.{self.city} weather: {weather}")
self.about_weather.emit(weather)
time.sleep(60)
class Window(Qt.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Widget-Info: exchange rate and weather")
self.exchange_rate_USD = Qt.QLabel()
self.exchange_rate_EUR = Qt.QLabel()
self.weather_1 = Qt.QLabel()
self.weather_2 = Qt.QLabel()
self.weather_3 = Qt.QLabel()
layout = Qt.QFormLayout()
layout.addRow("USD:", self.exchange_rate_USD)
layout.addRow("EUR:", self.exchange_rate_EUR)
line = Qt.QFrame()
line.setFrameShape(Qt.QFrame.HLine)
line.setFrameShadow(Qt.QFrame.Sunken)
line.setLineWidth(1)
layout.addRow(line)
layout.addRow("Магнитогорск:", self.weather_1)
layout.addRow("Челябинск:", self.weather_2)
layout.addRow("Екатеринбург:", self.weather_3)
self.setLayout(layout)
self.thread_exchange_rate_USD = ThreadExchangeRate(self, "USD")
self.thread_exchange_rate_USD.about_exchange_rate.connect(
self.exchange_rate_USD.setText
)
self.thread_exchange_rate_USD.start()
self.thread_exchange_rate_EUR = ThreadExchangeRate(self, "EUR")
self.thread_exchange_rate_EUR.about_exchange_rate.connect(
self.exchange_rate_EUR.setText
)
self.thread_exchange_rate_EUR.start()
self.thread_weather_1 = ThreadGetWeather(self, "Магнитогорск")
self.thread_weather_1.about_weather.connect(self.weather_1.setText)
self.thread_weather_1.start()
self.thread_weather_2 = ThreadGetWeather(self, "Челябинск")
self.thread_weather_2.about_weather.connect(self.weather_2.setText)
self.thread_weather_2.start()
self.thread_weather_3 = ThreadGetWeather(self, "Екатеринбург")
self.thread_weather_3.about_weather.connect(self.weather_3.setText)
self.thread_weather_3.start()
if __name__ == "__main__":
app = Qt.QApplication([])
mw = Window()
mw.show()
app.exec()
| repo_name: intel/webml-polyfill | path: /test/cts/tool/CTSConverter/src/nn/specs/V1_1/mean_quant8_2.mod.py | detected_licenses: Apache-2.0, BSD-3-Clause | license_type: permissive | language: Python | length_bytes: 603 | filename: mean_quant8_2.mod.py |
model = Model()
i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 3, 2}, 0.8, 5")
axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
keepDims = Int32Scalar("keepDims", 1)
output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3, 1}, 0.8, 5")
model = model.Operation("MEAN", i1, axis, keepDims).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24]}
output0 = {output: # output 0
[10, 12, 14]}
# Instantiate an example
Example((input0, output0))
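# Worked check of the expected output: with axes [0, 2] reduced, each output
# element averages the 8 quantized inputs sharing the middle index, e.g.
#   j = 0: mean(1, 2, 7, 8, 13, 14, 19, 20)   = 10.5
#   j = 1: mean(3, 4, 9, 10, 15, 16, 21, 22)  = 12.5
#   j = 2: mean(5, 6, 11, 12, 17, 18, 23, 24) = 14.5
# which the reference rounds to the quantized outputs 10, 12 and 14 above.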
| repo_name: platformio/platformio-core | path: /platformio/builder/tools/piosize.py | detected_licenses: Apache-2.0 | license_type: permissive | language: Python | length_bytes: 8,702 | filename: piosize.py |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-locals
import json
import sys
from os import environ, makedirs, remove
from os.path import isdir, join, splitdrive
from elftools.elf.descriptions import describe_sh_flags
from elftools.elf.elffile import ELFFile
from platformio.compat import IS_WINDOWS
from platformio.proc import exec_command
def _run_tool(cmd, env, tool_args):
sysenv = environ.copy()
sysenv["PATH"] = str(env["ENV"]["PATH"])
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
tmp_file = join(build_dir, "size-data-longcmd.txt")
with open(tmp_file, mode="w", encoding="utf8") as fp:
fp.write("\n".join(tool_args))
cmd.append("@" + tmp_file)
result = exec_command(cmd, env=sysenv)
remove(tmp_file)
return result
def _get_symbol_locations(env, elf_path, addrs):
if not addrs:
return {}
cmd = [env.subst("$CC").replace("-gcc", "-addr2line"), "-e", elf_path]
result = _run_tool(cmd, env, addrs)
locations = [line for line in result["out"].split("\n") if line]
assert len(addrs) == len(locations)
return dict(zip(addrs, [loc.strip() for loc in locations]))
def _get_demangled_names(env, mangled_names):
if not mangled_names:
return {}
result = _run_tool(
[env.subst("$CC").replace("-gcc", "-c++filt")], env, mangled_names
)
demangled_names = [line for line in result["out"].split("\n") if line]
assert len(mangled_names) == len(demangled_names)
return dict(
zip(
mangled_names,
[dn.strip().replace("::__FUNCTION__", "") for dn in demangled_names],
)
)
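# For example, with a hypothetical cross toolchain where $CC is
# "arm-none-eabi-gcc", the helpers above invoke "arm-none-eabi-addr2line"
# and "arm-none-eabi-c++filt" respectively.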
def _collect_sections_info(env, elffile):
sections = {}
for section in elffile.iter_sections():
if section.is_null() or section.name.startswith(".debug"):
continue
section_type = section["sh_type"]
section_flags = describe_sh_flags(section["sh_flags"])
section_size = section.data_size
section_data = {
"name": section.name,
"size": section_size,
"start_addr": section["sh_addr"],
"type": section_type,
"flags": section_flags,
}
sections[section.name] = section_data
sections[section.name]["in_flash"] = env.pioSizeIsFlashSection(section_data)
sections[section.name]["in_ram"] = env.pioSizeIsRamSection(section_data)
return sections
def _collect_symbols_info(env, elffile, elf_path, sections):
symbols = []
symbol_section = elffile.get_section_by_name(".symtab")
if symbol_section.is_null():
sys.stderr.write("Couldn't find symbol table. Is ELF file stripped?")
env.Exit(1)
sysenv = environ.copy()
sysenv["PATH"] = str(env["ENV"]["PATH"])
symbol_addrs = []
mangled_names = []
for s in symbol_section.iter_symbols():
symbol_info = s.entry["st_info"]
symbol_addr = s["st_value"]
symbol_size = s["st_size"]
symbol_type = symbol_info["type"]
if not env.pioSizeIsValidSymbol(s.name, symbol_type, symbol_addr):
continue
symbol = {
"addr": symbol_addr,
"bind": symbol_info["bind"],
"name": s.name,
"type": symbol_type,
"size": symbol_size,
"section": env.pioSizeDetermineSection(sections, symbol_addr),
}
if s.name.startswith("_Z"):
mangled_names.append(s.name)
symbol_addrs.append(hex(symbol_addr))
symbols.append(symbol)
symbol_locations = _get_symbol_locations(env, elf_path, symbol_addrs)
demangled_names = _get_demangled_names(env, mangled_names)
for symbol in symbols:
if symbol["name"].startswith("_Z"):
symbol["demangled_name"] = demangled_names.get(symbol["name"])
location = symbol_locations.get(hex(symbol["addr"]))
if not location or "?" in location:
continue
if IS_WINDOWS:
drive, tail = splitdrive(location)
location = join(drive.upper(), tail)
symbol["file"] = location
symbol["line"] = 0
if ":" in location:
file_, line = location.rsplit(":", 1)
if line.isdigit():
symbol["file"] = file_
symbol["line"] = int(line)
return symbols
def pioSizeDetermineSection(_, sections, symbol_addr):
for section, info in sections.items():
if not info.get("in_flash", False) and not info.get("in_ram", False):
continue
if symbol_addr in range(info["start_addr"], info["start_addr"] + info["size"]):
return section
return "unknown"
def pioSizeIsValidSymbol(_, symbol_name, symbol_type, symbol_address):
return symbol_name and symbol_address != 0 and symbol_type != "STT_NOTYPE"
def pioSizeIsRamSection(_, section):
return (
section.get("type", "") in ("SHT_NOBITS", "SHT_PROGBITS")
and section.get("flags", "") == "WA"
)
def pioSizeIsFlashSection(_, section):
return section.get("type", "") == "SHT_PROGBITS" and "A" in section.get("flags", "")
def pioSizeCalculateFirmwareSize(_, sections):
flash_size = ram_size = 0
for section_info in sections.values():
if section_info.get("in_flash", False):
flash_size += section_info.get("size", 0)
if section_info.get("in_ram", False):
ram_size += section_info.get("size", 0)
return ram_size, flash_size
def DumpSizeData(_, target, source, env): # pylint: disable=unused-argument
data = {"device": {}, "memory": {}, "version": 1}
board = env.BoardConfig()
if board:
data["device"] = {
"mcu": board.get("build.mcu", ""),
"cpu": board.get("build.cpu", ""),
"frequency": board.get("build.f_cpu"),
"flash": int(board.get("upload.maximum_size", 0)),
"ram": int(board.get("upload.maximum_ram_size", 0)),
}
if data["device"]["frequency"] and data["device"]["frequency"].endswith("L"):
data["device"]["frequency"] = int(data["device"]["frequency"][0:-1])
elf_path = env.subst("$PIOMAINPROG")
with open(elf_path, "rb") as fp:
elffile = ELFFile(fp)
if not elffile.has_dwarf_info():
sys.stderr.write("Elf file doesn't contain DWARF information")
env.Exit(1)
sections = _collect_sections_info(env, elffile)
firmware_ram, firmware_flash = env.pioSizeCalculateFirmwareSize(sections)
data["memory"]["total"] = {
"ram_size": firmware_ram,
"flash_size": firmware_flash,
"sections": sections,
}
files = {}
for symbol in _collect_symbols_info(env, elffile, elf_path, sections):
file_path = symbol.get("file") or "unknown"
if not files.get(file_path, {}):
files[file_path] = {"symbols": [], "ram_size": 0, "flash_size": 0}
symbol_size = symbol.get("size", 0)
section = sections.get(symbol.get("section", ""), {})
if not section:
continue
if section.get("in_ram", False):
files[file_path]["ram_size"] += symbol_size
if section.get("in_flash", False):
files[file_path]["flash_size"] += symbol_size
files[file_path]["symbols"].append(symbol)
data["memory"]["files"] = []
for k, v in files.items():
file_data = {"path": k}
file_data.update(v)
data["memory"]["files"].append(file_data)
with open(
join(env.subst("$BUILD_DIR"), "sizedata.json"), mode="w", encoding="utf8"
) as fp:
fp.write(json.dumps(data))
def exists(_):
return True
def generate(env):
env.AddMethod(pioSizeIsRamSection)
env.AddMethod(pioSizeIsFlashSection)
env.AddMethod(pioSizeCalculateFirmwareSize)
env.AddMethod(pioSizeDetermineSection)
env.AddMethod(pioSizeIsValidSymbol)
env.AddMethod(DumpSizeData)
return env
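# A minimal standalone sketch of the ELF section walk performed above,
# assuming a local firmware image at the hypothetical path "firmware.elf":
if __name__ == "__main__":
    with open("firmware.elf", "rb") as fp:
        elf = ELFFile(fp)
        for section in elf.iter_sections():
            if section.is_null() or section.name.startswith(".debug"):
                continue
            # Print name, size, load address and flags, mirroring
            # _collect_sections_info() without the SCons environment
            print(section.name,
                  section.data_size,
                  hex(section["sh_addr"]),
                  describe_sh_flags(section["sh_flags"]))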
| repo_name: thuml/Transfer-Learning-Library | path: /tllib/vision/datasets/stanford_cars.py | detected_licenses: MIT | license_type: permissive | language: Python | length_bytes: 4,453 | filename: stanford_cars.py |
"""
@author: Yifei Ji
@contact: jiyf990330@163.com
"""
import os
from typing import Optional
from .imagelist import ImageList
from ._util import download as download_data, check_exits
class StanfordCars(ImageList):
"""`The Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ \
contains 16,185 images of 196 classes of cars. \
Each category has been split roughly in a 50-50 split. \
There are 8,144 images for training and 8,041 images for testing.
Args:
root (str): Root directory of dataset
split (str, optional): The dataset split, supports ``train``, or ``test``.
sample_rate (int): The sampling rates to sample random ``training`` images for each category.
Choices include 100, 50, 30, 15. Default: 100.
download (bool, optional): If true, downloads the dataset from the internet and puts it \
in root directory. If dataset is already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image and returns a \
transformed version. E.g, :class:`torchvision.transforms.RandomCrop`.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
.. note:: In `root`, there will exist following files after downloading.
::
train/
test/
image_list/
train_100.txt
train_50.txt
train_30.txt
train_15.txt
test.txt
"""
download_list = [
("image_list", "image_list.zip", "https://cloud.tsinghua.edu.cn/f/aeeb690e9886442aa267/?dl=1"),
("train", "train.tgz", "https://cloud.tsinghua.edu.cn/f/fd80c30c120a42a08fd3/?dl=1"),
("test", "test.tgz", "https://cloud.tsinghua.edu.cn/f/01e6b279f20440cb8bf9/?dl=1"),
]
image_list = {
"train": "image_list/train_100.txt",
"train100": "image_list/train_100.txt",
"train50": "image_list/train_50.txt",
"train30": "image_list/train_30.txt",
"train15": "image_list/train_15.txt",
"test": "image_list/test.txt",
"test100": "image_list/test.txt",
}
CLASSES = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36',
'37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53',
'54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70',
'71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87',
'88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103',
'104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118',
'119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133',
'134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148',
'149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163',
'164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178',
'179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193',
'194', '195', '196']
def __init__(self, root: str, split: str, sample_rate: Optional[int] = 100, download: Optional[bool] = False,
**kwargs):
if split == 'train':
list_name = 'train' + str(sample_rate)
assert list_name in self.image_list
data_list_file = os.path.join(root, self.image_list[list_name])
else:
data_list_file = os.path.join(root, self.image_list['test'])
if download:
list(map(lambda args: download_data(root, *args), self.download_list))
else:
# map() passes each (name, archive, url) tuple as a single argument, so
# unpack the tuple here instead of declaring two lambda parameters
list(map(lambda args: check_exits(root, args[0]), self.download_list))
super(StanfordCars, self).__init__(root, StanfordCars.CLASSES, data_list_file=data_list_file, **kwargs)
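# A minimal usage sketch (root path and transform are hypothetical; transform
# and other keyword arguments are forwarded to ImageList as documented above):
if __name__ == "__main__":
    import torchvision.transforms as T

    train_dataset = StanfordCars(root="data/stanford_cars", split="train",
                                 sample_rate=50, download=True,
                                 transform=T.Compose([T.Resize((224, 224)),
                                                      T.ToTensor()]))
    print(len(train_dataset.CLASSES), "classes")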
| repo_name: alldatacenter/alldata | path: /dts/airbyte/airbyte-integrations/connectors/source-instagram/source_instagram/streams.py | detected_licenses: Apache-2.0, BSD-3-Clause, MIT, Elastic-2.0 | license_type: permissive | language: Python | length_bytes: 17,344 | filename: streams.py |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import copy
from abc import ABC
from datetime import datetime
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional
import pendulum
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams import Stream
from cached_property import cached_property
from facebook_business.adobjects.igmedia import IGMedia
from facebook_business.exceptions import FacebookRequestError
from source_instagram.api import InstagramAPI
from .common import remove_params_from_url
class InstagramStream(Stream, ABC):
"""Base stream class"""
page_size = 100
primary_key = "id"
def __init__(self, api: InstagramAPI, **kwargs):
super().__init__(**kwargs)
self._api = api
@cached_property
def fields(self) -> List[str]:
"""List of fields that we want to query, for now just all properties from stream's schema"""
non_object_fields = ["page_id", "business_account_id"]
fields = list(self.get_json_schema().get("properties", {}).keys())
return list(set(fields) - set(non_object_fields))
def request_params(
self,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
"""Parameters that should be passed to query_records method"""
return {"limit": self.page_size}
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""
Override to define the slices for this stream. See the stream slicing section of the docs for more information.
:param sync_mode:
:param cursor_field:
:param stream_state:
:return:
"""
for account in self._api.accounts:
yield {"account": account}
def transform(self, record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
return self._clear_url(record)
@staticmethod
def _clear_url(record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
"""
        This removes the _nc_rid query parameter from media_url and ccb from
        profile_picture_url. _nc_rid is regenerated on every request and ccb can
        change its value, so identity checks in tests fail if they are kept.
        Stripping them does not break the link: it stays valid and still serves
        the video or picture.
"""
if record.get("media_url"):
record["media_url"] = remove_params_from_url(record["media_url"], params=["_nc_rid"])
if record.get("profile_picture_url"):
record["profile_picture_url"] = remove_params_from_url(record["profile_picture_url"], params=["ccb"])
return record
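# Illustrative effect of _clear_url (URLs are made up):
#   {"media_url": "https://cdn.example.net/v.mp4?_nc_rid=abc123&oh=xyz"}
#   -> {"media_url": "https://cdn.example.net/v.mp4?oh=xyz"}
# Only the listed parameters are stripped; every other parameter is kept.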
class InstagramIncrementalStream(InstagramStream, ABC):
"""Base class for incremental streams"""
def __init__(self, start_date: datetime, **kwargs):
super().__init__(**kwargs)
self._start_date = pendulum.instance(start_date)
class Users(InstagramStream):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-user"""
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
record = ig_account.api_get(fields=self.fields).export_all_data()
record["page_id"] = account["page_id"]
yield self.transform(record)
class UserLifetimeInsights(InstagramStream):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-user/insights"""
primary_key = None
LIFETIME_METRICS = ["audience_city", "audience_country", "audience_gender_age", "audience_locale"]
period = "lifetime"
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
for insight in ig_account.get_insights(params=self.request_params()):
yield {
"page_id": account["page_id"],
"business_account_id": ig_account.get("id"),
"metric": insight["name"],
"date": insight["values"][0]["end_time"],
"value": insight["values"][0]["value"],
}
def request_params(
self,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_slice=stream_slice, stream_state=stream_state)
params.update({"metric": self.LIFETIME_METRICS, "period": self.period})
return params
class UserInsights(InstagramIncrementalStream):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-user/insights"""
METRICS_BY_PERIOD = {
"day": [
"email_contacts",
"follower_count",
"get_directions_clicks",
"impressions",
"phone_call_clicks",
"profile_views",
"reach",
"text_message_clicks",
"website_clicks",
],
"week": ["impressions", "reach"],
"days_28": ["impressions", "reach"],
"lifetime": ["online_followers"],
}
primary_key = None
cursor_field = "date"
# For some metrics we can only get insights not older than 30 days, it is Facebook policy
buffer_days = 30
days_increment = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._end_date = pendulum.now()
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
account_id = ig_account.get("id")
base_params = self.request_params(stream_state=stream_state, stream_slice=stream_slice)
insight_list = []
# iterate over each period, query insights
for period, metrics in self.METRICS_BY_PERIOD.items():
params = {
**base_params,
"metric": metrics,
"period": [period],
}
            # take only the already-fetched page; iterating the cursor further
            # would pull the next date interval
cursor = ig_account.get_insights(params=params)
if len(cursor):
insight_list += [insights.export_all_data() for insights in cursor[: len(cursor)]]
        # then merge all periods into one record
insight_record = {"page_id": account["page_id"], "business_account_id": account_id}
for insight in insight_list:
key = insight["name"]
if insight["period"] in ["week", "days_28"]:
key += f"_{insight['period']}"
insight_record[key] = insight.get("values")[0]["value"] # this depends on days_increment value
if not insight_record.get(self.cursor_field):
insight_record[self.cursor_field] = insight.get("values")[0]["end_time"]
yield insight_record
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""Extend default slicing based on accounts with slices based on date intervals"""
stream_state = stream_state or {}
stream_slices = super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
for stream_slice in stream_slices:
account = stream_slice["account"]
account_id = account["instagram_business_account"]["id"]
state_value = stream_state.get(account_id, {}).get(self.cursor_field)
start_date = pendulum.parse(state_value) if state_value else self._start_date
start_date = max(start_date, self._start_date, pendulum.now().subtract(days=self.buffer_days))
if start_date > pendulum.now():
continue
for since in pendulum.period(start_date, self._end_date).range("days", self.days_increment):
until = since.add(days=self.days_increment)
self.logger.info(f"Reading insights between {since.date()} and {until.date()}")
yield {
**stream_slice,
"since": since.to_datetime_string(),
"until": until.to_datetime_string(), # excluding
}
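    # Illustrative slice shape (account object elided; dates are made up):
    #   {"account": ..., "since": "2023-01-01 00:00:00", "until": "2023-01-02 00:00:00"}
    #   {"account": ..., "since": "2023-01-02 00:00:00", "until": "2023-01-03 00:00:00"}
    # with days_increment=1 and "until" exclusive, as noted above.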
def request_params(
self,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
"""Append datetime range params"""
params = super().request_params(stream_state=stream_state, stream_slice=stream_slice)
return {
**params,
"since": stream_slice["since"],
"until": stream_slice["until"],
}
def _state_has_legacy_format(self, state: Mapping[str, Any]) -> bool:
"""Tell if the format of state is outdated"""
for value in state.values():
if not isinstance(value, Mapping):
return True
return False
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
"""Update stream state from latest record"""
record_value = latest_record[self.cursor_field]
account_id = latest_record.get("business_account_id")
state_value = current_stream_state.get(account_id, {}).get(self.cursor_field) or record_value
max_cursor = max(pendulum.parse(state_value), pendulum.parse(record_value))
new_stream_state = copy.deepcopy(current_stream_state)
new_stream_state[account_id] = {
self.cursor_field: str(max_cursor),
}
return new_stream_state
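# Illustrative state shapes (the account id is made up): the current format is
#   {"17841400000000000": {"date": "2023-01-02T00:00:00+0000"}}
# while _state_has_legacy_format() flags the older flat shape
#   {"date": "2023-01-02T00:00:00+0000"}
# whose values are not mappings.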
class Media(InstagramStream):
"""Children objects can only be of the media_type == "CAROUSEL_ALBUM".
And children object does not support INVALID_CHILDREN_FIELDS fields,
so they are excluded when trying to get child objects to avoid the error
"""
INVALID_CHILDREN_FIELDS = ["caption", "comments_count", "is_comment_enabled", "like_count", "children"]
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
This method should be overridden by subclasses to read records based on the inputs
"""
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
media = ig_account.get_media(params=self.request_params(), fields=self.fields)
for record in media:
record_data = record.export_all_data()
if record_data.get("children"):
ids = [child["id"] for child in record["children"]["data"]]
record_data["children"] = list(self._get_children(ids))
record_data.update(
{
"page_id": account["page_id"],
"business_account_id": ig_account.get("id"),
}
)
yield self.transform(record_data)
def _get_children(self, ids: List):
children_fields = list(set(self.fields) - set(self.INVALID_CHILDREN_FIELDS))
for pk in ids:
yield self.transform(IGMedia(pk).api_get(fields=children_fields).export_all_data())
class MediaInsights(Media):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-media/insights"""
MEDIA_METRICS = ["engagement", "impressions", "reach", "saved"]
CAROUSEL_ALBUM_METRICS = ["carousel_album_engagement", "carousel_album_impressions", "carousel_album_reach", "carousel_album_saved"]
REELS_METRICS = ["comments", "likes", "reach", "saved", "shares", "total_interactions", "plays"]
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
media = ig_account.get_media(params=self.request_params(), fields=["media_type", "media_product_type"])
for ig_media in media:
account_id = ig_account.get("id")
insights = self._get_insights(ig_media, account_id)
if insights is None:
break
insights["id"] = ig_media["id"]
insights["page_id"] = account["page_id"]
insights["business_account_id"] = ig_account["id"]
yield self.transform(insights)
def _get_insights(self, item, account_id) -> Optional[MutableMapping[str, Any]]:
"""Get insights for specific media"""
if item.get("media_product_type") == "REELS":
metrics = self.REELS_METRICS
elif item.get("media_type") == "VIDEO":
metrics = self.MEDIA_METRICS + ["video_views"]
elif item.get("media_type") == "CAROUSEL_ALBUM":
metrics = self.CAROUSEL_ALBUM_METRICS
else:
metrics = self.MEDIA_METRICS
try:
insights = item.get_insights(params={"metric": metrics})
return {record.get("name"): record.get("values")[0]["value"] for record in insights}
except FacebookRequestError as error:
# An error might occur if the media was posted before the most recent time that
# the user's account was converted to a business account from a personal account
if error.api_error_subcode() == 2108006:
details = error.body().get("error", {}).get("error_user_title") or error.api_error_message()
self.logger.error(f"Insights error for business_account_id {account_id}: {details}")
                # Media is returned newest first, so once one item hits this
                # insights error every older item would too; stop querying.
return None
raise error
class Stories(InstagramStream):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-user/stories"""
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
stories = ig_account.get_stories(params=self.request_params(), fields=self.fields)
for record in stories:
record_data = record.export_all_data()
record_data["page_id"] = account["page_id"]
record_data["business_account_id"] = ig_account.get("id")
yield self.transform(record_data)
class StoryInsights(Stories):
"""Docs: https://developers.facebook.com/docs/instagram-api/reference/ig-media/insights"""
metrics = ["exits", "impressions", "reach", "replies", "taps_forward", "taps_back"]
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
account = stream_slice["account"]
ig_account = account["instagram_business_account"]
stories = ig_account.get_stories(params=self.request_params(), fields=[])
for ig_story in stories:
insights = self._get_insights(IGMedia(ig_story["id"]))
if not insights:
continue
insights["id"] = ig_story["id"]
insights["page_id"] = account["page_id"]
insights["business_account_id"] = ig_account["id"]
yield self.transform(insights)
def _get_insights(self, story: IGMedia) -> MutableMapping[str, Any]:
"""Get insights for specific story"""
# Story IG Media object metrics with values less than 5 will return an error code 10 with the message (#10)
# Not enough viewers for the media to show insights.
try:
insights = story.get_insights(params={"metric": self.metrics})
return {record["name"]: record["values"][0]["value"] for record in insights}
except FacebookRequestError as error:
if error.api_error_code() == 10:
self.logger.error(f"Insights error: {error.api_error_message()}")
return {}
raise error
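# --- Illustrative wiring sketch (an assumption; the real source class and the
# InstagramAPI constructor signature live elsewhere in the connector) ---
#   api = InstagramAPI(access_token="<token>")
#   streams = [
#       Users(api=api),
#       UserLifetimeInsights(api=api),
#       UserInsights(api=api, start_date=datetime(2023, 1, 1)),
#       Media(api=api),
#       MediaInsights(api=api),
#       Stories(api=api),
#       StoryInsights(api=api),
#   ]
# Each stream slices per Instagram business account and yields plain dicts.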
|
ebc86996a89ccaff63950f68a9ac486b30ebc4e0
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Perch/Integrations/Perch/Perch.py
|
da897b9e06a6588ea47dbe405b4d79661ad51e26
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 17,403
|
py
|
Perch.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import json
import collections
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
PARAMS = demisto.params()
USERNAME = PARAMS.get('credentials').get('identifier')
PASSWORD = PARAMS.get('credentials').get('password')
API_KEY = PARAMS.get('api-key_creds', {}).get('password') or PARAMS.get('api-key')
FETCH_TIME = int(PARAMS.get('fetch_time', '7'))
SERVER = PARAMS['url'].removesuffix('/')
USE_SSL = not PARAMS.get('insecure', False)
BASE_URL = SERVER + '/v1'
# Remove proxy if not set to true in params
handle_proxy()
STATUSES = {
'Not Reviewed': '0',
'Investigating': '1',
'On hold': '2',
'False Positive': '3',
'Escalated': '4'
}
TLP_MAP = {
'WHITE': 0,
'GREEN': 1,
'AMBER': 2,
'RED': 3
}
CONFIDENCE_MAP = {
'LOW': 0,
'MEDIUM': 1,
'HIGH': 2
}
OBSERVABLE_TYPES_MAP = {
'IP': 0,
'Domain': 1,
'URL': 2,
'REGEX': 3,
'File Hash': 4
}
''' HELPER FUNCTIONS '''
# Allows nested keys to be accessible
def makehash():
return collections.defaultdict(makehash)
def http_request(method, url_suffix, params=None, data=None, headers=None):
try:
res = requests.request(
method,
BASE_URL + url_suffix,
verify=USE_SSL,
params=params,
data=data,
headers=headers
)
if res.status_code == 403:
return_error('Connection forbidden. Please verify your API key is valid.')
elif res.status_code not in {200, 201}:
return_error(f'Error in API call to Perch Integration [{res.status_code}] - {res.reason}')
except requests.exceptions.ConnectionError as error:
return_error(f"Failed to establish a new connection: {type(error)}")
try:
response = res.json()
except Exception as e:
return_error(f'Failed to parse JSON response: {str(e)}')
return response
def find_key_by_value(val, dic_map):
for key, value in dic_map.items():
if value == val:
return key
return None
def format_alerts(alert):
hr = makehash() # type: dict
ec = makehash() # type: dict
if alert.get('id'):
hr['ID'] = alert.get('id')
ec['ID'] = alert.get('id')
if alert.get('sensor_id'):
hr['Sensor ID'] = alert.get('sensor_id')
ec['SensorID'] = alert.get('sensor_id')
if alert.get('observable_id'):
hr['Observable ID'] = alert.get('observable_id')
ec['ObservableID'] = alert.get('observable_id')
if alert.get('indicator_id'):
hr['Indicator ID'] = alert.get('indicator_id')
ec['IndicatorID'] = alert.get('indicator_id')
if alert.get('status'):
hr['Status'] = alert.get('status')
ec['Status'] = alert.get('status')
if alert.get('ts'):
hr['Timestamp'] = alert.get('ts')
ec['TS'] = alert.get('ts')
if alert.get('title'):
hr['Title'] = alert.get('title')
ec['Title'] = alert.get('title')
if alert.get('protocol'):
hr['Protocol'] = alert.get('protocol')
ec['Protocol'] = alert.get('protocol')
if alert.get('src_ip'):
hr['Source IP'] = alert.get('src_ip')
ec['SrcIP'] = alert.get('src_ip')
if alert.get('src_port'):
hr['Source Port'] = alert.get('src_port')
ec['SrcPort'] = alert.get('src_port')
if alert.get('src_geo_ip'):
src_geo = alert['src_geo_ip']
if src_geo.get('latitude'):
hr['Source Geo']['Latitude'] = src_geo.get('latitude')
ec['SrcGeo']['Latitude'] = src_geo.get('latitude')
if src_geo.get('longitude'):
hr['Source Geo']['Longitude'] = src_geo.get('longitude')
ec['SrcGeo']['Longitude'] = src_geo.get('longitude')
if src_geo.get('country_name'):
hr['Source Geo']['Country Name'] = src_geo.get('country_name')
ec['SrcGeo']['Country'] = src_geo.get('country_name')
if alert.get('dest_ip'):
hr['Destination IP'] = alert.get('dest_ip')
ec['DestIP'] = alert.get('dest_ip')
if alert.get('dest_port'):
hr['Destination Port'] = alert.get('dest_port')
ec['DestPort'] = alert.get('dest_port')
if alert.get('dest_geo_ip'):
dest_geo = alert['dest_geo_ip']
if dest_geo.get('latitude'):
hr['Destination Geo']['Latitude'] = dest_geo.get('latitude')
ec['DestGeo']['Latitude'] = dest_geo.get('latitude')
if dest_geo.get('longitude'):
hr['Destination Geo']['Longitude'] = dest_geo.get('longitude')
ec['DestGeo']['Longitude'] = dest_geo.get('longitude')
if dest_geo.get('country_name'):
hr['Destination Geo']['Country Name'] = dest_geo.get('country_name')
ec['DestGeo']['Country'] = dest_geo.get('country_name')
return hr, ec
ALERT_PARAM_KEYS = (
    'page', 'page_size', 'closed', 'closed_at', 'community_id', 'created_at',
    'dest_ip', 'dest_port', 'full_url', 'id', 'indicator_id',
    'indicator_loaded', 'observable_id', 'protocol', 'sensor_id',
    'sensor_name', 'soc_status', 'src_ip', 'src_port', 'status',
    'status_updated_at', 'team_id', 'title', 'ts', 'closed_at__gte',
    'closed_at__lte', 'created_at__gte', 'created_at__lte',
    'status_updated_at__gte', 'status_updated_at__lte',
    'status_updated_at__gt', 'status_updated_at__lt', 'ordering',
)
def alerts_params(args):
    """Forward every recognized, non-empty argument as a query parameter."""
    return {key: args.get(key) for key in ALERT_PARAM_KEYS if args.get(key)}
def indicator_params(args):
params = []
param = {}
observables = []
communities = []
if args.get('communities'):
community = {
'id': args.get('communities')
}
communities.append(community)
param['communities'] = communities
if args.get('type'):
observable = {
'type': OBSERVABLE_TYPES_MAP[args.get('type')],
'details': {
'value': args.get('value')
}
}
observables.append(observable)
param['observables'] = observables
if args.get('title'):
param['title'] = args.get('title')
if args.get('description'):
param['description'] = args.get('description')
if args.get('tlp'):
param['tlp'] = TLP_MAP[args.get('tlp')] # type: ignore
if args.get('confidence'):
param['confidence'] = CONFIDENCE_MAP[args.get('confidence')] # type: ignore
if args.get('operator'):
param['operator'] = args.get('operator')
if args.get('first_sighting'):
param['first_sighting'] = args.get('first_sighting')
if args.get('email_summary'):
param['email_summary'] = args.get('email_summary')
params.append(param)
return params
def authenticate():
headers = {'Content-Type': 'application/json', 'x-api-key': API_KEY}
req_body = json.dumps({'username': USERNAME, 'password': PASSWORD})
url = '/auth/access_token'
res_body = http_request('POST', url, data=req_body, headers=headers)
headers['Authorization'] = 'Bearer ' + res_body['access_token']
return headers
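# Illustrative token flow (shapes inferred from the code above, not from Perch docs):
#   POST {SERVER}/v1/auth/access_token  body: {"username": ..., "password": ...}
#   -> {"access_token": "<token>"}
# Subsequent calls send both the x-api-key header and
# "Authorization: Bearer <token>".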
INDICATOR_FIELD_MAP = [
    # (response key, human-readable label, context key, optional value mapping)
    ('id', 'ID', 'ID', None),
    ('confidence', 'Confidence', 'Confidence', CONFIDENCE_MAP),
    ('created_at', 'Created At', 'CreatedAt', None),
    ('created_by', 'Created By', 'CreatedBy', None),
    ('description', 'Description', 'Description', None),
    ('email_summary', 'Email Summary', 'EmailSummary', None),
    ('title', 'Title', 'Title', None),
    ('first_sighting', 'First Sighting', 'FirstSighting', None),
    ('perch_id', 'Perch ID', 'PerchID', None),
    ('team', 'Team', 'Team', None),
    ('tlp', 'TLP', 'TLP', TLP_MAP),
    ('updated_at', 'Updated At', 'UpdatedAt', None),
    ('operator', 'Operator', 'Operator', None),
]
def format_indicator(indicator):
    hr = makehash()  # type: dict
    ec = makehash()  # type: dict
    for key, hr_key, ec_key, value_map in INDICATOR_FIELD_MAP:
        value = indicator.get(key)
        if value:
            if value_map is not None:
                value = find_key_by_value(value, value_map)
            hr[hr_key] = value
            ec[ec_key] = value
    return hr, ec
def item_to_incident(item):
incident = {'name': 'Perch Incident: ' + item.get('title'),
'occurred': item.get('created_at'),
'rawJSON': json.dumps(item)}
return incident
'''COMMAND FUNCTIONS'''
def search_alerts_command():
headers = authenticate()
args = demisto.args()
params = alerts_params(args)
url = '/alerts'
res = http_request('GET', url, headers=headers, params=params)
res_results = res.get('results')
hr = ''
ec = {
"Perch": {
"Alert": []
}
} # type: dict
for alert in res_results:
alert_hr, alert_ec = format_alerts(alert)
ec['Perch']['Alert'].append(alert_ec)
hr += tableToMarkdown(f'{alert_ec.get("Title")}', alert_hr)
if len(res_results) == 0:
demisto.results('No results were found')
else:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': res_results,
'HumanReadable': hr,
'EntryContext': ec
})
def list_communities_command():
headers = authenticate()
args = demisto.args()
params = alerts_params(args)
url = '/communities'
res = http_request('GET', url, headers=headers, params=params)
res_results = res.get('results')
hr = tableToMarkdown('Communities Found', res_results, headerTransform=string_to_table_header, removeNull=True)
ec = {
"Perch": {
"Community": []
}
} # type: dict
for alert in res_results:
ec['Perch']['Community'].append(createContext(alert, keyTransform=string_to_context_key, removeNull=True))
if len(res_results) == 0:
demisto.results('No communities were found')
else:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': res_results,
'HumanReadable': hr,
'EntryContext': ec
})
def get_community_command():
headers = authenticate()
args = demisto.args()
params = alerts_params(args)
community_id = args.get('id')
url = f'/communities/{community_id}'
res = http_request('GET', url, headers=headers, params=params)
if len(res) > 0:
hr = tableToMarkdown('Communities Found', res, headerTransform=string_to_table_header, removeNull=True)
ec = {
"Perch": {
"Community": createContext(res, keyTransform=string_to_context_key, removeNull=True)
}
} # type: dict
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': res,
'HumanReadable': hr,
'EntryContext': ec
})
else:
demisto.results('No communities were found')
def create_indicator_command():
headers = authenticate()
args = demisto.args()
raw_data = indicator_params(args)
data = json.dumps(raw_data)
url = '/indicators'
res = http_request('POST', url, headers=headers, data=data)
indicator_hr, indicator_ec = format_indicator(res[0])
hr = ''
ec = {
"Perch": {
"Indicator": []
}
} # type: dict
ec['Perch']['Indicator'].append(indicator_ec)
hr += tableToMarkdown(f'{indicator_hr.get("Title")}', indicator_hr)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['markdown'],
'Contents': res,
'HumanReadable': hr,
'EntryContext': ec
})
def fetch_alerts(last_run, headers):
last_fetch = last_run.get('time')
url = '/alerts'
statuses_to_fetch = PARAMS.get('soc_status', [])
if statuses_to_fetch:
items = []
for status in statuses_to_fetch:
res = http_request('GET', url, headers=headers, params=alerts_params({'soc_status': STATUSES[status]}))
items += res.get('results')
else:
res = http_request('GET', url, headers=headers)
items = res.get('results')
items.sort(key=lambda r: r['created_at'])
if last_fetch is None:
last_fetch_raw = datetime.now() - timedelta(days=FETCH_TIME)
last_fetch = date_to_timestamp(last_fetch_raw, '%Y-%m-%dT%H:%M:%S.%fZ')
incidents = []
for item in items:
incident = item_to_incident(item)
incident_date = date_to_timestamp(incident['occurred'], '%Y-%m-%dT%H:%M:%S.%fZ')
if incident_date > last_fetch:
incidents.append(incident)
last_fetch = incident_date
return last_fetch, incidents
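# Illustrative incremental flow (the timestamp value is just an example):
#   last_fetch, incidents = fetch_alerts({'time': 1561017202}, headers)
#   demisto.setLastRun({'time': last_fetch})
# Only alerts created strictly after last_run['time'] become incidents, and
# the newest created_at timestamp seen is returned so the next run resumes there.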
def fetch_alerts_command():
last_run = demisto.getLastRun()
headers = authenticate()
last_fetch, incidents = fetch_alerts(last_run, headers)
demisto.setLastRun({'time': last_fetch})
demisto.incidents(incidents)
def test_module():
try:
headers = authenticate()
if PARAMS.get('isFetch'):
last_run = {'time': 1561017202}
fetch_alerts(last_run, headers)
demisto.results('ok')
except Exception as err:
return_error(str(err))
''' COMMANDS MANAGER / SWITCH PANEL '''
demisto.info(f'Command being called is {demisto.command()}')
try:
if demisto.command() == 'perch-search-alerts':
search_alerts_command()
elif demisto.command() == 'perch-get-community':
get_community_command()
elif demisto.command() == 'perch-list-communities':
list_communities_command()
elif demisto.command() == 'perch-create-indicator':
create_indicator_command()
elif demisto.command() == 'fetch-incidents':
fetch_alerts_command()
elif demisto.command() == 'test-module':
test_module()
# Log exceptions
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
|
543631e50bda338fde0f2e2bce403f65c29b9aa8
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/contrib/emacs-console/update.py
|
d6123c97e20a1ca259bb82d0ad6cca46088e297d
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 18
|
py
|
update.py
|
pkgname = "emacs"
|
a36b9554e9e0be3c92cd28c8bc43dffe8350fd6c
|
9b5ec84b75949cf51275da9140723e23519169e1
|
/examples/memory-fixed/evolve.py
|
3fdf13b4ec0a529017f47a6ec1b2e49a60bfd3ba
|
[
"BSD-3-Clause"
] |
permissive
|
CodeReclaimers/neat-python
|
82b4aca158e3bc20cf0d5ee8151ceb7f3cad0394
|
37bc8bb73fd6153a115001c2646f9f02bac3ad81
|
refs/heads/master
| 2023-09-02T03:29:29.036458
| 2023-07-28T22:57:40
| 2023-07-28T22:57:40
| 43,226,304
| 1,365
| 647
|
BSD-3-Clause
| 2023-08-13T17:31:00
| 2015-09-26T22:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,800
|
py
|
evolve.py
|
"""
This example produces networks that can remember a fixed-length sequence of bits. It is
intentionally very (overly?) simplistic just to show the usage of the NEAT library. However,
if you come up with a more interesting or impressive example, please submit a pull request!
This example also demonstrates the use of a custom activation function.
"""
import math
import os
import random
import neat
import visualize
# Demonstration of how to add your own custom activation function.
# This sinc function will be available if my_sinc_function is included in the
# config file activation_options option under the DefaultGenome section.
# Note that sinc is not necessarily useful for this example, it was chosen
# arbitrarily just to demonstrate adding a custom activation function.
def sinc(x):
return 1.0 if x == 0 else math.sin(x) / x
# Demonstration of how to add your own custom aggregation function.
# This l2norm function will be available if my_l2norm_function is included in the
# config file aggregation_options option under the DefaultGenome section.
# Note that l2norm is not necessarily useful for this example, it was chosen
# arbitrarily just to demonstrate adding a custom aggregation function.
def l2norm(x):
return (sum(i**2 for i in x))**0.5
# N is the length of the test sequence.
N = 4
# num_tests is the number of random examples each network is tested against.
num_tests = 2 ** (N + 2)
def eval_genome(genome, config):
net = neat.nn.RecurrentNetwork.create(genome, config)
error = 0.0
for _ in range(num_tests):
# Create a random sequence, and feed it to the network with the
# second input set to zero.
seq = [random.choice((0.0, 1.0)) for _ in range(N)]
net.reset()
for s in seq:
inputs = [s, 0.0]
net.activate(inputs)
# Set the second input to one, and get the network output.
for s in seq:
inputs = [0.0, 1.0]
output = net.activate(inputs)
error += (round(output[0]) - s) ** 2
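    # error / (N * num_tests) is the mean squared error per bit; assuming the
    # outputs stay in [0, 1], the fitness below ranges from 0.0 (every bit
    # wrong) to 4.0 (perfect recall of every test sequence).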
return 4.0 - 4.0 * (error / (N * num_tests))
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
# Determine path to configuration file.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
# Demonstration of saving a configuration back to a text file.
config.save('test_save_config.txt')
# Demonstration of how to add your own custom activation function.
# This sinc function will be available if my_sinc_function is included in the
# config file activation_options option under the DefaultGenome section.
config.genome_config.add_activation('my_sinc_function', sinc)
# Demonstration of how to add your own custom aggregation function.
# This l2norm function will be available if my_l2norm_function is included in the
# config file aggregation_options option under the DefaultGenome section.
config.genome_config.add_aggregation('my_l2norm_function', l2norm)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
winner = pop.run(eval_genomes, 200)
# Log statistics.
stats.save()
# Show output of the most fit genome against a random input.
print('\nBest genome:\n{!s}'.format(winner))
print('\nOutput:')
winner_net = neat.nn.RecurrentNetwork.create(winner, config)
num_correct = 0
for n in range(num_tests):
print('\nRun {0} output:'.format(n))
seq = [random.choice((0.0, 1.0)) for _ in range(N)]
winner_net.reset()
for s in seq:
inputs = [s, 0.0]
winner_net.activate(inputs)
print('\tseq {0}'.format(inputs))
correct = True
for s in seq:
output = winner_net.activate([0, 1])
print("\texpected {0:1.5f} got {1:1.5f}".format(s, output[0]))
correct = correct and round(output[0]) == s
print("OK" if correct else "FAIL")
num_correct += 1 if correct else 0
print("{0} of {1} correct {2:.2f}%".format(num_correct, num_tests, 100.0 * num_correct / num_tests))
node_names = {-1: 'input', -2: 'gate', 0: 'output'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, True, node_names=node_names, prune_unused=True)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
if __name__ == '__main__':
run()
|
2a0457c1c2c0e153d421fc757af8ea16c0ac24fb
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/pdf/ui/DEPS
|
66fe72596d09b815b17615e3bc8a843593a69d19
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 155
|
DEPS
|
include_rules = [
"+components/strings/grit/components_strings.h",
"+third_party/icu/source/i18n/unicode/ulocdata.h",
"+ui/base/l10n/l10n_util.h",
]
|
|
60e7a5e70fca78c71b3a467748254a897894bffd
|
600c580ddcfc9adc2fb4852006cad86cc2759241
|
/autogoal/datasets/cifar10.py
|
70e0b6472604bebc406595076684461d1381a1aa
|
[
"MIT"
] |
permissive
|
autogoal/autogoal
|
4cf2917d1bcd2267e8ca97b9eaabe03ec416b624
|
e7f504ffe57571c59cf582bcbc3ab0a5b91b7e06
|
refs/heads/main
| 2023-05-23T20:47:48.751916
| 2022-06-20T16:16:27
| 2022-06-20T16:16:27
| 255,711,241
| 195
| 78
|
MIT
| 2023-08-23T15:36:09
| 2020-04-14T19:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
cifar10.py
|
from autogoal.datasets import download, datapath
import pickle
import numpy as np
def load(training_batches=5):
"""
Load the CIFAR-10 dataset
##### Parameters
* 'training_batches': maximum number of batches to load for training,
each batch has 10,000 examples (min=`1`, max=`5`, default=`5`).
##### Examples
>>> X_train, y_train, X_test, y_test = load(training_batches=5)
>>> X_train.shape
(50000, 32, 32, 3)
>>> len(y_train)
50000
>>> X_test.shape
(10000, 32, 32, 3)
>>> len(y_test)
10000
>>> y_train[0]
6
"""
download("cifar10")
X_train = []
y_train = []
for i in range(1, training_batches + 1):
batch = datapath("cifar10") / f"data_batch_{i}"
with open(batch, "rb") as fp:
data = pickle.load(fp, encoding="bytes")
X_train.append(data[b"data"])
y_train.extend(data[b"labels"])
X_train = np.vstack(X_train)
X_train = np.reshape(X_train, (-1, 3, 32, 32)).transpose(0, 2, 3, 1)
test_batch = datapath("cifar10") / "test_batch"
with open(test_batch, "rb") as fp:
data = pickle.load(fp, encoding="bytes")
X_test, y_test = data[b"data"], data[b"labels"]
X_test = np.reshape(X_test, (-1, 3, 32, 32)).transpose(0, 2, 3, 1)
return X_train, y_train, X_test, y_test
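# Note: the images are returned as uint8 arrays in NHWC order, while the
# labels stay plain Python lists of ints; wrap them in np.asarray(...) if an
# array is needed.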
|
66b028bcca75f3d8056281458a6c73e5c8fd1d81
|
5cdd9c1b6adb67fec94f6349ad6203ce2702fecb
|
/idd/schema/modify_schema.py
|
e20e60e32672bcd532fa15645328e9a9fc04ea31
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
NREL/EnergyPlus
|
9d8fc6936b5a0c81d2469ab9cdabe55405ccb8f2
|
50b8a5511ce559e5175524b943c26cc5b99d712d
|
refs/heads/develop
| 2023-09-04T08:20:29.804559
| 2023-09-01T13:58:55
| 2023-09-01T13:58:55
| 14,620,185
| 1,013
| 406
|
NOASSERTION
| 2023-09-14T19:52:57
| 2013-11-22T14:47:34
|
C++
|
UTF-8
|
Python
| false
| false
| 16,365
|
py
|
modify_schema.py
|
# EnergyPlus, Copyright (c) 1996-2023, The Board of Trustees of the University
# of Illinois, The Regents of the University of California, through Lawrence
# Berkeley National Laboratory (subject to receipt of any required approvals
# from the U.S. Dept. of Energy), Oak Ridge National Laboratory, managed by UT-
# Battelle, Alliance for Sustainable Energy, LLC, and other contributors. All
# rights reserved.
#
# NOTICE: This Software was developed under funding from the U.S. Department of
# Energy and the U.S. Government consequently retains certain rights. As such,
# the U.S. Government has been granted for itself and others acting on its
# behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
# Software to reproduce, distribute copies to the public, prepare derivative
# works, and perform publicly and display publicly, and to permit others to do
# so.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, the University of Illinois, U.S. Dept. of Energy nor
# the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# (4) Use of EnergyPlus(TM) Name. If Licensee (i) distributes the software in
# stand-alone form without changes from the version obtained under this
# License, or (ii) Licensee makes a reference solely to the software
# portion of its product, Licensee must refer to the software as
# "EnergyPlus version X" software, where "X" is the version number Licensee
# obtained under this License and may not use a different name for the
# software. Except as specifically required in this Section (4), Licensee
# shall not use in a company name, a product name, in advertising,
# publicity, or other promotional activities any name, trade name,
# trademark, logo, or other designation of "EnergyPlus", "E+", "e+" or
# confusingly similar designation, without the U.S. Department of Energy's
# prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def anyOf():
return [
{
"type": "number",
},
{
"type": "string"
}
]
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
extension_renaming = {
'LifeCycleCost:UseAdjustment': 'multipliers',
'LifeCycleCost:UsePriceEscalation': 'escalations',
'ElectricLoadCenter:Transformer': 'meters',
'ElectricLoadCenter:Generators': 'generator_outputs',
'Generator:FuelCell:AirSupply': 'constituent_fractions',
'DemandManager:ExteriorLights': 'lights',
'DemandManager:Ventilation': 'controllers',
'DemandManagerAssignmentList': 'manager_data',
'DemandManager:Lights': 'lights',
'DemandManager:Thermostats': 'thermostats',
'DemandManager:ElectricEquipment': 'equipment',
'DaylightingDevice:Tubular': 'transition_lengths',
'Daylighting:Controls': 'control_data',
'ZoneHVAC:Baseboard:RadiantConvective:Steam': 'surface_fractions',
'ZoneHVAC:Baseboard:RadiantConvective:Electric': 'surface_fractions',
'ZoneHVAC:HighTemperatureRadiant': 'surface_fractions',
'ZoneHVAC:LowTemperatureRadiant:SurfaceGroup': 'surface_fractions',
'ZoneHVAC:Baseboard:RadiantConvective:Water': 'surface_fractions',
'ZoneHVAC:VentilatedSlab:SlabGroup': 'data',
'ZoneHVAC:CoolingPanel:RadiantConvective:Water': 'surface_fractions',
'AirConditioner:VariableRefrigerantFlow:FluidTemperatureControl:HR': 'loading_indices',
'AirConditioner:VariableRefrigerantFlow:FluidTemperatureControl': 'loading_indices',
'ZoneTerminalUnitList': 'terminal_units',
'RoomAir:TemperaturePattern:NondimensionalHeight': 'pairs',
'RoomAir:Node:AirflowNetwork:InternalGains': 'gains',
'RoomAirSettings:AirflowNetwork': 'nodes',
'RoomAir:Node:AirflowNetwork:HVACEquipment': 'equipment_fractions',
'RoomAir:TemperaturePattern:SurfaceMapping': 'surface_deltas',
'RoomAir:Node:AirflowNetwork:AdjacentSurfaceList': 'surfaces',
'Controller:MechanicalVentilation': 'zone_specifications',
'WaterUse:Connections': 'connections',
'WaterUse:RainCollector': 'surfaces',
'Output:Table:Annual': 'variable_details',
'Meter:CustomDecrement': 'variable_details',
'Output:Table:Monthly': 'variable_details',
'Output:Table:SummaryReports': 'reports',
'Meter:Custom': 'variable_details',
'UnitarySystemPerformance:Multispeed': 'flow_ratios',
'SurfaceProperty:ExteriorNaturalVentedCavity': 'surface',
'ZoneProperty:UserViewFactors:BySurfaceName': 'view_factors',
'SurfaceProperty:HeatTransferAlgorithm:SurfaceList': 'surface',
'AirLoopHVAC:ZoneSplitter': 'nodes',
'AirLoopHVAC:SupplyPath': 'components',
'AirLoopHVAC:ReturnPath': 'components',
'AirLoopHVAC:ReturnPlenum': 'nodes',
'AirLoopHVAC:ZoneMixer': 'nodes',
'AirLoopHVAC:SupplyPlenum': 'nodes',
'BuildingSurface:Detailed': 'vertices',
'Shading:Zone:Detailed': 'vertices',
'RoofCeiling:Detailed': 'vertices',
'Shading:Site:Detailed': 'vertices',
'Wall:Detailed': 'vertices',
'Space': 'tags',
'SpaceList': 'spaces',
'DesignSpecification:OutdoorAir:SpaceList': 'space_specs',
'ZoneList': 'zones',
'Floor:Detailed': 'vertices',
'Shading:Building:Detailed': 'vertices',
'SolarCollector:UnglazedTranspired:Multisystem': 'systems',
'SolarCollector:UnglazedTranspired': 'surfaces',
'Parametric:SetValueForRun': 'values',
'Parametric:Logic': 'lines',
'Parametric:FileNameSuffix': 'suffixes',
'Parametric:RunControl': 'runs',
'ZoneHVAC:EquipmentList': 'equipment',
'SpaceHVAC:ZoneEquipmentSplitter': 'spaces',
'SpaceHVAC:ZoneEquipmentMixer': 'spaces',
'AvailabilityManagerAssignmentList': 'managers',
'Table:IndependentVariable': 'values',
'Table:IndependentVariableList': 'independent_variables',
'Table:Lookup': 'values',
'Matrix:TwoDimension': 'values',
'WindowMaterial:GlazingGroup:Thermochromic': 'temperature_data',
'Schedule:Compact': 'data',
'Schedule:Day:Interval': 'data',
'Schedule:Week:Compact': 'data',
'EnergyManagementSystem:GlobalVariable': 'variables',
'EnergyManagementSystem:ProgramCallingManager': 'programs',
'EnergyManagementSystem:Program': 'lines',
'EnergyManagementSystem:Subroutine': 'lines',
'Refrigeration:CaseAndWalkInList': 'cases_and_walkins',
'Refrigeration:CompressorList': 'compressors',
'ZoneHVAC:RefrigerationChillerSet': 'chillers',
'Refrigeration:WalkIn': 'zone_data',
'Refrigeration:TransferLoadList': 'transfer_loads',
'Branch': 'components',
'PipingSystem:Underground:Domain': 'pipe_circuits',
'Connector:Splitter': 'branches',
'Connector:Mixer': 'branches',
'BranchList': 'branches',
'PipingSystem:Underground:PipeCircuit': 'pipe_segments',
'NodeList': 'nodes',
'OutdoorAir:NodeList': 'nodes',
'Fan:SystemModel': 'speed_fractions',
'AirflowNetwork:Distribution:DuctViewFactors': 'surfaces',
'GroundHeatExchanger:System': 'vertical_well_locations',
'GroundHeatExchanger:ResponseFactors': 'g_functions',
'Foundation:Kiva': 'blocks',
'SurfaceProperty:ExposedFoundationPerimeter': 'surfaces',
'SurfaceProperty:SurroundingSurfaces': 'surfaces',
'SurfaceProperty:GroundSurfaces': 'ground_surfaces',
'ZoneHVAC:HybridUnitaryHVAC': 'modes',
'ShadowCalculation': 'shading_zone_groups',
'Schedule:Year': 'schedule_weeks',
'WindowShadingControl': 'fenestration_surfaces',
'PlantEquipmentList': 'equipment',
'CondenserEquipmentList': 'equipment',
'AirLoopHVAC:Mixer': 'nodes',
'AirLoopHVAC:Splitter': 'nodes',
'AirLoopHVAC:DedicatedOutdoorAirSystem': 'airloophvacs',
'PythonPlugin:Variables': 'global_py_vars',
'PythonPlugin:SearchPaths': 'py_search_paths',
'Output:Diagnostics': 'diagnostics',
}
remaining_objects = [
'Site:SpectrumData',
'Schedule:Day:List',
'MaterialProperty:GlazingSpectralData',
]
def get_schema_object(schema, object_key):
if '.*' in schema['properties'][object_key]['patternProperties']:
return schema['properties'][object_key]['patternProperties']['.*']
if R'^.*\S.*$' in schema['properties'][object_key]['patternProperties']:
return schema['properties'][object_key]['patternProperties'][R'^.*\S.*$']
raise KeyError(R'The patternProperties value is not a valid choice (".*", "^.*\S.*$")')
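# Illustrative before/after (field values are made up): converting an enum
# field so that it accepts a number or a blank string turns
#   {"type": "string", "enum": ["1", "2", ""]}
# into
#   {"anyOf": [{"type": "number", "enum": [1, 2]},
#              {"type": "string", "enum": [""]}]}
# which is the pattern change_special_cased_enums below applies to each
# listed field.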
def change_schedule_compact(schema):
loc = get_schema_object(schema, 'Schedule:Compact')['properties']['extensions']['items']['properties']['field']
loc.pop('type')
loc['anyOf'] = anyOf()
def change_special_cased_enums(schema):
    # These fields accept either an integer enum value or an empty string,
    # so replace their single enum with an anyOf of the two forms.
    int_or_blank_fields = [
        ('GroundHeatTransfer:Slab:Insulation', 'ivins_flag_is_there_vertical_insulation'),
        ('WindowMaterial:Screen', 'angle_of_resolution_for_screen_transmittance_output_map'),
        ('Refrigeration:System', 'number_of_compressor_stages'),
        ('ElectricLoadCenter:Transformer', 'phase'),
    ]
    for object_key, field in int_or_blank_fields:
        loc = get_schema_object(schema, object_key)['properties'][field]
        loc.pop('type')
        newAnyOf = anyOf()
        newAnyOf[0]['enum'] = [int(i) for i in loc['enum'] if isInt(i)]
        newAnyOf[1]['enum'] = ['']
        loc['anyOf'] = newAnyOf
        loc.pop('enum')
    # These two Zone enums additionally accept a blank value.
    for field in ('zone_outside_convection_algorithm', 'zone_inside_convection_algorithm'):
        get_schema_object(schema, 'Zone')['properties'][field]['enum'].insert(0, '')
def change_utility_cost(schema):
    # UtilityCost:Charge:Block: every field after the first six accepts a
    # number or a variable name, so each becomes an anyOf of number/string.
    legacy_idd = schema['properties']['UtilityCost:Charge:Block']['legacy_idd']['fields']
    loc = get_schema_object(schema, 'UtilityCost:Charge:Block')['properties']
    for field in legacy_idd[6:]:
        loc[field].pop('type')
        loc[field]['anyOf'] = anyOf()
    # The same value-or-variable-name treatment for the remaining tariff objects.
    value_or_variable_fields = {
        'UtilityCost:Ratchet': (
            'offset_value_or_variable_name', 'multiplier_value_or_variable_name'),
        'UtilityCost:Charge:Simple': ('cost_per_unit_value_or_variable_name',),
        'UtilityCost:Qualify': ('threshold_value_or_variable_name',),
        'UtilityCost:Tariff': (
            'minimum_monthly_charge_or_variable_name', 'monthly_charge_or_variable_name'),
    }
    for object_key, fields in value_or_variable_fields.items():
        loc = get_schema_object(schema, object_key)['properties']
        for field in fields:
            loc[field].pop('type')
            loc[field]['anyOf'] = anyOf()
def add_explicit_extensible_bounds(schema):
# Schedule:Year
loc = get_schema_object(schema, 'Schedule:Year')['properties']['schedule_weeks']
loc['minItems'] = 1
loc['maxItems'] = 53
# EnergyManagementSystem:Program
loc = get_schema_object(schema, 'EnergyManagementSystem:Program')
if 'required' in loc and 'lines' not in loc['required']:
loc['required'].append('lines')
if 'required' not in loc:
loc['required'] = ['lines']
loc['properties']['lines']['minItems'] = 1
def change_special_cased_name_fields(schema):
original_name = schema['properties']['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['legacy_idd']['field_info'].pop('zone_terminal_unit_name')
schema['properties']['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['legacy_idd']['field_info']['name'] = original_name
schema['properties']['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['legacy_idd']['fields'][0] = 'name'
schema['properties']['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['legacy_idd']['alphas']['fields'][0] = 'name'
del get_schema_object(schema, 'ZoneHVAC:TerminalUnit:VariableRefrigerantFlow')['required'][0]
schema['properties']['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['name'] = \
get_schema_object(schema, 'ZoneHVAC:TerminalUnit:VariableRefrigerantFlow')['properties'].pop('zone_terminal_unit_name')
original_name = schema['properties']['AirConditioner:VariableRefrigerantFlow']['legacy_idd']['field_info'].pop('heat_pump_name')
schema['properties']['AirConditioner:VariableRefrigerantFlow']['legacy_idd']['field_info']['name'] = original_name
schema['properties']['AirConditioner:VariableRefrigerantFlow']['legacy_idd']['fields'][0] = 'name'
schema['properties']['AirConditioner:VariableRefrigerantFlow']['legacy_idd']['alphas']['fields'][0] = 'name'
del get_schema_object(schema, 'AirConditioner:VariableRefrigerantFlow')['required'][0]
schema['properties']['AirConditioner:VariableRefrigerantFlow']['name'] = \
get_schema_object(schema, 'AirConditioner:VariableRefrigerantFlow')['properties'].pop('heat_pump_name')
def change_extensions_name(schema):
for key, value in extension_renaming.items():
get_schema_object(schema, key)['properties'][value] = get_schema_object(schema, key)['properties']['extensions']
loc = get_schema_object(schema, key)['properties']
del loc['extensions']
schema['properties'][key]['legacy_idd']['extension'] = value
for key in remaining_objects:
schema['properties'][key]['legacy_idd']['extension'] = 'extensions'
def change_89_release_issues(schema):
curves = [
'Curve:Linear', 'Curve:Quadratic', 'Curve:Cubic', 'Curve:Quartic', 'Curve:Exponent',
'Curve:Bicubic', 'Curve:Biquadratic', 'Curve:QuadraticLinear', 'Curve:CubicLinear', 'Curve:Triquadratic',
'Curve:ExponentialSkewNormal', 'Curve:Sigmoid', 'Curve:RectangularHyperbola1', 'Curve:RectangularHyperbola2', 'Curve:ExponentialDecay',
'Curve:DoubleExponentialDecay', 'Curve:ChillerPartLoadWithLift', 'Table:Lookup'
]
for curve in curves:
get_schema_object(schema, curve)['properties']['output_unit_type']['enum'] = [
'',
'Capacity',
'Dimensionless',
'Power',
'Pressure',
'Temperature'
]
get_schema_object(schema, 'Schedule:Week:Compact')['properties']['data']['items']['properties']['daytype_list'].pop('enum')
|
ea55713b080eedc03fc0d6dc1648e8ecee66b829
|
f6aac61a48a87743be9c40fecdc24344bae4d263
|
/htdocs/onsite/features/vote.py
|
164319ed1a814e6b30b1be7f132390b4940654d1
|
[
"MIT"
] |
permissive
|
akrherz/iem
|
8714d99b371c8818f7cdde73dd24639e9fc7d42b
|
178015584b7fb5b585f65be6013eaf16fb6db0c7
|
refs/heads/main
| 2023-08-19T02:58:24.507782
| 2023-08-18T12:08:31
| 2023-08-18T12:08:31
| 4,253,774
| 118
| 74
|
MIT
| 2023-09-14T18:28:41
| 2012-05-07T20:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
vote.py
|
""" Feature Voting"""
import datetime
import json
from http.cookies import SimpleCookie
from paste.request import parse_formvars
from pyiem.util import get_dbconn
def do(environ, headers, vote):
"""Do Something, yes do something"""
cookie = SimpleCookie(environ.get("HTTP_COOKIE", ""))
myoid = 0
if "foid" in cookie:
myoid = int(cookie["foid"].value)
pgconn = get_dbconn("mesosite")
cursor = pgconn.cursor()
cursor.execute(
"SELECT to_char(valid, 'YYmmdd')::int as oid, good, bad, abstain "
"from feature WHERE valid < now() ORDER by valid DESC LIMIT 1"
)
row = cursor.fetchone()
foid = row[0]
d = {"good": row[1], "bad": row[2], "abstain": row[3], "can_vote": True}
if myoid == foid:
d["can_vote"] = False
if myoid < foid and vote in ["good", "bad", "abstain"]:
# Allow this vote
d[vote] += 1
cursor.execute(
f"UPDATE feature SET {vote} = {vote} + 1 WHERE "
"to_char(valid, 'YYmmdd')::int = %s",
(foid,),
)
# Now we set a cookie
expiration = datetime.datetime.now() + datetime.timedelta(days=4)
cookie = SimpleCookie()
cookie["foid"] = foid
cookie["foid"]["path"] = "/onsite/features/"
cookie["foid"]["expires"] = expiration.strftime(
"%a, %d-%b-%Y %H:%M:%S CST"
)
headers.append(("Set-Cookie", cookie.output(header="")))
cursor.close()
pgconn.commit()
d["can_vote"] = False
return d
def application(environ, start_response):
"""Process this request.
This should look something like "/onsite/features/vote.json"
or like "/onsite/features/vote/abstain.json".
"""
headers = [("Content-type", "application/json")]
fields = parse_formvars(environ)
vote = fields.get("vote", "missing")
j = do(environ, headers, vote)
start_response("200 OK", headers)
return [json.dumps(j).encode("ascii")]
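# Illustrative request/response sketch (tallies are made up):
#   GET /onsite/features/vote/good.json
#   -> {"good": 12, "bad": 3, "abstain": 1, "can_vote": false}
# A "foid" cookie scoped to /onsite/features/ marks the feature already voted
# on, so repeat votes within the 4-day cookie lifetime are refused.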
|
7b3bb02174414fdc69886a486253e657aab5e042
|
b4cfd4949cab5dc5bd27fb028596a9fc02f4e1db
|
/skfda/preprocessing/registration/_fisher_rao.py
|
9c630571ff64a4255ec10f164f03ea7a0264a0f8
|
[
"BSD-3-Clause"
] |
permissive
|
GAA-UAM/scikit-fda
|
dabfd995f2c82efb0d44fa1d2005b2a8ca67442b
|
dfbce35cc9e67d93306dddf0edf4f95aaacd8aff
|
refs/heads/develop
| 2023-08-31T09:11:31.407423
| 2023-08-18T08:19:21
| 2023-08-18T08:19:21
| 96,133,420
| 231
| 55
|
BSD-3-Clause
| 2023-08-18T08:19:22
| 2017-07-03T17:06:56
|
Python
|
UTF-8
|
Python
| false
| false
| 10,632
|
py
|
_fisher_rao.py
|
"""Fisher-Rao elastic registration."""
from __future__ import annotations
import warnings
from typing import Callable, Optional, TypeVar, Union
import numpy as np
from sklearn.utils.validation import check_is_fitted
from ..._utils import invert_warping, normalize_scale
from ...exploratory.stats import fisher_rao_karcher_mean
from ...exploratory.stats._fisher_rao import _elastic_alignment_array
from ...misc.operators import SRSF
from ...misc.validation import check_fdata_dimensions, check_fdata_same_kind
from ...representation import FDataGrid
from ...representation.basis import Basis
from ...representation.interpolation import SplineInterpolation
from ...typing._numpy import ArrayLike
from ._base import InductiveRegistrationTransformer
_MeanType = Callable[[FDataGrid], FDataGrid]
SelfType = TypeVar("SelfType", bound="FisherRaoElasticRegistration")
class FisherRaoElasticRegistration(
InductiveRegistrationTransformer[FDataGrid, FDataGrid],
):
r"""
    Align an FDataGrid using the SRSF framework.
    Let :math:`f` be a function of the functional data object which will be
    aligned to the template :math:`g`. This calculates the warping which minimises
the Fisher-Rao distance between :math:`g` and the registered function
:math:`f^*(t)=f(\gamma^*(t))=f \circ \gamma^*`.
.. math::
\gamma^* = argmin_{\gamma \in \Gamma} d_{\lambda}(f \circ
\gamma, g)
Where :math:`d_{\lambda}` denotes the extended Fisher-Rao distance with a
penalty term, used to control the amount of warping.
.. math::
d_{\lambda}^2(f \circ \gamma, g) = \| SRSF(f \circ \gamma)
\sqrt{\gamma'} - SRSF(g)\|_{\mathbb{L}^2}^2 + \lambda
\mathcal{R}(\gamma)
    The penalty term used in the implementation is
.. math::
\mathcal{R}(\gamma) = \|\sqrt{\gamma'}- 1 \|_{\mathbb{L}^2}^2
    which restricts the amount of elasticity employed in the alignment.
The registered function :math:`f^*(t)` can be calculated using the
composition :math:`f^*(t)=f(\gamma^*(t))`.
    If the template is not specified, the Karcher mean of the set of functions
    under the elastic metric, also known as the `elastic mean`, is used to
    perform the alignment; it is the local minimum of the sum of squares of
    elastic distances. See :func:`~elastic_mean`.
    The algorithms employed and the SRSF framework are described extensively
    in :footcite:`srivastava+klassen_2016_functionala`.
Args:
        template: Template to
            align the curves. Can contain a single sample, to align all the
            curves to it, or the same number of samples as the fdatagrid. By
            default the `elastic mean` is used, in which case
            :func:`elastic_mean` is called.
        penalty: Controls the amount of elasticity.
Defaults to 0.
output_points: Set of points where the
functions are evaluated, by default uses the sample points of the
fdatagrid which will be transformed.
grid_dim: Dimension of the grid used in the DP
alignment algorithm. Defaults to 7.
derivative_method: Method to use to compute the derivative. If ``None``
(the default), finite differences are used. If a basis
object is passed, the grid is converted to a basis
representation and the derivative is evaluated using that
representation.
Attributes:
template\_: Template learned during fitting,
used for alignment in :meth:`transform`.
warping\_: Warping applied during the last
transformation.
References:
.. footbibliography::
Examples:
Elastic registration with train/test sets.
>>> from skfda.preprocessing.registration import (
... FisherRaoElasticRegistration,
... )
>>> from skfda.datasets import make_multimodal_samples
>>> X_train = make_multimodal_samples(n_samples=15, random_state=0)
>>> X_test = make_multimodal_samples(n_samples=3, random_state=1)
Fit the transformer, which learns the elastic mean of the train
set as template.
>>> elastic_registration = FisherRaoElasticRegistration()
>>> elastic_registration.fit(X_train)
FisherRaoElasticRegistration(...)
Registration of the test set.
>>> elastic_registration.transform(X_test)
FDataGrid(...)
"""
template_: FDataGrid
warping_: FDataGrid
def __init__(
self,
*,
template: Union[FDataGrid, _MeanType] = fisher_rao_karcher_mean,
penalty: float = 0,
output_points: Optional[ArrayLike] = None,
grid_dim: int = 7,
derivative_method: Optional[Basis] = None,
) -> None:
self.template = template
self.penalty = penalty
self.output_points = output_points
self.grid_dim = grid_dim
self.derivative_method = derivative_method
def fit(self: SelfType, X: FDataGrid, y: object = None) -> SelfType:
# Points of discretization
self._output_points = (
X.grid_points[0]
if self.output_points is None
else np.asarray(self.output_points)
)
if isinstance(self.template, FDataGrid):
self.template_ = self.template # Template already constructed
else:
self.template_ = self.template(X)
check_fdata_same_kind(X, self.template_)
# Constructs the SRSF of the template
self._srsf = SRSF(
output_points=self._output_points,
initial_value=0,
method=self.derivative_method,
)
self._template_srsf = self._srsf.fit_transform(self.template_)
return self
def transform(self, X: FDataGrid, y: object = None) -> FDataGrid:
check_is_fitted(self)
check_fdata_dimensions(
X,
dim_domain=1,
dim_codomain=1,
)
check_fdata_same_kind(X, self.template_)
if (
len(self._template_srsf) != 1
and len(X) != len(self._template_srsf)
):
raise ValueError(
"The template should contain one sample to align "
"all the curves to the same function or the "
"same number of samples than X.",
)
fdatagrid_srsf = self._srsf.fit_transform(X)
# Points of discretization
output_points = self._output_points
# Discretization at the evaluation points
q_data = fdatagrid_srsf(output_points)[..., 0]
template_data = self._template_srsf(output_points)[..., 0]
if q_data.shape[0] == 1:
q_data = q_data[0]
if template_data.shape[0] == 1:
template_data = template_data[0]
# Values of the warping
gamma = _elastic_alignment_array(
template_data,
q_data,
normalize_scale(output_points),
self.penalty,
self.grid_dim,
)
# Normalize warping to original interval
gamma = normalize_scale(
gamma,
a=output_points[0],
b=output_points[-1],
)
# Interpolation
interpolation = SplineInterpolation(
interpolation_order=3,
monotone=True,
)
self.warping_ = FDataGrid(
gamma,
output_points,
interpolation=interpolation,
)
return X.compose(self.warping_, eval_points=output_points)
def inverse_transform(self, X: FDataGrid, y: object = None) -> FDataGrid:
r"""
Reverse the registration procedure previously applied.
Let :math:`\gamma(t)` be the warping applied to construct a registered
functional datum :math:`f^*(t)=f(\gamma(t))`.
Given a functional datum :math:`f^*(t)`, :math:`\gamma^{-1}(t)` is
computed to reverse the registration procedure:
:math:`f(t)=f^*(\gamma^{-1}(t))`.
Args:
X: Functional data to apply the reverse
transform.
y: Present for API conventions.
Returns:
Functional data composed with the inverse warping.
Raises:
ValueError: If the warpings :math:`\gamma` were not built via
:meth:`transform` or if the number of samples of `X` is
different from the number of samples of the dataset
previously transformed.
Examples:
Center the datasets taking into account the misalignment.
>>> from skfda.preprocessing.registration import (
... FisherRaoElasticRegistration,
... )
>>> from skfda.datasets import make_multimodal_samples
>>> X = make_multimodal_samples(random_state=0)
Registration of the dataset.
>>> elastic_registration = FisherRaoElasticRegistration()
>>> X = elastic_registration.fit_transform(X)
Subtract the elastic mean built as template during the
registration and reverse the transformation.
>>> X = X - elastic_registration.template_
>>> X_center = elastic_registration.inverse_transform(X)
>>> X_center
FDataGrid(...)
See also:
:func:`invert_warping`
"""
warping = getattr(self, 'warping_', None)
if warping is None:
raise ValueError(
"Data must be previosly transformed to apply the "
"inverse transform",
)
elif len(X) != len(warping):
raise ValueError(
"Data must contain the same number of samples "
"than the dataset previously transformed",
)
inverse_warping = invert_warping(warping)
return X.compose(inverse_warping, eval_points=self.output_points)
class ElasticRegistration(FisherRaoElasticRegistration):
"""Deprecated name for FisherRaoElasticRegistration."""
def __init__(
self,
template: Union[FDataGrid, _MeanType] = fisher_rao_karcher_mean,
penalty: float = 0,
output_points: Optional[ArrayLike] = None,
grid_dim: int = 7,
) -> None:
warnings.warn(
"ElasticRegistration has been renamed. "
"Use FisherRaoElasticRegistration instead.",
DeprecationWarning,
)
super().__init__(
template=template,
penalty=penalty,
output_points=output_points,
grid_dim=grid_dim,
)
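if __name__ == "__main__":
    # Hedged usage sketch (editor addition, not part of skfda): register a small
    # synthetic dataset and then undo the warping.  penalty=0.1 is an arbitrary
    # value chosen only to illustrate the parameter, not a recommended default.
    from skfda.datasets import make_multimodal_samples
    X = make_multimodal_samples(n_samples=5, random_state=0)
    reg = FisherRaoElasticRegistration(penalty=0.1)
    X_registered = reg.fit_transform(X)
    X_back = reg.inverse_transform(X_registered)  # approximately recovers X
    print(X_registered)
    print(X_back)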
|
f828793cbe7c546d190d3ecc8b5142a19d089319
|
bea7c9c1873755ff1b5a6a22be47d73b0b2ca722
|
/plugins/public/cinq-collector-aws/cinq_collector_aws/views/s3.py
|
cae478d1ab4aa9a38ed10096c18f88b5ba84e7c6
|
[
"Apache-2.0"
] |
permissive
|
RiotGames/cloud-inquisitor
|
25297bb2d9204aefa8494d1865c97fb4d0a4d3b9
|
29a26c705381fdba3538b4efedb25b9e09b387ed
|
refs/heads/master
| 2023-07-09T12:45:53.016708
| 2020-04-16T17:01:01
| 2020-04-16T17:01:01
| 111,150,198
| 468
| 60
|
Apache-2.0
| 2020-09-23T01:00:59
| 2017-11-17T21:00:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
s3.py
|
from cloud_inquisitor.constants import ROLE_USER, HTTP
from cloud_inquisitor.plugins import BaseView
from cloud_inquisitor.plugins.types.resources import S3Bucket
from cloud_inquisitor.utils import MenuItem
from cloud_inquisitor.wrappers import check_auth, rollback
class S3List(BaseView):
URLS = ['/api/v1/s3']
MENU_ITEMS = [
MenuItem(
'browse',
'S3 Buckets',
's3.list',
's3',
args={
'page': 1,
'count': 100,
'accounts': None,
'location': None,
'resourceId': None,
'websiteEnabled': None,
},
order=5
)
]
@rollback
@check_auth(ROLE_USER)
def get(self):
try:
self.reqparse.add_argument('page', type=int, default=1)
self.reqparse.add_argument('count', type=int, default=100, choices=[25, 50, 100])
self.reqparse.add_argument('accounts', type=str, default=None, action='append')
self.reqparse.add_argument('location', type=str, default=None, action='append')
self.reqparse.add_argument('resourceId', type=str, default=None, action='append')
self.reqparse.add_argument('websiteEnabled', type=str, default=None, action='append')
args = self.reqparse.parse_args()
query = {
'limit': args['count'],
'page': args['page'],
'properties': {}
}
if args['accounts']:
query['accounts'] = args['accounts']
if args['location']:
query['properties']['location'] = args['location']
if args['resourceId']:
query['resources'] = args['resourceId']
if args['websiteEnabled']:
query['properties']['website_enabled'] = args['websiteEnabled']
total, buckets = S3Bucket.search(**query)
response = {
'message': None,
's3Count': total,
's3': buckets,
}
if total == 0:
return self.make_response({
'message': 'No buckets found matching criteria',
's3Count': total,
's3': []
}, HTTP.NOT_FOUND)
return self.make_response(response)
except Exception as e:
print('Error calling base class get {}'.format(e))
class S3Get(BaseView):
URLS = ['/api/v1/s3/<string:resource_id>']
@rollback
@check_auth(ROLE_USER)
def get(self, resource_id):
try:
bucket = S3Bucket.get(resource_id)
return self.make_response({
's3': bucket
}, HTTP.OK)
except Exception as e:
print('Error calling S3Get {}'.format(e))
|
fab3d78403014279bd08ad0d8a9f30363c73785c
|
7c3e647b9ff8cb7931af8f5571ebc4dfe234197f
|
/ps6000aExamples/ps6000aStreamingModeExample.py
|
69ad7d747e30198ca6061b1fffaee3f87f983311
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC"
] |
permissive
|
picotech/picosdk-python-wrappers
|
6c164ab808c540214086a0a1b98b3a90120707a8
|
d0231b11619530ba278404a67ff75ccc667bd4b8
|
refs/heads/master
| 2023-08-18T06:56:50.885498
| 2023-08-16T08:56:26
| 2023-08-16T08:56:26
| 148,156,912
| 157
| 117
|
ISC
| 2023-09-12T15:00:30
| 2018-09-10T13:07:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,330
|
py
|
ps6000aStreamingModeExample.py
|
#
# Copyright (C) 2020 Pico Technology Ltd. See LICENSE file for terms.
#
# PS6000 A STREAMING MODE EXAMPLE
# This example opens a 6000a driver device, sets up two channels (A and B) then collects a streamed set of data.
# This data is then plotted as mV against time in ns.
import ctypes
# import numpy as np
from picosdk.ps6000a import ps6000a as ps
from picosdk.PicoDeviceEnums import picoEnum as enums
from picosdk.PicoDeviceStructs import picoStruct as structs
import matplotlib.pyplot as plt
from picosdk.functions import adc2mV, assert_pico_ok
from picosdk.constants import PICO_STATUS
import time
# Create chandle and status ready for use
chandle = ctypes.c_int16()
status = {}
# Open 6000 A series PicoScope
# returns handle to chandle for use in future API functions
resolution = enums.PICO_DEVICE_RESOLUTION["PICO_DR_8BIT"]
status["openunit"] = ps.ps6000aOpenUnit(ctypes.byref(chandle), None, resolution)
assert_pico_ok(status["openunit"])
# Set channel A on
# handle = chandle
channelA = enums.PICO_CHANNEL["PICO_CHANNEL_A"]
coupling = enums.PICO_COUPLING["PICO_DC"]
channelRange = 7
# analogueOffset = 0 V
bandwidth = enums.PICO_BANDWIDTH_LIMITER["PICO_BW_FULL"]
status["setChannelA"] = ps.ps6000aSetChannelOn(chandle, channelA, coupling, channelRange, 0, bandwidth)
assert_pico_ok(status["setChannelA"])
channelB = enums.PICO_CHANNEL["PICO_CHANNEL_B"]
status["setChannelB"] = ps.ps6000aSetChannelOn(chandle, channelB, coupling, channelRange, 0, bandwidth)
assert_pico_ok(status["setChannelB"])
# set channel C-H off
for x in range(2, 7, 1):
channel = x
status["setChannel", x] = ps.ps6000aSetChannelOff(chandle, channel)
assert_pico_ok(status["setChannel", x])
# Set number of samples to be collected
noOfPreTriggerSamples = 100000
noOfPostTriggerSamples = 900000
nSamples = noOfPostTriggerSamples + noOfPreTriggerSamples
# Set simple trigger on channel A, 1 V rising with 1 s autotrigger
# handle = chandle
# enable = 1
source = channelA
# threshold = 1000 mV
direction = enums.PICO_THRESHOLD_DIRECTION["PICO_RISING"]
# delay = 0 s
# autoTriggerMicroSeconds = 1000000 us
status["setSimpleTrigger"] = ps.ps6000aSetSimpleTrigger(chandle, 1, source, 1000, direction, 0, 1000000)
assert_pico_ok(status["setSimpleTrigger"])
# create buffers
maxBuffers = 10
bufferA = ((ctypes.c_int16 * nSamples) * 10)()
bufferB = ((ctypes.c_int16 * nSamples) * 10)()
# Set data buffers
# handle = chandle
# channel = channelA
# bufferMax = bufferAMax
# bufferMin = bufferAMin
# nSamples = nSamples
dataType = enums.PICO_DATA_TYPE["PICO_INT16_T"]
waveform = 0
downSampleMode = enums.PICO_RATIO_MODE["PICO_RATIO_MODE_RAW"]
clear = enums.PICO_ACTION["PICO_CLEAR_ALL"]
add = enums.PICO_ACTION["PICO_ADD"]
action = clear | add # PICO_ACTION["PICO_CLEAR_WAVEFORM_CLEAR_ALL"] | PICO_ACTION["PICO_ADD"]
actionAdd = add
status["setDataBuffersA"] = ps.ps6000aSetDataBuffer(chandle, channelA, ctypes.byref(bufferA[0]), nSamples, dataType,
waveform, downSampleMode, action)
assert_pico_ok(status["setDataBuffersA"])
status["setDataBuffersB"] = ps.ps6000aSetDataBuffer(chandle, channelB, ctypes.byref(bufferB[0]), nSamples, dataType,
waveform, downSampleMode, actionAdd)
assert_pico_ok(status["setDataBuffersB"])
# Run streaming
sampleInterval = ctypes.c_double(1)
sampleIntervalTimeUnits = enums.PICO_TIME_UNITS["PICO_US"]
autoStop = 0
downSampleRatio = 1
status["runStreaming"] = ps.ps6000aRunStreaming(chandle, ctypes.byref(sampleInterval), sampleIntervalTimeUnits,
noOfPreTriggerSamples, noOfPostTriggerSamples, autoStop,
downSampleRatio, downSampleMode)
assert_pico_ok(status["runStreaming"])
streamData = (structs.PICO_STREAMING_DATA_INFO * 2)()
streamData[0] = structs.PICO_STREAMING_DATA_INFO(channelA, downSampleMode, dataType, 0, 0, 0, 0)
streamData[1] = structs.PICO_STREAMING_DATA_INFO(channelB, downSampleMode, dataType, 0, 0, 0, 0)
streamTrigger = structs.PICO_STREAMING_DATA_TRIGGER_INFO(0, 0, 0)
count = 0
picoOk = PICO_STATUS["PICO_OK"]
collectedSamples = 0
while collectedSamples < (maxBuffers*nSamples):
status["getStreamingLatestValues"] = ps.ps6000aGetStreamingLatestValues(chandle, ctypes.byref(streamData), 1,
ctypes.byref(streamTrigger))
if status["getStreamingLatestValues"] == picoOk:
# do nothing
time.sleep(0.01)
else:
count = count + 1
if count < maxBuffers:
status["setDataBufferA"] = ps.ps6000aSetDataBuffer(chandle, channelA, ctypes.byref(bufferA[count]),
nSamples, dataType, waveform, downSampleMode, actionAdd)
assert_pico_ok(status["setDataBufferA"])
status["setDataBufferB"] = ps.ps6000aSetDataBuffer(chandle, channelB, ctypes.byref(bufferB[count]),
nSamples, dataType, waveform, downSampleMode, actionAdd)
assert_pico_ok(status["setDataBufferB"])
print(count)
collectedSamples = collectedSamples + streamData[0].noOfSamples
# stop scope streaming
status["stop"] = ps.ps6000aStop(chandle)
assert_pico_ok(status["stop"])
# get total number of streamed data points
noOfStreamedSamples = ctypes.c_uint64()
status["noOfStreamedSamples"] = ps.ps6000aNoOfStreamingValues(chandle, ctypes.byref(noOfStreamedSamples))
assert_pico_ok(status["noOfStreamedSamples"])
print("streaming finished")
print("Number of samples collected during streaming")
print(noOfStreamedSamples.value)
# get max ADC value
# handle = chandle
minADC = ctypes.c_int16()
maxADC = ctypes.c_int16()
status["getAdcLimits"] = ps.ps6000aGetAdcLimits(chandle, resolution, ctypes.byref(minADC), ctypes.byref(maxADC))
assert_pico_ok(status["getAdcLimits"])
# plot ADC data
plt.plot(bufferA[0])
plt.plot(bufferB[0])
plt.show()
# Close the scope
status["closeunit"] = ps.ps6000aCloseUnit(chandle)
assert_pico_ok(status["closeunit"])
print(status)
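# Hedged post-processing sketch (editor addition, not part of the original example):
# the script imports adc2mV but plots raw ADC counts above; converting the first
# channel A buffer to millivolts only needs channelRange and maxADC captured earlier.
bufferA_mV = adc2mV(bufferA[0], channelRange, maxADC)
plt.plot(bufferA_mV)
plt.ylabel("Channel A (mV)")
plt.show()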
|
159934b2aa3169314f91cb4bc77ad51d94902e0d
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQMOffline/Trigger/test/egHLTOffDQMSourceTest_cfg.py
|
d09794ef4ae796db23145ac54fe916bd7c6d57c8
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 6,196
|
py
|
egHLTOffDQMSourceTest_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQMTest")
# set DQM environment
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("DQMServices.Components.DQMEnvironment_cfi")
#load and setup E/g HLT Offline DQM module
process.load("DQMOffline.Trigger.EgHLTOfflineSource_cfi")
#load calo geometry
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
# Other statements
#use two following lines to grab GlobalTag automatically
#from Configuration.AlCa.autoCond import autoCond
#process.GlobalTag.globaltag = autoCond['hltonline']
process.GlobalTag.globaltag = 'GR_R_50_V11::All'
#configure message logger to something sane
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.INFO.limit = 0
process.MessageLogger.cout.threshold = cms.untracked.string('WARNING')
process.MessageLogger.cerr.FwkSummary = cms.untracked.PSet(
reportEvery = cms.untracked.int32(500),
limit = cms.untracked.int32(10000000)
)
#process.options = cms.untracked.PSet(wantSummary=cms.untracked.bool(True))
#process.hltTrigReport = cms.EDAnalyzer( "HLTrigReport",
# HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT' )
#)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(),
)
process.source.fileNames=[
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0014/A43F691A-6D13-DF11-99D0-001A92971BDC.root',
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0013/F0BF21B7-5513-DF11-8A40-0018F3D09688.root',
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0013/CA5E6D51-5113-DF11-8CC2-001A92811748.root',
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0013/82ABD700-5613-DF11-92D2-0018F3D09664.root',
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0013/3C397451-6213-DF11-B274-0018F3D095FA.root',
#'/store/relval/CMSSW_3_5_0/RelValZTT/GEN-SIM-RECO/START3X_V21-v1/0013/385227FB-5413-DF11-8E1A-0018F3D09634.root'
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/0CCB804F-154C-E111-A8A7-001A92810A94.root',
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/740FF94D-154C-E111-95C8-001A928116C0.root',
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/92D7A150-154C-E111-94C9-003048FFD7C2.root',
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/96F9074C-154C-E111-BD78-001A92810AA6.root',
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/DEDE1B51-154C-E111-9CD6-001A92810AEE.root',
'/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011B-v1/0000/ECFBE94D-154C-E111-B5A8-001A92971BC8.root',
# '/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011A-v1/0000/60F854F0-504C-E111-9C47-002618943932.root',
# '/store/relval/CMSSW_5_2_0_pre3/SingleElectron/RECO/GR_R_50_V11_RelVal_electron2011A-v1/0000/EAD4E6ED-504C-E111-9AF3-003048678A7E.root',
# '/store/data/Run2011A/DoubleElectron/RECO/PromptReco-v4/000/165/088/66AD1342-E47F-E011-B825-003048F01E88.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/098/203C4130-DA7F-E011-9BD0-003048F11CF0.root',
# '/store/data/Run2011A/DoubleElectron/RECO/PromptReco-v4/000/165/098/C4A7FB10-DA7F-E011-97CF-0030487CD718.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/099/3A9A0435-D17F-E011-9999-0030487C6A66.root',
# '/store/data/Run2011A/DoubleElectron/RECO/PromptReco-v4/000/165/099/82CDB84B-D17F-E011-8C79-0030487CD710.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/102/2A1014DE-CF80-E011-9924-001617DBD5AC.root',
# '/store/data/Run2011A/DoubleElectron/RECO/PromptReco-v4/000/165/102/00530480-CF80-E011-B76C-001617E30D4A.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/103/0EB41167-EE80-E011-8B64-003048F024DC.root',
# '/store/data/Run2011A/DoubleElectron/RECO/PromptReco-v4/000/165/103/B0BACE7E-EE80-E011-886C-00304879BAB2.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v5/000/172/163/0C163946-FCBB-E011-8C0C-BCAEC5329719.root',
#-----test across runs----
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/467/7E4DDAD6-ED85-E011-AC8D-001D09F24600.root',
# '/store/data/Run2011A/SingleElectron/RECO/PromptReco-v4/000/165/472/361C4077-3386-E011-A483-001D09F2960F.root',
]
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.FEVT = cms.OutputModule("PoolOutputModule",
process.FEVTEventContent,
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RECO')),
# fileName = cms.untracked.string('/data/ndpc3/c/dmorse/HLTDQMrootFiles/May18/SourceTest_420_2.root')
# fileName = cms.untracked.string('Run2011A_SingleElectronRuns165364-166462Et40cut_RECO.root')
fileName = cms.untracked.string('SingleElectron_CMSSW_5_2_0_pre3_RECO_2011B.root')
)
process.FEVT.outputCommands = cms.untracked.vstring('drop *','keep *_MEtoEDMConverter_*_DQMTest')
#monitor elements are converted to EDM format to store in CMSSW file
#client will convert them back before processing
process.psource = cms.Path(process.egHLTOffDQMSource*process.MEtoEDMConverter)
process.outpath = cms.EndPath(process.FEVT)
process.MEtoEDMConverter.Verbosity = 0
process.DQMStore.verbose = 0
process.DQM.collectorHost = ''
|
21d13f385bf2c168a90d5b436b87b8b84ab6ad72
|
a529d3ab1f0a82fee7e11a127d1932a7cd46fc26
|
/utils/text/cleaners.py
|
9bcdb3b0d9b9fcf699d07b36ba0c85e2d4a438f9
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
as-ideas/ForwardTacotron
|
2f3192820977435cc4b437a580edd372ddf0f4af
|
b83217a72381d871c946af185d48a85ed1527199
|
refs/heads/master
| 2023-08-24T10:49:10.356701
| 2023-08-22T15:36:11
| 2023-08-22T15:36:11
| 244,631,442
| 604
| 131
|
MIT
| 2023-09-05T14:43:59
| 2020-03-03T12:33:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
cleaners.py
|
import re
from typing import Dict, Any
from phonemizer.backend import EspeakBackend
from unidecode import unidecode
from utils.text.numbers import normalize_numbers
from utils.text.symbols import phonemes_set
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def no_cleaners(text):
return text
def english_cleaners(text):
text = unidecode(text)
text = normalize_numbers(text)
text = expand_abbreviations(text)
return text
class Cleaner:
def __init__(self,
cleaner_name: str,
use_phonemes: bool,
lang: str) -> None:
if cleaner_name == 'english_cleaners':
self.clean_func = english_cleaners
elif cleaner_name == 'no_cleaners':
self.clean_func = no_cleaners
else:
raise ValueError(f'Cleaner not supported: {cleaner_name}! '
f'Currently supported: [\'english_cleaners\', \'no_cleaners\']')
self.use_phonemes = use_phonemes
self.lang = lang
if use_phonemes:
self.backend = EspeakBackend(language=lang,
preserve_punctuation=True,
with_stress=False,
punctuation_marks=';:,.!?¡¿—…"«»“”()',
language_switch='remove-flags')
def __call__(self, text: str) -> str:
text = self.clean_func(text)
if self.use_phonemes:
text = self.backend.phonemize([text], strip=True)[0]
text = ''.join([p for p in text if p in phonemes_set])
text = collapse_whitespace(text)
text = text.strip()
return text
@classmethod
def from_config(cls, config: Dict[str, Any]) -> 'Cleaner':
return Cleaner(
cleaner_name=config['preprocessing']['cleaner_name'],
use_phonemes=config['preprocessing']['use_phonemes'],
lang=config['preprocessing']['language']
)
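if __name__ == "__main__":
    # Hedged usage sketch (editor addition, not part of ForwardTacotron).
    # use_phonemes=False avoids the espeak dependency; the sample sentence is
    # arbitrary and only shows abbreviation expansion (e.g. 'Dr.' -> 'doctor').
    cleaner = Cleaner(cleaner_name='english_cleaners', use_phonemes=False, lang='en-us')
    print(cleaner('Dr. Smith saw Mr. Jones at the fort.'))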
|
ba4f4f50a486478c0c2c3a28e441ce71a5776bca
|
8fa191cd4a67431a04eff62d35122ee83cc7b0af
|
/bookwyrm/views/admin/celery_status.py
|
cd8b85b6d4a3580b71fa2cae2185ecc14676f29a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
bookwyrm-social/bookwyrm
|
24678676a7a58dba96641194dfae3fffbf01574d
|
0f8da5b738047f3c34d60d93f59bdedd8f797224
|
refs/heads/main
| 2023-08-20T21:45:30.957277
| 2023-08-19T23:41:50
| 2023-08-19T23:41:50
| 236,415,735
| 1,398
| 216
|
NOASSERTION
| 2023-09-08T20:43:06
| 2020-01-27T03:51:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,637
|
py
|
celery_status.py
|
""" celery status """
import json
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.http import require_GET
from django import forms
import redis
from celerywyrm import settings
from bookwyrm.tasks import (
app as celery,
LOW,
MEDIUM,
HIGH,
STREAMS,
IMAGES,
SUGGESTED_USERS,
EMAIL,
CONNECTORS,
LISTS,
INBOX,
IMPORTS,
IMPORT_TRIGGERED,
BROADCAST,
MISC,
)
r = redis.from_url(settings.REDIS_BROKER_URL)
# pylint: disable= no-self-use
@method_decorator(login_required, name="dispatch")
@method_decorator(
permission_required("bookwyrm.edit_instance_settings", raise_exception=True),
name="dispatch",
)
class CeleryStatus(View):
"""Are your tasks running? Well you'd better go catch them"""
def get(self, request):
"""See workers and active tasks"""
errors = []
try:
inspect = celery.control.inspect()
stats = inspect.stats()
active_tasks = inspect.active()
# pylint: disable=broad-except
except Exception as err:
stats = active_tasks = None
errors.append(err)
try:
queues = {
LOW: r.llen(LOW),
MEDIUM: r.llen(MEDIUM),
HIGH: r.llen(HIGH),
STREAMS: r.llen(STREAMS),
IMAGES: r.llen(IMAGES),
SUGGESTED_USERS: r.llen(SUGGESTED_USERS),
EMAIL: r.llen(EMAIL),
CONNECTORS: r.llen(CONNECTORS),
LISTS: r.llen(LISTS),
INBOX: r.llen(INBOX),
IMPORTS: r.llen(IMPORTS),
IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),
BROADCAST: r.llen(BROADCAST),
MISC: r.llen(MISC),
}
# pylint: disable=broad-except
except Exception as err:
queues = None
errors.append(err)
form = ClearCeleryForm()
data = {
"stats": stats,
"active_tasks": active_tasks,
"queues": queues,
"form": form,
"errors": errors,
}
return TemplateResponse(request, "settings/celery.html", data)
def post(self, request):
"""Submit form to clear queues"""
form = ClearCeleryForm(request.POST)
if form.is_valid():
if len(celery.control.ping()) != 0:
return HttpResponse(
"Refusing to delete tasks while Celery worker is active"
)
pipeline = r.pipeline()
for queue in form.cleaned_data["queues"]:
for task in r.lrange(queue, 0, -1):
task_json = json.loads(task)
if task_json["headers"]["task"] in form.cleaned_data["tasks"]:
pipeline.lrem(queue, 0, task)
results = pipeline.execute()
return HttpResponse(f"Deleted {sum(results)} tasks")
class ClearCeleryForm(forms.Form):
"""Form to clear queues"""
queues = forms.MultipleChoiceField(
label="Queues",
choices=[
(LOW, "Low prioirty"),
(MEDIUM, "Medium priority"),
(HIGH, "High priority"),
(STREAMS, "Streams"),
(IMAGES, "Images"),
(SUGGESTED_USERS, "Suggested users"),
(EMAIL, "Email"),
(CONNECTORS, "Connectors"),
(LISTS, "Lists"),
(INBOX, "Inbox"),
(IMPORTS, "Imports"),
(IMPORT_TRIGGERED, "Import triggered"),
(BROADCAST, "Broadcasts"),
(MISC, "Misc"),
],
widget=forms.CheckboxSelectMultiple,
)
tasks = forms.MultipleChoiceField(
label="Tasks", choices=[], widget=forms.CheckboxSelectMultiple
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
celery.loader.import_default_modules()
self.fields["tasks"].choices = sorted(
[(k, k) for k in celery.tasks.keys() if not k.startswith("celery.")]
)
@require_GET
# pylint: disable=unused-argument
def celery_ping(request):
"""Just tells you if Celery is on or not"""
try:
ping = celery.control.inspect().ping()
if ping:
return HttpResponse()
# pylint: disable=broad-except
except Exception:
pass
return HttpResponse(status=500)
|
0a787fe3c5954b7b9fec5dbf6e4460ee74e22b87
|
8e68fa08e0275bb77a57f2dcb1b2082afbd928e9
|
/src/cutadapt/predicates.py
|
4346efef04bb1963184e6fbc526778a6f620f54c
|
[
"MIT"
] |
permissive
|
marcelm/cutadapt
|
a892a871ed8b5b8491c2177ede43a6ebe0b43810
|
cefb3e0bc6bacf5e15a842174502852466bd6990
|
refs/heads/main
| 2023-09-01T08:14:21.694545
| 2023-08-31T08:18:46
| 2023-08-31T08:18:46
| 4,577,614
| 450
| 165
|
MIT
| 2023-09-11T06:48:19
| 2012-06-06T20:26:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,006
|
py
|
predicates.py
|
"""
Filtering criteria (predicates)
"""
from abc import ABC, abstractmethod
from .qualtrim import expected_errors
from .modifiers import ModificationInfo
class Predicate(ABC):
@abstractmethod
def test(self, read, info: ModificationInfo) -> bool:
"""
Return True if the filtering criterion matches.
"""
@classmethod
def descriptive_identifier(cls) -> str:
"""
Return a short name for this predicate based on the class name such as "too_long",
"too_many_expected_errors".
This is used as identifier in the JSON report.
"""
return "".join(
("_" + ch.lower() if ch.isupper() else ch) for ch in cls.__name__
)[1:]
class TooShort(Predicate):
"""Select reads that are shorter than the specified minimum length"""
def __init__(self, minimum_length: int):
self.minimum_length = minimum_length
def __repr__(self):
return f"TooShort(minimum_length={self.minimum_length})"
def test(self, read, info: ModificationInfo):
return len(read) < self.minimum_length
class TooLong(Predicate):
"""Select reads that are longer than the specified maximum length"""
def __init__(self, maximum_length: int):
self.maximum_length = maximum_length
def __repr__(self):
return f"TooLong(maximum_length={self.maximum_length})"
def test(self, read, info: ModificationInfo):
return len(read) > self.maximum_length
class TooManyExpectedErrors(Predicate):
"""
Select reads whose expected number of errors, according to the quality
values, exceeds a threshold.
The idea comes from usearch's -fastq_maxee parameter
(http://drive5.com/usearch/).
"""
def __init__(self, max_errors: float):
self.max_errors = max_errors
def __repr__(self):
return f"TooManyExpectedErrors(max_errors={self.max_errors})"
def test(self, read, info: ModificationInfo):
return expected_errors(read.qualities) > self.max_errors
class TooManyN(Predicate):
"""
Select reads that have too many 'N' bases.
Either a raw count or a proportion (relative to the sequence length) can be used.
"""
def __init__(self, count: float):
"""
count -- if this value is below 1.0, it is interpreted as a proportion of the read
length and reads whose fraction of N's exceeds it are selected; if it is 1 or
greater, reads with more than this number of N's are selected.
"""
assert count >= 0
self.is_proportion = count < 1.0
self.cutoff = count
def __repr__(self):
return f"TooManyN(cutoff={self.cutoff}, is_proportion={self.is_proportion})"
def test(self, read, info: ModificationInfo):
n_count = read.sequence.lower().count("n")
if self.is_proportion:
if len(read) == 0:
return False
return n_count / len(read) > self.cutoff
else:
return n_count > self.cutoff
class CasavaFiltered(Predicate):
"""
Select reads that have failed the CASAVA filter according to the read header.
The headers look like ``xxxx x:Y:x:x`` (with a ``Y``). Reads that pass the filter
have an ``N`` instead of ``Y``.
Reads with unrecognized headers are not selected.
"""
def __repr__(self):
return "CasavaFiltered()"
def test(self, read, info: ModificationInfo):
_, _, right = read.name.partition(" ")
return right[1:4] == ":Y:" # discard if :Y: found
class DiscardUntrimmed(Predicate):
"""
Select reads for which no adapter match was found
"""
def __repr__(self):
return "DiscardUntrimmed()"
def test(self, read, info: ModificationInfo):
return not info.matches
class DiscardTrimmed(Predicate):
"""
Select reads for which at least one adapter match was found
"""
def __repr__(self):
return "DiscardTrimmed()"
def test(self, read, info: ModificationInfo):
return bool(info.matches)
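if __name__ == "__main__":
    # Hedged usage sketch (editor addition, not part of cutadapt).  _DemoRead is a
    # stand-in for a dnaio-style record; the predicates exercised below do not
    # consult the ModificationInfo argument, so None is passed in its place.
    class _DemoRead:
        def __init__(self, name, sequence, qualities):
            self.name = name
            self.sequence = sequence
            self.qualities = qualities
        def __len__(self):
            return len(self.sequence)
    read = _DemoRead("r1 1:Y:0:1", "ACGTNNACGTNN", "IIIIIIIIIIII")
    print(TooShort(minimum_length=20).test(read, None))  # True: 12 < 20
    print(TooManyN(count=0.25).test(read, None))  # True: 4 / 12 > 0.25
    print(CasavaFiltered().test(read, None))  # True: header contains ":Y:"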
|