hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73037a5f0c5b05174b9618464d1cd001011645b | 6,765 | py | Python | dm_pix/_src/metrics.py | mbilalai/dm_pix | 458e86f28df3f72017dc00b5449bc9ede3e0f566 | [
"Apache-2.0"
] | 1 | 2021-07-29T06:51:21.000Z | 2021-07-29T06:51:21.000Z | dm_pix/_src/metrics.py | mbilalai/dm_pix | 458e86f28df3f72017dc00b5449bc9ede3e0f566 | [
"Apache-2.0"
] | null | null | null | dm_pix/_src/metrics.py | mbilalai/dm_pix | 458e86f28df3f72017dc00b5449bc9ede3e0f566 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compare image pairs.
Images are assumed to be [0, 1] of floating point dtype with [N]HWC shapes.
Each image metric function returns a scalar for each image pair.
"""
import chex
import jax
import jax.numpy as jnp
import jax.scipy as jsp
def mae(a: chex.Array, b: chex.Array) -> chex.Numeric:
  """Computes the Mean Absolute Error (MAE) for an image pair.

  Images are expected as [N]HWC float arrays in [0, 1] (see module docs).

  Args:
    a: First image (or set of images).
    b: Second image (or set of images).

  Returns:
    Per-pair MAE, averaged over the height, width and channel axes.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  abs_diff = jnp.abs(a - b)
  return jnp.mean(abs_diff, axis=(-3, -2, -1))
def mse(a: chex.Array, b: chex.Array) -> chex.Numeric:
  """Computes the Mean Squared Error (MSE) for an image pair.

  Images are expected as [N]HWC float arrays in [0, 1] (see module docs).

  Args:
    a: First image (or set of images).
    b: Second image (or set of images).

  Returns:
    Per-pair MSE, averaged over the height, width and channel axes.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  squared_error = jnp.square(a - b)
  return jnp.mean(squared_error, axis=(-3, -2, -1))
def psnr(a: chex.Array, b: chex.Array) -> chex.Numeric:
  """Computes the Peak Signal-to-Noise Ratio (PSNR) for an image pair.

  Assumes that the dynamic range of the images (the difference between the
  maximum and the minimum allowed values) is 1.0.

  Args:
    a: First image (or set of images).
    b: Second image (or set of images).

  Returns:
    PSNR in decibels for each image pair.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  # PSNR = -10 * log10(MSE) when the dynamic range is 1.0; log10 is
  # expressed via natural logs.
  mean_squared_error = mse(a, b)
  return -10.0 * jnp.log(mean_squared_error) / jnp.log(10.0)
def rmse(a: chex.Array, b: chex.Array) -> chex.Numeric:
  """Computes the Root Mean Squared Error (RMSE) for an image pair.

  Args:
    a: First image (or set of images).
    b: Second image (or set of images).

  Returns:
    Per-pair RMSE, i.e. the square root of `mse(a, b)`.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  mean_squared_error = mse(a, b)
  return jnp.sqrt(mean_squared_error)
def simse(a: chex.Array, b: chex.Array) -> chex.Numeric:
  """Computes the Scale-Invariant Mean Squared Error for an image pair.

  For each image pair, a scaling factor for `b` is computed as the solution to
  the least-squares problem:

    min_alpha || vec(a) - alpha * vec(b) ||_2^2,

  where `a` and `b` are flattened, i.e., vec(x) = np.flatten(x). The MSE
  between the optimally scaled `b` and `a` is returned: mse(a, alpha*b).

  This is a scale-invariant metric, so for example:
  simse(x, y) == simse(x, y*5).

  This metric was used in "Shape, Illumination, and Reflectance from Shading"
  by Barron and Malik, TPAMI, '15.

  Args:
    a: First image (or set of images).
    b: Second image (or set of images).

  Returns:
    SIMSE between `a` and `b`.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  # Closed-form least-squares solution: alpha = <a, b> / <b, b>, computed
  # per image (keepdims so it broadcasts against `b` below).
  inner_ab = jnp.sum(a * b, axis=(-3, -2, -1), keepdims=True)
  inner_bb = jnp.sum(b * b, axis=(-3, -2, -1), keepdims=True)
  alpha = inner_ab / inner_bb
  return mse(a, alpha * b)
def ssim(
    a: chex.Array,
    b: chex.Array,
    *,
    max_val: float = 1.0,
    filter_size: int = 11,
    filter_sigma: float = 1.5,
    k1: float = 0.01,
    k2: float = 0.03,
    return_map: bool = False,
) -> chex.Numeric:
  """Computes the structural similarity index (SSIM) between image pairs.

  This function is based on the standard SSIM implementation from:
  Z. Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli,
  "Image quality assessment: from error visibility to structural similarity",
  in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, 2004.

  This function was modeled after tf.image.ssim, and should produce comparable
  output.

  Note: the true SSIM is only defined on grayscale. This function does not
  perform any colorspace transform. If the input is in a color space, then it
  will compute the average SSIM.

  Args:
    a: First image (or set of images), [N]HWC.
    b: Second image (or set of images), [N]HWC.
    max_val: The maximum magnitude that `a` or `b` can have.
    filter_size: Window size (>= 1). Image dims must be at least this small.
    filter_sigma: The bandwidth of the Gaussian used for filtering (> 0.).
    k1: One of the SSIM dampening parameters (> 0.).
    k2: One of the SSIM dampening parameters (> 0.).
    return_map: If True, will cause the per-pixel SSIM "map" to be returned.

  Returns:
    Each image's mean SSIM, or a tensor of individual values if `return_map`.
  """
  chex.assert_rank([a, b], {3, 4})
  chex.assert_type([a, b], float)
  chex.assert_equal_shape([a, b])
  # Construct a 1D Gaussian blur filter, normalized to sum to 1.
  hw = filter_size // 2
  # `shift` recentres the sample grid for even filter sizes.
  shift = (2 * hw - filter_size + 1) / 2
  f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
  filt = jnp.exp(-0.5 * f_i)
  filt /= jnp.sum(filt)
  # Blur in x and y (faster than the 2D convolution).
  def convolve2d(z, f):
    return jsp.signal.convolve2d(
        z, f, mode="valid", precision=jax.lax.Precision.HIGHEST)
  # Separable Gaussian blur: a column pass followed by a row pass.
  filt_fn1 = lambda z: convolve2d(z, filt[:, jnp.newaxis])
  filt_fn2 = lambda z: convolve2d(z, filt[jnp.newaxis, :])
  # `vmap` the blurs to the tensor size, and then compose them.
  # convolve2d only handles a single HW plane, so map over the leading
  # batch axis (if present) and the trailing channel axis.
  num_dims = len(a.shape)
  map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
  filt_fn = lambda z: filt_fn1(filt_fn2(z))
  for d in map_axes:
    filt_fn = jax.vmap(filt_fn, in_axes=d, out_axes=d)
  # Local (per-window) means, and variances/covariance via raw moments.
  mu0 = filt_fn(a)
  mu1 = filt_fn(b)
  mu00 = mu0 * mu0
  mu11 = mu1 * mu1
  mu01 = mu0 * mu1
  sigma00 = filt_fn(a**2) - mu00
  sigma11 = filt_fn(b**2) - mu11
  sigma01 = filt_fn(a * b) - mu01
  # Clip the variances and covariances to valid values.
  # Variance must be non-negative:
  sigma00 = jnp.maximum(0., sigma00)
  sigma11 = jnp.maximum(0., sigma11)
  # |cov| may not exceed sqrt(var0 * var1) (Cauchy-Schwarz); preserve sign.
  sigma01 = jnp.sign(sigma01) * jnp.minimum(
      jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
  # Standard SSIM formula; c1/c2 stabilize the divisions near zero.
  c1 = (k1 * max_val)**2
  c2 = (k2 * max_val)**2
  numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
  denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
  ssim_map = numer / denom
  # Mean over the (valid-cropped) spatial and channel axes of each image.
  ssim_value = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
  return ssim_map if return_map else ssim_value
| 31.910377 | 80 | 0.660902 |
import chex
import jax
import jax.numpy as jnp
import jax.scipy as jsp
def mae(a: chex.Array, b: chex.Array) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.abs(a - b).mean(axis=(-3, -2, -1))
def mse(a: chex.Array, b: chex.Array) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.square(a - b).mean(axis=(-3, -2, -1))
def psnr(a: chex.Array, b: chex.Array) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return -10.0 * jnp.log(mse(a, b)) / jnp.log(10.0)
def rmse(a: chex.Array, b: chex.Array) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
return jnp.sqrt(mse(a, b))
def simse(a: chex.Array, b: chex.Array) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
a_dot_b = (a * b).sum(axis=(-3, -2, -1), keepdims=True)
b_dot_b = (b * b).sum(axis=(-3, -2, -1), keepdims=True)
alpha = a_dot_b / b_dot_b
return mse(a, alpha * b)
def ssim(
a: chex.Array,
b: chex.Array,
*,
max_val: float = 1.0,
filter_size: int = 11,
filter_sigma: float = 1.5,
k1: float = 0.01,
k2: float = 0.03,
return_map: bool = False,
) -> chex.Numeric:
chex.assert_rank([a, b], {3, 4})
chex.assert_type([a, b], float)
chex.assert_equal_shape([a, b])
hw = filter_size // 2
shift = (2 * hw - filter_size + 1) / 2
f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
filt = jnp.exp(-0.5 * f_i)
filt /= jnp.sum(filt)
def convolve2d(z, f):
return jsp.signal.convolve2d(
z, f, mode="valid", precision=jax.lax.Precision.HIGHEST)
filt_fn1 = lambda z: convolve2d(z, filt[:, jnp.newaxis])
filt_fn2 = lambda z: convolve2d(z, filt[jnp.newaxis, :])
num_dims = len(a.shape)
map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
filt_fn = lambda z: filt_fn1(filt_fn2(z))
for d in map_axes:
filt_fn = jax.vmap(filt_fn, in_axes=d, out_axes=d)
mu0 = filt_fn(a)
mu1 = filt_fn(b)
mu00 = mu0 * mu0
mu11 = mu1 * mu1
mu01 = mu0 * mu1
sigma00 = filt_fn(a**2) - mu00
sigma11 = filt_fn(b**2) - mu11
sigma01 = filt_fn(a * b) - mu01
sigma00 = jnp.maximum(0., sigma00)
sigma11 = jnp.maximum(0., sigma11)
sigma01 = jnp.sign(sigma01) * jnp.minimum(
jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
ssim_map = numer / denom
ssim_value = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
return ssim_map if return_map else ssim_value
| true | true |
f7303a247f463ef73a520434699ce478c010e0a7 | 416 | py | Python | receitas/migrations/0005_receita_foto_receita.py | maldonadopereira/django-receitas | 72a2215abacf5e8076b57b34ebf36211a8f0afb2 | [
"MIT"
] | null | null | null | receitas/migrations/0005_receita_foto_receita.py | maldonadopereira/django-receitas | 72a2215abacf5e8076b57b34ebf36211a8f0afb2 | [
"MIT"
] | null | null | null | receitas/migrations/0005_receita_foto_receita.py | maldonadopereira/django-receitas | 72a2215abacf5e8076b57b34ebf36211a8f0afb2 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.2 on 2022-02-20 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional `foto_receita` image field to the Receita model."""

    # Must be applied after the migration that added the `publicar` field.
    dependencies = [
        ('receitas', '0004_receita_publicar'),
    ]

    operations = [
        migrations.AddField(
            model_name='receita',
            name='foto_receita',
            # blank=True keeps the field optional in forms; uploads are
            # stored under fotos/<day>/<month>/<year>.
            field=models.ImageField(blank=True, upload_to='fotos/%d/%m/%Y'),
        ),
    ]
| 21.894737 | 76 | 0.600962 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('receitas', '0004_receita_publicar'),
]
operations = [
migrations.AddField(
model_name='receita',
name='foto_receita',
field=models.ImageField(blank=True, upload_to='fotos/%d/%m/%Y'),
),
]
| true | true |
f7303adbd62fce274698448d9c9ef1a6103caf71 | 11,555 | py | Python | jupyter_kernel_test/msgspec_v5.py | IsraelMiles/jupyter_kernel_test | 369cf22e505820d910aaf50cdbbb6b3f51766a63 | [
"BSD-3-Clause"
] | 57 | 2015-08-04T15:45:45.000Z | 2022-02-03T23:14:31.000Z | jupyter_kernel_test/msgspec_v5.py | IsraelMiles/jupyter_kernel_test | 369cf22e505820d910aaf50cdbbb6b3f51766a63 | [
"BSD-3-Clause"
] | 55 | 2015-07-02T17:55:05.000Z | 2021-12-30T19:05:40.000Z | jupyter_kernel_test/msgspec_v5.py | IsraelMiles/jupyter_kernel_test | 369cf22e505820d910aaf50cdbbb6b3f51766a63 | [
"BSD-3-Clause"
] | 34 | 2015-07-02T17:20:43.000Z | 2022-03-28T22:25:05.000Z | """Message schemas for message spec version 5"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jsonschema import Draft4Validator, ValidationError
import re
# Highest (major, minor) message-spec version these schemas target. Messages
# declaring a newer minor version are validated more leniently (extra keys
# are allowed).
protocol_version = (5, 1)

# These fragments will be wrapped in the boilerplate for a valid JSON schema.
# We also add a default 'required' containing all keys.
# Keyed by msg_type; populated at the bottom of this module.
schema_fragments = {}
def get_msg_content_validator(msg_type, version_minor):
    """Builds a Draft4Validator for the content of the given message type.

    The registered schema fragment for `msg_type` is merged into JSON-schema
    boilerplate. If the fragment does not list 'required' keys, every
    declared property is required. Messages declaring a newer minor protocol
    version than we know about are allowed extra properties.
    """
    boilerplate = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "description": "{} message contents schema".format(msg_type),
        "type": "object",
        "properties": {},
        "additionalProperties": version_minor > protocol_version[1],
    }
    boilerplate.update(schema_fragments[msg_type])
    # Require all keys by default.
    boilerplate.setdefault("required", sorted(boilerplate["properties"].keys()))
    return Draft4Validator(boilerplate)
# Schema for the message header. NOTE(review): 'date' appears to be parsed
# into a datetime by the client before validation, hence the unconstrained
# type -- confirm against the consuming client.
header_part = {"type": "object", "properties": {
    "msg_id": {"type": "string"},
    "username": {"type": "string"},
    "session": {"type": "string"},
    # TODO - this is parsed to a datetime before we get it:
    "date": {}, #{"type": "string"},
    "msg_type": {"type": "string"},
    "version": {"type": "string"},
}, "required": ["msg_id", "username", "session", "date", "msg_type", "version"]}

# Top-level schema for any Jupyter message. 'content' is only checked to be
# an object here; per-msg_type validation happens in validate_message.
msg_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Jupyter message structure schema",
    "type": "object",
    "properties": {
        "header": header_part,
        "parent_header": {"type": "object"},
        "metadata": {"type": "object"},
        "content": {"type": "object"},  # Checked separately
        "buffers": {"type": "array"}
    },
    "required": ["header", "parent_header", "metadata", "content"],
}

# Shared validator instance for the overall message envelope.
msg_structure_validator = Draft4Validator(msg_schema)
def get_error_reply_validator(version_minor):
    """Builds a validator for the content of a *_reply with status 'error'."""
    error_schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "description": "Jupyter 'error' reply schema",
        "type": "object",
        "properties": {
            "status": {"const": "error"},
            "ename": {"type": "string"},
            "evalue": {"type": "string"},
            "traceback": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["status", "ename", "evalue", "traceback"],
        # Newer minor protocol versions may add fields we don't know about.
        "additionalProperties": version_minor > protocol_version[1],
    }
    return Draft4Validator(error_schema)
def get_abort_reply_validator(version_minor):
    """Builds a validator for the content of an aborted *_reply message.

    validate_message dispatches here when content['status'] == 'abort', so
    the schema must accept that status value.
    """
    return Draft4Validator({
        "$schema": "http://json-schema.org/draft-04/schema#",
        "description": "Jupyter 'abort' reply schema",
        "type": "object",
        "properties": {
            # Fixed: previously {"const": "error"}, which could never match
            # the 'abort' status that routes a message to this validator.
            # (NOTE: 'const' is a draft-6 keyword; Draft4Validator ignores
            # it, matching the existing style of get_error_reply_validator.)
            "status": {"const": "abort"},
            "ename": {"type": "string"},
            "evalue": {"type": "string"},
            # Fixed: "list" is not a JSON Schema type; arrays are "array".
            "traceback": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["status", "ename", "evalue", "traceback"],
        "additionalProperties": version_minor > protocol_version[1]
    })
# *_reply message types whose content carries a 'status' field with the
# common ok/error/abort structure; validate_message picks the content
# validator based on that status for these types.
reply_msgs_using_status = {
    'execute_reply', 'inspect_reply', 'complete_reply', 'history_reply',
    'connect_reply', 'comm_info_reply', 'kernel_info_reply', 'shutdown_reply',
    'interrupt_reply',
}
def validate_message(msg, msg_type=None, parent_id=None):
    """Validates a received Jupyter message against the v5 message spec.

    Args:
        msg: Deserialized message dict (header, parent_header, metadata,
            content, optional buffers).
        msg_type: If given, require the header's msg_type to match it.
        parent_id: If given and this is a *_reply message, require the
            parent header's msg_id to match it.

    Raises:
        ValidationError: If the message envelope, header, or content does
            not conform to the spec.
    """
    msg_structure_validator.validate(msg)

    msg_version_s = msg['header']['version']
    m = re.match(r'(\d+)\.(\d+)', msg_version_s)
    if not m:
        # Fixed: the offending version string was never interpolated into
        # the error message (missing .format call).
        raise ValidationError(
            "Version {} not like 'x.y'".format(msg_version_s))
    version_minor = int(m.group(2))

    if msg_type is not None:
        if msg['header']['msg_type'] != msg_type:
            raise ValidationError("Message type {!r} != {!r}".format(
                msg['header']['msg_type'], msg_type
            ))
    else:
        msg_type = msg['header']['msg_type']

    # Check for unexpected fields, unless it's a newer protocol version
    if version_minor <= protocol_version[1]:
        unx_top = set(msg) - set(msg_schema['properties'])
        if unx_top:
            raise ValidationError("Unexpected keys: {}".format(unx_top))
        unx_header = set(msg['header']) - set(header_part['properties'])
        if unx_header:
            raise ValidationError(
                "Unexpected keys in header: {}".format(unx_header))

    # Check the parent id
    if 'reply' in msg_type and parent_id and msg['parent_header']['msg_id'] != parent_id:
        raise ValidationError("Parent header does not match expected")

    if msg_type in reply_msgs_using_status:
        # Most _reply messages have common 'error' and 'abort' structures,
        # selected by the content's 'status' field.
        try:
            status = msg['content']['status']
        except KeyError as e:
            raise ValidationError(str(e))
        if status == 'error':
            content_vdor = get_error_reply_validator(version_minor)
        elif status == 'abort':
            content_vdor = get_abort_reply_validator(version_minor)
        elif status == 'ok':
            content_vdor = get_msg_content_validator(msg_type, version_minor)
        else:
            raise ValidationError(
                "status {!r} should be ok/error/abort".format(status))
    else:
        content_vdor = get_msg_content_validator(msg_type, version_minor)

    content_vdor.validate(msg['content'])
# Shell messages ----------------------------------------------
schema_fragments['execute_request'] = {"properties": {
"code": {"type": "string"},
"silent": {"type": "boolean"},
"store_history": {"type": "boolean"},
"user_expressions": {"type": "object"},
"allow_stdin": {"type": "boolean"},
"stop_on_error": {"type": "boolean"}
}}
schema_fragments['execute_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"execution_count": {"type": "number"},
"payload": {"type": "array", "items": {
"type": "object",
"properties": {"source": {"type": "string"}},
"additionalProperties": True,
}},
"user_expressions": {"type": "object"},
}, "required": ["status", "execution_count"]}
schema_fragments['inspect_request'] = {"properties": {
"code": {"type": "string"},
"cursor_pos": {"type": "number"},
"detail_level": {"enum": [0, 1]},
}}
schema_fragments['inspect_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"found": {"type": "boolean"},
"data": {"type": "object"},
"metadata": {"type": "object"},
}}
schema_fragments['complete_request'] = {"properties": {
"code": {"type": "string"},
"cursor_pos": {"type": "number"},
}}
schema_fragments['complete_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"matches": {"type": "array", "items": {"type": "string"}},
"cursor_start": {"type": "number"},
"cursor_end": {"type": "number"},
"metadata": {"type": "object"},
}}
schema_fragments['history_request'] = {"properties": {
'output' : {"type": "boolean"},
'raw' : {"type": "boolean"},
'hist_access_type' : {"enum": ["range", "tail", "search"]},
'session' : {"type": "number"},
'start' : {"type": "number"},
'stop' : {"type": "number"},
'n' : {"type": "number"},
'pattern' : {"type": "string"},
'unique' : {"type": "boolean"},
}, "required": ["output", "raw", "hist_access_type"]}
schema_fragments['history_reply'] = {"properties": {
"status": {"const": "ok"},
"history": {"type": "array", "items": {
"minItems": 3, "maxItems": 3
}}
}}
schema_fragments['is_complete_request'] = {"properties": {
"code": {"type": "string"},
}}
schema_fragments['is_complete_reply'] = {"properties": {
"status": {"enum": ["complete", "incomplete", "invalid", "unknown"]},
"indent": {"type": "string"}
}, "required": ["status"]}
# NB connect_request is deprecated
schema_fragments["connect_request"] = {"properties": {}}
schema_fragments["connect_reply"] = {"properties": {
"shell_port": {"type": "number"},
"iopub_port": {"type": "number"},
"stdin_port": {"type": "number"},
"hb_port": {"type": "number"},
"control_port": {"type": "number"},
}}
schema_fragments["comm_info_request"] = {"properties": {
"target_name": {"type": "string"},
}, "required": []}
schema_fragments["comm_info_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"comms": {"type": "object"},
}}
schema_fragments["kernel_info_request"] = {"properties": {}}
schema_fragments["kernel_info_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"protocol_version": {"type": "string"},
"implementation": {"type": "string"},
"implementation_version": {"type": "string"},
"language_info": {"type": "object"},
"banner": {"type": "string"},
"debugger": {"type": "boolean"},
"help_links": {"type": "array", "items": {"type": "object", "properties": {
"text": {"type": "string"},
"url": {"type": "string"}
}}}
}, "required": ["status", "protocol_version", "implementation", "language_info", "banner"]}
schema_fragments['shutdown_request'] = {"properties": {
"restart": {"type": "boolean"},
}}
schema_fragments['shutdown_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"restart": {"type": "boolean"},
}}
schema_fragments["interrupt_request"] = {"properties": {}}
schema_fragments["interrupt_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
}}
# IOPub messages ----------------------------------------------
mime_data = {
"type":"object",
"patternProperties": {r'^[\w\-\+\.]+/[\w\-\+\.]+$': {}},
"additionalProperties": False,
}
schema_fragments['stream'] = {"properties": {
"name": {"enum": ["stdout", "stderr"]},
"text": {"type": "string"},
}}
schema_fragments['display_data'] = {"properties": {
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}, "required": ["data", "metadata"]}
schema_fragments['update_display_data'] = {"properties": {
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}}
schema_fragments['execute_result'] = {"properties": {
"execution_count": {"type": "number"},
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}, "required": ["execution_count", "data", "metadata"]}
schema_fragments['clear_output'] = {"properties": {
"wait": {"type": "boolean"},
}}
schema_fragments['execute_input'] = {"properties": {
"code": {"type": "string"},
"execution_count": {"type": "number"},
}}
schema_fragments['error'] = {"properties": {
"ename": {"type": "string"},
"evalue": {"type": "string"},
"traceback": {"type": "array", "items": {"type": "string"}},
}}
schema_fragments['status'] = {"properties": {
"execution_state": {"enum": ["busy", "idle", "starting"]},
}}
# Stdin messages ---------------------------------------------

schema_fragments["input_request"] = {"properties": {
    "prompt": {"type": "string"},
    # Fixed: the messaging spec defines 'password' as a boolean flag
    # ("should the frontend obscure the input?"), not a number.
    "password": {"type": "boolean"},
}}
schema_fragments["input_reply"] = {"properties": {
    "value": {"type": "string"},
}}
| 34.186391 | 91 | 0.590134 |
from jsonschema import Draft4Validator, ValidationError
import re
protocol_version = (5, 1)
schema_fragments = {}
def get_msg_content_validator(msg_type, version_minor):
frag = schema_fragments[msg_type]
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "{} message contents schema".format(msg_type),
"type": "object",
"properties": {},
"additionalProperties": version_minor > protocol_version[1],
}
schema.update(frag)
if "required" not in schema:
schema["required"] = sorted(schema["properties"].keys())
return Draft4Validator(schema)
header_part = {"type": "object", "properties": {
"msg_id": {"type": "string"},
"username": {"type": "string"},
"session": {"type": "string"},
"date": {},
"msg_type": {"type": "string"},
"version": {"type": "string"},
}, "required": ["msg_id", "username", "session", "date", "msg_type", "version"]}
msg_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Jupyter message structure schema",
"type": "object",
"properties": {
"header": header_part,
"parent_header": {"type": "object"},
"metadata": {"type": "object"},
"content": {"type": "object"},
"buffers": {"type": "array"}
},
"required": ["header", "parent_header", "metadata", "content"],
}
msg_structure_validator = Draft4Validator(msg_schema)
def get_error_reply_validator(version_minor):
return Draft4Validator({
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Jupyter 'error' reply schema",
"type": "object",
"properties": {
"status": {"const": "error"},
"ename": {"type": "string"},
"evalue": {"type": "string"},
"traceback": {"type": "array", "items": {"type": "string"}},
},
"required": ["status", "ename", "evalue", "traceback"],
"additionalProperties": version_minor > protocol_version[1]
})
def get_abort_reply_validator(version_minor):
return Draft4Validator({
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Jupyter 'abort' reply schema",
"type": "object",
"properties": {
"status": {"const": "error"},
"ename": {"type": "string"},
"evalue": {"type": "string"},
"traceback": {"type": "list", "items": {"type": "string"}},
},
"required": ["status", "ename", "evalue", "traceback"],
"additionalProperties": version_minor > protocol_version[1]
})
reply_msgs_using_status = {
'execute_reply', 'inspect_reply', 'complete_reply', 'history_reply',
'connect_reply', 'comm_info_reply', 'kernel_info_reply', 'shutdown_reply',
'interrupt_reply',
}
def validate_message(msg, msg_type=None, parent_id=None):
msg_structure_validator.validate(msg)
msg_version_s = msg['header']['version']
m = re.match(r'(\d+)\.(\d+)', msg_version_s)
if not m:
raise ValidationError("Version {} not like 'x.y'")
version_minor = int(m.group(2))
if msg_type is not None:
if msg['header']['msg_type'] != msg_type:
raise ValidationError("Message type {!r} != {!r}".format(
msg['header']['msg_type'], msg_type
))
else:
msg_type = msg['header']['msg_type']
if version_minor <= protocol_version[1]:
unx_top = set(msg) - set(msg_schema['properties'])
if unx_top:
raise ValidationError("Unexpected keys: {}".format(unx_top))
unx_header = set(msg['header']) - set(header_part['properties'])
if unx_header:
raise ValidationError("Unexpected keys in header: {}".format(unx_header))
# Check the parent id
if 'reply' in msg_type and parent_id and msg['parent_header']['msg_id'] != parent_id:
raise ValidationError("Parent header does not match expected")
if msg_type in reply_msgs_using_status:
# Most _reply messages have common 'error' and 'abort' structures
try:
status = msg['content']['status']
except KeyError as e:
raise ValidationError(str(e))
if status == 'error':
content_vdor = get_error_reply_validator(version_minor)
elif status == 'abort':
content_vdor = get_abort_reply_validator(version_minor)
elif status == 'ok':
content_vdor = get_msg_content_validator(msg_type, version_minor)
else:
raise ValidationError(
"status {!r} should be ok/error/abort".format(status))
else:
content_vdor = get_msg_content_validator(msg_type, version_minor)
content_vdor.validate(msg['content'])
# Shell messages ----------------------------------------------
schema_fragments['execute_request'] = {"properties": {
"code": {"type": "string"},
"silent": {"type": "boolean"},
"store_history": {"type": "boolean"},
"user_expressions": {"type": "object"},
"allow_stdin": {"type": "boolean"},
"stop_on_error": {"type": "boolean"}
}}
schema_fragments['execute_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"execution_count": {"type": "number"},
"payload": {"type": "array", "items": {
"type": "object",
"properties": {"source": {"type": "string"}},
"additionalProperties": True,
}},
"user_expressions": {"type": "object"},
}, "required": ["status", "execution_count"]}
schema_fragments['inspect_request'] = {"properties": {
"code": {"type": "string"},
"cursor_pos": {"type": "number"},
"detail_level": {"enum": [0, 1]},
}}
schema_fragments['inspect_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"found": {"type": "boolean"},
"data": {"type": "object"},
"metadata": {"type": "object"},
}}
schema_fragments['complete_request'] = {"properties": {
"code": {"type": "string"},
"cursor_pos": {"type": "number"},
}}
schema_fragments['complete_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"matches": {"type": "array", "items": {"type": "string"}},
"cursor_start": {"type": "number"},
"cursor_end": {"type": "number"},
"metadata": {"type": "object"},
}}
schema_fragments['history_request'] = {"properties": {
'output' : {"type": "boolean"},
'raw' : {"type": "boolean"},
'hist_access_type' : {"enum": ["range", "tail", "search"]},
'session' : {"type": "number"},
'start' : {"type": "number"},
'stop' : {"type": "number"},
'n' : {"type": "number"},
'pattern' : {"type": "string"},
'unique' : {"type": "boolean"},
}, "required": ["output", "raw", "hist_access_type"]}
schema_fragments['history_reply'] = {"properties": {
"status": {"const": "ok"},
"history": {"type": "array", "items": {
"minItems": 3, "maxItems": 3
}}
}}
schema_fragments['is_complete_request'] = {"properties": {
"code": {"type": "string"},
}}
schema_fragments['is_complete_reply'] = {"properties": {
"status": {"enum": ["complete", "incomplete", "invalid", "unknown"]},
"indent": {"type": "string"}
}, "required": ["status"]}
# NB connect_request is deprecated
schema_fragments["connect_request"] = {"properties": {}}
schema_fragments["connect_reply"] = {"properties": {
"shell_port": {"type": "number"},
"iopub_port": {"type": "number"},
"stdin_port": {"type": "number"},
"hb_port": {"type": "number"},
"control_port": {"type": "number"},
}}
schema_fragments["comm_info_request"] = {"properties": {
"target_name": {"type": "string"},
}, "required": []}
schema_fragments["comm_info_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"comms": {"type": "object"},
}}
schema_fragments["kernel_info_request"] = {"properties": {}}
schema_fragments["kernel_info_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"protocol_version": {"type": "string"},
"implementation": {"type": "string"},
"implementation_version": {"type": "string"},
"language_info": {"type": "object"},
"banner": {"type": "string"},
"debugger": {"type": "boolean"},
"help_links": {"type": "array", "items": {"type": "object", "properties": {
"text": {"type": "string"},
"url": {"type": "string"}
}}}
}, "required": ["status", "protocol_version", "implementation", "language_info", "banner"]}
schema_fragments['shutdown_request'] = {"properties": {
"restart": {"type": "boolean"},
}}
schema_fragments['shutdown_reply'] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
"restart": {"type": "boolean"},
}}
schema_fragments["interrupt_request"] = {"properties": {}}
schema_fragments["interrupt_reply"] = {"properties": {
# statuses 'error' and 'abort' change the structure, so check separately
"status": {"const": "ok"},
}}
# IOPub messages ----------------------------------------------
mime_data = {
"type":"object",
"patternProperties": {r'^[\w\-\+\.]+/[\w\-\+\.]+$': {}},
"additionalProperties": False,
}
schema_fragments['stream'] = {"properties": {
"name": {"enum": ["stdout", "stderr"]},
"text": {"type": "string"},
}}
schema_fragments['display_data'] = {"properties": {
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}, "required": ["data", "metadata"]}
schema_fragments['update_display_data'] = {"properties": {
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}}
schema_fragments['execute_result'] = {"properties": {
"execution_count": {"type": "number"},
"data": mime_data,
"metadata": {"type": "object"},
"transient": {"type": "object"},
}, "required": ["execution_count", "data", "metadata"]}
schema_fragments['clear_output'] = {"properties": {
"wait": {"type": "boolean"},
}}
schema_fragments['execute_input'] = {"properties": {
"code": {"type": "string"},
"execution_count": {"type": "number"},
}}
schema_fragments['error'] = {"properties": {
"ename": {"type": "string"},
"evalue": {"type": "string"},
"traceback": {"type": "array", "items": {"type": "string"}},
}}
schema_fragments['status'] = {"properties": {
"execution_state": {"enum": ["busy", "idle", "starting"]},
}}
# Stdin messages ---------------------------------------------
schema_fragments["input_request"] = {"properties": {
"prompt": {"type": "string"},
"password": {"type": "number"},
}}
schema_fragments["input_reply"] = {"properties": {
"value": {"type": "string"},
}}
| true | true |
f7303b21e2e3db21c47520be7c83cb2644cc4758 | 466 | py | Python | env/Lib/site-packages/plotly/validators/scattercarpet/marker/colorbar/_xpad.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/scattercarpet/marker/colorbar/_xpad.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/scattercarpet/marker/colorbar/_xpad.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="xpad", parent_name="scattercarpet.marker.colorbar", **kwargs
):
super(XpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| 31.066667 | 87 | 0.641631 | import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="xpad", parent_name="scattercarpet.marker.colorbar", **kwargs
):
super(XpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| true | true |
f7303bca22e1ac8527afeffd1d88492df41a22c8 | 1,874 | py | Python | Sources/ospboard/opt/osp/share/scripts/system_check.py | nihospr01/OpenSpeechPlatform | 799fb5baa5b8cdfad0f5387dd48b394adc583ede | [
"BSD-2-Clause"
] | null | null | null | Sources/ospboard/opt/osp/share/scripts/system_check.py | nihospr01/OpenSpeechPlatform | 799fb5baa5b8cdfad0f5387dd48b394adc583ede | [
"BSD-2-Clause"
] | null | null | null | Sources/ospboard/opt/osp/share/scripts/system_check.py | nihospr01/OpenSpeechPlatform | 799fb5baa5b8cdfad0f5387dd48b394adc583ede | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import time
import datetime
import os
import psutil
def main():
CurrentTime = datetime.datetime.now()
with open(r"/sys/class/thermal/thermal_zone0/temp") as f:
CurrentTemp0 = f.readline()
with open(r"/sys/class/thermal/thermal_zone1/temp") as f:
CurrentTemp1 = f.readline()
freq = []
for i in range(4):
with open(f"/sys/devices/system/cpu/cpu{i}/cpufreq/cpuinfo_cur_freq") as f:
freq.append(f.readline())
with open(r"/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state") as f:
time_in_state = f.read()
print(f"\n{CurrentTime.strftime('%H:%M:%S')}\t CPU0-1: {float(CurrentTemp0) / 1000} ℃\t\tCPU2-3: {float(CurrentTemp1) / 1000} ℃")
cpu = psutil.cpu_times_percent(percpu=True)
time.sleep(1)
cpu = psutil.cpu_times_percent(percpu=True)
print(f"\nCPU busy (%) (1-4) : {100-cpu[0].idle:.2f} {100-cpu[1].idle:.2f} {100-cpu[2].idle:.2f} {100-cpu[3].idle:.2f}")
print(f"\nCPU freq (kHz) (1-4) : {int(freq[0])/1000} {int(freq[1])/1000} {int(freq[2])/1000} {int(freq[3])/1000}")
print("\nTIME IN STATE\n-------------\nkHz Percent\n-------------")
total = 0
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
total += int(per)
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
freq = int(int(freq)/1000)
per = int(int(per) / total * 100)
print(f"{freq} {per}")
print("\nOSP Status")
os.system('ps -T -p `pgrep OSP` -o cpuid,cls,pri,pcpu,lwp,comm')
diskfree = psutil.disk_usage('/').percent
print(f"\nDiskfree: {diskfree}%")
print("\nCharge Log\n----------")
with open(r"/var/log/charge.log") as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
| 30.225806 | 133 | 0.577375 |
import sys
import time
import datetime
import os
import psutil
def main():
CurrentTime = datetime.datetime.now()
with open(r"/sys/class/thermal/thermal_zone0/temp") as f:
CurrentTemp0 = f.readline()
with open(r"/sys/class/thermal/thermal_zone1/temp") as f:
CurrentTemp1 = f.readline()
freq = []
for i in range(4):
with open(f"/sys/devices/system/cpu/cpu{i}/cpufreq/cpuinfo_cur_freq") as f:
freq.append(f.readline())
with open(r"/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state") as f:
time_in_state = f.read()
print(f"\n{CurrentTime.strftime('%H:%M:%S')}\t CPU0-1: {float(CurrentTemp0) / 1000} ℃\t\tCPU2-3: {float(CurrentTemp1) / 1000} ℃")
cpu = psutil.cpu_times_percent(percpu=True)
time.sleep(1)
cpu = psutil.cpu_times_percent(percpu=True)
print(f"\nCPU busy (%) (1-4) : {100-cpu[0].idle:.2f} {100-cpu[1].idle:.2f} {100-cpu[2].idle:.2f} {100-cpu[3].idle:.2f}")
print(f"\nCPU freq (kHz) (1-4) : {int(freq[0])/1000} {int(freq[1])/1000} {int(freq[2])/1000} {int(freq[3])/1000}")
print("\nTIME IN STATE\n-------------\nkHz Percent\n-------------")
total = 0
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
total += int(per)
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
freq = int(int(freq)/1000)
per = int(int(per) / total * 100)
print(f"{freq} {per}")
print("\nOSP Status")
os.system('ps -T -p `pgrep OSP` -o cpuid,cls,pri,pcpu,lwp,comm')
diskfree = psutil.disk_usage('/').percent
print(f"\nDiskfree: {diskfree}%")
print("\nCharge Log\n----------")
with open(r"/var/log/charge.log") as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
| true | true |
f7303bebd882517201c20381d045a17ff877a40a | 6,044 | py | Python | passengerCOVIDscan/glove_detection/tensorflow_infer.py | pradip026/passengerCOVIDscan | 1ebbe23beb91963679a97d8e9fe45354c47bbbff | [
"MIT"
] | null | null | null | passengerCOVIDscan/glove_detection/tensorflow_infer.py | pradip026/passengerCOVIDscan | 1ebbe23beb91963679a97d8e9fe45354c47bbbff | [
"MIT"
] | null | null | null | passengerCOVIDscan/glove_detection/tensorflow_infer.py | pradip026/passengerCOVIDscan | 1ebbe23beb91963679a97d8e9fe45354c47bbbff | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import cv2
import time
import argparse
import os
import numpy as np
from PIL import Image
#from keras.models import model_from_json
from .utils.anchor_generator import generate_anchors
from .utils.anchor_decode import decode_bbox
from .utils.nms import single_class_non_max_suppression
from .load_model.tensorflow_loader import load_tf_model, tf_inference
MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models/face_mask_detection.pb")
sess, graph = load_tf_model(MODEL_PATH)
# anchor configuration
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'glove', 1: 'Noglove'}
def inference(image,
conf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True,
show_result=True
):
'''
Main function of detection inference
:param image: 3D numpy array of image
:param conf_thresh: the min threshold of classification probabity.
:param iou_thresh: the IOU threshold of NMS
:param target_shape: the model input size.
:param draw_result: whether to daw bounding box to the image.
:param show_result: whether to display the image.
:return:
'''
# image = np.copy(image)
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0 # 归一化到0~1
image_exp = np.expand_dims(image_np, axis=0)
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# keep_idx is the alive bounding box after nms.
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
if show_result:
Image.fromarray(image).show()
return output_info
def run_on_video(video_path, output_video_name, conf_thresh):
cap = cv2.VideoCapture(video_path)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# writer = cv2.VideoWriter(output_video_name, fourcc, int(fps), (int(width), int(height)))
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if not cap.isOpened():
raise ValueError("Video open failed.")
return
status = True
idx = 0
while status:
start_stamp = time.time()
status, img_raw = cap.read()
img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
read_frame_stamp = time.time()
if (status):
inference(img_raw,
conf_thresh,
iou_thresh=0.5,
target_shape=(260, 260),
draw_result=True,
show_result=False)
cv2.imshow('image', img_raw[:, :, ::-1])
cv2.waitKey(1)
inference_stamp = time.time()
# writer.write(img_raw)
write_frame_stamp = time.time()
idx += 1
print("%d of %d" % (idx, total_frames))
print("read_frame:%f, infer time:%f, write time:%f" % (read_frame_stamp - start_stamp,
inference_stamp - read_frame_stamp,
write_frame_stamp - inference_stamp))
# writer.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Mask Detection")
parser.add_argument('--img-mode', type=int, default=1, help='set 1 to run on image, 0 to run on video.')
parser.add_argument('--img-path', type=str, help='path to your image.')
parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')
# parser.add_argument('--hdf5', type=str, help='keras hdf5 file')
args = parser.parse_args()
if args.img_mode:
imgPath = args.img_path
img = cv2.imread(imgPath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
inference(img, show_result=True, target_shape=(260, 260))
else:
video_path = args.video_path
if args.video_path == '0':
video_path = 0
run_on_video(video_path, '', conf_thresh=0.5)
| 40.293333 | 115 | 0.603739 |
import cv2
import time
import argparse
import os
import numpy as np
from PIL import Image
from .utils.anchor_generator import generate_anchors
from .utils.anchor_decode import decode_bbox
from .utils.nms import single_class_non_max_suppression
from .load_model.tensorflow_loader import load_tf_model, tf_inference
MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models/face_mask_detection.pb")
sess, graph = load_tf_model(MODEL_PATH)
feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'glove', 1: 'Noglove'}
def inference(image,
conf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True,
show_result=True
):
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0
image_exp = np.expand_dims(image_np, axis=0)
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
if show_result:
Image.fromarray(image).show()
return output_info
def run_on_video(video_path, output_video_name, conf_thresh):
cap = cv2.VideoCapture(video_path)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if not cap.isOpened():
raise ValueError("Video open failed.")
return
status = True
idx = 0
while status:
start_stamp = time.time()
status, img_raw = cap.read()
img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
read_frame_stamp = time.time()
if (status):
inference(img_raw,
conf_thresh,
iou_thresh=0.5,
target_shape=(260, 260),
draw_result=True,
show_result=False)
cv2.imshow('image', img_raw[:, :, ::-1])
cv2.waitKey(1)
inference_stamp = time.time()
write_frame_stamp = time.time()
idx += 1
print("%d of %d" % (idx, total_frames))
print("read_frame:%f, infer time:%f, write time:%f" % (read_frame_stamp - start_stamp,
inference_stamp - read_frame_stamp,
write_frame_stamp - inference_stamp))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Mask Detection")
parser.add_argument('--img-mode', type=int, default=1, help='set 1 to run on image, 0 to run on video.')
parser.add_argument('--img-path', type=str, help='path to your image.')
parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')
args = parser.parse_args()
if args.img_mode:
imgPath = args.img_path
img = cv2.imread(imgPath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
inference(img, show_result=True, target_shape=(260, 260))
else:
video_path = args.video_path
if args.video_path == '0':
video_path = 0
run_on_video(video_path, '', conf_thresh=0.5)
| true | true |
f7303c0eda96006944c2233d97ba5ca90c986d6f | 2,295 | py | Python | benchresults/rankselect/bitsize_table.py | pacman616/hybrid-fenwick-tree | 84e7cc8aa84b87937b98d85f3c2ed1998c0d79af | [
"MIT"
] | 4 | 2019-01-10T17:55:43.000Z | 2019-11-26T09:33:38.000Z | benchresults/rankselect/bitsize_table.py | pacman616/hybrid-fenwick-tree | 84e7cc8aa84b87937b98d85f3c2ed1998c0d79af | [
"MIT"
] | null | null | null | benchresults/rankselect/bitsize_table.py | pacman616/hybrid-fenwick-tree | 84e7cc8aa84b87937b98d85f3c2ed1998c0d79af | [
"MIT"
] | null | null | null | from collections import OrderedDict
# Data from: benchresults/rankselect/data/rankselect_nohugetlb/bitsize.csv
# Numbers to LaTeX regex: :s/\([0-9]\.?[0-9]*\)\([,\" ]\)/$\1$\2/g
lbls = "Elements,fixed[F],fixed[$\ell$],byte[F],byte[$\ell$],bit[F],bit[$\ell$],fixed[$16$]fixed,byte[$16$]byte,bit[$16$]bit,fixed[$16$]byte,fixed[$16$]bit,byte[$16$]bit,fixed[F]$8$,fixed[$\ell$]$8$,byte[F]$8$,byte[$\ell$]$8$,bit[F]$8$,bit[$\ell$]$8$,fixed[$16$]fixed$8$,byte[$16$]byte$8$,bit[$16$]bit$8$,fixed[$16$]byte$8$,fixed[$16$]bit$8$,byte[$16$]bit$8$,fixed[F]$16$,fixed[$\ell$]$16$,byte[F]$16$,byte[$\ell$]$16$,bit[F]$16$,bit[$\ell$]$16$,fixed[$16$]fixed$16$,byte[$16$]byte$16$,bit[$16$]bit$16$,fixed[$16$]byte$16$,fixed[$16$]bit$16$,byte[$16$]bit$16$,fixed[F]$32$,fixed[$\ell$]$32$,byte[F]$32$,byte[$\ell$]$32$,bit[F]$32$,bit[$\ell$]$32$,fixed[$16$]fixed$32$,byte[$16$]byte$32$,bit[$16$]bit$32$,fixed[$16$]byte$32$,fixed[$16$]bit$32$,byte[$16$]bit$32$,fixed[F]$64$,fixed[$\ell$]$64$,byte[F]$64$,byte[$\ell$]$64$,bit[F]$64$,bit[$\ell$]$64$,fixed[$16$]fixed$64$,byte[$16$]byte$64$,bit[$16$]bit$64$,fixed[$16$]byte$64$,fixed[$16$]bit$64$,byte[$16$]bit$64$,\\emph{Prezza}"
vals = [ 32_000_000_000,2.000000,2.000000,1.156982,1.156373,1.125000,1.125000,2.000073,1.157061,1.125095,1.157070,1.125105,1.125096,1.125000,1.125000,1.031982,1.031373,1.021484,1.021484,1.125009,1.031992,1.021496,1.031993,1.021497,1.021496,1.062500,1.062500,1.016357,1.015748,1.011719,1.011719,1.062505,1.016362,1.011725,1.016363,1.011725,1.011725,1.031250,1.031250,1.008545,1.007935,1.006348,1.006348,1.031252,1.008547,1.006351,1.008548,1.006351,1.006351,1.015625,1.015625,1.004639,1.004029,1.003418,1.003418,1.015626,1.004640,1.003419,1.004640,1.003420,1.003420,1.127441 ]
#if __name__ == '__main__':
mapped = dict(zip(lbls.split(',')[1:], vals[1:]))
ordered = OrderedDict(sorted(mapped.items(), key=lambda x: x[1]))
length = 7
keys, vals = list(ordered.keys()), [ "${:0.4f}$".format(i) for i in ordered.values() ]
for i in range(0, len(keys), length):
print("\\begin{tabular}{" + "|x{2.35cm}"*len(keys[i:i+length]) + "|}")
print("\\hline")
print(" & ".join(keys[i:i+length]) + " \\\\")
print("\\hline")
print(" & ".join(vals[i:i+length]) + " \\\\")
print("\\hline")
print("\\end{tabular} \\vspace{0.2cm}\n")
| 91.8 | 977 | 0.639216 | from collections import OrderedDict
lbls = "Elements,fixed[F],fixed[$\ell$],byte[F],byte[$\ell$],bit[F],bit[$\ell$],fixed[$16$]fixed,byte[$16$]byte,bit[$16$]bit,fixed[$16$]byte,fixed[$16$]bit,byte[$16$]bit,fixed[F]$8$,fixed[$\ell$]$8$,byte[F]$8$,byte[$\ell$]$8$,bit[F]$8$,bit[$\ell$]$8$,fixed[$16$]fixed$8$,byte[$16$]byte$8$,bit[$16$]bit$8$,fixed[$16$]byte$8$,fixed[$16$]bit$8$,byte[$16$]bit$8$,fixed[F]$16$,fixed[$\ell$]$16$,byte[F]$16$,byte[$\ell$]$16$,bit[F]$16$,bit[$\ell$]$16$,fixed[$16$]fixed$16$,byte[$16$]byte$16$,bit[$16$]bit$16$,fixed[$16$]byte$16$,fixed[$16$]bit$16$,byte[$16$]bit$16$,fixed[F]$32$,fixed[$\ell$]$32$,byte[F]$32$,byte[$\ell$]$32$,bit[F]$32$,bit[$\ell$]$32$,fixed[$16$]fixed$32$,byte[$16$]byte$32$,bit[$16$]bit$32$,fixed[$16$]byte$32$,fixed[$16$]bit$32$,byte[$16$]bit$32$,fixed[F]$64$,fixed[$\ell$]$64$,byte[F]$64$,byte[$\ell$]$64$,bit[F]$64$,bit[$\ell$]$64$,fixed[$16$]fixed$64$,byte[$16$]byte$64$,bit[$16$]bit$64$,fixed[$16$]byte$64$,fixed[$16$]bit$64$,byte[$16$]bit$64$,\\emph{Prezza}"
vals = [ 32_000_000_000,2.000000,2.000000,1.156982,1.156373,1.125000,1.125000,2.000073,1.157061,1.125095,1.157070,1.125105,1.125096,1.125000,1.125000,1.031982,1.031373,1.021484,1.021484,1.125009,1.031992,1.021496,1.031993,1.021497,1.021496,1.062500,1.062500,1.016357,1.015748,1.011719,1.011719,1.062505,1.016362,1.011725,1.016363,1.011725,1.011725,1.031250,1.031250,1.008545,1.007935,1.006348,1.006348,1.031252,1.008547,1.006351,1.008548,1.006351,1.006351,1.015625,1.015625,1.004639,1.004029,1.003418,1.003418,1.015626,1.004640,1.003419,1.004640,1.003420,1.003420,1.127441 ]
#if __name__ == '__main__':
mapped = dict(zip(lbls.split(',')[1:], vals[1:]))
ordered = OrderedDict(sorted(mapped.items(), key=lambda x: x[1]))
length = 7
keys, vals = list(ordered.keys()), [ "${:0.4f}$".format(i) for i in ordered.values() ]
for i in range(0, len(keys), length):
print("\\begin{tabular}{" + "|x{2.35cm}"*len(keys[i:i+length]) + "|}")
print("\\hline")
print(" & ".join(keys[i:i+length]) + " \\\\")
print("\\hline")
print(" & ".join(vals[i:i+length]) + " \\\\")
print("\\hline")
print("\\end{tabular} \\vspace{0.2cm}\n")
| true | true |
f7303cda043ce962a43ddaf990831db987ed128f | 758 | py | Python | manage.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | manage.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | manage.py | Boring-Mind/sbc-store | d16cce07bcb05ff2ea901411a5129ab1f0540161 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from store.settings.base import get_config_type
def main():
# Set path to the current config file
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'store.settings.' + get_config_type()
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.074074 | 73 | 0.675462 |
import os
import sys
from store.settings.base import get_config_type
def main():
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'store.settings.' + get_config_type()
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f7303e3fc37e9d2b3d1733a2847b96c9ba7bc3e0 | 7,086 | py | Python | ansible/modules/network/avi/avi_sslprofile.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | ansible/modules/network/avi/avi_sslprofile.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | ansible/modules/network/avi/avi_sslprofile.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Cipher_enums of sslprofile.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: ''
username: ''
password: ''
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
'''
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
| 35.787879 | 136 | 0.673299 |
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Cipher_enums of sslprofile.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: ''
username: ''
password: ''
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
'''
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare the sslprofile module options and delegate to the Avi API helper."""
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'accepted_ciphers': dict(type='str'),
        'accepted_versions': dict(type='list'),
        'cipher_enums': dict(type='list'),
        'description': dict(type='str'),
        'dhparam': dict(type='str'),
        'enable_ssl_session_reuse': dict(type='bool'),
        'name': dict(type='str', required=True),
        'prefer_client_cipher_ordering': dict(type='bool'),
        'send_close_notify': dict(type='bool'),
        'ssl_rating': dict(type='dict'),
        'ssl_session_timeout': dict(type='int'),
        'tags': dict(type='list'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    # Merge in the connection/credential options shared by every Avi module.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Empty set: no sensitive fields need obfuscation for sslprofile objects.
    return avi_ansible_api(module, 'sslprofile',
                           set([]))


if __name__ == '__main__':
    main()
| true | true |
f7303fe1c7865651a322e4e2df538947ebd447f6 | 6,503 | py | Python | zerver/webhooks/sonarr/view.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:55.000Z | 2022-02-06T13:00:21.000Z | zerver/webhooks/sonarr/view.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | null | null | null | zerver/webhooks/sonarr/view.py | dumpmemory/zulip | 496273ddbc567330a0022699d6d6eb5c646e5da5 | [
"Apache-2.0"
] | 1 | 2022-02-04T05:15:12.000Z | 2022-02-04T05:15:12.000Z | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
# Topic (subject) templates for messages sent to Zulip.
# NOTE: the previous versions called .strip() on every literal, but none of
# the literals has surrounding whitespace, so those calls were no-ops.
SONARR_TOPIC_TEMPLATE = "{series_title}"
SONARR_TOPIC_TEMPLATE_TEST = "Sonarr - Test"
SONARR_TOPIC_TEMPLATE_HEALTH_CHECK = "Health {level}"

# Message-body templates, one per supported Sonarr event type.
SONARR_MESSAGE_TEMPLATE_SERIES_DELETED = "{series_title} has been deleted."
SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK = "{message}."
SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED = "{series_title} episodes have been renamed."
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED = (
    "{series_title} - {series_number}x{episode_number} - {episode_name} has been imported."
)
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been upgraded from {old_quality} to {new_quality}."
SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED = (
    "{series_title} - {series_number}x{episode_number} - {episode_name} has been grabbed."
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED = (
    "{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted."
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted due to quality upgrade."

# Event types this webhook view accepts (Sonarr "eventType" payload field).
ALL_EVENT_TYPES = [
    "Grab",
    "EpisodeFileDelete",
    "Test",
    "Download",
    "SeriesDelete",
    "Health",
    "Rename",
]
@webhook_view("Sonarr", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_sonarr_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
    """Receive a Sonarr webhook payload and relay it as a Zulip message."""
    # Build the body first: unsupported event types raise
    # UnsupportedWebhookEventType before we ever touch payload["series"].
    content = get_body_for_http_request(payload)
    topic = get_subject_for_http_request(payload)
    check_send_webhook_message(request, user_profile, topic, content, payload["eventType"])
    return json_success(request)
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
    """Pick the Zulip topic for a Sonarr event payload."""
    event_type = payload["eventType"]
    if event_type == "Test":
        return SONARR_TOPIC_TEMPLATE_TEST
    if event_type == "Health":
        return SONARR_TOPIC_TEMPLATE_HEALTH_CHECK.format(level=payload["level"])
    # Every other event is tied to a specific series.
    return SONARR_TOPIC_TEMPLATE.format(series_title=payload["series"]["title"])
def get_body_for_health_check_event(payload: Dict[str, Any]) -> str:
    """Body for a Sonarr health-check notification."""
    health_message = payload["message"]
    return SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK.format(message=health_message)
def get_body_for_episodes_renamed_event(payload: Dict[str, Any]) -> str:
    """Body for a Rename event (episode files renamed on disk)."""
    title = payload["series"]["title"]
    return SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED.format(series_title=title)
def get_body_for_series_deleted_event(payload: Dict[str, Any]) -> str:
    """Body for a SeriesDelete event."""
    title = payload["series"]["title"]
    return SONARR_MESSAGE_TEMPLATE_SERIES_DELETED.format(series_title=title)
def get_body_for_episode_imported_upgrade_event(payload: Dict[str, Any]) -> str:
    """Body for a Download event that upgraded an existing episode file."""
    episode = payload["episodes"][0]
    return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE.format(
        series_title=payload["series"]["title"],
        series_number=episode["seasonNumber"],
        episode_number=episode["episodeNumber"],
        episode_name=episode["title"],
        new_quality=payload["episodeFile"]["quality"],
        old_quality=payload["deletedFiles"][0]["quality"],
    )
def get_body_for_episode_imported_event(payload: Dict[str, Any]) -> str:
    """Body for a Download event that imported a brand-new episode file."""
    episode = payload["episodes"][0]
    return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED.format(
        series_title=payload["series"]["title"],
        series_number=episode["seasonNumber"],
        episode_number=episode["episodeNumber"],
        episode_name=episode["title"],
    )
def get_body_for_episode_grabbed_event(payload: Dict[str, Any]) -> str:
    """Body for a Grab event (release sent to the download client)."""
    episode = payload["episodes"][0]
    return SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED.format(
        series_title=payload["series"]["title"],
        series_number=episode["seasonNumber"],
        episode_number=episode["episodeNumber"],
        episode_name=episode["title"],
    )
def get_body_for_episode_deleted_upgrade_event(payload: Dict[str, Any]) -> str:
    """Body for an EpisodeFileDelete event triggered by a quality upgrade."""
    episode = payload["episodes"][0]
    return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE.format(
        series_title=payload["series"]["title"],
        series_number=episode["seasonNumber"],
        episode_number=episode["episodeNumber"],
        episode_name=episode["title"],
    )
def get_body_for_episode_deleted_event(payload: Dict[str, Any]) -> str:
    """Body for a plain EpisodeFileDelete event."""
    episode = payload["episodes"][0]
    return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED.format(
        series_title=payload["series"]["title"],
        series_number=episode["seasonNumber"],
        episode_number=episode["episodeNumber"],
        episode_name=episode["title"],
    )
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
    """Dispatch to the body builder for this event type.

    Raises UnsupportedWebhookEventType for unknown event types, and also
    for Download/EpisodeFileDelete payloads missing their qualifier key
    (isUpgrade / deleteReason), matching the original fall-through.
    """
    event_type = payload["eventType"]
    if event_type == "Test":
        return get_setup_webhook_message("Sonarr")
    if event_type == "Health":
        return get_body_for_health_check_event(payload)
    if event_type == "Rename":
        return get_body_for_episodes_renamed_event(payload)
    if event_type == "SeriesDelete":
        return get_body_for_series_deleted_event(payload)
    if event_type == "Download" and "isUpgrade" in payload:
        if payload["isUpgrade"]:
            return get_body_for_episode_imported_upgrade_event(payload)
        return get_body_for_episode_imported_event(payload)
    if event_type == "Grab":
        return get_body_for_episode_grabbed_event(payload)
    if event_type == "EpisodeFileDelete" and "deleteReason" in payload:
        if payload["deleteReason"] == "upgrade":
            return get_body_for_episode_deleted_upgrade_event(payload)
        return get_body_for_episode_deleted_event(payload)
    raise UnsupportedWebhookEventType(event_type)
| 40.899371 | 182 | 0.721974 | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
SONARR_TOPIC_TEMPLATE = "{series_title}".strip()
SONARR_TOPIC_TEMPLATE_TEST = "Sonarr - Test".strip()
SONARR_TOPIC_TEMPLATE_HEALTH_CHECK = "Health {level}".strip()
SONARR_MESSAGE_TEMPLATE_SERIES_DELETED = "{series_title} has been deleted.".strip()
SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK = "{message}.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED = "{series_title} episodes have been renamed.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been imported.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been upgraded from {old_quality} to {new_quality}.".strip()
SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been grabbed.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED = (
"{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted.".strip()
)
SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE = "{series_title} - {series_number}x{episode_number} - {episode_name} has been deleted due to quality upgrade.".strip()
ALL_EVENT_TYPES = [
"Grab",
"EpisodeFileDelete",
"Test",
"Download",
"SeriesDelete",
"Health",
"Rename",
]
@webhook_view("Sonarr", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_sonarr_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body, payload["eventType"])
return json_success(request)
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
if payload["eventType"] != "Test" and payload["eventType"] != "Health":
topic = SONARR_TOPIC_TEMPLATE.format(series_title=payload["series"]["title"])
elif payload["eventType"] == "Test":
topic = SONARR_TOPIC_TEMPLATE_TEST
elif payload["eventType"] == "Health":
topic = SONARR_TOPIC_TEMPLATE_HEALTH_CHECK.format(level=payload["level"])
return topic
def get_body_for_health_check_event(payload: Dict[str, Any]) -> str:
return SONARR_MESSAGE_TEMPLATE_HEALTH_CHECK.format(message=payload["message"])
def get_body_for_episodes_renamed_event(payload: Dict[str, Any]) -> str:
return SONARR_MESSAGE_TEMPLATE_EPISODES_RENAMED.format(series_title=payload["series"]["title"])
def get_body_for_series_deleted_event(payload: Dict[str, Any]) -> str:
return SONARR_MESSAGE_TEMPLATE_SERIES_DELETED.format(series_title=payload["series"]["title"])
def get_body_for_episode_imported_upgrade_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
"new_quality": payload["episodeFile"]["quality"],
"old_quality": payload["deletedFiles"][0]["quality"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED_UPGRADE.format(**data)
def get_body_for_episode_imported_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_IMPORTED.format(**data)
def get_body_for_episode_grabbed_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_GRABBED.format(**data)
def get_body_for_episode_deleted_upgrade_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED_UPGRADE.format(**data)
def get_body_for_episode_deleted_event(payload: Dict[str, Any]) -> str:
data = {
"series_title": payload["series"]["title"],
"series_number": payload["episodes"][0]["seasonNumber"],
"episode_number": payload["episodes"][0]["episodeNumber"],
"episode_name": payload["episodes"][0]["title"],
}
return SONARR_MESSAGE_TEMPLATE_EPISODE_DELETED.format(**data)
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
if payload["eventType"] == "Test":
return get_setup_webhook_message("Sonarr")
elif payload["eventType"] == "Health":
return get_body_for_health_check_event(payload)
elif payload["eventType"] == "Rename":
return get_body_for_episodes_renamed_event(payload)
elif payload["eventType"] == "SeriesDelete":
return get_body_for_series_deleted_event(payload)
elif payload["eventType"] == "Download" and "isUpgrade" in payload:
if payload["isUpgrade"]:
return get_body_for_episode_imported_upgrade_event(payload)
else:
return get_body_for_episode_imported_event(payload)
elif payload["eventType"] == "Grab":
return get_body_for_episode_grabbed_event(payload)
elif payload["eventType"] == "EpisodeFileDelete" and "deleteReason" in payload:
if payload["deleteReason"] == "upgrade":
return get_body_for_episode_deleted_upgrade_event(payload)
else:
return get_body_for_episode_deleted_event(payload)
else:
raise UnsupportedWebhookEventType(payload["eventType"])
| true | true |
f730429da94736b9ecf8b387b5667497fa6dec89 | 132 | py | Python | GA_tsp_optimisation/__init__.py | JessikaSmith/OptimizationAlgorithms | bf0f871f4d6150e1e7533360cfc6f70eb616c870 | [
"MIT"
] | 15 | 2018-11-16T04:42:44.000Z | 2020-03-20T16:00:47.000Z | GA_tsp_optimisation/__init__.py | JessikaSmith/OptimizationAlgorithms | bf0f871f4d6150e1e7533360cfc6f70eb616c870 | [
"MIT"
] | null | null | null | GA_tsp_optimisation/__init__.py | JessikaSmith/OptimizationAlgorithms | bf0f871f4d6150e1e7533360cfc6f70eb616c870 | [
"MIT"
] | 3 | 2019-01-17T13:18:56.000Z | 2019-12-17T22:22:48.000Z | from .mutation import Mutation
from .crossover import Crossover
from .selection import Selector
from .ga_pipeline import ga_pipeline | 33 | 36 | 0.856061 | from .mutation import Mutation
from .crossover import Crossover
from .selection import Selector
from .ga_pipeline import ga_pipeline | true | true |
f730429fdfa6612d800f5277781da67e08805140 | 204 | py | Python | libs/VulnScan.py | glaudsonml/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | 35 | 2017-05-22T14:42:01.000Z | 2020-09-07T21:24:41.000Z | libs/VulnScan.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | null | null | null | libs/VulnScan.py | tmaxter/kurgan-ai | c0ad4450f9fb2004f35b8a0201bfe894e01adc8f | [
"Apache-2.0"
] | 5 | 2017-12-19T03:36:54.000Z | 2021-04-14T18:05:08.000Z | '''
Vulnerability Scanner Class
'''
class VulnScan(object):
    """Tracks whether a vulnerability scan is currently running."""

    # New scanners start out in the "scanning" state.
    scanning = True

    def get_scanning(self):
        """Return the current scanning flag."""
        return self.scanning

    def set_scanning(self, val):
        """Update the scanning flag (stored on the instance)."""
        self.scanning = val
| 14.571429 | 32 | 0.637255 |
class VulnScan(object):
scanning=True
def set_scanning(self, val):
self.scanning = val
def get_scanning(self):
return self.scanning
| true | true |
f730431b27e07083dae8b1e4b35faa1a13906b39 | 2,221 | py | Python | generated-libraries/python/netapp/coredump/coredump_config_modify_iter_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/coredump/coredump_config_modify_iter_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/coredump/coredump_config_modify_iter_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.coredump.coredump_config_info import CoredumpConfigInfo
from netapp.netapp_object import NetAppObject
class CoredumpConfigModifyIterInfo(NetAppObject):
    """
    Result record for a single attempted/performed modify operation
    against a coredump-config object.

    Returned only when the input element 'return-failure-list' is true;
    error-code/error-message describe why an object was not modified.
    """
    # Numeric error code of a failed modify attempt (None on success).
    _error_code = None
    @property
    def error_code(self):
        """
        Error code, if the modify operation caused an error.
        """
        return self._error_code
    @error_code.setter
    def error_code(self, val):
        # validate() is inherited from the NetAppObject base class.
        if val != None:
            self.validate('error_code', val)
        self._error_code = val
    # Human-readable description of the failure (None on success).
    _error_message = None
    @property
    def error_message(self):
        """
        Error description, if the modify operation caused an
        error.
        """
        return self._error_message
    @error_message.setter
    def error_message(self, val):
        if val != None:
            self.validate('error_message', val)
        self._error_message = val
    # Keys identifying the coredump-config object the modify applied to.
    _coredump_config_key = None
    @property
    def coredump_config_key(self):
        """
        The keys for the coredump-config object to which the
        modify operation applies.
        """
        return self._coredump_config_key
    @coredump_config_key.setter
    def coredump_config_key(self, val):
        if val != None:
            self.validate('coredump_config_key', val)
        self._coredump_config_key = val
    @staticmethod
    def get_api_name():
        # ONTAPI type name for this structure.
        return "coredump-config-modify-iter-info"
    @staticmethod
    def get_desired_attrs():
        # Attribute names as they appear on the wire (dash-separated).
        return [
            'error-code',
            'error-message',
            'coredump-config-key',
        ]
    def describe_properties(self):
        # NOTE(review): 'basestring' makes this module Python-2-only.
        return {
            'error_code': { 'class': int, 'is_list': False, 'required': 'optional' },
            'error_message': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'coredump_config_key': { 'class': CoredumpConfigInfo, 'is_list': False, 'required': 'required' },
        }
| 30.424658 | 109 | 0.61729 | from netapp.coredump.coredump_config_info import CoredumpConfigInfo
from netapp.netapp_object import NetAppObject
class CoredumpConfigModifyIterInfo(NetAppObject):
_error_code = None
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, val):
if val != None:
self.validate('error_code', val)
self._error_code = val
_error_message = None
@property
def error_message(self):
return self._error_message
@error_message.setter
def error_message(self, val):
if val != None:
self.validate('error_message', val)
self._error_message = val
_coredump_config_key = None
@property
def coredump_config_key(self):
return self._coredump_config_key
@coredump_config_key.setter
def coredump_config_key(self, val):
if val != None:
self.validate('coredump_config_key', val)
self._coredump_config_key = val
@staticmethod
def get_api_name():
return "coredump-config-modify-iter-info"
@staticmethod
def get_desired_attrs():
return [
'error-code',
'error-message',
'coredump-config-key',
]
def describe_properties(self):
return {
'error_code': { 'class': int, 'is_list': False, 'required': 'optional' },
'error_message': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'coredump_config_key': { 'class': CoredumpConfigInfo, 'is_list': False, 'required': 'required' },
}
| true | true |
f730433703f39fdd32cf073d91c160a604ea19a7 | 1,983 | py | Python | recipes/recipe_modules/depot_tools/api.py | xiayongtao/depot_tools | 02e6133a844e47dd55159a585144708bae11b76d | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/depot_tools/api.py | xiayongtao/depot_tools | 02e6133a844e47dd55159a585144708bae11b76d | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/depot_tools/api.py | xiayongtao/depot_tools | 02e6133a844e47dd55159a585144708bae11b76d | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The `depot_tools` module provides safe functions to access paths within
the depot_tools repo."""
import contextlib
from recipe_engine import recipe_api
class DepotToolsApi(recipe_api.RecipeApi):
  """Safe accessors for tools and paths inside the depot_tools checkout."""

  @property
  def download_from_google_storage_path(self):
    """Returns (Path): The download_from_google_storage.py script."""
    return self.repo_resource('download_from_google_storage.py')

  @property
  def upload_to_google_storage_path(self):
    """Returns (Path): The upload_to_google_storage.py script."""
    return self.repo_resource('upload_to_google_storage.py')

  @property
  def root(self):
    """Returns (Path): The "depot_tools" root directory."""
    return self.repo_resource()

  @property
  def cros_path(self):
    """Returns (Path): The cros helper script."""
    return self.repo_resource('cros')

  @property
  def gn_py_path(self):
    """Returns (Path): The gn.py wrapper script."""
    return self.repo_resource('gn.py')

  # TODO(dnj): Remove this once everything uses the "gsutil" recipe module
  # version.
  @property
  def gsutil_py_path(self):
    """Returns (Path): The gsutil.py wrapper script."""
    return self.repo_resource('gsutil.py')

  @property
  def ninja_path(self):
    """Returns (Path): The ninja binary for the current platform."""
    ninja_exe = 'ninja.exe' if self.m.platform.is_win else 'ninja'
    return self.repo_resource(ninja_exe)

  @property
  def autoninja_path(self):
    """Returns (Path): The autoninja wrapper for the current platform."""
    autoninja = 'autoninja.bat' if self.m.platform.is_win else 'autoninja'
    return self.repo_resource(autoninja)

  @property
  def presubmit_support_py_path(self):
    """Returns (Path): The presubmit_support.py script."""
    return self.repo_resource('presubmit_support.py')

  @property
  def dirmd_path(self):
    """Returns (Path): The dirmd tool."""
    return self.repo_resource('dirmd')

  @contextlib.contextmanager
  def on_path(self):
    """Use this context manager to put depot_tools on $PATH.

    Example:

    ```python
    with api.depot_tools.on_path():
      # run some steps
    ```
    """
    # By default Depot Tools do not auto update on the bots.
    # (crbug/1090603)
    with self.m.context(
        **{'env_suffixes': {
            'PATH': [self.root],
            'DEPOT_TOOLS_UPDATE': '0'
        }}):
      yield
| 25.753247 | 74 | 0.702975 |
import contextlib
from recipe_engine import recipe_api
class DepotToolsApi(recipe_api.RecipeApi):
@property
def download_from_google_storage_path(self):
return self.repo_resource('download_from_google_storage.py')
@property
def upload_to_google_storage_path(self):
return self.repo_resource('upload_to_google_storage.py')
@property
def root(self):
return self.repo_resource()
@property
def cros_path(self):
return self.repo_resource('cros')
@property
def gn_py_path(self):
return self.repo_resource('gn.py')
@property
def gsutil_py_path(self):
return self.repo_resource('gsutil.py')
@property
def ninja_path(self):
ninja_exe = 'ninja.exe' if self.m.platform.is_win else 'ninja'
return self.repo_resource(ninja_exe)
@property
def autoninja_path(self):
autoninja = 'autoninja.bat' if self.m.platform.is_win else 'autoninja'
return self.repo_resource(autoninja)
@property
def presubmit_support_py_path(self):
return self.repo_resource('presubmit_support.py')
@property
def dirmd_path(self):
return self.repo_resource('dirmd')
@contextlib.contextmanager
def on_path(self):
with self.m.context(
**{'env_suffixes': {
'PATH': [self.root],
'DEPOT_TOOLS_UPDATE': '0'
}}):
yield
| true | true |
f730440cb51639d90c3a94b3a0f7db56fd17a3c5 | 5,151 | py | Python | PaddleSlim/classification/pruning/compress.py | XiaoguangHu01/models | a95d49323ed504e5a9164586f171f408954fd43a | [
"Apache-2.0"
] | null | null | null | PaddleSlim/classification/pruning/compress.py | XiaoguangHu01/models | a95d49323ed504e5a9164586f171f408954fd43a | [
"Apache-2.0"
] | null | null | null | PaddleSlim/classification/pruning/compress.py | XiaoguangHu01/models | a95d49323ed504e5a9164586f171f408954fd43a | [
"Apache-2.0"
] | null | null | null | import os
import sys
import logging
import paddle
import argparse
import functools
import math
import paddle.fluid as fluid
sys.path.append("..")
import imagenet_reader as reader
import models
sys.path.append("../../")
from utility import add_arguments, print_arguments
from paddle.fluid.contrib.slim import Compressor
# Module-level logger for this compression script.
logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)

# Command-line options for the pruning/compression job.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 64*4, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, None, "The target model.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('lr', float, 0.1, "The learning rate used to fine-tune pruned model.")
add_arg('lr_strategy', str, "piecewise_decay", "The learning rate decay strategy.")
add_arg('l2_decay', float, 3e-5, "The l2_decay parameter.")
add_arg('momentum_rate', float, 0.9, "The value of momentum_rate.")
add_arg('num_epochs', int, 120, "The number of total epochs.")
add_arg('total_images', int, 1281167, "The number of total training images.")
parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")
add_arg('config_file', str, None, "The config file for compression with yaml format.")
# yapf: enable

# Names of all model classes exported by the models package.
model_list = [m for m in dir(models) if "__" not in m]
def piecewise_decay(args):
    """Momentum optimizer whose LR drops 10x at each epoch in args.step_epochs."""
    steps_per_epoch = int(math.ceil(float(args.total_images) / args.batch_size))
    # Decay boundaries in global steps; one LR value per interval.
    boundaries = [steps_per_epoch * epoch for epoch in args.step_epochs]
    values = [args.lr * (0.1 ** idx) for idx in range(len(boundaries) + 1)]
    learning_rate = fluid.layers.piecewise_decay(boundaries=boundaries, values=values)
    return fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=args.momentum_rate,
        regularization=fluid.regularizer.L2Decay(args.l2_decay))
def cosine_decay(args):
    """Momentum optimizer whose LR follows a cosine schedule over all epochs."""
    steps_per_epoch = int(math.ceil(float(args.total_images) / args.batch_size))
    learning_rate = fluid.layers.cosine_decay(
        learning_rate=args.lr,
        step_each_epoch=steps_per_epoch,
        epochs=args.num_epochs)
    return fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=args.momentum_rate,
        regularization=fluid.regularizer.L2Decay(args.l2_decay))
def create_optimizer(args):
    """Return the Momentum optimizer selected by args.lr_strategy.

    Raises:
        ValueError: if args.lr_strategy is not a recognized strategy.
    """
    if args.lr_strategy == "piecewise_decay":
        return piecewise_decay(args)
    elif args.lr_strategy == "cosine_decay":
        return cosine_decay(args)
    # Previously an unknown strategy silently returned None and crashed later
    # inside compress(); fail fast with a clear message instead.
    raise ValueError(
        "Unknown lr_strategy: {!r}; expected 'piecewise_decay' or "
        "'cosine_decay'.".format(args.lr_strategy))
def compress(args):
    """Build the ImageNet classifier named by args.model, then run pruning
    per the yaml config in args.config_file via the slim Compressor."""
    # ImageNet setup: 1000 classes, 3x224x224 inputs.
    class_dim=1000
    image_shape="3,224,224"
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
    image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    cost = fluid.layers.cross_entropy(input=out, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
    # Clone the program for evaluation before the optimizer mutates it.
    val_program = fluid.default_main_program().clone()
    opt = create_optimizer(args)
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    if args.pretrained_model:
        # Only load variables that actually exist in the pretrained dir.
        def if_exist(var):
            return os.path.exists(os.path.join(args.pretrained_model, var.name))
        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
    val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
    val_feed_list = [('image', image.name), ('label', label.name)]
    val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
    train_reader = paddle.batch(
        reader.train(), batch_size=args.batch_size, drop_last=True)
    train_feed_list = [('image', image.name), ('label', label.name)]
    train_fetch_list = [('loss', avg_cost.name)]
    # The Compressor drives train/eval loops and applies the pruning strategy
    # described in args.config_file.
    com_pass = Compressor(
        place,
        fluid.global_scope(),
        fluid.default_main_program(),
        train_reader=train_reader,
        train_feed_list=train_feed_list,
        train_fetch_list=train_fetch_list,
        eval_program=val_program,
        eval_reader=val_reader,
        eval_feed_list=val_feed_list,
        eval_fetch_list=val_fetch_list,
        save_eval_model=True,
        prune_infer_model=[[image.name], [out.name]],
        train_optimizer=opt)
    com_pass.config(args.config_file)
    com_pass.run()
def main():
    """Parse CLI arguments, echo them, and launch compression."""
    parsed_args = parser.parse_args()
    print_arguments(parsed_args)
    compress(parsed_args)


if __name__ == '__main__':
    main()
| 39.623077 | 108 | 0.673461 | import os
import sys
import logging
import paddle
import argparse
import functools
import math
import paddle.fluid as fluid
sys.path.append("..")
import imagenet_reader as reader
import models
sys.path.append("../../")
from utility import add_arguments, print_arguments
from paddle.fluid.contrib.slim import Compressor
logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 64*4, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, None, "The target model.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('lr', float, 0.1, "The learning rate used to fine-tune pruned model.")
add_arg('lr_strategy', str, "piecewise_decay", "The learning rate decay strategy.")
add_arg('l2_decay', float, 3e-5, "The l2_decay parameter.")
add_arg('momentum_rate', float, 0.9, "The value of momentum_rate.")
add_arg('num_epochs', int, 120, "The number of total epochs.")
add_arg('total_images', int, 1281167, "The number of total training images.")
parser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help="piecewise decay step")
add_arg('config_file', str, None, "The config file for compression with yaml format.")
model_list = [m for m in dir(models) if "__" not in m]
def piecewise_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
bd = [step * e for e in args.step_epochs]
lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]
learning_rate = fluid.layers.piecewise_decay(boundaries=bd, values=lr)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def cosine_decay(args):
step = int(math.ceil(float(args.total_images) / args.batch_size))
learning_rate = fluid.layers.cosine_decay(
learning_rate=args.lr,
step_each_epoch=step,
epochs=args.num_epochs)
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
momentum=args.momentum_rate,
regularization=fluid.regularizer.L2Decay(args.l2_decay))
return optimizer
def create_optimizer(args):
if args.lr_strategy == "piecewise_decay":
return piecewise_decay(args)
elif args.lr_strategy == "cosine_decay":
return cosine_decay(args)
def compress(args):
class_dim=1000
image_shape="3,224,224"
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(args.model, model_list)
image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
val_program = fluid.default_main_program().clone()
opt = create_optimizer(args)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
val_reader = paddle.batch(reader.val(), batch_size=args.batch_size)
val_feed_list = [('image', image.name), ('label', label.name)]
val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', acc_top5.name)]
train_reader = paddle.batch(
reader.train(), batch_size=args.batch_size, drop_last=True)
train_feed_list = [('image', image.name), ('label', label.name)]
train_fetch_list = [('loss', avg_cost.name)]
com_pass = Compressor(
place,
fluid.global_scope(),
fluid.default_main_program(),
train_reader=train_reader,
train_feed_list=train_feed_list,
train_fetch_list=train_fetch_list,
eval_program=val_program,
eval_reader=val_reader,
eval_feed_list=val_feed_list,
eval_fetch_list=val_fetch_list,
save_eval_model=True,
prune_infer_model=[[image.name], [out.name]],
train_optimizer=opt)
com_pass.config(args.config_file)
com_pass.run()
def main():
args = parser.parse_args()
print_arguments(args)
compress(args)
if __name__ == '__main__':
main()
| true | true |
f730443cb35af115bf5129a4cd48778f2a808e00 | 150 | py | Python | uploading/apps.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | uploading/apps.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | uploading/apps.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UploadingConfig(AppConfig):
    """Django application configuration for the ``uploading`` app."""
    # Default primary-key field type for models that do not declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted Python path the app is registered under in INSTALLED_APPS.
    name = 'uploading'
| 21.428571 | 56 | 0.766667 | from django.apps import AppConfig
class UploadingConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'uploading'
| true | true |
f73045076a374efc7d1d0f523aa322cd6ee1a0de | 470 | py | Python | tests/RigTests/bob_guide_build.py | FabricExile/Kraken | d8c1f5189cb191945e2c18a1369c458d05305afc | [
"BSD-3-Clause"
] | 7 | 2017-12-04T16:57:42.000Z | 2021-09-07T07:02:38.000Z | tests/RigTests/bob_guide_build.py | xtvjxk123456/Kraken | d8c1f5189cb191945e2c18a1369c458d05305afc | [
"BSD-3-Clause"
] | null | null | null | tests/RigTests/bob_guide_build.py | xtvjxk123456/Kraken | d8c1f5189cb191945e2c18a1369c458d05305afc | [
"BSD-3-Clause"
] | 6 | 2017-11-14T06:50:48.000Z | 2021-08-21T22:47:29.000Z | import json
from kraken import plugins
from kraken_examples.bob_guide import BobGuide
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
# Build the "Bob" guide rig, timing the build with Kraken's profiler.
Profiler.getInstance().push("bob_guide_build")
bobGuide = BobGuide("char_bob_guide")
# The active Kraken plugin decides which DCC-specific builder is returned.
builder = plugins.getBuilder()
builder.build(bobGuide)
Profiler.getInstance().pop()
if __name__ == "__main__":
    # Run as a script (Python 2): print the profiling report.
    print Profiler.getInstance().generateReport()
else:
    # Imported as a module: dump the built rig hierarchy instead.
    logHierarchy(bobGuide)
| 21.363636 | 55 | 0.797872 | import json
from kraken import plugins
from kraken_examples.bob_guide import BobGuide
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
Profiler.getInstance().push("bob_guide_build")
bobGuide = BobGuide("char_bob_guide")
builder = plugins.getBuilder()
builder.build(bobGuide)
Profiler.getInstance().pop()
if __name__ == "__main__":
print Profiler.getInstance().generateReport()
else:
logHierarchy(bobGuide)
| false | true |
f730464f2c13d32a8c07491ab4bcbeb1a2f56555 | 11,607 | py | Python | HPC/pandayoda/yodaexe/Droid.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | HPC/pandayoda/yodaexe/Droid.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | HPC/pandayoda/yodaexe/Droid.py | virthead/COMPASS-multijob-pilot | beac49ec432d24382d4d23aacfe6c9674a59e118 | [
"Apache-2.0"
] | null | null | null | import commands
import json
import os
import shutil
import sys
import time
import pickle
import signal
from os.path import abspath as _abspath, join as _join
from pandayoda.yodacore import Interaction,Database,Logger
from EventServer.EventServerJobManager import EventServerJobManager
class Droid:
def __init__(self, globalWorkingDir, localWorkingDir):
self.__globalWorkingDir = globalWorkingDir
self.__localWorkingDir = localWorkingDir
self.__currentDir = None
self.__comm = Interaction.Requester()
self.__tmpLog = Logger.Logger()
self.__esJobManager = None
self.__rank = self.__comm.getRank()
self.__tmpLog.info("Rank %s: Global working dir: %s" % (self.__rank, self.__globalWorkingDir))
self.initWorkingDir()
self.__tmpLog.info("Rank %s: Current working dir: %s" % (self.__rank, self.__currentDir))
self.__poolFileCatalog = None
self.__inputFiles = None
self.__copyInputFiles = None
signal.signal(signal.SIGTERM, self.stop)
def initWorkingDir(self):
# Create separate working directory for each rank
curdir = _abspath (self.__localWorkingDir)
wkdirname = "rank_%s" % str(self.__rank)
wkdir = _abspath (_join(curdir,wkdirname))
if not os.path.exists(wkdir):
os.makedirs (wkdir)
os.chdir (wkdir)
self.__currentDir = wkdir
def postExecJob(self):
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
localInputFile = os.path.join(os.getcwd(), os.path.basename(inputFile))
self.__tmpLog.debug("Rank %s: Remove input file: %s" % (self.__rank, localInputFile))
os.remove(localInputFile)
if self.__globalWorkingDir != self.__localWorkingDir:
command = "mv " + self.__currentDir + " " + self.__globalWorkingDir
self.__tmpLog.debug("Rank %s: copy files from local working directory to global working dir(cmd: %s)" % (self.__rank, command))
status, output = commands.getstatusoutput(command)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
def setup(self, job):
#try:
if True:
self.__poolFileCatalog = job.get('PoolFileCatalog', None)
self.__inputFiles = job.get('InputFiles', None)
self.__copyInputFiles = job.get('CopyInputFiles', False)
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
shutil.copy(inputFile, './')
pfc_name = os.path.basename(self.__poolFileCatalog)
pfc_name = os.path.join(os.getcwd(), pfc_name)
pfc_name_back = pfc_name + ".back"
shutil.copy2(self.__poolFileCatalog, pfc_name_back)
with open(pfc_name, 'wt') as pfc_out:
with open(pfc_name_back, 'rt') as pfc_in:
for line in pfc_in:
pfc_out.write(line.replace('HPCWORKINGDIR', os.getcwd()))
job["AthenaMPCmd"] = job["AthenaMPCmd"].replace('HPCWORKINGDIR', os.getcwd())
self.__esJobManager = EventServerJobManager(self.__rank)
self.__esJobManager.initMessageThread(socketname='EventService_EventRanges', context='local')
self.__esJobManager.initTokenExtractorProcess(job["TokenExtractCmd"])
self.__esJobManager.initAthenaMPProcess(job["AthenaMPCmd"])
return True, None
#except Exception, e:
# errMsg = "Failed to init EventServerJobManager: %s" % str(e)
# self.__esJobManager.terminate()
# return False, errMsg
def getJob(self):
request = {'Test':'TEST'}
self.__tmpLog.debug("Rank %s: getJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
job = output["job"]
if statusCode == 0:
return True, job
return False, None
def getEventRanges(self):
request = {'nRanges': 1}
self.__tmpLog.debug("Rank %s: getEventRanges(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getEventRanges',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
eventRanges = output['eventRanges']
if statusCode == 0:
return True, eventRanges
return False, None
def updateEventRange(self, output):
try:
eventRangeID = output.split(",")[1]
except Exception, e:
self.__tmpLog.warnning("Rank %s: failed to get eventRangeID from output: %s" % (self.__rank, output))
self.__tmpLog.warnning("Rank %s: error message: %s" % (self.__rank, str(e)))
request = {"eventRangeID": eventRangeID,
'eventStatus':" finished",
"output": output}
self.__tmpLog.debug("Rank %s: updateEventRange(request: %s)" % (self.__rank, request))
retStatus, retOutput = self.__comm.sendRequest('updateEventRange',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, retStatus, retOutput))
if retStatus:
statusCode = retOutput["StatusCode"]
if statusCode == 0:
return True
return False
def finishJob(self):
request = {'state': 'finished'}
self.__tmpLog.debug("Rank %s: updateJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('updateJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def failedJob(self):
request = {'state': 'failed'}
self.__tmpLog.debug("Rank %s: updateJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('updateJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def waitYoda(self):
self.__tmpLog.debug("Rank %s: WaitYoda" % (self.__rank))
while True:
status, output = self.__comm.waitMessage()
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
state = output["State"]
if statusCode == 0 and state == 'finished':
return True
return True
def run(self):
self.__tmpLog.info("Droid Starts")
status, job = self.getJob()
self.__tmpLog.info("Rank %s: getJob(%s)" % (self.__rank, job))
if not status:
self.__tmpLog.debug("Rank %s: Failed to get job" % self.__rank)
self.failedJob()
return -1
status, output = self.setup(job)
self.__tmpLog.info("Rank %s: setup job(status:%s, output:%s)" % (self.__rank, status, output))
if not status:
self.__tmpLog.debug("Rank %s: Failed to setup job(%s)" % (self.__rank, output))
self.failedJob()
return -1
# main loop
failedNum = 0
#self.__tmpLog.info("Rank %s: isDead: %s" % (self.__rank, self.__esJobManager.isDead()))
while not self.__esJobManager.isDead():
#self.__tmpLog.info("Rank %s: isDead: %s" % (self.__rank, self.__esJobManager.isDead()))
#self.__tmpLog.info("Rank %s: isNeedMoreEvents: %s" % (self.__rank, self.__esJobManager.isNeedMoreEvents()))
if self.__esJobManager.isNeedMoreEvents():
self.__tmpLog.info("Rank %s: need more events" % self.__rank)
status, eventRanges = self.getEventRanges()
# failed to get message again and again
if not status:
fileNum += 1
if fileNum > 30:
self.__tmpLog.warning("Rank %s: failed to get events more than 30 times. finish job" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
else:
continue
else:
fileNum = 0
self.__tmpLog.info("Rank %s: get event ranges(%s)" % (self.__rank, eventRanges))
if len(eventRanges) == 0:
self.__tmpLog.info("Rank %s: no more events" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
for eventRange in eventRanges:
self.__esJobManager.insertEventRange(eventRange)
self.__esJobManager.poll()
output = self.__esJobManager.getOutput()
if output is not None:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
time.sleep(2)
self.__esJobManager.flushMessages()
output = self.__esJobManager.getOutput()
while output:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
output = self.__esJobManager.getOutput()
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
self.waitYoda()
return 0
def stop(self, signum=None, frame=None):
self.__tmpLog.info('Rank %s: stop signal received' % self.__rank)
self.__esJobManager.terminate()
self.__esJobManager.flushMessages()
output = self.__esJobManager.getOutput()
while output:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
output = self.__esJobManager.getOutput()
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
self.__tmpLog.info('Rank %s: stop' % self.__rank)
def __del__(self):
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
#self.__esJobManager.terminate()
#self.__esJobManager.flushMessages()
#output = self.__esJobManager.getOutput()
#while output:
# self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
# self.updateEventRange(output)
# output = self.__esJobManager.getOutput()
#self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
#self.postExecJob()
#self.__tmpLog.info("Rank %s: finish job" % self.__rank)
#self.finishJob()
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
| 44.814672 | 139 | 0.593177 | import commands
import json
import os
import shutil
import sys
import time
import pickle
import signal
from os.path import abspath as _abspath, join as _join
from pandayoda.yodacore import Interaction,Database,Logger
from EventServer.EventServerJobManager import EventServerJobManager
class Droid:
def __init__(self, globalWorkingDir, localWorkingDir):
self.__globalWorkingDir = globalWorkingDir
self.__localWorkingDir = localWorkingDir
self.__currentDir = None
self.__comm = Interaction.Requester()
self.__tmpLog = Logger.Logger()
self.__esJobManager = None
self.__rank = self.__comm.getRank()
self.__tmpLog.info("Rank %s: Global working dir: %s" % (self.__rank, self.__globalWorkingDir))
self.initWorkingDir()
self.__tmpLog.info("Rank %s: Current working dir: %s" % (self.__rank, self.__currentDir))
self.__poolFileCatalog = None
self.__inputFiles = None
self.__copyInputFiles = None
signal.signal(signal.SIGTERM, self.stop)
def initWorkingDir(self):
curdir = _abspath (self.__localWorkingDir)
wkdirname = "rank_%s" % str(self.__rank)
wkdir = _abspath (_join(curdir,wkdirname))
if not os.path.exists(wkdir):
os.makedirs (wkdir)
os.chdir (wkdir)
self.__currentDir = wkdir
def postExecJob(self):
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
localInputFile = os.path.join(os.getcwd(), os.path.basename(inputFile))
self.__tmpLog.debug("Rank %s: Remove input file: %s" % (self.__rank, localInputFile))
os.remove(localInputFile)
if self.__globalWorkingDir != self.__localWorkingDir:
command = "mv " + self.__currentDir + " " + self.__globalWorkingDir
self.__tmpLog.debug("Rank %s: copy files from local working directory to global working dir(cmd: %s)" % (self.__rank, command))
status, output = commands.getstatusoutput(command)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
def setup(self, job):
if True:
self.__poolFileCatalog = job.get('PoolFileCatalog', None)
self.__inputFiles = job.get('InputFiles', None)
self.__copyInputFiles = job.get('CopyInputFiles', False)
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
shutil.copy(inputFile, './')
pfc_name = os.path.basename(self.__poolFileCatalog)
pfc_name = os.path.join(os.getcwd(), pfc_name)
pfc_name_back = pfc_name + ".back"
shutil.copy2(self.__poolFileCatalog, pfc_name_back)
with open(pfc_name, 'wt') as pfc_out:
with open(pfc_name_back, 'rt') as pfc_in:
for line in pfc_in:
pfc_out.write(line.replace('HPCWORKINGDIR', os.getcwd()))
job["AthenaMPCmd"] = job["AthenaMPCmd"].replace('HPCWORKINGDIR', os.getcwd())
self.__esJobManager = EventServerJobManager(self.__rank)
self.__esJobManager.initMessageThread(socketname='EventService_EventRanges', context='local')
self.__esJobManager.initTokenExtractorProcess(job["TokenExtractCmd"])
self.__esJobManager.initAthenaMPProcess(job["AthenaMPCmd"])
return True, None
def getJob(self):
request = {'Test':'TEST'}
self.__tmpLog.debug("Rank %s: getJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
job = output["job"]
if statusCode == 0:
return True, job
return False, None
def getEventRanges(self):
request = {'nRanges': 1}
self.__tmpLog.debug("Rank %s: getEventRanges(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getEventRanges',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
eventRanges = output['eventRanges']
if statusCode == 0:
return True, eventRanges
return False, None
def updateEventRange(self, output):
try:
eventRangeID = output.split(",")[1]
except Exception, e:
self.__tmpLog.warnning("Rank %s: failed to get eventRangeID from output: %s" % (self.__rank, output))
self.__tmpLog.warnning("Rank %s: error message: %s" % (self.__rank, str(e)))
request = {"eventRangeID": eventRangeID,
'eventStatus':" finished",
"output": output}
self.__tmpLog.debug("Rank %s: updateEventRange(request: %s)" % (self.__rank, request))
retStatus, retOutput = self.__comm.sendRequest('updateEventRange',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, retStatus, retOutput))
if retStatus:
statusCode = retOutput["StatusCode"]
if statusCode == 0:
return True
return False
def finishJob(self):
request = {'state': 'finished'}
self.__tmpLog.debug("Rank %s: updateJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('updateJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def failedJob(self):
request = {'state': 'failed'}
self.__tmpLog.debug("Rank %s: updateJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('updateJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def waitYoda(self):
self.__tmpLog.debug("Rank %s: WaitYoda" % (self.__rank))
while True:
status, output = self.__comm.waitMessage()
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
state = output["State"]
if statusCode == 0 and state == 'finished':
return True
return True
def run(self):
self.__tmpLog.info("Droid Starts")
status, job = self.getJob()
self.__tmpLog.info("Rank %s: getJob(%s)" % (self.__rank, job))
if not status:
self.__tmpLog.debug("Rank %s: Failed to get job" % self.__rank)
self.failedJob()
return -1
status, output = self.setup(job)
self.__tmpLog.info("Rank %s: setup job(status:%s, output:%s)" % (self.__rank, status, output))
if not status:
self.__tmpLog.debug("Rank %s: Failed to setup job(%s)" % (self.__rank, output))
self.failedJob()
return -1
failedNum = 0
while not self.__esJobManager.isDead():
if self.__esJobManager.isNeedMoreEvents():
self.__tmpLog.info("Rank %s: need more events" % self.__rank)
status, eventRanges = self.getEventRanges()
if not status:
fileNum += 1
if fileNum > 30:
self.__tmpLog.warning("Rank %s: failed to get events more than 30 times. finish job" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
else:
continue
else:
fileNum = 0
self.__tmpLog.info("Rank %s: get event ranges(%s)" % (self.__rank, eventRanges))
if len(eventRanges) == 0:
self.__tmpLog.info("Rank %s: no more events" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
for eventRange in eventRanges:
self.__esJobManager.insertEventRange(eventRange)
self.__esJobManager.poll()
output = self.__esJobManager.getOutput()
if output is not None:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
time.sleep(2)
self.__esJobManager.flushMessages()
output = self.__esJobManager.getOutput()
while output:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
output = self.__esJobManager.getOutput()
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
self.waitYoda()
return 0
def stop(self, signum=None, frame=None):
self.__tmpLog.info('Rank %s: stop signal received' % self.__rank)
self.__esJobManager.terminate()
self.__esJobManager.flushMessages()
output = self.__esJobManager.getOutput()
while output:
self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
self.updateEventRange(output)
output = self.__esJobManager.getOutput()
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
self.__tmpLog.info('Rank %s: stop' % self.__rank)
def __del__(self):
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
| false | true |
f73046cc8c22aa50dfd3a3b28c9591b12f1c2237 | 2,457 | py | Python | tests/configs/realview64-o3-checker.py | ronaldof1s1/gem5 | 418ac1d9ab21049f6d21476bc66f08467c29a9e3 | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | tests/configs/realview64-o3-checker.py | akeley98/FU-pools | dcd47b7dad279246093081cab24b95cae363c3b3 | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | tests/configs/realview64-o3-checker.py | akeley98/FU-pools | dcd47b7dad279246093081cab24b95cae363c3b3 | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | # Copyright (c) 2012, 2017, 2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3
# Timing-mode full-system ARM regression: a single O3 (out-of-order) CPU
# with a shadow "checker" CPU validating each instruction, DDR3 memory.
root = LinuxArmFSSystemUniprocessor(mem_mode='timing',
                                    mem_class=DDR3_1600_8x8,
                                    cpu_class=O3_ARM_v7a_3,
                                    checker=True).create_root()
| 53.413043 | 72 | 0.764754 |
from m5.objects import *
from arm_generic import *
from common.cores.arm.O3_ARM_v7a import O3_ARM_v7a_3
root = LinuxArmFSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_8x8,
cpu_class=O3_ARM_v7a_3,
checker=True).create_root()
| true | true |
f73047495463637b196d2d01ac086770b021da5f | 274 | py | Python | edamino/logger.py | drevenzz/aminoCAT | 48c4de0da0c830a0550fdee7fb8499645c434e90 | [
"MIT"
] | null | null | null | edamino/logger.py | drevenzz/aminoCAT | 48c4de0da0c830a0550fdee7fb8499645c434e90 | [
"MIT"
] | null | null | null | edamino/logger.py | drevenzz/aminoCAT | 48c4de0da0c830a0550fdee7fb8499645c434e90 | [
"MIT"
] | null | null | null | import logging
__all__ = ['logger']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = logging.Formatter(fmt="%(levelname)s: %(message)s")
handler.setFormatter(fmt)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
| 21.076923 | 57 | 0.770073 | import logging
__all__ = ['logger']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = logging.Formatter(fmt="%(levelname)s: %(message)s")
handler.setFormatter(fmt)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
| true | true |
f7304753312193a3300eef211e2e820629c26beb | 745 | py | Python | Python/ex49.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | 1 | 2022-02-01T17:59:50.000Z | 2022-02-01T17:59:50.000Z | Python/ex49.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | null | null | null | Python/ex49.py | Anderson0312/Python | 1fd225378c55309640d584a4894393f7c40dc9ed | [
"MIT"
] | null | null | null | numeros = list()
# Read five integers, then report the largest and smallest values entered
# together with every (1-based) position at which each occurs.
for n in range(0, 5):
    numeros.append(int(input(f'Digite um valor para a posição {n}: ')))
print(f'Voce digitou os valores {numeros}')
# Hoist max()/min() out of the loops: the original recomputed them on
# every iteration.
maior = max(numeros)
menor = min(numeros)
print('O maior valor digitado foi {} nas possições'.format(maior), end=' ')
# enumerate() pairs each value with its index; positions are shown 1-based.
for pos, v in enumerate(numeros):
    if v == maior:
        print(f'{pos+1}', end=' ')
print('\nO menor numero digitado foi {} nas posições'.format(menor), end=' ')
for pos, v in enumerate(numeros):
    if v == menor:
        print(f'{pos + 1}', end=' ')
| 29.8 | 84 | 0.660403 | numeros = list()
for n in range(0, 5):
numeros.append(int(input(f'Digite um valor para a posição {n}: ')))
print(f'Voce digitou os valores {numeros}')
print('O maior valor digitado foi {} nas possições'.format(max(numeros)), end=' ')
for pos, v in enumerate(numeros):
if v == max(numeros):
print(f'{pos+1}', end=' ')
print('\nO menor numero digitado foi {} nas posições'.format(min(numeros)), end=' ')
for pos, v in enumerate(numeros):
if v == min(numeros):
print(f'{pos + 1}', end=' ')
| true | true |
f73047624b50599b4347a4f861a06e86d3173252 | 3,865 | py | Python | database.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | 4 | 2015-06-22T15:44:57.000Z | 2015-06-22T15:57:03.000Z | database.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | null | null | null | database.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | 2 | 2015-07-09T15:21:37.000Z | 2016-02-02T15:59:09.000Z | import sqlite3
from flask import g, Flask
from constants import Constants
import json
DATABASE = 'db/sqlite.db'  # path to the SQLite database file
app = Flask(Constants.APP_NAME)  # Flask app; also hosts the teardown hook below
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use.

    The connection is cached on Flask's application-context object ``g`` so
    that all queries within one request share a single connection.
    """
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db
@app.teardown_appcontext
def close_connection(exception):
    """Flask teardown hook: close the request's SQLite connection, if any."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
def serialize_result_to_individual(res,idname="id"):
return {idname: res[0], "parameters":json.loads(res[1]), "elo": res[2]}
class Database:
    """Static helpers over the SQLite tables used by the evolution app:
    ``stats`` (comparison counter), ``current`` (living individuals),
    ``historical`` (per-generation winners) and ``decisions`` (pairwise
    comparison log). All methods use the request-scoped connection from
    get_db()."""

    @staticmethod
    def incr_comparisons():
        """Increment the global comparison counter by one."""
        cursor = get_db().cursor()
        # Use parameter binding rather than %-interpolated SQL, consistent
        # with the rest of the class.
        cursor.execute('UPDATE stats SET num_comparisons = ?',
                       (Database.num_comparisons() + 1,))
        get_db().commit()

    @staticmethod
    def reset_comparisons():
        """Reset the global comparison counter to zero."""
        cursor = get_db().cursor()
        cursor.execute('UPDATE stats SET num_comparisons = 0')
        get_db().commit()

    @staticmethod
    def num_comparisons():
        """Return the current value of the comparison counter."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT num_comparisons FROM stats;')
        return cursor.fetchone()[0]

    @staticmethod
    def current_generation_is_empty():
        """Return True when the ``current`` table holds no individuals."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT * FROM current')
        return not cursor.fetchone()

    @staticmethod
    def add_individual_to_current_generation(parameters):
        """Insert a new individual with the default Elo rating of 1000."""
        string = json.dumps(parameters)
        cursor = get_db().cursor()
        cursor.execute('INSERT INTO current (parameters, elo) VALUES (?, 1000.0)', (string,))
        get_db().commit()

    @staticmethod
    def get_individual_for_id(idd):
        """Fetch one individual from the current generation by id."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current WHERE id = ?', (idd,))
        return serialize_result_to_individual(cursor.fetchone())

    @staticmethod
    def update_elo_for_id(idd, elo):
        """Store a new Elo rating for the given individual."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('UPDATE current SET elo = ? WHERE id = ?', (elo, idd))
        db.commit()

    @staticmethod
    def get_all_individuals_sorted():
        """Return all current individuals, best Elo first."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current ORDER BY elo DESC')
        return [serialize_result_to_individual(res) for res in cursor.fetchall()]

    @staticmethod
    def get_random_individuals(num):
        """Return up to ``num`` individuals sampled uniformly at random."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current ORDER BY RANDOM() LIMIT ?', (num,))
        return [serialize_result_to_individual(res) for res in cursor.fetchall()]

    @staticmethod
    def delete_individuals(individuals):
        """Delete the given individuals (dicts with an "id" key) from the
        current generation."""
        # Guard: "DELETE ... WHERE id IN ()" is a SQL syntax error.
        if not individuals:
            return
        cursor = get_db().cursor()
        # Bind ids as parameters instead of splicing them into the SQL text.
        placeholders = ", ".join("?" for _ in individuals)
        ids = tuple(ind["id"] for ind in individuals)
        cursor.execute('DELETE FROM current WHERE id IN (%s)' % placeholders, ids)
        get_db().commit()

    @staticmethod
    def get_historical_individuals():
        """Return all historical winners ordered by generation; each dict
        uses "gen" as its id key."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT gen, parameters, elo FROM historical ORDER BY gen')
        return [serialize_result_to_individual(res, "gen") for res in cursor.fetchall()]

    @staticmethod
    def add_historical_individual(individual):
        """Archive an individual (its parameters and Elo) as a generation
        winner."""
        string = json.dumps(individual['parameters'])
        elo = individual['elo']
        cursor = get_db().cursor()
        cursor.execute('INSERT INTO historical (parameters, elo) VALUES (?, ?)', (string, elo))
        get_db().commit()

    @staticmethod
    def record_decision(winner, loser):
        """Log a single pairwise comparison outcome."""
        db = get_db()
        cursor = db.cursor()
        data = (winner["id"], json.dumps(winner["parameters"]),
                loser["id"], json.dumps(loser["parameters"]))
        cursor.execute('INSERT INTO decisions (winner_id, winner_parameters, loser_id, loser_parameters) VALUES (?, ?, ?, ?)', data)
        # Bug fix: the insert was never committed, so recorded decisions
        # were lost when the connection closed.
        db.commit()
| 33.318966 | 132 | 0.636999 | import sqlite3
from flask import g, Flask
from constants import Constants
import json
DATABASE = 'db/sqlite.db'
app = Flask(Constants.APP_NAME)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def serialize_result_to_individual(res,idname="id"):
return {idname: res[0], "parameters":json.loads(res[1]), "elo": res[2]}
class Database:
    """Static helpers wrapping the app's SQLite tables via ``get_db()``.

    Tables touched: ``stats`` (global comparison counter), ``current``
    (individuals of the live generation, with JSON ``parameters`` and an
    ``elo`` rating), ``historical`` (archived individuals, keyed by ``gen``)
    and ``decisions`` (one row per recorded winner/loser comparison).
    """
    @staticmethod
    def incr_comparisons():
        """Increment the global comparison counter by one."""
        cursor = get_db().cursor()
        # NOTE(review): read-then-write is not atomic, and the new value is
        # spliced in with %d instead of a bound parameter (the other methods
        # in this class use ? placeholders).
        cursor.execute('UPDATE stats SET num_comparisons = %d WHERE 1 == 1' % (Database.num_comparisons() + 1))
        get_db().commit()
    @staticmethod
    def reset_comparisons():
        """Reset the global comparison counter to zero."""
        cursor = get_db().cursor()
        cursor.execute('UPDATE stats SET num_comparisons = 0 WHERE 1 == 1')
        get_db().commit()
    @staticmethod
    def num_comparisons():
        """Return the current value of the comparison counter."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT num_comparisons FROM stats;')
        return cursor.fetchone()[0]
    @staticmethod
    def current_generation_is_empty():
        """Return True when the ``current`` table holds no rows at all."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT * FROM current')
        return not cursor.fetchone()
    @staticmethod
    def add_individual_to_current_generation(parameters):
        """Insert *parameters* (JSON-serialised) with a starting Elo of 1000."""
        string = json.dumps(parameters)
        cursor = get_db().cursor()
        cursor.execute('INSERT INTO current (parameters, elo) VALUES (?, 1000.0)', (string,))
        get_db().commit()
    @staticmethod
    def get_individual_for_id(idd):
        """Fetch one individual from ``current`` by its primary key."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current WHERE id = ?', (idd,))
        return serialize_result_to_individual(cursor.fetchone())
    @staticmethod
    def update_elo_for_id(idd, elo):
        """Store a new Elo rating for the individual with id *idd*."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('UPDATE current SET elo = ? WHERE id = ?', (elo, idd))
        db.commit()
    @staticmethod
    def get_all_individuals_sorted():
        """Return every current individual, highest Elo first."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current ORDER BY elo DESC')
        return [serialize_result_to_individual(res) for res in cursor.fetchall()]
    @staticmethod
    def get_random_individuals(num):
        """Return up to *num* individuals drawn at random from ``current``."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT id, parameters, elo FROM current ORDER BY RANDOM() LIMIT ?', (num,))
        return [serialize_result_to_individual(res) for res in cursor.fetchall()]
    @staticmethod
    def delete_individuals(individuals):
        """Delete the given individuals (dicts carrying an ``id`` key)."""
        cursor = get_db().cursor()
        id_list = ", ".join(map(lambda x: str(x["id"]), individuals))
        # String interpolation is used because IN (...) cannot take a single
        # placeholder; assumes the ids are integers from our own rows - TODO
        # confirm nothing user-controlled ever reaches this path.
        cursor.execute('DELETE FROM current WHERE id IN (%s)' % id_list)
        get_db().commit()
    @staticmethod
    def get_historical_individuals():
        """Return archived individuals ordered by generation number."""
        db = get_db()
        cursor = db.cursor()
        cursor.execute('SELECT gen, parameters, elo FROM historical ORDER BY gen')
        return [serialize_result_to_individual(res,"gen") for res in cursor.fetchall()]
    @staticmethod
    def add_historical_individual(individual):
        """Archive *individual* (dict with 'parameters' and 'elo') into ``historical``."""
        string = json.dumps(individual['parameters'])
        elo = individual['elo']
        cursor = get_db().cursor()
        cursor.execute('INSERT INTO historical (parameters, elo) VALUES (?, ?)', (string,elo))
        get_db().commit()
    @staticmethod
    def record_decision(winner, loser):
        """Log one comparison outcome (winner beat loser) into ``decisions``."""
        db = get_db()
        cursor = db.cursor()
        data = (winner["id"],json.dumps(winner["parameters"]),loser["id"],json.dumps(loser["parameters"]))
        cursor.execute('INSERT INTO decisions (winner_id, winner_parameters, loser_id, loser_parameters) VALUES (?, ?, ?, ?)', data)
        # NOTE(review): unlike every other writer here, no commit() follows -
        # confirm a commit happens elsewhere or that this chunk is truncated.
| true | true |
f730492b5e3c2fa866c5ccaab58e6a2dbe382054 | 2,222 | py | Python | cirq-core/cirq/devices/device_test.py | dabacon/Cirq | 54286063f679d67501ff1b905cd16b879feaae27 | [
"Apache-2.0"
] | 1 | 2021-04-29T15:30:32.000Z | 2021-04-29T15:30:32.000Z | cirq-core/cirq/devices/device_test.py | dabacon/Cirq | 54286063f679d67501ff1b905cd16b879feaae27 | [
"Apache-2.0"
] | 1 | 2020-04-03T20:23:20.000Z | 2020-04-03T20:23:20.000Z | cirq-core/cirq/devices/device_test.py | dabacon/Cirq | 54286063f679d67501ff1b905cd16b879feaae27 | [
"Apache-2.0"
] | 2 | 2021-09-22T11:16:46.000Z | 2021-09-23T12:55:22.000Z | # pylint: disable=wrong-or-nonexistent-copyright-notice
import pytest
import cirq
def test_qubit_set():
    """qubit_set() picks up qubits exposed as an attribute or a method,
    public or private, and is None when the device declares nothing."""

    class BareDevice(cirq.Device):
        pass

    assert BareDevice().qubit_set() is None

    class AttrDevice(cirq.Device):
        def __init__(self):
            self.qubits = cirq.LineQubit.range(3)

    assert AttrDevice().qubit_set() == frozenset(cirq.LineQubit.range(3))

    class PrivateAttrDevice(cirq.Device):
        def __init__(self):
            self._qubits = cirq.LineQubit.range(4)

    assert PrivateAttrDevice().qubit_set() == frozenset(cirq.LineQubit.range(4))

    class MethodDevice(cirq.Device):
        def qubits(self):
            return cirq.LineQubit.range(5)

    assert MethodDevice().qubit_set() == frozenset(cirq.LineQubit.range(5))

    class PrivateMethodDevice(cirq.Device):
        def _qubits(self):
            return cirq.LineQubit.range(6)

    assert PrivateMethodDevice().qubit_set() == frozenset(cirq.LineQubit.range(6))
def test_qid_pairs():
    """qid_pairs() is None without qubit info, else it returns the pair
    counts asserted below for line, grid and named qubits."""

    class BareDevice(cirq.Device):
        pass

    assert BareDevice().qid_pairs() is None

    class QubitFieldDevice(cirq.Device):
        def __init__(self, qubits):
            self.qubits = qubits

    line = cirq.LineQubit.range(10)
    grid = cirq.GridQubit.rect(10, 10)
    named = [cirq.NamedQubit(str(i)) for i in range(10)]

    assert len(QubitFieldDevice(line).qid_pairs()) == 9
    assert len(QubitFieldDevice(grid).qid_pairs()) == 180
    assert len(QubitFieldDevice(named).qid_pairs()) == 45
def test_qid_pair():
    """SymmetricalQidPair: unordered equality, repr, iteration, containment,
    hashing, and rejection of a pair of identical qids."""
    qa, qb, qc, qd = cirq.LineQubit.range(4)
    pair_ab = cirq.SymmetricalQidPair(qa, qb)
    pair_ba = cirq.SymmetricalQidPair(qb, qa)
    pair_cd = cirq.SymmetricalQidPair(qc, qd)

    # Construction order does not matter for equality.
    assert pair_ab == pair_ba
    assert pair_ba != pair_cd
    assert repr(pair_ab) == "cirq.QidPair(cirq.LineQubit(0), cirq.LineQubit(1))"

    assert len(pair_ab) == 2
    # Iteration yields a canonical order regardless of construction order.
    first, second = pair_ab
    assert (first, second) == (qa, qb)
    first, second = pair_ba
    assert (first, second) == (qa, qb)

    assert qa in pair_ab
    assert qb in pair_ab
    assert qc not in pair_ab

    # Equal pairs hash equally, so they collapse inside a set.
    assert len(frozenset([pair_ab, pair_ba])) == 1
    assert len(frozenset([pair_ba, pair_cd])) == 2

    with pytest.raises(ValueError, match='A QidPair cannot have identical qids.'):
        cirq.SymmetricalQidPair(qa, qa)
| 28.487179 | 96 | 0.654815 |
import pytest
import cirq
def test_qubit_set():
    """qubit_set() should pick up qubits exposed as an attribute or method,
    public or private, and return None when a device declares nothing."""
    class RawDevice(cirq.Device):
        pass
    assert RawDevice().qubit_set() is None
    class QubitFieldDevice(cirq.Device):
        def __init__(self):
            self.qubits = cirq.LineQubit.range(3)
    assert QubitFieldDevice().qubit_set() == frozenset(cirq.LineQubit.range(3))
    class PrivateQubitFieldDevice(cirq.Device):
        def __init__(self):
            self._qubits = cirq.LineQubit.range(4)
    assert PrivateQubitFieldDevice().qubit_set() == frozenset(cirq.LineQubit.range(4))
    class QubitMethodDevice(cirq.Device):
        def qubits(self):
            return cirq.LineQubit.range(5)
    assert QubitMethodDevice().qubit_set() == frozenset(cirq.LineQubit.range(5))
    class PrivateQubitMethodDevice(cirq.Device):
        def _qubits(self):
            return cirq.LineQubit.range(6)
    assert PrivateQubitMethodDevice().qubit_set() == frozenset(cirq.LineQubit.range(6))
def test_qid_pairs():
    """qid_pairs() is None without qubit info; otherwise its length matches
    the expected pair counts for line, grid and named qubits."""
    class RawDevice(cirq.Device):
        pass
    assert RawDevice().qid_pairs() is None
    class QubitFieldDevice(cirq.Device):
        def __init__(self, qubits):
            self.qubits = qubits
    # 10 line qubits -> 9 pairs; 10x10 grid -> 180 pairs;
    # 10 named qubits -> C(10, 2) = 45 pairs.
    assert len(QubitFieldDevice(cirq.LineQubit.range(10)).qid_pairs()) == 9
    assert len(QubitFieldDevice(cirq.GridQubit.rect(10, 10)).qid_pairs()) == 180
    assert len(QubitFieldDevice([cirq.NamedQubit(str(s)) for s in range(10)]).qid_pairs()) == 45
def test_qid_pair():
    """SymmetricalQidPair: unordered equality, repr, iteration/containment,
    hashing, and rejection of identical qids."""
    q0, q1, q2, q3 = cirq.LineQubit.range(4)
    e1 = cirq.SymmetricalQidPair(q0, q1)
    e2 = cirq.SymmetricalQidPair(q1, q0)
    e3 = cirq.SymmetricalQidPair(q2, q3)
    # Pairs compare equal regardless of construction order.
    assert e1 == e2
    assert e2 != e3
    assert repr(e1) == "cirq.QidPair(cirq.LineQubit(0), cirq.LineQubit(1))"
    assert len(e1) == 2
    # Iteration yields a canonical order independent of construction order.
    a, b = e1
    assert (a, b) == (q0, q1)
    a, b = e2
    assert (a, b) == (q0, q1)
    assert q0 in e1
    assert q1 in e1
    assert q2 not in e1
    # Equal pairs hash equally, so they collapse inside a set.
    set1 = frozenset([e1, e2])
    set2 = frozenset([e2, e3])
    assert len(set1) == 1
    assert len(set2) == 2
    with pytest.raises(ValueError, match='A QidPair cannot have identical qids.'):
        cirq.SymmetricalQidPair(q0, q0)
| true | true |
f7304932a03b54e012707f27c70ff5f74726299a | 1,481 | py | Python | contributions/statistics.py | t170815518/contributions-graph | 7a3c3477afaa578789bdbfb41554f267d89f1ace | [
"MIT"
] | 106 | 2015-09-15T04:53:24.000Z | 2022-01-25T22:11:56.000Z | contributions/statistics.py | t170815518/contributions-graph | 7a3c3477afaa578789bdbfb41554f267d89f1ace | [
"MIT"
] | 2 | 2017-05-03T09:27:24.000Z | 2017-09-14T06:49:42.000Z | contributions/statistics.py | t170815518/contributions-graph | 7a3c3477afaa578789bdbfb41554f267d89f1ace | [
"MIT"
] | 22 | 2016-07-25T06:54:06.000Z | 2021-10-15T00:25:56.000Z | #!/usr/bin/env python
import dateutils
def quartiles(values):
    """
    Return rough quartile boundaries of a series of values: the points at
    0%, 25%, 50%, 75% and 100% of the maximum.  This is not intended to be
    statistically correct - it's just a quick 'n' dirty measure based on
    the maximum alone (the original docstring said the opposite and called
    them "quintlines").

    Raises ValueError for an empty *values* (``max`` of an empty sequence).
    """
    top = max(values)  # hoisted so max() is evaluated once, not five times
    return [i * top / 4 for i in range(5)]
def longest_streak(dates):
    """
    Given a list of datetime.date objects, return the longest run of
    consecutive dates as a sorted sublist.  When several runs share the
    maximal length, the earliest one is returned.  An empty input yields
    an empty list.
    """
    if not dates:
        return []

    ordered = sorted(dates)
    best = []
    run = [ordered[0]]

    for day in ordered[1:]:
        if dateutils.previous_day(day) == run[-1]:
            # ``day`` directly follows the end of the current run.
            run.append(day)
        else:
            # Run broken: keep it only if strictly longer than the best so
            # far (strict comparison preserves "first run wins ties").
            if len(run) > len(best):
                best = run
            run = [day]

    # The run in progress when the loop ends was never checked above.
    return run if len(run) > len(best) else best
def current_streak(dates):
    """
    Given a list of datetime.date objects, return today's date (if present)
    together with all/any immediately preceding consecutive dates, in
    ascending order.
    """
    run = []
    day = dateutils.today()
    # Walk backwards from today for as long as each day appears in *dates*.
    while day in dates:
        run.append(day)
        day = dateutils.previous_day(day)
    return sorted(run)
import dateutils
def quartiles(values):
    """Return the 0/25/50/75/100% points of ``max(values)`` - a rough
    measure, not statistically correct quartiles."""
    return [i * max(values) / 4 for i in range(5)]
def longest_streak(dates):
    """Return the longest run of consecutive dates (earliest run wins ties)."""
    if not dates:
        return []
    dates = sorted(dates)
    streaks = []
    current_streak = [dates[0]]
    # Extend the current run while each date immediately follows the last.
    for idx in range(1, len(dates)):
        date = dates[idx]
        if dateutils.previous_day(date) == current_streak[-1]:
            current_streak.append(date)
        else:
            streaks.append(current_streak)
            current_streak = [date]
    # Don't forget the run that was still in progress when the loop ended.
    streaks.append(current_streak)
    return max(streaks, key=len)
def current_streak(dates):
    """Return today's date (if present) plus any preceding consecutive dates."""
    streak = []
    current_date = dateutils.today()
    # Walk backwards from today while each day is present in ``dates``.
    while current_date in dates:
        streak.append(current_date)
        current_date = dateutils.previous_day(current_date)
    return sorted(streak)
return sorted(streak) | true | true |
f730495e98e013a7ae5f3395303f9cca988c62f0 | 439 | py | Python | aoc2020/day_03/part_1.py | en0/aoc2020 | a2f41b909dffe1f366682b3d03fd5fbdbc924ec0 | [
"MIT"
] | null | null | null | aoc2020/day_03/part_1.py | en0/aoc2020 | a2f41b909dffe1f366682b3d03fd5fbdbc924ec0 | [
"MIT"
] | null | null | null | aoc2020/day_03/part_1.py | en0/aoc2020 | a2f41b909dffe1f366682b3d03fd5fbdbc924ec0 | [
"MIT"
] | null | null | null | from aoc2020 import *
class Solution(SolutionABC):
    """Count '#' cells hit while stepping right 3 / down 1 across the map."""

    # Expected result for the bundled example input.
    expected = 7

    def solve(self) -> any:
        rows = self.resource_lines("input")
        next(rows, None)  # the starting row is never checked
        col = trees = 0
        for row in rows:
            col = (col + 3) % len(row)  # the terrain repeats horizontally
            if row[col] == '#':
                trees += 1
        return trees
| 21.95 | 56 | 0.43508 | from aoc2020 import *
class Solution(SolutionABC):
    # Expected result for the bundled example input.
    expected = 7
    def solve(self) -> any:
        # x = current column, rt = '#' cells hit, rows = iterator of lines.
        x, rt, rows = 0, 0, self.resource_lines("input")
        try:
            # Skip the starting row, then step right 3 / down 1 per line.
            next(rows)
            while True:
                row = next(rows)
                # The terrain pattern repeats horizontally, hence the modulo.
                x = (x + 3) % len(row)
                if row[x] == '#':
                    rt += 1
        except StopIteration:
            # Ran off the bottom of the map: return the count.
            return rt
| true | true |
f73049b42798b7cba68acbf111d74dde4a7db3b6 | 1,642 | py | Python | clock/contact/forms.py | chgad/django-clock | f855cd1253574c0582ed53a0ac34206c242f04c9 | [
"MIT"
] | null | null | null | clock/contact/forms.py | chgad/django-clock | f855cd1253574c0582ed53a0ac34206c242f04c9 | [
"MIT"
] | null | null | null | clock/contact/forms.py | chgad/django-clock | f855cd1253574c0582ed53a0ac34206c242f04c9 | [
"MIT"
] | 1 | 2020-03-13T14:42:11.000Z | 2020-03-13T14:42:11.000Z | # -*- coding: utf-8 -*-
from captcha.fields import ReCaptchaField
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
class ContactForm(forms.Form):
    """Contact form: name, e-mail, message, optional self-CC and a reCAPTCHA.

    ``send_mail`` delivers a validated form's message to the addresses
    configured in ``settings.CONTACT_FORM_RECIPIENT``.
    """

    name = forms.CharField(max_length=200, label=_('Name'))
    sender = forms.EmailField(label=_('E-Mail'))
    message = forms.CharField(widget=forms.Textarea, label=_('Message'))
    cc_myself = forms.BooleanField(label=_('Send a copy of the mail to myself'), required=False)
    # NOTE(review): get_language() runs once at import time, when the class
    # body is evaluated - confirm a per-request language is not expected.
    captcha = ReCaptchaField(attrs={'lang': get_language()})

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # crispy-forms layout: POST back to the current URL, one submit button.
        self.helper = FormHelper(self)
        self.helper.form_action = '.'
        self.helper.form_method = 'post'
        self.helper.form_class = 'form-halfpage'
        self.helper.layout.append(FormActions(
            Submit('submit', _('Submit'), css_class='btn btn-primary pull-right'),
        ))

    def send_mail(self, form):
        """Send the message from *form* (validated) and redirect to /thanks/."""
        message = form.cleaned_data['message']
        sender = form.cleaned_data['sender']
        cc_myself = form.cleaned_data['cc_myself']

        # BUGFIX: copy the configured list. Appending to the settings object
        # itself permanently grew the recipient list for every later request.
        recipients = list(settings.CONTACT_FORM_RECIPIENT)
        if cc_myself:
            recipients.append(sender)

        send_mail(settings.CONTACT_FORM_SUBJECT, message, sender, recipients)
        return HttpResponseRedirect('/thanks/')
| 38.186047 | 96 | 0.708892 |
from captcha.fields import ReCaptchaField
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
class ContactForm(forms.Form):
    """Contact form: name, e-mail, message, optional self-CC and a reCAPTCHA.

    ``send_mail`` delivers a validated form's message to the addresses
    configured in ``settings.CONTACT_FORM_RECIPIENT``.
    """

    name = forms.CharField(max_length=200, label=_('Name'))
    sender = forms.EmailField(label=_('E-Mail'))
    message = forms.CharField(widget=forms.Textarea, label=_('Message'))
    cc_myself = forms.BooleanField(label=_('Send a copy of the mail to myself'), required=False)
    # NOTE(review): get_language() runs once at import time, when the class
    # body is evaluated - confirm a per-request language is not expected.
    captcha = ReCaptchaField(attrs={'lang': get_language()})

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # crispy-forms layout: POST back to the current URL, one submit button.
        self.helper = FormHelper(self)
        self.helper.form_action = '.'
        self.helper.form_method = 'post'
        self.helper.form_class = 'form-halfpage'
        self.helper.layout.append(FormActions(
            Submit('submit', _('Submit'), css_class='btn btn-primary pull-right'),
        ))

    def send_mail(self, form):
        """Send the message from *form* (validated) and redirect to /thanks/."""
        message = form.cleaned_data['message']
        sender = form.cleaned_data['sender']
        cc_myself = form.cleaned_data['cc_myself']

        # BUGFIX: copy the configured list. Appending to the settings object
        # itself permanently grew the recipient list for every later request.
        recipients = list(settings.CONTACT_FORM_RECIPIENT)
        if cc_myself:
            recipients.append(sender)

        send_mail(settings.CONTACT_FORM_SUBJECT, message, sender, recipients)
        return HttpResponseRedirect('/thanks/')
| true | true |
f73049e4bd6fa9b18eb2403dd15770e56954fa20 | 1,420 | py | Python | ggplot/geoms/__init__.py | briandk/ggplot | 179bc1bb61501bbfa8aaa7ef2d0a25150c8863dd | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/__init__.py | briandk/ggplot | 179bc1bb61501bbfa8aaa7ef2d0a25150c8863dd | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/__init__.py | briandk/ggplot | 179bc1bb61501bbfa8aaa7ef2d0a25150c8863dd | [
"BSD-2-Clause"
] | 1 | 2020-11-14T13:31:11.000Z | 2020-11-14T13:31:11.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
# geoms
from .geom_abline import geom_abline
from .geom_area import geom_area
from .geom_bar import geom_bar
from .geom_density import geom_density
from .geom_histogram import geom_histogram
from .geom_hline import geom_hline
from .geom_jitter import geom_jitter
from .geom_line import geom_line
from .geom_now_its_art import geom_now_its_art
from .geom_point import geom_point
from .geom_rect import geom_rect
from .geom_step import geom_step
from .geom_text import geom_text
from .geom_tile import geom_tile
from .geom_vline import geom_vline
# stats
from .stat_bin2d import stat_bin2d
from .stat_function import stat_function
from .stat_smooth import stat_smooth
# misc
from .facet_grid import facet_grid
from .facet_wrap import facet_wrap
from .chart_components import *
# Public API re-exported by ``from ... import *``, grouped by kind.
__facet__ = ['facet_grid', 'facet_wrap']
__geoms__ = ['geom_abline', 'geom_area', 'geom_bar', 'geom_density',
             'geom_histogram', 'geom_hline', 'geom_jitter', 'geom_line',
             'geom_now_its_art', 'geom_point', 'geom_rect', 'geom_step',
             'geom_text', 'geom_tile', 'geom_vline']
__stats__ = ['stat_bin2d', 'stat_smooth', 'stat_function']
__components__ = ['ylab', 'xlab', 'ylim', 'xlim', 'labs', 'ggtitle']
__all__ = __geoms__ + __facet__ + __stats__ + __components__
# Force plain ``str`` entries: with ``unicode_literals`` imported above, the
# literals here are unicode on Python 2, but __all__ must contain str.
__all__ = [str(u) for u in __all__]
| 38.378378 | 73 | 0.762676 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .geom_abline import geom_abline
from .geom_area import geom_area
from .geom_bar import geom_bar
from .geom_density import geom_density
from .geom_histogram import geom_histogram
from .geom_hline import geom_hline
from .geom_jitter import geom_jitter
from .geom_line import geom_line
from .geom_now_its_art import geom_now_its_art
from .geom_point import geom_point
from .geom_rect import geom_rect
from .geom_step import geom_step
from .geom_text import geom_text
from .geom_tile import geom_tile
from .geom_vline import geom_vline
from .stat_bin2d import stat_bin2d
from .stat_function import stat_function
from .stat_smooth import stat_smooth
from .facet_grid import facet_grid
from .facet_wrap import facet_wrap
from .chart_components import *
# Public API re-exported by ``from ... import *``, grouped by kind.
__facet__ = ['facet_grid', 'facet_wrap']
__geoms__ = ['geom_abline', 'geom_area', 'geom_bar', 'geom_density',
             'geom_histogram', 'geom_hline', 'geom_jitter', 'geom_line',
             'geom_now_its_art', 'geom_point', 'geom_rect', 'geom_step',
             'geom_text', 'geom_tile', 'geom_vline']
__stats__ = ['stat_bin2d', 'stat_smooth', 'stat_function']
__components__ = ['ylab', 'xlab', 'ylim', 'xlim', 'labs', 'ggtitle']
__all__ = __geoms__ + __facet__ + __stats__ + __components__
# Force plain ``str`` entries: with ``unicode_literals`` imported above, the
# literals here are unicode on Python 2, but __all__ must contain str.
__all__ = [str(u) for u in __all__]
| true | true |
f7304c8702cd45f0614fd6d7a10b566c1f7152f7 | 8,043 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/defaulttlv_8e41257d3d01ec013783dd0fd6697862.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/defaulttlv_8e41257d3d01ec013783dd0fd6697862.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/defaulttlv_8e41257d3d01ec013783dd0fd6697862.py | rfrye-github/ixnetwork_restpy | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class DefaultTlv(Base):
    """Default Tlv container created by protocols
    The DefaultTlv class encapsulates a list of defaultTlv resources that are managed by the system.
    A list of resources can be retrieved from the server using the DefaultTlv.find() method.
    """
    __slots__ = ()
    _SDM_NAME = 'defaultTlv'
    # Maps this class's Python property names to the server-side (SDM)
    # attribute names used in REST payloads.
    _SDM_ATT_MAP = {
        'AvailableIncludeInMessages': 'availableIncludeInMessages',
        'Description': 'description',
        'EnablePerSession': 'enablePerSession',
        'IncludeInMessages': 'includeInMessages',
        'IsEnabled': 'isEnabled',
        'Name': 'name',
    }
    def __init__(self, parent):
        super(DefaultTlv, self).__init__(parent)
    @property
    def Value(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca.Value): An instance of the Value class
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Local import - presumably avoids an import cycle; TODO confirm.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca import Value
        return Value(self)._select()
    @property
    def AvailableIncludeInMessages(self):
        """
        Returns
        -------
        - list(str): A list of available messages which are used in the includeInMessages attribute
        """
        return self._get_attribute(self._SDM_ATT_MAP['AvailableIncludeInMessages'])
    @property
    def Description(self):
        """
        Returns
        -------
        - str: Description of the tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['Description'])
    @Description.setter
    def Description(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Description'], value)
    @property
    def EnablePerSession(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.multivalue.Multivalue): Enable TLV per session
        """
        # Local import - presumably avoids an import cycle; TODO confirm.
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnablePerSession']))
    @property
    def IncludeInMessages(self):
        """
        Returns
        -------
        - list(str): Include the TLV in these protocol messages
        """
        return self._get_attribute(self._SDM_ATT_MAP['IncludeInMessages'])
    @IncludeInMessages.setter
    def IncludeInMessages(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IncludeInMessages'], value)
    @property
    def IsEnabled(self):
        """
        Returns
        -------
        - bool: Enables/disables this tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['IsEnabled'])
    @IsEnabled.setter
    def IsEnabled(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IsEnabled'], value)
    @property
    def Name(self):
        """
        Returns
        -------
        - str: Name of the tlv
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, Description=None, IncludeInMessages=None, IsEnabled=None, Name=None):
        """Updates defaultTlv resource on the server.
        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.
        Args
        ----
        - Description (str): Description of the tlv
        - IncludeInMessages (list(str)): Include the TLV in these protocol messages
        - IsEnabled (bool): Enables/disables this tlv
        - Name (str): Name of the tlv
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, AvailableIncludeInMessages=None, Description=None, IncludeInMessages=None, IsEnabled=None, Name=None):
        """Finds and retrieves defaultTlv resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve defaultTlv resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all defaultTlv resources from the server.
        Args
        ----
        - AvailableIncludeInMessages (list(str)): A list of available messages which are used in the includeInMessages attribute
        - Description (str): Description of the tlv
        - IncludeInMessages (list(str)): Include the TLV in these protocol messages
        - IsEnabled (bool): Enables/disables this tlv
        - Name (str): Name of the tlv
        Returns
        -------
        - self: This instance with matching defaultTlv resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of defaultTlv data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the defaultTlv resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def get_device_ids(self, PortNames=None, EnablePerSession=None):
        """Base class infrastructure that gets a list of defaultTlv device ids encapsulated by this object.
        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
        Args
        ----
        - PortNames (str): optional regex of port names
        - EnablePerSession (str): optional regex of enablePerSession
        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
| 38.668269 | 165 | 0.652617 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class DefaultTlv(Base):
    """Container for defaultTlv resources managed by the system; retrieve
    matching resources from the server with find()."""
    __slots__ = ()
    _SDM_NAME = 'defaultTlv'
    # Maps this class's Python property names to the server-side (SDM)
    # attribute names used in REST payloads.
    _SDM_ATT_MAP = {
        'AvailableIncludeInMessages': 'availableIncludeInMessages',
        'Description': 'description',
        'EnablePerSession': 'enablePerSession',
        'IncludeInMessages': 'includeInMessages',
        'IsEnabled': 'isEnabled',
        'Name': 'name',
    }
    def __init__(self, parent):
        super(DefaultTlv, self).__init__(parent)
    @property
    def Value(self):
        """Return an instance of the nested Value class for this TLV."""
        # Local import - presumably avoids an import cycle; TODO confirm.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca import Value
        return Value(self)._select()
    @property
    def AvailableIncludeInMessages(self):
        """list(str): messages usable in the IncludeInMessages attribute."""
        return self._get_attribute(self._SDM_ATT_MAP['AvailableIncludeInMessages'])
    @property
    def Description(self):
        """str: description of the tlv."""
        return self._get_attribute(self._SDM_ATT_MAP['Description'])
    @Description.setter
    def Description(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Description'], value)
    @property
    def EnablePerSession(self):
        """Multivalue: enable the TLV per session."""
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnablePerSession']))
    @property
    def IncludeInMessages(self):
        """list(str): protocol messages the TLV is included in."""
        return self._get_attribute(self._SDM_ATT_MAP['IncludeInMessages'])
    @IncludeInMessages.setter
    def IncludeInMessages(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IncludeInMessages'], value)
    @property
    def IsEnabled(self):
        """bool: enables/disables this tlv."""
        return self._get_attribute(self._SDM_ATT_MAP['IsEnabled'])
    @IsEnabled.setter
    def IsEnabled(self, value):
        self._set_attribute(self._SDM_ATT_MAP['IsEnabled'], value)
    @property
    def Name(self):
        """str: name of the tlv."""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, Description=None, IncludeInMessages=None, IsEnabled=None, Name=None):
        """Update this defaultTlv resource on the server."""
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, AvailableIncludeInMessages=None, Description=None, IncludeInMessages=None, IsEnabled=None, Name=None):
        """Find and retrieve defaultTlv resources; params are server-side regexes."""
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieve a single defaultTlv instance identified by *href*."""
        return self._read(href)
    def get_device_ids(self, PortNames=None, EnablePerSession=None):
        """Return device ids encapsulated by this object, filtered by regexes."""
        return self._get_ngpf_device_ids(locals())
| true | true |
f7304dbc9e1394e220698ba5c8e2752e04b4528b | 5,563 | py | Python | sources/ebf-demo/scripts/test/imu.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | 1 | 2020-01-13T13:16:52.000Z | 2020-01-13T13:16:52.000Z | sources/ebf-demo/scripts/test/imu.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | 3 | 2019-11-20T02:53:01.000Z | 2019-12-26T03:00:15.000Z | sources/ebf-demo/scripts/test/imu.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import socket
import json
from pygame.locals import *
SCREEN_SIZE = (800, 600)
address = ('', 5000)
def resize(width, height):
    """Reset viewport, perspective projection and camera for a new size."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(width) / height, 0.001, 10.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # Camera at (1, 2, -5) looking at the origin, +Y up.
    gluLookAt(1.0, 2.0, -5.0,
              0.0, 0.0, 0.0,
              0.0, 1.0, 0.0)
def init():
    """One-time OpenGL state: depth test, black clear colour, smoothing."""
    glEnable(GL_DEPTH_TEST)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_BLEND)
    glEnable(GL_POLYGON_SMOOTH)
    glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
def setupSocket():
    """Create the UDP socket receiving IMU datagrams and bind it to *address*.

    The socket is stored in the module-level ``sock`` global and is left in
    its default blocking mode, so ``read_data`` waits for each datagram.
    """
    global sock
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Allow quick restarts without "Address already in use" bind errors.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(address)
def read_data():
    """Block until one UDP datagram arrives and unpack it into the globals.

    Populates ``ax, ay, az`` (filtered angles), ``acx, acy, acz``
    (accelerometer values) and ``temp`` from the JSON payload; all are
    reset to 0.0 before each receive.
    """
    global ax, ay, az, acx, acy, acz, temp
    ax = ay = az = acx = acy = acz = temp = 0.0
    msg, addr = sock.recvfrom(1024)
    if msg:
        # BUGFIX: the original called msg.decode() but discarded the result
        # and parsed the raw bytes; decode before parsing.
        data = json.loads(msg.decode())
        ax, ay, az = data["filter"]
        acx, acy, acz = data["accel"]
        temp = data["temp"]
def drawText(position, textString):
    """Render *textString* as white-on-black bitmap text at 3-D *position*."""
    # NOTE(review): the font is rebuilt on every call; caching it at module
    # level would avoid the repeated SysFont lookup.
    font = pygame.font.SysFont("Courier", 18, True)
    textSurface = font.render(textString, True, (255,255,255,255), (0,0,0,255))
    textData = pygame.image.tostring(textSurface, "RGBA", True)
    glRasterPos3d(*position)
    glDrawPixels(textSurface.get_width(), textSurface.get_height(), GL_RGBA, GL_UNSIGNED_BYTE, textData)
def run():
    """Main loop: open the UDP socket, set up pygame/OpenGL, then render the
    cube with the latest IMU orientation until QUIT or Escape."""
    setupSocket()
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE | OPENGL | DOUBLEBUF)
    resize(*SCREEN_SIZE)
    init()
    # NOTE(review): ``clock``, ``angle`` and ``then`` below are never used.
    clock = pygame.time.Clock()
    cube = Cube((0.0, 0.0, 0.0), (.5, .5, .7))
    angle = 0
    while True:
        then = pygame.time.get_ticks()
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            if event.type == KEYUP and event.key == K_ESCAPE:
                return
        # Blocks until the next IMU datagram arrives (see read_data).
        read_data()
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # On-screen readouts: filtered angles, accelerometer and temperature.
        text = "pitch: " + str("{0:.1f}".format(ay)) + " roll: " + str("{0:.1f}".format(ax))
        drawText((1, -4, 2), text)
        text = "accx: " + str("{0:.2f}".format(acx)) + " accy: " + str("{0:.2f}".format(acy)) + " accz: " + str(
            "{0:.2f}".format(acz))
        drawText((1, -4.3, 2), text)
        text = "temp: " + str("{0:.1f}".format(temp))
        drawText((1, -4.6, 2), text)
        # White wireframe grid on the floor and walls of the scene box.
        glColor((1., 1., 1.))
        glLineWidth(1)
        glBegin(GL_LINES)
        for x in range(-20, 22, 2):
            glVertex3f(x / 10., -1, -1)
            glVertex3f(x / 10., -1, 1)
        for x in range(-20, 22, 2):
            glVertex3f(x / 10., -1, 1)
            glVertex3f(x / 10., 1, 1)
        for z in range(-10, 12, 2):
            glVertex3f(-2, -1, z / 10.)
            glVertex3f(2, -1, z / 10.)
        for z in range(-10, 12, 2):
            glVertex3f(-2, -1, z / 10.)
            glVertex3f(-2, 1, z / 10.)
        for z in range(-10, 12, 2):
            glVertex3f(2, -1, z / 10.)
            glVertex3f(2, 1, z / 10.)
        for y in range(-10, 12, 2):
            glVertex3f(-2, y / 10., 1)
            glVertex3f(2, y / 10., 1)
        for y in range(-10, 12, 2):
            glVertex3f(-2, y / 10., 1)
            glVertex3f(-2, y / 10., -1)
        for y in range(-10, 12, 2):
            glVertex3f(2, y / 10., 1)
            glVertex3f(2, y / 10., -1)
        glEnd()
        # Orient the cube: az about Y, ay about X, ax about Z, then draw it.
        glPushMatrix()
        glRotate(az, 0, 1, 0)
        glRotate(ay, 1, 0, 0)
        glRotate(ax, 0, 0, 1)
        cube.render()
        glPopMatrix()
        pygame.display.flip()
class Cube(object):
    """A flat-shaded rectangular box (2.0 x 0.4 x 1.0, from the vertex list)
    centred on the origin.

    ``position`` and ``color`` are per-instance; the geometry (vertices,
    per-face normals and vertex indices) is shared class data.
    """

    def __init__(self, position, color):
        self.position = position  # NOTE(review): stored but unused by render()
        self.color = color

    # Cube information
    num_faces = 6

    vertices = [(-1.0, -0.2, 0.5),
                (1.0, -0.2, 0.5),
                (1.0, 0.2, 0.5),
                (-1.0, 0.2, 0.5),
                (-1.0, -0.2, -0.5),
                (1.0, -0.2, -0.5),
                (1.0, 0.2, -0.5),
                (-1.0, 0.2, -0.5)]

    normals = [(0.0, 0.0, +1.0),  # front
               (0.0, 0.0, -1.0),  # back
               (+1.0, 0.0, 0.0),  # right
               (-1.0, 0.0, 0.0),  # left
               (0.0, +1.0, 0.0),  # top
               (0.0, -1.0, 0.0)]  # bottom

    vertex_indices = [(0, 1, 2, 3),  # front
                      (4, 5, 6, 7),  # back
                      (1, 5, 6, 2),  # right
                      (0, 4, 7, 3),  # left
                      (3, 2, 6, 7),  # top
                      (0, 1, 5, 4)]  # bottom

    def render(self):
        """Draw all six faces as GL quads.

        Face 1 (the back face per the index tables above) is painted red so
        the cube's orientation is visible; the others use ``self.color``.
        (The original also called pygame.time.get_ticks() here and discarded
        the result - that dead call has been removed.)
        """
        vertices = self.vertices
        glBegin(GL_QUADS)
        for face_no in range(self.num_faces):
            if face_no == 1:
                glColor(1.0, 0.0, 0.0)
            else:
                glColor(self.color)
            glNormal3dv(self.normals[face_no])
            v1, v2, v3, v4 = self.vertex_indices[face_no]
            glVertex(vertices[v1])
            glVertex(vertices[v2])
            glVertex(vertices[v3])
            glVertex(vertices[v4])
        glEnd()
if __name__ == "__main__":
run() | 27.815 | 113 | 0.472047 |
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import socket
import json
from pygame.locals import *
SCREEN_SIZE = (800, 600)
address = ('', 5000)
def resize(width, height):
    """Reset viewport, perspective projection and camera for a new size."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(width) / height, 0.001, 10.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # Camera at (1, 2, -5) looking at the origin, +Y up.
    gluLookAt(1.0, 2.0, -5.0,
              0.0, 0.0, 0.0,
              0.0, 1.0, 0.0)
def init():
    """One-time OpenGL state: depth test, black clear colour, smoothing."""
    glEnable(GL_DEPTH_TEST)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_BLEND)
    glEnable(GL_POLYGON_SMOOTH)
    glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
def setupSocket():
    """Create the module-level UDP socket ``sock`` and bind it to *address*.

    The socket is left in its default blocking mode, so ``read_data``
    waits for each datagram.
    """
    global sock
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(address)
def read_data():
    """Block until one UDP datagram arrives and unpack it into the globals.

    Populates ``ax, ay, az`` (filtered angles), ``acx, acy, acz``
    (accelerometer values) and ``temp`` from the JSON payload; all are
    reset to 0.0 before each receive.
    """
    global ax, ay, az, acx, acy, acz, temp
    ax = ay = az = acx = acy = acz = temp = 0.0
    msg, addr = sock.recvfrom(1024)
    if msg:
        # BUGFIX: the original called msg.decode() but discarded the result
        # and parsed the raw bytes; decode before parsing.
        data = json.loads(msg.decode())
        ax, ay, az = data["filter"]
        acx, acy, acz = data["accel"]
        temp = data["temp"]
def drawText(position, textString):
    """Render *textString* as white-on-black bitmap text at 3-D *position*."""
    # NOTE(review): the font is rebuilt on every call; caching it at module
    # level would avoid the repeated SysFont lookup.
    font = pygame.font.SysFont("Courier", 18, True)
    textSurface = font.render(textString, True, (255,255,255,255), (0,0,0,255))
    textData = pygame.image.tostring(textSurface, "RGBA", True)
    glRasterPos3d(*position)
    glDrawPixels(textSurface.get_width(), textSurface.get_height(), GL_RGBA, GL_UNSIGNED_BYTE, textData)
def run():
setupSocket()
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE | OPENGL | DOUBLEBUF)
resize(*SCREEN_SIZE)
init()
clock = pygame.time.Clock()
cube = Cube((0.0, 0.0, 0.0), (.5, .5, .7))
angle = 0
while True:
then = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT:
return
if event.type == KEYUP and event.key == K_ESCAPE:
return
read_data()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
text = "pitch: " + str("{0:.1f}".format(ay)) + " roll: " + str("{0:.1f}".format(ax))
drawText((1, -4, 2), text)
text = "accx: " + str("{0:.2f}".format(acx)) + " accy: " + str("{0:.2f}".format(acy)) + " accz: " + str(
"{0:.2f}".format(acz))
drawText((1, -4.3, 2), text)
text = "temp: " + str("{0:.1f}".format(temp))
drawText((1, -4.6, 2), text)
glColor((1., 1., 1.))
glLineWidth(1)
glBegin(GL_LINES)
for x in range(-20, 22, 2):
glVertex3f(x / 10., -1, -1)
glVertex3f(x / 10., -1, 1)
for x in range(-20, 22, 2):
glVertex3f(x / 10., -1, 1)
glVertex3f(x / 10., 1, 1)
for z in range(-10, 12, 2):
glVertex3f(-2, -1, z / 10.)
glVertex3f(2, -1, z / 10.)
for z in range(-10, 12, 2):
glVertex3f(-2, -1, z / 10.)
glVertex3f(-2, 1, z / 10.)
for z in range(-10, 12, 2):
glVertex3f(2, -1, z / 10.)
glVertex3f(2, 1, z / 10.)
for y in range(-10, 12, 2):
glVertex3f(-2, y / 10., 1)
glVertex3f(2, y / 10., 1)
for y in range(-10, 12, 2):
glVertex3f(-2, y / 10., 1)
glVertex3f(-2, y / 10., -1)
for y in range(-10, 12, 2):
glVertex3f(2, y / 10., 1)
glVertex3f(2, y / 10., -1)
glEnd()
glPushMatrix()
glRotate(az, 0, 1, 0)
glRotate(ay, 1, 0, 0)
glRotate(ax, 0, 0, 1)
cube.render()
glPopMatrix()
pygame.display.flip()
class Cube(object):
def __init__(self, position, color):
self.position = position
self.color = color
num_faces = 6
vertices = [(-1.0, -0.2, 0.5),
(1.0, -0.2, 0.5),
(1.0, 0.2, 0.5),
(-1.0, 0.2, 0.5),
(-1.0, -0.2, -0.5),
(1.0, -0.2, -0.5),
(1.0, 0.2, -0.5),
(-1.0, 0.2, -0.5)]
normals = [(0.0, 0.0, +1.0),
(0.0, 0.0, -1.0),
(+1.0, 0.0, 0.0),
(-1.0, 0.0, 0.0),
(0.0, +1.0, 0.0),
(0.0, -1.0, 0.0)]
vertex_indices = [(0, 1, 2, 3),
(4, 5, 6, 7),
(1, 5, 6, 2),
(0, 4, 7, 3),
(3, 2, 6, 7),
(0, 1, 5, 4)]
def render(self):
then = pygame.time.get_ticks()
vertices = self.vertices
glBegin(GL_QUADS)
for face_no in range(self.num_faces):
if face_no == 1:
glColor(1.0, 0.0, 0.0)
else:
glColor(self.color)
glNormal3dv(self.normals[face_no])
v1, v2, v3, v4 = self.vertex_indices[face_no]
glVertex(vertices[v1])
glVertex(vertices[v2])
glVertex(vertices[v3])
glVertex(vertices[v4])
glEnd()
if __name__ == "__main__":
run() | true | true |
f7304e9d38fb2af01ffb1db1e0e42104960448d1 | 13,856 | py | Python | fox/connection.py | piger/fox | d411189e5eba9eafb334a059853a9af6ea52ba06 | [
"BSD-2-Clause"
] | 6 | 2019-04-23T16:10:40.000Z | 2021-11-17T10:18:25.000Z | fox/connection.py | piger/fox | d411189e5eba9eafb334a059853a9af6ea52ba06 | [
"BSD-2-Clause"
] | null | null | null | fox/connection.py | piger/fox | d411189e5eba9eafb334a059853a9af6ea52ba06 | [
"BSD-2-Clause"
] | null | null | null | import os
import shlex
import getpass
import warnings
import asyncio
import logging
import collections
import atexit
from typing import Optional, Dict, Deque
import tqdm
import asyncssh
from .conf import env, options_to_connect
from .utils import run_in_loop, CommandResult, prepare_environment, split_lines
# disable annoying warnings (we can't fix the problems in 3rd party libs)
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
# A cache of Connection objects indexed by *name* (not hostname!). We only cache connections creates
# with the global run() and sudo() methods. Maybe the tunnels too?
_connections_cache: Dict[str, "Connection"] = {}
def _clean_connections():
# would be better to close them all at once with gather() or similar
for hostname, conn in _connections_cache.items():
if conn.connected:
log.info(f"Cleaning up connection for {hostname}")
conn.disconnect()
atexit.register(_clean_connections)
class Connection:
"""A SSH connection to a remote server.
:param hostname: hostname of the remote server.
:param username: the username used to log into the remote server.
:param port: the optional port for connecting to the remote server (default: 22).
:param private_key: the optional path to a OpenSSH private key.
:param password: the optional password used to authenticate to the remote server.
:param agent_path: the optional path to a OpenSSH agent socket.
:param tunnel: the optional hostname of another server that will be used as tunnel.
:param nickname: the hostname of the server as passed on the command line (could be different
from the real hostname configured in `~/.ssh/config`).
"""
def __init__(
self,
hostname: str,
username: str,
port: int,
private_key=None,
password: Optional[str] = None,
agent_path: Optional[str] = None,
tunnel: Optional[str] = None,
nickname: Optional[str] = None,
):
self.hostname = hostname
self.username = username
self.port = port
self.private_key = private_key
self.password = password
self.agent_path = agent_path
self.tunnel = tunnel
if nickname:
self.nickname = nickname
else:
self.nickname = self.hostname
self._connection: Optional[asyncssh.SSHClientConnection] = None
self._sftp_client: Optional[asyncssh.SFTPClient] = None
async def _read_from(self, stream, writer, maxlen=10, echo=True) -> str:
buf: Deque[str] = collections.deque(maxlen=maxlen)
trail = ""
while True:
data = await stream.read(1024)
if data == "":
break
# everything gets stored in `buf` (within its limits)
buf.append(data)
# handle previously unprinted output, if any
if trail:
data = trail + data
trail = ""
# split lines and keep any non-newline ended data
lines, rest = split_lines(data)
if echo:
for line in lines:
print(f"[{self.nickname}] {line}")
# if the last part of `data` contains the sudo prompt, handle it
if rest.endswith(env.sudo_prompt):
print(f"[{self.nickname}] {rest}")
# we need to handle sudo erroring because the password was wrong
if lines[-1] == "Sorry, try again.":
print("Unsetting env.sudo_password")
env.sudo_password = None
if env.sudo_password is None:
env.sudo_password = getpass.getpass("Need password for sudo: ")
writer.write(f"{env.sudo_password}\n")
else:
if rest:
trail += rest
output = "".join(list(buf))
return output
async def _run(
self,
command: str,
sudo=False,
cd: Optional[str] = None,
pty=False,
environ: Optional[Dict[str, str]] = None,
echo=True,
**kwargs,
) -> CommandResult:
"""Run a shell command on the remote host"""
if self._connection is None:
await self._connect()
original_command = command
if cd:
command = 'cd "{}" && {}'.format(cd, command)
env_command = prepare_environment(environ)
log.debug(f"*{self.nickname}* environment for command: {env_command}")
if sudo:
command = f"{env_command}{command}"
command = f"sudo -S -p {shlex.quote(env.sudo_prompt)} $SHELL -c {shlex.quote(command)}"
else:
command = f"{env_command}{command}"
log.debug(f"*{self.nickname}* final command: {command}")
args = {}
if pty:
args.update({"term_type": env.term_type, "term_size": env.term_size})
async with self._connection.create_process(command, **args) as proc: # type: ignore
stdout, stderr = await asyncio.gather(
self._read_from(proc.stdout, proc.stdin, echo=echo),
self._read_from(proc.stderr, proc.stdin, echo=echo),
)
return CommandResult(
command=original_command,
actual_command=command,
exit_code=proc.exit_status,
stdout=stdout,
# if we use a pty this will be empty
stderr=stderr,
hostname=self.nickname,
sudo=sudo,
)
# use the event loop
def run(self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
"""Execute a command on the remote server.
:param command: the command line string to execute.
:param pty: wether to request a remote pty.
:param cd: the optional name of the directory where the command will be executed.
:param environ: an optional dictionary containing environment variables to set when
executing the command.
:param echo: set to `False` to hide the output of the command.
"""
print(f"*{self.nickname}* Running: {command}")
kwargs = {"pty": pty, "cd": cd, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
# use the event loop
def sudo(self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
"""Execute a command with sudo on the remote server.
:param command: the command line string to execute.
:param pty: wether to request a remote pty.
:param cd: the optional name of the directory where the command will be executed.
:param environ: an optional dictionary containing environment variables to set when
executing the command.
:param echo: set to `False` to hide the output of the command.
"""
print(f"*{self.nickname}* - Sudo: {command}")
kwargs = {"pty": pty, "cd": cd, "sudo": True, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
async def _connect(self):
log.info(f"Connecting to {self.hostname}:{self.port}")
args = {"username": self.username}
if env.use_known_hosts is False:
args["known_hosts"] = None
if self.tunnel:
log.info(f"Connecting to tunnel {self.tunnel}")
tunnel_conn = _get_connection(self.tunnel, use_cache=False)
await tunnel_conn._connect()
args["tunnel"] = tunnel_conn
# we either use the private key OR the agent; loading the private key might fail while the
# agent could still be working.
if self.agent_path:
args["agent_path"] = self.agent_path
elif self.private_key:
args["client_keys"] = [self.private_key]
# this may throw several exceptions:
# asyncssh.misc.HostKeyNotVerifiable: Host key is not trusted
self._connection = await asyncssh.connect(self.hostname, self.port, **args)
# use the event loop
def disconnect(self):
"""Close the SSH connection to the server."""
# Maybe here we should also delete ourself from the connection cache, but we don't know our
# own "nickname"!
if self._connection is not None:
self._connection.close()
run_in_loop(self._connection.wait_closed())
self._connection = None
print("disconnected")
@property
def connected(self) -> bool:
return self._connection is not None
async def get_sftp_client(self) -> asyncssh.SFTPClient:
if self._connection is None:
await self._connect()
if self._sftp_client is None:
self._sftp_client = await self._connection.start_sftp_client() # type: ignore
return self._sftp_client
async def _get(self, remotefile, localfile):
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
# from https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.SFTPClient.get
block_size = 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(remotefile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.get(
remotefile, localfile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def get(self, remotefile, localfile):
"""Download a file from the remote server.
:param remotefile: the path to the remote file to download.
:param localfile: the local path where to write the downloaded file.
"""
run_in_loop(self._get(remotefile, localfile))
async def _read(self, remotefile) -> bytes:
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
bar = tqdm.tqdm(total=size, desc=os.path.basename(remotefile))
fd = await sftp_client.open(remotefile, "rb")
data = []
while True:
# 16384 is the default block size
buf = await fd.read(16384)
if buf == b"":
break
data.append(buf)
bar.update(len(buf))
fd.close()
bar.close()
return b"".join(data)
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def read(self, remotefile) -> bytes:
"""Read the contents of a remote file.
:param remotefile: the path of the remote file to read.
This is useful when you just want to read the contents of a remote file without downloading
it.
"""
return run_in_loop(self._read(remotefile))
async def _put(self, localfile, remotefile):
sftp_client = await self.get_sftp_client()
try:
size = os.path.getsize(localfile)
# from https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.SFTPClient.get
block_size = 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(localfile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.put(
localfile, remotefile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def put(self, localfile, remotefile):
"""Upload a local file to a remote server.
:param localfile: the path of the local file to upload.
:param remotefile: the path where to write the file on the remote server.
"""
run_in_loop(self._put(localfile, remotefile))
async def _file_exists(self, remotefile) -> bool:
sftp_client = await self.get_sftp_client()
return await sftp_client.exists(remotefile)
# use the event loop
def file_exists(self, remotefile) -> bool:
"""Check if a file exists on the remote server.
:param remotefile: the path of the remote file that will be checked.
"""
return run_in_loop(self._file_exists(remotefile))
def _get_connection(name=None, use_cache=True) -> Connection:
"""Get a connection for `name`.
`name` does not need to be a FQDN; it can be a "nickname" from a SSH configuration file.
"""
global _connections_cache
if name is None and env.host_string is None:
raise RuntimeError("env.host_string is empty!")
if name is None:
name = env.host_string
if use_cache and name in _connections_cache:
conn = _connections_cache[name]
# here we delete stale Connections objects.
if conn.connected:
return conn
del _connections_cache[name]
ssh_options = options_to_connect(name)
args = {}
if "identityfile" in ssh_options:
args["private_key"] = ssh_options["identityfile"]
if "identityagent" in ssh_options:
args["agent_path"] = ssh_options["identityagent"]
# TODO:
# identitiesonly yes
# NOTE: we only cache connections created here, and maybe the tunnels.
# maybe by default we should not re-use the tunnels, as the default behavior of SSH
c = Connection(
ssh_options["hostname"], ssh_options["user"], ssh_options["port"], nickname=name, **args
)
if use_cache:
_connections_cache[name] = c
return c
| 33.795122 | 100 | 0.608978 | import os
import shlex
import getpass
import warnings
import asyncio
import logging
import collections
import atexit
from typing import Optional, Dict, Deque
import tqdm
import asyncssh
from .conf import env, options_to_connect
from .utils import run_in_loop, CommandResult, prepare_environment, split_lines
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
# A cache of Connection objects indexed by *name* (not hostname!). We only cache connections creates
# with the global run() and sudo() methods. Maybe the tunnels too?
_connections_cache: Dict[str, "Connection"] = {}
def _clean_connections():
# would be better to close them all at once with gather() or similar
for hostname, conn in _connections_cache.items():
if conn.connected:
log.info(f"Cleaning up connection for {hostname}")
conn.disconnect()
atexit.register(_clean_connections)
class Connection:
def __init__(
self,
hostname: str,
username: str,
port: int,
private_key=None,
password: Optional[str] = None,
agent_path: Optional[str] = None,
tunnel: Optional[str] = None,
nickname: Optional[str] = None,
):
self.hostname = hostname
self.username = username
self.port = port
self.private_key = private_key
self.password = password
self.agent_path = agent_path
self.tunnel = tunnel
if nickname:
self.nickname = nickname
else:
self.nickname = self.hostname
self._connection: Optional[asyncssh.SSHClientConnection] = None
self._sftp_client: Optional[asyncssh.SFTPClient] = None
async def _read_from(self, stream, writer, maxlen=10, echo=True) -> str:
buf: Deque[str] = collections.deque(maxlen=maxlen)
trail = ""
while True:
data = await stream.read(1024)
if data == "":
break
# everything gets stored in `buf` (within its limits)
buf.append(data)
# handle previously unprinted output, if any
if trail:
data = trail + data
trail = ""
# split lines and keep any non-newline ended data
lines, rest = split_lines(data)
if echo:
for line in lines:
print(f"[{self.nickname}] {line}")
# if the last part of `data` contains the sudo prompt, handle it
if rest.endswith(env.sudo_prompt):
print(f"[{self.nickname}] {rest}")
# we need to handle sudo erroring because the password was wrong
if lines[-1] == "Sorry, try again.":
print("Unsetting env.sudo_password")
env.sudo_password = None
if env.sudo_password is None:
env.sudo_password = getpass.getpass("Need password for sudo: ")
writer.write(f"{env.sudo_password}\n")
else:
if rest:
trail += rest
output = "".join(list(buf))
return output
async def _run(
self,
command: str,
sudo=False,
cd: Optional[str] = None,
pty=False,
environ: Optional[Dict[str, str]] = None,
echo=True,
**kwargs,
) -> CommandResult:
if self._connection is None:
await self._connect()
original_command = command
if cd:
command = 'cd "{}" && {}'.format(cd, command)
env_command = prepare_environment(environ)
log.debug(f"*{self.nickname}* environment for command: {env_command}")
if sudo:
command = f"{env_command}{command}"
command = f"sudo -S -p {shlex.quote(env.sudo_prompt)} $SHELL -c {shlex.quote(command)}"
else:
command = f"{env_command}{command}"
log.debug(f"*{self.nickname}* final command: {command}")
args = {}
if pty:
args.update({"term_type": env.term_type, "term_size": env.term_size})
async with self._connection.create_process(command, **args) as proc: # type: ignore
stdout, stderr = await asyncio.gather(
self._read_from(proc.stdout, proc.stdin, echo=echo),
self._read_from(proc.stderr, proc.stdin, echo=echo),
)
return CommandResult(
command=original_command,
actual_command=command,
exit_code=proc.exit_status,
stdout=stdout,
# if we use a pty this will be empty
stderr=stderr,
hostname=self.nickname,
sudo=sudo,
)
# use the event loop
def run(self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
print(f"*{self.nickname}* Running: {command}")
kwargs = {"pty": pty, "cd": cd, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
# use the event loop
def sudo(self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
print(f"*{self.nickname}* - Sudo: {command}")
kwargs = {"pty": pty, "cd": cd, "sudo": True, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
async def _connect(self):
log.info(f"Connecting to {self.hostname}:{self.port}")
args = {"username": self.username}
if env.use_known_hosts is False:
args["known_hosts"] = None
if self.tunnel:
log.info(f"Connecting to tunnel {self.tunnel}")
tunnel_conn = _get_connection(self.tunnel, use_cache=False)
await tunnel_conn._connect()
args["tunnel"] = tunnel_conn
# we either use the private key OR the agent; loading the private key might fail while the
# agent could still be working.
if self.agent_path:
args["agent_path"] = self.agent_path
elif self.private_key:
args["client_keys"] = [self.private_key]
# this may throw several exceptions:
# asyncssh.misc.HostKeyNotVerifiable: Host key is not trusted
self._connection = await asyncssh.connect(self.hostname, self.port, **args)
# use the event loop
def disconnect(self):
# Maybe here we should also delete ourself from the connection cache, but we don't know our
if self._connection is not None:
self._connection.close()
run_in_loop(self._connection.wait_closed())
self._connection = None
print("disconnected")
@property
def connected(self) -> bool:
return self._connection is not None
async def get_sftp_client(self) -> asyncssh.SFTPClient:
if self._connection is None:
await self._connect()
if self._sftp_client is None:
self._sftp_client = await self._connection.start_sftp_client()
return self._sftp_client
async def _get(self, remotefile, localfile):
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
= 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(remotefile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.get(
remotefile, localfile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
def get(self, remotefile, localfile):
run_in_loop(self._get(remotefile, localfile))
async def _read(self, remotefile) -> bytes:
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
bar = tqdm.tqdm(total=size, desc=os.path.basename(remotefile))
fd = await sftp_client.open(remotefile, "rb")
data = []
while True:
buf = await fd.read(16384)
if buf == b"":
break
data.append(buf)
bar.update(len(buf))
fd.close()
bar.close()
return b"".join(data)
except (OSError, asyncssh.SFTPError):
raise
def read(self, remotefile) -> bytes:
return run_in_loop(self._read(remotefile))
async def _put(self, localfile, remotefile):
sftp_client = await self.get_sftp_client()
try:
size = os.path.getsize(localfile)
= 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(localfile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.put(
localfile, remotefile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
def put(self, localfile, remotefile):
run_in_loop(self._put(localfile, remotefile))
async def _file_exists(self, remotefile) -> bool:
sftp_client = await self.get_sftp_client()
return await sftp_client.exists(remotefile)
def file_exists(self, remotefile) -> bool:
return run_in_loop(self._file_exists(remotefile))
def _get_connection(name=None, use_cache=True) -> Connection:
global _connections_cache
if name is None and env.host_string is None:
raise RuntimeError("env.host_string is empty!")
if name is None:
name = env.host_string
if use_cache and name in _connections_cache:
conn = _connections_cache[name]
if conn.connected:
return conn
del _connections_cache[name]
ssh_options = options_to_connect(name)
args = {}
if "identityfile" in ssh_options:
args["private_key"] = ssh_options["identityfile"]
if "identityagent" in ssh_options:
args["agent_path"] = ssh_options["identityagent"]
c = Connection(
ssh_options["hostname"], ssh_options["user"], ssh_options["port"], nickname=name, **args
)
if use_cache:
_connections_cache[name] = c
return c
| true | true |
f7304f72fe5aeb256f1ab7cc3f24471399e0baaf | 6,041 | py | Python | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py | hortonworks/ambari-perf | 71305effa9ac00e2e9adb36e6a66a13c9105a811 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-05-06T06:24:04.000Z | 2021-05-06T06:24:04.000Z | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py | gcxtx/ambari | 133d9c4661b21182482c25f96c3f0bf0a9740a9f | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py | gcxtx/ambari | 133d9c4661b21182482c25f96c3f0bf0a9740a9f | [
"Apache-2.0"
] | 3 | 2017-10-31T11:42:31.000Z | 2021-04-26T07:17:53.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import socket
import sys
import time
import subprocess
from hcat_service_check import hcat_service_check
from webhcat_service_check import webhcat_service_check
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.libraries.functions import get_unique_id_and_date
class HiveServiceCheck(Script):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServiceCheckWindows(HiveServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
service = "HIVE"
Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
hcat_service_check()
webhcat_service_check()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServiceCheckDefault(HiveServiceCheck):
def __init__(self):
super(HiveServiceCheckDefault, self).__init__()
Logger.initialize_logger()
def service_check(self, env):
import params
env.set_params(params)
if params.security_enabled:
kinit_cmd = format(
"{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
else:
kinit_cmd = ""
# Check HiveServer
Logger.info("Running Hive Server checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts,
int(format("{hive_server_port}")))
if params.has_hive_interactive and params.hive_interactive_enabled:
Logger.info("Running Hive Server2 checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
int(format("{hive_server_interactive_port}")))
Logger.info("Running LLAP checks")
Logger.info("-------------------\n")
self.check_llap(env, kinit_cmd)
Logger.info("Running HCAT checks")
Logger.info("-------------------\n")
hcat_service_check()
Logger.info("Running WEBHCAT checks")
Logger.info("---------------------\n")
webhcat_service_check()
def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
import params
env.set_params(params)
Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))
if not address_list:
raise Fail("Can not find any "+server_component_name+" ,host. Please check configuration.")
SOCKET_WAIT_SECONDS = 290
start_time = time.time()
end_time = start_time + SOCKET_WAIT_SECONDS
Logger.info("Waiting for the {0} to start...".format(server_component_name))
workable_server_available = False
i = 0
while time.time() < end_time and not workable_server_available:
address = address_list[i]
try:
check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
params.hive_server_principal, kinit_cmd, params.smokeuser,
transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
ssl_password=params.hive_ssl_keystore_password)
Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
workable_server_available = True
except:
Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
time.sleep(5)
i += 1
if i == len(address_list):
i = 0
elapsed_time = time.time() - start_time
if not workable_server_available:
raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
def check_llap(self, env, kinit_cmd):
import params
env.set_params(params)
File(format("{tmp_dir}/hiveLlapSmoke.sh"),
content=StaticFile("hiveLlapSmoke.sh"),
mode=0755
)
unique_id = get_unique_id_and_date()
llap_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hiveLlapSmoke.sh {stack_root} llap_smoke_{unique_id} prepare")
exec_path = params.execute_path
if params.version and params.stack_root:
upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin")
exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
Execute(llap_cmd,
user=params.hive_user,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
tries=1,
try_sleep=5,
wait_for_finish=True,
stderr=subprocess.PIPE,
logoutput=True)
if __name__ == "__main__":
HiveServiceCheck().execute() | 36.173653 | 134 | 0.691442 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import socket
import sys
import time
import subprocess
from hcat_service_check import hcat_service_check
from webhcat_service_check import webhcat_service_check
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.libraries.functions import get_unique_id_and_date
class HiveServiceCheck(Script):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServiceCheckWindows(HiveServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
service = "HIVE"
Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
hcat_service_check()
webhcat_service_check()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServiceCheckDefault(HiveServiceCheck):
def __init__(self):
super(HiveServiceCheckDefault, self).__init__()
Logger.initialize_logger()
def service_check(self, env):
import params
env.set_params(params)
if params.security_enabled:
kinit_cmd = format(
"{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
else:
kinit_cmd = ""
Logger.info("Running Hive Server checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts,
int(format("{hive_server_port}")))
if params.has_hive_interactive and params.hive_interactive_enabled:
Logger.info("Running Hive Server2 checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
int(format("{hive_server_interactive_port}")))
Logger.info("Running LLAP checks")
Logger.info("-------------------\n")
self.check_llap(env, kinit_cmd)
Logger.info("Running HCAT checks")
Logger.info("-------------------\n")
hcat_service_check()
Logger.info("Running WEBHCAT checks")
Logger.info("---------------------\n")
webhcat_service_check()
def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
import params
env.set_params(params)
Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))
if not address_list:
raise Fail("Can not find any "+server_component_name+" ,host. Please check configuration.")
SOCKET_WAIT_SECONDS = 290
start_time = time.time()
end_time = start_time + SOCKET_WAIT_SECONDS
Logger.info("Waiting for the {0} to start...".format(server_component_name))
workable_server_available = False
i = 0
while time.time() < end_time and not workable_server_available:
address = address_list[i]
try:
check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
params.hive_server_principal, kinit_cmd, params.smokeuser,
transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
ssl_password=params.hive_ssl_keystore_password)
Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
workable_server_available = True
except:
Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
time.sleep(5)
i += 1
if i == len(address_list):
i = 0
elapsed_time = time.time() - start_time
if not workable_server_available:
raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
def check_llap(self, env, kinit_cmd):
    """Run the LLAP smoke check by executing the bundled hiveLlapSmoke.sh.

    Stages the smoke script into the agent tmp dir and executes it once as
    the Hive service user.

    :param env: execution environment; used to apply ``params``
    :param kinit_cmd: kinit command prefix for secured clusters
    """
    import params
    env.set_params(params)

    File(format("{tmp_dir}/hiveLlapSmoke.sh"),
         content=StaticFile("hiveLlapSmoke.sh"),
         # 0o755 replaces the legacy `0755` literal, which is a SyntaxError on
         # Python 3; value is unchanged (rwxr-xr-x).
         mode=0o755
    )
    unique_id = get_unique_id_and_date()
    llap_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hiveLlapSmoke.sh {stack_root} llap_smoke_{unique_id} prepare")

    exec_path = params.execute_path
    if params.version and params.stack_root:
        # During a stack upgrade, prefer the versioned hive2 bin directory.
        upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin")
        exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin

    Execute(llap_cmd,
            user=params.hive_user,
            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
            tries=1,
            try_sleep=5,
            wait_for_finish=True,
            stderr=subprocess.PIPE,
            logoutput=True)
if __name__ == "__main__":
HiveServiceCheck().execute() | false | true |
f7304f89593d1379710d96a3531d54d1dcff8680 | 733 | py | Python | tests/test_toboday.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 50 | 2021-08-18T08:08:04.000Z | 2022-03-20T07:23:26.000Z | tests/test_toboday.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 1 | 2021-02-21T03:18:09.000Z | 2021-03-08T01:07:52.000Z | tests/test_toboday.py | Jesse-Yung/jsonclasses | d40c52aec42bcb978a80ceb98b93ab38134dc790 | [
"MIT"
] | 8 | 2021-07-01T02:39:15.000Z | 2021-12-10T02:20:18.000Z | from __future__ import annotations
from datetime import date, datetime
from unittest import TestCase
from tests.classes.super_datetime import SuperDateTime
class TestToboday(TestCase):
    """Tests for the `toboday` modifier: values are truncated to the
    beginning (00:00) of their day; non-datetime values pass through."""

    def test_toboday_transforms_datetime_into_the_time_of_beginning_of_day(self):
        original = datetime(2021, 10, 11, 17, 37, 27, 43235)
        record = SuperDateTime(dtbd=original)
        self.assertEqual(record.dtbd, datetime(2021, 10, 11, 0, 0))

    def test_toboday_transforms_date_into_the_time_of_beginning_of_day(self):
        record = SuperDateTime(dbd=date(2021, 10, 11))
        self.assertEqual(record.dbd, date(2021, 10, 11))

    def test_toboday_does_not_transform_if_is_not_datetime(self):
        record = SuperDateTime(sbd="12345")
        self.assertEqual(record.sbd, "12345")
| 34.904762 | 81 | 0.747613 | from __future__ import annotations
from datetime import date, datetime
from unittest import TestCase
from tests.classes.super_datetime import SuperDateTime
class TestToboday(TestCase):
    # Exercises the `toboday` modifier: datetimes are truncated to 00:00 of
    # their day, dates are left as-is, and non-date values pass through.

    def test_toboday_transforms_datetime_into_the_time_of_beginning_of_day(self):
        # The time-of-day components (17:37:27.043235) must be zeroed out.
        d = SuperDateTime(dtbd=datetime(2021, 10, 11, 17, 37, 27,43235))
        self.assertEqual(d.dtbd, datetime(2021,10, 11, 0, 0))

    def test_toboday_transforms_date_into_the_time_of_beginning_of_day(self):
        # A plain date has no time component, so it is unchanged.
        d = SuperDateTime(dbd=date(2021, 10, 11))
        self.assertEqual(d.dbd, date(2021,10, 11))

    def test_toboday_does_not_transform_if_is_not_datetime(self):
        # Strings are not date-like; the modifier must leave them untouched.
        s = SuperDateTime(sbd="12345")
        self.assertEqual(s.sbd, "12345")
| true | true |
f7305096c852a5bd53317118877b8128c82fd818 | 20,647 | py | Python | web/datasets/migrations/0011_auto_20200515_1115.py | RaulBSantos/maria-quiteria | 9eb1a307099e208ce666bcc0d65be9c9a4cae150 | [
"MIT"
] | 151 | 2019-11-10T02:18:25.000Z | 2022-01-18T14:28:25.000Z | web/datasets/migrations/0011_auto_20200515_1115.py | RaulBSantos/maria-quiteria | 9eb1a307099e208ce666bcc0d65be9c9a4cae150 | [
"MIT"
] | 202 | 2019-11-09T16:27:19.000Z | 2022-03-22T12:41:27.000Z | web/datasets/migrations/0011_auto_20200515_1115.py | RaulBSantos/maria-quiteria | 9eb1a307099e208ce666bcc0d65be9c9a4cae150 | [
"MIT"
] | 69 | 2020-02-05T01:33:35.000Z | 2022-03-30T10:39:27.000Z | # Generated by Django 2.2.12 on 2020-05-15 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("datasets", "0010_auto_20200515_0959"),
]
operations = [
migrations.AlterField(
model_name="citycouncilagenda",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="details",
field=models.TextField(blank=True, null=True, verbose_name="Detalhes"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="event_type",
field=models.CharField(
choices=[
("sessao_ordinaria", "Sessão Ordinária"),
("ordem_do_dia", "Ordem do Dia"),
("sessao_solene", "Sessão Solene"),
("sessao_especial", "Sessão Especial"),
("audiencia_publica", "Audiência Pública"),
],
max_length=20,
verbose_name="Tipo do evento",
),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="title",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="council_member",
field=models.CharField(max_length=200, verbose_name="Vereador"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="description",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Descrição"
),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="status",
field=models.CharField(
choices=[
("presente", "Presente"),
("falta_justificada", "Falta Justificada"),
("licenca_justificada", "Licença Justificada"),
("ausente", "Ausente"),
],
max_length=20,
verbose_name="Situação",
),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="excluded",
field=models.BooleanField(default=False, verbose_name="Excluído?"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="budget_unit",
field=models.PositiveIntegerField(
default=101, verbose_name="Unidade orçamentária"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="company_or_person",
field=models.TextField(
blank=True, null=True, verbose_name="Empresa ou pessoa"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="document",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="CNPJ ou CPF"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="excluded",
field=models.BooleanField(default=False, verbose_name="Excluído?"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="external_file_code",
field=models.CharField(
blank=True,
max_length=50,
null=True,
verbose_name="Código do arquivo (externo)",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="external_file_line",
field=models.CharField(
blank=True,
max_length=50,
null=True,
verbose_name="Linha do arquivo (externo)",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="function",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Função"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="group",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Grupo"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="legal_status",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Natureza"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="modality",
field=models.CharField(
blank=True,
choices=[
("convenio", "Convênio"),
("tomada_de_precos", "Tomada de Preço"),
("pregao", "Pregão"),
("inexigibilidade", "Inexigibilidade"),
("convite", "Convite"),
("concorrencia", "Concorrência"),
("dispensa", "Dispensa"),
("isento", "Isento"),
],
max_length=50,
null=True,
verbose_name="Modalidade",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="number",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Número"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="phase",
field=models.CharField(
choices=[
("empenho", "Empenho"),
("liquidacao", "Liquidação"),
("pagamento", "Pagamento"),
],
max_length=20,
verbose_name="Fase",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="phase_code",
field=models.CharField(
blank=True, max_length=20, null=True, verbose_name="Código da fase"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="process_number",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Número do processo"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="published_at",
field=models.DateField(blank=True, null=True, verbose_name="Publicado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="resource",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Fonte"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="subfunction",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Subfunção"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="subgroup",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Subgrupos"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="summary",
field=models.TextField(blank=True, null=True, verbose_name="Descrição"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="event_type",
field=models.CharField(
choices=[
("sessao_ordinaria", "Sessão Ordinária"),
("ordem_do_dia", "Ordem do Dia"),
("sessao_solene", "Sessão Solene"),
("sessao_especial", "Sessão Especial"),
("audiencia_publica", "Audiência Pública"),
],
max_length=20,
verbose_name="Tipo de evento",
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="file_content",
field=models.TextField(
blank=True, null=True, verbose_name="Conteúdo do arquivo"
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="file_url",
field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="title",
field=models.CharField(
blank=True, max_length=300, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="cityhallbid",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="cityhallbid",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="gazette",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="gazette",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="gazette",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="gazette",
name="date",
field=models.DateField(null=True, verbose_name="Data"),
),
migrations.AlterField(
model_name="gazette",
name="file_content",
field=models.TextField(
blank=True, null=True, verbose_name="Conteúdo do arquivo"
),
),
migrations.AlterField(
model_name="gazette",
name="file_url",
field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"),
),
migrations.AlterField(
model_name="gazette",
name="is_legacy",
field=models.BooleanField(default=False, verbose_name="É do site antigo?"),
),
migrations.AlterField(
model_name="gazette",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="gazette",
name="power",
field=models.CharField(
choices=[
("executivo", "Poder Executivo"),
("legislativo", "Poder Legislativo"),
],
max_length=25,
verbose_name="Poder",
),
),
migrations.AlterField(
model_name="gazette",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="gazette",
name="year_and_edition",
field=models.CharField(max_length=100, verbose_name="Ano e edição"),
),
migrations.AlterField(
model_name="gazetteevent",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="gazetteevent",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="gazetteevent",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="gazetteevent",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="gazetteevent",
name="published_on",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Publicado em"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="secretariat",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Secretaria"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="summary",
field=models.TextField(blank=True, null=True, verbose_name="Sumário"),
),
migrations.AlterField(
model_name="gazetteevent",
name="title",
field=models.CharField(
blank=True, max_length=300, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
]
| 36.350352 | 88 | 0.539885 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("datasets", "0010_auto_20200515_0959"),
]
operations = [
migrations.AlterField(
model_name="citycouncilagenda",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="details",
field=models.TextField(blank=True, null=True, verbose_name="Detalhes"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="event_type",
field=models.CharField(
choices=[
("sessao_ordinaria", "Sessão Ordinária"),
("ordem_do_dia", "Ordem do Dia"),
("sessao_solene", "Sessão Solene"),
("sessao_especial", "Sessão Especial"),
("audiencia_publica", "Audiência Pública"),
],
max_length=20,
verbose_name="Tipo do evento",
),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="title",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="citycouncilagenda",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="council_member",
field=models.CharField(max_length=200, verbose_name="Vereador"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="description",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Descrição"
),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="status",
field=models.CharField(
choices=[
("presente", "Presente"),
("falta_justificada", "Falta Justificada"),
("licenca_justificada", "Licença Justificada"),
("ausente", "Ausente"),
],
max_length=20,
verbose_name="Situação",
),
),
migrations.AlterField(
model_name="citycouncilattendancelist",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="excluded",
field=models.BooleanField(default=False, verbose_name="Excluído?"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilcontract",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="budget_unit",
field=models.PositiveIntegerField(
default=101, verbose_name="Unidade orçamentária"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="company_or_person",
field=models.TextField(
blank=True, null=True, verbose_name="Empresa ou pessoa"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="document",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="CNPJ ou CPF"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="excluded",
field=models.BooleanField(default=False, verbose_name="Excluído?"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="external_file_code",
field=models.CharField(
blank=True,
max_length=50,
null=True,
verbose_name="Código do arquivo (externo)",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="external_file_line",
field=models.CharField(
blank=True,
max_length=50,
null=True,
verbose_name="Linha do arquivo (externo)",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="function",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Função"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="group",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Grupo"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="legal_status",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Natureza"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="modality",
field=models.CharField(
blank=True,
choices=[
("convenio", "Convênio"),
("tomada_de_precos", "Tomada de Preço"),
("pregao", "Pregão"),
("inexigibilidade", "Inexigibilidade"),
("convite", "Convite"),
("concorrencia", "Concorrência"),
("dispensa", "Dispensa"),
("isento", "Isento"),
],
max_length=50,
null=True,
verbose_name="Modalidade",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="number",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Número"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="phase",
field=models.CharField(
choices=[
("empenho", "Empenho"),
("liquidacao", "Liquidação"),
("pagamento", "Pagamento"),
],
max_length=20,
verbose_name="Fase",
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="phase_code",
field=models.CharField(
blank=True, max_length=20, null=True, verbose_name="Código da fase"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="process_number",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Número do processo"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="published_at",
field=models.DateField(blank=True, null=True, verbose_name="Publicado em"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="resource",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Fonte"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="subfunction",
field=models.CharField(
blank=True, max_length=50, null=True, verbose_name="Subfunção"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="subgroup",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Subgrupos"
),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="summary",
field=models.TextField(blank=True, null=True, verbose_name="Descrição"),
),
migrations.AlterField(
model_name="citycouncilexpense",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="date",
field=models.DateField(verbose_name="Data"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="event_type",
field=models.CharField(
choices=[
("sessao_ordinaria", "Sessão Ordinária"),
("ordem_do_dia", "Ordem do Dia"),
("sessao_solene", "Sessão Solene"),
("sessao_especial", "Sessão Especial"),
("audiencia_publica", "Audiência Pública"),
],
max_length=20,
verbose_name="Tipo de evento",
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="file_content",
field=models.TextField(
blank=True, null=True, verbose_name="Conteúdo do arquivo"
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="file_url",
field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="citycouncilminute",
name="title",
field=models.CharField(
blank=True, max_length=300, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="citycouncilminute",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="cityhallbid",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="cityhallbid",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="cityhallbid",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="cityhallbidevent",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="gazette",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="gazette",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="gazette",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="gazette",
name="date",
field=models.DateField(null=True, verbose_name="Data"),
),
migrations.AlterField(
model_name="gazette",
name="file_content",
field=models.TextField(
blank=True, null=True, verbose_name="Conteúdo do arquivo"
),
),
migrations.AlterField(
model_name="gazette",
name="file_url",
field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"),
),
migrations.AlterField(
model_name="gazette",
name="is_legacy",
field=models.BooleanField(default=False, verbose_name="É do site antigo?"),
),
migrations.AlterField(
model_name="gazette",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="gazette",
name="power",
field=models.CharField(
choices=[
("executivo", "Poder Executivo"),
("legislativo", "Poder Legislativo"),
],
max_length=25,
verbose_name="Poder",
),
),
migrations.AlterField(
model_name="gazette",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
migrations.AlterField(
model_name="gazette",
name="year_and_edition",
field=models.CharField(max_length=100, verbose_name="Ano e edição"),
),
migrations.AlterField(
model_name="gazetteevent",
name="crawled_at",
field=models.DateTimeField(verbose_name="Coletado em"),
),
migrations.AlterField(
model_name="gazetteevent",
name="crawled_from",
field=models.URLField(verbose_name="Fonte"),
),
migrations.AlterField(
model_name="gazetteevent",
name="created_at",
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"),
),
migrations.AlterField(
model_name="gazetteevent",
name="notes",
field=models.TextField(blank=True, null=True, verbose_name="Anotações"),
),
migrations.AlterField(
model_name="gazetteevent",
name="published_on",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Publicado em"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="secretariat",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Secretaria"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="summary",
field=models.TextField(blank=True, null=True, verbose_name="Sumário"),
),
migrations.AlterField(
model_name="gazetteevent",
name="title",
field=models.CharField(
blank=True, max_length=300, null=True, verbose_name="Título"
),
),
migrations.AlterField(
model_name="gazetteevent",
name="updated_at",
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
),
]
| true | true |
f73051c3feff91789b9c7364b801010e5c39b0e2 | 149 | py | Python | 13-python-integration/1-python-introduction/hello.py | tehilabk/cpp-5781 | 736ed05dddb2a7270bbcdbb04a3ffb4b9046e358 | [
"MIT"
] | 14 | 2021-01-30T16:36:18.000Z | 2022-03-30T17:24:44.000Z | 13-python-integration/1-python-introduction/hello.py | dimastar2310/cpp-5781 | 615ba07e0841522df74384f380172557f5e305a7 | [
"MIT"
] | null | null | null | 13-python-integration/1-python-introduction/hello.py | dimastar2310/cpp-5781 | 615ba07e0841522df74384f380172557f5e305a7 | [
"MIT"
] | 23 | 2020-03-12T13:21:29.000Z | 2021-02-22T21:29:48.000Z | #!python3
# Demonstrates string/list literals and repetition with the * operator.

# A plain single-line string literal.
print("hello")

# A string spanning several lines; the leading and trailing newlines are
# part of the value (equivalent to the triple-quoted form).
print("\nw\no\nr\nl\nd\n")

# Repeating a string three times prints "abcabcabc".
print("abc" * 3)

# Repeating a list concatenates its elements three times over.
a = [1, 2, 3, 4]
print(a * 3)
| 7.45 | 21 | 0.536913 |
# Print a one-line string literal.
print("hello")
# Print a multi-line (triple-quoted) literal; the embedded newlines,
# including the one right after the opening quotes, are part of the value.
print("""
w
o
r
l
d
"""
)
# String repetition with *: emits "abcabcabc".
print("abc"*3)
# List repetition with *: emits the four elements three times over.
a = [1,2,3,4]
print(a*3)
| true | true |
f7305317714c65d2ae52341ae5236b5e05b703e3 | 703 | py | Python | main.py | benayas1/FastAPI-demo | 9f242acf499a4f896ac7102d0c301c90d11acb2e | [
"MIT"
] | null | null | null | main.py | benayas1/FastAPI-demo | 9f242acf499a4f896ac7102d0c301c90d11acb2e | [
"MIT"
] | null | null | null | main.py | benayas1/FastAPI-demo | 9f242acf499a4f896ac7102d0c301c90d11acb2e | [
"MIT"
] | null | null | null | from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from src.build_model import convert, predict
app = FastAPI()
# pydantic models
class StockIn(BaseModel):
ticker: str
class StockOut(StockIn):
forecast: dict
# routes
@app.get("/ping")
async def pong():
return {"ping": "pong!"}
@app.post("/predict", response_model=StockOut, status_code=200)
def get_prediction(payload: StockIn):
ticker = payload.ticker
prediction_list = predict(ticker)
if not prediction_list:
raise HTTPException(status_code=400, detail="Model not found.")
response_object = {"ticker": ticker, "forecast": convert(prediction_list)}
return response_object | 18.5 | 78 | 0.721195 | from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from src.build_model import convert, predict
app = FastAPI()
class StockIn(BaseModel):
ticker: str
class StockOut(StockIn):
forecast: dict
@app.get("/ping")
async def pong():
return {"ping": "pong!"}
@app.post("/predict", response_model=StockOut, status_code=200)
def get_prediction(payload: StockIn):
ticker = payload.ticker
prediction_list = predict(ticker)
if not prediction_list:
raise HTTPException(status_code=400, detail="Model not found.")
response_object = {"ticker": ticker, "forecast": convert(prediction_list)}
return response_object | true | true |
f730535cc3c8c3715aaee00884ba41ed22be7ce5 | 8,550 | py | Python | cinder/volume/drivers/open_e/jovian_common/rest_proxy.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | 3 | 2016-08-23T20:37:47.000Z | 2016-10-12T11:16:35.000Z | cinder/volume/drivers/open_e/jovian_common/rest_proxy.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/open_e/jovian_common/rest_proxy.py | shubhamdang/cinder | 03a8ca07d5710771c597fd92de50103313ec7f76 | [
"Apache-2.0"
] | 1 | 2019-09-25T11:15:01.000Z | 2019-09-25T11:15:01.000Z | # Copyright (c) 2020 Open-E, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network connection handling class for JovianDSS driver."""
import json
import time
from oslo_log import log as logging
from oslo_utils import netutils as o_netutils
import requests
import urllib3
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.open_e.jovian_common import exception as jexc
LOG = logging.getLogger(__name__)
class JovianRESTProxy(object):
"""Jovian REST API proxy."""
def __init__(self, config):
""":param config: config is like dict."""
self.proto = 'http'
if config.get('driver_use_ssl', True):
self.proto = 'https'
self.hosts = config.safe_get('san_hosts')
self.port = str(config.get('san_api_port', 82))
self.active_host = 0
for host in self.hosts:
if o_netutils.is_valid_ip(host) is False:
err_msg = ('Invalid value of jovian_host property: '
'%(addr)s, IP address expected.' %
{'addr': host})
LOG.debug(err_msg)
raise exception.InvalidConfigurationValue(err_msg)
self.api_path = "/api/v3"
self.delay = config.get('jovian_recovery_delay', 40)
self.pool = config.safe_get('jovian_pool')
self.user = config.get('san_login', 'admin')
self.password = config.get('san_password', 'admin')
self.auth = requests.auth.HTTPBasicAuth(self.user, self.password)
self.verify = False
self.retry_n = config.get('jovian_rest_send_repeats', 3)
self.header = {'connection': 'keep-alive',
'Content-Type': 'application/json',
'authorization': 'Basic '}
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def _get_pool_url(self, host):
url = ('%(proto)s://%(host)s:%(port)s/api/v3/pools/%(pool)s' % {
'proto': self.proto,
'host': host,
'port': self.port,
'pool': self.pool})
return url
def _get_url(self, host):
url = ('%(proto)s://%(host)s:%(port)s/api/v3' % {
'proto': self.proto,
'host': host,
'port': self.port})
return url
def request(self, request_method, req, json_data=None):
"""Send request to the specific url.
:param request_method: GET, POST, DELETE
:param url: where to send
:param json_data: data
"""
for j in range(self.retry_n):
for i in range(len(self.hosts)):
host = self.hosts[self.active_host]
url = self._get_url(host) + req
LOG.debug(
"sending request of type %(type)s to %(url)s "
"attempt: %(num)s.",
{'type': request_method,
'url': url,
'num': j})
if json_data is not None:
LOG.debug(
"sending data: %s.", json_data)
try:
ret = self._request_routine(url, request_method, json_data)
if len(ret) == 0:
self.active_host = ((self.active_host + 1)
% len(self.hosts))
continue
return ret
except requests.ConnectionError as err:
LOG.debug("Connection error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
continue
time.sleep(self.delay)
msg = (_('%(times) faild in a row') % {'times': j})
raise jexc.JDSSRESTProxyException(host=url, reason=msg)
def pool_request(self, request_method, req, json_data=None):
"""Send request to the specific url.
:param request_method: GET, POST, DELETE
:param url: where to send
:param json_data: data
"""
url = ""
for j in range(self.retry_n):
for i in range(len(self.hosts)):
host = self.hosts[self.active_host]
url = self._get_pool_url(host) + req
LOG.debug(
"sending pool request of type %(type)s to %(url)s "
"attempt: %(num)s.",
{'type': request_method,
'url': url,
'num': j})
if json_data is not None:
LOG.debug(
"JovianDSS: Sending data: %s.", str(json_data))
try:
ret = self._request_routine(url, request_method, json_data)
if len(ret) == 0:
self.active_host = ((self.active_host + 1)
% len(self.hosts))
continue
return ret
except requests.ConnectionError as err:
LOG.debug("Connection error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
continue
time.sleep(int(self.delay))
msg = (_('%(times) faild in a row') % {'times': j})
raise jexc.JDSSRESTProxyException(host=url, reason=msg)
def _request_routine(self, url, request_method, json_data=None):
"""Make an HTTPS request and return the results."""
ret = None
for i in range(3):
ret = dict()
try:
response_obj = requests.request(request_method,
auth=self.auth,
url=url,
headers=self.header,
data=json.dumps(json_data),
verify=self.verify)
LOG.debug('response code: %s', response_obj.status_code)
LOG.debug('response data: %s', response_obj.text)
ret['code'] = response_obj.status_code
if '{' in response_obj.text and '}' in response_obj.text:
if "error" in response_obj.text:
ret["error"] = json.loads(response_obj.text)["error"]
else:
ret["error"] = None
if "data" in response_obj.text:
ret["data"] = json.loads(response_obj.text)["data"]
else:
ret["data"] = None
if ret["code"] == 500:
if ret["error"] is not None:
if (("errno" in ret["error"]) and
("class" in ret["error"])):
if (ret["error"]["class"] ==
"opene.tools.scstadmin.ScstAdminError"):
LOG.debug("ScstAdminError %(code)d %(msg)s", {
"code": ret["error"]["errno"],
"msg": ret["error"]["message"]})
continue
if (ret["error"]["class"] ==
"exceptions.OSError"):
LOG.debug("OSError %(code)d %(msg)s", {
"code": ret["error"]["errno"],
"msg": ret["error"]["message"]})
continue
break
except requests.HTTPError as err:
LOG.debug("HTTP parsing error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
return ret
def get_active_host(self):
"""Return address of currently used host."""
return self.hosts[self.active_host]
| 37.665198 | 79 | 0.491579 |
import json
import time
from oslo_log import log as logging
from oslo_utils import netutils as o_netutils
import requests
import urllib3
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.open_e.jovian_common import exception as jexc
LOG = logging.getLogger(__name__)
class JovianRESTProxy(object):
def __init__(self, config):
self.proto = 'http'
if config.get('driver_use_ssl', True):
self.proto = 'https'
self.hosts = config.safe_get('san_hosts')
self.port = str(config.get('san_api_port', 82))
self.active_host = 0
for host in self.hosts:
if o_netutils.is_valid_ip(host) is False:
err_msg = ('Invalid value of jovian_host property: '
'%(addr)s, IP address expected.' %
{'addr': host})
LOG.debug(err_msg)
raise exception.InvalidConfigurationValue(err_msg)
self.api_path = "/api/v3"
self.delay = config.get('jovian_recovery_delay', 40)
self.pool = config.safe_get('jovian_pool')
self.user = config.get('san_login', 'admin')
self.password = config.get('san_password', 'admin')
self.auth = requests.auth.HTTPBasicAuth(self.user, self.password)
self.verify = False
self.retry_n = config.get('jovian_rest_send_repeats', 3)
self.header = {'connection': 'keep-alive',
'Content-Type': 'application/json',
'authorization': 'Basic '}
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def _get_pool_url(self, host):
url = ('%(proto)s://%(host)s:%(port)s/api/v3/pools/%(pool)s' % {
'proto': self.proto,
'host': host,
'port': self.port,
'pool': self.pool})
return url
def _get_url(self, host):
url = ('%(proto)s://%(host)s:%(port)s/api/v3' % {
'proto': self.proto,
'host': host,
'port': self.port})
return url
def request(self, request_method, req, json_data=None):
for j in range(self.retry_n):
for i in range(len(self.hosts)):
host = self.hosts[self.active_host]
url = self._get_url(host) + req
LOG.debug(
"sending request of type %(type)s to %(url)s "
"attempt: %(num)s.",
{'type': request_method,
'url': url,
'num': j})
if json_data is not None:
LOG.debug(
"sending data: %s.", json_data)
try:
ret = self._request_routine(url, request_method, json_data)
if len(ret) == 0:
self.active_host = ((self.active_host + 1)
% len(self.hosts))
continue
return ret
except requests.ConnectionError as err:
LOG.debug("Connection error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
continue
time.sleep(self.delay)
msg = (_('%(times) faild in a row') % {'times': j})
raise jexc.JDSSRESTProxyException(host=url, reason=msg)
def pool_request(self, request_method, req, json_data=None):
url = ""
for j in range(self.retry_n):
for i in range(len(self.hosts)):
host = self.hosts[self.active_host]
url = self._get_pool_url(host) + req
LOG.debug(
"sending pool request of type %(type)s to %(url)s "
"attempt: %(num)s.",
{'type': request_method,
'url': url,
'num': j})
if json_data is not None:
LOG.debug(
"JovianDSS: Sending data: %s.", str(json_data))
try:
ret = self._request_routine(url, request_method, json_data)
if len(ret) == 0:
self.active_host = ((self.active_host + 1)
% len(self.hosts))
continue
return ret
except requests.ConnectionError as err:
LOG.debug("Connection error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
continue
time.sleep(int(self.delay))
msg = (_('%(times) faild in a row') % {'times': j})
raise jexc.JDSSRESTProxyException(host=url, reason=msg)
def _request_routine(self, url, request_method, json_data=None):
ret = None
for i in range(3):
ret = dict()
try:
response_obj = requests.request(request_method,
auth=self.auth,
url=url,
headers=self.header,
data=json.dumps(json_data),
verify=self.verify)
LOG.debug('response code: %s', response_obj.status_code)
LOG.debug('response data: %s', response_obj.text)
ret['code'] = response_obj.status_code
if '{' in response_obj.text and '}' in response_obj.text:
if "error" in response_obj.text:
ret["error"] = json.loads(response_obj.text)["error"]
else:
ret["error"] = None
if "data" in response_obj.text:
ret["data"] = json.loads(response_obj.text)["data"]
else:
ret["data"] = None
if ret["code"] == 500:
if ret["error"] is not None:
if (("errno" in ret["error"]) and
("class" in ret["error"])):
if (ret["error"]["class"] ==
"opene.tools.scstadmin.ScstAdminError"):
LOG.debug("ScstAdminError %(code)d %(msg)s", {
"code": ret["error"]["errno"],
"msg": ret["error"]["message"]})
continue
if (ret["error"]["class"] ==
"exceptions.OSError"):
LOG.debug("OSError %(code)d %(msg)s", {
"code": ret["error"]["errno"],
"msg": ret["error"]["message"]})
continue
break
except requests.HTTPError as err:
LOG.debug("HTTP parsing error %s", err)
self.active_host = (self.active_host + 1) % len(self.hosts)
return ret
def get_active_host(self):
return self.hosts[self.active_host]
| true | true |
f73053fd0a165b1b4054dc4e1336ef1364710f9d | 2,296 | py | Python | Experiments/ST_MGCN/deprecated/didi_trial.py | TempAnonymous/Context_Analysis | bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e | [
"MIT"
] | 28 | 2020-02-28T03:16:43.000Z | 2022-03-31T07:24:47.000Z | Experiments/ST_MGCN/deprecated/didi_trial.py | TempAnonymous/Context_Analysis | bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e | [
"MIT"
] | 8 | 2020-06-30T09:34:56.000Z | 2022-01-17T12:20:28.000Z | Experiments/ST_MGCN/deprecated/didi_trial.py | TempAnonymous/Context_Analysis | bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e | [
"MIT"
] | 13 | 2020-06-04T09:47:36.000Z | 2022-02-25T09:50:52.000Z | import os
import warnings
warnings.filterwarnings("ignore")
shared_params_st_mgcn = ('python ST_MGCN_Obj.py '
'--Dataset DiDi '
'--CT 6 '
'--PT 7 '
'--TT 4 '
'--LSTMUnits 64 '
'--LSTMLayers 3 '
'--DataRange All '
'--TrainDays All '
'--TC 0.65 '
'--TD 7500 '
'--TI 30 '
'--Epoch 10000 '
'--Train True '
'--lr 1e-4 '
'--patience 0.1 '
'--ESlength 100 '
'--BatchSize 16 '
'--MergeWay sum '
'--Device 1 ')
if __name__ == "__main__":
"""
Multiple Graphs
"""
# Chengdu
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 1')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 3')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 6')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 12')
# Xian
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 1')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 3')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 6')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 12')
| 45.019608 | 98 | 0.434669 | import os
import warnings
warnings.filterwarnings("ignore")
shared_params_st_mgcn = ('python ST_MGCN_Obj.py '
'--Dataset DiDi '
'--CT 6 '
'--PT 7 '
'--TT 4 '
'--LSTMUnits 64 '
'--LSTMLayers 3 '
'--DataRange All '
'--TrainDays All '
'--TC 0.65 '
'--TD 7500 '
'--TI 30 '
'--Epoch 10000 '
'--Train True '
'--lr 1e-4 '
'--patience 0.1 '
'--ESlength 100 '
'--BatchSize 16 '
'--MergeWay sum '
'--Device 1 ')
if __name__ == "__main__":
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 1')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 3')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 6')
os.system(shared_params_st_mgcn + ' --City Chengdu --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 12')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 1')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 3')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 6')
os.system(shared_params_st_mgcn + ' --City Xian --K 1 --L 1 '
' --Graph Distance-Correlation-Interaction --MergeIndex 12')
| true | true |
f73054605f8bd8b1ec1a6f2b7c0863270a33ba11 | 1,022 | py | Python | setup.py | TaruniSurampally/testpatrolev | 7c7fe1cad2967e0be84ca74b9a200ae2fde356db | [
"Apache-2.0"
] | null | null | null | setup.py | TaruniSurampally/testpatrolev | 7c7fe1cad2967e0be84ca74b9a200ae2fde356db | [
"Apache-2.0"
] | null | null | null | setup.py | TaruniSurampally/testpatrolev | 7c7fe1cad2967e0be84ca74b9a200ae2fde356db | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 ATT Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
| 32.967742 | 69 | 0.749511 |
import setuptools
import multiprocessing
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
| true | true |
f73055f4dfa87a425c2bda83e8e320add863b887 | 490 | py | Python | backend/src/myCU_App/migrations/0001_initial.py | citz73/myCUProject | afad36d6cf072e44d4707860496a023053d34789 | [
"MIT"
] | 1 | 2020-03-15T04:27:30.000Z | 2020-03-15T04:27:30.000Z | backend/src/myCU_App/migrations/0001_initial.py | citz73/myCUSide_Project | afad36d6cf072e44d4707860496a023053d34789 | [
"MIT"
] | null | null | null | backend/src/myCU_App/migrations/0001_initial.py | citz73/myCUSide_Project | afad36d6cf072e44d4707860496a023053d34789 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-03-01 01:09
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyModelTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('col', models.CharField(max_length=100)),
],
),
]
| 22.272727 | 114 | 0.577551 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyModelTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('col', models.CharField(max_length=100)),
],
),
]
| true | true |
f73056d77fb654514f04a7ff33b8c84ed4722dee | 1,732 | py | Python | data_postprocessing_10.py | KokBob/InitProject | 63b7cefb9a130118db9ff5405c5dd87bbe34e9f3 | [
"BSD-2-Clause"
] | null | null | null | data_postprocessing_10.py | KokBob/InitProject | 63b7cefb9a130118db9ff5405c5dd87bbe34e9f3 | [
"BSD-2-Clause"
] | null | null | null | data_postprocessing_10.py | KokBob/InitProject | 63b7cefb9a130118db9ff5405c5dd87bbe34e9f3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
20181010
ciklaminima
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import _dataPostprLib_ as lib
import seaborn as sns
import importlib
#%%
sns.set()
#sns.set_context("poster")
sns.set_context("paper")
#sns.color_palette("Paired")
seq_col_brew = sns.color_palette('hls', 12)
sns.set_palette(seq_col_brew)
plt.close('all')
path_glob = r'U:\projects\0005_Moventas_RCA\40_measurement'
test_bench_name = ['Data_test_run_63526_PPH-5700', 'Data_test_run_63527_PPH-5700']
#%%
path_test_bench_i = path_glob + '\\' + test_bench_name[0]
path_meas = os.listdir(path_test_bench_i)
#%%
i = 0
lc_repos = []
for lc in path_meas:
#load_collection = path_meas[0]
load_collection = lc
#load_collection = path_meas[-1]
path_mea_i = path_test_bench_i + '\\' + load_collection
meas_i = os.listdir(path_mea_i)
data_repos = []
for mf in meas_i:
h_,r_,freq_,name_ = lib.catch_mea(mf)
mea_file = path_mea_i + '\\' + mf
data_i = pd.read_csv(mea_file,sep=';',header=3, skiprows = [4])
t_i = lib.time_vector(freq_,data_i)
mea_dict = {'data': data_i,
't': t_i,
'name': name_,
'load': load_collection}
data_repos.append(mea_dict)
# lib.plot_Torque_Temp_pls1(data_repos)
# lib.plot_Torque_Temp_pls2(data_repos)
lib.plot_Torque_Temp_pls(data_repos)
lc_repos.append(data_repos)
# data_repos_actual = data_repos[i]
#%%
# lib.plot_Torque_Temp_pls1(data_repos)
# lib.plot_Torque_Temp_pls2(data_repos)
# lib.plot_Torque_Temp_pls(data_repos)
# i += 1 | 28.866667 | 83 | 0.646074 |
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import _dataPostprLib_ as lib
import seaborn as sns
import importlib
sns.set()
sns.set_context("paper")
seq_col_brew = sns.color_palette('hls', 12)
sns.set_palette(seq_col_brew)
plt.close('all')
path_glob = r'U:\projects\0005_Moventas_RCA\40_measurement'
test_bench_name = ['Data_test_run_63526_PPH-5700', 'Data_test_run_63527_PPH-5700']
path_test_bench_i = path_glob + '\\' + test_bench_name[0]
path_meas = os.listdir(path_test_bench_i)
i = 0
lc_repos = []
for lc in path_meas:
load_collection = lc
path_mea_i = path_test_bench_i + '\\' + load_collection
meas_i = os.listdir(path_mea_i)
data_repos = []
for mf in meas_i:
h_,r_,freq_,name_ = lib.catch_mea(mf)
mea_file = path_mea_i + '\\' + mf
data_i = pd.read_csv(mea_file,sep=';',header=3, skiprows = [4])
t_i = lib.time_vector(freq_,data_i)
mea_dict = {'data': data_i,
't': t_i,
'name': name_,
'load': load_collection}
data_repos.append(mea_dict)
lib.plot_Torque_Temp_pls(data_repos)
lc_repos.append(data_repos)
| true | true |
f73057eeee7582782ed1d122e1834de489b95405 | 698 | py | Python | github_stats/cli.py | mrlesmithjr/python-github-stats | 5e9237b8caf372b1b0cb791e593f8209f1d19204 | [
"MIT"
] | null | null | null | github_stats/cli.py | mrlesmithjr/python-github-stats | 5e9237b8caf372b1b0cb791e593f8209f1d19204 | [
"MIT"
] | 6 | 2021-04-23T12:45:04.000Z | 2021-08-08T01:07:41.000Z | github_stats/cli.py | mrlesmithjr/python-github-stats | 5e9237b8caf372b1b0cb791e593f8209f1d19204 | [
"MIT"
] | null | null | null | """Console script for python-github-stats."""
import argparse
import os
def cli_args():
"""Parse CLI arguments."""
parser = argparse.ArgumentParser(description="Manage GitHub via API.")
parser.add_argument(
"action", help="Define action to take.", choices=["user-attrs", "user-repos"]
)
parser.add_argument(
"--netrcfile",
help="Path to Netrc file",
default=os.path.join(os.path.expanduser("~"), ".netrc"),
)
parser.add_argument("--token", help="Your GitHub API private token.")
parser.add_argument(
"--url", help="GitHub API url", default="https://api.github.com"
)
args = parser.parse_args()
return args
| 24.068966 | 85 | 0.627507 |
import argparse
import os
def cli_args():
parser = argparse.ArgumentParser(description="Manage GitHub via API.")
parser.add_argument(
"action", help="Define action to take.", choices=["user-attrs", "user-repos"]
)
parser.add_argument(
"--netrcfile",
help="Path to Netrc file",
default=os.path.join(os.path.expanduser("~"), ".netrc"),
)
parser.add_argument("--token", help="Your GitHub API private token.")
parser.add_argument(
"--url", help="GitHub API url", default="https://api.github.com"
)
args = parser.parse_args()
return args
| true | true |
f730580ba95c7eaadd448112b44a1b8b774609d7 | 1,012 | py | Python | application.py | WengChaoxi/flask-mvc | 250a0c5811745f674b45e39262bc81c92756ce0c | [
"MIT"
] | 1 | 2020-10-25T15:15:01.000Z | 2020-10-25T15:15:01.000Z | application.py | WengChaoxi/flask-mvc | 250a0c5811745f674b45e39262bc81c92756ce0c | [
"MIT"
] | null | null | null | application.py | WengChaoxi/flask-mvc | 250a0c5811745f674b45e39262bc81c92756ce0c | [
"MIT"
] | null | null | null | # coding: utf-8
from flask import Flask as FlaskBase
from flask_sqlalchemy import SQLAlchemy
from flask_apscheduler import APScheduler
import os
from jobs.tasks.timer import SchedulerConfig
from common.libs.utils import correctPath
db = SQLAlchemy()
scheduler = APScheduler() # 定时任务
class Flask(FlaskBase):
def __init__(self, import_name, static_folder, template_folder, root_path):
super(Flask, self).__init__(import_name, static_folder=static_folder, template_folder=template_folder, root_path=root_path)
self.config.from_pyfile(correctPath('config/config.py'))
db.init_app(self)
self.config.from_object(SchedulerConfig())
scheduler.init_app(self)
scheduler.start()
static_path = correctPath('web/static')
templates_path = correctPath('web/templates')
app = Flask(__name__, static_folder=static_path, template_folder=templates_path, root_path=os.getcwd())
# 解决跨域问题
# from flask_cors import CORS
# CORS(app, supports_credentials = True)
| 33.733333 | 131 | 0.759881 |
from flask import Flask as FlaskBase
from flask_sqlalchemy import SQLAlchemy
from flask_apscheduler import APScheduler
import os
from jobs.tasks.timer import SchedulerConfig
from common.libs.utils import correctPath
db = SQLAlchemy()
scheduler = APScheduler()
class Flask(FlaskBase):
def __init__(self, import_name, static_folder, template_folder, root_path):
super(Flask, self).__init__(import_name, static_folder=static_folder, template_folder=template_folder, root_path=root_path)
self.config.from_pyfile(correctPath('config/config.py'))
db.init_app(self)
self.config.from_object(SchedulerConfig())
scheduler.init_app(self)
scheduler.start()
static_path = correctPath('web/static')
templates_path = correctPath('web/templates')
app = Flask(__name__, static_folder=static_path, template_folder=templates_path, root_path=os.getcwd())
| true | true |
f73058b87fae3279f8d290c4759bd446c990a5a0 | 437 | py | Python | app/model/tables.py | fabiomvieira/users-flask | 3e57c673b51e2f1cbb6f32a70ddd2c90cd86c1cb | [
"MIT"
] | null | null | null | app/model/tables.py | fabiomvieira/users-flask | 3e57c673b51e2f1cbb6f32a70ddd2c90cd86c1cb | [
"MIT"
] | null | null | null | app/model/tables.py | fabiomvieira/users-flask | 3e57c673b51e2f1cbb6f32a70ddd2c90cd86c1cb | [
"MIT"
] | null | null | null | from app import app, db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, autoincrement = True, primary_key = True)
name = db.Column(db.String(100))
age = db.Column(db.Integer)
email = db.Column(db.String(30))
phone = db.Column(db.Integer)
def __init__(self, name, age, email, phone):
self.name = name
self.age = age
self.email= email
self.phone = phone | 29.133333 | 72 | 0.622426 | from app import app, db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, autoincrement = True, primary_key = True)
name = db.Column(db.String(100))
age = db.Column(db.Integer)
email = db.Column(db.String(30))
phone = db.Column(db.Integer)
def __init__(self, name, age, email, phone):
self.name = name
self.age = age
self.email= email
self.phone = phone | true | true |
f7305921f05bca4dc553d0619e616ffdbebb8450 | 7,712 | py | Python | Prototype/template.py | TalaatHarb/PredictOceanHealth | 998717a8b6eab59dbedd7b9ffdd86a05b03ec079 | [
"MIT"
] | null | null | null | Prototype/template.py | TalaatHarb/PredictOceanHealth | 998717a8b6eab59dbedd7b9ffdd86a05b03ec079 | [
"MIT"
] | null | null | null | Prototype/template.py | TalaatHarb/PredictOceanHealth | 998717a8b6eab59dbedd7b9ffdd86a05b03ec079 | [
"MIT"
] | null | null | null | #Import libraries for doing image analysis
from skimage.io import imread
from skimage.transform import resize
from sklearn.ensemble import RandomForestClassifier as RF
import glob
import os
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
from matplotlib import colors
from pylab import cm
from skimage import segmentation
from skimage.morphology import watershed
from skimage import measure
from skimage import morphology
import numpy as np
import pandas as pd
from scipy import ndimage
from skimage.feature import peak_local_max
# make graphics inline
#%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
def getLargestRegion(props, labelmap, imagethres):
    """Return the region in props with the largest filled area.

    Candidates whose pixels are less than 50% nonzero in the thresholded
    image are ignored.  Returns None when no region qualifies.
    """
    best = None
    for candidate in props:
        # fraction of this region's pixels that survived thresholding
        nonzero_fraction = sum(imagethres[labelmap == candidate.label]) * 1.0 / candidate.area
        if nonzero_fraction < 0.50:
            continue
        # keep the candidate with the biggest filled area seen so far
        if best is None or best.filled_area < candidate.filled_area:
            best = candidate
    return best
def getMinorMajorRatio(image):
    """Return the minor/major axis ratio of the largest region in image.

    The image is thresholded at its mean (below-mean pixels become
    foreground 1.0 — the plankton images are dark-on-light), dilated,
    labelled, and the largest mostly-nonzero region is measured.
    Returns 0.0 when segmentation fails or the major axis length is zero.
    """
    image = image.copy()
    # Create the thresholded image to eliminate some of the background
    imagethr = np.where(image > np.mean(image), 0., 1.0)
    # Dilate to merge nearby foreground fragments before labelling
    imdilated = morphology.dilation(imagethr, np.ones((4, 4)))
    # Label connected components, then mask the labels back onto the
    # un-dilated foreground so region stats use the original pixels
    label_list = measure.label(imdilated)
    label_list = imagethr * label_list
    label_list = label_list.astype(int)
    region_list = measure.regionprops(label_list)
    maxregion = getLargestRegion(region_list, label_list, imagethr)
    # Guard against cases where the segmentation fails (no region found)
    # or the region is degenerate (zero major axis) by providing zeros.
    # The original code re-tested "maxregion is None" inside this branch,
    # which was dead code; the single guard below is equivalent.
    ratio = 0.0
    if maxregion is not None and maxregion.major_axis_length != 0.0:
        ratio = maxregion.minor_axis_length * 1.0 / maxregion.major_axis_length
    return ratio
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi-class version of the Logarithmic Loss metric.

    https://www.kaggle.com/wiki/MultiClassLogLoss

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True class labels, integers in [0, n_classes - 1).
    y_pred : array, shape = [n_samples, n_classes]
        Predicted class probabilities per sample.

    Returns
    -------
    loss : float
    """
    # clip away exact zeros/ones so the logarithm stays finite
    probs = np.clip(y_pred, eps, 1 - eps)
    # renormalize each row so the class probabilities sum to 1
    probs = probs / probs.sum(axis=1)[:, np.newaxis]
    # one-hot encode the true labels
    n_samples = probs.shape[0]
    onehot = np.zeros(y_pred.shape)
    onehot[np.arange(n_samples), y_true.astype(int)] = 1
    # average negative log-likelihood of the true class
    return -np.sum(onehot * np.log(probs)) / n_samples
# get the classnames from the directory structure
competition_data = 'H:\\Kaggle\\Predict_ocean_health\\'
directory_names = list(set(glob.glob(os.path.join(competition_data,"train", "*"))\
).difference(set(glob.glob(os.path.join(competition_data,"train","*.*")))))
# Rescale the images and create the combined metrics and training labels.
# (Python 2 script: uses print statements and integer division below.)
# First pass: count the total number of training images so the feature
# matrix can be allocated up front.
numberofImages = 0
for folder in directory_names:
    for fileNameDir in os.walk(folder):
        for fileName in fileNameDir[2]:
            # Only read in the images
            if fileName[-4:] != ".jpg":
                continue
            numberofImages += 1
# We'll rescale the images to be 25x25
maxPixel = 25
imageSize = maxPixel * maxPixel
num_rows = numberofImages # one row for each image in the training dataset
num_add_features = 1 # Number of additional features
num_features = imageSize + num_add_features
# X is the feature vector with one row of features per image
# consisting of the pixel values and our metric
X = np.zeros((num_rows, num_features), dtype=float)
# y is the numeric class label
y = np.zeros((num_rows))
files = []
# Generate training data
i = 0
label = 0
# List of string of class names
namesClasses = list()
print "Reading images"
# Navigate through the list of directories
for folder in directory_names:
    # Append the string class name for each class.
    # NOTE(review): os.pathsep is the PATH-list separator (';'/':'), not the
    # directory separator; os.sep was probably intended here -- verify.
    currentClass = folder.split(os.pathsep)[-1]
    namesClasses.append(currentClass)
    for fileNameDir in os.walk(folder):
        for fileName in fileNameDir[2]:
            # Only read in the images
            if fileName[-4:] != ".jpg":
                continue
            # Read in the images and create the features
            nameFileImage = "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)
            image = imread(nameFileImage, as_grey=True)
            files.append(nameFileImage)
            # Width/length (minor/major axis) ratio of the largest region.
            axisratio = getMinorMajorRatio(image)
            image = resize(image, (maxPixel, maxPixel))
            # Store the rescaled image pixels and the axis ratio
            X[i, 0:imageSize] = np.reshape(image, (1, imageSize))
            X[i, imageSize] = axisratio
            # Store the classlabel
            y[i] = label
            i += 1
            # report progress for each 5% done
            # (the threshold list is recomputed every iteration; cheap,
            # but it could be hoisted out of the loop)
            report = [int((j+1)*num_rows/20.) for j in range(20)]
            if i in report: print np.ceil(i *100.0 / num_rows), "% done"
    label += 1
# Loop through the classes two at a time and compare their distributions of the Width/Length Ratio
#Create a DataFrame object to make subsetting the data on the class
df = pd.DataFrame({"class": y[:], "ratio": X[:, num_features-1]})
f = plt.figure(figsize=(30, 20))
#we suppress zeros and choose a few large classes to better highlight the distributions.
df = df.loc[df["ratio"] > 0]
minimumSize = 20
counts = df["class"].value_counts()
largeclasses = [int(x) for x in list(counts.loc[counts > minimumSize].index)]
# Loop through 40 of the classes, plotting them pairwise in a 4x5 grid.
for j in range(0,40,2):
    # j/2 is Python 2 integer division (subplot index 1..20).
    subfig = plt.subplot(4, 5, j/2 +1)
    # Plot the normalized histograms for two classes
    classind1 = largeclasses[j]
    classind2 = largeclasses[j+1]
    n, bins,p = plt.hist(df.loc[df["class"] == classind1]["ratio"].values,\
         alpha=0.5, bins=[x*0.01 for x in range(100)], \
         label=namesClasses[classind1].split(os.sep)[-1], normed=1)
    n2, bins,p = plt.hist(df.loc[df["class"] == (classind2)]["ratio"].values,\
          alpha=0.5, bins=bins, label=namesClasses[classind2].split(os.sep)[-1],normed=1)
    subfig.set_ylim([0.,10.])
    plt.legend(loc='upper right')
    plt.xlabel("Width/Length Ratio")
print "Training"
# n_estimators is the number of decision trees
# max_features also known as m_try is set to the default value of the square root of the number of features
clf = RF(n_estimators=100, n_jobs=3);
scores = cross_validation.cross_val_score(clf, X, y, cv=5, n_jobs=1);
print "Accuracy of all classes"
print np.mean(scores)
# Per-fold hard predictions for the classification report
# (StratifiedKFold imported as KFold above).
kf = KFold(y, n_folds=5)
y_pred = y * 0
for train, test in kf:
    X_train, X_test, y_train, y_test = X[train,:], X[test,:], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
print classification_report(y, y_pred, target_names=namesClasses)
# Get the probability predictions for computing the log-loss function
kf = KFold(y, n_folds=5)
# prediction probabilities number of samples, by number of classes
y_pred = np.zeros((len(y),len(set(y))))
for train, test in kf:
    X_train, X_test, y_train, y_test = X[train,:], X[test,:], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict_proba(X_test)
loss = multiclass_log_loss(y, y_pred)
print loss | 36.549763 | 108 | 0.678683 |
from skimage.io import imread
from skimage.transform import resize
from sklearn.ensemble import RandomForestClassifier as RF
import glob
import os
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
from matplotlib import colors
from pylab import cm
from skimage import segmentation
from skimage.morphology import watershed
from skimage import measure
from skimage import morphology
import numpy as np
import pandas as pd
from scipy import ndimage
from skimage.feature import peak_local_max
import warnings
warnings.filterwarnings("ignore")
def getLargestRegion(props, labelmap, imagethres):
    """Return the region with the largest filled area among *props*.

    A region only qualifies when at least 50% of its labelled pixels are
    "on" in the thresholded image *imagethres*.  Returns ``None`` when no
    region qualifies (e.g. *props* is empty).
    """
    best = None
    for region in props:
        # Fraction of this region's pixels that survive the threshold.
        coverage = sum(imagethres[labelmap == region.label]) * 1.0 / region.area
        if coverage < 0.50:
            continue
        if best is None or best.filled_area < region.filled_area:
            best = region
    return best
def getMinorMajorRatio(image):
    """Return the minor/major axis ratio of the largest region in *image*.

    The image is thresholded at its mean intensity, dilated to connect
    nearby foreground pixels, labelled, and the largest qualifying region
    (see getLargestRegion) is measured.  Returns 0.0 when no region is
    found or when the region's major axis has zero length.
    """
    image = image.copy()
    # Pixels darker than the mean are treated as foreground (1.0).
    imagethr = np.where(image > np.mean(image), 0., 1.0)
    # Dilate before labelling so nearby fragments merge into one region.
    imdilated = morphology.dilation(imagethr, np.ones((4, 4)))
    label_list = measure.label(imdilated)
    # Mask the labels back onto the un-dilated foreground pixels.
    label_list = imagethr * label_list
    label_list = label_list.astype(int)
    region_list = measure.regionprops(label_list)
    maxregion = getLargestRegion(region_list, label_list, imagethr)
    # The original code re-tested "maxregion is None" inside a branch where
    # it was already known to be non-None; guard once, up front.
    if maxregion is None or maxregion.major_axis_length == 0.0:
        return 0.0
    return maxregion.minor_axis_length * 1.0 / maxregion.major_axis_length
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Compute the multi-class logarithmic loss.

    https://www.kaggle.com/wiki/MultiClassLogLoss

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True class labels, integers in [0, n_classes - 1).
    y_pred : array, shape = [n_samples, n_classes]
        Predicted class probabilities, one row per sample.
    eps : float
        Clipping bound keeping log() away from 0 and 1.

    Returns
    -------
    loss : float
    """
    # Clip probabilities away from 0/1, then renormalise each row to sum to 1.
    probs = np.clip(y_pred, eps, 1 - eps)
    probs = probs / probs.sum(axis=1)[:, np.newaxis]
    n_samples = probs.shape[0]
    # Pick the log-probability assigned to the true class of each sample
    # (equivalent to multiplying by a one-hot matrix and summing).
    true_logs = np.log(probs[np.arange(n_samples), y_true.astype(int)])
    return -1.0 / n_samples * np.sum(true_logs)
# Second copy of the training pipeline (see the commented copy earlier in
# this file); code kept byte-identical apart from documentation.
# Root of the Kaggle competition data (Windows path).
competition_data = 'H:\\Kaggle\\Predict_ocean_health\\'
# Every sub-directory of train/ is one class; entries containing a dot
# (plain files) are removed by the set difference.
directory_names = list(set(glob.glob(os.path.join(competition_data,"train", "*"))\
    ).difference(set(glob.glob(os.path.join(competition_data,"train","*.*")))))
# Count training images so the feature matrix can be pre-allocated.
numberofImages = 0
for folder in directory_names:
    for fileNameDir in os.walk(folder):
        for fileName in fileNameDir[2]:
            # Only .jpg files are training images.
            if fileName[-4:] != ".jpg":
                continue
            numberofImages += 1
# Images are rescaled to 25x25 pixels.
maxPixel = 25
imageSize = maxPixel * maxPixel
num_rows = numberofImages # one row for each image in the training dataset
num_add_features = 1 # Number of additional features
num_features = imageSize + num_add_features
# X is the feature vector with one row of features per image
# consisting of the pixel values and our metric
X = np.zeros((num_rows, num_features), dtype=float)
# y is the numeric class label
y = np.zeros((num_rows))
files = []
# Generate training data
i = 0
label = 0
# List of string of class names
namesClasses = list()
print "Reading images"
# Navigate through the list of directories
for folder in directory_names:
    # Append the string class name for each class.
    # NOTE(review): os.pathsep is the PATH-list separator (';'/':'), not the
    # directory separator; os.sep was probably intended here -- verify.
    currentClass = folder.split(os.pathsep)[-1]
    namesClasses.append(currentClass)
    for fileNameDir in os.walk(folder):
        for fileName in fileNameDir[2]:
            # Only read in the images
            if fileName[-4:] != ".jpg":
                continue
            # Read in the images and create the features
            nameFileImage = "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)
            image = imread(nameFileImage, as_grey=True)
            files.append(nameFileImage)
            axisratio = getMinorMajorRatio(image)
            image = resize(image, (maxPixel, maxPixel))
            # Store the rescaled image pixels and the axis ratio
            X[i, 0:imageSize] = np.reshape(image, (1, imageSize))
            X[i, imageSize] = axisratio
            # Store the classlabel
            y[i] = label
            i += 1
            # report progress for each 5% done
            report = [int((j+1)*num_rows/20.) for j in range(20)]
            if i in report: print np.ceil(i *100.0 / num_rows), "% done"
    label += 1
# Loop through the classes two at a time and compare their distributions of the Width/Length Ratio
#Create a DataFrame object to make subsetting the data on the class
df = pd.DataFrame({"class": y[:], "ratio": X[:, num_features-1]})
f = plt.figure(figsize=(30, 20))
#we suppress zeros and choose a few large classes to better highlight the distributions.
df = df.loc[df["ratio"] > 0]
minimumSize = 20
counts = df["class"].value_counts()
largeclasses = [int(x) for x in list(counts.loc[counts > minimumSize].index)]
# Loop through 40 of the classes
for j in range(0,40,2):
    # j/2 is Python 2 integer division (subplot index 1..20).
    subfig = plt.subplot(4, 5, j/2 +1)
    # Plot the normalized histograms for two classes
    classind1 = largeclasses[j]
    classind2 = largeclasses[j+1]
    n, bins,p = plt.hist(df.loc[df["class"] == classind1]["ratio"].values,\
         alpha=0.5, bins=[x*0.01 for x in range(100)], \
         label=namesClasses[classind1].split(os.sep)[-1], normed=1)
    n2, bins,p = plt.hist(df.loc[df["class"] == (classind2)]["ratio"].values,\
          alpha=0.5, bins=bins, label=namesClasses[classind2].split(os.sep)[-1],normed=1)
    subfig.set_ylim([0.,10.])
    plt.legend(loc='upper right')
    plt.xlabel("Width/Length Ratio")
print "Training"
# n_estimators is the number of decision trees
# max_features also known as m_try is set to the default value of the square root of the number of features
clf = RF(n_estimators=100, n_jobs=3);
scores = cross_validation.cross_val_score(clf, X, y, cv=5, n_jobs=1);
print "Accuracy of all classes"
print np.mean(scores)
# Per-fold hard predictions for the classification report.
kf = KFold(y, n_folds=5)
y_pred = y * 0
for train, test in kf:
    X_train, X_test, y_train, y_test = X[train,:], X[test,:], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
print classification_report(y, y_pred, target_names=namesClasses)
# Get the probability predictions for computing the log-loss function
kf = KFold(y, n_folds=5)
# prediction probabilities number of samples, by number of classes
y_pred = np.zeros((len(y),len(set(y))))
for train, test in kf:
    X_train, X_test, y_train, y_test = X[train,:], X[test,:], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict_proba(X_test)
loss = multiclass_log_loss(y, y_pred)
print loss | false | true |
f7305a0dbb2a8aae8064a95ec3fa52386dab4833 | 37,699 | py | Python | website/venv/lib/python2.7/site-packages/psutil/tests/test_misc.py | wenhuiyang/ARgot | 3fd1eacca6f81a3157649dda95ab427ca1f5efe1 | [
"MIT"
] | 2 | 2017-11-24T12:44:30.000Z | 2020-04-11T17:28:43.000Z | psutil/tests/test_misc.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 6 | 2017-11-10T19:45:18.000Z | 2017-11-12T14:50:42.000Z | psutil/tests/test_misc.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Miscellaneous tests.
"""
import ast
import collections
import contextlib
import errno
import json
import os
import pickle
import socket
import stat
import sys
from psutil import LINUX
from psutil import POSIX
from psutil import WINDOWS
from psutil._common import memoize
from psutil._common import memoize_when_activated
from psutil._common import supports_ipv6
from psutil._common import wrap_numbers
from psutil._compat import PY3
from psutil.tests import APPVEYOR
from psutil.tests import bind_socket
from psutil.tests import bind_unix_socket
from psutil.tests import call_until
from psutil.tests import chdir
from psutil.tests import create_proc_children_pair
from psutil.tests import create_sockets
from psutil.tests import create_zombie_proc
from psutil.tests import DEVNULL
from psutil.tests import get_free_port
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_BATTERY
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import HAS_MEMORY_FULL_INFO
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import HAS_SENSORS_BATTERY
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import import_module_by_path
from psutil.tests import is_namedtuple
from psutil.tests import mock
from psutil.tests import reap_children
from psutil.tests import reload_module
from psutil.tests import retry
from psutil.tests import ROOT_DIR
from psutil.tests import run_test_module_by_name
from psutil.tests import safe_rmpath
from psutil.tests import SCRIPTS_DIR
from psutil.tests import sh
from psutil.tests import tcp_socketpair
from psutil.tests import TESTFN
from psutil.tests import TOX
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import unix_socket_path
from psutil.tests import unix_socketpair
from psutil.tests import wait_for_file
from psutil.tests import wait_for_pid
import psutil
import psutil.tests
# ===================================================================
# --- Misc / generic tests.
# ===================================================================
class TestMisc(unittest.TestCase):
    """Sanity checks for psutil's public surface: reprs of Process and of
    the exception classes, __all__ consistency, the memoize helpers, and
    serializability of the returned namedtuples.
    """
    def test_process__repr__(self, func=repr):
        # Normal process: repr shows pid and name.
        p = psutil.Process()
        r = func(p)
        self.assertIn("psutil.Process", r)
        self.assertIn("pid=%s" % p.pid, r)
        self.assertIn("name=", r)
        self.assertIn(p.name(), r)
        # Zombie process: name() raises, repr says "zombie" and omits name.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.ZombieProcess(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertIn("zombie", r)
            self.assertNotIn("name=", r)
        # Gone process: repr says "terminated" and omits name.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.NoSuchProcess(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertIn("terminated", r)
            self.assertNotIn("name=", r)
        # Access denied: repr simply omits the name.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.AccessDenied(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertNotIn("name=", r)
    def test_process__str__(self):
        # str() goes through the same code path as repr().
        self.test_process__repr__(func=str)
    def test_no_such_process__repr__(self, func=repr):
        self.assertEqual(
            repr(psutil.NoSuchProcess(321)),
            "psutil.NoSuchProcess process no longer exists (pid=321)")
        self.assertEqual(
            repr(psutil.NoSuchProcess(321, name='foo')),
            "psutil.NoSuchProcess process no longer exists (pid=321, "
            "name='foo')")
        self.assertEqual(
            repr(psutil.NoSuchProcess(321, msg='foo')),
            "psutil.NoSuchProcess foo")
    def test_zombie_process__repr__(self, func=repr):
        self.assertEqual(
            repr(psutil.ZombieProcess(321)),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321)")
        self.assertEqual(
            repr(psutil.ZombieProcess(321, name='foo')),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321, name='foo')")
        self.assertEqual(
            repr(psutil.ZombieProcess(321, name='foo', ppid=1)),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321, name='foo', ppid=1)")
        self.assertEqual(
            repr(psutil.ZombieProcess(321, msg='foo')),
            "psutil.ZombieProcess foo")
    def test_access_denied__repr__(self, func=repr):
        self.assertEqual(
            repr(psutil.AccessDenied(321)),
            "psutil.AccessDenied (pid=321)")
        self.assertEqual(
            repr(psutil.AccessDenied(321, name='foo')),
            "psutil.AccessDenied (pid=321, name='foo')")
        self.assertEqual(
            repr(psutil.AccessDenied(321, msg='foo')),
            "psutil.AccessDenied foo")
    def test_timeout_expired__repr__(self, func=repr):
        self.assertEqual(
            repr(psutil.TimeoutExpired(321)),
            "psutil.TimeoutExpired timeout after 321 seconds")
        self.assertEqual(
            repr(psutil.TimeoutExpired(321, pid=111)),
            "psutil.TimeoutExpired timeout after 321 seconds (pid=111)")
        self.assertEqual(
            repr(psutil.TimeoutExpired(321, pid=111, name='foo')),
            "psutil.TimeoutExpired timeout after 321 seconds "
            "(pid=111, name='foo')")
    def test_process__eq__(self):
        # Equality is based on the internal (pid, create_time) identity.
        p1 = psutil.Process()
        p2 = psutil.Process()
        self.assertEqual(p1, p2)
        p2._ident = (0, 0)
        self.assertNotEqual(p1, p2)
        self.assertNotEqual(p1, 'foo')
    def test_process__hash__(self):
        # Two Process instances for the same pid hash identically.
        s = set([psutil.Process(), psutil.Process()])
        self.assertEqual(len(s), 1)
    def test__all__(self):
        # Every public non-module name must be exported via __all__
        # (deprecated names excepted).
        dir_psutil = dir(psutil)
        for name in dir_psutil:
            if name in ('callable', 'error', 'namedtuple', 'tests',
                        'long', 'test', 'NUM_CPUS', 'BOOT_TIME',
                        'TOTAL_PHYMEM'):
                continue
            if not name.startswith('_'):
                try:
                    __import__(name)
                except ImportError:
                    if name not in psutil.__all__:
                        fun = getattr(psutil, name)
                        if fun is None:
                            continue
                        if (fun.__doc__ is not None and
                                'deprecated' not in fun.__doc__.lower()):
                            self.fail('%r not in psutil.__all__' % name)
        # Import 'star' will break if __all__ is inconsistent, see:
        # https://github.com/giampaolo/psutil/issues/656
        # Can't do `from psutil import *` as it won't work on python 3
        # so we simply iterate over __all__.
        for name in psutil.__all__:
            self.assertIn(name, dir_psutil)
    def test_version(self):
        # version_info tuple and __version__ string must agree.
        self.assertEqual('.'.join([str(x) for x in psutil.version_info]),
                         psutil.__version__)
    def test_process_as_dict_no_new_names(self):
        # See https://github.com/giampaolo/psutil/issues/813
        p = psutil.Process()
        p.foo = '1'
        self.assertNotIn('foo', p.as_dict())
    def test_memoize(self):
        @memoize
        def foo(*args, **kwargs):
            "foo docstring"
            calls.append(None)
            return (args, kwargs)
        calls = []
        # no args
        for x in range(2):
            ret = foo()
            expected = ((), {})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 1)
        # with args
        for x in range(2):
            ret = foo(1)
            expected = ((1, ), {})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 2)
        # with args + kwargs
        for x in range(2):
            ret = foo(1, bar=2)
            expected = ((1, ), {'bar': 2})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 3)
        # clear cache
        foo.cache_clear()
        ret = foo()
        expected = ((), {})
        self.assertEqual(ret, expected)
        self.assertEqual(len(calls), 4)
        # docstring must survive the decoration
        self.assertEqual(foo.__doc__, "foo docstring")
    def test_memoize_when_activated(self):
        class Foo:
            @memoize_when_activated
            def foo(self):
                calls.append(None)
        f = Foo()
        calls = []
        # Not activated: every call runs the body.
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 2)
        # activate
        calls = []
        f.foo.cache_activate()
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 1)
        # deactivate
        calls = []
        f.foo.cache_deactivate()
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 2)
    def test_parse_environ_block(self):
        from psutil._common import parse_environ_block
        def k(s):
            # Windows normalizes environment keys to upper case.
            return s.upper() if WINDOWS else s
        self.assertEqual(parse_environ_block("a=1\0"),
                         {k("a"): "1"})
        self.assertEqual(parse_environ_block("a=1\0b=2\0\0"),
                         {k("a"): "1", k("b"): "2"})
        self.assertEqual(parse_environ_block("a=1\0b=\0\0"),
                         {k("a"): "1", k("b"): ""})
        # ignore everything after \0\0
        self.assertEqual(parse_environ_block("a=1\0b=2\0\0c=3\0"),
                         {k("a"): "1", k("b"): "2"})
        # ignore everything that is not an assignment
        self.assertEqual(parse_environ_block("xxx\0a=1\0"), {k("a"): "1"})
        self.assertEqual(parse_environ_block("a=1\0=b=2\0"), {k("a"): "1"})
        # do not fail if the block is incomplete
        self.assertEqual(parse_environ_block("a=1\0b=2"), {k("a"): "1"})
    def test_supports_ipv6(self):
        self.addCleanup(supports_ipv6.cache_clear)
        if supports_ipv6():
            # Each failure mode of socket creation/binding must make the
            # (memoized) helper return False.
            with mock.patch('psutil._common.socket') as s:
                s.has_ipv6 = False
                supports_ipv6.cache_clear()
                assert not supports_ipv6()
            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket',
                            side_effect=socket.error) as s:
                assert not supports_ipv6()
                assert s.called
            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket',
                            side_effect=socket.gaierror) as s:
                assert not supports_ipv6()
                supports_ipv6.cache_clear()
                assert s.called
            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket.bind',
                            side_effect=socket.gaierror) as s:
                assert not supports_ipv6()
                supports_ipv6.cache_clear()
                assert s.called
        else:
            # On hosts without IPv6, binding an IPv6 socket must fail.
            with self.assertRaises(Exception):
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                sock.bind(("::1", 0))
    def test_isfile_strict(self):
        from psutil._common import isfile_strict
        this_file = os.path.abspath(__file__)
        assert isfile_strict(this_file)
        assert not isfile_strict(os.path.dirname(this_file))
        # Permission errors must propagate...
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EPERM, "foo")):
            self.assertRaises(OSError, isfile_strict, this_file)
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EACCES, "foo")):
            self.assertRaises(OSError, isfile_strict, this_file)
        # ...while other stat() errors just mean "not a file".
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EINVAL, "foo")):
            assert not isfile_strict(this_file)
        with mock.patch('psutil._common.stat.S_ISREG', return_value=False):
            assert not isfile_strict(this_file)
    def test_serialization(self):
        # Everything psutil returns should round-trip via json and pickle.
        def check(ret):
            if json is not None:
                json.loads(json.dumps(ret))
            a = pickle.dumps(ret)
            b = pickle.loads(a)
            self.assertEqual(ret, b)
        check(psutil.Process().as_dict())
        check(psutil.virtual_memory())
        check(psutil.swap_memory())
        check(psutil.cpu_times())
        check(psutil.cpu_times_percent(interval=0))
        check(psutil.net_io_counters())
        if LINUX and not os.path.exists('/proc/diskstats'):
            pass
        else:
            if not APPVEYOR:
                check(psutil.disk_io_counters())
        check(psutil.disk_partitions())
        check(psutil.disk_usage(os.getcwd()))
        check(psutil.users())
    def test_setup_script(self):
        # setup.py must be importable and agree on the version number.
        setup_py = os.path.join(ROOT_DIR, 'setup.py')
        module = import_module_by_path(setup_py)
        self.assertRaises(SystemExit, module.setup)
        self.assertEqual(module.get_version(), psutil.__version__)
    def test_ad_on_process_creation(self):
        # We are supposed to be able to instantiate Process also in case
        # of zombie processes or access denied.
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=psutil.AccessDenied) as meth:
            psutil.Process()
            assert meth.called
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=psutil.ZombieProcess(1)) as meth:
            psutil.Process()
            assert meth.called
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=ValueError) as meth:
            with self.assertRaises(ValueError):
                psutil.Process()
            assert meth.called
    def test_sanity_version_check(self):
        # see: https://github.com/giampaolo/psutil/issues/564
        with mock.patch(
                "psutil._psplatform.cext.version", return_value="0.0.0"):
            with self.assertRaises(ImportError) as cm:
                reload_module(psutil)
            self.assertIn("version conflict", str(cm.exception).lower())
# ===================================================================
# --- Tests for wrap_numbers() function.
# ===================================================================
nt = collections.namedtuple('foo', 'a b c')  # dummy 3-field counter tuple used by TestWrapNumbers
class TestWrapNumbers(unittest.TestCase):
    """Tests for psutil._common.wrap_numbers(), which compensates for
    kernel counters that wrap around (restart from a lower value) between
    two consecutive calls.  cache_info() returns a 3-tuple of dicts:
    (last input, per-counter offsets, counters seen wrapping).
    """
    def setUp(self):
        wrap_numbers.cache_clear()
    tearDown = setUp
    def test_first_call(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
    def test_input_hasnt_changed(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
    def test_increase_but_no_wrap(self):
        # Monotonically increasing values pass through untouched.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(10, 15, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
    def test_wrap(self):
        # let's say 100 is the threshold
        input = {'disk1': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # first wrap restarts from 10
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it remains the same
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it goes up
        input = {'disk1': nt(100, 100, 90)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 190)})
        # then it wraps again
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # and remains the same
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # now wrap another num
        input = {'disk1': nt(50, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(150, 100, 210)})
        # and again
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})
        # keep it the same
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})
    def test_changing_keys(self):
        # Emulate a case where the second call to disk_io()
        # (or whatever) provides a new disk, then the new disk
        # disappears on the third call.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(5, 5, 5),
                 'disk2': nt(7, 7, 7)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(8, 8, 8)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
    def test_changing_keys_w_wrap(self):
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # disk 2 wraps
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(50, 50, 50),
                          'disk2': nt(100, 100, 110)})
        # disk 2 disappears
        input = {'disk1': nt(50, 50, 50)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # then it appears again; the old wrap is supposed to be
        # gone.
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # remains the same
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # and then wraps again
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(50, 50, 50),
                          'disk2': nt(100, 100, 110)})
    def test_real_data(self):
        d = {'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
             'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
             'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
             'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        # decrease this ↓
        d = {'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
             'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
             'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
             'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
        out = wrap_numbers(d, 'disk_io')
        # wrapped value = old value (300) + new value (100)
        self.assertEqual(out['nvme0n1'][0], 400)
    # --- cache tests
    def test_cache_first_call(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(cache[1], {'disk_io': {}})
        self.assertEqual(cache[2], {'disk_io': {}})
    def test_cache_call_twice(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(10, 10, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
        self.assertEqual(cache[2], {'disk_io': {}})
    def test_cache_wrap(self):
        # let's say 100 is the threshold
        input = {'disk1': nt(100, 100, 100)}
        wrap_numbers(input, 'disk_io')
        # first wrap restarts from 10
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 100}})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
        # Helper asserting that the offsets/wrapped-counters caches
        # are unchanged.
        def assert_():
            cache = wrap_numbers.cache_info()
            self.assertEqual(
                cache[1],
                {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0,
                             ('disk1', 2): 100}})
            self.assertEqual(cache[2],
                             {'disk_io': {'disk1': set([('disk1', 2)])}})
        # then it remains the same
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()
        # then it goes up
        input = {'disk1': nt(100, 100, 90)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()
        # then it wraps again
        input = {'disk1': nt(100, 100, 20)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 190}})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
    def test_cache_changing_keys(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(5, 5, 5),
                 'disk2': nt(7, 7, 7)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
        self.assertEqual(cache[2], {'disk_io': {}})
    def test_cache_clear(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        wrap_numbers(input, 'disk_io')
        wrap_numbers.cache_clear('disk_io')
        self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {}))
        # cache_clear() must tolerate unknown / already-cleared names.
        wrap_numbers.cache_clear('disk_io')
        wrap_numbers.cache_clear('?!?')
    @unittest.skipIf(
        not psutil.disk_io_counters() or not psutil.net_io_counters(),
        "no disks or NICs available")
    def test_cache_clear_public_apis(self):
        psutil.disk_io_counters()
        psutil.net_io_counters()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.disk_io_counters', cache)
            self.assertIn('psutil.net_io_counters', cache)
        psutil.disk_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.net_io_counters', cache)
            self.assertNotIn('psutil.disk_io_counters', cache)
        psutil.net_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        self.assertEqual(caches, ({}, {}, {}))
# ===================================================================
# --- Example script tests
# ===================================================================
@unittest.skipIf(TOX, "can't test on TOX")
# See: https://travis-ci.org/giampaolo/psutil/jobs/295224806
@unittest.skipIf(TRAVIS and not
                 os.path.exists(os.path.join(SCRIPTS_DIR, 'free.py')),
                 "can't locate scripts directory")
class TestScripts(unittest.TestCase):
    """Tests for scripts in the "scripts" directory."""
    @staticmethod
    def assert_stdout(exe, args=None, **kwds):
        # Run a script through the current interpreter and require
        # non-empty stdout; an AccessDenied error message is tolerated
        # (and returned) since some scripts need privileges.
        exe = '"%s"' % os.path.join(SCRIPTS_DIR, exe)
        if args:
            exe = exe + ' ' + args
        try:
            out = sh(sys.executable + ' ' + exe, **kwds).strip()
        except RuntimeError as err:
            if 'AccessDenied' in str(err):
                return str(err)
            else:
                raise
        assert out, out
        return out
    @staticmethod
    def assert_syntax(exe, args=None):
        # Parse without executing: make sure the script at least compiles.
        exe = os.path.join(SCRIPTS_DIR, exe)
        if PY3:
            f = open(exe, 'rt', encoding='utf8')
        else:
            f = open(exe, 'rt')
        with f:
            src = f.read()
        ast.parse(src)
    def test_coverage(self):
        # make sure all example scripts have a test method defined
        meths = dir(self)
        for name in os.listdir(SCRIPTS_DIR):
            if name.endswith('.py'):
                if 'test_' + os.path.splitext(name)[0] not in meths:
                    # self.assert_stdout(name)
                    self.fail('no test defined for %r script'
                              % os.path.join(SCRIPTS_DIR, name))
    @unittest.skipIf(not POSIX, "POSIX only")
    def test_executable(self):
        # All scripts must have the owner-executable bit set.
        for name in os.listdir(SCRIPTS_DIR):
            if name.endswith('.py'):
                path = os.path.join(SCRIPTS_DIR, name)
                if not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]:
                    self.fail('%r is not executable' % path)
    def test_disk_usage(self):
        self.assert_stdout('disk_usage.py')
    def test_free(self):
        self.assert_stdout('free.py')
    def test_meminfo(self):
        self.assert_stdout('meminfo.py')
    def test_procinfo(self):
        self.assert_stdout('procinfo.py', args=str(os.getpid()))
    # can't find users on APPVEYOR or TRAVIS
    @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(),
                     "unreliable on APPVEYOR or TRAVIS")
    def test_who(self):
        self.assert_stdout('who.py')
    def test_ps(self):
        self.assert_stdout('ps.py')
    def test_pstree(self):
        self.assert_stdout('pstree.py')
    def test_netstat(self):
        self.assert_stdout('netstat.py')
    # permission denied on travis
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_ifconfig(self):
        self.assert_stdout('ifconfig.py')
    @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
    def test_pmap(self):
        self.assert_stdout('pmap.py', args=str(os.getpid()))
    @unittest.skipIf(not HAS_MEMORY_FULL_INFO, "not supported")
    def test_procsmem(self):
        self.assert_stdout('procsmem.py', stderr=DEVNULL)
    # Interactive / long-running scripts are only syntax-checked.
    def test_killall(self):
        self.assert_syntax('killall.py')
    def test_nettop(self):
        self.assert_syntax('nettop.py')
    def test_top(self):
        self.assert_syntax('top.py')
    def test_iotop(self):
        self.assert_syntax('iotop.py')
    def test_pidof(self):
        output = self.assert_stdout('pidof.py', args=psutil.Process().name())
        self.assertIn(str(os.getpid()), output)
    @unittest.skipIf(not WINDOWS, "WINDOWS only")
    def test_winservices(self):
        self.assert_stdout('winservices.py')
    def test_cpu_distribution(self):
        self.assert_syntax('cpu_distribution.py')
    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_temperatures(self):
        self.assert_stdout('temperatures.py')
    @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_fans(self):
        self.assert_stdout('fans.py')
    @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
    @unittest.skipIf(not HAS_BATTERY, "no battery")
    def test_battery(self):
        self.assert_stdout('battery.py')
    def test_sensors(self):
        self.assert_stdout('sensors.py')
# ===================================================================
# --- Unit tests for test utilities.
# ===================================================================
class TestRetryDecorator(unittest.TestCase):
    """Tests for the @retry test-utility decorator.

    time.sleep is mocked out in every test so no real waiting occurs;
    its call count tells us how many retries actually happened.
    """

    @mock.patch('time.sleep')
    def test_retry_success(self, sleep):
        # Fail 3 times out of 5 allowed attempts; the decorated
        # function must eventually return its value.
        @retry(retries=5, interval=1, logfun=None)
        def target():
            while pending:
                pending.pop()
                1 / 0
            return 1

        pending = list(range(3))
        self.assertEqual(target(), 1)
        self.assertEqual(sleep.call_count, 3)

    @mock.patch('time.sleep')
    def test_retry_failure(self, sleep):
        # Fail 6 times with only 5 retries allowed; the last exception
        # must propagate.
        @retry(retries=5, interval=1, logfun=None)
        def target():
            while pending:
                pending.pop()
                1 / 0
            return 1

        pending = list(range(6))
        self.assertRaises(ZeroDivisionError, target)
        self.assertEqual(sleep.call_count, 5)

    @mock.patch('time.sleep')
    def test_exception_arg(self, sleep):
        # Only ValueError is retried, so a TypeError propagates at once
        # without any sleeping.
        @retry(exception=ValueError, interval=1)
        def target():
            raise TypeError

        self.assertRaises(TypeError, target)
        self.assertEqual(sleep.call_count, 0)

    @mock.patch('time.sleep')
    def test_no_interval_arg(self, sleep):
        # With interval=None, sleep must never be called.
        @retry(retries=5, interval=None, logfun=None)
        def target():
            1 / 0

        self.assertRaises(ZeroDivisionError, target)
        self.assertEqual(sleep.call_count, 0)

    @mock.patch('time.sleep')
    def test_retries_arg(self, sleep):
        # retries=5 means exactly 5 sleeps before giving up.
        @retry(retries=5, interval=1, logfun=None)
        def target():
            1 / 0

        self.assertRaises(ZeroDivisionError, target)
        self.assertEqual(sleep.call_count, 5)

    @mock.patch('time.sleep')
    def test_retries_and_timeout_args(self, sleep):
        # Passing both retries= and timeout= raises ValueError.
        self.assertRaises(ValueError, retry, retries=5, timeout=1)
class TestSyncTestUtils(unittest.TestCase):
    """Tests for the wait_for_* / call_until synchronization helpers."""

    def tearDown(self):
        safe_rmpath(TESTFN)

    def test_wait_for_pid(self):
        # Our own PID is found immediately.
        wait_for_pid(os.getpid())
        # A PID guaranteed not to exist raises once the retry loop is
        # exhausted (mocked to give up immediately).
        missing = 99999 + max(psutil.pids())
        with mock.patch('psutil.tests.retry.__iter__',
                        return_value=iter([0])):
            self.assertRaises(psutil.NoSuchProcess, wait_for_pid, missing)

    def test_wait_for_file(self):
        with open(TESTFN, 'w') as fh:
            fh.write('foo')
        wait_for_file(TESTFN)
        # The file is deleted by default once it shows up.
        assert not os.path.exists(TESTFN)

    def test_wait_for_file_empty(self):
        # An empty file is acceptable when empty=True.
        open(TESTFN, 'w').close()
        wait_for_file(TESTFN, empty=True)
        assert not os.path.exists(TESTFN)

    def test_wait_for_file_no_file(self):
        # With the retry loop exhausted immediately, a missing file
        # raises IOError.
        with mock.patch('psutil.tests.retry.__iter__',
                        return_value=iter([0])):
            self.assertRaises(IOError, wait_for_file, TESTFN)

    def test_wait_for_file_no_delete(self):
        with open(TESTFN, 'w') as fh:
            fh.write('foo')
        wait_for_file(TESTFN, delete=False)
        # delete=False leaves the file in place.
        assert os.path.exists(TESTFN)

    def test_call_until(self):
        self.assertEqual(call_until(lambda: 1, "ret == 1"), 1)
class TestFSTestUtils(unittest.TestCase):
    """Tests for the filesystem test helpers (safe_rmpath, chdir)."""

    def setUp(self):
        safe_rmpath(TESTFN)

    tearDown = setUp

    def test_safe_rmpath(self):
        # A regular file is removed.
        open(TESTFN, 'w').close()
        safe_rmpath(TESTFN)
        assert not os.path.exists(TESTFN)
        # A missing path is not an error.
        safe_rmpath(TESTFN)
        # A directory is removed as well.
        os.mkdir(TESTFN)
        safe_rmpath(TESTFN)
        assert not os.path.exists(TESTFN)
        # Any other OSError must propagate.
        with mock.patch('psutil.tests.os.stat',
                        side_effect=OSError(errno.EINVAL, "")) as m:
            self.assertRaises(OSError, safe_rmpath, TESTFN)
            assert m.called

    def test_chdir(self):
        here = os.getcwd()
        os.mkdir(TESTFN)
        # Inside the context we are in TESTFN; afterwards we are back.
        with chdir(TESTFN):
            self.assertEqual(os.getcwd(), os.path.join(here, TESTFN))
        self.assertEqual(os.getcwd(), here)
class TestProcessUtils(unittest.TestCase):
    """Tests for the process-management test helpers."""

    def test_reap_children(self):
        # reap_children() must terminate the spawned subprocess and
        # clear the module-level bookkeeping containers.
        subp = get_test_subprocess()
        p = psutil.Process(subp.pid)
        assert p.is_running()
        reap_children()
        assert not p.is_running()
        assert not psutil.tests._pids_started
        assert not psutil.tests._subprocesses_started

    def test_create_proc_children_pair(self):
        # p1 is our child and p2 is p1's child (our grandchild), as the
        # ppid() assertions below verify.
        p1, p2 = create_proc_children_pair()
        self.assertNotEqual(p1.pid, p2.pid)
        assert p1.is_running()
        assert p2.is_running()
        children = psutil.Process().children(recursive=True)
        self.assertEqual(len(children), 2)
        self.assertIn(p1, children)
        self.assertIn(p2, children)
        self.assertEqual(p1.ppid(), os.getpid())
        self.assertEqual(p2.ppid(), p1.pid)
        # make sure both of them are cleaned up
        reap_children()
        assert not p1.is_running()
        assert not p2.is_running()
        assert not psutil.tests._pids_started
        assert not psutil.tests._subprocesses_started

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_create_zombie_proc(self):
        # The helper's process must show up with zombie status.
        zpid = create_zombie_proc()
        self.addCleanup(reap_children, recursive=True)
        p = psutil.Process(zpid)
        self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
class TestNetUtils(unittest.TestCase):
    """Tests for the networking test helpers."""

    def test_bind_socket(self):
        # Renamed from `bind_socket`: without the "test_" prefix,
        # unittest discovery never executed this test.
        port = get_free_port()
        with contextlib.closing(bind_socket(addr=('', port))) as s:
            self.assertEqual(s.getsockname()[1], port)

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_bind_unix_socket(self):
        # Stream (default) flavor: the socket file must exist and be a
        # socket inode bound to the requested path.
        with unix_socket_path() as name:
            sock = bind_unix_socket(name)
            with contextlib.closing(sock):
                self.assertEqual(sock.family, socket.AF_UNIX)
                self.assertEqual(sock.type, socket.SOCK_STREAM)
                self.assertEqual(sock.getsockname(), name)
                assert os.path.exists(name)
                assert stat.S_ISSOCK(os.stat(name).st_mode)
        # UDP
        with unix_socket_path() as name:
            sock = bind_unix_socket(name, type=socket.SOCK_DGRAM)
            with contextlib.closing(sock):
                self.assertEqual(sock.type, socket.SOCK_DGRAM)

    def test_tcp_socketpair(self):
        # Renamed from `tcp_tcp_socketpair` (doubled-word typo; without
        # the "test_" prefix unittest discovery never ran this test).
        addr = ("127.0.0.1", get_free_port())
        server, client = tcp_socketpair(socket.AF_INET, addr=addr)
        with contextlib.closing(server):
            with contextlib.closing(client):
                # Ensure they are connected and the positions are
                # correct.
                self.assertEqual(server.getsockname(), addr)
                self.assertEqual(client.getpeername(), addr)
                self.assertNotEqual(client.getsockname(), addr)

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_unix_socketpair(self):
        p = psutil.Process()
        num_fds = p.num_fds()
        assert not p.connections(kind='unix')
        with unix_socket_path() as name:
            server, client = unix_socketpair(name)
            try:
                # Both ends open: 2 extra fds and 2 UNIX connections.
                assert os.path.exists(name)
                assert stat.S_ISSOCK(os.stat(name).st_mode)
                self.assertEqual(p.num_fds() - num_fds, 2)
                self.assertEqual(len(p.connections(kind='unix')), 2)
                self.assertEqual(server.getsockname(), name)
                self.assertEqual(client.getpeername(), name)
            finally:
                client.close()
                server.close()

    def test_create_sockets(self):
        # The helper must provide at least 2 sockets per family/type.
        with create_sockets() as socks:
            fams = collections.defaultdict(int)
            types = collections.defaultdict(int)
            for s in socks:
                fams[s.family] += 1
                # work around http://bugs.python.org/issue30204
                types[s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)] += 1
            self.assertGreaterEqual(fams[socket.AF_INET], 2)
            self.assertGreaterEqual(fams[socket.AF_INET6], 2)
            if POSIX and HAS_CONNECTIONS_UNIX:
                self.assertGreaterEqual(fams[socket.AF_UNIX], 2)
            self.assertGreaterEqual(types[socket.SOCK_STREAM], 2)
            self.assertGreaterEqual(types[socket.SOCK_DGRAM], 2)
class TestOtherUtils(unittest.TestCase):
    """Tests for miscellaneous test utilities."""

    def test_is_namedtuple(self):
        # A real namedtuple instance is recognized...
        point = collections.namedtuple('foo', 'a b c')(1, 2, 3)
        assert is_namedtuple(point)
        # ...while a plain tuple is not.
        assert not is_namedtuple(tuple())
if __name__ == '__main__':
    # Run this module's tests via psutil's test runner.
    run_test_module_by_name(__file__)
| 36.353905 | 79 | 0.577257 |
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import collections
import contextlib
import errno
import json
import os
import pickle
import socket
import stat
import sys
from psutil import LINUX
from psutil import POSIX
from psutil import WINDOWS
from psutil._common import memoize
from psutil._common import memoize_when_activated
from psutil._common import supports_ipv6
from psutil._common import wrap_numbers
from psutil._compat import PY3
from psutil.tests import APPVEYOR
from psutil.tests import bind_socket
from psutil.tests import bind_unix_socket
from psutil.tests import call_until
from psutil.tests import chdir
from psutil.tests import create_proc_children_pair
from psutil.tests import create_sockets
from psutil.tests import create_zombie_proc
from psutil.tests import DEVNULL
from psutil.tests import get_free_port
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_BATTERY
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import HAS_MEMORY_FULL_INFO
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import HAS_SENSORS_BATTERY
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import import_module_by_path
from psutil.tests import is_namedtuple
from psutil.tests import mock
from psutil.tests import reap_children
from psutil.tests import reload_module
from psutil.tests import retry
from psutil.tests import ROOT_DIR
from psutil.tests import run_test_module_by_name
from psutil.tests import safe_rmpath
from psutil.tests import SCRIPTS_DIR
from psutil.tests import sh
from psutil.tests import tcp_socketpair
from psutil.tests import TESTFN
from psutil.tests import TOX
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import unix_socket_path
from psutil.tests import unix_socketpair
from psutil.tests import wait_for_file
from psutil.tests import wait_for_pid
import psutil
import psutil.tests
# ===================================================================
# --- Misc / generic tests.
# ===================================================================
class TestMisc(unittest.TestCase):
    """Miscellaneous sanity tests: reprs, exception classes, memoize
    decorators, serialization, __all__ consistency, etc."""

    def test_process__repr__(self, func=repr):
        # Normal process: repr includes pid and name.
        p = psutil.Process()
        r = func(p)
        self.assertIn("psutil.Process", r)
        self.assertIn("pid=%s" % p.pid, r)
        self.assertIn("name=", r)
        self.assertIn(p.name(), r)
        # Zombie process: "zombie" appears, name does not.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.ZombieProcess(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertIn("zombie", r)
            self.assertNotIn("name=", r)
        # Gone process: "terminated" appears, name does not.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.NoSuchProcess(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertIn("terminated", r)
            self.assertNotIn("name=", r)
        # Access denied: name is simply omitted.
        with mock.patch.object(psutil.Process, "name",
                               side_effect=psutil.AccessDenied(os.getpid())):
            p = psutil.Process()
            r = func(p)
            self.assertIn("pid=%s" % p.pid, r)
            self.assertNotIn("name=", r)

    def test_process__str__(self):
        # str() must behave like repr().
        self.test_process__repr__(func=str)

    def test_no_such_process__repr__(self, func=repr):
        # Use `func` (was hard-coded repr, ignoring the parameter) for
        # consistency with test_process__repr__; default behavior is
        # unchanged.
        self.assertEqual(
            func(psutil.NoSuchProcess(321)),
            "psutil.NoSuchProcess process no longer exists (pid=321)")
        self.assertEqual(
            func(psutil.NoSuchProcess(321, name='foo')),
            "psutil.NoSuchProcess process no longer exists (pid=321, "
            "name='foo')")
        self.assertEqual(
            func(psutil.NoSuchProcess(321, msg='foo')),
            "psutil.NoSuchProcess foo")

    def test_zombie_process__repr__(self, func=repr):
        # Use `func` (was hard-coded repr); see test_no_such_process__repr__.
        self.assertEqual(
            func(psutil.ZombieProcess(321)),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321)")
        self.assertEqual(
            func(psutil.ZombieProcess(321, name='foo')),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321, name='foo')")
        self.assertEqual(
            func(psutil.ZombieProcess(321, name='foo', ppid=1)),
            "psutil.ZombieProcess process still exists but it's a zombie "
            "(pid=321, name='foo', ppid=1)")
        self.assertEqual(
            func(psutil.ZombieProcess(321, msg='foo')),
            "psutil.ZombieProcess foo")

    def test_access_denied__repr__(self, func=repr):
        # Use `func` (was hard-coded repr); see test_no_such_process__repr__.
        self.assertEqual(
            func(psutil.AccessDenied(321)),
            "psutil.AccessDenied (pid=321)")
        self.assertEqual(
            func(psutil.AccessDenied(321, name='foo')),
            "psutil.AccessDenied (pid=321, name='foo')")
        self.assertEqual(
            func(psutil.AccessDenied(321, msg='foo')),
            "psutil.AccessDenied foo")

    def test_timeout_expired__repr__(self, func=repr):
        # Use `func` (was hard-coded repr); see test_no_such_process__repr__.
        self.assertEqual(
            func(psutil.TimeoutExpired(321)),
            "psutil.TimeoutExpired timeout after 321 seconds")
        self.assertEqual(
            func(psutil.TimeoutExpired(321, pid=111)),
            "psutil.TimeoutExpired timeout after 321 seconds (pid=111)")
        self.assertEqual(
            func(psutil.TimeoutExpired(321, pid=111, name='foo')),
            "psutil.TimeoutExpired timeout after 321 seconds "
            "(pid=111, name='foo')")

    def test_process__eq__(self):
        # Two instances for the same PID compare equal; a different
        # identity tuple (or a non-Process) does not.
        p1 = psutil.Process()
        p2 = psutil.Process()
        self.assertEqual(p1, p2)
        p2._ident = (0, 0)
        self.assertNotEqual(p1, p2)
        self.assertNotEqual(p1, 'foo')

    def test_process__hash__(self):
        # Equal processes hash equal and deduplicate in a set.
        s = set([psutil.Process(), psutil.Process()])
        self.assertEqual(len(s), 1)

    def test__all__(self):
        # Every public psutil name must be listed in __all__, except
        # importable modules and deprecated APIs.
        dir_psutil = dir(psutil)
        for name in dir_psutil:
            if name in ('callable', 'error', 'namedtuple', 'tests',
                        'long', 'test', 'NUM_CPUS', 'BOOT_TIME',
                        'TOTAL_PHYMEM'):
                continue
            if not name.startswith('_'):
                try:
                    __import__(name)
                except ImportError:
                    if name not in psutil.__all__:
                        fun = getattr(psutil, name)
                        if fun is None:
                            continue
                        if (fun.__doc__ is not None and
                                'deprecated' not in fun.__doc__.lower()):
                            self.fail('%r not in psutil.__all__' % name)
        # ...and everything in __all__ must actually exist.
        for name in psutil.__all__:
            self.assertIn(name, dir_psutil)

    def test_version(self):
        self.assertEqual('.'.join([str(x) for x in psutil.version_info]),
                         psutil.__version__)

    def test_process_as_dict_no_new_names(self):
        # Attributes added at runtime must not leak into as_dict().
        p = psutil.Process()
        p.foo = '1'
        self.assertNotIn('foo', p.as_dict())

    def test_memoize(self):
        @memoize
        def foo(*args, **kwargs):
            """foo docstring"""
            # NOTE: the docstring above is required by the final
            # assertion of this test (it had been stripped, which made
            # the assertion fail).
            calls.append(None)
            return (args, kwargs)

        calls = []
        # No args: second call is cached.
        for x in range(2):
            ret = foo()
            expected = ((), {})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 1)
        # Positional arg: a distinct cache entry.
        for x in range(2):
            ret = foo(1)
            expected = ((1, ), {})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 2)
        # Positional + keyword arg: another distinct entry.
        for x in range(2):
            ret = foo(1, bar=2)
            expected = ((1, ), {'bar': 2})
            self.assertEqual(ret, expected)
            self.assertEqual(len(calls), 3)
        # cache_clear() forces a fresh call.
        foo.cache_clear()
        ret = foo()
        expected = ((), {})
        self.assertEqual(ret, expected)
        self.assertEqual(len(calls), 4)
        # The decorator must preserve the wrapped function's docstring.
        self.assertEqual(foo.__doc__, "foo docstring")

    def test_memoize_when_activated(self):
        class Foo:
            @memoize_when_activated
            def foo(self):
                calls.append(None)

        f = Foo()
        # Deactivated (default): every call goes through.
        calls = []
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 2)
        # Activated: the second call is served from the cache.
        calls = []
        f.foo.cache_activate()
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 1)
        # Deactivated again: back to pass-through.
        calls = []
        f.foo.cache_deactivate()
        f.foo()
        f.foo()
        self.assertEqual(len(calls), 2)

    def test_parse_environ_block(self):
        from psutil._common import parse_environ_block

        def k(s):
            # Keys are upper-cased on Windows.
            return s.upper() if WINDOWS else s

        self.assertEqual(parse_environ_block("a=1\0"),
                         {k("a"): "1"})
        self.assertEqual(parse_environ_block("a=1\0b=2\0\0"),
                         {k("a"): "1", k("b"): "2"})
        self.assertEqual(parse_environ_block("a=1\0b=\0\0"),
                         {k("a"): "1", k("b"): ""})
        # Everything after the \0\0 terminator is ignored.
        self.assertEqual(parse_environ_block("a=1\0b=2\0\0c=3\0"),
                         {k("a"): "1", k("b"): "2"})
        # Entries that are not assignments are ignored.
        self.assertEqual(parse_environ_block("xxx\0a=1\0"), {k("a"): "1"})
        self.assertEqual(parse_environ_block("a=1\0=b=2\0"), {k("a"): "1"})
        # An incomplete (unterminated) trailing entry is dropped.
        self.assertEqual(parse_environ_block("a=1\0b=2"), {k("a"): "1"})

    def test_supports_ipv6(self):
        self.addCleanup(supports_ipv6.cache_clear)
        if supports_ipv6():
            # Each failure mode of the underlying socket machinery must
            # turn the cached answer into False.
            with mock.patch('psutil._common.socket') as s:
                s.has_ipv6 = False
                supports_ipv6.cache_clear()
                assert not supports_ipv6()

            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket',
                            side_effect=socket.error) as s:
                assert not supports_ipv6()
                assert s.called

            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket',
                            side_effect=socket.gaierror) as s:
                assert not supports_ipv6()
                supports_ipv6.cache_clear()
                assert s.called

            supports_ipv6.cache_clear()
            with mock.patch('psutil._common.socket.socket.bind',
                            side_effect=socket.gaierror) as s:
                assert not supports_ipv6()
                supports_ipv6.cache_clear()
                assert s.called
        else:
            # No IPv6 support here: binding an AF_INET6 socket must
            # genuinely fail.
            with self.assertRaises(Exception):
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                sock.bind(("::1", 0))

    def test_isfile_strict(self):
        from psutil._common import isfile_strict
        this_file = os.path.abspath(__file__)
        assert isfile_strict(this_file)
        assert not isfile_strict(os.path.dirname(this_file))
        # EPERM / EACCES must propagate...
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EPERM, "foo")):
            self.assertRaises(OSError, isfile_strict, this_file)
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EACCES, "foo")):
            self.assertRaises(OSError, isfile_strict, this_file)
        # ...while other errors just mean "not a file".
        with mock.patch('psutil._common.os.stat',
                        side_effect=OSError(errno.EINVAL, "foo")):
            assert not isfile_strict(this_file)
        with mock.patch('psutil._common.stat.S_ISREG', return_value=False):
            assert not isfile_strict(this_file)

    def test_serialization(self):
        def check(ret):
            # Returned values must round-trip through JSON and pickle.
            if json is not None:
                json.loads(json.dumps(ret))
            a = pickle.dumps(ret)
            b = pickle.loads(a)
            self.assertEqual(ret, b)

        check(psutil.Process().as_dict())
        check(psutil.virtual_memory())
        check(psutil.swap_memory())
        check(psutil.cpu_times())
        check(psutil.cpu_times_percent(interval=0))
        check(psutil.net_io_counters())
        # Flattened from the original `if ...: pass / else:` form.
        if not LINUX or os.path.exists('/proc/diskstats'):
            if not APPVEYOR:
                check(psutil.disk_io_counters())
        check(psutil.disk_partitions())
        check(psutil.disk_usage(os.getcwd()))
        check(psutil.users())

    def test_setup_script(self):
        # setup.py must be importable, expose a matching version, and
        # exit when run with no arguments.
        setup_py = os.path.join(ROOT_DIR, 'setup.py')
        module = import_module_by_path(setup_py)
        self.assertRaises(SystemExit, module.setup)
        self.assertEqual(module.get_version(), psutil.__version__)

    def test_ad_on_process_creation(self):
        # Process() construction must tolerate AccessDenied and
        # ZombieProcess raised by create_time(), but not other errors.
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=psutil.AccessDenied) as meth:
            psutil.Process()
            assert meth.called
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=psutil.ZombieProcess(1)) as meth:
            psutil.Process()
            assert meth.called
        with mock.patch.object(psutil.Process, 'create_time',
                               side_effect=ValueError) as meth:
            with self.assertRaises(ValueError):
                psutil.Process()
            assert meth.called

    def test_sanity_version_check(self):
        # A mismatched C-extension version must abort the import.
        with mock.patch(
                "psutil._psplatform.cext.version", return_value="0.0.0"):
            with self.assertRaises(ImportError) as cm:
                reload_module(psutil)
            self.assertIn("version conflict", str(cm.exception).lower())
# 3-field namedtuple used by TestWrapNumbers as a stand-in for psutil's
# per-disk / per-NIC counter tuples.
nt = collections.namedtuple('foo', 'a b c')
class TestWrapNumbers(unittest.TestCase):
    """Tests for psutil._common.wrap_numbers().

    wrap_numbers() is given dicts of counter namedtuples keyed by
    device name; when a counter decreases ("wraps") it compensates by
    adding the last-seen value, keeping results monotonic. State is
    kept in a module-level cache keyed by the second argument.
    """

    def setUp(self):
        wrap_numbers.cache_clear()

    tearDown = setUp

    def test_first_call(self):
        # First call: returned as-is.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_input_hasnt_changed(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_increase_but_no_wrap(self):
        # Monotonically increasing values pass through untouched.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(10, 15, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_wrap(self):
        input = {'disk1': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # first wrap restarts from 10
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it remains the same
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it goes up
        input = {'disk1': nt(100, 100, 90)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 190)})
        # then it wraps again
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # and remains the same
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # now wrap another num
        input = {'disk1': nt(50, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(150, 100, 210)})
        # and again
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})
        # keep it the same
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})

    def test_changing_keys(self):
        # Emulate a case where the second call to disk_io()
        # (or whatever) provides a new disk, then the new disk
        # disappears on the third call.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(5, 5, 5),
                 'disk2': nt(7, 7, 7)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(8, 8, 8)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_changing_keys_w_wrap(self):
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # disk 2 wraps
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(50, 50, 50),
                          'disk2': nt(100, 100, 110)})
        # disk 2 disappears
        input = {'disk1': nt(50, 50, 50)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # then it appears again; the old wrap is supposed to be
        # gone.
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # remains the same
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # and then wraps again
        input = {'disk1': nt(50, 50, 50),
                 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(50, 50, 50),
                          'disk2': nt(100, 100, 110)})

    def test_real_data(self):
        d = {'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
             'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
             'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
             'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        # decrease this ↓
        d = {'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
             'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
             'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
             'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
        out = wrap_numbers(d, 'disk_io')
        self.assertEqual(out['nvme0n1'][0], 400)

    # --- cache tests

    def test_cache_first_call(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        # cache_info() returns 3 dicts: last input, per-field offsets,
        # and the set of fields that wrapped.
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(cache[1], {'disk_io': {}})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_call_twice(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(10, 10, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_wrap(self):
        # let's say 100 is the threshold
        input = {'disk1': nt(100, 100, 100)}
        wrap_numbers(input, 'disk_io')
        # first wrap: the offset for field 2 becomes 100
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 100}})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})

        def assert_():
            # Offsets/wrapped-set must be unchanged.
            cache = wrap_numbers.cache_info()
            self.assertEqual(
                cache[1],
                {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0,
                             ('disk1', 2): 100}})
            self.assertEqual(cache[2],
                             {'disk_io': {'disk1': set([('disk1', 2)])}})

        # same input: no change
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()
        # increasing input: no change
        input = {'disk1': nt(100, 100, 90)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()
        # second wrap: offset bumps to 190
        input = {'disk1': nt(100, 100, 20)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 190}})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})

    def test_cache_changing_keys(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(5, 5, 5),
                 'disk2': nt(7, 7, 7)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_clear(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        wrap_numbers(input, 'disk_io')
        wrap_numbers.cache_clear('disk_io')
        self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {}))
        # Clearing a missing/unknown name must not raise.
        wrap_numbers.cache_clear('disk_io')
        wrap_numbers.cache_clear('?!?')

    @unittest.skipIf(
        not psutil.disk_io_counters() or not psutil.net_io_counters(),
        "no disks or NICs available")
    def test_cache_clear_public_apis(self):
        # The public per-API cache_clear() hooks must each clear only
        # their own entries.
        psutil.disk_io_counters()
        psutil.net_io_counters()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.disk_io_counters', cache)
            self.assertIn('psutil.net_io_counters', cache)

        psutil.disk_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.net_io_counters', cache)
            self.assertNotIn('psutil.disk_io_counters', cache)

        psutil.net_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        self.assertEqual(caches, ({}, {}, {}))
@unittest.skipIf(TOX, "can't test on TOX")
# See: https://travis-ci.org/giampaolo/psutil/jobs/295224806
@unittest.skipIf(TRAVIS and not
                 os.path.exists(os.path.join(SCRIPTS_DIR, 'free.py')),
                 "can't locate scripts directory")
class TestScripts(unittest.TestCase):
    """Tests for the example scripts in the scripts/ directory."""

    @staticmethod
    def assert_stdout(exe, args=None, **kwds):
        # Run a script and return its (non-empty) stdout; an
        # "AccessDenied" failure is tolerated and its message returned.
        exe = '"%s"' % os.path.join(SCRIPTS_DIR, exe)
        if args:
            exe = exe + ' ' + args
        try:
            out = sh(sys.executable + ' ' + exe, **kwds).strip()
        except RuntimeError as err:
            if 'AccessDenied' in str(err):
                return str(err)
            else:
                raise
        assert out, out
        return out

    @staticmethod
    def assert_syntax(exe, args=None):
        # Parse a script with ast to verify its syntax without
        # executing it.
        exe = os.path.join(SCRIPTS_DIR, exe)
        if PY3:
            f = open(exe, 'rt', encoding='utf8')
        else:
            f = open(exe, 'rt')
        with f:
            src = f.read()
        ast.parse(src)

    def test_coverage(self):
        # Every script must have a corresponding test_* method here.
        meths = dir(self)
        for name in os.listdir(SCRIPTS_DIR):
            if name.endswith('.py'):
                if 'test_' + os.path.splitext(name)[0] not in meths:
                    self.fail('no test defined for %r script'
                              % os.path.join(SCRIPTS_DIR, name))

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_executable(self):
        # Every script must have the owner-executable bit set.
        for name in os.listdir(SCRIPTS_DIR):
            if name.endswith('.py'):
                path = os.path.join(SCRIPTS_DIR, name)
                if not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]:
                    self.fail('%r is not executable' % path)

    def test_disk_usage(self):
        self.assert_stdout('disk_usage.py')

    def test_free(self):
        self.assert_stdout('free.py')

    def test_meminfo(self):
        self.assert_stdout('meminfo.py')

    def test_procinfo(self):
        self.assert_stdout('procinfo.py', args=str(os.getpid()))

    # NOTE(review): `and` binds tighter than `or`, so this skips
    # unconditionally on APPVEYOR but on TRAVIS only when no users are
    # found. Confirm `(APPVEYOR or TRAVIS) and not psutil.users()` was
    # not the intent.
    @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(),
                     "unreliable on APPVEYOR or TRAVIS")
    def test_who(self):
        self.assert_stdout('who.py')

    def test_ps(self):
        self.assert_stdout('ps.py')

    def test_pstree(self):
        self.assert_stdout('pstree.py')

    def test_netstat(self):
        self.assert_stdout('netstat.py')

    # permission denied on travis
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_ifconfig(self):
        self.assert_stdout('ifconfig.py')

    @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
    def test_pmap(self):
        self.assert_stdout('pmap.py', args=str(os.getpid()))

    @unittest.skipIf(not HAS_MEMORY_FULL_INFO, "not supported")
    def test_procsmem(self):
        self.assert_stdout('procsmem.py', stderr=DEVNULL)

    def test_killall(self):
        self.assert_syntax('killall.py')

    def test_nettop(self):
        self.assert_syntax('nettop.py')

    def test_top(self):
        self.assert_syntax('top.py')

    def test_iotop(self):
        self.assert_syntax('iotop.py')

    def test_pidof(self):
        output = self.assert_stdout('pidof.py', args=psutil.Process().name())
        self.assertIn(str(os.getpid()), output)

    @unittest.skipIf(not WINDOWS, "WINDOWS only")
    def test_winservices(self):
        self.assert_stdout('winservices.py')

    def test_cpu_distribution(self):
        self.assert_syntax('cpu_distribution.py')

    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_temperatures(self):
        self.assert_stdout('temperatures.py')

    @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_fans(self):
        self.assert_stdout('fans.py')

    @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
    @unittest.skipIf(not HAS_BATTERY, "no battery")
    def test_battery(self):
        self.assert_stdout('battery.py')

    def test_sensors(self):
        self.assert_stdout('sensors.py')
# ===================================================================
# --- Unit tests for test utilities.
# ===================================================================
class TestRetryDecorator(unittest.TestCase):
    """Tests for the @retry test-utility decorator (time.sleep is
    mocked so no real waiting occurs)."""

    @mock.patch('time.sleep')
    def test_retry_success(self, sleep):
        # Fail 3 times out of 5; make sure the decorated fun returns.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            while queue:
                queue.pop()
                1 / 0
            return 1

        queue = list(range(3))
        self.assertEqual(foo(), 1)
        self.assertEqual(sleep.call_count, 3)

    @mock.patch('time.sleep')
    def test_retry_failure(self, sleep):
        # Fail 6 times out of 5; the function is supposed to raise exc.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            while queue:
                queue.pop()
                1 / 0
            return 1

        queue = list(range(6))
        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 5)

    @mock.patch('time.sleep')
    def test_exception_arg(self, sleep):
        # A non-matching exception type is not retried.
        @retry(exception=ValueError, interval=1)
        def foo():
            raise TypeError

        self.assertRaises(TypeError, foo)
        self.assertEqual(sleep.call_count, 0)

    @mock.patch('time.sleep')
    def test_no_interval_arg(self, sleep):
        # if interval is not specified sleep is not supposed to be called
        @retry(retries=5, interval=None, logfun=None)
        def foo():
            1 / 0

        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 0)

    @mock.patch('time.sleep')
    def test_retries_arg(self, sleep):
        # retries=5 results in exactly 5 sleeps before giving up.
        @retry(retries=5, interval=1, logfun=None)
        def foo():
            1 / 0

        self.assertRaises(ZeroDivisionError, foo)
        self.assertEqual(sleep.call_count, 5)

    @mock.patch('time.sleep')
    def test_retries_and_timeout_args(self, sleep):
        # Passing both retries= and timeout= raises ValueError.
        self.assertRaises(ValueError, retry, retries=5, timeout=1)
class TestSyncTestUtils(unittest.TestCase):
    """Tests for the wait_for_* / call_until synchronization helpers."""

    def tearDown(self):
        safe_rmpath(TESTFN)

    def test_wait_for_pid(self):
        wait_for_pid(os.getpid())
        nopid = max(psutil.pids()) + 99999
        # Make the retry loop give up immediately.
        with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
            self.assertRaises(psutil.NoSuchProcess, wait_for_pid, nopid)

    def test_wait_for_file(self):
        with open(TESTFN, 'w') as f:
            f.write('foo')
        wait_for_file(TESTFN)
        # The file is deleted by default once found.
        assert not os.path.exists(TESTFN)

    def test_wait_for_file_empty(self):
        # An empty file is acceptable when empty=True.
        with open(TESTFN, 'w'):
            pass
        wait_for_file(TESTFN, empty=True)
        assert not os.path.exists(TESTFN)

    def test_wait_for_file_no_file(self):
        with mock.patch('psutil.tests.retry.__iter__', return_value=iter([0])):
            self.assertRaises(IOError, wait_for_file, TESTFN)

    def test_wait_for_file_no_delete(self):
        with open(TESTFN, 'w') as f:
            f.write('foo')
        wait_for_file(TESTFN, delete=False)
        # delete=False leaves the file in place.
        assert os.path.exists(TESTFN)

    def test_call_until(self):
        ret = call_until(lambda: 1, "ret == 1")
        self.assertEqual(ret, 1)
class TestFSTestUtils(unittest.TestCase):
    """Tests for the filesystem test helpers (safe_rmpath, chdir)."""

    def setUp(self):
        safe_rmpath(TESTFN)

    tearDown = setUp

    def test_safe_rmpath(self):
        # test file is removed
        open(TESTFN, 'w').close()
        safe_rmpath(TESTFN)
        assert not os.path.exists(TESTFN)
        # test no exception if path does not exist
        safe_rmpath(TESTFN)
        # test dir is removed
        os.mkdir(TESTFN)
        safe_rmpath(TESTFN)
        assert not os.path.exists(TESTFN)
        # test other exceptions are raised
        with mock.patch('psutil.tests.os.stat',
                        side_effect=OSError(errno.EINVAL, "")) as m:
            with self.assertRaises(OSError):
                safe_rmpath(TESTFN)
            assert m.called

    def test_chdir(self):
        # chdir() must restore the original cwd on exit.
        base = os.getcwd()
        os.mkdir(TESTFN)
        with chdir(TESTFN):
            self.assertEqual(os.getcwd(), os.path.join(base, TESTFN))
        self.assertEqual(os.getcwd(), base)
class TestProcessUtils(unittest.TestCase):
    """Tests for the process-management test helpers."""

    def test_reap_children(self):
        # reap_children() must terminate the spawned subprocess and
        # clear the module-level bookkeeping containers.
        subp = get_test_subprocess()
        p = psutil.Process(subp.pid)
        assert p.is_running()
        reap_children()
        assert not p.is_running()
        assert not psutil.tests._pids_started
        assert not psutil.tests._subprocesses_started

    def test_create_proc_children_pair(self):
        # p1 is our child and p2 is p1's child (our grandchild), as the
        # ppid() assertions below verify.
        p1, p2 = create_proc_children_pair()
        self.assertNotEqual(p1.pid, p2.pid)
        assert p1.is_running()
        assert p2.is_running()
        children = psutil.Process().children(recursive=True)
        self.assertEqual(len(children), 2)
        self.assertIn(p1, children)
        self.assertIn(p2, children)
        self.assertEqual(p1.ppid(), os.getpid())
        self.assertEqual(p2.ppid(), p1.pid)
        # make sure both of them are cleaned up
        reap_children()
        assert not p1.is_running()
        assert not p2.is_running()
        assert not psutil.tests._pids_started
        assert not psutil.tests._subprocesses_started

    @unittest.skipIf(not POSIX, "POSIX only")
    def test_create_zombie_proc(self):
        # The helper's process must show up with zombie status.
        zpid = create_zombie_proc()
        self.addCleanup(reap_children, recursive=True)
        p = psutil.Process(zpid)
        self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
class TestNetUtils(unittest.TestCase):
    """Tests for the socket helpers in psutil.tests."""
    def test_bind_socket(self):
        # Renamed from "bind_socket": the old name lacked the "test_"
        # prefix (so unittest never collected it) and shadowed the
        # bind_socket() helper it is meant to exercise.
        port = get_free_port()
        with contextlib.closing(bind_socket(addr=('', port))) as s:
            self.assertEqual(s.getsockname()[1], port)
    @unittest.skipIf(not POSIX, "POSIX only")
    def test_bind_unix_socket(self):
        # TCP (stream) UNIX socket.
        with unix_socket_path() as name:
            sock = bind_unix_socket(name)
            with contextlib.closing(sock):
                self.assertEqual(sock.family, socket.AF_UNIX)
                self.assertEqual(sock.type, socket.SOCK_STREAM)
                self.assertEqual(sock.getsockname(), name)
                assert os.path.exists(name)
                assert stat.S_ISSOCK(os.stat(name).st_mode)
        # UDP
        with unix_socket_path() as name:
            sock = bind_unix_socket(name, type=socket.SOCK_DGRAM)
            with contextlib.closing(sock):
                self.assertEqual(sock.type, socket.SOCK_DGRAM)
    def test_tcp_socketpair(self):
        # Renamed from "tcp_tcp_socketpair" so unittest discovers it.
        addr = ("127.0.0.1", get_free_port())
        server, client = tcp_socketpair(socket.AF_INET, addr=addr)
        with contextlib.closing(server):
            with contextlib.closing(client):
                # Ensure they are connected and the positions are
                # correct.
                self.assertEqual(server.getsockname(), addr)
                self.assertEqual(client.getpeername(), addr)
                self.assertNotEqual(client.getsockname(), addr)
    @unittest.skipIf(not POSIX, "POSIX only")
    def test_unix_socketpair(self):
        # A connected UNIX pair consumes exactly two file descriptors.
        p = psutil.Process()
        num_fds = p.num_fds()
        assert not p.connections(kind='unix')
        with unix_socket_path() as name:
            server, client = unix_socketpair(name)
            try:
                assert os.path.exists(name)
                assert stat.S_ISSOCK(os.stat(name).st_mode)
                self.assertEqual(p.num_fds() - num_fds, 2)
                self.assertEqual(len(p.connections(kind='unix')), 2)
                self.assertEqual(server.getsockname(), name)
                self.assertEqual(client.getpeername(), name)
            finally:
                client.close()
                server.close()
    def test_create_sockets(self):
        # create_sockets() must yield at least two sockets per family
        # (INET, INET6 and, where supported, UNIX) and per type.
        with create_sockets() as socks:
            fams = collections.defaultdict(int)
            types = collections.defaultdict(int)
            for s in socks:
                fams[s.family] += 1
                # work around http://bugs.python.org/issue30204
                types[s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)] += 1
            self.assertGreaterEqual(fams[socket.AF_INET], 2)
            self.assertGreaterEqual(fams[socket.AF_INET6], 2)
            if POSIX and HAS_CONNECTIONS_UNIX:
                self.assertGreaterEqual(fams[socket.AF_UNIX], 2)
            self.assertGreaterEqual(types[socket.SOCK_STREAM], 2)
            self.assertGreaterEqual(types[socket.SOCK_DGRAM], 2)
class TestOtherUtils(unittest.TestCase):
    """Tests for miscellaneous helpers in psutil.tests."""
    def test_is_namedtuple(self):
        # A namedtuple instance is recognized; a plain tuple is not.
        point = collections.namedtuple('foo', 'a b c')(1, 2, 3)
        assert is_namedtuple(point)
        assert not is_namedtuple(tuple())
# Allow running this test module directly; delegates to psutil's runner.
if __name__ == '__main__':
    run_test_module_by_name(__file__)
| true | true |
f7305a7160e771c29930fa09a63b5a4213102df1 | 2,604 | py | Python | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | null | null | null | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | null | null | null | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | 1 | 2021-08-29T11:08:07.000Z | 2021-08-29T11:08:07.000Z |
"""
----------------------
Python Dictionary API
----------------------
PyDictAPI is a library written in Python that can be used to fetch meanings and translations.
Both the Finder and Translator classes take an argument "jsonify" that is set to False by default.
If jsonify is set to True, then the processed queries are returned in JSON; by default the queries are returned in the form of a Python List (Array).
Currently supports only English-English dictionary searches
Basic usage:
>>> from PyDictAPI import Finder
>>> Meanings = Finder(jsonify=True)
>>> print(Meanings.findMeanings('apple'))
Output:
`{
"word": "Apple",
"meanings": [
{
"partOfSpeech": "Noun",
"definition": "The usually round, red or yellow, edible fruit of a small tree, Malus sylvestris, of the rose family."
},
{
"partOfSpeech": "Noun",
"definition": "A rosaceous tree, Malus sieversii, native to Central Asia but widely cultivated in temperate regions in many varieties, having pink or white fragrant flowers and firm rounded edible fruits. See also crab apple"
}
]
}`
---------------------------------------
Finding Examples, Synonyms and Antonyms
---------------------------------------
>>> print(Meanings.findUsage('help', 2)) #Finding Examples
# Here 2 defines the maximum number of examples to be included in the response,
# by default it is set to 5
>>> print(Meanings.findSynonyms('help', 4)) #Finding Synonyms
>>> print(Meanings.findAntonyms('help', 4)) #Finding Antonyms
----------------
Translating text
----------------
Example:
>>> # Import the module first
>>> from PyDictAPI import Translate
>>> t = Translate(jsonify=True) # Creates an instance of Translate class
>>>
>>> # You can get all supported language list through languages_help()
>>> languages = t.languages_help(pretty=True)
>>> # Pretty=true returns the list of supported languages in a well structured manner. By default Pretty is set to False
>>>
>>> # Translate English into Hindi
>>> print(t.translateItems("Hello, How are you?", "hi"))
`{'query': 'Hello, How are you?', 'language_detected': 'Hindi', 'translation': 'नमस्कार किसे हो आप?'}`
Full documentation is at <https://github.com/imshawan/PyDictAPI>.
copyright: (c) 2021 by Shawan Mandal.
license: MIT License, see LICENSE for more details.
"""
__author__ = "Shawan Mandal"
__email__ = "imshawan.dev049@gmail.com"
__version__ = "1.6.0"
# Prefer package-relative imports; fall back to plain imports so the modules
# also resolve when the package is run directly from its source directory.
try:
    from .scrape import *
    from .translator import *
except ImportError:  # was a bare "except", which hid unrelated errors
    from scrape import *
    from translator import *
| 30.635294 | 231 | 0.656682 |
__author__ = "Shawan Mandal"
__email__ = "imshawan.dev049@gmail.com"
__version__ = "1.6.0"
# Prefer package-relative imports; fall back to plain imports so the modules
# also resolve when the package is run directly from its source directory.
try:
    from .scrape import *
    from .translator import *
except ImportError:  # was a bare "except", which hid unrelated errors
    from scrape import *
    from translator import *
| true | true |
f7305a74ccf623557222c020c2382bc476227606 | 3,177 | py | Python | tests/e2e_scenarios.py | rneatherway/CCF | e04c6bbbe0b5ba044abaab9f972287194b6fc6cc | [
"Apache-2.0"
] | 2 | 2020-08-06T04:12:36.000Z | 2021-09-09T04:15:25.000Z | tests/e2e_scenarios.py | rajdhandus/CCF | 96edbc9db6bd14c559a8c59bcda1c2a4835768d2 | [
"Apache-2.0"
] | 2 | 2022-02-03T06:32:47.000Z | 2022-02-09T23:00:07.000Z | tests/e2e_scenarios.py | securelogicgroup/CCF | 2bad8ca6caa146e6b7cd4167fea551d61fecabfa | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import json
import http
import random
import infra.network
import infra.proc
import infra.e2e_args
import infra.checker
from loguru import logger as LOG
def run(args):
    """Execute the transactions described by a JSON scenario file against a
    freshly started CCF network, checking every response.

    Args:
        args: Parsed CLI arguments; ``args.scenario`` points at the JSON
            scenario file, and the remaining fields configure the network.
    """
    # SNIPPET_START: parsing
    with open(args.scenario) as f:
        scenario = json.load(f)
    hosts = scenario.get("hosts", infra.e2e_args.max_nodes(args, f=0))
    args.package = scenario["package"]
    # SNIPPET_END: parsing
    scenario_dir = os.path.dirname(args.scenario)
    # SNIPPET_START: create_network
    with infra.network.network(
        hosts, args.binary_dir, args.debug_nodes, args.perf_nodes
    ) as network:
        network.start_and_join(args)
        # SNIPPET_END: create_network
        primary, backups = network.find_nodes()
        with primary.client() as mc:
            check = infra.checker.Checker()
            check_commit = infra.checker.Checker(mc)
            for connection in scenario["connections"]:
                # Each connection targets the primary unless "on_backup"
                # requests a randomly chosen backup node.
                with (
                    primary.client("user0")
                    if not connection.get("on_backup")
                    else random.choice(backups).client("user0")
                ) as client:
                    txs = connection.get("transactions", [])
                    # "include" files (relative to the scenario) append
                    # extra transactions to this connection.
                    for include_file in connection.get("include", []):
                        with open(os.path.join(scenario_dir, include_file)) as f:
                            txs += json.load(f)
                    for tx in txs:
                        r = client.call(
                            tx["method"],
                            body=tx["body"],
                            http_verb=tx.get("verb", "POST"),
                        )
                        # A transaction either expects an HTTP error status,
                        # an exact committed result, or just any result.
                        if tx.get("expected_error") is not None:
                            check(
                                r,
                                error=lambda status, msg, transaction=tx: status
                                # pylint: disable=no-member
                                == http.HTTPStatus(
                                    transaction.get("expected_error")
                                ).value,
                            )
                        elif tx.get("expected_result") is not None:
                            check_commit(r, result=tx.get("expected_result"))
                        else:
                            check_commit(r, result=lambda res: res is not None)
            network.wait_for_node_commit_sync()
    if args.network_only:
        LOG.info("Keeping network alive with the following nodes:")
        LOG.info("  Primary = {}:{}".format(primary.pubhost, primary.pubport))
        for i, f in enumerate(backups):
            LOG.info("  Backup[{}] = {}:{}".format(i, f.pubhost, f.pubport))
        input("Press Enter to shutdown...")
if __name__ == "__main__":

    def add(cli_parser):
        # Require the path to the JSON scenario describing the transactions.
        cli_parser.add_argument(
            "--scenario",
            type=str,
            required=True,
            help="Path to JSON file listing transactions to execute",
        )

    run(infra.e2e_args.cli_args(add=add))
| 32.418367 | 81 | 0.514951 |
import os
import json
import http
import random
import infra.network
import infra.proc
import infra.e2e_args
import infra.checker
from loguru import logger as LOG
def run(args):
    """Execute the transactions described by a JSON scenario file against a
    freshly started CCF network, checking every response.

    Args:
        args: Parsed CLI arguments; ``args.scenario`` points at the JSON
            scenario file, and the remaining fields configure the network.
    """
    with open(args.scenario) as f:
        scenario = json.load(f)
    hosts = scenario.get("hosts", infra.e2e_args.max_nodes(args, f=0))
    args.package = scenario["package"]
    scenario_dir = os.path.dirname(args.scenario)
    with infra.network.network(
        hosts, args.binary_dir, args.debug_nodes, args.perf_nodes
    ) as network:
        network.start_and_join(args)
        primary, backups = network.find_nodes()
        with primary.client() as mc:
            check = infra.checker.Checker()
            check_commit = infra.checker.Checker(mc)
            for connection in scenario["connections"]:
                # Target the primary unless "on_backup" asks for a backup.
                with (
                    primary.client("user0")
                    if not connection.get("on_backup")
                    else random.choice(backups).client("user0")
                ) as client:
                    txs = connection.get("transactions", [])
                    for include_file in connection.get("include", []):
                        with open(os.path.join(scenario_dir, include_file)) as f:
                            txs += json.load(f)
                    for tx in txs:
                        r = client.call(
                            tx["method"],
                            body=tx["body"],
                            http_verb=tx.get("verb", "POST"),
                        )
                        # Expect an HTTP error, an exact result, or any result.
                        if tx.get("expected_error") is not None:
                            check(
                                r,
                                error=lambda status, msg, transaction=tx: status
                                == http.HTTPStatus(
                                    transaction.get("expected_error")
                                ).value,
                            )
                        elif tx.get("expected_result") is not None:
                            check_commit(r, result=tx.get("expected_result"))
                        else:
                            check_commit(r, result=lambda res: res is not None)
            network.wait_for_node_commit_sync()
    if args.network_only:
        LOG.info("Keeping network alive with the following nodes:")
        LOG.info("  Primary = {}:{}".format(primary.pubhost, primary.pubport))
        for i, f in enumerate(backups):
            LOG.info("  Backup[{}] = {}:{}".format(i, f.pubhost, f.pubport))
        input("Press Enter to shutdown...")
if __name__ == "__main__":

    def add(cli_parser):
        # Require the path to the JSON scenario describing the transactions.
        cli_parser.add_argument(
            "--scenario",
            type=str,
            required=True,
            help="Path to JSON file listing transactions to execute",
        )

    run(infra.e2e_args.cli_args(add=add))
| true | true |
f7305b4091d94994c4a20cc3634347522a8f0bce | 35,217 | py | Python | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | 1 | 2020-07-13T17:56:46.000Z | 2020-07-13T17:56:46.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Matplotlib classes for pulse visualization."""
import collections
import warnings
from typing import Dict, List, Tuple, Callable, Union, Any
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse.interpolation import step_wise
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel, Channel)
from qiskit.pulse.commands import FrameChangeInstruction
from qiskit.pulse import (Waveform, SamplePulse, FrameChange, PersistentValue, Snapshot, Play,
Acquire, PulseError, ParametricPulse, SetFrequency, ShiftPhase,
Instruction, ScheduleComponent, ShiftFrequency, SetPhase)
class EventsOutputChannels:
    """Pulse dataset for channel."""

    def __init__(self, t0: int, tf: int):
        """Create new channel dataset.

        TODO: remove PV

        Args:
            t0: starting time of plot
            tf: ending time of plot
        """
        # Mapping of start time -> list of commands/pulses starting there.
        self.pulses = {}
        self.t0 = t0
        self.tf = tf

        # Lazily built caches; all populated by _build_waveform() on the
        # first access to any of the properties below.
        self._waveform = None
        self._framechanges = None
        self._setphase = None
        self._frequencychanges = None
        self._conditionals = None
        self._snapshots = None
        self._labels = None

        # Set by the drawer when this channel should actually be plotted.
        self.enable = False

    def add_instruction(self, start_time: int, instruction: Instruction):
        """Add new pulse instruction to channel.

        Args:
            start_time: Starting time of instruction
            instruction: Instruction object to be added
        """
        # Normalize to the underlying command/pulse: legacy instructions
        # expose .command, Play wraps a pulse, everything else is stored
        # as-is.
        if instruction.command is not None:
            pulse = instruction.command
        elif isinstance(instruction, Play):
            pulse = instruction.pulse
        else:
            pulse = instruction
        if start_time in self.pulses.keys():
            self.pulses[start_time].append(pulse)
        else:
            self.pulses[start_time] = [pulse]

    @property
    def waveform(self) -> np.ndarray:
        """Get waveform."""
        if self._waveform is None:
            self._build_waveform()

        return self._waveform[self.t0:self.tf]

    @property
    def framechanges(self) -> Dict[int, FrameChangeInstruction]:
        """Get frame changes."""
        if self._framechanges is None:
            self._build_waveform()

        return self._trim(self._framechanges)

    @property
    def setphase(self) -> Dict[int, SetPhase]:
        """Get the SetPhase phase values."""
        if self._setphase is None:
            self._build_waveform()

        return self._trim(self._setphase)

    @property
    def frequencychanges(self) -> Dict[int, SetFrequency]:
        """Get the frequency changes."""
        if self._frequencychanges is None:
            self._build_waveform()

        return self._trim(self._frequencychanges)

    @property
    def frequencyshift(self) -> Dict[int, ShiftFrequency]:
        """Get the frequency shifts (same event table as
        :attr:`frequencychanges`; both Set and Shift events are recorded
        in ``_frequencychanges``)."""
        if self._frequencychanges is None:
            self._build_waveform()

        return self._trim(self._frequencychanges)

    @property
    def conditionals(self) -> Dict[int, str]:
        """Get conditionals."""
        if self._conditionals is None:
            self._build_waveform()

        return self._trim(self._conditionals)

    @property
    def snapshots(self) -> Dict[int, Snapshot]:
        """Get snapshots."""
        if self._snapshots is None:
            self._build_waveform()

        return self._trim(self._snapshots)

    @property
    def labels(self) -> Dict[int, Union[Waveform, Acquire]]:
        """Get labels."""
        if self._labels is None:
            self._build_waveform()

        return self._trim(self._labels)

    def is_empty(self) -> bool:
        """Return if pulse is empty.

        Returns:
            bool: if the channel has nothing to plot
        """
        if (any(self.waveform) or self.framechanges or self.setphase or
                self.conditionals or self.snapshots):
            return False

        return True

    def to_table(self, name: str) -> List[Tuple[int, str, str]]:
        """Get table contains.

        Args:
            name (str): name of channel

        Returns:
            A list of events in the channel
        """
        time_event = []

        framechanges = self.framechanges
        setphase = self.setphase
        conditionals = self.conditionals
        snapshots = self.snapshots
        frequencychanges = self.frequencychanges

        for key, val in framechanges.items():
            data_str = 'shift phase: %.2f' % val
            time_event.append((key, name, data_str))
        for key, val in setphase.items():
            data_str = 'set phase: %.2f' % val
            time_event.append((key, name, data_str))
        for key, val in conditionals.items():
            data_str = 'conditional, %s' % val
            time_event.append((key, name, data_str))
        for key, val in snapshots.items():
            data_str = 'snapshot: %s' % val
            time_event.append((key, name, data_str))
        for key, val in frequencychanges.items():
            data_str = 'frequency: %.4e' % val
            time_event.append((key, name, data_str))

        return time_event

    def _build_waveform(self):
        """Create waveform from stored pulses.
        """
        self._framechanges = {}
        self._setphase = {}
        self._frequencychanges = {}
        self._conditionals = {}
        self._snapshots = {}
        self._labels = {}
        fc = 0  # accumulated frame-change phase, applied as exp(1j*fc)
        # pv carries the persistent-value baseline; wf the explicit samples.
        pv = np.zeros(self.tf + 1, dtype=np.complex128)
        wf = np.zeros(self.tf + 1, dtype=np.complex128)
        last_pv = None
        for time, commands in sorted(self.pulses.items()):
            if time > self.tf:
                break
            # First pass over this time slot: collect phase/frequency
            # bookkeeping before any waveform is rendered.
            tmp_fc = 0
            tmp_set_phase = 0
            tmp_sf = None
            for command in commands:
                if isinstance(command, (FrameChange, ShiftPhase)):
                    tmp_fc += command.phase
                    pv[time:] = 0
                elif isinstance(command, SetPhase):
                    tmp_set_phase = command.phase
                    pv[time:] = 0
                elif isinstance(command, SetFrequency):
                    tmp_sf = command.frequency
                elif isinstance(command, ShiftFrequency):
                    tmp_sf = command.frequency
                elif isinstance(command, Snapshot):
                    self._snapshots[time] = command.name
            if tmp_fc != 0:
                self._framechanges[time] = tmp_fc
                fc += tmp_fc
            if tmp_set_phase != 0:
                self._setphase[time] = tmp_set_phase
                fc = tmp_set_phase  # SetPhase overwrites the accumulated phase
            if tmp_sf is not None:
                self._frequencychanges[time] = tmp_sf
            # Second pass: a PersistentValue extends from `time` onward
            # until the next event clears it.
            for command in commands:
                if isinstance(command, PersistentValue):
                    pv[time:] = np.exp(1j*fc) * command.value
                    last_pv = (time, command)
                    break

            # Third pass: render sample pulses / acquires, clipped to tf.
            for command in commands:
                duration = command.duration
                tf = min(time + duration, self.tf)
                if isinstance(command, ParametricPulse):
                    command = command.get_sample_pulse()
                if isinstance(command, (Waveform, SamplePulse)):
                    wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
                    pv[time:] = 0
                    self._labels[time] = (tf, command)
                    if last_pv is not None:
                        # Close the label of the persistent value that this
                        # pulse terminates.
                        pv_cmd = last_pv[1]
                        self._labels[last_pv[0]] = (time, pv_cmd)
                        last_pv = None

                elif isinstance(command, Acquire):
                    wf[time:tf] = np.ones(tf - time)
                    self._labels[time] = (tf, command)
        self._waveform = wf + pv

    def _trim(self, events: Dict[int, Any]) -> Dict[int, Any]:
        """Return events during given `time_range`.

        Args:
            events: time and operation of events.

        Returns:
            Events within the specified time range.
        """
        events_in_time_range = {}

        for k, v in events.items():
            if self.t0 <= k <= self.tf:
                events_in_time_range[k] = v

        return events_in_time_range
class SamplePulseDrawer:
    """Creates matplotlib figures for single sample pulses."""

    def __init__(self, style: PulseStyle):
        """Initialize the drawer.

        Args:
            style: Style sheet for pulse visualization; a default
                ``PulseStyle`` is used when none is given.
        """
        self.style = style or PulseStyle()

    def draw(self, pulse: Waveform,
             dt: float = 1.0,
             interp_method: Callable = None,
             scale: float = 1, scaling: float = None):
        """Render the envelope of ``pulse`` as a filled real/imaginary plot.

        Args:
            pulse: Waveform to draw.
            dt: time interval.
            interp_method: interpolation function.
            scale: Relative visual scaling of waveform amplitudes.
            scaling: Deprecated, see `scale`.

        Returns:
            matplotlib.figure.Figure: A matplotlib figure object of the pulse envelope.
        """
        if scaling is not None:
            warnings.warn('The parameter "scaling" is being replaced by "scale"',
                          DeprecationWarning, 3)
            scale = scaling

        # Passing dpi/figsize as None lets matplotlib fall back to the
        # corresponding rcParam defaults.
        fig = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
        axis = fig.add_subplot(111)
        axis.set_facecolor(self.style.bg_color)

        interpolate = interp_method or step_wise
        samples = pulse.samples
        timeline = np.arange(0, len(samples) + 1, dtype=float) * dt
        timeline, real_part, imag_part = interpolate(
            timeline, samples, self.style.num_points)

        # Fill real and imaginary envelopes against the zero baseline.
        baseline = np.zeros_like(timeline)
        axis.fill_between(x=timeline, y1=real_part, y2=baseline,
                          facecolor=self.style.wave_color[0], alpha=0.3,
                          edgecolor=self.style.wave_color[0], linewidth=1.5,
                          label='real part')
        axis.fill_between(x=timeline, y1=imag_part, y2=baseline,
                          facecolor=self.style.wave_color[1], alpha=0.3,
                          edgecolor=self.style.wave_color[1], linewidth=1.5,
                          label='imaginary part')

        axis.set_xlim(0, pulse.duration * dt)
        if not scale:
            # Auto-scale the vertical axis with 20% headroom.
            peak = max(max(np.abs(real_part)), max(np.abs(imag_part)))
            axis.set_ylim(-1.2 * peak, 1.2 * peak)
        else:
            axis.set_ylim(-1 / scale, 1 / scale)

        bbox = axis.get_position()

        # This check is here for backwards compatibility. Before, the check was around
        # the suptitle line, however since the font style can take on a type of None
        # we need to unfortunately check both the type and the value of the object.
        if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
            fig.suptitle(str(pulse.name),
                         fontsize=self.style.title_font_size,
                         y=bbox.y1 + 0.02,
                         va='bottom')
        return fig
class ScheduleDrawer:
"""A class to create figure for schedule and channel."""
def __init__(self, style: SchedStyle):
"""Create new figure.
Args:
style: Style sheet for pulse schedule visualization.
"""
self.style = style or SchedStyle()
    def _build_channels(self, schedule: ScheduleComponent,
                        channels: List[Channel],
                        t0: int, tf: int,
                        show_framechange_channels: bool = True
                        ) -> Tuple[Dict[Channel, EventsOutputChannels],
                                   Dict[Channel, EventsOutputChannels],
                                   Dict[Channel, EventsOutputChannels]]:
        """Create event table of each pulse channels in the given schedule.

        Args:
            schedule: Schedule object to plot.
            channels: Channels to plot.
            t0: Start time of plot.
            tf: End time of plot.
            show_framechange_channels: Plot channels only with FrameChanges.

        Returns:
            channels: All channels.
            output_channels: All (D, M, U, A) channels.
            snapshot_channels: Snapshots.
        """
        # prepare waveform channels
        drive_channels = collections.OrderedDict()
        measure_channels = collections.OrderedDict()
        control_channels = collections.OrderedDict()
        acquire_channels = collections.OrderedDict()
        snapshot_channels = collections.OrderedDict()
        _channels = set()
        if show_framechange_channels:
            _channels.update(schedule.channels)
        # take channels that do not only contain framechanges
        else:
            for start_time, instruction in schedule.instructions:
                if not isinstance(instruction, (FrameChangeInstruction, ShiftPhase, SetPhase)):
                    _channels.update(instruction.channels)
        # Always include explicitly requested channels, even if empty.
        _channels.update(channels)
        for chan in _channels:
            # Bucket each channel by family; channels whose event table
            # cannot be created are silently skipped.
            if isinstance(chan, DriveChannel):
                try:
                    drive_channels[chan] = EventsOutputChannels(t0, tf)
                except PulseError:
                    pass
            elif isinstance(chan, MeasureChannel):
                try:
                    measure_channels[chan] = EventsOutputChannels(t0, tf)
                except PulseError:
                    pass
            elif isinstance(chan, ControlChannel):
                try:
                    control_channels[chan] = EventsOutputChannels(t0, tf)
                except PulseError:
                    pass
            elif isinstance(chan, AcquireChannel):
                try:
                    acquire_channels[chan] = EventsOutputChannels(t0, tf)
                except PulseError:
                    pass
            elif isinstance(chan, SnapshotChannel):
                try:
                    snapshot_channels[chan] = EventsOutputChannels(t0, tf)
                except PulseError:
                    pass

        # Merge order fixes the top-to-bottom plot order: D, M, U, A.
        output_channels = {**drive_channels, **measure_channels,
                           **control_channels, **acquire_channels}
        channels = {**output_channels, **snapshot_channels}
        # sort by index then name to group qubits together.
        output_channels = collections.OrderedDict(sorted(output_channels.items(),
                                                         key=lambda x: (x[0].index, x[0].name)))
        channels = collections.OrderedDict(sorted(channels.items(),
                                                  key=lambda x: (x[0].index, x[0].name)))

        # Distribute each instruction to the event table of its channels.
        for start_time, instruction in schedule.instructions:
            for channel in instruction.channels:
                if channel in output_channels:
                    output_channels[channel].add_instruction(start_time, instruction)
                elif channel in snapshot_channels:
                    snapshot_channels[channel].add_instruction(start_time, instruction)
        return channels, output_channels, snapshot_channels
@staticmethod
def _scale_channels(output_channels: Dict[Channel, EventsOutputChannels],
scale: float,
channel_scales: Dict[Channel, float] = None,
channels: List[Channel] = None,
plot_all: bool = False) -> Dict[Channel, float]:
"""Count number of channels that contains any instruction to show
and find scale factor of that channel.
Args:
output_channels: Event table of channels to show.
scale: Global scale factor.
channel_scales: Channel specific scale factors.
channels: Specified channels to plot.
plot_all: Plot empty channel.
Returns:
scale_dict: Scale factor of each channel.
"""
# count numbers of valid waveform
scale_dict = {chan: 0 for chan in output_channels.keys()}
for channel, events in output_channels.items():
v_max = 0
if channels:
if channel in channels:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
scale_val = channel_scales.get(channel, scale)
if not scale_val:
# when input schedule is empty or comprises only frame changes,
# we need to overwrite maximum amplitude by a value greater than zero,
# otherwise auto axis scaling will fail with zero division.
v_max = v_max or 1
scale_dict[channel] = 1 / v_max
else:
scale_dict[channel] = scale_val
return scale_dict
def _draw_table(self, figure,
channels: Dict[Channel, EventsOutputChannels],
dt: float):
"""Draw event table if events exist.
Args:
figure (matpotlib.figure.Figure): Figure object
channels: Dictionary of channel and event table
dt: Time interval
Returns:
Tuple[matplotlib.axes.Axes]: Axis objects for table and canvas of pulses.
"""
# create table
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
# plot table
if table_data:
# table area size
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
max_size = self.style.max_table_ratio * figure.get_size_inches()[1]
max_rows = np.floor(max_size/self.style.fig_unit_h_table/ncols)
nrows = int(min(nrows, max_rows))
# don't overflow plot with table data
table_data = table_data[:int(nrows*ncols)]
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (figure.get_size_inches()[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
cell_value[r][3 * c + 0] = 't = %s' % time * dt
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
table.set_fontsize = self.style.table_font_size
else:
tb = None
ax = figure.add_subplot(111)
return tb, ax
@staticmethod
def _draw_snapshots(ax,
snapshot_channels: Dict[Channel, EventsOutputChannels],
y0: float) -> None:
"""Draw snapshots to given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw snapshots.
snapshot_channels: Event table of snapshots.
y0: vertical position to draw the snapshots.
"""
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time, y0), xytext=(time, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
    def _draw_framechanges(self, ax,
                           fcs: Dict[int, FrameChangeInstruction],
                           y0: float) -> None:
        """Draw frame change of given channel to given mpl axis.

        Note: returns nothing; the annotation is added in place (the old
        ``-> bool`` annotation was incorrect).

        Args:
            ax (matplotlib.axes.Axes): axis object to draw frame changes.
            fcs: Event table of frame changes.
            y0: vertical position to draw the frame changes.
        """
        # Draw a circular-arrow glyph at every frame-change time.
        for time in fcs.keys():
            ax.text(x=time, y=y0, s=r'$\circlearrowleft$',
                    fontsize=self.style.icon_font_size,
                    ha='center', va='center')
    def _draw_frequency_changes(self, ax,
                                sf: Dict[int, SetFrequency],
                                y0: float) -> None:
        """Draw set frequency of given channel to given mpl axis.

        Note: returns nothing; the annotation is added in place (the old
        ``-> bool`` annotation was incorrect).

        Args:
            ax (matplotlib.axes.Axes): axis object to draw frame changes.
            sf: Event table of set frequency.
            y0: vertical position to draw the frame changes.
        """
        # Draw a rotated squiggle-arrow glyph at every frequency-change time.
        for time in sf.keys():
            ax.text(x=time, y=y0, s=r'$\leftrightsquigarrow$',
                    fontsize=self.style.icon_font_size,
                    ha='center', va='center', rotation=90)
def _get_channel_color(self, channel: Channel) -> str:
"""Lookup table for waveform color.
Args:
channel: Type of channel.
Return:
Color code or name of color.
"""
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
@staticmethod
def _prev_label_at_time(prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
time: int) -> bool:
"""Check overlap of pulses with pervious channels.
Args:
prev_labels: List of labels in previous channels.
time: Start time of current pulse instruction.
Returns:
`True` if current instruction overlaps with others.
"""
for labels in prev_labels:
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
    def _draw_labels(self, ax,
                     labels: Dict[int, Union[Waveform, Acquire]],
                     prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
                     y0: float) -> None:
        """Draw label of pulse instructions on given mpl axis.

        Args:
            ax (matplotlib.axes.Axes): axis object to draw labels.
            labels: Pulse labels of channel.
            prev_labels: Pulse labels of previous channels.
            y0: vertical position to draw the labels.
        """
        for t0, (tf, cmd) in labels.items():
            # Fall back to a generic label when the command is unnamed.
            if isinstance(cmd, PersistentValue):
                name = cmd.name if cmd.name else 'pv'
            elif isinstance(cmd, Acquire):
                name = cmd.name if cmd.name else 'acquire'
            else:
                name = cmd.name

            # Place the label text centered over the pulse, slightly below y0.
            ax.annotate(r'%s' % name,
                        xy=((t0+tf)//2, y0),
                        xytext=((t0+tf)//2, y0-0.07),
                        fontsize=self.style.label_font_size,
                        ha='center', va='center')

            linestyle = self.style.label_ch_linestyle
            alpha = self.style.label_ch_alpha
            color = self.style.label_ch_color

            # Draw boundary lines only where no previous channel already
            # drew one at the same time (avoids doubled vlines).
            if not self._prev_label_at_time(prev_labels, t0):
                ax.axvline(t0, -1, 1, color=color,
                           linestyle=linestyle, alpha=alpha)
            if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
                ax.axvline(tf, -1, 1, color=color,
                           linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax,
output_channels: Dict[Channel, EventsOutputChannels],
interp_method: Callable,
t0: int, tf: int,
scale_dict: Dict[Channel, float],
label: bool = False,
framechange: bool = True,
frequencychange: bool = True) -> float:
"""Draw pulse instructions on given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw pulses.
output_channels: Event table of channels.
interp_method: Callback function for waveform interpolation.
t0: Start time of schedule.
tf: End time of schedule.
scale_dict: Scale factor for each channel.
label: When set `True` draw labels.
framechange: When set `True` draw frame change symbols.
frequencychange: When set `True` draw frequency change symbols.
Return:
Value of final vertical axis of canvas.
"""
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# scaling value of this channel
scale = 0.5 * scale_dict.get(channel, 0.5)
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float)
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
# when input schedule is empty or comprises only frame changes,
# we should avoid interpolation due to lack of data points.
# instead, it just returns vector of zero.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# Minimum amplitude scaled
amp_min = scale * abs(min(0, np.nanmin(re), np.nanmin(im)))
# scaling and offset
re = scale * re + y0
im = scale * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0)
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, y0)
# plot frequency changes
sf = events.frequencychanges
if sf and frequencychange:
self._draw_frequency_changes(ax, sf, y0 + scale)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=t0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
# show scaling factor
ax.text(x=t0, y=y0 - 0.1, s='x%.1f' % (2 * scale),
fontsize=0.7*self.style.axis_font_size,
ha='right', va='top')
# change the y0 offset for removing spacing when a channel has negative values
if self.style.remove_spacing:
y0 -= 0.5 + amp_min
else:
y0 -= 1
return y0
def draw(self, schedule: ScheduleComponent,
dt: float, interp_method: Callable,
plot_range: Tuple[Union[int, float], Union[int, float]],
scale: float = None,
channel_scales: Dict[Channel, float] = None,
plot_all: bool = True, table: bool = False,
label: bool = False, framechange: bool = True,
scaling: float = None, channels: List[Channel] = None,
show_framechange_channels: bool = True):
"""Draw figure.
Args:
schedule: schedule object to plot.
dt: Time interval of samples. Pulses are visualized in the unit of
cycle time if not provided.
interp_method: Interpolation function. See example.
Interpolation is disabled in default.
See `qiskit.visualization.pulse.interpolation` for more information.
plot_range: A tuple of time range to plot.
scale: Scaling of waveform amplitude. Pulses are automatically
scaled channel by channel if not provided.
channel_scales: Dictionary of scale factor for specific channels.
Scale of channels not specified here is overwritten by `scale`.
plot_all: When set `True` plot empty channels.
table: When set `True` draw event table for supported commands.
label: When set `True` draw label for individual instructions.
framechange: When set `True` draw framechange indicators.
scaling: Deprecated, see `scale`.
channels: A list of channel names to plot.
All non-empty channels are shown if not provided.
show_framechange_channels: When set `True` plot channels
with only framechange instructions.
Returns:
matplotlib.figure.Figure: A matplotlib figure object for the pulse envelope.
Raises:
VisualizationError: When schedule cannot be drawn
"""
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
if channels is None:
channels = []
interp_method = interp_method or step_wise
if channel_scales is None:
channel_scales = {}
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]))
tf = int(np.floor(plot_range[1]))
else:
t0 = 0
# when input schedule is empty or comprises only frame changes,
# we need to overwrite pulse duration by an integer greater than zero,
# otherwise waveform returns empty array and matplotlib will be crashed.
if channels:
tf = schedule.ch_duration(*channels)
else:
tf = schedule.stop_time
tf = tf or 1
# prepare waveform channels
(schedule_channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels, t0, tf,
show_framechange_channels)
# count numbers of valid waveform
scale_dict = self._scale_channels(output_channels,
scale=scale,
channel_scales=channel_scales,
channels=channels,
plot_all=plot_all)
if table:
tb, ax = self._draw_table(figure, schedule_channels, dt)
else:
tb = None
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, scale_dict, label=label,
framechange=framechange)
y_ub = 0.5 + self.style.vertical_span
y_lb = y0 + 0.5 - self.style.vertical_span
self._draw_snapshots(ax, snapshot_channels, y_lb)
ax.set_xlim(t0, tf)
tick_labels = np.linspace(t0, tf, 5)
ax.set_xticks(tick_labels)
ax.set_xticklabels([self.style.axis_formatter % label for label in tick_labels * dt],
fontsize=self.style.axis_font_size)
ax.set_ylim(y_lb, y_ub)
ax.set_yticklabels([])
if tb is not None:
bbox = tb.get_position()
else:
bbox = ax.get_position()
# This check is here for backwards compatibility. Before, the check was around
# the suptitle line, however since the font style can take on a type of None
# we need to unfortunately check both the type and the value of the object.
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(schedule.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
| 39.13 | 96 | 0.548911 |
import collections
import warnings
from typing import Dict, List, Tuple, Callable, Union, Any
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse.interpolation import step_wise
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel, Channel)
from qiskit.pulse.commands import FrameChangeInstruction
from qiskit.pulse import (Waveform, SamplePulse, FrameChange, PersistentValue, Snapshot, Play,
Acquire, PulseError, ParametricPulse, SetFrequency, ShiftPhase,
Instruction, ScheduleComponent, ShiftFrequency, SetPhase)
class EventsOutputChannels:
def __init__(self, t0: int, tf: int):
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._setphase = None
self._frequencychanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time: int, instruction: Instruction):
if instruction.command is not None:
pulse = instruction.command
elif isinstance(instruction, Play):
pulse = instruction.pulse
else:
pulse = instruction
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse)
else:
self.pulses[start_time] = [pulse]
@property
def waveform(self) -> np.ndarray:
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self) -> Dict[int, FrameChangeInstruction]:
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def setphase(self) -> Dict[int, SetPhase]:
if self._setphase is None:
self._build_waveform()
return self._trim(self._setphase)
@property
def frequencychanges(self) -> Dict[int, SetFrequency]:
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def frequencyshift(self) -> Dict[int, ShiftFrequency]:
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def conditionals(self) -> Dict[int, str]:
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self) -> Dict[int, Snapshot]:
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self) -> Dict[int, Union[Waveform, Acquire]]:
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self) -> bool:
if (any(self.waveform) or self.framechanges or self.setphase or
self.conditionals or self.snapshots):
return False
return True
def to_table(self, name: str) -> List[Tuple[int, str, str]]:
time_event = []
framechanges = self.framechanges
setphase = self.setphase
conditionals = self.conditionals
snapshots = self.snapshots
frequencychanges = self.frequencychanges
for key, val in framechanges.items():
data_str = 'shift phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in setphase.items():
data_str = 'set phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
for key, val in frequencychanges.items():
data_str = 'frequency: %.4e' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
self._framechanges = {}
self._setphase = {}
self._frequencychanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
tmp_set_phase = 0
tmp_sf = None
for command in commands:
if isinstance(command, (FrameChange, ShiftPhase)):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, SetPhase):
tmp_set_phase = command.phase
pv[time:] = 0
elif isinstance(command, SetFrequency):
tmp_sf = command.frequency
elif isinstance(command, ShiftFrequency):
tmp_sf = command.frequency
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
if tmp_set_phase != 0:
self._setphase[time] = tmp_set_phase
fc = tmp_set_phase
if tmp_sf is not None:
self._frequencychanges[time] = tmp_sf
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, ParametricPulse):
command = command.get_sample_pulse()
if isinstance(command, (Waveform, SamplePulse)):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(tf - time)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events: Dict[int, Any]) -> Dict[int, Any]:
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
def __init__(self, style: PulseStyle):
self.style = style or PulseStyle()
def draw(self, pulse: Waveform,
dt: float = 1.0,
interp_method: Callable = None,
scale: float = 1, scaling: float = None):
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
interp_method = interp_method or step_wise
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scale:
ax.set_ylim(-1/scale, 1/scale)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
bbox = ax.get_position()
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(pulse.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
class ScheduleDrawer:
def __init__(self, style: SchedStyle):
self.style = style or SchedStyle()
def _build_channels(self, schedule: ScheduleComponent,
channels: List[Channel],
t0: int, tf: int,
show_framechange_channels: bool = True
) -> Tuple[Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels]]:
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
_channels = set()
if show_framechange_channels:
_channels.update(schedule.channels)
else:
for start_time, instruction in schedule.instructions:
if not isinstance(instruction, (FrameChangeInstruction, ShiftPhase, SetPhase)):
_channels.update(instruction.channels)
_channels.update(channels)
for chan in _channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **snapshot_channels}
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
@staticmethod
def _scale_channels(output_channels: Dict[Channel, EventsOutputChannels],
scale: float,
channel_scales: Dict[Channel, float] = None,
channels: List[Channel] = None,
plot_all: bool = False) -> Dict[Channel, float]:
scale_dict = {chan: 0 for chan in output_channels.keys()}
for channel, events in output_channels.items():
v_max = 0
if channels:
if channel in channels:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
scale_val = channel_scales.get(channel, scale)
if not scale_val:
v_max = v_max or 1
scale_dict[channel] = 1 / v_max
else:
scale_dict[channel] = scale_val
return scale_dict
def _draw_table(self, figure,
channels: Dict[Channel, EventsOutputChannels],
dt: float):
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
if table_data:
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
max_size = self.style.max_table_ratio * figure.get_size_inches()[1]
max_rows = np.floor(max_size/self.style.fig_unit_h_table/ncols)
nrows = int(min(nrows, max_rows))
table_data = table_data[:int(nrows*ncols)]
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (figure.get_size_inches()[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
cell_value[r][3 * c + 0] = 't = %s' % time * dt
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
table.set_fontsize = self.style.table_font_size
else:
tb = None
ax = figure.add_subplot(111)
return tb, ax
@staticmethod
def _draw_snapshots(ax,
snapshot_channels: Dict[Channel, EventsOutputChannels],
y0: float) -> None:
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time, y0), xytext=(time, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax,
fcs: Dict[int, FrameChangeInstruction],
y0: float) -> bool:
for time in fcs.keys():
ax.text(x=time, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
def _draw_frequency_changes(self, ax,
sf: Dict[int, SetFrequency],
y0: float) -> bool:
for time in sf.keys():
ax.text(x=time, y=y0, s=r'$\leftrightsquigarrow$',
fontsize=self.style.icon_font_size,
ha='center', va='center', rotation=90)
def _get_channel_color(self, channel: Channel) -> str:
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
@staticmethod
def _prev_label_at_time(prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
time: int) -> bool:
for labels in prev_labels:
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax,
labels: Dict[int, Union[Waveform, Acquire]],
prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
y0: float) -> None:
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2, y0),
xytext=((t0+tf)//2, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax,
output_channels: Dict[Channel, EventsOutputChannels],
interp_method: Callable,
t0: int, tf: int,
scale_dict: Dict[Channel, float],
label: bool = False,
framechange: bool = True,
frequencychange: bool = True) -> float:
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# scaling value of this channel
scale = 0.5 * scale_dict.get(channel, 0.5)
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float)
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
# when input schedule is empty or comprises only frame changes,
# we should avoid interpolation due to lack of data points.
# instead, it just returns vector of zero.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# Minimum amplitude scaled
amp_min = scale * abs(min(0, np.nanmin(re), np.nanmin(im)))
# scaling and offset
re = scale * re + y0
im = scale * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, y0)
# plot frequency changes
sf = events.frequencychanges
if sf and frequencychange:
self._draw_frequency_changes(ax, sf, y0 + scale)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=t0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
# show scaling factor
ax.text(x=t0, y=y0 - 0.1, s='x%.1f' % (2 * scale),
fontsize=0.7*self.style.axis_font_size,
ha='right', va='top')
# change the y0 offset for removing spacing when a channel has negative values
if self.style.remove_spacing:
y0 -= 0.5 + amp_min
else:
y0 -= 1
return y0
def draw(self, schedule: ScheduleComponent,
dt: float, interp_method: Callable,
plot_range: Tuple[Union[int, float], Union[int, float]],
scale: float = None,
channel_scales: Dict[Channel, float] = None,
plot_all: bool = True, table: bool = False,
label: bool = False, framechange: bool = True,
scaling: float = None, channels: List[Channel] = None,
show_framechange_channels: bool = True):
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
if channels is None:
channels = []
interp_method = interp_method or step_wise
if channel_scales is None:
channel_scales = {}
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]))
tf = int(np.floor(plot_range[1]))
else:
t0 = 0
# when input schedule is empty or comprises only frame changes,
# we need to overwrite pulse duration by an integer greater than zero,
# otherwise waveform returns empty array and matplotlib will be crashed.
if channels:
tf = schedule.ch_duration(*channels)
else:
tf = schedule.stop_time
tf = tf or 1
# prepare waveform channels
(schedule_channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels, t0, tf,
show_framechange_channels)
# count numbers of valid waveform
scale_dict = self._scale_channels(output_channels,
scale=scale,
channel_scales=channel_scales,
channels=channels,
plot_all=plot_all)
if table:
tb, ax = self._draw_table(figure, schedule_channels, dt)
else:
tb = None
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, scale_dict, label=label,
framechange=framechange)
y_ub = 0.5 + self.style.vertical_span
y_lb = y0 + 0.5 - self.style.vertical_span
self._draw_snapshots(ax, snapshot_channels, y_lb)
ax.set_xlim(t0, tf)
tick_labels = np.linspace(t0, tf, 5)
ax.set_xticks(tick_labels)
ax.set_xticklabels([self.style.axis_formatter % label for label in tick_labels * dt],
fontsize=self.style.axis_font_size)
ax.set_ylim(y_lb, y_ub)
ax.set_yticklabels([])
if tb is not None:
bbox = tb.get_position()
else:
bbox = ax.get_position()
# This check is here for backwards compatibility. Before, the check was around
# the suptitle line, however since the font style can take on a type of None
# we need to unfortunately check both the type and the value of the object.
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(schedule.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
| true | true |
f7305d3f7c5dd7b9094f0a20b4e9f5a957e94535 | 1,443 | py | Python | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | import numpy as np
# Dict of all the patterns with their replacements.
# Structure:
# name of replacement -> list of (pattern, replacement, kwargs) tuples
LINTBITS = {
'diagonal matrix dot product': [
# diag(x).dot(y)
('${diag}(${x}).dot(${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag')),
# dot(diag(x), y)
('${dot}(${diag}(${x}), ${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
# x.dot(diag(y))
('${x}.dot(${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag')),
# dot(x, diag(y))
('${dot}(${x}, ${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
],
'inverting result of in1d': [
# ~np.in1d(x, y)
('~${in1d}(${x}, ${y})', '${in1d}(${x}, ${y}, invert=True)',
dict(in1d='name=numpy.in1d')),
# ~np.in1d(x, y, assume_unique=z)
('~${in1d}(${x}, ${y}, assume_unique=${z})',
'${in1d}(${x}, ${y}, assume_unique=${z}, invert=True)',
dict(in1d='name=numpy.in1d')),
],
}
if np.lib.NumpyVersion(np.__version__) < '1.3.0':
# this bug was fixed in numpy 1.3.0
LINTBITS['in-place transpose'] = [
# x += x.T
('${x} += ${x}.T', '${x} = ${x} + ${x}.T', dict()),
# x += x.transpose()
('${x} += ${x}.transpose()', '${x} = ${x} + ${x}.T', dict()),
]
| 35.195122 | 72 | 0.444213 | import numpy as np
LINTBITS = {
'diagonal matrix dot product': [
('${diag}(${x}).dot(${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag')),
('${dot}(${diag}(${x}), ${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
('${x}.dot(${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag')),
('${dot}(${x}, ${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
],
'inverting result of in1d': [
('~${in1d}(${x}, ${y})', '${in1d}(${x}, ${y}, invert=True)',
dict(in1d='name=numpy.in1d')),
('~${in1d}(${x}, ${y}, assume_unique=${z})',
'${in1d}(${x}, ${y}, assume_unique=${z}, invert=True)',
dict(in1d='name=numpy.in1d')),
],
}
if np.lib.NumpyVersion(np.__version__) < '1.3.0':
LINTBITS['in-place transpose'] = [
('${x} += ${x}.T', '${x} = ${x} + ${x}.T', dict()),
('${x} += ${x}.transpose()', '${x} = ${x} + ${x}.T', dict()),
]
| true | true |
f7305eb7c580daac5db72b2c4ac0c1258330b442 | 26,460 | py | Python | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | 1 | 2021-06-03T12:39:23.000Z | 2021-06-03T12:39:23.000Z | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | null | null | null | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
# 代码合集
import os, sys, six, re, json
import logging
import numpy as np
from collections import defaultdict
from bert4keras.backend import K, keras, tf
_open_ = open
is_py2 = six.PY2
if not is_py2:
basestring = str
def to_array(*args):
"""批量转numpy的array
"""
results = [np.array(a) for a in args]
if len(args) == 1:
return results[0]
else:
return results
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, basestring)
def strQ2B(ustring):
"""全角符号转对应的半角符号
"""
rstring = ''
for uchar in ustring:
inside_code = ord(uchar)
# 全角空格直接转换
if inside_code == 12288:
inside_code = 32
# 全角字符(除空格)根据关系转化
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += unichr(inside_code)
return rstring
def string_matching(s, keywords):
"""判断s是否至少包含keywords中的至少一个字符串
"""
for k in keywords:
if re.search(k, s):
return True
return False
def convert_to_unicode(text, encoding='utf-8', errors='ignore'):
"""字符串转换为unicode格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, str):
text = text.decode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
def convert_to_str(text, encoding='utf-8', errors='ignore'):
"""字符串转换为str格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, unicode):
text = text.encode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
class open:
"""模仿python自带的open函数
作用:1.主要是为了同时兼容py2和py3;2.增加了索引功能,方便读取大文件。
"""
def __init__(
self, name, mode='r', encoding=None, errors='strict', indexable=False
):
self.name = name
if is_py2:
self.file = _open_(name, mode)
else:
self.file = _open_(name, mode, encoding=encoding, errors=errors)
self.encoding = encoding
self.errors = errors
self.iterator = None
if indexable:
if is_string(indexable) and os.path.exists(indexable):
self.offsets = json.load(_open_(indexable))
else:
self.create_indexes()
if is_string(indexable):
json.dump(self.offsets, _open_(indexable, 'w'))
def create_indexes(self):
print('creating indexes ...')
self.offsets, offset = [], 0
pbar = keras.utils.Progbar(os.path.getsize(self.name))
while self.readline():
self.offsets.append(offset)
offset = self.tell()
pbar.update(offset)
self.seek(0)
print('indexes created.')
def __getitem__(self, key):
self.seek(self.offsets[key])
l = self.readline()
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
return l
def __len__(self):
return len(self.offsets)
def __iter__(self):
if hasattr(self, 'offsets'):
for i in range(len(self)):
yield self[i]
else:
for l in self.file:
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
yield l
def next(self):
if self.iterator is None:
self.iterator = self.__iter__()
return next(self.iterator)
def __next__(self):
return self.next()
def read(self):
text = self.file.read()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readline(self):
text = self.file.readline()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readlines(self):
if self.encoding:
return [
convert_to_unicode(text, self.encoding, self.errors)
for text in self.file.readlines()
]
else:
return self.file.readlines()
def write(self, text):
if self.encoding:
text = convert_to_str(text, self.encoding, self.errors)
self.file.write(text)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
def tell(self):
return self.file.tell()
def seek(self, offset=0):
return self.file.seek(offset)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def parallel_apply(
    func,
    iterable,
    workers,
    max_queue_size,
    callback=None,
    dummy=False,
    random_seeds=True
):
    """Apply ``func`` to each element of ``iterable`` using processes/threads.

    Note: this apply is asynchronous and unordered — for inputs a, b, c the
    outputs may come back as func(c), func(a), func(b).  When ``callback``
    is None the results are re-sorted by input index before being returned.

    Args:
        callback: callback invoked on each single output; if None, the
            ordered list of results is returned instead.
        dummy: False -> multiprocessing, True -> multithreading.
        random_seeds: per-worker numpy random seeds; True means one
            unseeded slot per worker, None/False disables seeding entirely.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue
    in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
    if random_seeds is True:
        random_seeds = [None] * workers
    elif random_seeds is None or random_seeds is False:
        random_seeds = []
    for seed in random_seeds:
        seed_queue.put(seed)
    def worker_step(in_queue, out_queue):
        """Wrap the single-step function into an endless worker loop."""
        if not seed_queue.empty():
            np.random.seed(seed_queue.get())
        while True:
            i, d = in_queue.get()
            r = func(d)
            out_queue.put((i, r))
    # Start the worker processes/threads (worker_step runs as initializer).
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    if callback is None:
        results = []
    # Post-processing: drain whatever finished results are currently ready.
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            i, d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append((i, d))
            else:
                callback(d)
        return out_count
    # Feed inputs and collect outputs, draining whenever the queue is full.
    in_count, out_count = 0, 0
    for i, d in enumerate(iterable):
        in_count += 1
        while True:
            try:
                in_queue.put((i, d), block=False)
                break
            except six.moves.queue.Full:
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()
    while out_count != in_count:
        out_count += process_out_queue()
    pool.terminate()
    if callback is None:
        results = sorted(results, key=lambda r: r[0])
        return [r[1] for r in results]
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
    """Numpy helper padding a batch of sequences to a common length.

    Each item is first truncated to ``length`` along the leading
    ``seq_dims`` axes, then padded with ``value`` either after ('post') or
    before ('pre') the data.  When ``length`` is None the per-axis maximum
    over the batch is used.  Returns a stacked ``np.array``.
    """
    if length is None:
        # Target length per axis: the batch-wide maximum shape.
        length = np.max([np.shape(item)[:seq_dims] for item in inputs], axis=0)
    elif not hasattr(length, '__getitem__'):
        length = [length]

    trunc = [np.s_[:length[dim]] for dim in range(seq_dims)]
    trunc = trunc[0] if len(trunc) == 1 else tuple(trunc)
    widths = [(0, 0)] * len(np.shape(inputs[0]))

    padded = []
    for item in inputs:
        item = item[trunc]
        for dim in range(seq_dims):
            gap = length[dim] - np.shape(item)[dim]
            if mode == 'post':
                widths[dim] = (0, gap)
            elif mode == 'pre':
                widths[dim] = (gap, 0)
            else:
                raise ValueError('"mode" argument must be "post" or "pre".')
        padded.append(np.pad(item, widths, 'constant', constant_values=value))
    return np.array(padded)
def truncate_sequences(maxlen, indices, *sequences):
    """Trim the given sequences in place until their total length <= maxlen.

    Empty sequences are dropped up front.  At each step one element is
    popped — at that sequence's index from ``indices`` — from whichever
    surviving sequence is currently the longest.  Returns the (mutated)
    list of surviving sequences.
    """
    sequences = [seq for seq in sequences if seq]
    if not isinstance(indices, (list, tuple)):
        # Broadcast a single index to every sequence.
        indices = [indices] * len(sequences)
    while sum(len(seq) for seq in sequences) > maxlen:
        longest = np.argmax([len(seq) for seq in sequences])
        sequences[longest].pop(indices[longest])
    return sequences
def text_segmentate(text, maxlen, seps='\n', strips=None):
    """Recursively split ``text`` into pieces of at most roughly ``maxlen``.

    ``seps`` is an ordered string of separators tried from coarsest to
    finest: the text is split on ``seps[0]`` and over-long accumulations
    are re-segmented with the remaining separators.  ``strips`` are
    characters stripped from both ends before splitting.
    """
    text = text.strip().strip(strips)
    # Base case: short enough, or no separators left to split on.
    if not seps or len(text) <= maxlen:
        return [text]
    results = []
    buffer = ''
    pieces = text.split(seps[0])
    last = len(pieces) - 1
    for idx, piece in enumerate(pieces):
        # Flush the buffer before it would overflow maxlen.
        if buffer and piece and len(buffer) + len(piece) > maxlen - 1:
            results.extend(text_segmentate(buffer, maxlen, seps[1:], strips))
            buffer = ''
        # Re-attach the separator except after the final piece.
        buffer += piece if idx == last else piece + seps[0]
    if buffer:
        results.extend(text_segmentate(buffer, maxlen, seps[1:], strips))
    return results
def is_one_of(x, ys):
    """Identity-based membership test.

    Equivalent to ``x in ys`` except that only ``is`` (never ``==``) is
    used, which avoids the failures ``in`` can hit when elements define an
    ambiguous ``__eq__`` (e.g. numpy arrays).
    """
    return any(x is y for y in ys)
class DataGenerator(object):
    """Template class for data generators.

    Subclasses implement ``__iter__`` to yield batches; ``forfit`` turns
    that into the endless generator Keras' ``fit`` expects.
    """
    def __init__(self, data, batch_size=32, buffer_size=None):
        self.data = data
        self.batch_size = batch_size
        if hasattr(self.data, '__len__'):
            # Known length: number of batches, rounding the last one up.
            self.steps = len(self.data) // self.batch_size
            if len(self.data) % self.batch_size != 0:
                self.steps += 1
        else:
            self.steps = None
        self.buffer_size = buffer_size or batch_size * 1000
    def __len__(self):
        return self.steps
    def sample(self, random=False):
        """Sampling generator; yields ``(is_end, sample)`` pairs.

        With ``random=True`` the data is shuffled: a full permutation when
        the length is known, otherwise a streaming shuffle buffer holding
        ``buffer_size`` items.
        """
        if random:
            if self.steps is None:
                def generator():
                    # Streaming shuffle: fill a buffer, then pop randomly.
                    caches, isfull = [], False
                    for d in self.data:
                        caches.append(d)
                        if isfull:
                            i = np.random.randint(len(caches))
                            yield caches.pop(i)
                        elif len(caches) == self.buffer_size:
                            isfull = True
                    # Drain the remaining buffered items in random order.
                    while caches:
                        i = np.random.randint(len(caches))
                        yield caches.pop(i)
            else:
                def generator():
                    for i in np.random.permutation(len(self.data)):
                        yield self.data[i]
            data = generator()
        else:
            data = iter(self.data)
        # Look one element ahead so the final sample can carry is_end=True.
        d_current = next(data)
        for d_next in data:
            yield False, d_current
            d_current = d_next
        yield True, d_current
    def __iter__(self, random=False):
        raise NotImplementedError
    def forfit(self, random=True):
        # Endless batch generator for keras fit(...).
        while True:
            for d in self.__iter__(random):
                yield d
    def to_dataset(self, types, shapes, names=None, padded_batch=False):
        """Convert to ``tf.data.Dataset`` format.

        If ``names`` is passed, samples are automatically wrapped as dicts
        (a single name, a flat list of names, or nested lists of names).
        """
        if names is None:
            generator = self.forfit
        else:
            if is_string(names):
                warps = lambda k, v: {k: v}
            elif is_string(names[0]):
                warps = lambda k, v: dict(zip(k, v))
            else:
                warps = lambda k, v: tuple(
                    dict(zip(i, j)) for i, j in zip(k, v)
                )
            def generator():
                for d in self.forfit():
                    yield warps(names, d)
            types = warps(names, types)
            shapes = warps(names, shapes)
        if padded_batch:
            dataset = tf.data.Dataset.from_generator(
                generator, output_types=types
            )
            dataset = dataset.padded_batch(self.batch_size, shapes)
        else:
            dataset = tf.data.Dataset.from_generator(
                generator, output_types=types, output_shapes=shapes
            )
            dataset = dataset.batch(self.batch_size)
        return dataset
class ViterbiDecoder(object):
    """Base class implementing Viterbi decoding over a transition matrix.

    ``trans[i, j]`` scores a move from label i to label j.  The optional
    ``starts``/``ends`` arguments restrict which labels may begin or
    terminate a path.
    """
    def __init__(self, trans, starts=None, ends=None):
        self.trans = trans
        self.num_labels = len(trans)
        # Labels that are *not* allowed at the first / last position.
        self.non_starts = []
        self.non_ends = []
        if starts is not None:
            self.non_starts = [
                i for i in range(self.num_labels) if i not in starts
            ]
        if ends is not None:
            self.non_ends = [
                i for i in range(self.num_labels) if i not in ends
            ]
    def decode(self, nodes):
        """Return the best label path; ``nodes.shape == [seq_len, num_labels]``.

        Note: ``nodes`` is modified in place to mask out disallowed
        start/end labels.
        """
        nodes[0, self.non_starts] -= np.inf
        nodes[-1, self.non_ends] -= np.inf
        # Dynamic programme: keep, per label, the best score and path so far.
        label_row = np.arange(self.num_labels).reshape((1, -1))
        best_scores = nodes[0].reshape((-1, 1))
        best_paths = label_row
        for step in range(1, len(nodes)):
            scores = best_scores + self.trans + nodes[step].reshape((1, -1))
            sources = scores.argmax(0)
            best_scores = scores.max(0).reshape((-1, 1))
            best_paths = np.concatenate([best_paths[:, sources], label_row], 0)
        # Overall best path ends at the highest-scoring final label.
        return best_paths[:, best_scores[:, 0].argmax()]
def softmax(x, axis=-1):
    """Numpy softmax along ``axis`` (max-shifted for numerical stability)."""
    shifted = np.exp(x - x.max(axis=axis, keepdims=True))
    return shifted / shifted.sum(axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
    """Generic base class for autoregressive sequence decoding.

    Provides two strategies: beam search and random sampling.
    """
    def __init__(self, start_id, end_id, maxlen, minlen=1):
        self.start_id = start_id
        self.end_id = end_id
        self.maxlen = maxlen
        self.minlen = minlen
        self.models = {}
        if start_id is None:
            self.first_output_ids = np.empty((1, 0), dtype=int)
        else:
            self.first_output_ids = np.array([[self.start_id]])
    @staticmethod
    def wraps(default_rtype='probas', use_states=False):
        """Decorator that completes a user-supplied ``predict`` function.

        Currently it: 1. adds the ``rtype`` argument and converts between
        probabilities and logits accordingly; 2. normalises the handling
        of ``states``; 3. adds and applies the ``temperature`` argument.
        """
        def actual_decorator(predict):
            def new_predict(
                self,
                inputs,
                output_ids,
                states,
                temperature=1,
                rtype=default_rtype
            ):
                assert rtype in ['probas', 'logits']
                prediction = predict(self, inputs, output_ids, states)
                if not use_states:
                    prediction = (prediction, None)
                if default_rtype == 'logits':
                    prediction = (
                        softmax(prediction[0] / temperature), prediction[1]
                    )
                elif temperature != 1:
                    probas = np.power(prediction[0], 1.0 / temperature)
                    probas = probas / probas.sum(axis=-1, keepdims=True)
                    prediction = (probas, prediction[1])
                if rtype == 'probas':
                    return prediction
                else:
                    return np.log(prediction[0] + 1e-12), prediction[1]
            return new_predict
        return actual_decorator
    def last_token(self, model):
        """Build (and cache) a Model returning only the last token's output."""
        if model not in self.models:
            outputs = [
                keras.layers.Lambda(lambda x: x[:, -1])(output)
                for output in model.outputs
            ]
            self.models[model] = keras.models.Model(model.inputs, outputs)
        return self.models[model]
    def predict(self, inputs, output_ids, states=None):
        """User-defined recursive one-step prediction function.

        Note: implementations must be decorated with ``wraps``, passing
        ``default_rtype`` and ``use_states``; ``default_rtype`` is either
        'logits' or 'probas' — 'probas' returns normalised probabilities,
        while rtype='logits' returns pre-softmax scores or log-probs.
        Returns: a tuple ``(scores_or_probas, states)``.
        """
        raise NotImplementedError
    def beam_search(self, inputs, topk, states=None, temperature=1, min_ends=1):
        """Beam search decoding.

        Note: ``topk`` here is the beam size.
        Returns: the single best decoded sequence.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores, states = self.predict(
                inputs, output_ids, states, temperature, 'logits'
            )  # scores for the current step
            if step == 0:  # after the first step, tile inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulate scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep topk
            indices_1 = indices // scores.shape[1]  # beam (row) indices
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # token (column) indices
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # update output sequences
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # update accumulated scores
            end_counts = (output_ids == self.end_id).sum(1)  # count end tokens seen
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                best_one = output_scores.argmax()  # best-scoring candidate
                if end_counts[best_one] == min_ends:  # already terminated
                    return output_ids[best_one]  # return it directly
                else:  # otherwise keep only unfinished candidates
                    flag = (end_counts < min_ends)  # mark unfinished sequences
                    if not flag.all():  # some have finished
                        inputs = [i[flag] for i in inputs]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished scores
                        end_counts = end_counts[flag]  # drop finished end counts
                        topk = flag.sum()  # shrink topk accordingly
        # maxlen reached: return the best sequence found so far
        return output_ids[output_scores.argmax()]
    def random_sample(
        self,
        inputs,
        n,
        topk=None,
        topp=None,
        states=None,
        temperature=1,
        min_ends=1
    ):
        """Randomly sample ``n`` decoding results.

        Note: a non-None ``topk`` samples each step only from the ``topk``
        most probable tokens; a non-None ``topp`` samples only from the
        highest-probability tokens whose cumulative probability just
        reaches ``topp``.
        Returns: a list of ``n`` decoded sequences.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids = self.first_output_ids
        results = []
        for step in range(self.maxlen):
            probas, states = self.predict(
                inputs, output_ids, states, temperature, 'probas'
            )  # probabilities for the current step
            probas /= probas.sum(axis=1, keepdims=True)  # ensure normalisation
            if step == 0:  # after the first step, tile everything n times
                probas = np.repeat(probas, n, axis=0)
                inputs = [np.repeat(i, n, axis=0) for i in inputs]
                output_ids = np.repeat(output_ids, n, axis=0)
            if topk is not None:
                k_indices = probas.argpartition(-topk,
                                                axis=1)[:, -topk:]  # keep topk only
                probas = np.take_along_axis(probas, k_indices, axis=1)  # topk probabilities
                probas /= probas.sum(axis=1, keepdims=True)  # renormalise
            if topp is not None:
                p_indices = probas.argsort(axis=1)[:, ::-1]  # sort descending
                probas = np.take_along_axis(probas, p_indices, axis=1)  # sorted probabilities
                cumsum_probas = np.cumsum(probas, axis=1)  # cumulative probabilities
                flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # mark tail beyond topp
                flag[:, 0] = False  # with np.roll above, shifts the mask by one
                probas[flag] = 0  # zero out the tail
                probas /= probas.sum(axis=1, keepdims=True)  # renormalise
            sample_func = lambda p: np.random.choice(len(p), p=p)  # sample by probability
            sample_ids = np.apply_along_axis(sample_func, 1, probas)  # perform sampling
            sample_ids = sample_ids.reshape((-1, 1))  # align shape
            if topp is not None:
                sample_ids = np.take_along_axis(
                    p_indices, sample_ids, axis=1
                )  # map back to original ids
            if topk is not None:
                sample_ids = np.take_along_axis(
                    k_indices, sample_ids, axis=1
                )  # map back to original ids
            output_ids = np.concatenate([output_ids, sample_ids], 1)  # update outputs
            end_counts = (output_ids == self.end_id).sum(1)  # count end tokens seen
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                flag = (end_counts == min_ends)  # mark finished sequences
                if flag.any():  # some have finished
                    for ids in output_ids[flag]:  # collect finished sequences
                        results.append(ids)
                    flag = (flag == False)  # mark unfinished sequences
                    inputs = [i[flag] for i in inputs]  # keep unfinished inputs only
                    output_ids = output_ids[flag]  # keep unfinished candidates only
                    end_counts = end_counts[flag]  # keep unfinished end counts only
                    if len(output_ids) == 0:
                        break
        # any sequences still unfinished go straight into the results
        for ids in output_ids:
            results.append(ids)
        # return all collected sequences
        return results
def insert_arguments(**arguments):
    """Decorator that injects default attributes onto ``self``.

    Intended mainly for a class's ``__init__``: every keyword given to the
    decorator becomes an instance attribute, and callers may override any
    of them by passing a keyword of the same name (which is consumed and
    not forwarded to the wrapped function).
    """
    def actual_decorator(func):
        def new_func(self, *args, **kwargs):
            for name, default in arguments.items():
                # Pop the caller's override if present, else use the default.
                setattr(self, name, kwargs.pop(name, default))
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def delete_arguments(*arguments):
    """Decorator that forbids the listed keyword arguments on a method.

    Intended mainly for a class's ``__init__``: passing any listed name
    raises ``TypeError``, mirroring Python's own unexpected-keyword error.
    """
    def actual_decorator(func):
        def new_func(self, *args, **kwargs):
            for forbidden in arguments:
                if forbidden in kwargs:
                    raise TypeError(
                        '%s got an unexpected keyword argument \'%s\'' %
                        (self.__class__.__name__, forbidden)
                    )
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def longest_common_substring(source, target):
    """Longest common *contiguous* substring of ``source`` and ``target``.

    Returns ``(length, (s_start, s_end, t_start, t_end))`` where the two
    half-open ranges index the matching slice in each input.  When several
    substrings tie for longest, an arbitrary one of them is reported.
    """
    table = defaultdict(int)
    best, span = 0, (0, 0, 0, 0)
    for i, s_char in enumerate(source, 1):
        for j, t_char in enumerate(target, 1):
            if s_char == t_char:
                # Extend the diagonal run ending at (i-1, j-1).
                table[i, j] = table[i - 1, j - 1] + 1
                if table[i, j] > best:
                    best = table[i, j]
                    span = (i - best, i, j - best, j)
    return best, span
def longest_common_subsequence(source, target):
    """Longest common (not necessarily contiguous) subsequence.

    Returns ``(length, mapping)`` where ``mapping`` is a list of
    ``(source_index, target_index)`` pairs, one per matched element.  When
    several subsequences tie for longest, an arbitrary one is reported.
    """
    # Standard LCS dynamic programme, 1-based on both axes.
    table = defaultdict(int)
    for i, s_char in enumerate(source, 1):
        for j, t_char in enumerate(target, 1):
            if s_char == t_char:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i, j - 1], table[i - 1, j])
    total = table[len(source), len(target)]
    # Walk the table backwards to recover one optimal alignment.
    mapping = []
    i, j = len(source) - 1, len(target) - 1
    while len(mapping) < total:
        if source[i] == target[j]:
            mapping.append((i, j))
            i -= 1
            j -= 1
        elif table[i + 1, j] > table[i, j + 1]:
            j -= 1
        else:
            i -= 1
    return total, mapping[::-1]
class WebServing(object):
    """Simple web-serving helper.

    Usage:
        arguments = {'text': (None, True), 'n': (int, False)}
        web = WebServing(port=8864)
        web.route('/gen_synonyms', gen_synonyms, arguments)
        web.start()
        # then visit http://127.0.0.1:8864/gen_synonyms?text=你好
    Notes:
        A thin wrapper around bottlepy, intended only for ad-hoc testing;
        no performance guarantees.  Currently only Tensorflow 1.x with
        Keras <= 2.3.1 is guaranteed to work.  Improvements are welcome.
    Dependencies:
        pip install bottle
        pip install paste
        (paste is only needed when server='paste' is used)
    """
    def __init__(self, host='0.0.0.0', port=8000, server='paste'):
        import bottle
        self.host = host
        self.port = port
        self.server = server
        # Capture the TF graph/session now so handler threads can restore them.
        self.graph = tf.get_default_graph()
        self.sess = K.get_session()
        self.set_session = K.set_session
        self.bottle = bottle
    def wraps(self, func, arguments, method='GET'):
        """Wrap ``func`` as a web endpoint returning a JSON envelope.

        Args:
            func: the function to expose; its output must be JSON
                serialisable, i.e. ``json.dumps(func(inputs))`` must
                succeed;
            arguments: declares func's parameters; each key is a parameter
                name, value[0] is a converter applied to the incoming
                string value (or None for no conversion) and value[1] says
                whether the parameter is required;
            method: 'GET' or 'POST'.
        """
        def new_func():
            outputs = {'code': 0, 'desc': u'succeeded', 'data': {}}
            kwargs = {}
            for key, value in arguments.items():
                if method == 'GET':
                    result = self.bottle.request.GET.getunicode(key)
                else:
                    result = self.bottle.request.POST.getunicode(key)
                if result is None:
                    # Missing required argument -> error code 1.
                    if value[1]:
                        outputs['code'] = 1
                        outputs['desc'] = 'lack of "%s" argument' % key
                        return json.dumps(outputs, ensure_ascii=False)
                else:
                    if value[0] is not None:
                        result = value[0](result)
                    kwargs[key] = result
            try:
                # Restore the captured graph/session inside the handler thread.
                with self.graph.as_default():
                    self.set_session(self.sess)
                    outputs['data'] = func(**kwargs)
            except Exception as e:
                # Any handler failure -> error code 2 with the message.
                outputs['code'] = 2
                outputs['desc'] = str(e)
            return json.dumps(outputs, ensure_ascii=False)
        return new_func
    def route(self, path, func, arguments, method='GET'):
        """Register ``func`` as an endpoint at ``path``."""
        func = self.wraps(func, arguments, method)
        self.bottle.route(path, method=method)(func)
    def start(self):
        """Start serving (blocking)."""
        self.bottle.run(host=self.host, port=self.port, server=self.server)
class Hook:
    """Module proxy that triggers the ``uniout`` import lazily."""
    def __init__(self, module):
        # The real module object being proxied.
        self.module = module
    def __getattr__(self, attr):
        """Make ``from bert4keras.backend import uniout`` equivalent to
        ``import uniout`` (auto-detects the Python version; a no-op on
        Python 3).

        NOTE(review): for attr == 'uniout' this falls through and returns
        None implicitly (on both versions) — confirm that is intended.
        """
        if attr == 'uniout':
            if is_py2:
                import uniout
        else:
            # Every other attribute is delegated to the real module.
            return getattr(self.module, attr)
# Replace this module in sys.modules with a Hook proxy so that accessing the
# `uniout` attribute triggers its side-effecting import lazily; the proxy
# delegates every other attribute to the original module object.
Hook.__name__ = __name__
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| 30.875146 | 80 | 0.524339 |
import os, sys, six, re, json
import logging
import numpy as np
from collections import defaultdict
from bert4keras.backend import K, keras, tf
_open_ = open
is_py2 = six.PY2
if not is_py2:
basestring = str
def to_array(*args):
results = [np.array(a) for a in args]
if len(args) == 1:
return results[0]
else:
return results
def is_string(s):
return isinstance(s, basestring)
def strQ2B(ustring):
rstring = ''
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 12288:
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += unichr(inside_code)
return rstring
def string_matching(s, keywords):
for k in keywords:
if re.search(k, s):
return True
return False
def convert_to_unicode(text, encoding='utf-8', errors='ignore'):
if is_py2:
if isinstance(text, str):
text = text.decode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
def convert_to_str(text, encoding='utf-8', errors='ignore'):
if is_py2:
if isinstance(text, unicode):
text = text.encode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
class open:
def __init__(
self, name, mode='r', encoding=None, errors='strict', indexable=False
):
self.name = name
if is_py2:
self.file = _open_(name, mode)
else:
self.file = _open_(name, mode, encoding=encoding, errors=errors)
self.encoding = encoding
self.errors = errors
self.iterator = None
if indexable:
if is_string(indexable) and os.path.exists(indexable):
self.offsets = json.load(_open_(indexable))
else:
self.create_indexes()
if is_string(indexable):
json.dump(self.offsets, _open_(indexable, 'w'))
def create_indexes(self):
print('creating indexes ...')
self.offsets, offset = [], 0
pbar = keras.utils.Progbar(os.path.getsize(self.name))
while self.readline():
self.offsets.append(offset)
offset = self.tell()
pbar.update(offset)
self.seek(0)
print('indexes created.')
def __getitem__(self, key):
self.seek(self.offsets[key])
l = self.readline()
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
return l
def __len__(self):
return len(self.offsets)
def __iter__(self):
if hasattr(self, 'offsets'):
for i in range(len(self)):
yield self[i]
else:
for l in self.file:
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
yield l
def next(self):
if self.iterator is None:
self.iterator = self.__iter__()
return next(self.iterator)
def __next__(self):
return self.next()
def read(self):
text = self.file.read()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readline(self):
text = self.file.readline()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readlines(self):
if self.encoding:
return [
convert_to_unicode(text, self.encoding, self.errors)
for text in self.file.readlines()
]
else:
return self.file.readlines()
def write(self, text):
if self.encoding:
text = convert_to_str(text, self.encoding, self.errors)
self.file.write(text)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
def tell(self):
return self.file.tell()
def seek(self, offset=0):
return self.file.seek(offset)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def parallel_apply(
func,
iterable,
workers,
max_queue_size,
callback=None,
dummy=False,
random_seeds=True
):
if dummy:
from multiprocessing.dummy import Pool, Queue
else:
from multiprocessing import Pool, Queue
in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
if random_seeds is True:
random_seeds = [None] * workers
elif random_seeds is None or random_seeds is False:
random_seeds = []
for seed in random_seeds:
seed_queue.put(seed)
def worker_step(in_queue, out_queue):
if not seed_queue.empty():
np.random.seed(seed_queue.get())
while True:
i, d = in_queue.get()
r = func(d)
out_queue.put((i, r))
pool = Pool(workers, worker_step, (in_queue, out_queue))
if callback is None:
results = []
def process_out_queue():
out_count = 0
for _ in range(out_queue.qsize()):
i, d = out_queue.get()
out_count += 1
if callback is None:
results.append((i, d))
else:
callback(d)
return out_count
in_count, out_count = 0, 0
for i, d in enumerate(iterable):
in_count += 1
while True:
try:
in_queue.put((i, d), block=False)
break
except six.moves.queue.Full:
out_count += process_out_queue()
if in_count % max_queue_size == 0:
out_count += process_out_queue()
while out_count != in_count:
out_count += process_out_queue()
pool.terminate()
if callback is None:
results = sorted(results, key=lambda r: r[0])
return [r[1] for r in results]
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
if length is None:
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
elif not hasattr(length, '__getitem__'):
length = [length]
slices = [np.s_[:length[i]] for i in range(seq_dims)]
slices = tuple(slices) if len(slices) > 1 else slices[0]
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[slices]
for i in range(seq_dims):
if mode == 'post':
pad_width[i] = (0, length[i] - np.shape(x)[i])
elif mode == 'pre':
pad_width[i] = (length[i] - np.shape(x)[i], 0)
else:
raise ValueError('"mode" argument must be "post" or "pre".')
x = np.pad(x, pad_width, 'constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
def truncate_sequences(maxlen, indices, *sequences):
sequences = [s for s in sequences if s]
if not isinstance(indices, (list, tuple)):
indices = [indices] * len(sequences)
while True:
lengths = [len(s) for s in sequences]
if sum(lengths) > maxlen:
i = np.argmax(lengths)
sequences[i].pop(indices[i])
else:
return sequences
def text_segmentate(text, maxlen, seps='\n', strips=None):
text = text.strip().strip(strips)
if seps and len(text) > maxlen:
pieces = text.split(seps[0])
text, texts = '', []
for i, p in enumerate(pieces):
if text and p and len(text) + len(p) > maxlen - 1:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
text = ''
if i + 1 == len(pieces):
text = text + p
else:
text = text + p + seps[0]
if text:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
return texts
else:
return [text]
def is_one_of(x, ys):
for y in ys:
if x is y:
return True
return False
class DataGenerator(object):
def __init__(self, data, batch_size=32, buffer_size=None):
self.data = data
self.batch_size = batch_size
if hasattr(self.data, '__len__'):
self.steps = len(self.data) // self.batch_size
if len(self.data) % self.batch_size != 0:
self.steps += 1
else:
self.steps = None
self.buffer_size = buffer_size or batch_size * 1000
def __len__(self):
return self.steps
def sample(self, random=False):
if random:
if self.steps is None:
def generator():
caches, isfull = [], False
for d in self.data:
caches.append(d)
if isfull:
i = np.random.randint(len(caches))
yield caches.pop(i)
elif len(caches) == self.buffer_size:
isfull = True
while caches:
i = np.random.randint(len(caches))
yield caches.pop(i)
else:
def generator():
for i in np.random.permutation(len(self.data)):
yield self.data[i]
data = generator()
else:
data = iter(self.data)
d_current = next(data)
for d_next in data:
yield False, d_current
d_current = d_next
yield True, d_current
def __iter__(self, random=False):
raise NotImplementedError
def forfit(self, random=True):
while True:
for d in self.__iter__(random):
yield d
def to_dataset(self, types, shapes, names=None, padded_batch=False):
if names is None:
generator = self.forfit
else:
if is_string(names):
warps = lambda k, v: {k: v}
elif is_string(names[0]):
warps = lambda k, v: dict(zip(k, v))
else:
warps = lambda k, v: tuple(
dict(zip(i, j)) for i, j in zip(k, v)
)
def generator():
for d in self.forfit():
yield warps(names, d)
types = warps(names, types)
shapes = warps(names, shapes)
if padded_batch:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types
)
dataset = dataset.padded_batch(self.batch_size, shapes)
else:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types, output_shapes=shapes
)
dataset = dataset.batch(self.batch_size)
return dataset
class ViterbiDecoder(object):
def __init__(self, trans, starts=None, ends=None):
self.trans = trans
self.num_labels = len(trans)
self.non_starts = []
self.non_ends = []
if starts is not None:
for i in range(self.num_labels):
if i not in starts:
self.non_starts.append(i)
if ends is not None:
for i in range(self.num_labels):
if i not in ends:
self.non_ends.append(i)
def decode(self, nodes):
nodes[0, self.non_starts] -= np.inf
nodes[-1, self.non_ends] -= np.inf
labels = np.arange(self.num_labels).reshape((1, -1))
scores = nodes[0].reshape((-1, 1))
paths = labels
for l in range(1, len(nodes)):
M = scores + self.trans + nodes[l].reshape((1, -1))
idxs = M.argmax(0)
scores = M.max(0).reshape((-1, 1))
paths = np.concatenate([paths[:, idxs], labels], 0)
return paths[:, scores[:, 0].argmax()]
def softmax(x, axis=-1):
x = x - x.max(axis=axis, keepdims=True)
x = np.exp(x)
return x / x.sum(axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
def __init__(self, start_id, end_id, maxlen, minlen=1):
self.start_id = start_id
self.end_id = end_id
self.maxlen = maxlen
self.minlen = minlen
self.models = {}
if start_id is None:
self.first_output_ids = np.empty((1, 0), dtype=int)
else:
self.first_output_ids = np.array([[self.start_id]])
@staticmethod
def wraps(default_rtype='probas', use_states=False):
def actual_decorator(predict):
def new_predict(
self,
inputs,
output_ids,
states,
temperature=1,
rtype=default_rtype
):
assert rtype in ['probas', 'logits']
prediction = predict(self, inputs, output_ids, states)
if not use_states:
prediction = (prediction, None)
if default_rtype == 'logits':
prediction = (
softmax(prediction[0] / temperature), prediction[1]
)
elif temperature != 1:
probas = np.power(prediction[0], 1.0 / temperature)
probas = probas / probas.sum(axis=-1, keepdims=True)
prediction = (probas, prediction[1])
if rtype == 'probas':
return prediction
else:
return np.log(prediction[0] + 1e-12), prediction[1]
return new_predict
return actual_decorator
def last_token(self, model):
if model not in self.models:
outputs = [
keras.layers.Lambda(lambda x: x[:, -1])(output)
for output in model.outputs
]
self.models[model] = keras.models.Model(model.inputs, outputs)
return self.models[model]
def predict(self, inputs, output_ids, states=None):
raise NotImplementedError
def beam_search(self, inputs, topk, states=None, temperature=1, min_ends=1):
inputs = [np.array([i]) for i in inputs]
output_ids, output_scores = self.first_output_ids, np.zeros(1)
for step in range(self.maxlen):
scores, states = self.predict(
inputs, output_ids, states, temperature, 'logits'
)
if step == 0:
inputs = [np.repeat(i, topk, axis=0) for i in inputs]
scores = output_scores.reshape((-1, 1)) + scores
indices = scores.argpartition(-topk, axis=None)[-topk:]
indices_1 = indices // scores.shape[1]
indices_2 = (indices % scores.shape[1]).reshape((-1, 1))
output_ids = np.concatenate([output_ids[indices_1], indices_2],
1)
output_scores = np.take_along_axis(
scores, indices, axis=None
)
end_counts = (output_ids == self.end_id).sum(1)
if output_ids.shape[1] >= self.minlen:
best_one = output_scores.argmax()
if end_counts[best_one] == min_ends:
return output_ids[best_one]
else:
flag = (end_counts < min_ends)
if not flag.all():
inputs = [i[flag] for i in inputs]
output_ids = output_ids[flag]
output_scores = output_scores[flag]
end_counts = end_counts[flag]
topk = flag.sum()
return output_ids[output_scores.argmax()]
def random_sample(
self,
inputs,
n,
topk=None,
topp=None,
states=None,
temperature=1,
min_ends=1
):
inputs = [np.array([i]) for i in inputs]
output_ids = self.first_output_ids
results = []
for step in range(self.maxlen):
probas, states = self.predict(
inputs, output_ids, states, temperature, 'probas'
)
probas /= probas.sum(axis=1, keepdims=True)
if step == 0:
probas = np.repeat(probas, n, axis=0)
inputs = [np.repeat(i, n, axis=0) for i in inputs]
output_ids = np.repeat(output_ids, n, axis=0)
if topk is not None:
k_indices = probas.argpartition(-topk,
axis=1)[:, -topk:]
probas = np.take_along_axis(probas, k_indices, axis=1)
probas /= probas.sum(axis=1, keepdims=True)
if topp is not None:
p_indices = probas.argsort(axis=1)[:, ::-1]
probas = np.take_along_axis(probas, p_indices, axis=1)
cumsum_probas = np.cumsum(probas, axis=1)
flag = np.roll(cumsum_probas >= topp, 1, axis=1)
flag[:, 0] = False
probas[flag] = 0
probas /= probas.sum(axis=1, keepdims=True)
sample_func = lambda p: np.random.choice(len(p), p=p)
sample_ids = np.apply_along_axis(sample_func, 1, probas)
sample_ids = sample_ids.reshape((-1, 1))
if topp is not None:
sample_ids = np.take_along_axis(
p_indices, sample_ids, axis=1
)
if topk is not None:
sample_ids = np.take_along_axis(
k_indices, sample_ids, axis=1
)
output_ids = np.concatenate([output_ids, sample_ids], 1)
end_counts = (output_ids == self.end_id).sum(1)
if output_ids.shape[1] >= self.minlen:
flag = (end_counts == min_ends)
if flag.any():
for ids in output_ids[flag]:
results.append(ids)
flag = (flag == False)
inputs = [i[flag] for i in inputs]
output_ids = output_ids[flag]
end_counts = end_counts[flag]
if len(output_ids) == 0:
break
for ids in output_ids:
results.append(ids)
return results
def insert_arguments(**arguments):
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k, v in arguments.items():
if k in kwargs:
v = kwargs.pop(k)
setattr(self, k, v)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
def delete_arguments(*arguments):
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k in arguments:
if k in kwargs:
raise TypeError(
'%s got an unexpected keyword argument \'%s\'' %
(self.__class__.__name__, k)
)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
def longest_common_substring(source, target):
c, l, span = defaultdict(int), 0, (0, 0, 0, 0)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
if c[i, j] > l:
l = c[i, j]
span = (i - l, i, j - l, j)
return l, span
def longest_common_subsequence(source, target):
c = defaultdict(int)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
elif c[i, j - 1] > c[i - 1, j]:
c[i, j] = c[i, j - 1]
else:
c[i, j] = c[i - 1, j]
l, mapping = c[len(source), len(target)], []
i, j = len(source) - 1, len(target) - 1
while len(mapping) < l:
if source[i] == target[j]:
mapping.append((i, j))
i, j = i - 1, j - 1
elif c[i + 1, j] > c[i, j + 1]:
j = j - 1
else:
i = i - 1
return l, mapping[::-1]
class WebServing(object):
def __init__(self, host='0.0.0.0', port=8000, server='paste'):
import bottle
self.host = host
self.port = port
self.server = server
self.graph = tf.get_default_graph()
self.sess = K.get_session()
self.set_session = K.set_session
self.bottle = bottle
def wraps(self, func, arguments, method='GET'):
def new_func():
outputs = {'code': 0, 'desc': u'succeeded', 'data': {}}
kwargs = {}
for key, value in arguments.items():
if method == 'GET':
result = self.bottle.request.GET.getunicode(key)
else:
result = self.bottle.request.POST.getunicode(key)
if result is None:
if value[1]:
outputs['code'] = 1
outputs['desc'] = 'lack of "%s" argument' % key
return json.dumps(outputs, ensure_ascii=False)
else:
if value[0] is not None:
result = value[0](result)
kwargs[key] = result
try:
with self.graph.as_default():
self.set_session(self.sess)
outputs['data'] = func(**kwargs)
except Exception as e:
outputs['code'] = 2
outputs['desc'] = str(e)
return json.dumps(outputs, ensure_ascii=False)
return new_func
def route(self, path, func, arguments, method='GET'):
func = self.wraps(func, arguments, method)
self.bottle.route(path, method=method)(func)
def start(self):
self.bottle.run(host=self.host, port=self.port, server=self.server)
class Hook:
def __init__(self, module):
self.module = module
def __getattr__(self, attr):
if attr == 'uniout':
if is_py2:
import uniout
else:
return getattr(self.module, attr)
Hook.__name__ = __name__
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| true | true |
f7305f172f99aa4ee10baf90adde0cdb5a91636b | 432 | py | Python | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | 2 | 2018-02-03T05:35:52.000Z | 2018-02-05T21:00:18.000Z | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | import os
import json
def iter_example_names():
    """Yield the filenames of all example specs, in sorted order."""
    directory = os.path.join(os.path.dirname(__file__), 'spec')
    for filename in sorted(os.listdir(directory)):
        yield filename
def load_example(name):
    """Read and parse the JSON example spec called ``name``."""
    spec_path = os.path.join(os.path.dirname(__file__), 'spec', name)
    with open(spec_path, 'r') as spec_file:
        return json.load(spec_file)
def iter_example_json():
    """Yield the parsed JSON content of every example spec."""
    for example_name in iter_example_names():
        yield load_example(example_name)
| 21.6 | 68 | 0.671296 | import os
import json
def iter_example_names():
    """Yield the example spec filenames (sorted) from the 'spec' dir."""
    specdir = os.path.join(os.path.dirname(__file__), 'spec')
    for spec in sorted(os.listdir(specdir)):
        yield spec
def load_example(name):
    """Load and parse the named JSON example spec."""
    filename = os.path.join(os.path.dirname(__file__), 'spec', name)
    with open(filename, 'r') as f:
        return json.load(f)
def iter_example_json():
    """Yield each example spec parsed from JSON."""
    for name in iter_example_names():
        yield load_example(name)
| true | true |
f7305ff6dcb783b887e7c18ac02bfe8c87cfeacf | 112 | py | Python | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | 3 | 2019-06-30T21:09:05.000Z | 2021-05-09T17:56:19.000Z | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | null | null | null | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from vl.app import app as application
if __name__ == "__main__":
    # Run the development server when executed directly; WSGI containers
    # import ``application`` (bound above) instead.
    application.run()
| 16 | 37 | 0.705357 |
from vl.app import app as application
if __name__ == "__main__":
    # Dev-server entry point; WSGI containers use ``application`` directly.
    application.run()
| true | true |
f73060075d4c066e5e8285761903993ce0cf2935 | 31,110 | py | Python | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | 1 | 2015-05-18T14:31:20.000Z | 2015-05-18T14:31:20.000Z | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import collections
import numbers
import re
import string
import six
from six.moves import filter, map, zip
from fractions import Fraction
from functools import total_ordering
from monty.fractions import gcd
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import unitized
@total_ordering
class Composition(collections.Mapping, collections.Hashable, PMGSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Specie. Elements and Specie
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that many Composition related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
"""
Tolerance in distinguishing different composition amounts.
1e-8 is fairly tight, but should cut out most floating point arithmetic
errors.
"""
amount_tolerance = 1e-8
"""
Special formula handling for peroxides and certain elements. This is so
that formula output does not write LiO instead of Li2O2 for example.
"""
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs): #allow_negative=False
    """
    Very flexible Composition construction, similar to the built-in Python
    dict(). Also extended to allow simple string init.

    Args:
        Any form supported by the Python built-in dict() function.

        1. A dict of either {Element/Specie: amount},
           {string symbol: amount}, or {atomic number: amount} or any
           mixture of these. E.g., {Element("Li"): 2, Element("O"): 1},
           {"Li": 2, "O": 1}, {3: 2, 8: 1} all result in a Li2O
           composition.
        2. Keyword arg initialization, similar to a dict, e.g.,
           Composition(Li=2, O=1).

        In addition, the Composition constructor also allows a single
        string as an input formula. E.g., Composition("Li2O").

        allow_negative: Whether to allow negative compositions. This
            argument must be popped from the \*\*kwargs due to \*args
            ambiguity.

    Raises:
        CompositionError: if an amount is negative and allow_negative is
            False.
    """
    self.allow_negative = kwargs.pop('allow_negative', False)
    # it's much faster to recognize a composition and use the elmap than
    # to pass the composition to dict()
    if len(args) == 1 and isinstance(args[0], Composition):
        elmap = args[0]._elmap
    elif len(args) == 1 and isinstance(args[0], six.string_types):
        elmap = self._parse_formula(args[0])
    else:
        elmap = dict(*args, **kwargs)
    self._elmap = {}
    self._natoms = 0
    for k, v in elmap.items():
        if v < -Composition.amount_tolerance and not self.allow_negative:
            raise CompositionError("Amounts in Composition cannot be "
                                   "negative!")
        # Near-zero amounts are dropped entirely, which keeps __eq__ and
        # __len__ consistent with amount_tolerance.
        if abs(v) >= Composition.amount_tolerance:
            self._elmap[get_el_sp(k)] = v
            self._natoms += abs(v)
def __getitem__(self, el):
    """Return the amount of element/species ``el``; 0 when absent."""
    key = get_el_sp(el)
    if key in self._elmap:
        return self._elmap[key]
    return 0
def __eq__(self, other):
    """Equal when every species amount matches within amount_tolerance."""
    # Amounts below the tolerance are never stored in _elmap, so a plain
    # length comparison lets us inspect only one side's keys.
    if len(self) != len(other):
        return False
    return all(abs(amt - other[sp]) <= Composition.amount_tolerance
               for sp, amt in self._elmap.items())
def __ge__(self, other):
    """
    Defines >= for Compositions. Should ONLY be used for defining a sort
    order (the behavior is probably not what you'd expect).
    """
    # Lexicographic comparison over the sorted union of elements: the
    # first element whose amounts differ by at least the tolerance
    # decides; ties on every element compare as >=.
    for el in sorted(set(self.elements + other.elements)):
        if other[el] - self[el] >= Composition.amount_tolerance:
            return False
        elif self[el] - other[el] >= Composition.amount_tolerance:
            return True
    return True
def __ne__(self, other):
    """Inverse of __eq__ (needed explicitly for Python 2)."""
    return not self == other
def __add__(self, other):
    """Element-wise sum of two compositions, e.g. Fe2O3 + FeO -> Fe3O4."""
    totals = collections.defaultdict(float)
    totals.update(self)
    for species, amount in other.items():
        totals[get_el_sp(species)] += amount
    return Composition(totals, allow_negative=self.allow_negative)
def __sub__(self, other):
    """Element-wise difference, e.g. Fe2O3 - FeO -> FeO2.

    Raises:
        CompositionError: if the result is negative in any element and
            allow_negative is False (raised by the Composition
            constructor).
    """
    totals = collections.defaultdict(float)
    totals.update(self)
    for species, amount in other.items():
        totals[get_el_sp(species)] -= amount
    return Composition(totals, allow_negative=self.allow_negative)
def __mul__(self, other):
    """Scale every amount by a number, e.g. Fe2O3 * 4 -> Fe8O12."""
    if not isinstance(other, numbers.Number):
        return NotImplemented
    scaled = {species: amount * other
              for species, amount in self._elmap.items()}
    return Composition(scaled, allow_negative=self.allow_negative)

__rmul__ = __mul__
def __truediv__(self, other):
    """Divide every amount by a number (true division)."""
    if not isinstance(other, numbers.Number):
        return NotImplemented
    scaled = {species: amount / other
              for species, amount in self._elmap.items()}
    return Composition(scaled, allow_negative=self.allow_negative)

__div__ = __truediv__
def __hash__(self):
    """
    Minimally effective hash: the sum of atomic numbers, so it only
    distinguishes Compositions with different element sets.
    """
    return sum(species.Z for species in self._elmap)
def __contains__(self, el):
    # NOTE(review): membership checks the raw Element/Specie key; unlike
    # __getitem__, a string symbol is not coerced via get_el_sp here.
    return el in self._elmap
def __len__(self):
    # Number of distinct element/species keys (near-zero amounts were
    # dropped in __init__).
    return len(self._elmap)
def __iter__(self):
    # Iterate over the Element/Specie keys, as for a plain dict.
    return self._elmap.__iter__()
@property
def average_electroneg(self):
    """Electronegativity averaged over (absolute) atomic amounts."""
    weighted = sum(sp.X * abs(amt) for sp, amt in self._elmap.items())
    return weighted / self.num_atoms
def almost_equals(self, other, rtol=0.1, atol=1e-8):
    """True if every amount agrees within the given tolerances.

    Args:
        other (Composition): Other composition to check.
        rtol (float): Relative tolerance.
        atol (float): Absolute tolerance.
    """
    for species in set(self.elements + other.elements):
        mine = self[species]
        theirs = other[species]
        # Symmetric relative tolerance: scaled by the mean magnitude.
        allowed = atol + rtol * (abs(mine) + abs(theirs)) / 2
        if abs(theirs - mine) > allowed:
            return False
    return True
@property
def is_element(self):
    """
    True if composition is for an element (exactly one species key).
    """
    return len(self._elmap) == 1
def copy(self):
    # Shallow copy that preserves the allow_negative flag.
    return Composition(self._elmap, allow_negative=self.allow_negative)
@property
def formula(self):
    """Formula string with elements sorted by electronegativity,
    e.g. 'Li4 Fe4 P4 O16'."""
    amounts = self.get_el_amt_dict()
    ordered = sorted(amounts.keys(),
                     key=lambda symbol: get_el_sp(symbol).X)
    parts = []
    for symbol in ordered:
        parts.append(symbol + formula_double_format(amounts[symbol], False))
    return " ".join(parts)
@property
def alphabetical_formula(self):
    """Formula string with elements sorted alphabetically,
    e.g. 'Fe4 Li4 O16 P4'."""
    amounts = self.get_el_amt_dict()
    parts = []
    for symbol in sorted(amounts.keys()):
        parts.append(symbol + formula_double_format(amounts[symbol], False))
    return " ".join(parts)
@property
def element_composition(self):
    """
    Returns the composition replacing any species by the corresponding
    element. Amounts of species of the same element are merged by
    get_el_amt_dict().
    """
    return Composition(self.get_el_amt_dict(),
                       allow_negative=self.allow_negative)
@property
def fractional_composition(self):
    """
    Returns the normalized composition in which the species amounts sum
    to 1 (i.e. atomic fractions).

    Returns:
        Normalized composition which the number of species sum to 1.
    """
    return self / self._natoms
@property
def reduced_composition(self):
    """
    Returns the reduced composition, i.e. amounts normalized by greatest
    common denominator. e.g., Composition("FePO4") for
    Composition("Fe4P4O16").
    """
    return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
    """
    Calculates a reduced composition and factor.

    Returns:
        A normalized composition and a multiplicative factor, i.e.,
        Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
    """
    factor = self.get_reduced_formula_and_factor()[1]
    return self / factor, factor
def get_reduced_formula_and_factor(self):
    """
    Calculates a reduced formula and factor.

    Returns:
        A pretty normalized formula and a multiplicative factor, i.e.,
        Li4Fe4P4O16 returns (LiFePO4, 4).
    """
    # Non-integer amounts cannot be reduced; return the raw formula.
    all_int = all(x == int(x) for x in self._elmap.values())
    if not all_int:
        return self.formula.replace(" ", ""), 1
    d = self.get_el_amt_dict()
    (formula, factor) = reduce_formula(d)

    if formula in Composition.special_formulas:
        # e.g. "LiO" is rewritten as "Li2O2": the formula unit doubles,
        # so the multiplicative factor halves.
        formula = Composition.special_formulas[formula]
        factor /= 2
    return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000):
    """
    Calculates an integer formula and factor.

    Args:
        max_denominator (int): all amounts in the el:amt dict are
            first converted to a Fraction with this maximum denominator

    Returns:
        A pretty normalized formula and a multiplicative factor, i.e.,
        Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
    """
    # gcd of the fractional amounts; dividing through yields integers.
    mul = gcd(*[Fraction(v).limit_denominator(max_denominator) for v
                in self.values()])
    d = {k: round(v / mul) for k, v in self.get_el_amt_dict().items()}
    (formula, factor) = reduce_formula(d)
    if formula in Composition.special_formulas:
        # Special formulas (e.g. Li2O2 for LiO) double the formula unit.
        formula = Composition.special_formulas[formula]
        factor /= 2
    return formula, factor * mul
@property
def reduced_formula(self):
    """
    Returns a pretty normalized formula, i.e., LiFePO4 instead of
    Li4Fe4P4O16.
    """
    return self.get_reduced_formula_and_factor()[0]
@property
def elements(self):
    """
    Returns a list of the Element/Specie keys in this Composition.
    """
    return list(self._elmap.keys())
def __str__(self):
    """Formula string that always writes explicit amounts, including 1."""
    parts = ["{}{}".format(sp, formula_double_format(amt, ignore_ones=False))
             for sp, amt in self.as_dict().items()]
    return " ".join(parts)
@property
def num_atoms(self):
    """
    Total number of atoms in Composition. For negative amounts, sum
    of absolute values (accumulated in __init__).
    """
    return self._natoms
@property
@unitized("amu")
def weight(self):
    """Total molecular weight of the Composition (in amu)."""
    return sum(amount * species.atomic_mass
               for species, amount in self._elmap.items())
def get_atomic_fraction(self, el):
    """
    Calculate atomic fraction of an Element or Specie.

    Args:
        el (Element/Specie): Element or Specie to get fraction for.

    Returns:
        Atomic fraction for element el in Composition
    """
    # abs() so negative (allow_negative) amounts still yield a fraction.
    return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
    """
    Calculate weight fraction of an Element or Specie.

    Args:
        el (Element/Specie): Element or Specie to get fraction for.

    Returns:
        Weight fraction for element el in Composition
    """
    return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def _parse_formula(self, formula):
    """
    Args:
        formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3

    Returns:
        A {symbol: amount} dict for that formula (not a Composition).

    Raises:
        CompositionError: if unparseable characters remain.
    """
    def get_sym_dict(f, factor):
        # Accumulate symbol -> amount for a parenthesis-free formula,
        # scaling all amounts by ``factor``.
        sym_dict = collections.defaultdict(float)
        for m in re.finditer(r"([A-Z][a-z]*)([-*\.\d]*)", f):
            el = m.group(1)
            amt = 1
            if m.group(2).strip() != "":
                amt = float(m.group(2))
            sym_dict[el] += amt * factor
            # Chomp the matched token so any leftover text signals an
            # invalid formula below.
            f = f.replace(m.group(), "", 1)
        if f.strip():
            raise CompositionError("{} is an invalid formula!".format(f))
        return sym_dict

    # Expand one parenthesized group, e.g. (PO4)3 -> P3.0O12.0, then
    # recurse until no parentheses remain.
    m = re.search(r"\(([^\(\)]+)\)([\.\d]*)", formula)
    if m:
        factor = 1
        if m.group(2) != "":
            factor = float(m.group(2))
        unit_sym_dict = get_sym_dict(m.group(1), factor)
        expanded_sym = "".join(["{}{}".format(el, amt)
                                for el, amt in unit_sym_dict.items()])
        expanded_formula = formula.replace(m.group(), expanded_sym)
        return self._parse_formula(expanded_formula)
    return get_sym_dict(formula, 1)
@property
def anonymized_formula(self):
    """
    An anonymized formula. Unique species are arranged in ordering of
    increasing amounts and assigned ascending alphabets. Useful for
    prototyping formulas. For example, all stoichiometric perovskites have
    anonymized_formula ABC3.
    """
    reduced = self.element_composition
    # Only divide through by the gcd when every amount is integral.
    if all(x == int(x) for x in self._elmap.values()):
        reduced /= gcd(*self._elmap.values())

    anon = ""
    for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
        if amt == 1:
            amt_str = ""
        elif abs(amt % 1) < 1e-8:
            # Render integral amounts without a trailing ".0".
            amt_str = str(int(amt))
        else:
            amt_str = str(amt)
        anon += ("{}{}".format(e, amt_str))
    return anon
def __repr__(self):
    # Short debug form, e.g. "Comp: Li1 Fe1 P1 O4".
    return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
    """
    Creates a composition from a dict generated by as_dict(). Strictly not
    necessary given that the standard constructor already takes in such an
    input, but this method preserves the standard pymatgen API of having
    from_dict methods to reconstitute objects generated by as_dict().
    Allows for easier introspection.

    Args:
        d (dict): {symbol: amount} dict.
    """
    return cls(d)
def get_el_amt_dict(self):
    """
    Returns:
        Dict of element symbol -> total (unreduced) amount; species of
        the same element are merged under the plain symbol, e.g.
        {"Fe": 4.0, "O": 6.0}.
    """
    totals = collections.defaultdict(float)
    for species, amount in self.items():
        totals[species.symbol] += amount
    return totals
def as_dict(self):
    """
    Returns:
        Dict of species string -> (unreduced) amount; species keep their
        oxidation state in the key, e.g. {"Fe3+": 4.0, "O2-": 6.0}.
    """
    out = collections.defaultdict(float)
    for species, amount in self.items():
        out[str(species)] += amount
    return out
@property
def to_reduced_dict(self):
    """
    Returns:
        Dict with element symbol and reduced amount e.g.,
        {"Fe": 2.0, "O":3.0}
    """
    # Round-trips through the reduced formula string to normalize.
    c = Composition(self.reduced_formula)
    return c.as_dict()
@property
def to_data_dict(self):
    """
    Returns:
        A dict with many keys and values relating to Composition/Formula,
        including reduced_cell_composition, unit_cell_composition,
        reduced_cell_formula, elements and nelements.
    """
    # Build the species dict once instead of three times, and return the
    # element symbols as a list: a dict_keys view is not JSON-serializable
    # and surprises callers that index into "elements".
    d = self.as_dict()
    return {"reduced_cell_composition": self.to_reduced_dict,
            "unit_cell_composition": d,
            "reduced_cell_formula": self.reduced_formula,
            "elements": list(d.keys()),
            "nelements": len(d)}
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
                                                   lock_if_strict=True):
    """
    Takes in a formula where capitilization might not be correctly entered,
    and suggests a ranked list of potential Composition matches.
    Author: Anubhav Jain

    Args:
        fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
            that may or may not have multiple interpretations
        lock_if_strict (bool): If true, a properly entered formula will
            only return the one correct interpretation. For example,
            "Co1" will only return "Co1" if true, but will return both
            "Co1" and "C1 O1" if false.

    Returns:
        A ranked list of potential Composition matches
    """
    #if we have an exact match and the user specifies lock_if_strict, just
    #return the exact match!
    if lock_if_strict:
        #the strict composition parsing might throw an error, we can ignore
        #it and just get on with fuzzy matching
        try:
            comp = Composition(fuzzy_formula)
            return [comp]
        except (CompositionError, ValueError):
            pass

    all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
    #remove duplicates
    all_matches = list(set(all_matches))
    #sort matches by rank (points) descending
    all_matches = sorted(all_matches,
                         key=lambda match: match[1], reverse=True)
    # keep only the Composition objects, dropping the scores
    all_matches = [m[0] for m in all_matches]
    return all_matches
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict=None, m_points=0,
                              factor=1):
    """
    A recursive helper method for formula parsing that helps in
    interpreting and ranking indeterminate formulas.
    Author: Anubhav Jain

    Args:
        fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
            that may or may not have multiple interpretations.
        m_dict (dict): A symbol:amt dictionary from the previously parsed
            formula. Defaults to an empty dict; None is used as the
            default instead of {} so the default is not a shared mutable
            object.
        m_points: Number of points gained from the previously parsed
            formula.
        factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
            as the fuzzy_formula with a coefficient of 2.

    Yields:
        Tuples of (Composition, points awarded to that interpretation).
    """
    # Fix for the mutable-default-argument anti-pattern; a fresh dict is
    # created per top-level call. Behavior is unchanged.
    m_dict = {} if m_dict is None else m_dict

    def _parse_chomp_and_rank(m, f, m_dict, m_points):
        """
        Interpret one regex match as an element symbol + amount, mutate
        m_dict accordingly and score the interpretation.

        Args:
            m: regex match; group 1 is the element, group 2 the amount.
            f: the formula part containing the match.
            m_dict: symbol:amt dict from the previously parsed formula.
            m_points: points gained from the previously parsed formula.

        Returns:
            (f, m_dict, points) with the match chomped from f, or
            (None, None, None) if no valid element could be found.
        """
        points = 0
        # Points awarded if the first letter of the element is correctly
        # specified as a capital
        points_first_capital = 100
        # Points awarded if the second letter of the element is correctly
        # specified as lowercase
        points_second_lowercase = 100

        #get element and amount from regex match
        el = m.group(1)
        if len(el) > 2 or len(el) < 1:
            raise CompositionError("Invalid element symbol entered!")
        amt = float(m.group(2)) if m.group(2).strip() != "" else 1

        #convert the element string to proper [uppercase,lowercase] format
        #and award points if it is already in that format
        char1 = el[0]
        char2 = el[1] if len(el) > 1 else ""
        if char1 == char1.upper():
            points += points_first_capital
        if char2 and char2 == char2.lower():
            points += points_second_lowercase
        el = char1.upper() + char2.lower()

        #if it's a valid element, chomp and add to the points
        if Element.is_valid_symbol(el):
            if el in m_dict:
                m_dict[el] += amt * factor
            else:
                m_dict[el] = amt * factor
            return f.replace(m.group(), "", 1), m_dict, m_points + points
        #else return None
        return None, None, None

    fuzzy_formula = fuzzy_formula.strip()

    if len(fuzzy_formula) == 0:
        # The entire formula has been parsed into m_dict. Return the
        # corresponding Composition and number of points
        if m_dict:
            yield (Composition.from_dict(m_dict), m_points)
    else:
        # if there is a parenthesis, remove it and match the remaining
        # stuff with the appropriate factor
        for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
            mp_points = m_points
            mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
            mp_dict = dict(m_dict)
            mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
            # Match the stuff inside the parenthesis with the appropriate
            # factor
            for match in \
                Composition._comps_from_fuzzy_formula(mp.group(1),
                                                      mp_dict,
                                                      mp_points,
                                                      factor=mp_factor):
                only_me = True
                # Match the stuff outside the parentheses and return the
                # sum.
                for match2 in \
                    Composition._comps_from_fuzzy_formula(mp_form,
                                                          mp_dict,
                                                          mp_points,
                                                          factor=1):
                    only_me = False
                    yield (match[0] + match2[0], match[1] + match2[1])
                # if the stuff outside the parenthesis is nothing, then
                # just return the stuff inside the parentheses
                if only_me:
                    yield match
            return

        # try to match the single-letter elements
        m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
        if m1:
            m_points1 = m_points
            m_form1 = fuzzy_formula
            m_dict1 = dict(m_dict)
            (m_form1, m_dict1, m_points1) = \
                _parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
            if m_dict1:
                # there was a real match
                for match in \
                    Composition._comps_from_fuzzy_formula(m_form1,
                                                          m_dict1,
                                                          m_points1,
                                                          factor):
                    yield match

        # try to match two-letter elements
        m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
        if m2:
            m_points2 = m_points
            m_form2 = fuzzy_formula
            m_dict2 = dict(m_dict)
            (m_form2, m_dict2, m_points2) = \
                _parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
            if m_dict2:
                # there was a real match
                for match in \
                    Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
                                                          m_points2,
                                                          factor):
                    yield match
def reduce_formula(sym_amt):
    """
    Helper method to reduce a sym_amt dict to a reduced formula and factor.

    Args:
        sym_amt (dict): {symbol: amount}.

    Returns:
        (reduced_formula, factor).
    """
    # Order symbols by electronegativity and drop negligible amounts.
    syms = sorted(sym_amt.keys(),
                  key=lambda s: get_el_sp(s).X)
    syms = list(filter(lambda s: abs(sym_amt[s]) >
                       Composition.amount_tolerance, syms))
    num_el = len(syms)
    # Heuristic: the two most electronegative elements form a polyanion
    # (e.g. PO4) when their electronegativities differ by less than 1.65.
    contains_polyanion = (num_el >= 3 and
                          get_el_sp(syms[num_el - 1]).X
                          - get_el_sp(syms[num_el - 2]).X < 1.65)

    factor = abs(gcd(*sym_amt.values()))
    reduced_form = []
    # When a polyanion is detected, the last two elements are handled
    # separately below.
    n = num_el - 2 if contains_polyanion else num_el
    for i in range(0, n):
        s = syms[i]
        normamt = sym_amt[s] * 1.0 / factor
        reduced_form.append(s)
        reduced_form.append(formula_double_format(normamt))

    if contains_polyanion:
        # Recursively reduce the polyanion part and parenthesize it when
        # it carries its own multiplier.
        poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
                        for i in range(n, num_el)}
        (poly_form, poly_factor) = reduce_formula(poly_sym_amt)

        if poly_factor != 1:
            reduced_form.append("({}){}".format(poly_form, int(poly_factor)))
        else:
            reduced_form.append(poly_form)

    reduced_form = "".join(reduced_form)
    return reduced_form, factor
class CompositionError(Exception):
    """Exception class for composition errors (invalid formulas,
    disallowed negative amounts, bad element symbols)."""
    pass
class ChemicalPotential(dict, PMGSONable):
    """
    Class to represent set of chemical potentials. Can be:
        multiplied/divided by a Number
        multiplied by a Composition (returns an energy)
        added/subtracted with other ChemicalPotentials.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            *args, **kwargs: any valid dict init arguments

        Raises:
            ValueError: if two inputs normalize to the same
                Element/Specie key.
        """
        d = dict(*args, **kwargs)
        # Normalize all keys to Element/Specie objects.
        super(ChemicalPotential, self).__init__((get_el_sp(k), v)
                                                for k, v in d.items())
        # If normalization merged keys, two inputs referred to the same
        # species.
        if len(d) != len(self):
            raise ValueError("Duplicate potential specified")

    def __mul__(self, other):
        # Scale every potential by a number.
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v * other for k, v in self.items()})
        else:
            return NotImplemented

    __rmul__ = __mul__

    def __truediv__(self, other):
        # Divide every potential by a number.
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v / other for k, v in self.items()})
        else:
            return NotImplemented

    __div__ = __truediv__

    def __sub__(self, other):
        # Species-wise difference over the union of keys (missing -> 0).
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented

    def __add__(self, other):
        # Species-wise sum over the union of keys (missing -> 0).
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented

    def get_energy(self, composition, strict=True):
        """
        Calculates the energy of a composition (dot product of amounts
        with potentials).

        Args:
            composition (Composition): input composition
            strict (bool): Whether all potentials must be specified
        """
        if strict and set(composition.keys()) > set(self.keys()):
            s = set(composition.keys()) - set(self.keys())
            raise ValueError("Potentials not specified for {}".format(s))
        return sum(self.get(k, 0) * v for k, v in composition.items())

    def __repr__(self):
        return "ChemPots: " + super(ChemicalPotential, self).__repr__()
if __name__ == "__main__":
    # Run the embedded doctests (e.g. the examples in Composition's class
    # docstring) when executed as a script.
    import doctest
    doctest.testmod()
| 36.006944 | 80 | 0.564899 |
from __future__ import division, unicode_literals
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import collections
import numbers
import re
import string
import six
from six.moves import filter, map, zip
from fractions import Fraction
from functools import total_ordering
from monty.fractions import gcd
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import unitized
@total_ordering
class Composition(collections.Mapping, collections.Hashable, PMGSONable):
amount_tolerance = 1e-8
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs):
self.allow_negative = kwargs.pop('allow_negative', False)
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]._elmap
elif len(args) == 1 and isinstance(args[0], six.string_types):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
self._elmap = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
self._elmap[get_el_sp(k)] = v
self._natoms += abs(v)
def __getitem__(self, el):
return self._elmap.get(get_el_sp(el), 0)
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
if len(self) != len(other):
return False
for el, v in self._elmap.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
hashcode = 0
for el in self._elmap.keys():
hashcode += el.Z
return hashcode
def __contains__(self, el):
return el in self._elmap
def __len__(self):
return len(self._elmap)
def __iter__(self):
return self._elmap.__iter__()
@property
def average_electroneg(self):
return sum((el.X * abs(amt) for el, amt in self._elmap.items())) / \
self.num_atoms
def almost_equals(self, other, rtol=0.1, atol=1e-8):
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self):
return len(self._elmap) == 1
def copy(self):
return Composition(self._elmap, allow_negative=self.allow_negative)
@property
def formula(self):
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self):
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self):
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self):
return self / self._natoms
@property
def reduced_composition(self):
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self):
all_int = all(x == int(x) for x in self._elmap.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = self.get_el_amt_dict()
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000):
mul = gcd(*[Fraction(v).limit_denominator(max_denominator) for v
in self.values()])
d = {k: round(v / mul) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * mul
@property
def reduced_formula(self):
return self.get_reduced_formula_and_factor()[0]
@property
def elements(self):
return list(self._elmap.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
return self._natoms
@property
@unitized("amu")
def weight(self):
return sum([amount * el.atomic_mass
for el, amount in self._elmap.items()])
def get_atomic_fraction(self, el):
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def _parse_formula(self, formula):
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)([-*\.\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise CompositionError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)([\.\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt)
for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
@property
def anonymized_formula(self):
reduced = self.element_composition
if all(x == int(x) for x in self._elmap.values()):
reduced /= gcd(*self._elmap.values())
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
def __repr__(self):
return "Comp: " + self.formula
    @classmethod
    def from_dict(cls, d):
        """Create an instance from a dict of {symbol: amount}."""
        return cls(d)
def get_el_amt_dict(self):
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": self.as_dict().keys(),
"nelements": len(self.as_dict().keys())}
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
if lock_if_strict:
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
all_matches = list(set(all_matches))
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0,
factor=1):
def _parse_chomp_and_rank(m, f, m_dict, m_points):
points = 0
points_first_capital = 100
points_second_lowercase = 100
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise CompositionError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
#else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
#The entire formula has been parsed into m_dict. Return the
#corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
#if there is a parenthesis, remove it and match the remaining stuff
#with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
#Match the stuff inside the parenthesis with the appropriate
#factor
for match in \
Composition._comps_from_fuzzy_formula(mp.group(1),
mp_dict,
mp_points,
factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in \
Composition._comps_from_fuzzy_formula(mp_form,
mp_dict,
mp_points,
factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
#if the stuff inside the parenthesis is nothing, then just
#return the stuff inside the parentheses
if only_me:
yield match
return
#try to match the single-letter elements
m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = \
_parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form1,
m_dict1,
m_points1,
factor):
yield match
#try to match two-letter elements
m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = \
_parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
m_points2,
factor):
yield match
def reduce_formula(sym_amt):
    """Reduce a {symbol: amount} mapping to a formula string.

    Returns:
        (formula, factor): the reduced formula string and the common
        factor divided out of the amounts.
    """
    # Sort symbols by electronegativity (X) and drop near-zero amounts.
    syms = sorted(sym_amt.keys(),
                  key=lambda s: get_el_sp(s).X)
    syms = list(filter(lambda s: abs(sym_amt[s]) >
                       Composition.amount_tolerance, syms))
    num_el = len(syms)
    # Heuristic: treat the last two species as a polyanion unit when their
    # electronegativities are close (threshold 1.65, e.g. P-O, S-O).
    contains_polyanion = (num_el >= 3 and
                          get_el_sp(syms[num_el - 1]).X
                          - get_el_sp(syms[num_el - 2]).X < 1.65)
    factor = abs(gcd(*sym_amt.values()))
    reduced_form = []
    n = num_el - 2 if contains_polyanion else num_el
    for i in range(0, n):
        s = syms[i]
        normamt = sym_amt[s] * 1.0 / factor
        reduced_form.append(s)
        reduced_form.append(formula_double_format(normamt))
    if contains_polyanion:
        # Reduce the polyanion part recursively; parenthesize if its own
        # reduction factor is > 1.
        poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
                        for i in range(n, num_el)}
        (poly_form, poly_factor) = reduce_formula(poly_sym_amt)
        if poly_factor != 1:
            reduced_form.append("({}){}".format(poly_form, int(poly_factor)))
        else:
            reduced_form.append(poly_form)
    reduced_form = "".join(reduced_form)
    return reduced_form, factor
class CompositionError(Exception):
    """Exception raised for composition errors (e.g. invalid formulas)."""
    pass
class ChemicalPotential(dict, PMGSONable):
    """Dict of {species: chemical potential} with elementwise arithmetic.

    Keys are normalized through ``get_el_sp``; duplicate keys (after
    normalization) are rejected.
    """
    def __init__(self, *args, **kwargs):
        # Accept the same constructor arguments as dict, then normalize keys.
        d = dict(*args, **kwargs)
        super(ChemicalPotential, self).__init__((get_el_sp(k), v)
                                                for k, v in d.items())
        if len(d) != len(self):
            raise ValueError("Duplicate potential specified")
    def __mul__(self, other):
        # Scalar multiplication; other operand types are not supported.
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v * other for k, v in self.items()})
        else:
            return NotImplemented
    __rmul__ = __mul__
    def __truediv__(self, other):
        # Scalar division.
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v / other for k, v in self.items()})
        else:
            return NotImplemented
    # Python 2 compatibility alias.
    __div__ = __truediv__
    def __sub__(self, other):
        # Elementwise difference; missing species are treated as 0.
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented
    def __add__(self, other):
        # Elementwise sum; missing species are treated as 0.
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented
    def get_energy(self, composition, strict=True):
        """Dot product of potentials with a composition's amounts.

        Args:
            composition: Composition-like mapping of species to amounts.
            strict: if True, raise if the composition contains species
                without a specified potential.
        """
        if strict and set(composition.keys()) > set(self.keys()):
            s = set(composition.keys()) - set(self.keys())
            raise ValueError("Potentials not specified for {}".format(s))
        return sum(self.get(k, 0) * v for k, v in composition.items())
    def __repr__(self):
        return "ChemPots: " + super(ChemicalPotential, self).__repr__()
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| true | true |
f730609d863d7ec81eadcaed88ab2c70b888fad3 | 22,602 | py | Python | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Gear Generator
# Purpose: Just for fun
#
# Author: Manuel Astros
# Email: manuel.astros1980@gmail.com
# Web: https://sites.google.com/view/interpolation/home
#
# Created: 25/06/2021
# Copyright: (c) astros 2021
# Licence: MIT
# Based on: Gear Drawing with Bézier Curves (https://www.arc.id.au/GearDrawing.html)
# -------------------------------------------------------------------------------
#
# Releases:
# 0.1: First Release
# ______________________________________________________________________________________
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QCloseEvent, QFont
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QHeaderView, \
QCheckBox, QComboBox, QMessageBox, QWidget, QVBoxLayout
from Gear_Mpl_Draw import MplWidget
# --------------------------Mpl Import------------
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from math import radians, degrees, atan, hypot, sin, cos
# import numpy as np
import random
# ---------------Internal modules import--------------
from gear_calc import createGearOutline, createIntGearOutline, displace, rotate
# ----------------------------------------
class mainWindow(QMainWindow):
    def __init__(self):
        """Build the main window: load the .ui file, seed row 0 of the gear
        table with default widgets/values, draw the initial gear view and
        wire up the button/table signals."""
        # Validation flags used by _cellChange / _comboBoxRevision.
        self.ErrInt = True
        self.ErrFloat = True
        self.ErrPitchDiam = True
        self.ErrInternalGear = False
        self.ErrDiameter = False
        super(mainWindow, self).__init__()
        loadUi('Gear_Generator.ui', self)
        self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        # Column 0: internal-gear checkbox widget.
        check_box = internal(self)
        check_box.stateChanged.connect(self._clickCheckBox)
        self.tableWidget.setCellWidget(0, 0, check_box)
        # Column 3: default pressure angle of 20 degrees.
        angle = QtWidgets.QTableWidgetItem(str(20))
        angle.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 3, angle)
        # Column 6: mesh-target combo box listing the other rows.
        lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
        mesh = Mesh(self, lista)
        self.tableWidget.setCellWidget(0, 6, mesh)
        # m: module, m = pitch diameter / teeth number
        m = float(self.tableWidget.item(0, 1).text()) / float(self.tableWidget.item(0, 2).text())
        m = QtWidgets.QTableWidgetItem(str(m))
        m.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 5, m)
        # Column 7: rotation angle (read-only).
        Acell = QtWidgets.QTableWidgetItem('0')
        Acell.setFlags(QtCore.Qt.ItemIsEnabled)
        Acell.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 7, Acell)
        # ------------------------------------Mpl Widget insertion---------------------------------------
        self._gearGraphic()
        # self.Graph = CanvasGraph(self.mplWidget)
        # self.Graph.setObjectName("Gear-View")
        # ---------------------------------------------------------------------------
        # ------------Signals-----------------------------------
        self.add_gear.clicked.connect(self._addRow)
        self.remove_gear.clicked.connect(self._removeRow)
        self.generate_gear.clicked.connect(self._gearGraphic)
        self.tableWidget.itemChanged.connect(self._cellChange)
        self._dataRevision()
        # self._cancel.clicked.connect(self._close)
        # self.add_gear.clicked.connect(self._addRow)
    def _gearGraphic(self):
        """Recompute all gear outlines and (re)draw them in the Mpl widget."""
        gear_outline = self._gearCalculation()
        # self.mplW = MplWidget(self.mplWidget)
        # self.addToolBar(QtCore.Qt.BottomToolBarArea, NavigationToolbar(mplW.canvas, self))
        self.Graph = MplWidget(self.mplWidget, gear_outline)
        self.Graph.show()
def _clickCheckBox(self):
check_row = self.tableWidget.currentRow()
check = self.tableWidget.cellWidget(check_row, 0).getCheckValue()
print(check)
if check:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Internal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
else:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Normal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
    def _comboBoxRevision(self):
        """React to a change of the mesh-target combo box of the current row.

        When 'Not Linked' is chosen the row keeps its own angle/X/Y cells
        editable. Otherwise the X/Y position of this gear is computed from
        the pitch diameters of this row and the pointed row so that the two
        pitch circles touch, handling internal gears and reporting errors
        (missing pitch diameter, two internal gears, wrong diameter order)
        in the status bar.
        """
        combo_row = self.tableWidget.currentRow()
        # current_col = self.tableWidget.currentRow()
        mesh_row_value_pointed = self.tableWidget.cellWidget(combo_row, 6).currentText()
        print('actual cell: ', combo_row)
        print('valor apuntado: ', mesh_row_value_pointed)
        if mesh_row_value_pointed == 'Not Linked':
            # Keep current values; re-create the items so A is read-only
            # and X/Y become editable again.
            Acell = self.tableWidget.item(combo_row, 7).text()
            print(Acell)
            Acell = QtWidgets.QTableWidgetItem(Acell)
            Acell.setFlags(QtCore.Qt.ItemIsEnabled)
            Acell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 7, Acell)
            Xcell = self.tableWidget.item(combo_row, 8).text()
            Xcell = QtWidgets.QTableWidgetItem(Xcell)
            # Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
            Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 8, Xcell)
            Ycell = self.tableWidget.item(combo_row, 9).text()
            Ycell = QtWidgets.QTableWidgetItem(Ycell)
            # Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
            Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 9, Ycell)
            self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - Gear is ' + mesh_row_value_pointed)
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
            print(mesh_row_value_pointed)
            self.ErrPitchDiam = False
        else:
            try:
                # Pitch diameter of the current row (column 1).
                A_pitchDiam = float(self.tableWidget.item(combo_row, 1).text())
            except ValueError:
                Acell = '0'
                Xcell ='0'
                Ycell = '0'
                self.meshMessage = 'Pith diameter missing in current row (' + str(combo_row + 1) + ')'
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                print('Pith diameter missing in current row (' + str(combo_row + 1) + ')')
                self.ErrPitchDiam = True
            else:
                try:
                    # Geometry of the pointed (meshing) row.
                    A_pitchDiam_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 1).text())
                    Acell = float(self.tableWidget.item(combo_row, 7).text())
                    Xcell = float(self.tableWidget.item(combo_row, 8).text())
                    Ycell = float(self.tableWidget.item(combo_row, 9).text())
                    CCell = self.tableWidget.cellWidget(combo_row, 0).getCheckValue()
                    print('Este es elcheck value: ', CCell)
                    Acell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 7).text())
                    Xcell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 8).text())
                    Ycell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 9).text())
                    Cell_pointed = self.tableWidget.cellWidget(int(mesh_row_value_pointed) - 1, 0).getCheckValue()
                    print('Este es elcheck value apuntado: ', Cell_pointed)
                    if CCell and Cell_pointed:
                        # Two internal gears cannot mesh with each other.
                        self.ErrInternalGear = True
                        Acell = '0'
                        Xcell ='0'
                        Ycell = '0'
                        self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' and ' + str(combo_row + 1) + ' can not be meshed'
                    elif Cell_pointed:
                        # Pointed gear is internal: it must be the larger one.
                        if A_pitchDiam_pointed <= A_pitchDiam:
                            self.ErrDiameter = True
                            Acell = '0'
                            Xcell ='0'
                            Ycell = '0'
                            self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' must be higher than' + str(combo_row + 1) + ' | Imposible meshed'
                        else:
                            # Internal meshing: centre distance is the difference of radii.
                            pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
                            Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
                            Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
                            Acell = str(Acell)
                            self.ErrDiameter = False
                    elif CCell:
                        # Current gear is internal: it must be the larger one.
                        if A_pitchDiam <= A_pitchDiam_pointed:
                            Acell = '0'
                            Xcell ='0'
                            Ycell = '0'
                            self.ErrDiameter = True
                            self.meshMessage = 'Gears ' + str(combo_row + 1) + ' must be higher than' + str(mesh_row_value_pointed) + ' | Imposible meshed'
                        else:
                            pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
                            Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
                            Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
                            Acell = str(Acell)
                            self.ErrDiameter = False
                    else:
                        # External meshing: centre distance is the sum of radii.
                        pitchDiam_dist = (A_pitchDiam_pointed / 2) + (A_pitchDiam / 2)
                        Xcell = str(Xcell_pointed + pitchDiam_dist * cos(radians(Acell)))
                        Ycell = str(Ycell_pointed + pitchDiam_dist * sin(radians(Acell)))
                        Acell = str(Acell)
                        self.ErrPitchDiam = False
                except:
                    # NOTE(review): bare except — any failure while reading the
                    # pointed row is reported as a missing pitch diameter.
                    Acell = '0'
                    Xcell ='0'
                    Ycell = '0'
                    self.meshMessage = 'Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')'
                    print('Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')')
                    self.ErrPitchDiam = True
            # Write the (possibly recomputed) A/X/Y values back; X and Y
            # become read-only while the row is linked.
            # Acell = self.tableWidget.item(combo_row, 7).text()
            Acell = QtWidgets.QTableWidgetItem(Acell)
            # Acell.setFlags(QtCore.Qt.ItemIsEnabled)
            Acell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 7, Acell)
            # Xcell = self.tableWidget.item(combo_row, 8).text()
            Xcell = QtWidgets.QTableWidgetItem(Xcell)
            Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
            Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 8, Xcell)
            # Ycell = self.tableWidget.item(combo_row, 9).text()
            Ycell = QtWidgets.QTableWidgetItem(Ycell)
            Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
            Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 9, Ycell)
        # TODO: fix the behaviour of the error messages (flags are never
        # reset in some branches, so stale errors may be reported).
        if self.ErrPitchDiam:
            self.statusLabel.setText(self.meshMessage + ' | Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
            print('meshing with ', mesh_row_value_pointed)
        elif self.ErrInternalGear:
            self.statusLabel.setText(self.meshMessage)
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
            print('meshing with ', mesh_row_value_pointed)
        elif self.ErrDiameter:
            self.statusLabel.setText(self.meshMessage)
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
            print('meshing with ', mesh_row_value_pointed)
        else:
            self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
            print('meshing with ', mesh_row_value_pointed)
def _dataRevision(self):
self.ErrInt = True
self.ErrFloat = True
verification = []
row_rev = self.tableWidget.rowCount()
print(row_rev)
for r in range(row_rev):
try:
check_val_rev = self.tableWidget.cellWidget(r, 0).getCheckValue()
teeth_pitch_diam_rev = int(self.tableWidget.item(r, 1).text())
teeth_n_rev = int(self.tableWidget.item(r, 2).text())
pressure_ang_rev = float(self.tableWidget.item(r, 3).text())
s_or_r_radius_rev = float(self.tableWidget.item(r, 4).text()) / 2
module_g_rev = float(self.tableWidget.item(r, 5).text())
mesh_rev = self.tableWidget.cellWidget(r, 6).currentText()
angle_rev = float(self.tableWidget.item(r, 7).text())
x_rev = float(self.tableWidget.item(r, 8).text())
y_rev = float(self.tableWidget.item(r, 9).text())
if mesh_rev != 'Not Linked':
pass
verification.append(True)
except:
verification.append(False)
return verification
    def _gearCalculation(self):
        """Compute the outline of every valid gear row.

        Returns:
            [location, gears] where location[i] is [angle, [x, y]] (or
            [False] for invalid rows) and gears[i] is [row_number, outline]
            (or [row_number, [False]]).
        """
        # verif = [True, False, True]
        verif = self._dataRevision()
        gears=[]
        location = []
        for row_g in range(len(verif)):
            gears.append([row_g + 1])
            print('intento: ', verif[row_g])
            if (verif[row_g]):
                teeth_n = int(self.tableWidget.item(row_g, 2).text())
                pressure_ang = float(self.tableWidget.item(row_g, 3).text())
                s_or_r_radius = float(self.tableWidget.item(row_g, 4).text()) / 2
                module_g = float(self.tableWidget.item(row_g, 5).text())
                check_val = self.tableWidget.cellWidget(row_g, 0).getCheckValue()
                Acell = float(self.tableWidget.item(row_g, 7).text())
                Xcell = float(self.tableWidget.item(row_g, 8).text())
                Ycell = float(self.tableWidget.item(row_g, 9).text())
                # Internal vs external gear outline.
                if check_val:
                    outline = createIntGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                else:
                    outline = createGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                print('outline', outline)
                # Translate the outline when the gear is not at the origin.
                if Xcell != 0 or Ycell != 0:
                    outline_diplaced = displace(outline, Xcell, Ycell)
                    outline = outline_diplaced
                    print('outline displaces:', outline)
                location.append([Acell, [Xcell, Ycell]])
                gears[row_g].append(outline)
                print('True: ', row_g + 1)
            else:
                gears[row_g].append([False])
                location.append([False])
                print('False: ', row_g + 1)
        # print(gears)
        return [location, gears]
def _cellChange(self):
items = self.tableWidget.selectedItems()
col = self.tableWidget.currentColumn()
row = self.tableWidget.currentRow()
print('_cellChange: ', row, col)
enteros = [2]
decimales = [1, 3, 4, 5, 7, 8, 9]
if col in enteros:
try:
cellType = int(items[0].text())
self.ErrInt = True
self.statusLabel.setText('OK: Current cell data is an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrInt = False
self.statusLabel.setText('Error: Value cell most be an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('integer')
elif col in decimales:
try:
cellType = float(items[0].text())
self.ErrFloat = True
self.statusLabel.setText('OK: Current cell data is a float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrFloat = False
self.statusLabel.setText('Error: Value cell most be an Float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('Float')
# print(str(items[0].text()))
def alertDialog(self, val):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
message = val + " input is required"
msgBox.setText(message)
msgBox.setWindowTitle("Input Error")
msgBox.setStandardButtons(QMessageBox.Ok)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
print('OK clicked')
    def _addRow(self):
        """Append a new gear row with the same default widgets as row 0.

        Skipped while the last cell edit failed validation (ErrInt and
        ErrFloat both False).
        """
        if self.ErrInt or self.ErrFloat:
            rowCount = self.tableWidget.rowCount()
            self.tableWidget.insertRow(rowCount)
            columnCount = self.tableWidget.columnCount()
            for col in range(columnCount):
                print(col)
                if col == 0:
                    # Internal-gear checkbox.
                    check_box = internal(self)
                    check_box.stateChanged.connect(self._clickCheckBox)
                    self.tableWidget.setCellWidget(rowCount, col, check_box)
                elif col == 3:
                    # Default pressure angle.
                    angle = QtWidgets.QTableWidgetItem('20')
                    angle.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, angle)
                elif col == 6:
                    # Mesh-target combo box listing the other rows.
                    lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
                    mesh = Mesh(self, lista)
                    self.tableWidget.setCellWidget(rowCount, col, mesh)
                    mesh.currentIndexChanged.connect(self._comboBoxRevision)
                elif col == 7:
                    # Rotation angle (read-only).
                    Acell = QtWidgets.QTableWidgetItem('0')
                    Acell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Acell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Acell)
                elif col == 8:
                    Xcell = QtWidgets.QTableWidgetItem('0')
                    # Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Xcell)
                elif col == 9:
                    Ycell = QtWidgets.QTableWidgetItem('0')
                    # Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Ycell)
                else:
                    # Remaining columns start empty, centered.
                    cellCenter = QtWidgets.QTableWidgetItem()
                    cellCenter.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, cellCenter)
            self.statusLabel.setText('OK: Row just added')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
def _removeRow(self):
if self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(self.tableWidget.rowCount()-1)
self.statusLabel.setText('OK: Row just deleted')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
# ----------------Events----------------------------------------------
# Properly defined in the future
# def closeEvent(self, event):
# reply = QMessageBox.question(self, 'Window Close', 'Are you sure you want to close the window?',
# QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
#
# if reply == QMessageBox.Yes:
# event.accept()
# # self.action_close_window.triggered.emit(True)
# print('Window closed')
# else:
# event.ignore()
#
# def resizeEvent(self, event):
# print("resize")
# QMainWindow.resizeEvent(self, event)
# ----------------------------------------------------------------------------
class internal(QCheckBox):
    """Checkbox cell widget flagging a table row as an internal gear."""

    def __init__(self, parent):
        super().__init__(parent)
        # Log every state change (getCheckValue prints the new state).
        self.stateChanged.connect(self.getCheckValue)

    def getCheckValue(self):
        """Return the checkbox state as a bool, logging it to stdout.

        The original compared ``isChecked() == True`` / ``== False``;
        since isChecked() already returns a bool, a plain test suffices.
        """
        if self.isChecked():
            print('Check Value Active')
            return True
        print('Check Value Deactivated')
        return False
class Mesh(QComboBox):
    """Combo-box cell widget selecting which gear row this gear meshes with."""

    def __init__(self, parent, aa):
        super().__init__(parent)
        self.addItems(aa)
        self.currentIndexChanged.connect(self.getComboValue)

    def getComboValue(self):
        """Print and return the currently selected entry."""
        value = self.currentText()
        print(value)
        return value
# ---- Application bootstrap -------------------------------------------------
app = QApplication(sys.argv)
main_window = mainWindow()
# The main window is hosted in a stacked widget (allows adding pages later).
widget = QtWidgets.QStackedWidget()
widget.addWidget(main_window)
# widget.setFixedHeight(300)
# widget.setFixedWidth(1060)
widget.resize(658, 650)
widget.show()
try:
    # app.exec_() blocks until the UI quits; sys.exit() then raises SystemExit.
    sys.exit(app.exec_())
except SystemExit:
    # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt
    # and real errors, hiding failures at shutdown.
    print('Exiting')
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QCloseEvent, QFont
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QHeaderView, \
QCheckBox, QComboBox, QMessageBox, QWidget, QVBoxLayout
from Gear_Mpl_Draw import MplWidget
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from math import radians, degrees, atan, hypot, sin, cos
import random
from gear_calc import createGearOutline, createIntGearOutline, displace, rotate
class mainWindow(QMainWindow):
def __init__(self):
self.ErrInt = True
self.ErrFloat = True
self.ErrPitchDiam = True
self.ErrInternalGear = False
self.ErrDiameter = False
super(mainWindow, self).__init__()
loadUi('Gear_Generator.ui', self)
self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
check_box = internal(self)
check_box.stateChanged.connect(self._clickCheckBox)
self.tableWidget.setCellWidget(0, 0, check_box)
angle = QtWidgets.QTableWidgetItem(str(20))
angle.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 3, angle)
lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
mesh = Mesh(self, lista)
self.tableWidget.setCellWidget(0, 6, mesh)
m = float(self.tableWidget.item(0, 1).text()) / float(self.tableWidget.item(0, 2).text())
m = QtWidgets.QTableWidgetItem(str(m))
m.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 5, m)
Acell = QtWidgets.QTableWidgetItem('0')
Acell.setFlags(QtCore.Qt.ItemIsEnabled)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 7, Acell)
self._gearGraphic()
self.add_gear.clicked.connect(self._addRow)
self.remove_gear.clicked.connect(self._removeRow)
self.generate_gear.clicked.connect(self._gearGraphic)
self.tableWidget.itemChanged.connect(self._cellChange)
self._dataRevision()
def _gearGraphic(self):
gear_outline = self._gearCalculation()
self.Graph = MplWidget(self.mplWidget, gear_outline)
self.Graph.show()
def _clickCheckBox(self):
check_row = self.tableWidget.currentRow()
check = self.tableWidget.cellWidget(check_row, 0).getCheckValue()
print(check)
if check:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Internal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
else:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Normal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
def _comboBoxRevision(self):
combo_row = self.tableWidget.currentRow()
mesh_row_value_pointed = self.tableWidget.cellWidget(combo_row, 6).currentText()
print('actual cell: ', combo_row)
print('valor apuntado: ', mesh_row_value_pointed)
if mesh_row_value_pointed == 'Not Linked':
Acell = self.tableWidget.item(combo_row, 7).text()
print(Acell)
Acell = QtWidgets.QTableWidgetItem(Acell)
Acell.setFlags(QtCore.Qt.ItemIsEnabled)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 7, Acell)
Xcell = self.tableWidget.item(combo_row, 8).text()
Xcell = QtWidgets.QTableWidgetItem(Xcell)
Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 8, Xcell)
Ycell = self.tableWidget.item(combo_row, 9).text()
Ycell = QtWidgets.QTableWidgetItem(Ycell)
Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 9, Ycell)
self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - Gear is ' + mesh_row_value_pointed)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
print(mesh_row_value_pointed)
self.ErrPitchDiam = False
else:
try:
A_pitchDiam = float(self.tableWidget.item(combo_row, 1).text())
except ValueError:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Pith diameter missing in current row (' + str(combo_row + 1) + ')'
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('Pith diameter missing in current row (' + str(combo_row + 1) + ')')
self.ErrPitchDiam = True
else:
try:
A_pitchDiam_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 1).text())
Acell = float(self.tableWidget.item(combo_row, 7).text())
Xcell = float(self.tableWidget.item(combo_row, 8).text())
Ycell = float(self.tableWidget.item(combo_row, 9).text())
CCell = self.tableWidget.cellWidget(combo_row, 0).getCheckValue()
print('Este es elcheck value: ', CCell)
Acell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 7).text())
Xcell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 8).text())
Ycell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 9).text())
Cell_pointed = self.tableWidget.cellWidget(int(mesh_row_value_pointed) - 1, 0).getCheckValue()
print('Este es elcheck value apuntado: ', Cell_pointed)
if CCell and Cell_pointed:
self.ErrInternalGear = True
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' and ' + str(combo_row + 1) + ' can not be meshed'
elif Cell_pointed:
if A_pitchDiam_pointed <= A_pitchDiam:
self.ErrDiameter = True
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' must be higher than' + str(combo_row + 1) + ' | Imposible meshed'
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrDiameter = False
elif CCell:
if A_pitchDiam <= A_pitchDiam_pointed:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.ErrDiameter = True
self.meshMessage = 'Gears ' + str(combo_row + 1) + ' must be higher than' + str(mesh_row_value_pointed) + ' | Imposible meshed'
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrDiameter = False
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) + (A_pitchDiam / 2)
Xcell = str(Xcell_pointed + pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed + pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrPitchDiam = False
except:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')'
print('Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')')
self.ErrPitchDiam = True
Acell = QtWidgets.QTableWidgetItem(Acell)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 7, Acell)
Xcell = QtWidgets.QTableWidgetItem(Xcell)
Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 8, Xcell)
Ycell = QtWidgets.QTableWidgetItem(Ycell)
Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 9, Ycell)
if self.ErrPitchDiam:
self.statusLabel.setText(self.meshMessage + ' | Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
elif self.ErrInternalGear:
self.statusLabel.setText(self.meshMessage)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
elif self.ErrDiameter:
self.statusLabel.setText(self.meshMessage)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
else:
self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
print('meshing with ', mesh_row_value_pointed)
    def _dataRevision(self):
        """Validate every table row by attempting to parse all of its cells.

        Returns a list of booleans, one entry per row: True when every cell
        of the row could be read and converted, False otherwise.  Validation
        is exception driven: each conversion below doubles as a check, so the
        apparently unused locals are intentional.

        NOTE(review): column 1 is parsed as int here but treated as a float
        column by _cellChange (see its `decimales` list) — confirm which is
        intended.
        """
        self.ErrInt = True
        self.ErrFloat = True
        verification = []
        row_rev = self.tableWidget.rowCount()
        print(row_rev)
        for r in range(row_rev):
            try:
                # Each access/conversion raises if the cell is missing or
                # holds a non-numeric value, which flags the row as invalid.
                check_val_rev = self.tableWidget.cellWidget(r, 0).getCheckValue()
                teeth_pitch_diam_rev = int(self.tableWidget.item(r, 1).text())
                teeth_n_rev = int(self.tableWidget.item(r, 2).text())
                pressure_ang_rev = float(self.tableWidget.item(r, 3).text())
                s_or_r_radius_rev = float(self.tableWidget.item(r, 4).text()) / 2
                module_g_rev = float(self.tableWidget.item(r, 5).text())
                mesh_rev = self.tableWidget.cellWidget(r, 6).currentText()
                angle_rev = float(self.tableWidget.item(r, 7).text())
                x_rev = float(self.tableWidget.item(r, 8).text())
                y_rev = float(self.tableWidget.item(r, 9).text())
                if mesh_rev != 'Not Linked':
                    pass
                verification.append(True)
            except:
                # Any failure above marks the whole row as not usable.
                verification.append(False)
        return verification
    def _gearCalculation(self):
        """Build the outline of every valid gear row.

        Runs _dataRevision() first and only processes the rows it validated.

        Returns [location, gears] where, for each row index i:
          * gears[i]    == [row_number, outline]  or [row_number, [False]]
          * location[i] == [angle, [x, y]]        or [False]
        """
        verif = self._dataRevision()
        gears=[]
        location = []
        for row_g in range(len(verif)):
            gears.append([row_g + 1])
            print('intento: ', verif[row_g])
            if (verif[row_g]):
                # Read the geometry parameters of this row from the table.
                teeth_n = int(self.tableWidget.item(row_g, 2).text())
                pressure_ang = float(self.tableWidget.item(row_g, 3).text())
                s_or_r_radius = float(self.tableWidget.item(row_g, 4).text()) / 2
                module_g = float(self.tableWidget.item(row_g, 5).text())
                check_val = self.tableWidget.cellWidget(row_g, 0).getCheckValue()
                Acell = float(self.tableWidget.item(row_g, 7).text())
                Xcell = float(self.tableWidget.item(row_g, 8).text())
                Ycell = float(self.tableWidget.item(row_g, 9).text())
                # The column-0 checkbox selects internal vs. external gear
                # outlines (helpers are defined elsewhere in this module).
                if check_val:
                    outline = createIntGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                else:
                    outline = createGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                print('outline', outline)
                # Shift the outline when the gear centre is not at the origin.
                if Xcell != 0 or Ycell != 0:
                    outline_diplaced = displace(outline, Xcell, Ycell)
                    outline = outline_diplaced
                    print('outline displaces:', outline)
                location.append([Acell, [Xcell, Ycell]])
                gears[row_g].append(outline)
                print('True: ', row_g + 1)
            else:
                # Invalid rows keep placeholders so indexes stay aligned.
                gears[row_g].append([False])
                location.append([False])
                print('False: ', row_g + 1)
        return [location, gears]
def _cellChange(self):
items = self.tableWidget.selectedItems()
col = self.tableWidget.currentColumn()
row = self.tableWidget.currentRow()
print('_cellChange: ', row, col)
enteros = [2]
decimales = [1, 3, 4, 5, 7, 8, 9]
if col in enteros:
try:
cellType = int(items[0].text())
self.ErrInt = True
self.statusLabel.setText('OK: Current cell data is an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrInt = False
self.statusLabel.setText('Error: Value cell most be an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('integer')
elif col in decimales:
try:
cellType = float(items[0].text())
self.ErrFloat = True
self.statusLabel.setText('OK: Current cell data is a float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrFloat = False
self.statusLabel.setText('Error: Value cell most be an Float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('Float')
def alertDialog(self, val):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
message = val + " input is required"
msgBox.setText(message)
msgBox.setWindowTitle("Input Error")
msgBox.setStandardButtons(QMessageBox.Ok)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
print('OK clicked')
    def _addRow(self):
        """Append a blank gear row to the table, wiring per-column widgets.

        Only runs while the last validation left ErrInt/ErrFloat truthy.
        Column layout:
          0 -> `internal` checkbox (internal-gear flag)
          3 -> pressure angle, defaulted to '20'
          6 -> `Mesh` combo listing the other row numbers
          7/8/9 -> angle A / X / Y, initialised to '0'
          others -> empty centred cells
        """
        if self.ErrInt or self.ErrFloat:
            rowCount = self.tableWidget.rowCount()
            self.tableWidget.insertRow(rowCount)
            columnCount = self.tableWidget.columnCount()
            for col in range(columnCount):
                print(col)
                if col == 0:
                    # Internal-gear checkbox; revalidate on every toggle.
                    check_box = internal(self)
                    check_box.stateChanged.connect(self._clickCheckBox)
                    self.tableWidget.setCellWidget(rowCount, col, check_box)
                elif col == 3:
                    # Default pressure angle of 20 (degrees).
                    angle = QtWidgets.QTableWidgetItem('20')
                    angle.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, angle)
                elif col == 6:
                    # Mesh target: 'Not Linked' plus every existing row number.
                    lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
                    mesh = Mesh(self, lista)
                    self.tableWidget.setCellWidget(rowCount, col, mesh)
                    mesh.currentIndexChanged.connect(self._comboBoxRevision)
                elif col == 7:
                    # Only ItemIsEnabled is set, so this cell starts
                    # non-editable — TODO confirm that is intended.
                    Acell = QtWidgets.QTableWidgetItem('0')
                    Acell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Acell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Acell)
                elif col == 8:
                    Xcell = QtWidgets.QTableWidgetItem('0')
                    Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Xcell)
                elif col == 9:
                    Ycell = QtWidgets.QTableWidgetItem('0')
                    Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Ycell)
                else:
                    cellCenter = QtWidgets.QTableWidgetItem()
                    cellCenter.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, cellCenter)
            self.statusLabel.setText('OK: Row just added')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
def _removeRow(self):
if self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(self.tableWidget.rowCount()-1)
self.statusLabel.setText('OK: Row just deleted')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
class internal(QCheckBox):
    """Checkbox flagging a table row as an internal gear; logs every toggle.

    NOTE(review): the original text was corrupted here ("ef __init__" and a
    missing class header).  The class name `internal` and its QCheckBox base
    are reconstructed from the usage in _addRow (instantiation, stateChanged)
    and getCheckValue (isChecked).
    """
    def __init__(self, parent):
        super().__init__(parent)
        # Log (via getCheckValue) every time the box is toggled.
        self.stateChanged.connect(self.getCheckValue)
def getCheckValue(self):
if self.isChecked() == True:
print('Check Value Active')
return True
elif self.isChecked() == False:
print('Check Value Deactivated')
return False
class Mesh(QComboBox):
    """Drop-down listing the rows a gear can mesh with; echoes each selection."""

    def __init__(self, parent, aa):
        super().__init__(parent)
        self.addItems(aa)
        # Report the new selection whenever the index changes.
        self.currentIndexChanged.connect(self.getComboValue)

    def getComboValue(self):
        """Print and return the text of the currently selected entry."""
        selection = self.currentText()
        print(selection)
        return selection
# Application bootstrap: host the main window inside a stacked widget and
# hand control to the Qt event loop.
app = QApplication(sys.argv)
main_window = mainWindow()
widget = QtWidgets.QStackedWidget()
widget.addWidget(main_window)
widget.resize(658, 650)
widget.show()
try:
    # exec_() blocks until the last window closes; its status code is
    # forwarded to sys.exit(), which raises SystemExit.
    sys.exit(app.exec_())
except:
    # Bare except is deliberate: it also swallows the SystemExit raised by
    # sys.exit(), so the script ends with a friendly message instead.
    print('Exiting')
f730617f1ec79e7ee02ce125d82a1b33d9acb7f8 | 4,003 | py | Python | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 114 | 2018-07-17T17:47:11.000Z | 2022-03-08T09:33:39.000Z | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 28 | 2018-07-18T10:43:37.000Z | 2020-11-24T06:08:18.000Z | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 56 | 2018-07-17T17:57:28.000Z | 2022-03-26T12:30:39.000Z | '''
CATEGORIES TO M SCRIPT - CREATE CONDITIONAL STATEMENT CODE FOR POWER BI
-
a dynamoPython script, visit the website for more details
https://github.com/Amoursol/dynamoPython
'''
__author__ = 'Adam Bear - adam@ukbear.com'
__twitter__ = '@adambear82'
__github__ = '@adambear82'
__version__ = '1.0.0'
'''
for large projects with lots of clashes it is useful to analyse in
a business intelligence or data visualisation tool such as ms power bi.
creating the conditional statement in power bi can take a long time if
there are a lot of categories to include
'''
# ------------------------
# import modules
# ------------------------
# refer to the clipboard
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Clipboard
# refer to the document manager
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
# refer to the revit API
clr.AddReference('RevitAPI')
import Autodesk
from Autodesk.Revit.DB import *
# ------------------------
# inputs & variables
# ------------------------
# some categories exported from navisworks are not included as
# categories in visibility graphics, for example
# Handrails, Landings, Pads, Runs, Slab Edges, Top Rails, Wall Sweeps
# remove single and double spaces after commas and split into list
catsInput = IN[0]
# Two identical passes: each pass removes one space after every comma, so
# both ", " and ",  " separators collapse to a plain ",".
catsReplace1 = catsInput.replace(', ', ',')
catsReplace2 = catsReplace1.replace(', ', ',')
catsManual = catsReplace2.split(',')
catsManual.sort()
# provide reference strings used when assembling the M expression
hashtag = 'Renamed Columns1'  # Power BI query step referenced as #"..."
pathlink = 'pathlink'         # column compared against the category names
filterIn = 'filter_in'        # value assigned to rows whose category matched
filterOut = 'filter_out'      # value assigned to every other row
# ------------------------
# get categories
# ------------------------
# get categories that can add sub categories
# ie the categories which appear in vis graphics
# annotated from forum post with kudos to René Picazo
# https://forum.dynamobim.com/t/get-all-elements-in-model-categories/9447/7
# Collect the model categories that can host sub-categories, i.e. the ones
# that appear in visibility graphics.
modelCats = [
    cat.Name
    for cat in doc.Settings.Categories
    if cat.CategoryType == CategoryType.Model and cat.CanAddSubcategory
]
# Extra categories are only merged in when the input actually supplied any.
if catsInput:
    modelCats.extend(catsManual)
# Alphabetical order keeps the generated code easier to read.
cats = sorted(modelCats)
# ------------------------
# strings
# ------------------------
# Opening line of the M expression: add a "filter" column to the renamed
# table step.
table = '= Table.AddColumn(#"' + hashtag + '", "filter",'
# One conditional clause per category.  The very first clause uses
# "each if"; every following clause uses "else if".
listLines = ['each if [' + pathlink + '] = "' + cats[0] + '" then "' + filterIn + '"']
for cat in cats[1:]:
    listLines.append('else if [' + pathlink + '] = "' + cat + '" then "' + filterIn + '"')
lines = '\r\n'.join(listLines)
# Rows matching none of the categories fall through to the filter-out value.
strElse = 'else "' + filterOut + '")'
# Assemble the full M code block, one clause per line.
code = '\r\n'.join((table, lines, strElse))
# ------------------------
# send to clipboard
# ------------------------
# annotated with kudos to bakery 'by send to clipboard from revit' (sic)
# https://github.com/LukeyJohnson/BakeryForDynamo/blob/97e5622db7ba14cd42caac9b8bd4fdba6b66871e/nodes/bv%20Send%20to%20Clipboard%20from%20Revit.dyf#L5-L12
# try to copy the code, provide a message if it fails
try:
    Clipboard.SetText(code)
    copyMsg = code
except:
    # Deliberately broad: Clipboard.SetText surfaces .NET exceptions, and a
    # failed copy should degrade to a message rather than abort the node.
    copyMsg = 'Data could not be copied to clipboard'
# ------------------------
# output
# ------------------------
# The generated M code (or the failure message) becomes the node's output.
OUT = copyMsg
| 29.433824 | 154 | 0.683987 | __author__ = 'Adam Bear - adam@ukbear.com'
__twitter__ = '@adambear82'
__github__ = '@adambear82'
__version__ = '1.0.0'
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Clipboard
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference('RevitAPI')
import Autodesk
from Autodesk.Revit.DB import *
catsInput = IN[0]
catsReplace1 = catsInput.replace(', ', ',')
catsReplace2 = catsReplace1.replace(', ', ',')
catsManual = catsReplace2.split(',')
catsManual.sort()
hashtag = 'Renamed Columns1'
pathlink = 'pathlink'
filterIn = 'filter_in'
filterOut = 'filter_out'
modelCats = []
for cat in doc.Settings.Categories :
if cat.CategoryType == CategoryType.Model and cat.CanAddSubcategory:
modelCats.append(cat.Name)
if catsInput :
for cat in catsManual :
modelCats.append(cat)
cats = sorted(modelCats)
table = ''.join(('= Table.AddColumn(#"', hashtag, '", "filter",'))
each = 'each if ['
elif0 = 'else if ['
elif1 = '] = "'
elif2 = '" then "'
elif3 = '"'
catJoin = each, pathlink, elif1, cats[0], elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines = []
listLines.append(temp)
for c in cats[1:] :
catJoin = elif0, pathlink, elif1, c, elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines.append(temp)
lines = '\r\n'.join(listLines)
strElse = ''.join(('else "', filterOut, '")'))
code = '\r\n'.join((table, lines, strElse))
Clipboard.SetText(code)
copyMsg = code
except:
copyMsg = 'Data could not be copied to clipboard'
OUT = copyMsg
| true | true |
f730625ed1d40a9135df52e51742c9f032d3ce20 | 1,930 | py | Python | results/rabi_and_lmg_optimizations_20190227/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | 1 | 2020-07-21T02:31:41.000Z | 2020-07-21T02:31:41.000Z | results/rabi_and_lmg_optimizations_20190228/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | results/rabi_and_lmg_optimizations_20190228/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
# ------ build and check name for output file
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method
),
other_options=dict(
scan_times=np.linspace(0.1, 4, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| 29.242424 | 70 | 0.707254 | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method
),
other_options=dict(
scan_times=np.linspace(0.1, 4, 100)
)
)
results.to_csv(output_file_name)
| true | true |
f730640adaf31ffe5b7c8de73e4fd29b5bc4983e | 9,252 | py | Python | doc/source/conf.py | ifxit/nidho | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | 11 | 2016-06-09T12:07:14.000Z | 2018-01-18T08:01:08.000Z | doc/source/conf.py | ifxit/nidho | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | 4 | 2016-07-06T11:06:34.000Z | 2020-01-02T10:11:48.000Z | doc/source/conf.py | ifxit/nidhogg | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Nidhogg documentation build configuration file, created by
# sphinx-quickstart on Thu May 28 09:48:45 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
# NOTE(review): shlex is imported but never used below; it is left over from
# the sphinx-quickstart template.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nidhogg'
copyright = u'2018, Roland Wohlfahrt, Christian Assing'
author = u'Roland Wohlfahrt, Christian Assing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version and release are kept identical here; bump both
# together when cutting a new release.
#
# The short X.Y version.
version = '3.9.0'
# The full version, including alpha/beta/rc tags.
release = '3.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# (None makes Sphinx fall back to its default, English.)
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Empty: every source file under the documentation root is built.)
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE(review): sphinx_rtd_theme is a third-party theme and must be
# installed in the documentation build environment.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (Empty: these docs ship no custom static assets.)
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nidhoggdoc'
# -- Options for LaTeX output ---------------------------------------------
# All keys below are commented out, so the Sphinx/LaTeX defaults apply.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Nidhogg.tex', u'Nidhogg Documentation',
     u'Roland Wohlfahrt, Christian Assing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'nidhogg', u'Nidhogg Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder; replace it with a real one-line summary of the project.
texinfo_documents = [
    (master_doc, 'Nidhogg', u'Nidhogg Documentation',
     author, 'Nidhogg', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.236934 | 79 | 0.718223 |
import os
import shlex
import sys
sys.path.insert(0, os.path.abspath('../../'))
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Nidhogg'
copyright = u'2018, Roland Wohlfahrt, Christian Assing'
author = u'Roland Wohlfahrt, Christian Assing'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9.0'
# The full version, including alpha/beta/rc tags.
release = '3.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Output base name for the HTML help builder (used for the .hhp project file).
htmlhelp_basename = 'Nidhoggdoc'
# -- Options for LaTeX output ---------------------------------------------

# All keys below are optional overrides; defaults are used while commented out.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Nidhogg.tex', u'Nidhogg Documentation',
     u'Roland Wohlfahrt, Christian Assing', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'nidhogg', u'Nidhogg Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Nidhogg', u'Nidhogg Documentation',
     author, 'Nidhogg', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f730647bf68842c5a3eff14c987502a2fca1c432 | 141,522 | py | Python | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | import time
import sys
import os
from collections import OrderedDict
from functools import wraps
from ..utils.common import get_current_user, list_intersect, is_sub_list, user_input, list_difference, parse_ports_from_profiles
from ..utils import parsing_opts, text_tables
from ..utils.text_opts import format_text, format_num
from ..common.trex_exceptions import *
from ..common.trex_events import Event
from ..common.trex_logger import Logger
from ..common.trex_client import TRexClient, PacketBuffer
from ..common.trex_types import *
from ..common.trex_types import PortProfileID, ALL_PROFILE_ID
from ..common.trex_psv import *
from ..common.trex_api_annotators import client_api, console_api
from .trex_stl_port import STLPort
from .trex_stl_streams import STLStream, STLProfile, STLTaggedPktGroupTagConf
from .trex_stl_stats import CPgIdStats
def validate_port_input(port_arg):
    """Decorator factory to support PortProfileID type input.

    Converts the argument named *port_arg* of the wrapped method from
    int / str (or a list of them) to :class:`PortProfileID` (or a list of
    them) before the method runs. ``None`` is passed through unchanged.

    :parameters:
        port_arg : str
            the name of the parameter to convert (e.g. "ports", "port")
    """
    def wrap (func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            code = func.__code__
            fname = func.__name__
            names = code.co_varnames[:code.co_argcount]
            argname = port_arg
            try:
                # positional lookup: args excludes 'self', hence the -1 offset
                port_index = names.index(argname) - 1
                argval = args[port_index]
                args = list(args)
                args[port_index] = convert_port_to_profile(argval)
                args = tuple(args)
            except (ValueError, IndexError):
                # argument was passed by keyword (or omitted - default None)
                argval = kwargs.get(argname)
                kwargs[argname] = convert_port_to_profile(argval)
            return func(self, *args, **kwargs)

        def convert_port_to_profile(port):
            # normalize a single value or each element of a list to PortProfileID
            if port is None:
                return port
            if isinstance(port, list):
                result = list(port)
                for idx, val in enumerate(result):
                    validate_type('port', val, (int, str, PortProfileID))
                    result[idx] = PortProfileID(str(val))
            else:
                validate_type('port', port, (int, str, PortProfileID))
                result = PortProfileID(str(port))
            return result

        return wrapper
    return wrap
class TPGState:
    """
    A simple class representing the states of the Tagged Packet Group state machine.
    This class should be always kept in sync with the state machine in the server.
    """

    DISABLED        = 0  # Tagged Packet Group is disabled.
    ENABLED_CP      = 1  # Tagged Packet Group Control Plane is enabled, message sent to Rx.
    ENABLED_CP_RX   = 2  # Tagged Packet Group Control Plane and Rx are enabled. Awaiting Data Plane.
    ENABLED         = 3  # Tagged Packet Group is enabled.
    DISABLED_DP     = 4  # Tagged Packet Group Data Plane disabled, message sent to Rx.
    DISABLED_DP_RX  = 5  # Tagged Packet Group Data Plane and Rx disabled. Object can be destroyed.
    RX_ALLOC_FAILED = 6  # Rx Allocation Failed
    DP_ALLOC_FAILED = 7  # Dp Allocation Failed

    ALL_STATES   = [DISABLED, ENABLED_CP, ENABLED_CP_RX, ENABLED, DISABLED_DP, DISABLED_DP_RX, RX_ALLOC_FAILED, DP_ALLOC_FAILED]
    ERROR_STATES = [RX_ALLOC_FAILED, DP_ALLOC_FAILED]

    def __init__(self, initial_state):
        """
        :parameters:
            initial_state : int
                one of TPGState.ALL_STATES

        :raises:
            + :exc:`TRexError` - if *initial_state* is not a known state
        """
        if initial_state not in TPGState.ALL_STATES:
            raise TRexError("Invalid TPG State {}".format(initial_state))
        self._state = initial_state
        # user facing message for each failure state
        self.fail_messages = {
            TPGState.RX_ALLOC_FAILED: "Rx counter allocation failed!",
            TPGState.DP_ALLOC_FAILED: "Tx counter allocation failed!"
        }

    def is_error_state(self):
        """
        Indicate if this TPGState is an error state.
        """
        return self._state in TPGState.ERROR_STATES

    def get_fail_message(self):
        """
        Get the fail message to print to the user for this state.
        """
        if not self.is_error_state():
            return "TPG State is valid!"
        return self.fail_messages[self._state]

    def __eq__(self, other):
        # comparing against anything but another TPGState is a programming error
        if not isinstance(other, TPGState):
            # bug fix: message previously misspelled "comparision"
            raise TRexError("Invalid comparison for TPGState")
        return self._state == other._state
class STLClient(TRexClient):
# different modes for attaching traffic to ports (see start(core_mask=...)):
#   CORE_MASK_SPLIT  - divide traffic equally between all cores of a port
#   CORE_MASK_PIN    - for dual ports sharing cores, pin half the cores to each port
#   CORE_MASK_SINGLE - use a single core per port
CORE_MASK_SPLIT = 1
CORE_MASK_PIN = 2
CORE_MASK_SINGLE = 3
def __init__(self,
             username = get_current_user(),
             server = "localhost",
             sync_port = 4501,
             async_port = 4500,
             verbose_level = "error",
             logger = None,
             sync_timeout = None,
             async_timeout = None
             ):
    """
    TRex stateless client

    :parameters:
        username : string
            the user name, for example imarom

        server : string
            the server name or ip

        sync_port : int
            the RPC port

        async_port : int
            the ASYNC port (subscriber port)

        verbose_level: str
            one of "none", "critical", "error", "info", "debug"

        logger: instance of AbstractLogger
            if None, will use ScreenLogger

        sync_timeout: int
            time in sec for timeout for RPC commands. for local lab keep it as default (3 sec)
            higher number would be more resilient for Firewalls but slower to identify real server crash

        async_timeout: int
            time in sec for timeout for async notification. for local lab keep it as default (3 sec)
            higher number would be more resilient for Firewalls but slower to identify real server crash
    """
    # NOTE(review): the 'username' default is evaluated once at class-definition
    # time (standard Python behavior) - fine for a constant user name.

    # version handshake data for the stateless (STL) RPC API
    api_ver = {'name': 'STL', 'major': 5, 'minor': 1}

    TRexClient.__init__(self,
                        api_ver,
                        username,
                        server,
                        sync_port,
                        async_port,
                        verbose_level,
                        logger,
                        sync_timeout,
                        async_timeout)

    # aggregated per-PG-ID (flow stats / latency) statistics helper
    self.pgid_stats = CPgIdStats(self.conn.rpc)
    self.tpg_status = None # TPG Status cached in Python Side
def get_mode(self):
    """Return the client mode identifier - always "STL" for the stateless client."""
    return "STL"
############################ called #############################
############################ by base #############################
############################ TRex Client #############################
def _on_connect(self):
    """Hook invoked by the base client upon connection; nothing extra to do here."""
    return RC_OK()

def _on_connect_create_ports(self, system_info):
    """
    Build the STLPort objects from the server's system info.
    Called when connecting to the server, triggered by the common client object.
    """
    port_map = {
        info['index']: STLPort(self.ctx, info['index'], self.conn.rpc, info, self.is_dynamic)
        for info in system_info['ports']
    }
    return self._assign_ports(port_map)

def _on_connect_clear_stats(self):
    """Zero the statistics baseline right after connecting (xstats excluded)."""
    with self.ctx.logger.suppress(verbose = "warning"):
        self.clear_stats(ports = self.get_all_ports(), clear_xstats = False)
    return RC_OK()
############################ events #############################
############################ #############################
############################ #############################
# register all common events
def _register_events(self):
    """Register STL-specific async event handlers on top of the common ones."""
    super(STLClient, self)._register_events()

    for event_name, handler in (
            ("profile started",     self._on_profile_started),
            ("profile stopped",     self._on_profile_stopped),
            ("profile paused",      self._on_profile_paused),
            ("profile resumed",     self._on_profile_resumed),
            ("profile finished tx", self._on_profile_finished_tx),
            ("profile error",       self._on_profile_error)):
        self.ctx.event_handler.register_event_handler(event_name, handler)

def _on_profile_started(self, port_id, profile_id):
    """Async event: a profile started transmitting on the server."""
    if port_id in self.ports:
        self.ports[port_id].async_event_profile_started(profile_id)
    return Event('server', 'info', "Profile {0}.{1} has started".format(port_id, profile_id))

def _on_profile_stopped(self, port_id, profile_id):
    """Async event: a profile stopped transmitting on the server."""
    if port_id in self.ports:
        self.ports[port_id].async_event_profile_stopped(profile_id)
    return Event('server', 'info', "Profile {0}.{1} has stopped".format(port_id, profile_id))

def _on_profile_paused(self, port_id, profile_id):
    """Async event: a profile was paused on the server."""
    if port_id in self.ports:
        self.ports[port_id].async_event_profile_paused(profile_id)
    return Event('server', 'info', "Profile {0}.{1} has paused".format(port_id, profile_id))

def _on_profile_resumed(self, port_id, profile_id):
    """Async event: a profile was resumed on the server."""
    if port_id in self.ports:
        self.ports[port_id].async_event_profile_resumed(profile_id)
    return Event('server', 'info', "Profile {0}.{1} has resumed".format(port_id, profile_id))

def _on_profile_finished_tx(self, port_id, profile_id):
    """Async event: a profile finished its TX job (e.g. duration expired)."""
    if port_id in self.ports:
        self.ports[port_id].async_event_profile_job_done(profile_id)
    ev = Event('server', 'info', "Profile {0}.{1} job done".format(port_id, profile_id))

    # echo job completion to the user for ports we own
    if port_id in self.get_acquired_ports():
        self.ctx.logger.info(ev)
    return ev

def _on_profile_error(self, port_id, profile_id):
    """Async event: a profile job failed on the server."""
    return Event('server', 'warning', "Profile {0}.{1} job failed".format(port_id, profile_id))
######################### private/helper #########################
############################ functions #############################
############################ #############################
# remove all RX filters in a safe manner
@validate_port_input("ports")
def _remove_rx_filters (self, ports, rx_delay_ms):
    """
    Remove RX (flow stats / latency) filters from the given port profiles,
    waiting out each profile's RX delay first so in-flight packets are still
    counted instead of being reported as errors.

    :parameters:
        ports : list of PortProfileID
        rx_delay_ms : int
            time (in ms) since last TX that must elapse before removal
    """
    # get the enabled RX profiles
    rx_ports = [p for p in ports if self.ports[p.port_id].has_profile_rx_enabled(p.profile_id)]
    if not rx_ports:
        return RC_OK()

    # block while any RX configured profile has not yet have it's delay expired
    while any([not self.ports[p.port_id].has_rx_delay_expired(p.profile_id, rx_delay_ms) for p in rx_ports]):
        time.sleep(0.01)

    # remove RX filters
    return self._for_each_port('remove_rx_filters', rx_ports)
# Check console API ports argument
def validate_profile_input(self, input_profiles):
    """
    Expand and validate a console-supplied list of port profiles.

    A wildcard profile (``<port>.*``) expands to all profiles currently
    configured on that port. A wildcard may appear at most once per port and
    may not be combined with a specific profile of the same port.

    :parameters:
        input_profiles : list of PortProfileID

    :returns:
        list of PortProfileID with all wildcards expanded

    :raises:
        + :exc:`TRexError` - on duplicated or conflicting wildcard usage
    """
    ports = []
    result_profiles = []

    # collect ports referenced via a wildcard, rejecting duplicates
    for profile in input_profiles:
        if profile.profile_id == ALL_PROFILE_ID:
            if int(profile) not in ports:
                ports.append(int(profile))
            else:
                # bug fix: message previously read "more than on"
                raise TRexError("Cannot have more than one %d.* in the params" % int(profile))

    # a wildcard port may not also appear with a specific profile
    for pid in ports:
        for profile in input_profiles:
            if int(profile) == pid and profile.profile_id != ALL_PROFILE_ID:
                raise TRexError("Cannot have %d.* and %s passed together as --ports" % (int(profile), str(profile)))
        port_profiles = self.ports[pid].get_port_profiles("all")
        result_profiles.extend(port_profiles)

    # append explicit (non-wildcard) profiles, avoiding duplicates
    for profile in input_profiles:
        if profile.profile_id != ALL_PROFILE_ID:
            if profile not in result_profiles:
                result_profiles.append(profile)

    return result_profiles
# Get all profiles with the certain state from ports
# state = {"active", "transmitting", "paused", "streams"}
def get_profiles_with_state(self, state):
    """
    Return all profiles of the acquired ports that are in the given state.

    :parameters:
        state : str
            one of "active", "transmitting", "paused", "streams"
    """
    profiles = []
    for port_id in self.get_acquired_ports():
        profiles.extend(self.ports[port_id].get_port_profiles(state))
    return profiles
############################ Stateless #############################
############################ API #############################
############################ #############################
@client_api('command', True)
def reset(self, ports = None, restart = False):
    """
    Force acquire ports, stop the traffic, remove all streams and clear stats

    :parameters:
        ports : list
            Ports on which to execute the command

        restart: bool
            Restart the NICs (link down / up)

    :raises:
        + :exc:`TRexError`
    """
    ports = ports if ports is not None else self.get_all_ports()
    ports = self.psv.validate('reset', ports)

    # build a wildcard profile per port so the cleanup covers every profile
    all_profiles = []
    for port in ports:
        profile = PortProfileID(str(port) + ".*")
        all_profiles.append(profile)

    if restart:
        # link down/up requires NIC support on every port
        if not all([p.is_link_change_supported() for p in self.ports.values()]):
            raise TRexError("NICs of this type do not support link down, can't use restart flag.")
        self.ctx.logger.pre_cmd("Hard resetting ports {0}:".format(ports))
    else:
        self.ctx.logger.pre_cmd("Resetting ports {0}:".format(ports))

    try:
        with self.ctx.logger.suppress():
            # force take the port and ignore any streams on it
            self.acquire(ports, force = True, sync_streams = False)
            self.stop(all_profiles)
            self.remove_all_streams(all_profiles)
            self.clear_stats(ports)
            self.set_port_attr(ports,
                               promiscuous = False if self.any_port.is_prom_supported() else None,
                               link_up = True if restart else None)
            self.remove_rx_queue(ports)
            self._for_each_port('stop_capture_port', ports)
            self.remove_all_captures()
            self.set_service_mode(ports, False)

        self.ctx.logger.post_cmd(RC_OK())

    except TRexError as e:
        self.ctx.logger.post_cmd(False)
        raise
@client_api('command', True)
def acquire (self, ports = None, force = False, sync_streams = True):
    """
    Acquires ports for executing commands

    :parameters:
        ports : list
            Ports on which to execute the command

        force : bool
            Force acquire the ports.

        sync_streams: bool
            sync with the server about the configured streams

    :raises:
        + :exc:`TRexError`
    """
    # by default use all ports
    ports = ports if ports is not None else self.get_all_ports()

    # validate ports
    ports = self.psv.validate('acquire', ports)

    if force:
        self.ctx.logger.pre_cmd("Force acquiring ports {0}:".format(ports))
        # a force acquire tears down any TPG context left behind by another user
        for port in ports:
            tpg_status = self.get_tpg_status(port=port)
            enabled = tpg_status.get("enabled", False)
            if enabled:
                username = tpg_status["data"]["username"]
                tpg_ports = tpg_status["data"]["acquired_ports"]
                self.ctx.logger.pre_cmd(format_text("Found TPG Context of user {} in ports {}".format(username, tpg_ports), "yellow"))
                self.disable_tpg(username)
    else:
        self.ctx.logger.pre_cmd("Acquiring ports {0}:".format(ports))

    rc = self._for_each_port('acquire', ports, force)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        # cleanup
        self._for_each_port('release', ports)
        raise TRexError(rc)

    self._post_acquire_common(ports)

    # sync streams
    if sync_streams:
        rc = self._for_each_port('sync_streams', ports)
        if not rc:
            raise TRexError(rc)
@client_api('command', True)
def release(self, ports = None):
    """
    Release ports

    :parameters:
        ports : list
            Ports on which to execute the command

    :raises:
        + :exc:`TRexError`
    """
    ports = ports if ports is not None else self.get_acquired_ports()

    # validate ports
    ports = self.psv.validate('release', ports, PSV_ACQUIRED)

    if self.tpg_status is None:
        # Nothing in cache - refreshes self.tpg_status as a side effect
        self.get_tpg_status()

    # an active TPG context must be disabled before releasing the ports
    if self.tpg_status["enabled"]:
        self.disable_tpg()

    self.ctx.logger.pre_cmd("Releasing ports {0}:".format(ports))
    rc = self._for_each_port('release', ports)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
@client_api('command', True)
def set_service_mode(self, ports = None, enabled = True, filtered = False, mask = None):
    ''' based on :meth:`trex.stl.trex_stl_client.STLClient.set_service_mode_base` '''
    # the base implementation performs validation and common bookkeeping
    self.set_service_mode_base(ports, enabled, filtered, mask)

    result = self._for_each_port('set_service_mode', ports, enabled, filtered, mask)
    self.ctx.logger.post_cmd(result)
    if not result:
        raise TRexError(result)
@client_api('command', True)
@validate_port_input("ports")
def remove_all_streams(self, ports = None):
    """
    Remove all streams from port(s)

    :parameters:
        ports : list
            Ports on which to execute the command

    :raises:
        + :exc:`TRexError`
    """
    if ports is None:
        ports = self.get_acquired_ports()

    # ports must be owned by us and idle
    ports = self.psv.validate('remove_all_streams', ports, (PSV_ACQUIRED, PSV_IDLE))

    self.ctx.logger.pre_cmd("Removing all streams from port(s) {0}:".format(ports))
    result = self._for_each_port('remove_all_streams', ports)
    self.ctx.logger.post_cmd(result)

    if not result:
        raise TRexError(result)
@client_api('command', True)
@validate_port_input("ports")
def add_streams(self, streams, ports = None):
    """
    Add a list of streams to port(s)

    :parameters:
        ports : list
            Ports on which to execute the command
        streams: list
            Streams to attach (or profile)

    :returns:
        List of stream IDs in order of the stream list

    :raises:
        + :exc:`TRexError`
    """
    if ports is None:
        ports = self.get_acquired_ports()

    # ports must be owned by us and idle
    ports = self.psv.validate('add_streams', ports, (PSV_ACQUIRED, PSV_IDLE))

    # a profile is just a container of streams
    if isinstance(streams, STLProfile):
        streams = streams.get_streams()

    # a single stream becomes a one-element list
    if not isinstance(streams, list):
        streams = [streams]

    # every element must be a proper STLStream
    if not all(isinstance(stream, STLStream) for stream in streams):
        raise TRexArgumentError('streams', streams)

    self.ctx.logger.pre_cmd("Attaching {0} streams to port(s) {1}:".format(len(streams), ports))
    result = self._for_each_port('add_streams', ports, streams)
    self.ctx.logger.post_cmd(result)

    if not result:
        raise TRexError(result)

    # the stream IDs, in the same order as the input streams
    return result.data()
@client_api('command', True)
def add_profile(self, filename, ports = None, **kwargs):
    """ | Add streams from profile by its type. Supported types are:
        | .py
        | .yaml
        | .pcap file that converted to profile automatically

        :parameters:
            filename : string
                filename (with path) of the profile
            ports : list
                list of ports to add the profile (default: all acquired)
            kwargs : dict
                forward those key-value pairs to the profile (tunables)

        :returns:
            List of stream IDs in order of the stream list

        :raises:
            + :exc:`TRexError`
    """
    validate_type('filename', filename, basestring)

    # STLProfile.load dispatches on the file extension (.py / .yaml / .pcap)
    loaded_profile = STLProfile.load(filename, **kwargs)
    return self.add_streams(loaded_profile.get_streams(), ports)
@client_api('command', True)
@validate_port_input("ports")
def remove_streams(self, stream_id_list, ports = None):
    """
    Remove a list of streams from ports

    :parameters:
        stream_id_list: int or list of ints
            Stream id list to remove
        ports : list
            Ports on which to execute the command

    :raises:
        + :exc:`TRexError`
    """
    validate_type('streams_id_list', stream_id_list, (int, list))

    # normalize a single stream ID to a list
    stream_id_list = listify(stream_id_list)

    # check at least one exists
    if not stream_id_list:
        raise TRexError("remove_streams - 'stream_id_list' cannot be empty")

    # check stream IDs
    # (done once - the original code repeated both the listify and this
    # validation a second time after port validation, which was a no-op)
    for i, stream_id in enumerate(stream_id_list):
        validate_type('stream ID:{0}'.format(i), stream_id, int)

    ports = ports if ports is not None else self.get_acquired_ports()
    ports = self.psv.validate('remove_streams', ports, (PSV_ACQUIRED, PSV_IDLE))

    # remove streams
    self.ctx.logger.pre_cmd("Removing {0} streams from port(s) {1}:".format(len(stream_id_list), ports))
    rc = self._for_each_port("remove_streams", ports, stream_id_list)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
# check that either port is resolved or all streams have explicit dest MAC
def __check_streams_explicit_dest(self, streams_per_port):
    """
    Verify that every unresolved port carries only streams with an explicit
    destination MAC; resolved ports are accepted as-is.
    """
    for port_id, streams in streams_per_port.items():
        if self.ports[port_id].is_resolved():
            continue
        for stream in streams:
            if not stream.is_explicit_dst_mac():
                raise TRexError('Port %s dest MAC is invalid and there are streams without explicit dest MAC.' % port_id)
# common checks for start API
def __pre_start_check (self, cmd_name, ports, force, streams_per_port = None):
    """
    Common validation for the start API.

    :parameters:
        cmd_name : str
            command name for error reporting (e.g. 'START')
        ports : list of PortProfileID
        force : bool
            if True, only basic validation is performed
        streams_per_port : dict or None
            port_id -> streams; when provided, unresolved ports are allowed
            as long as every stream has an explicit dest MAC

    :returns:
        the validated ports list
    """
    ports = listify(ports)

    # starting with a wildcard profile ID is ambiguous - reject it
    for port in ports:
        if isinstance(port, PortProfileID):
            if port.profile_id == ALL_PROFILE_ID:
                err = 'Profile id * is invalid for starting the traffic. Please assign a specific profile id'
                raise TRexError(err)

    if force:
        return self.psv.validate(cmd_name, ports)

    states = {PSV_UP: "check the connection or specify 'force'",
              PSV_IDLE: "please stop them or specify 'force'",
              PSV_NON_SERVICE: "please disable service mode or specify 'force'"}

    if streams_per_port:
        # unresolved ports are acceptable if every stream carries its own dest MAC
        self.__check_streams_explicit_dest(streams_per_port)
    else:
        states[PSV_RESOLVED] = "please resolve them or specify 'force'";

    return self.psv.validate(cmd_name, ports, states)
def __decode_core_mask(self, ports, core_mask):
    """
    Translate the user supplied 'core_mask' argument to a per-port mask dict.

    :parameters:
        ports : list of int
        core_mask : int or list
            one of CORE_MASK_SPLIT / CORE_MASK_PIN / CORE_MASK_SINGLE,
            or a list of explicit masks (one per port)

    :returns:
        dict mapping port -> mask (None means split across all cores)

    :raises:
        + :exc:`TRexError` - on an unknown mode or a mask list of wrong length
    """
    available_modes = [self.CORE_MASK_PIN, self.CORE_MASK_SPLIT, self.CORE_MASK_SINGLE]

    # predefined modes
    if isinstance(core_mask, int):
        if core_mask not in available_modes:
            # bug fix: the modes are ints - str.join on them raised TypeError,
            # masking the real error message; convert to str first
            raise TRexError("'core_mask' can be either %s or a list of masks" % ', '.join(map(str, available_modes)))

        decoded_mask = {}
        for port in ports:
            # a pin mode was requested and we have
            # the second port from the group in the start list
            if (core_mask == self.CORE_MASK_PIN) and ((port ^ 0x1) in ports):
                decoded_mask[port] = 0x55555555 if (port % 2) == 0 else 0xAAAAAAAA
            elif core_mask == self.CORE_MASK_SINGLE:
                decoded_mask[port] = 0x1
            else:
                decoded_mask[port] = None

        return decoded_mask

    # list of masks
    elif isinstance(core_mask, list):
        if len(ports) != len(core_mask):
            raise TRexError("'core_mask' list must be the same length as 'ports' list")

        decoded_mask = {}
        for i, port in enumerate(ports):
            decoded_mask[port] = core_mask[i]

        return decoded_mask
@client_api('command', True)
@validate_port_input("ports")
def start (self,
           ports = None,
           mult = "1",
           force = False,
           duration = -1,
           total = False,
           core_mask = None,
           synchronized = False):
    """
    Start traffic on port(s)

    :parameters:
        ports : list
            Ports on which to execute the command

        mult : str
            Multiplier in a form of pps, bps, or line util in %
            Examples: "5kpps", "10gbps", "85%", "32mbps"

        force : bool
            If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
            True: Force start
            False: Do not force start

        duration : int
            Limit the run time (seconds)
            -1 = unlimited

        total : bool
            Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
            True: Divide bandwidth among the ports
            False: Duplicate

        core_mask: CORE_MASK_SPLIT, CORE_MASK_PIN, CORE_MASK_SINGLE or a list of masks (one per port)
            Determine the allocation of cores per port
            In CORE_MASK_SPLIT all the traffic will be divided equally between all the cores
            associated with each port
            In CORE_MASK_PIN, for each dual ports (a group that shares the same cores)
            the cores will be divided half pinned for each port

        synchronized: bool
            In case of several ports, ensure their transmitting time is synchronized.
            Must use adjacent ports (belong to same set of cores).
            Will set default core_mask to 0x1.
            Recommended ipg 1ms and more.

    :raises:
        + :exc:`TRexError`
    """
    # default: one profile object per acquired port
    if ports is None:
        ports = []
        for pid in self.get_acquired_ports():
            port = PortProfileID(pid)
            ports.append(port)
    else:
        ports = listify(ports)

    port_id_list = parse_ports_from_profiles(ports)

    streams_per_port = {}
    for port in port_id_list:
        streams_per_port[port] = self.ports[port].streams.values()

    ports = self.__pre_start_check('START', ports, force, streams_per_port)

    validate_type('mult', mult, basestring)
    validate_type('force', force, bool)
    validate_type('duration', duration, (int, float))
    validate_type('total', total, bool)
    validate_type('core_mask', core_mask, (type(None), int, list))

    #########################
    # decode core mask argument
    if core_mask is None:
        core_mask = self.CORE_MASK_SINGLE if synchronized else self.CORE_MASK_SPLIT
    decoded_mask = self.__decode_core_mask(port_id_list, core_mask)

    #######################
    # verify multiplier
    mult_obj = parsing_opts.decode_multiplier(mult,
                                              allow_update = False,
                                              divide_count = len(ports) if total else 1)
    if not mult_obj:
        raise TRexArgumentError('mult', mult)

    # stop active ports if needed
    active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
    if active_profiles and force:
        self.stop(active_profiles)

    if synchronized:
        # start synchronized (per pair of ports) traffic
        if len(ports) % 2:
            raise TRexError('Must use even number of ports in synchronized mode')
        for port in ports:
            pair_port = int(port) ^ 0x1
            if isinstance(port, PortProfileID):
                pair_port = str(pair_port) + "." + str(port.profile_id)
                pair_port = PortProfileID(pair_port)
            if pair_port not in ports:
                raise TRexError('Must use adjacent ports in synchronized mode. Port "%s" has not pair.' % port)

        # schedule the start in the future, based on server time plus RTT margin
        start_time = time.time()
        # bug fix: was 'self.ctx.logger.supress()' - the context manager is
        # 'suppress' (as used everywhere else in this file), so the
        # synchronized path raised AttributeError
        with self.ctx.logger.suppress():
            ping_data = self.ping_rpc_server()
        start_at_ts = ping_data['ts'] + max((time.time() - start_time), 0.5) * len(ports)
        synchronized_str = 'synchronized '
    else:
        start_at_ts = 0
        synchronized_str = ''

    # clear flow stats and latency stats when starting traffic. (Python cache only)
    self.pgid_stats.clear_stats(clear_flow_stats=True, clear_latency_stats=True)

    # start traffic
    self.ctx.logger.pre_cmd("Starting {}traffic on port(s) {}:".format(synchronized_str, ports))

    # mask is port specific information
    pargs = {k: {'mask': v} for k, v in decoded_mask.items()}

    rc = self._for_each_port("start", ports, mult_obj, duration, force, start_at_ts = start_at_ts, pargs = pargs)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)

    return rc
@client_api('command', True)
@validate_port_input("ports")
def stop (self, ports = None, rx_delay_ms = None):
    """
    Stop port(s)

    :parameters:
        ports : list
            Ports on which to execute the command

        rx_delay_ms : int
            time to wait until RX filters are removed
            this value should reflect the time it takes
            packets which were transmitted to arrive
            to the destination.
            after this time the RX filters will be removed

    :raises:
        + :exc:`TRexError`
    """
    if ports is None:
        ports = self.get_profiles_with_state("active")
        # nothing to stop - silently succeed
        if not ports:
            return

    ports = self.psv.validate('STOP', ports, PSV_ACQUIRED)
    if not ports:
        return

    port_id_list = parse_ports_from_profiles(ports)

    self.ctx.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports))
    rc = self._for_each_port('stop', ports)
    self.ctx.logger.post_cmd(rc)
    if not rc:
        raise TRexError(rc)

    # virtual NICs buffer longer, so give their packets more time to land
    if rx_delay_ms is None:
        if self.ports[port_id_list[0]].is_virtual(): # assume all ports have same type
            rx_delay_ms = 100
        else:
            rx_delay_ms = 10

    # remove any RX filters
    rc = self._remove_rx_filters(ports, rx_delay_ms)
    if not rc:
        raise TRexError(rc)
@client_api('command', True)
def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = None):
    """
    .. _wait_on_traffic:

    Block until traffic on specified port(s) has ended

    :parameters:
        ports : list
            Ports on which to execute the command

        timeout : int
            timeout in seconds
            default will be blocking

        rx_delay_ms : int
            Time to wait (in milliseconds) after last packet was sent, until RX filters used for
            measuring flow statistics and latency are removed.
            This value should reflect the time it takes packets which were transmitted to arrive
            to the destination.
            After this time, RX filters will be removed, and packets arriving for per flow statistics feature and latency flows will be counted as errors.

    :raises:
        + :exc:`TRexTimeoutError` - in case timeout has expired
        + :exe:'TRexError'
    """
    # call the base implementation
    ports = ports if ports is not None else self.get_acquired_ports()
    ports = self.psv.validate('wait_on_traffic', ports, PSV_ACQUIRED)

    TRexClient.wait_on_traffic(self, ports, timeout)

    # virtual NICs buffer longer, so give their packets more time to land
    if rx_delay_ms is None:
        if self.ports[ports[0]].is_virtual(): # assume all ports have same type
            rx_delay_ms = 100
        else:
            rx_delay_ms = 10

    # remove any RX filters
    rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
    if not rc:
        raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def update (self, ports = None, mult = "1", total = False, force = False):
    """
    Update traffic on port(s)

    :parameters:
        ports : list
            Ports on which to execute the command

        mult : str
            Multiplier in a form of pps, bps, or line util in %
            Can also specify +/-
            Examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"

        force : bool
            If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
            True: Force start
            False: Do not force start

        total : bool
            Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
            True: Divide bandwidth among the ports
            False: Duplicate

    :raises:
        + :exc:`TRexError`
    """
    ports = ports if ports is not None else self.get_profiles_with_state("active")
    ports = self.psv.validate('update', ports, (PSV_ACQUIRED, PSV_TX))

    validate_type('mult', mult, basestring)
    validate_type('force', force, bool)
    validate_type('total', total, bool)

    # verify multiplier
    mult_obj = parsing_opts.decode_multiplier(mult,
                                              allow_update = True,
                                              divide_count = len(ports) if total else 1)
    if not mult_obj:
        raise TRexArgumentError('mult', mult)

    # call low level functions
    self.ctx.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
    rc = self._for_each_port("update", ports, mult_obj, force)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def update_streams(self, port, mult = "1", force = False, stream_ids = None):
    """
    | Temporary hack to update specific streams.
    | Do not rely on this function, might be removed in future!
    | Warning: Changing rates of specific streams causes out of sync between CP and DP regarding streams rate.
    | In order to update rate of whole port, need to revert changes made to rates of those streams.

    :parameters:
        port : int
            Port on which to execute the command

        mult : str
            Multiplier in a form of pps, bps, or line util in %
            Examples: "5kpps", "10gbps", "85%", "32mbps"

        force : bool
            If the port are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
            True: Force start
            False: Do not force start

        stream_ids : list
            Stream IDs to update

    :raises:
        + :exc:`TRexError`
    """
    validate_type('mult', mult, basestring)
    validate_type('force', force, bool)
    validate_type('stream_ids', stream_ids, list)

    ports = self.psv.validate('update_streams', port, (PSV_ACQUIRED, PSV_TX))

    if not stream_ids:
        raise TRexError('Please specify stream IDs to update')

    # verify multiplier
    mult_obj = parsing_opts.decode_multiplier(mult, allow_update = False)
    if not mult_obj:
        raise TRexArgumentError('mult', mult)

    # call low level functions
    self.ctx.logger.pre_cmd('Updating streams %s on port %s:' % (stream_ids, port))
    rc = self._for_each_port("update_streams", port, mult_obj, force, stream_ids)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def pause(self, ports = None):
    """
    Pause traffic on port(s). Works only for ports that are active, and only
    if all streams are in Continuous mode.

    :parameters:
        ports : list
            Ports on which to execute the command

    :raises:
        + :exc:`TRexError`
    """
    if ports is None:
        ports = self.get_profiles_with_state("transmitting")

    # ports must be owned and actively transmitting
    ports = self.psv.validate('pause', ports, (PSV_ACQUIRED, PSV_TX))

    self.ctx.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
    result = self._for_each_port("pause", ports)
    self.ctx.logger.post_cmd(result)

    if not result:
        raise TRexError(result)
@client_api('command', True)
@validate_port_input("port")
def pause_streams(self, port, stream_ids):
    """
    Temporary hack to pause specific streams.
    Does not change state of port.
    Do not rely on this function, might be removed in future!

    :parameters:
        port : int
            Port on which to execute the command
        stream_ids : list
            Stream IDs to pause

    :raises:
        + :exc:`TRexError`
    """
    validate_type('stream_ids', stream_ids, list)

    ports = self.psv.validate('pause_streams', port, (PSV_ACQUIRED, PSV_TX))

    if not stream_ids:
        raise TRexError('Please specify stream IDs to pause')

    self.ctx.logger.pre_cmd('Pause streams %s on port %s:' % (stream_ids, port))
    result = self._for_each_port("pause_streams", port, stream_ids)
    self.ctx.logger.post_cmd(result)

    if not result:
        raise TRexError(result)
@client_api('command', True)
@validate_port_input("ports")
def resume (self, ports = None):
    """
    Resume traffic on paused port(s).

    :parameters:
        ports : list
            Ports on which to execute the command

    :raises:
        + :exc:`TRexError`

    """
    # Default to every profile currently in the paused state.
    if ports is None:
        ports = self.get_profiles_with_state("paused")
    ports = self.psv.validate('resume', ports, (PSV_ACQUIRED, PSV_PAUSED))

    self.ctx.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
    result = self._for_each_port('resume', ports)
    self.ctx.logger.post_cmd(result)

    if not result:
        raise TRexError(result)
@client_api('command', True)
@validate_port_input("port")
def resume_streams(self, port, stream_ids):
    """
    Temporary hack to resume specific streams.
    Does not change state of port.
    Do not rely on this function, might be removed in future!

    :parameters:
        port : int
            Port on which to execute the command

        stream_ids : list
            Stream IDs to resume

    :raises:
        + :exc:`TRexError`

    """
    validate_type('stream_ids', stream_ids, list)
    # Fix: '(PSV_ACQUIRED)' is not a tuple (parentheses around a single name are
    # a no-op) — pass a real 1-tuple of states, consistent with pause_streams().
    self.psv.validate('resume_streams', port, (PSV_ACQUIRED,))

    if not stream_ids:
        raise TRexError('Please specify stream IDs to resume')

    self.ctx.logger.pre_cmd('Resume streams %s on port %s:' % (stream_ids, port))
    rc = self._for_each_port("resume_streams", port, stream_ids)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec):
    # Fan the remote-push request out to every requested port and aggregate
    # the per-port results into a single RC object.
    rc = RC()

    for pid in port_id_list:
        # Dual mode pairs each port with its XOR-adjacent port (0<->1, 2<->3, ...);
        # the paired port's handler is sent along so the server can drive it as
        # the slave side. Otherwise an empty handler is passed.
        peer_handler = self.ports[pid ^ 0x1].handler if is_dual else ""
        rc.add(self.ports[pid].push_remote(pcap_filename,
                                           ipg_usec,
                                           speedup,
                                           count,
                                           duration,
                                           is_dual,
                                           peer_handler,
                                           min_ipg_usec))

    return rc
@client_api('command', True)
def push_remote (self,
                 pcap_filename,
                 ports = None,
                 ipg_usec = None,
                 speedup = 1.0,
                 count = 1,
                 duration = -1,
                 is_dual = False,
                 min_ipg_usec = None,
                 force = False,
                 src_mac_pcap = False,
                 dst_mac_pcap = False):
    """
    Push a remote server-reachable PCAP file
    the path must be fullpath accessible to the server

    :parameters:
        pcap_filename : str
            PCAP file name in full path and accessible to the server

        ports : list
            Ports on which to execute the command

        ipg_usec : float
            Inter-packet gap in microseconds.
            Exclusive with min_ipg_usec

        speedup : float
            A factor to adjust IPG. effectively IPG = IPG / speedup

        count: int
            How many times to transmit the cap

        duration: float
            Limit runtime by duration in seconds

        is_dual: bool
            Inject from both directions.
            requires ERF file with meta data for direction.
            also requires that all the ports will be in master mode
            with their adjacent ports as slaves

        min_ipg_usec : float
            Minimum inter-packet gap in microseconds to guard from too small ipg.
            Exclusive with ipg_usec

        force : bool
            Ignore if port is active

        src_mac_pcap : bool
            Source MAC address will be taken from pcap file if True.

        dst_mac_pcap : bool
            Destination MAC address will be taken from pcap file if True.

    :raises:
        + :exc:`TRexError`

    """
    # Default to all acquired ports; pre-start check resolves/validates them
    # (and honors 'force' for busy ports).
    ports = ports if ports is not None else self.get_acquired_ports()
    ports = self.__pre_start_check('PUSH', ports, force)

    validate_type('pcap_filename', pcap_filename, basestring)
    validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
    validate_type('speedup',  speedup, (float, int))
    validate_type('count',  count, int)
    validate_type('duration', duration, (float, int))
    validate_type('is_dual', is_dual, bool)
    validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
    validate_type('src_mac_pcap', src_mac_pcap, bool)
    validate_type('dst_mac_pcap', dst_mac_pcap, bool)

    # if force - stop any active ports first (a ".*" profile ID targets every
    # profile on the port)
    if force:
        active_ports = list(set(self.get_active_ports()).intersection(ports))
        all_profiles = []
        for port in active_ports:
            profile = PortProfileID(str(port) + ".*")
            all_profiles.append(profile)
        if all_profiles:
            self.stop(all_profiles)

    # for dual mode check that all are masters: each requested port owns its
    # XOR-adjacent port as slave, and a pair must not be requested together
    if is_dual:
        if not pcap_filename.endswith('erf'):
            raise TRexError("dual mode: only ERF format is supported for dual mode")

        for port in ports:
            master = port
            slave = port ^ 0x1

            if slave in ports:
                raise TRexError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))

            if slave not in self.get_acquired_ports():
                raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))

    # overload the count in new version, workaround instead of passing new variable:
    # the top two bits of 'count' carry the src/dst-MAC-from-pcap flags, so the
    # usable repeat count is limited to 30 bits (0x3FFFFFFF).
    if count & 0xC0000000:
        raise TRexError("count is limited to 0x3fff,ffff")
    count = count & 0x3FFFFFFF
    if src_mac_pcap:
        count |= 0x80000000
    if dst_mac_pcap:
        count |= 0x40000000

    self.ctx.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
    rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec)
    self.ctx.logger.post_cmd(rc)

    if not rc:
        raise TRexError(rc)
@client_api('command', True)
def push_pcap (self,
               pcap_filename,
               ports = None,
               ipg_usec = None,
               speedup = 1.0,
               count = 1,
               duration = -1,
               force = False,
               vm = None,
               packet_hook = None,
               is_dual = False,
               min_ipg_usec = None,
               src_mac_pcap = False,
               dst_mac_pcap = False):
    """
    Push a local PCAP to the server
    This is equivalent to loading a PCAP file to a profile
    and attaching the profile to port(s)

    file size is limited to 1MB

    :parameters:
        pcap_filename : str
            PCAP filename (accessible locally)

        ports : list
            Ports on which to execute the command

        ipg_usec : float
            Inter-packet gap in microseconds.
            Exclusive with min_ipg_usec

        speedup : float
            A factor to adjust IPG. effectively IPG = IPG / speedup

        count: int
            How many times to transmit the cap

        duration: float
            Limit runtime by duration in seconds

        force: bool
            Ignore file size limit - push any file size to the server
            also ignore if port is active

        vm: list of VM instructions
            VM instructions to apply for every packet

        packet_hook : Callable or function
            Will be applied to every packet

        is_dual: bool
            Inject from both directions.
            Requires that all the ports will be in master mode
            with their adjacent ports as slaves

        min_ipg_usec : float
            Minimum inter-packet gap in microseconds to guard from too small ipg.
            Exclusive with ipg_usec

        src_mac_pcap : bool
            Source MAC address will be taken from pcap file if True.

        dst_mac_pcap : bool
            Destination MAC address will be taken from pcap file if True.

    :raises:
        + :exc:`TRexError`

    """
    # Default to all acquired ports; pre-start check resolves/validates them.
    ports = ports if ports is not None else self.get_acquired_ports()
    ports = self.__pre_start_check('PUSH', ports, force)

    validate_type('pcap_filename', pcap_filename, basestring)
    validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
    validate_type('speedup',  speedup, (float, int))
    validate_type('count',  count, int)
    validate_type('duration', duration, (float, int))
    validate_type('vm', vm, (list, type(None)))
    validate_type('is_dual', is_dual, bool)
    validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
    validate_type('src_mac_pcap', src_mac_pcap, bool)
    validate_type('dst_mac_pcap', dst_mac_pcap, bool)

    # ipg_usec and min_ipg_usec are mutually exclusive knobs.
    if all([ipg_usec, min_ipg_usec]):
        raise TRexError('Please specify either ipg or minimal ipg, not both.')

    # if force - stop any active ports before re-loading streams
    if force:
        active_ports = list(set(self.get_active_ports()).intersection(ports))
        if active_ports:
            self.stop(active_ports)

    # no support for > 1MB PCAP - use push remote (the whole file is converted
    # to streams and shipped over the RPC channel)
    file_size = os.path.getsize(pcap_filename)
    if not force and file_size > (1024 * 1024):
        file_size_str = format_num(file_size, suffix = 'B')
        url = 'https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_pcap_based_traffic'
        raise TRexError("PCAP size of {:} is too big for local push - consider using remote (-r):\n{}".format(file_size_str, url))

    # dual mode: each requested port is the master of its XOR-adjacent slave;
    # a master/slave pair must not be requested together and the slave must be owned
    if is_dual:
        for port in ports:
            master = port
            slave = port ^ 0x1

            if slave in ports:
                raise TRexError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))

            if slave not in self.get_acquired_ports():
                raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))

    # regular push
    if not is_dual:

        # create the profile from the PCAP
        try:
            self.ctx.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
            profile = STLProfile.load_pcap(pcap_filename,
                                           ipg_usec,
                                           speedup,
                                           count,
                                           vm = vm,
                                           packet_hook = packet_hook,
                                           min_ipg_usec = min_ipg_usec,
                                           src_mac_pcap = src_mac_pcap,
                                           dst_mac_pcap = dst_mac_pcap)
            self.ctx.logger.post_cmd(RC_OK())
        except TRexError as e:
            self.ctx.logger.post_cmd(RC_ERR(e))
            raise

        # replace whatever was loaded before, then start transmitting
        self.remove_all_streams(ports = ports)
        id_list = self.add_streams(profile.get_streams(), ports)

        return self.start(ports = ports, duration = duration, force = force)

    else:
        # create a dual profile: the PCAP is split into two stream sets, one
        # per direction, based on MAC addresses
        split_mode = 'MAC'

        # Synchronized dual-direction injection needs a coarse enough IPG.
        if (ipg_usec and ipg_usec < 1000 * speedup) or (min_ipg_usec and min_ipg_usec < 1000):
            self.ctx.logger.warning('In order to get synchronized traffic, ensure that effective ipg is at least 1000 usec')

        try:
            self.ctx.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
            profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
                                                        ipg_usec,
                                                        speedup,
                                                        count,
                                                        vm = vm,
                                                        packet_hook = packet_hook,
                                                        split_mode = split_mode,
                                                        min_ipg_usec = min_ipg_usec,
                                                        src_mac_pcap = src_mac_pcap,
                                                        dst_mac_pcap = dst_mac_pcap)
            self.ctx.logger.post_cmd(RC_OK())
        except TRexError as e:
            self.ctx.logger.post_cmd(RC_ERR(e))
            raise

        # masters get profile_a, slaves (XOR-adjacent ports) get profile_b
        all_ports = ports + [p ^ 0x1 for p in ports if profile_b]

        self.remove_all_streams(ports = all_ports)

        for port in ports:
            master = port
            slave = port ^ 0x1

            self.add_streams(profile_a.get_streams(), master)
            if profile_b:
                self.add_streams(profile_b.get_streams(), slave)

        # synchronized start so both directions begin together
        return self.start(ports = all_ports, duration = duration, force = force, synchronized = True)
# get stats
@client_api('getter', True)
def get_stats (self, ports = None, sync_now = True):
    """
    Fetch the full statistics snapshot for the given ports, including the
    flow-stats and latency sections.

    :parameters:
        ports: list
            Ports to query.
        sync_now: boolean
            Synchronize with the server before reading.
    """
    stats = self._get_stats_common(ports, sync_now)

    # TODO: move this to a generic protocol (AbstractStats)
    pgid_data = self.get_pgid_stats()
    if not pgid_data:
        raise TRexError(pgid_data)

    # Merge the per-pgid sections into the common stats dict.
    for section in ('flow_stats', 'latency'):
        stats[section] = pgid_data.get(section, {})

    return stats
# clear stats
@client_api('command', True)
def clear_stats (self,
                 ports = None,
                 clear_global = True,
                 clear_flow_stats = True,
                 clear_latency_stats = True,
                 clear_xstats = True):
    """
    Reset statistics on the given ports.

    :parameters:
        ports: list
        clear_global: boolean
        clear_flow_stats: boolean
        clear_latency_stats: boolean
        clear_xstats: boolean
    """
    self._clear_stats_common(ports, clear_global, clear_xstats)

    # TODO: move this to a generic protocol
    if clear_flow_stats or clear_latency_stats:
        self.pgid_stats.clear_stats(clear_flow_stats = clear_flow_stats,
                                    clear_latency_stats = clear_latency_stats)
@client_api('getter', True)
def get_active_pgids(self):
    """
    Get active packet group IDs.

    :parameters:
        None

    :returns:
        Dict with entries 'latency' and 'flow_stats'. Each entry contains a list
        of the used packet group IDs of the given type.

    :raises:
        + :exc:`TRexError`

    """
    # Pure delegation — the pgid-stats helper owns the active-pgid bookkeeping.
    return self.pgid_stats.get_active_pgids()
@client_api('getter', True)
def get_pgid_stats (self, pgid_list = None):
    """
    .. _get_pgid_stats:

    Get flow statistics for give list of pgids

    :parameters:
        pgid_list: list
            pgids to get statistics on. If empty list (or not given), get statistics for all pgids.
            Allows to get statistics for 1024 flows in one call (will return error if asking for more).
    :return:
        Return dictionary containing packet group id statistics information gathered from the server.

        ===============================  ===============
        key                              Meaning
        ===============================  ===============
        :ref:`flow_stats <flow_stats>`   Per flow statistics
        :ref:`latency <latency>`         Per flow statistics regarding flow latency
        ===============================  ===============

        Below is description of each of the inner dictionaries.

        .. _flow_stats:

        **flow_stats** contains :ref:`global dictionary <flow_stats_global>`, and dictionaries per packet group id (pg id). See structures below.

        **per pg_id flow stat** dictionaries have following structure:

        =================   ===============
        key                 Meaning
        =================   ===============
        rx_bps              Received bits per second rate
        rx_bps_l1           Received bits per second rate, including layer one
        rx_bytes            Total number of received bytes
        rx_pkts             Total number of received packets
        rx_pps              Received packets per second
        tx_bps              Transmit bits per second rate
        tx_bps_l1           Transmit bits per second rate, including layer one
        tx_bytes            Total number of sent bits
        tx_pkts             Total number of sent packets
        tx_pps              Transmit packets per second rate
        =================   ===============

        .. _flow_stats_global:

        **global flow stats** dictionary has the following structure:

        =================   ===============
        key                 Meaning
        =================   ===============
        rx_err              Number of flow statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
        tx_err              Number of flow statistics packets transmitted that we could not associate to any pg_id. This is never expected. If you see this different than 0, please report.
        =================   ===============

        .. _latency:

        **latency** contains :ref:`global dictionary <lat_stats_global>`, and dictionaries per packet group id (pg id). Each one with the following structure.

        **per pg_id latency stat** dictionaries have following structure:

        ===========================   ===============
        key                           Meaning
        ===========================   ===============
        :ref:`err_cntrs<err-cntrs>`   Counters describing errors that occurred with this pg id
        :ref:`latency<lat_inner>`     Information regarding packet latency
        ===========================   ===============

        Following are the inner dictionaries of latency

        .. _err-cntrs:

        **err-cntrs**

        =================   ===============
        key                 Meaning (see better explanation below the table)
        =================   ===============
        dropped             How many packets were dropped (estimation)
        dup                 How many packets were duplicated.
        out_of_order        How many packets we received out of order.
        seq_too_high        How many events of packet with sequence number too high we saw.
        seq_too_low         How many events of packet with sequence number too low we saw.
        =================   ===============

        For calculating packet error events, we add sequence number to each packet's payload. We decide what went wrong only according to sequence number
        of last packet received and that of the previous packet. 'seq_too_low' and 'seq_too_high' count events we see. 'dup', 'out_of_order' and 'dropped'
        are heuristics we apply to try and understand what happened. They will be accurate in common error scenarios.
        We describe few scenarios below to help understand this.

        Scenario 1: Received packet with seq num 10, and another one with seq num 10. We increment 'dup' and 'seq_too_low' by 1.

        Scenario 2: Received packet with seq num 10 and then packet with seq num 15. We assume 4 packets were dropped, and increment 'dropped' by 4, and 'seq_too_high' by 1.
        We expect next packet to arrive with sequence number 16.

        Scenario 2 continue: Received packet with seq num 11. We increment 'seq_too_low' by 1. We increment 'out_of_order' by 1. We *decrement* 'dropped' by 1.
        (We assume here that one of the packets we considered as dropped before, actually arrived out of order).

        .. _lat_inner:

        **latency**

        =================   ===============
        key                 Meaning
        =================   ===============
        average             Average latency over the stream lifetime (usec).Low pass filter is applied to the last window average.It is computed each sampling period by following formula: <average> = <prev average>/2 + <last sampling period average>/2
        histogram           Dictionary describing logarithmic distribution histogram of packet latencies. Keys in the dictionary represent range of latencies (in usec). Values are the total number of packets received in this latency range. For example, an entry {100:13} would mean that we saw 13 packets with latency in the range between 100 and 200 usec.
        jitter              Jitter of latency samples, computed as described in :rfc:`3550#appendix-A.8`
        last_max            Maximum latency measured between last two data reads from server (0.5 sec window).
        total_max           Maximum latency measured over the stream lifetime (in usec).
        total_min           Minimum latency measured over the stream lifetime (in usec).
        =================   ===============

        .. _lat_stats_global:

        **global latency stats** dictionary has the following structure:

        =================   ===============
        key                 Meaning
        =================   ===============
        old_flow            Number of latency statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
        bad_hdr             Number of latency packets received with bad latency data. This can happen because of garbage packets in the network, or if the DUT causes packet corruption.
        =================   ===============

    :raises:
        + :exc:`TRexError`

    """
    # Fix: avoid a mutable default argument ([]); None is the sentinel for
    # "all pgids" and normalizes to an empty list, preserving old behavior.
    if pgid_list is None:
        pgid_list = []

    # transform single stream
    pgid_list = listify(pgid_list)
    return self.pgid_stats.get_stats(pgid_list)
##########################
# Tagged Packet Grouping #
##########################
@staticmethod
def _validate_tpg_tag(tag, update, num_tags):
    """
    Validate a single Tagged Packet Group tag descriptor.

    :parameters:
        tag: dict
            Tag descriptor to validate.
        update: bool
            True when validating tags for an update operation. Updates allow
            type None (invalidation) and require a "tag_id" entry.
        num_tags: int
            Total number of tags; bounds the valid "tag_id" range for updates.

    :raises:
        TRexError: If the tag descriptor is malformed.
    """

    def _verify_vlan(vlan):
        """
        Verify that *vlan* is a valid 802.1Q VID.

        :parameters:
            vlan: int
                Vlan to verify

        :raises:
            TRexError: In case the Vlan is not a valid vlan
        """
        validate_type("vlan", vlan, int)
        # VIDs 0 and 4095 are reserved by 802.1Q, hence the [1, 4094] range.
        MIN_VLAN, MAX_VLAN = 1, 4094
        if not MIN_VLAN <= vlan <= MAX_VLAN:
            raise TRexError("Invalid vlan value {}, vlan must be in [{}, {}]".format(vlan, MIN_VLAN, MAX_VLAN))

    SUPPORTED_TAG_TYPES = ["Dot1Q", "QinQ"]
    if update:
        # An update may carry type None, which marks the tag as invalidated.
        SUPPORTED_TAG_TYPES.append(None)

    validate_type("tag", tag, dict)

    tag_type = tag.get("type", "-")
    if tag_type == "-":
        raise TRexError("Please provide a type field for each TPG tag!")
    if tag_type not in SUPPORTED_TAG_TYPES:
        raise TRexError("Tag type {} not supported. Supported tag types are = {}".format(tag_type, SUPPORTED_TAG_TYPES))

    tag_value = tag.get("value", None)
    if not update:
        # A value is mandatory when defining tags from scratch.
        if tag_value is None:
            raise TRexError("You must provide a value field for each TPG tag!")
        validate_type("tag_value", tag_value, (dict, type(None)))

    if tag_type == "Dot1Q":
        validate_type("tag_value", tag_value, dict)
        vlan = tag_value.get("vlan", None)
        # Test against None explicitly — a vlan of 0 is falsy yet must reach
        # _verify_vlan to be rejected with the range error.
        if vlan is None:
            raise TRexError("You must provide a vlan key for each Dot1Q tag!")
        _verify_vlan(vlan)
    elif tag_type == "QinQ":
        validate_type("tag_value", tag_value, dict)
        vlans = tag_value.get("vlans", None)
        if not vlans:
            raise TRexError("You must provide vlans key for each QinQ tag!")
        validate_type("vlans", vlans, list)
        if len(vlans) != 2:
            raise TRexError("You must provide 2 vlans for QinQ tag.")
        for vlan in vlans:
            _verify_vlan(vlan)

    if update:
        # Updates address an existing slot, so a valid tag_id is mandatory.
        tag_id = tag.get("tag_id", None)
        if tag_id is None:
            raise TRexError("You must provide a tag id when updating TPG tags.")
        validate_type("tag_id", tag_id, int)
        if not 0 <= tag_id < num_tags:
            raise TRexError("Invalid Tag Id {}. Must be in [0-{}).".format(tag_id, num_tags))
@client_api('command', True)
def enable_tpg(self, num_tpgids, tags, rx_ports = None):
    """
    Enable Tagged Packet Grouping.

    This method has 3 phases:

    1. Enable TPG in Control Plane and send message to Rx to allocate memory.
    2. Wait until Rx finishes allocating.
    3. Enable the feature in Data Plane.

    :parameters:
        num_tpgids: uint32
            Number of Tagged Packet Groups that we are expecting to send. The number is an upper bound, and tpgids
            should be in *[0, num_tpgids)*.

            .. note:: This number is important in allocating server memory, hence be careful with it.

        tags: list
            List of dictionaries that represents the mapping of actual tags to tag ids.

            .. highlight:: python
            .. code-block:: python

                [
                    {
                        "type": "Dot1Q",
                        "value": {
                            "vlan": 5,
                        }
                    },
                    {
                        "type": "QinQ",
                        "value": {
                            "vlans": [20, 30]
                        }
                    }
                ]

            Currently supports only **Dot1Q**, **QinQ** tags. In our example, Dot1Q (5) is mapped to tag id 0,
            and QinQ (20, 30) is mapped to tag id 1 and so on.

            Each dictionary should be of the following format:

            ===============================  ===============
            key                              Meaning
            ===============================  ===============
            type                             String that represents type of tag, only **Dot1Q** and **QinQ** supported at the moment.
            value                            Dictionary that contains the value for the tag. Differs on each tag type.
            ===============================  ===============

        rx_ports: list
            List of rx ports on which we gather Tagged Packet Group Statistics. Optional. If not provided,
            data will be gathered on all acquired ports.
    """
    acquired_ports = self.get_acquired_ports()
    rx_ports = rx_ports if rx_ports is not None else acquired_ports
    self.psv.validate('enable_tpg', rx_ports)
    validate_type("num_tpgids", num_tpgids, int)
    validate_type("tags", tags, list)
    for tag in tags:
        STLClient._validate_tpg_tag(tag, update=False, num_tags=len(tags))

    # Validate that Rx ports are included in Acquired Ports
    if not set(rx_ports).issubset(set(acquired_ports)):
        raise TRexError("TPG Rx Ports {} must be acquired".format(rx_ports))

    self.ctx.logger.pre_cmd("Enabling Tagged Packet Group")

    # Invalidate the cached get_tpg_status() result — it is about to change.
    self.tpg_status = None

    # Phase 1: enable TPG in CP; Rx starts allocating asynchronously.
    params = {
        "num_tpgids": num_tpgids,
        "ports": acquired_ports,
        "rx_ports": rx_ports,
        "username": self.ctx.username,
        "session_id": self.ctx.session_id,
        "tags": tags
    }

    rc = self._transmit("enable_tpg", params=params)
    if not rc:
        self.ctx.logger.post_cmd(rc)
        raise TRexError(rc)

    # Phase 2: poll the server until Rx finishes allocating.
    # NOTE(review): this is a busy-poll with no sleep between iterations —
    # consider adding a short delay; confirm server-side polling cost.
    tpg_state = TPGState(TPGState.DISABLED)
    while tpg_state != TPGState(TPGState.ENABLED_CP_RX):
        rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
        if not rc:
            self.ctx.logger.post_cmd(rc)
            raise TRexError(rc)
        tpg_state = TPGState(rc.data())
        if tpg_state.is_error_state():
            # Allocation failed server-side — roll back what was enabled so far.
            self.disable_tpg(surpress_log=True)
            self.ctx.logger.post_cmd(False)
            raise TRexError(tpg_state.get_fail_message())

    # Phase 3: enable TPG in DP (synchronous call).
    rc = self._transmit("enable_tpg", params={"username": self.ctx.username})
    if not rc:
        # DP enable failed — query the state once more to find out why.
        rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
        if not rc:
            self.ctx.logger.post_cmd(rc)
            raise TRexError(rc)
        tpg_state = TPGState(rc.data())
        if tpg_state.is_error_state():
            # Known failure state — roll back and surface the server's message.
            self.disable_tpg(surpress_log=True)
            self.ctx.logger.post_cmd(False)
            raise TRexError(tpg_state.get_fail_message())
        else:
            # DP enable failed but the server reports no error state.
            raise TRexError("TPG enablement failed but server doesn't indicate of errors.")

    self.ctx.logger.post_cmd(rc)
@client_api('command', True)
def disable_tpg(self, username=None, surpress_log=False):
    """
    Disable Tagged Packet Grouping.

    This method has 2 phases.

    1. Disable TPG in DPs and CP. Send a message to Rx to start deallocating.
    2. Wait until Rx finishes deallocating, then destroy the context.

    :parameters:
        username: string
            Username whose TPG context we want to disable. Optional. If not provided, we disable for the calling user.

        surpress_log: bool
            Suppress logs, in case disable TPG is run as a subroutine. Defaults to False.
            (The misspelled parameter name is kept — it is part of the public API
            and callers such as enable_tpg pass it by keyword.)
    """
    # Invalidate the cached get_tpg_status() result — it is about to change.
    self.tpg_status = None
    if not surpress_log:
        self.ctx.logger.pre_cmd("Disabling Tagged Packet Group")

    # Phase 1: the first disable RPC only tells the server to start deallocating
    # the memory; it does not mean deallocation has finished.
    username = self.ctx.username if username is None else username
    rc = self._transmit("disable_tpg", params={"username": username})

    if not rc:
        raise TRexError(rc)

    # Phase 2: poll until DP and Rx report they are disabled.
    # NOTE(review): busy-poll with no sleep between iterations — consider a
    # short delay; confirm server-side polling cost.
    tpg_state = TPGState(TPGState.ENABLED)
    while tpg_state != TPGState(TPGState.DISABLED_DP_RX):
        rc = self._transmit("get_tpg_state", params={"username": username})
        if not rc:
            raise TRexError(rc)
        tpg_state = TPGState(rc.data())

    # State is set to TPGState.DISABLED_DP_RX, we can proceed to destroying the context.
    rc = self._transmit("disable_tpg", params={"username": username})

    if not rc:
        raise TRexError(rc)

    if not surpress_log:
        self.ctx.logger.post_cmd(rc)
@client_api('getter', True)
def get_tpg_status(self, username=None, port=None):
    """
    Get Tagged Packet Group Status from the server.

    The status can be collected either for a user or for a port; when neither
    is given, it is collected for the calling user (and the answer is cached).

    .. note:: Only one between username and port should be provided.

    :parameters:
        username: str
            Username whose TPG status we want to check. Optional. In case it isn't provided,
            the username that runs the command will be used.

        port: uint8
            Port whose TPG status we want to check. Optional.

    :returns:
        dict: Tagged Packet Group Status from the server, e.g.:

        .. highlight:: python
        .. code-block:: python

            {
                "enabled": true,
                "data": {
                    "rx_ports": [1],
                    "acquired_ports": [0, 1],
                    "num_tpgids": 3,
                    "num_tags": 10,
                    "username": "bdollma"
                }
            }

        ===============================  ===============
        key                              Meaning
        ===============================  ===============
        enabled                          Boolean indicated if TPG is enabled/disabled.
        rx_ports                         Ports on which TPG is collecting stats. Relevant only if TPG is enabled.
        acquired_ports                   Ports on which TPG can transmit. Relevant only if TPG is enabled.
        num_tpgids                       Number of Tagged Packet Groups. Relevant only if TPG is enabled.
        num_tags                         Number of Tagged Packet Group Tags. Relevant only if TPG is enabled.
        username                         User that owns this instance of TPG. Relevant only if TPG is enabled.
        ===============================  ===============
    """
    # The cache holds only the answer for the default query (no explicit
    # username/port), so explicit queries never poison it.
    cache_usable = (username is None and port is None)
    if cache_usable and self.tpg_status is not None:
        return self.tpg_status

    if port is None:
        effective_user = self.ctx.username if username is None else username
        params = {"username": effective_user}
    else:
        self.psv.validate('get_tpg_status', [port])
        if username is not None:
            raise TRexError("Should provide only one between port and username for TPG status.")
        params = {"port_id": port}

    rc = self._transmit("get_tpg_status", params=params)
    if not rc:
        raise TRexError(rc)

    status = rc.data()
    if cache_usable:
        self.tpg_status = status
    return status
@client_api('command', True)
def update_tpg_tags(self, new_tags, clear=False):
    """
    Update Tagged Packet Grouping Tags.

    :parameters:
        new_tags: list
            List of dictionaries that represents the tags to replace.

            .. highlight:: python
            .. code-block:: python

                [
                    {
                        "type": "Dot1Q",
                        "value": {
                            "vlan": 5,
                        }
                        "tag_id": 20
                    },
                    {
                        "type": "QinQ",
                        "value": {
                            "vlans": [20, 30]
                        }
                        "tag_id": 7
                    },
                    {
                        "type": None,
                        "tag_id": 0
                    }
                ]

            Currently supports only **Dot1Q**, **QinQ**, **None** types. In our example, tag_id 20 is now replaced with Dot1Q(5).
            Note that Dot1Q(5) must not be present, or at least invalidated in one of the previous entries.
            When the type is None, it invalidates the tag.

            Each dictionary should be of the following format:

            ===============================  ===============
            key                              Meaning
            ===============================  ===============
            type                             String that represents type of tag, only **Dot1Q**, **QinQ** or None supported at the moment.
            value                            Dictionary that contains the value for the tag. Differs on each tag type. Not needed in case of None.
            tag_id                           The tag id that this new tag is going to have.
            ===============================  ===============

        clear: bool
            Clear stats for the tags we updated. Defaults to False.

            .. note:: This can take some time, since we need to clear the stats in all the receiveing ports for all tpgids.
    """
    def clear_update(self, port, min_tpgid, max_tpgid, tag_list):
        # Clear the updated tags' stats on one port for tpgids in [min_tpgid, max_tpgid).
        params = {
            "username": self.ctx.username,
            "port_id": port,
            "min_tpgid": min_tpgid,
            "max_tpgid": max_tpgid,
            "tag_list": tag_list
        }
        self._transmit("clear_updated", params=params)

    self.ctx.logger.pre_cmd("Updating Tagged Packet Group Tags")
    validate_type("new_tags", new_tags, list)
    tpg_status = self.get_tpg_status()
    if not tpg_status["enabled"]:
        raise TRexError("Tagged Packet Group is not enabled.")
    num_tags = tpg_status["data"]["num_tags"]
    for tag in new_tags:
        STLClient._validate_tpg_tag(tag, update=True, num_tags=num_tags)
    rc = self._transmit("update_tpg_tags", params={"username": self.ctx.username, "tags": new_tags})
    self.ctx.logger.post_cmd(rc)
    if not rc:
        raise TRexError(rc)

    if clear and new_tags:
        # Guard on new_tags also avoids a ZeroDivisionError below when the
        # update list is empty (there is nothing to clear anyway).
        tag_list = [tag["tag_id"] for tag in new_tags]
        rx_ports = tpg_status["data"]["rx_ports"]
        num_tpgids = tpg_status["data"]["num_tpgids"]
        NUM_STATS_CHUNK = 2048
        # Chunk the tpgid range so each RPC touches at most ~NUM_STATS_CHUNK
        # (tpgid, tag) entries; always advance by at least one tpgid so the
        # loop terminates even when len(tag_list) > NUM_STATS_CHUNK.
        TPGID_CHUNK_SIZE = max(1, NUM_STATS_CHUNK // len(tag_list))
        for port in rx_ports:
            # Bug fix: the tpgid cursor must restart at 0 for every rx port.
            # Previously it was initialized once before this loop, so stats
            # were actually cleared only on the first rx port.
            min_tpgid = 0
            while min_tpgid != num_tpgids:
                max_tpgid = min(min_tpgid + TPGID_CHUNK_SIZE, num_tpgids)
                clear_update(self, port, min_tpgid, max_tpgid, tag_list)
                min_tpgid = max_tpgid
@client_api('getter', True)
def get_tpg_tags(self, min_tag = 0, max_tag = None, username = None, port = None):
    """
    Get Tagged Packet Group Tags from the server. It will return as a list starting from
    *min_tag* until *max_tag*. If not provided, we will collect for all tags.
    We can collect the TPG status for a user or for a port.
    If no parameters are provided we will collect for the calling user.

    :parameters:
        min_tag: int
            Minimal tag to collect the tag for. Optional. If not provided, we will start from 0.

        max_tag: int
            Maximal tag to collect the tag for. Defaults to None. If not provided, we will collect
            for the max possible tag.

        username: str
            Username whose TPG status we want to check. Optional. In case it isn't provided,
            the username that runs the command will be used.

        port: uint8
            Port whose TPG status we want to check. Optional.

    :returns:
        list: Tagged Packet Group Tags from the server. At index *i* in the list we can find the descripton
        for tag number *i*. If the value is None, it means that this tag index was invalidated.

        .. highlight:: python
        .. code-block:: python

            [
                {
                    "type": "Dot1Q",
                    "value": {
                        "vlan": 7
                    }
                },
                None,
                {
                    "type": "QinQ",
                    "value": {
                        "vlans": [1, 11]
                    }
                }
            ]
    """
    # Tags are fetched in chunks of this size to bound each RPC's payload.
    CHUNK_SIZE = 500

    def get_tpg_tags_chunk(self, params):
        """
        Assumes that the amount of tags requested is at most CHUNKS_SIZE.
        """
        rc = self._transmit("get_tpg_tags", params=params)
        if not rc:
            raise TRexError(rc)
        return rc.data()

    validate_type("min_tag", min_tag, int)
    validate_type("max_tag", max_tag, (int, type(None)))
    validate_type("username", username, (str, type(None)))
    # get_tpg_status() also enforces that username/port are not both given.
    tpg_status = self.get_tpg_status(username=username, port=port)
    if not tpg_status["enabled"]:
        raise TRexError("Tagged Packet Group is not enabled.")
    num_tags = tpg_status["data"]["num_tags"]
    if max_tag is None:
        max_tag = num_tags
    if max_tag > num_tags:
        raise TRexError("Max Tag {} must be less than number of tags defined: {}".format(max_tag, num_tags))
    if min_tag > max_tag:
        raise TRexError("Min Tag {} must be less than Max Tag {}".format(min_tag, max_tag))
    params = {}
    if port is None:
        params = {"username": self.ctx.username if username is None else username}
    else:
        self.psv.validate('get_tpg_tags', [port])
        if username is not None:
            raise TRexError("Should provide only one between port and username for get_tpg_tags.")
        params = {"port_id": port}
    tpg_tags = [] # List that will contain all tags
    # Paginate over [min_tag, max_tag): each iteration requests the half-open
    # range [min_tag, current_max_tag) and then advances the cursor.
    current_max_tag = 0
    while current_max_tag != max_tag:
        current_max_tag = min(max_tag, min_tag + CHUNK_SIZE)
        params["min_tag"], params["max_tag"] = min_tag, current_max_tag
        tpg_tags += get_tpg_tags_chunk(self, params)
        min_tag = current_max_tag
    return tpg_tags
@client_api('getter', True)
def get_tpg_stats(self, port, tpgid, min_tag, max_tag, max_sections = 50, unknown_tag = False, untagged = False):
    """
    Get Tagged Packet Group statistics that are received in this port,
    for this Tagged Packet Group Identifier in [min, max) tag_range.

    :parameters:
        port: uint8
            Port on which we collect the stats.
        tpgid: uint32
            Tagged Packet Group Identifier whose stats we are interested to collect.
        min_tag: uint16
            Minimal Tag to collect stats for.
        max_tag: uint16
            Maximal Tag to collect stats for. Non inclusive.
        max_sections: int
            Maximal sections to collect in the stats. Defaults to 50.

            .. note:: If we have the same stats for two consequent tags, their values will assembled into one section
                in order to compress the stats. The common use case is that stats are the same on each tag, hence the compression is effective.
                If all the tags from *[min-max)* can be compressed in less than *max_sections*, we will get all tags
                from [min-max), otherwise we will get *max_sections* entries in the stats dictionary.

        unknown_tag: bool
            Get the stats of packets received with this tpgid but with a tag that isn't provided in the mapping,
            i.e an unknown tag.
        untagged: bool
            Get the stats of packets received with this tpgid but without any tag.

    :returns:
        (dict, uint16): Stats collected the next tag to start collecting from (relevant if not all the data was collected)
        Dictionary contains Tagged Packet Group statistics gathered from the server. For example:

        .. highlight:: python
        .. code-block:: python

            print(get_tpg_stats(port=3, tpgid=1, min_tag=0, max_tag=4000, unknown_tag=True)[0])

            {'3': {'1': {
                '0-200': {'bytes': 0,
                          'dup': 0,
                          'ooo': 0,
                          'pkts': 0,
                          'seq_err': 0,
                          'seq_err_too_big': 0,
                          'seq_err_too_small': 0
                },
                '201': {'bytes': 204,
                        'dup': 0,
                        'ooo': 0,
                        'pkts': 3,
                        'seq_err': 2,
                        'seq_err_too_big': 1,
                        'seq_err_too_small': 0},
                '202-3999': {'bytes': 0,
                             'dup': 0,
                             'ooo': 0,
                             'pkts': 0,
                             'seq_err': 0,
                             'seq_err_too_big': 0,
                             'seq_err_too_small': 0},
                'untagged': {'bytes': 0,
                             'dup': 0,
                             'ooo': 0,
                             'pkts': 0,
                             'seq_err': 0,
                             'seq_err_too_big': 0,
                             'seq_err_too_small': 0},
                'unknown_tag': {'bytes': 0,
                                'pkts': 0}}}}

        The returned data is separated per port and per tpgid, so it can be easily merged with data from other ports/tpgids.
        In this example we can see that all the data is compressed in 3 sections (excluding the *unknown_tag* and *untagged*.).

        uint16: Indicates the next tag to start collecting from. In case all the tags were collected this will equal *max_tag*.
        In case the user provided min_tag = max tag, the user collected only unknown or untagged, hence this will be None.
    """
    self.psv.validate('get_tpg_stats', [port])
    validate_type("tpgid", tpgid, int)
    validate_type("min_tag", min_tag, int)
    validate_type("max_tag", max_tag, int)
    validate_type("max_sections", max_sections, int)
    validate_type("unknown_tag", unknown_tag, bool)
    validate_type("untagged", untagged, bool)
    if min_tag > max_tag:
        raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
    if min_tag == max_tag and not untagged and not unknown_tag:
        raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")

    # Nested helper; takes self explicitly since it is a plain local function,
    # not a bound method of the class.
    def get_tpg_stats_section(self, port, tpgid, min_tag, max_tag, unknown_tag, untagged):
        """
        Get TPGID stats from the server for one section only.

        :parameters:
            port: uint8
                Port on which we collected the stats.
            tpgid: uint32
                Tagged Packet Group Identifier for the group we collect stats.
            min_tag: uint16
                Min Tag to collect stats for.
            max_tag: uint16
                Max Tag to collect stats for.
            unknown_tag: bool
                Collect stats of unknown tags.
            untagged: bool
                Collect stats of untagged packets.

        :returns:
            dict: Stats of one section collected from the server.
        """
        params = {
            "port_id": port,
            "tpgid": tpgid,
            "min_tag": min_tag,
            "max_tag": max_tag,
            "unknown_tag": unknown_tag,
            "untagged": untagged
        }
        rc = self._transmit("get_tpg_stats", params=params)
        if not rc:
            raise TRexError(rc)
        return rc.data()

    def _get_next_min_tag(section_stats, port, tpgid):
        """
        Calculate the next min value based on the stats we received until now.

        :parameters:
            section_stats: dict
                The latest stats as received by the server.
            port: uint8
                Port on which we collected the stats.
            tpgid: uint32
                Tagged Packet Group Identifier for the group we collect stats.

        :returns:
            uint32: The next value to use as a minimal tag.
        """
        tpgid_stats = section_stats[str(port)][str(tpgid)]
        # Keys of tpgid_stats can be:
        # 1. "unknown", "untagged"
        # 2. "min_tag-new_min_tag"
        # 3. "min_tag"
        # NOTE(review): assumes the server returns at most one tag-range key per
        # section (plus optional unknown/untagged keys) — the first tag key found
        # determines the next minimum. Confirm against the server implementation.
        for key in tpgid_stats.keys():
            if "unknown" in key or "untagged" in key:
                continue
            elif "-" in key:
                return int(key.split("-")[1]) + 1 # return the second value, add one for the new minimum
            else:
                return (int(key)) + 1
        return None  # only unknown/untagged keys present -> nothing more to collect

    # Initialize some variables
    stats = {}
    sections = 0
    done = False
    _min_tag = min_tag
    # Loop until finished or reached max sections
    while not done and sections < max_sections:
        # Collect one section of stats from the server
        section_stats = get_tpg_stats_section(self, port, tpgid, _min_tag, max_tag, unknown_tag, untagged)
        # Calculate the next min tag.
        _min_tag = _get_next_min_tag(section_stats, port, tpgid)
        if _min_tag is None or _min_tag == max_tag:
            done = True
        if not stats:
            # First section, set the stats dictionary
            stats = section_stats
        else:
            # Update the stats dictionary with new sections
            tpgid_stats = stats[str(port)][str(tpgid)]
            new_tpgid_stats = section_stats[str(port)][str(tpgid)]
            tpgid_stats.update(new_tpgid_stats)
        unknown_tag = False # after the first iteration set unknown_tag to False
        untagged = False # after the first iteration set untagged to False
        sections += 1
    return (stats, _min_tag)
@client_api('command', True)
def clear_tpg_stats(self, port, tpgid, min_tag = 0, max_tag = None, tag_list = None, unknown_tag = False, untagged = False):
    """
    Clear Packet Group Identifier statistics that are received in this port,
    for this Tagged Packet Group Identifier in [min, max) tag_range.

    :parameters:
        port: uint8
            Port whose stats should be cleared.
        tpgid: uint32
            Tagged Packet Group Identifier whose stats should be cleared.
        min_tag: uint16
            Minimal Tag to clear stats for. Defaults to 0.
        max_tag: uint16
            Maximal Tag to clear stats for (non inclusive). Defaults to None. Exclusive to *tag_list*.
        tag_list: list or None
            Explicit list of tags to clear; takes precedence over the [min-max) range. Exclusive to *max_tag*.
        unknown_tag: bool
            Also clear stats of packets received with this tpgid but with an unmapped (unknown) tag.
        untagged: bool
            Also clear stats of packets received with this tpgid but without any tag.
    """
    self.ctx.logger.pre_cmd("Clearing TPG stats")
    self.psv.validate('clear_tpg_tx_stats', [port])
    # Validate all argument types in one pass.
    for arg_name, arg_val, arg_types in (("tpgid", tpgid, int),
                                         ("min_tag", min_tag, int),
                                         ("max_tag", max_tag, (int, type(None))),
                                         ("tag_list", tag_list, (list, type(None))),
                                         ("unknown_tag", unknown_tag, bool),
                                         ("untagged", untagged, bool)):
        validate_type(arg_name, arg_val, arg_types)
    # Exactly one of max_tag / tag_list must be supplied (logical XOR).
    if (max_tag is not None) == bool(tag_list):
        raise TRexError("One between max_tag and tag_list must be provided.")
    if max_tag is not None:
        if min_tag > max_tag:
            raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
        if min_tag == max_tag and not untagged and not unknown_tag:
            raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
    if tag_list:
        for tag in tag_list:
            validate_type("tag", tag, int)
            if tag < 0:
                raise TRexError("Invalid tag {}. Tag must be positive.".format(tag))
    rc = self._transmit("clear_tpg_stats", params={
        "port_id": port,
        "tpgid": tpgid,
        "min_tag": min_tag,
        "max_tag": max_tag,
        "tag_list": tag_list or None,  # normalize empty list to None as well
        "unknown_tag": unknown_tag,
        "untagged": untagged,
    })
    self.ctx.logger.post_cmd(rc)
    if not rc:
        raise TRexError(rc)
@client_api('getter', True)
def get_tpg_tx_stats(self, port, tpgid):
    """
    Get Tagged Packet Group statistics *transmitted* on a port for a given
    Tagged Packet Group Identifier.

    :parameters:
        port: uint8
            Port that transmits TPG packets.
        tpgid: uint32
            Tagged Packet Group Identifier.

    :returns:
        dict: TPG Tx statistics from the server, keyed by port then tpgid, e.g.::

            {'0': {'1': {'bytes': 0, 'pkts': 0}}}
    """
    self.psv.validate('get_tpg_tx_stats', [port])
    validate_type("tpgid", tpgid, int)
    request_params = {"port_id": port, "tpgid": tpgid}
    result = self._transmit("get_tpg_tx_stats", params=request_params)
    if result:
        return result.data()
    raise TRexError(result)
@client_api('command', True)
def clear_tpg_tx_stats(self, port, tpgid):
    """
    Clear the *transmit* side Tagged Packet Group statistics on a port
    for a given Tagged Packet Group Identifier.

    :parameters:
        port: uint8
            Port that transmits TPG packets.
        tpgid: uint32
            Tagged Packet Group Identifier.
    """
    self.ctx.logger.pre_cmd("Clearing TPG Tx stats")
    self.psv.validate('clear_tpg_tx_stats', [port])
    validate_type("tpgid", tpgid, int)
    request_params = {"port_id": port, "tpgid": tpgid}
    result = self._transmit("clear_tpg_tx_stats", params=request_params)
    self.ctx.logger.post_cmd(result)
    if result:
        return
    raise TRexError(result)
@client_api('getter', True)
def get_tpg_unknown_tags(self, port):
    """
    Get the Tagged Packet Group *unknown* tags seen on a port.

    :parameters:
        port: uint8
            Port on which TPG stats are collected.

    :returns:
        dict: Unknown tags gathered per port, e.g.::

            {'1': [
                {'tag': {'type': 'Dot1Q', 'value': {'vlan': 12}}, 'tpgid': 12},
                {'tag': {'type': 'QinQ', 'value': {'vlans': [1, 100]}}, 'tpgid': 0},
                {'tag': {'type': 'Dot1Q', 'value': {'vlan': 11}}, 'tpgid': 11}
            ]}
    """
    self.psv.validate('get_tpg_unknown_tags', [port])
    result = self._transmit("get_tpg_unknown_tags", params={"port_id": port})
    if result:
        return result.data()
    raise TRexError(result)
@client_api('command', True)
def clear_tpg_unknown_tags(self, port):
    """
    Clear the Tagged Packet Group *unknown* tags recorded on a port.

    :parameters:
        port: uint8
            Port on which TPG packets are collected.
    """
    self.ctx.logger.pre_cmd("Clearing TPG unknown tags")
    self.psv.validate('clear_tpg_unknown_tags', [port])
    result = self._transmit("clear_tpg_unknown_tags", params={"port_id": port})
    self.ctx.logger.post_cmd(result)
    if result:
        return
    raise TRexError(result)
############################ console #############################
############################ commands #############################
############################ #############################
def _show_streams_stats(self, buffer = sys.stdout):
    # Display stream stats for at most 4 packet-group ids, preferring
    # latency PG IDs, then filling up with flow-stats PG IDs.
    active = self.get_active_pgids()
    selected = active['latency'][:4]
    selected = selected + active['flow_stats'][:4 - len(selected)]
    stats_table = self.pgid_stats.streams_stats_to_table(selected)
    text_tables.print_table_with_header(stats_table, stats_table.title, buffer = buffer)
def _show_latency_stats(self, buffer = sys.stdout):
    # Display latency stats for at most 5 latency packet-group ids.
    selected = self.get_active_pgids()['latency'][:5]
    stats_table = self.pgid_stats.latency_stats_to_table(selected)
    text_tables.print_table_with_header(stats_table, stats_table.title, buffer = buffer)
def _show_latency_histogram(self, buffer = sys.stdout):
    # Display the latency histogram for at most 5 latency packet-group ids.
    selected = self.get_active_pgids()['latency'][:5]
    histogram_table = self.pgid_stats.latency_histogram_to_table(selected)
    text_tables.print_table_with_header(histogram_table, histogram_table.title, buffer = buffer)
@console_api('reset', 'common', True)
def reset_line (self, line):
    '''Reset ports'''
    # Build the command-line parser for the "reset" console command.
    parser = parsing_opts.gen_parser(
        self, "reset", self.reset_line.__doc__,
        parsing_opts.PORT_LIST_WITH_ALL,
        parsing_opts.PORT_RESTART)
    args = parser.parse_args(line.split(),
                             default_ports = self.get_acquired_ports(),
                             verify_acquired = True)
    self.reset(ports = args.ports, restart = args.restart)
    return True
@console_api('acquire', 'common', True)
def acquire_line (self, line):
    '''Acquire ports\n'''
    # Build the command-line parser for the "acquire" console command.
    parser = parsing_opts.gen_parser(
        self, "acquire", self.acquire_line.__doc__,
        parsing_opts.PORT_LIST_WITH_ALL,
        parsing_opts.FORCE)
    args = parser.parse_args(line.split(), default_ports = self.get_all_ports())
    # Skip ports we already own; fail only if nothing is left to acquire.
    wanted = list_difference(args.ports, self.get_acquired_ports())
    if not wanted:
        raise TRexError("acquire - all of port(s) {0} are already acquired".format(args.ports))
    self.acquire(ports = wanted, force = args.force)
    # show time if success
    return True
@console_api('release', 'common', True)
def release_line (self, line):
    '''Release ports\n'''
    # Build the command-line parser for the "release" console command.
    parser = parsing_opts.gen_parser(
        self, "release", self.release_line.__doc__,
        parsing_opts.PORT_LIST_WITH_ALL)
    args = parser.parse_args(line.split(), default_ports = self.get_acquired_ports())
    # Only ports we actually own can be released.
    owned = list_intersect(args.ports, self.get_acquired_ports())
    if not owned:
        msg = "no acquired ports" if not args.ports else "none of port(s) {0} are acquired".format(args.ports)
        raise TRexError(msg)
    self.release(ports = owned)
    # show time if success
    return True
@console_api('stats', 'common', True)
def show_stats_line (self, line):
    '''Show various statistics\n'''
    # Build the command-line parser for the "stats" console command.
    parser = parsing_opts.gen_parser(self,
                                     "stats",
                                     self.show_stats_line.__doc__,
                                     parsing_opts.PORT_LIST_WITH_ALL,
                                     parsing_opts.STL_STATS)
    opts = parser.parse_args(line.split())
    # No explicit category: show only the global and per-port summaries.
    if not opts.stats:
        self._show_global_stats()
        self._show_port_stats(opts.ports)
        return
    # Dispatch table mapping the requested category to its display routine.
    handlers = {
        'global':            self._show_global_stats,
        'ports':             lambda: self._show_port_stats(opts.ports),
        'xstats':            lambda: self._show_port_xstats(opts.ports, False),
        'xstats_inc_zero':   lambda: self._show_port_xstats(opts.ports, True),
        'status':            lambda: self._show_port_status(opts.ports),
        'cpu':               self._show_cpu_util,
        'mbuf':              self._show_mbuf_util,
        'streams':           self._show_streams_stats,
        'latency':           self._show_latency_stats,
        'latency_histogram': self._show_latency_histogram,
    }
    handler = handlers.get(opts.stats)
    if handler is None:
        raise TRexError('Unhandled stats: %s' % opts.stats)
    handler()
def _get_profiles(self, port_id_list):
profiles_per_port = OrderedDict()
for port_id in port_id_list:
data = self.ports[port_id].generate_loaded_profiles()
if data:
profiles_per_port[port_id] = data
return profiles_per_port
def _get_streams(self, port_id_list, streams_mask, table_format):
streams_per_port = OrderedDict()
for port_id in port_id_list:
data = self.ports[port_id].generate_loaded_streams_sum(streams_mask, table_format)
if data:
streams_per_port[port_id] = data
return streams_per_port
@console_api('profiles', 'STL', True, True)
def profiles_line(self, line):
    '''Get loaded to server profiles information'''
    # Build the command-line parser for the "profiles" console command.
    parser = parsing_opts.gen_parser(self,
                                     "profiles",
                                     self.profiles_line.__doc__,
                                     parsing_opts.PORT_LIST_WITH_ALL)
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    per_port = self._get_profiles(opts.ports)
    if not per_port:
        self.logger.info(format_text("No profiles found with desired filter.\n", "bold", "magenta"))
    # Print one table per port that actually has loaded profiles.
    for port_id, profiles_table in per_port.items():
        if not profiles_table:
            continue
        text_tables.print_table_with_header(profiles_table,
                                            header = 'Port %s:' % port_id)
@console_api('streams', 'STL', True, True)
def streams_line(self, line):
    '''Get loaded to server streams information'''
    parser = parsing_opts.gen_parser(self,
                                     "streams",
                                     self.streams_line.__doc__,
                                     parsing_opts.PORT_LIST_WITH_ALL,
                                     parsing_opts.STREAMS_MASK,
                                     parsing_opts.STREAMS_CODE)
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # opts.code drives three modes:
    #   None         -> print a summary table per port
    #   "<file>.py"  -> save Python code that regenerates the streams to file(s)
    #   ""           -> print the generated Python code to stdout
    streams_per_port = self._get_streams(opts.ports, set(opts.ids), table_format = opts.code is None)
    if not streams_per_port:
        self.logger.info(format_text("No streams found with desired filter.\n", "bold", "magenta"))
    elif opts.code is None: # Just print the summary table of streams
        for port_id, port_streams_table in streams_per_port.items():
            if port_streams_table:
                text_tables.print_table_with_header(port_streams_table,
                                                    header = 'Port %s:' % port_id)
    elif opts.code: # Save the code that generates streams to file
        if not opts.code.endswith('.py'):
            raise TRexError('Saved filename should end with .py')
        # With several ports, write one file per port ("<base>_port<N>.py").
        is_several_ports = len(streams_per_port) > 1
        if is_several_ports:
            print(format_text('\nWarning: several ports specified, will save in separate file per port.', 'bold'))
        for port_id, port_streams_data in streams_per_port.items():
            if not port_streams_data:
                print('No streams to save at port %s, skipping.' % port_id)
                continue
            filename = ('%s_port%s.py' % (opts.code[:-3], port_id)) if is_several_ports else opts.code
            # Ask before clobbering an existing file.
            if os.path.exists(filename):
                sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
                ans = user_input().strip()
                if ans.lower() not in ('y', 'yes'):
                    print('Not saving.')
                    continue
            self.logger.pre_cmd('Saving file as: %s' % filename)
            try:
                profile = STLProfile(list(port_streams_data.values()))
                with open(filename, 'w') as f:
                    f.write(profile.dump_to_code())
            except Exception as e:
                # Best-effort save: report the failure and move to the next port.
                self.logger.post_cmd(False)
                print(e)
                print('')
            else:
                self.logger.post_cmd(True)
    else: # Print the code that generates streams
        for port_id, port_streams_data in streams_per_port.items():
            if not port_streams_data:
                continue
            print(format_text('Port: %s' % port_id, 'cyan', 'underline') + '\n')
            for stream_id, stream in port_streams_data.items():
                print(format_text('Stream ID: %s' % stream_id, 'cyan', 'underline'))
                print(' ' + '\n '.join(stream.to_code().splitlines()) + '\n')
@console_api('push', 'STL', True)
def push_line(self, line):
    '''Push a pcap file '''
    # Options shared by both parse passes (the file-path option differs).
    common_args = [self,
                   "push",
                   self.push_line.__doc__,
                   parsing_opts.REMOTE_FILE,
                   parsing_opts.PORT_LIST_WITH_ALL,
                   parsing_opts.COUNT,
                   parsing_opts.DURATION,
                   parsing_opts.IPG,
                   parsing_opts.MIN_IPG,
                   parsing_opts.SPEEDUP,
                   parsing_opts.FORCE,
                   parsing_opts.DUAL,
                   parsing_opts.SRC_MAC_PCAP,
                   parsing_opts.DST_MAC_PCAP]

    def _parse(file_opt):
        parser = parsing_opts.gen_parser(*(common_args + [file_opt]))
        return parser.parse_args(line.split(), verify_acquired = True)

    # First pass: do not verify the file exists (it may live on the server).
    opts = _parse(parsing_opts.FILE_PATH_NO_CHECK)
    if not opts:
        return opts
    if not opts.remote:
        # Local push: re-parse with an existence check on the pcap path.
        opts = _parse(parsing_opts.FILE_PATH)
        if not opts:
            return opts
    # Both push flavors take the same keyword arguments.
    push = self.push_remote if opts.remote else self.push_pcap
    push(opts.file[0],
         ports = opts.ports,
         ipg_usec = opts.ipg_usec,
         min_ipg_usec = opts.min_ipg_usec,
         speedup = opts.speedup,
         count = opts.count,
         duration = opts.duration,
         force = opts.force,
         is_dual = opts.dual,
         src_mac_pcap = opts.src_mac_pcap,
         dst_mac_pcap = opts.dst_mac_pcap)
    return RC_OK()
@console_api('service', 'STL', True)
def service_line (self, line):
    '''Configures port for service mode.
    In service mode ports will reply to ARP, PING
    and etc.
    '''
    # Build the command-line parser for the "service" console command.
    parser = parsing_opts.gen_parser(
        self, "service", self.service_line.__doc__,
        parsing_opts.PORT_LIST_WITH_ALL,
        parsing_opts.SERVICE_GROUP)
    args = parser.parse_args(line.split())
    # Translate the parsed options into the service-mode triple.
    enabled, filtered, mask = self._get_service_params(args)
    self.set_service_mode(ports = args.ports, enabled = enabled, filtered = filtered, mask = mask)
    return True
@console_api('start', 'STL', True)
def start_line (self, line):
    '''Start selected traffic on specified ports on TRex\n'''
    # parser for parsing the start command arguments
    parser = parsing_opts.gen_parser(self,
                                     "start",
                                     self.start_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.FILE_PATH,
                                     parsing_opts.DURATION,
                                     parsing_opts.ARGPARSE_TUNABLES,
                                     parsing_opts.MULTIPLIER_STRICT,
                                     parsing_opts.DRY_RUN,
                                     parsing_opts.CORE_MASK_GROUP,
                                     parsing_opts.SYNCHRONIZED)
    opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
    help_flags = ('-h', '--help')
    # if the user chose to pass the tunables arguments in previous version (-t var1=x1,var2=x2..)
    # we decode the tunables and then convert the output from dictionary to list in order to have the same format with the
    # newer version.
    tunable_dict = {}
    if "-t" in line and '=' in line:
        tun_list = opts.tunables
        tunable_dict = parsing_opts.decode_tunables(tun_list[0])
        opts.tunables = parsing_opts.convert_old_tunables_to_new_tunables(tun_list[0])
        opts.tunables.extend(tun_list[1:])
        tunable_dict["tunables"] = opts.tunables
    # Normalize every entry to a PortProfileID (plain port numbers allowed).
    ports = []
    for port in opts.ports:
        if not isinstance(port, PortProfileID):
            port = PortProfileID(port)
        ports.append(port)
    port_id_list = parse_ports_from_profiles(ports)
    # core mask
    if opts.core_mask is not None:
        core_mask = opts.core_mask
    else:
        core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
    # just for sanity - will be checked on the API as well
    self.__decode_core_mask(port_id_list, core_mask)
    # streams_per_profile: profile name -> streams loaded for that profile.
    # streams_per_port: port id -> all streams across that port's profiles.
    streams_per_profile = {}
    streams_per_port = {}
    # pack the profile
    try:
        for profile in ports:
            profile_name = str(profile)
            port_id = int(profile)
            # Load the profile per port; direction alternates with port parity.
            profile = STLProfile.load(opts.file[0],
                                      direction = port_id % 2,
                                      port_id = port_id,
                                      **tunable_dict)
            # A help flag in the tunables means the profile only printed usage.
            if any(h in opts.tunables for h in help_flags):
                return True
            if profile is None:
                print('Failed to convert STL profile')
                return False
            stream_list = profile.get_streams()
            streams_per_profile[profile_name] = stream_list
            if port_id not in streams_per_port:
                streams_per_port[port_id] = list(stream_list)
            else:
                streams_per_port[port_id].extend(list(stream_list))
    except TRexError as e:
        s = format_text("\nError loading profile '{0}'\n".format(opts.file[0]), 'bold')
        s += "\n" + e.brief()
        raise TRexError(s)
    # for better use experience - check this before any other action on port
    self.__pre_start_check('START', ports, opts.force, streams_per_port)
    ports = self.validate_profile_input(ports)
    # stop ports if needed
    active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
    if active_profiles and opts.force:
        self.stop(active_profiles)
    # remove all streams
    self.remove_all_streams(ports)
    # Attach each profile's streams to its port, then start (or dry-run validate).
    for profile in ports:
        profile_name = str(profile)
        self.add_streams(streams_per_profile[profile_name], ports = profile)
    if opts.dry:
        self.validate(ports, opts.mult, opts.duration, opts.total)
    else:
        self.start(ports,
                   opts.mult,
                   opts.force,
                   opts.duration,
                   opts.total,
                   core_mask,
                   opts.sync)
    return True
@console_api('stop', 'STL', True)
def stop_line (self, line):
    '''Stop active traffic on specified ports on TRex\n'''
    # Parser for the "stop" console command.
    parser = parsing_opts.gen_parser(self,
                                     "stop",
                                     self.stop_line.__doc__,
                                     parsing_opts.PROFILE_LIST_WITH_ALL,
                                     parsing_opts.REMOVE)
    opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("active"), verify_acquired = True, allow_empty = True)
    ports = self.validate_profile_input(opts.ports)
    # FIX: removed the unused `port_id_list = parse_ports_from_profiles(ports)`
    # local — its value was never read anywhere in this method.
    # Stop only the profiles that are actually transmitting.
    active_ports = list_intersect(ports, self.get_profiles_with_state("active"))
    if not active_ports:
        if not ports:
            msg = 'no active ports'
        else:
            msg = 'no active traffic on ports {0}'.format(ports)
        print(msg)
    else:
        # call API
        self.stop(active_ports)
    # Optionally also remove the loaded streams (--remove flag).
    if opts.remove:
        streams_ports = list_intersect(ports, self.get_profiles_with_state("streams"))
        if not streams_ports:
            if not ports:
                msg = 'no ports with streams'
            else:
                msg = 'no streams on ports {0}'.format(ports)
            print(msg)
        else:
            # call API
            self.remove_all_streams(ports)
    return True
@console_api('update', 'STL', True)
def update_line (self, line):
    '''Update port(s) speed currently active\n'''
    # Build the command-line parser for the "update" console command.
    parser = parsing_opts.gen_parser(self,
                                     "update",
                                     self.update_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.MULTIPLIER,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("active"), verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    # Per-stream update requires a single port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.update_streams(ports[0], opts.mult, opts.force, opts.ids)
        return True
    # Otherwise update every active profile among the requested ports.
    active = list_intersect(ports, self.get_profiles_with_state("active"))
    if not active:
        msg = 'no active ports' if not ports else 'no active traffic on ports {0}'.format(ports)
        raise TRexError(msg)
    self.update(active, opts.mult, opts.total, opts.force)
    return True
@console_api('pause', 'STL', True)
def pause_line (self, line):
    '''Pause active traffic on specified ports on TRex\n'''
    # Build the command-line parser for the "pause" console command.
    parser = parsing_opts.gen_parser(self,
                                     "pause",
                                     self.pause_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("transmitting"), verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    # Per-stream pause requires a single port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('pause - must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.pause_streams(ports[0], opts.ids)
        return True
    # check for already paused case
    if ports and is_sub_list(ports, self.get_profiles_with_state("paused")):
        raise TRexError('all of ports(s) {0} are already paused'.format(ports))
    # Only transmitting profiles can be paused.
    transmitting = list_intersect(ports, self.get_profiles_with_state("transmitting"))
    if not transmitting:
        msg = 'no transmitting ports' if not ports else 'none of ports {0} are transmitting'.format(ports)
        raise TRexError(msg)
    self.pause(transmitting)
    return True
@console_api('resume', 'STL', True)
def resume_line (self, line):
    '''Resume active traffic on specified ports on TRex\n'''
    # Build the command-line parser for the "resume" console command.
    parser = parsing_opts.gen_parser(self,
                                     "resume",
                                     self.resume_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("paused"), verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    # Per-stream resume requires a single port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.resume_streams(ports[0], opts.ids)
        return True
    # Only paused profiles can be resumed.
    paused = list_intersect(ports, self.get_profiles_with_state("paused"))
    if not paused:
        msg = 'no paused ports' if not ports else 'none of ports {0} are paused'.format(ports)
        raise TRexError(msg)
    self.resume(paused)
    # true means print time
    return True
##########################
# Tagged Packet Grouping #
##########################
@staticmethod
def _tpg_tag_value_2str(tag_type, value):
"""
Convert the structured tag type and value to a printable string.
:parameters:
tag_type: str
String represeting the tag type. Supported tag types are Dot1Q and QinQ.
value: dict
Value of the tag.
:return:
String representing the tag type and value.
"""
known_types = ["Dot1Q", "QinQ"]
if tag_type not in known_types:
return "Unknown Type"
if tag_type == "Dot1Q":
return "Dot1Q({})".format(value["vlan"])
if tag_type == "QinQ":
return "QinQ({}, {})".format(value["vlans"][0], value["vlans"][1])
@console_api('tpg_enable', 'STL', True)
def tpg_enable(self, line):
    """Enable Tagged Packet Group"""
    # Build the command-line parser for the "tpg_enable" console command.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_enable",
                                     self.tpg_enable.__doc__,
                                     parsing_opts.TPG_ENABLE
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # Load the tag-configuration profile; re-raise errors colored for the console.
    try:
        tpg_conf = STLTaggedPktGroupTagConf.load(opts.tags_conf, tunables=opts.tunables)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
    if tpg_conf is None:
        # Can be a --help call.
        return None
    try:
        self.enable_tpg(opts.num_tpgids, tpg_conf, opts.ports)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_disable', 'STL', True)
def tpg_disable(self, line):
    """Disable Tagged Packet Group"""
    # Build the command-line parser for the "tpg_disable" console command.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_disable",
                                     self.tpg_disable.__doc__,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # Re-raise API errors colored for the console.
    try:
        self.disable_tpg()
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_status', 'STL', True)
def show_tpg_status(self, line):
    '''Show Tagged Packet Group Status\n'''
    # Build the command-line parser for the "tpg_status" console command.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_status",
                                     self.show_tpg_status.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    status = None
    try:
        status = self.get_tpg_status(opts.username, opts.port)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
    if status is None:
        self.logger.info(format_text("Couldn't get status from STL Server.\n", "bold", "magenta"))
        # FIX: previously fell through and crashed on status.get() below.
        return
    enabled = status.get("enabled", None)
    if enabled is None:
        self.logger.info(format_text("Enabled not found in server status response.\n", "bold", "magenta"))
        # FIX: previously continued and misreported the malformed response as "disabled".
        return
    msg = "\nTagged Packet Group is enabled\n" if enabled else "\nTagged Packet Group is disabled\n"
    self.logger.info(format_text(msg, "bold", "yellow"))
    # If Tagged Packet Group is enabled, print its details in a table.
    if enabled:
        data = status.get("data", None)
        if data is None:
            self.logger.info(format_text("Data not found in server status response.\n", "bold", "magenta"))
            # FIX: previously passed data=None into the table printer.
            return
        keys_to_headers = [ {'key': 'username',       'header': 'Username'},
                            {'key': 'acquired_ports', 'header': 'Acquired Ports'},
                            {'key': 'rx_ports',       'header': 'Rx Ports'},
                            {'key': 'num_tpgids',     'header': 'Num TPGId'},
                            {'key': 'num_tags',       'header': 'Num Tags'},
                          ]
        kwargs = {'title': 'Tagged Packet Group Data',
                  'empty_msg': 'No status found',
                  'keys_to_headers': keys_to_headers}
        text_tables.print_table_by_keys(data, **kwargs)
@console_api('tpg_update', 'STL', True)
def tpg_update(self, line):
    '''Update Tagged Packet Group Tag\n'''
    # FIX: the parser was built with the name "tpg_tags" and with
    # self.show_tpg_tags.__doc__ (copy-paste from show_tpg_tags), so
    # `tpg_update --help` displayed the wrong command name and description.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_update",
                                     self.tpg_update.__doc__,
                                     parsing_opts.TPG_UPDATE
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # "Invalidate" is a CLI sentinel meaning: invalidate (clear) this tag.
    tag_type = opts.tag_type if opts.tag_type != "Invalidate" else None
    new_tag = {
        "type": tag_type,
        "tag_id": opts.tag_id
    }
    if tag_type is not None:
        # Not invalidating tag, value is needed
        if opts.value is None:
            raise TRexError(format_text("Value must be present for type {}.".format(tag_type), "red", "bold"))
        if tag_type == "Dot1Q":
            # Dot1Q carries a single VLAN id.
            if len(opts.value) != 1:
                raise TRexError(format_text("Only one value must be presented for Dot1Q tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlan": opts.value[0]
            }
        if tag_type == "QinQ":
            # QinQ carries an outer and an inner VLAN id.
            if len(opts.value) != 2:
                raise TRexError(format_text("Exactly two values must be presented for QinQ tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlans": opts.value
            }
    try:
        self.update_tpg_tags([new_tag], opts.clear)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
@console_api('tpg_tags', 'STL', True)
def show_tpg_tags(self, line):
    '''Show Tagged Packet Group Tags\n'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_tags",
                                     self.show_tpg_tags.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ,
                                     parsing_opts.TPG_MIN_TAG,
                                     parsing_opts.TPG_MAX_TAG_NOT_REQ,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # Page the tag table interactively, MAX_TAGS_TO_SHOW rows per page.
    MAX_TAGS_TO_SHOW = 20
    table_keys_to_headers = [ {'key': 'tag_id', 'header': 'Tag Id'},
                              {'key': 'tag', 'header': 'Tag Type'}
                            ]
    table_kwargs = {'empty_msg': '\nNo tags found',
                    'keys_to_headers': table_keys_to_headers}
    tpg_status = self.get_tpg_status(username=opts.username, port=opts.port)
    if not tpg_status["enabled"]:
        raise TRexError(format_text("Tagged Packet Group is not enabled.", "bold", "red"))
    num_tags_total = tpg_status["data"]["num_tags"]
    # Clamp the requested range to the number of tags actually defined.
    last_tag = num_tags_total if opts.max_tag is None else min(num_tags_total, opts.max_tag)
    current_tag = opts.min_tag
    while current_tag != last_tag:
        next_current_tag = min(current_tag + MAX_TAGS_TO_SHOW, last_tag)
        try:
            tags = self.get_tpg_tags(current_tag, next_current_tag, opts.username, opts.port)
        except TRexError as e:
            s = format_text("{}".format(e.brief()), "bold", "red")
            raise TRexError(s)
        # Render each tag in the page; None entries mean an undefined tag id.
        tags_to_print = []
        for i in range(len(tags)):
            tags_to_print.append(
                {
                    "tag_id": current_tag + i,
                    "tag": '-' if tags[i] is None else STLClient._tpg_tag_value_2str(tags[i]['type'], tags[i]['value'])
                }
            )
        table_kwargs['title'] = "Tags [{}-{})".format(current_tag, next_current_tag)
        text_tables.print_table_by_keys(tags_to_print, **table_kwargs)
        current_tag = next_current_tag
        if current_tag != last_tag:
            # The message should be printed iff there will be another iteration.
            input("Press Enter to see the rest of the tags")
    @console_api('tpg_stats', 'STL', True)
    def show_tpg_stats(self, line):
        '''Show Tagged Packet Group Statistics\n'''
        # Console command: page through the per-tag RX statistics of a single
        # (port, tpgid) pair, MAX_TAGS_TO_SHOW tags per screen.
        # NOTE: the docstring above is also the CLI help text (fed to gen_parser).
        parser = parsing_opts.gen_parser(self,
                                         "tpg_stats",
                                         self.show_tpg_stats.__doc__,
                                         parsing_opts.TPG_STL_STATS
                                         )
        opts = parser.parse_args(line.split())
        if not opts:
            return opts
        if opts.max_tag < opts.min_tag:
            # The client Api checks this as well but our loop logic requires this condition.
            raise TRexError(format_text("Max Tag {} must be greater/equal than Min Tag {}".format(opts.max_tag, opts.min_tag), "bold", "red"))
        if opts.min_tag == opts.max_tag and not opts.untagged and not opts.unknown_tag:
            raise TRexError(format_text("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.", "bold", "red"))
        MAX_TAGS_TO_SHOW = 20  # page size: max tag sections fetched/printed per iteration
        current_tag = opts.min_tag
        new_current_tag = current_tag
        first_iteration = True  # the untagged/unknown sections are fetched only once, on the first page
        table_keys_to_headers = [ {'key': 'tags', 'header': 'Tag Id'},
                                  {'key': 'pkts', 'header': 'Packets'},
                                  {'key': 'bytes', 'header': 'Bytes'},
                                  {'key': 'seq_err', 'header': 'Seq Error'},
                                  {'key': 'seq_err_too_big', 'header': 'Seq Too Big'},
                                  {'key': 'seq_err_too_small', 'header': 'Seq Too Small'},
                                  {'key': 'dup', 'header': 'Duplicates'},
                                  {'key': 'ooo', 'header': 'Out of Order'},
        ]
        table_kwargs = {'empty_msg': 'No stats found',
                        'keys_to_headers': table_keys_to_headers}
        # Loop until we get all the tags
        while current_tag != opts.max_tag or first_iteration:
            stats = None
            try:
                # Ask for the unknown/untagged sections only on the first fetch.
                unknown_tag = first_iteration and opts.unknown_tag
                untagged = first_iteration and opts.untagged
                stats, new_current_tag = self.get_tpg_stats(opts.port, opts.tpgid, current_tag, opts.max_tag, max_sections=MAX_TAGS_TO_SHOW, unknown_tag=unknown_tag, untagged=untagged)
            except TRexError as e:
                s = format_text("{}".format(e.brief()), "bold", "red")
                raise TRexError(s)
            if stats is None:
                self.logger.info(format_text("\nNo stats found for the provided params.\n", "bold", "yellow"))
                return
            # Server replies are keyed by stringified port / tpgid.
            port_stats = stats.get(str(opts.port), None)
            if port_stats is None:
                self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
                return
            tpgid_stats = port_stats.get(str(opts.tpgid), None)
            if tpgid_stats is None:
                self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
                return
            stats_list = []
            for tag_id, tag_stats in tpgid_stats.items():
                tag_stats['tags'] = tag_id.replace("_tag", "") # remove _tag keyword when printing
                stats_list.append(tag_stats)
            table_kwargs['title'] = "Port {}, tpgid {}, Tags = [{}, {})".format(opts.port, opts.tpgid, current_tag, new_current_tag)
            text_tables.print_table_by_keys(stats_list, **table_kwargs)
            if new_current_tag is not None and new_current_tag != opts.max_tag:
                # The message should be printed iff there will be another iteration.
                input("Press Enter to see the rest of the stats")
            first_iteration = False # Set this false after the first iteration
            current_tag = new_current_tag if new_current_tag is not None else current_tag # Update the current tag in case it is a new one.
@console_api('tpg_clear_stats', 'STL', True)
def tpg_clear_stats(self, line):
'''Clear Tagged Packet Group Stats\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_stats",
self.tpg_clear_stats.__doc__,
parsing_opts.TPG_STL_CLEAR_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_stats(opts.port, opts.tpgid, opts.min_tag, opts.max_tag, opts.tag_list, opts.unknown_tag, opts.untagged)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_tx_stats', 'STL', True)
def show_tpg_tx_stats(self, line):
'''Show Tagged Packet Group Tx Statistics\n'''
parser = parsing_opts.gen_parser(self,
"tpg_tx_stats",
self.show_tpg_tx_stats.__doc__,
parsing_opts.TPG_STL_TX_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
table_keys_to_headers = [ {'key': 'pkts', 'header': 'Packets'},
{'key': 'bytes', 'header': 'Bytes'},
]
table_kwargs = {'empty_msg': 'No stats found',
'keys_to_headers': table_keys_to_headers}
tx_stats = {}
try:
tx_stats = self.get_tpg_tx_stats(opts.port, opts.tpgid)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
port_stats = tx_stats.get(str(opts.port), None)
if port_stats is None:
self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
return
tpgid_stats = port_stats.get(str(opts.tpgid), None)
if tpgid_stats is None:
self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
return
table_kwargs['title'] = "Port {}, tpgid {}".format(opts.port, opts.tpgid)
text_tables.print_table_by_keys(tpgid_stats, **table_kwargs)
@console_api('tpg_clear_tx_stats', 'STL', True)
def tpg_clear_tx_stats(self, line):
'''Clear Tagged Packet Group Tx Stats\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_tx_stats",
self.tpg_clear_tx_stats.__doc__,
parsing_opts.TPG_STL_TX_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_tx_stats(opts.port, opts.tpgid)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_show_unknown_tags', 'STL', True)
def show_tpg_unknown_stats(self, line):
'''Show Tagged Packet Group Unknown Tags\n'''
parser = parsing_opts.gen_parser(self,
"tpg_show_unknown_stats",
self.show_tpg_unknown_stats.__doc__,
parsing_opts.TPG_PORT,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
table_keys_to_headers = [{'key': 'tpgid', 'header': 'tpgid'},
{'key': 'type', 'header': 'Type'}]
table_kwargs = {'empty_msg': '\nNo unknown tags found in port {}.'.format(opts.port),
'keys_to_headers': table_keys_to_headers}
unknown_tags = {}
try:
unknown_tags = self.get_tpg_unknown_tags(opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
port_unknown_tags = unknown_tags.get(str(opts.port), None)
if port_unknown_tags is None:
self.logger.info(format_text("\nNo unknown tags found in the provided port.\n", "bold", "yellow"))
return
unknown_tags_to_print = []
for val in port_unknown_tags:
unknown_tag = {
'tpgid': val['tpgid'],
'type': STLClient._tpg_tag_value_2str(val['tag']['type'], val['tag']['value'])
}
if unknown_tag not in unknown_tags_to_print:
# This list is at max 10 elements. Dict is not hashable.
unknown_tags_to_print.append(unknown_tag)
table_kwargs['title'] = "Port {} unknown tags".format(opts.port)
text_tables.print_table_by_keys(unknown_tags_to_print, **table_kwargs)
@console_api('tpg_clear_unknown_tags', 'STL', True)
def tpg_clear_unknown_stats(self, line):
'''Clear Tagged Packet Group Unknown Tags\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_unknown_stats",
self.tpg_clear_unknown_stats.__doc__,
parsing_opts.TPG_PORT,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_unknown_tags(opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
| 39.268036 | 360 | 0.531084 | import time
import sys
import os
from collections import OrderedDict
from functools import wraps
from ..utils.common import get_current_user, list_intersect, is_sub_list, user_input, list_difference, parse_ports_from_profiles
from ..utils import parsing_opts, text_tables
from ..utils.text_opts import format_text, format_num
from ..common.trex_exceptions import *
from ..common.trex_events import Event
from ..common.trex_logger import Logger
from ..common.trex_client import TRexClient, PacketBuffer
from ..common.trex_types import *
from ..common.trex_types import PortProfileID, ALL_PROFILE_ID
from ..common.trex_psv import *
from ..common.trex_api_annotators import client_api, console_api
from .trex_stl_port import STLPort
from .trex_stl_streams import STLStream, STLProfile, STLTaggedPktGroupTagConf
from .trex_stl_stats import CPgIdStats
def validate_port_input(port_arg):
    """Decorator factory: normalize a port argument into PortProfileID(s).

    `port_arg` is the *name* of the decorated method's parameter that holds
    a port (int/str/PortProfileID) or a list of them.  The wrapper locates
    that argument - whether passed positionally or by keyword - and converts
    it to PortProfileID (or a list of PortProfileID) before calling the
    wrapped method.  None is passed through unchanged.
    """
    def wrap (func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            code = func.__code__
            fname = func.__name__  # NOTE(review): unused; kept for debugging
            # Positional parameter names of the wrapped method ('self' first).
            names = code.co_varnames[:code.co_argcount]
            argname = port_arg
            try:
                # -1 because 'self' is not part of *args.
                port_index = names.index(argname) - 1
                argval = args[port_index]
                args = list(args)
                args[port_index] = convert_port_to_profile(argval)
                args = tuple(args)
            except (ValueError, IndexError):
                # ValueError: argname not a positional parameter;
                # IndexError: it was not passed positionally.
                # Either way, fall back to (and force) the keyword form.
                argval = kwargs.get(argname)
                kwargs[argname] = convert_port_to_profile(argval)
            return func(self, *args, **kwargs)
        def convert_port_to_profile(port):
            # Convert a single port or a list of ports to PortProfileID(s).
            if port is None:
                return port
            if isinstance(port, list):
                result = list(port)
                for idx, val in enumerate(result):
                    validate_type('port', val, (int, str, PortProfileID))
                    result[idx] = PortProfileID(str(val))
            else:
                validate_type('port', port, (int, str, PortProfileID))
                result = PortProfileID(str(port))
            return result
        return wrapper
    return wrap
class TPGState:
    """State of the Tagged Packet Group (TPG) feature in the server.

    The state advances through the enable/disable handshake
    (DISABLED -> ENABLED_CP -> ENABLED_CP_RX -> ENABLED, and back through
    DISABLED_DP / DISABLED_DP_RX), or lands in one of the allocation
    failure states (RX_ALLOC_FAILED / DP_ALLOC_FAILED).
    """

    DISABLED        = 0
    ENABLED_CP      = 1
    ENABLED_CP_RX   = 2
    ENABLED         = 3
    DISABLED_DP     = 4
    DISABLED_DP_RX  = 5
    RX_ALLOC_FAILED = 6
    DP_ALLOC_FAILED = 7

    ALL_STATES = [DISABLED, ENABLED_CP, ENABLED_CP_RX, ENABLED, DISABLED_DP, DISABLED_DP_RX, RX_ALLOC_FAILED, DP_ALLOC_FAILED]
    ERROR_STATES = [RX_ALLOC_FAILED, DP_ALLOC_FAILED]

    def __init__(self, initial_state):
        """Create a TPGState.

        :parameters:
            initial_state: int
                One of the values in ALL_STATES.

        :raises:
            + :exc:`TRexError` - if initial_state is not a valid state.
        """
        if initial_state not in TPGState.ALL_STATES:
            raise TRexError("Invalid TPG State {}".format(initial_state))
        self._state = initial_state
        # Human readable description per error state.
        self.fail_messages = {
            TPGState.RX_ALLOC_FAILED: "Rx counter allocation failed!",
            TPGState.DP_ALLOC_FAILED: "Tx counter allocation failed!"
        }

    def is_error_state(self):
        """Return True iff the state indicates an allocation failure."""
        return self._state in TPGState.ERROR_STATES

    def get_fail_message(self):
        """Return the failure description, or a generic message when the
        state is not an error state."""
        if not self.is_error_state():
            return "TPG State is valid!"
        return self.fail_messages[self._state]

    def __eq__(self, other):
        # Comparing against anything that is not a TPGState is a programming
        # error, hence the exception rather than returning NotImplemented.
        if not isinstance(other, TPGState):
            # BUGFIX: corrected typo "comparision" in the error message.
            raise TRexError("Invalid comparison for TPGState")
        return self._state == other._state
class STLClient(TRexClient):
CORE_MASK_SPLIT = 1
CORE_MASK_PIN = 2
CORE_MASK_SINGLE = 3
    def __init__(self,
                 username = get_current_user(),
                 server = "localhost",
                 sync_port = 4501,
                 async_port = 4500,
                 verbose_level = "error",
                 logger = None,
                 sync_timeout = None,
                 async_timeout = None
                 ):
        """Create a stateless (STL) TRex client.

        :parameters:
            username: str - client username (default: current OS user).
            server: str - TRex server address.
            sync_port: int - RPC (request/response) port.
            async_port: int - async publisher (subscriber) port.
            verbose_level: str - logger verbosity.
            logger: Logger - custom logger, or None for the default.
            sync_timeout / async_timeout: timeouts for the two channels.
        """
        # NOTE(review): get_current_user() is evaluated once, at class
        # definition time (default-argument semantics) - confirm intended.
        # API version negotiated with the server for the STL mode.
        api_ver = {'name': 'STL', 'major': 5, 'minor': 1}
        TRexClient.__init__(self,
                            api_ver,
                            username,
                            server,
                            sync_port,
                            async_port,
                            verbose_level,
                            logger,
                            sync_timeout,
                            async_timeout)
        # Accessor for pgid-based statistics (flow stats / latency).
        self.pgid_stats = CPgIdStats(self.conn.rpc)
        # Cached result of the default get_tpg_status() query; None = not cached.
        self.tpg_status = None
    def get_mode (self):
        """Return the client mode identifier ('STL' for stateless)."""
        return "STL"
e, rx_delay_ms = None):
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.psv.validate('wait_on_traffic', ports, PSV_ACQUIRED)
TRexClient.wait_on_traffic(self, ports, timeout)
if rx_delay_ms is None:
if self.ports[ports[0]].is_virtual():
rx_delay_ms = 100
else:
rx_delay_ms = 10
rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def update (self, ports = None, mult = "1", total = False, force = False):
ports = ports if ports is not None else self.get_profiles_with_state("active")
ports = self.psv.validate('update', ports, (PSV_ACQUIRED, PSV_TX))
validate_type('mult', mult, basestring)
validate_type('force', force, bool)
validate_type('total', total, bool)
mult_obj = parsing_opts.decode_multiplier(mult,
allow_update = True,
divide_count = len(ports) if total else 1)
if not mult_obj:
raise TRexArgumentError('mult', mult)
self.ctx.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
rc = self._for_each_port("update", ports, mult_obj, force)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def update_streams(self, port, mult = "1", force = False, stream_ids = None):
validate_type('mult', mult, basestring)
validate_type('force', force, bool)
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('update_streams', port, (PSV_ACQUIRED, PSV_TX))
if not stream_ids:
raise TRexError('Please specify stream IDs to update')
mult_obj = parsing_opts.decode_multiplier(mult, allow_update = False)
if not mult_obj:
raise TRexArgumentError('mult', mult)
self.ctx.logger.pre_cmd('Updating streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("update_streams", port, mult_obj, force, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def pause (self, ports = None):
ports = ports if ports is not None else self.get_profiles_with_state("transmitting")
ports = self.psv.validate('pause', ports, (PSV_ACQUIRED, PSV_TX))
self.ctx.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
rc = self._for_each_port("pause", ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def pause_streams(self, port, stream_ids):
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('pause_streams', port, (PSV_ACQUIRED, PSV_TX))
if not stream_ids:
raise TRexError('Please specify stream IDs to pause')
self.ctx.logger.pre_cmd('Pause streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("pause_streams", port, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def resume (self, ports = None):
ports = ports if ports is not None else self.get_profiles_with_state("paused")
ports = self.psv.validate('resume', ports, (PSV_ACQUIRED, PSV_PAUSED))
self.ctx.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
rc = self._for_each_port('resume', ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def resume_streams(self, port, stream_ids):
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('resume_streams', port, (PSV_ACQUIRED))
if not stream_ids:
raise TRexError('Please specify stream IDs to resume')
self.ctx.logger.pre_cmd('Resume streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("resume_streams", port, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec):
rc = RC()
for port_id in port_id_list:
slave_handler = self.ports[port_id ^ 0x1].handler if is_dual else ""
rc.add(self.ports[port_id].push_remote(pcap_filename,
ipg_usec,
speedup,
count,
duration,
is_dual,
slave_handler,
min_ipg_usec))
return rc
    @client_api('command', True)
    def push_remote (self,
                     pcap_filename,
                     ports = None,
                     ipg_usec = None,
                     speedup = 1.0,
                     count = 1,
                     duration = -1,
                     is_dual = False,
                     min_ipg_usec = None,
                     force = False,
                     src_mac_pcap = False,
                     dst_mac_pcap = False):
        """Push a PCAP file that resides on the *server* machine to the given ports.

        :parameters:
            pcap_filename: str - path of the PCAP/ERF file on the server.
            ports: list - ports to push on (default: all acquired ports).
            ipg_usec: float - inter-packet gap override, in usec.
            speedup: float - factor to speed up / slow down the capture timing.
            count: int - times to transmit the capture (limited to 0x3fffffff).
            duration: float - time limit in seconds, -1 for no limit.
            is_dual: bool - transmit a dual (client/server) capture on adjacent ports.
            min_ipg_usec: float - minimal inter-packet gap, in usec.
            force: bool - stop active profiles on the ports first.
            src_mac_pcap: bool - take the source MAC from the PCAP.
            dst_mac_pcap: bool - take the destination MAC from the PCAP.

        :raises:
            + :exc:`TRexError`
        """
        ports = ports if ports is not None else self.get_acquired_ports()
        ports = self.__pre_start_check('PUSH', ports, force)
        validate_type('pcap_filename', pcap_filename, basestring)
        validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
        validate_type('speedup', speedup, (float, int))
        validate_type('count', count, int)
        validate_type('duration', duration, (float, int))
        validate_type('is_dual', is_dual, bool)
        validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
        validate_type('src_mac_pcap', src_mac_pcap, bool)
        validate_type('dst_mac_pcap', dst_mac_pcap, bool)
        if force:
            # Stop every active profile ('.*' wildcard) on the requested ports.
            active_ports = list(set(self.get_active_ports()).intersection(ports))
            all_profiles = []
            for port in active_ports:
                profile = PortProfileID(str(port) + ".*")
                all_profiles.append(profile)
            if all_profiles:
                self.stop(all_profiles)
        if is_dual:
            if not pcap_filename.endswith('erf'):
                raise TRexError("dual mode: only ERF format is supported for dual mode")
            # Dual mode pairs each port with its adjacent (xor-1) port, which
            # must be owned and must not itself be part of the batch.
            for port in ports:
                master = port
                slave = port ^ 0x1
                if slave in ports:
                    raise TRexError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
                if slave not in self.get_acquired_ports():
                    raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
        # 'count' travels to the server packed into 30 bits; the two MSBs
        # carry the src/dst-MAC-from-pcap flags.
        if count & 0xC0000000:
            raise TRexError("count is limited to 0x3fff,ffff")
        count = count & 0x3FFFFFFF
        if src_mac_pcap:
            count |= 0x80000000
        if dst_mac_pcap:
            count |= 0x40000000
        self.ctx.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
        rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError(rc)
    @client_api('command', True)
    def push_pcap (self,
                   pcap_filename,
                   ports = None,
                   ipg_usec = None,
                   speedup = 1.0,
                   count = 1,
                   duration = -1,
                   force = False,
                   vm = None,
                   packet_hook = None,
                   is_dual = False,
                   min_ipg_usec = None,
                   src_mac_pcap = False,
                   dst_mac_pcap = False):
        """Push a *local* PCAP file by converting it to streams and starting them.

        :parameters:
            pcap_filename: str - path of the PCAP file on the local machine.
            ports: list - ports to push on (default: all acquired ports).
            ipg_usec: float - inter-packet gap override, in usec (exclusive with min_ipg_usec).
            speedup: float - factor to speed up / slow down the capture timing.
            count: int - times to transmit the capture.
            duration: float - time limit in seconds, -1 for no limit.
            force: bool - skip the size check and stop active ports first.
            vm: list - field-engine instructions to attach to each stream.
            packet_hook: callable - per-packet transformation hook.
            is_dual: bool - split the capture between adjacent ports (by MAC).
            min_ipg_usec: float - minimal inter-packet gap, in usec.
            src_mac_pcap / dst_mac_pcap: bool - take src/dst MAC from the PCAP.

        :raises:
            + :exc:`TRexError`
        """
        ports = ports if ports is not None else self.get_acquired_ports()
        ports = self.__pre_start_check('PUSH', ports, force)
        validate_type('pcap_filename', pcap_filename, basestring)
        validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
        validate_type('speedup', speedup, (float, int))
        validate_type('count', count, int)
        validate_type('duration', duration, (float, int))
        validate_type('vm', vm, (list, type(None)))
        validate_type('is_dual', is_dual, bool)
        validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
        validate_type('src_mac_pcap', src_mac_pcap, bool)
        validate_type('dst_mac_pcap', dst_mac_pcap, bool)
        if all([ipg_usec, min_ipg_usec]):
            raise TRexError('Please specify either ipg or minimal ipg, not both.')
        if force:
            active_ports = list(set(self.get_active_ports()).intersection(ports))
            if active_ports:
                self.stop(active_ports)
        # Local push loads the whole file into streams - refuse big files
        # unless forced, and point the user at remote push instead.
        file_size = os.path.getsize(pcap_filename)
        if not force and file_size > (1024 * 1024):
            file_size_str = format_num(file_size, suffix = 'B')
            url = 'https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_pcap_based_traffic'
            raise TRexError("PCAP size of {:} is too big for local push - consider using remote (-r):\n{}".format(file_size_str, url))
        if is_dual:
            # Dual mode pairs each port with its adjacent (xor-1) port, which
            # must be owned and must not itself be part of the batch.
            for port in ports:
                master = port
                slave = port ^ 0x1
                if slave in ports:
                    raise TRexError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))
                if slave not in self.get_acquired_ports():
                    raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
        if not is_dual:
            # Single-direction: convert the whole capture to one profile and
            # start it on every requested port.
            try:
                self.ctx.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
                profile = STLProfile.load_pcap(pcap_filename,
                                               ipg_usec,
                                               speedup,
                                               count,
                                               vm = vm,
                                               packet_hook = packet_hook,
                                               min_ipg_usec = min_ipg_usec,
                                               src_mac_pcap = src_mac_pcap,
                                               dst_mac_pcap = dst_mac_pcap)
                self.ctx.logger.post_cmd(RC_OK())
            except TRexError as e:
                self.ctx.logger.post_cmd(RC_ERR(e))
                raise
            self.remove_all_streams(ports = ports)
            # NOTE(review): id_list is never used afterwards.
            id_list = self.add_streams(profile.get_streams(), ports)
            return self.start(ports = ports, duration = duration, force = force)
        else:
            # Dual mode: split the capture by MAC into a client-side and a
            # server-side profile, loaded on adjacent ports.
            split_mode = 'MAC'
            if (ipg_usec and ipg_usec < 1000 * speedup) or (min_ipg_usec and min_ipg_usec < 1000):
                self.ctx.logger.warning('In order to get synchronized traffic, ensure that effective ipg is at least 1000 usec')
            try:
                self.ctx.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
                profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
                                                            ipg_usec,
                                                            speedup,
                                                            count,
                                                            vm = vm,
                                                            packet_hook = packet_hook,
                                                            split_mode = split_mode,
                                                            min_ipg_usec = min_ipg_usec,
                                                            src_mac_pcap = src_mac_pcap,
                                                            dst_mac_pcap = dst_mac_pcap)
                self.ctx.logger.post_cmd(RC_OK())
            except TRexError as e:
                self.ctx.logger.post_cmd(RC_ERR(e))
                raise
            all_ports = ports + [p ^ 0x1 for p in ports if profile_b]
            self.remove_all_streams(ports = all_ports)
            for port in ports:
                master = port
                slave = port ^ 0x1
                self.add_streams(profile_a.get_streams(), master)
                if profile_b:
                    self.add_streams(profile_b.get_streams(), slave)
            return self.start(ports = all_ports, duration = duration, force = force, synchronized = True)
@client_api('getter', True)
def get_stats (self, ports = None, sync_now = True):
output = self._get_stats_common(ports, sync_now)
pgid_stats = self.get_pgid_stats()
if not pgid_stats:
raise TRexError(pgid_stats)
output['flow_stats'] = pgid_stats.get('flow_stats', {})
output['latency'] = pgid_stats.get('latency', {})
return output
@client_api('command', True)
def clear_stats (self,
ports = None,
clear_global = True,
clear_flow_stats = True,
clear_latency_stats = True,
clear_xstats = True):
self._clear_stats_common(ports, clear_global, clear_xstats)
if clear_flow_stats or clear_latency_stats:
self.pgid_stats.clear_stats(clear_flow_stats=clear_flow_stats, clear_latency_stats=clear_latency_stats)
    @client_api('getter', True)
    def get_active_pgids(self):
        """Return the packet group IDs currently active in the server."""
        return self.pgid_stats.get_active_pgids()
@client_api('getter', True)
def get_pgid_stats (self, pgid_list = []):
pgid_list = listify(pgid_list)
return self.pgid_stats.get_stats(pgid_list)
PG tag!")
if tag_type not in SUPPORTED_TAG_TYPES:
raise TRexError("Tag type {} not supported. Supported tag types are = {}".format(tag_type, SUPPORTED_TAG_TYPES))
tag_value = tag.get("value", None)
if tag_value is None and not update:
raise TRexError("You must provide a value field for each TPG tag!")
if not update:
validate_type("tag_value", tag_value, (dict, type(None)))
if tag_type == "Dot1Q":
validate_type("tag_value", tag_value, dict)
vlan = tag_value.get("vlan", None)
if vlan is None:
raise TRexError("You must provide a vlan key for each Dot1Q tag!")
_verify_vlan(vlan)
elif tag_type == "QinQ":
validate_type("tag_value", tag_value, dict)
vlans = tag_value.get("vlans", None)
if not vlans:
raise TRexError("You must provide vlans key for each QinQ tag!")
validate_type("vlans", vlans, list)
if len(vlans) != 2:
raise TRexError("You must provide 2 vlans for QinQ tag.")
for vlan in vlans:
_verify_vlan(vlan)
if update:
tag_id = tag.get("tag_id", None)
if tag_id is None:
raise TRexError("You must provide a tag id when updating TPG tags.")
validate_type("tag_id", tag_id, int)
if not 0 <= tag_id < num_tags:
raise TRexError("Invalid Tag Id {}. Must be in [0-{}).".format(tag_id, num_tags))
    @client_api('command', True)
    def enable_tpg(self, num_tpgids, tags, rx_ports = None):
        """Enable the Tagged Packet Group feature.

        Enabling is a two-phase handshake with the server:
        1. First 'enable_tpg' RPC - the server starts allocating the CP/RX
           counters asynchronously; we poll 'get_tpg_state' until the state
           reaches ENABLED_CP_RX (or an allocation-failure state).
        2. Second 'enable_tpg' RPC - completes the enablement on the DP side.

        :parameters:
            num_tpgids: int - number of tagged packet group identifiers.
            tags: list - tag definition dicts (validated per tag).
            rx_ports: list - ports that collect TPG RX stats (default: all
                acquired ports). Must be a subset of the acquired ports.

        :raises:
            + :exc:`TRexError`
        """
        acquired_ports = self.get_acquired_ports()
        rx_ports = rx_ports if rx_ports is not None else acquired_ports
        self.psv.validate('enable_tpg', rx_ports)
        validate_type("num_tpgids", num_tpgids, int)
        validate_type("tags", tags, list)
        for tag in tags:
            STLClient._validate_tpg_tag(tag, update=False, num_tags=len(tags))
        if not set(rx_ports).issubset(set(acquired_ports)):
            raise TRexError("TPG Rx Ports {} must be acquired".format(rx_ports))
        self.ctx.logger.pre_cmd("Enabling Tagged Packet Group")
        # Invalidate the cached status - it is about to change.
        self.tpg_status = None
        params = {
            "num_tpgids": num_tpgids,
            "ports": acquired_ports,
            "rx_ports": rx_ports,
            "username": self.ctx.username,
            "session_id": self.ctx.session_id,
            "tags": tags
        }
        # Phase 1: kick off the asynchronous CP/RX allocation.
        rc = self._transmit("enable_tpg", params=params)
        if not rc:
            self.ctx.logger.post_cmd(rc)
            raise TRexError(rc)
        # Poll until the RX side finished allocating (or failed).
        tpg_state = TPGState(TPGState.DISABLED)
        while tpg_state != TPGState(TPGState.ENABLED_CP_RX):
            rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
            if not rc:
                self.ctx.logger.post_cmd(rc)
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
            if tpg_state.is_error_state():
                # Allocation failed - roll back quietly and report.
                self.disable_tpg(surpress_log=True)
                self.ctx.logger.post_cmd(False)
                raise TRexError(tpg_state.get_fail_message())
        # Phase 2: complete the enablement (DP side).
        rc = self._transmit("enable_tpg", params={"username": self.ctx.username})
        if not rc:
            # Second phase failed - inspect the state to find out why.
            rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
            if not rc:
                self.ctx.logger.post_cmd(rc)
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
            if tpg_state.is_error_state():
                self.disable_tpg(surpress_log=True)
                self.ctx.logger.post_cmd(False)
                raise TRexError(tpg_state.get_fail_message())
            else:
                raise TRexError("TPG enablement failed but server doesn't indicate of errors.")
        self.ctx.logger.post_cmd(rc)
    @client_api('command', True)
    def disable_tpg(self, username=None, surpress_log=False):
        """Disable the Tagged Packet Group feature.

        Mirrors the two-phase enable: the first 'disable_tpg' RPC starts the
        asynchronous teardown; we poll until DP/RX deallocation finishes and
        then send a second RPC to complete the disable.

        :parameters:
            username: str - owner of the TPG context (default: this client's user).
            surpress_log: bool - skip pre/post command logging (used for
                internal rollback on a failed enable). [sic: parameter name]
        """
        # Invalidate cache
        self.tpg_status = None
        if not surpress_log:
            self.ctx.logger.pre_cmd("Disabling Tagged Packet Group")
        # The first disable RPC only tells the server to start deallocating
        # the memory; it does not mean the teardown is complete - poll
        # get_tpg_state until the DP/RX side reaches DISABLED_DP_RX.
        username = self.ctx.username if username is None else username
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)
        tpg_state = TPGState(TPGState.ENABLED)
        while tpg_state != TPGState(TPGState.DISABLED_DP_RX):
            rc = self._transmit("get_tpg_state", params={"username": username})
            if not rc:
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
        # Second RPC completes the disable.
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)
        if not surpress_log:
            self.ctx.logger.post_cmd(rc)
@client_api('getter', True)
def get_tpg_status(self, username=None, port=None):
default_params = (username is None and port is None)
if default_params and self.tpg_status is not None:
return self.tpg_status
params = {}
if port is None:
params = {"username": self.ctx.username if username is None else username}
else:
self.psv.validate('get_tpg_status', [port])
if username is not None:
raise TRexError("Should provide only one between port and username for TPG status.")
params = {"port_id": port}
rc = self._transmit("get_tpg_status", params=params)
if not rc:
raise TRexError(rc)
if default_params:
self.tpg_status = rc.data()
return rc.data()
@client_api('command', True)
def update_tpg_tags(self, new_tags, clear=False):
def clear_update(self, port, min_tpgid, max_tpgid, tag_list):
params = {
"username": self.ctx.username,
"port_id": port,
"min_tpgid": min_tpgid,
"max_tpgid": max_tpgid,
"tag_list": tag_list
}
self._transmit("clear_updated", params=params)
self.ctx.logger.pre_cmd("Updating Tagged Packet Group Tags")
validate_type("new_tags", new_tags, list)
tpg_status = self.get_tpg_status()
if not tpg_status["enabled"]:
raise TRexError("Tagged Packet Group is not enabled.")
num_tags = tpg_status["data"]["num_tags"]
for tag in new_tags:
STLClient._validate_tpg_tag(tag, update=True, num_tags=num_tags)
rc = self._transmit("update_tpg_tags", params={"username": self.ctx.username, "tags": new_tags})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
if clear:
tag_list = [tag["tag_id"] for tag in new_tags]
rx_ports = tpg_status["data"]["rx_ports"]
num_tpgids = tpg_status["data"]["num_tpgids"]
NUM_STATS_CHUNK = 2048
TPGID_CHUNK_SIZE = NUM_STATS_CHUNK // len(tag_list)
min_tpgid = 0
for port in rx_ports:
while min_tpgid != num_tpgids:
max_tpgid = min(min_tpgid + TPGID_CHUNK_SIZE, num_tpgids)
clear_update(self, port, min_tpgid, max_tpgid, tag_list)
min_tpgid = max_tpgid
    @client_api('getter', True)
    def get_tpg_tags(self, min_tag = 0, max_tag = None, username = None, port = None):
        """Fetch TPG tag definitions [min_tag, max_tag) in CHUNK_SIZE pieces.

        :parameters:
            min_tag: int - first tag (inclusive).
            max_tag: int - end of range (exclusive); None means all tags.
            username / port - query key, mutually exclusive (see get_tpg_status).

        :returns:
            list of tag dicts (None entries for undefined tags).

        :raises:
            + :exc:`TRexError`
        """
        CHUNK_SIZE = 500  # tags fetched per RPC
        def get_tpg_tags_chunk(self, params):
            # Single RPC for one chunk; params carries min_tag/max_tag.
            rc = self._transmit("get_tpg_tags", params=params)
            if not rc:
                raise TRexError(rc)
            return rc.data()
        validate_type("min_tag", min_tag, int)
        validate_type("max_tag", max_tag, (int, type(None)))
        validate_type("username", username, (str, type(None)))
        tpg_status = self.get_tpg_status(username=username, port=port)
        if not tpg_status["enabled"]:
            raise TRexError("Tagged Packet Group is not enabled.")
        num_tags = tpg_status["data"]["num_tags"]
        if max_tag is None:
            max_tag = num_tags
        if max_tag > num_tags:
            raise TRexError("Max Tag {} must be less than number of tags defined: {}".format(max_tag, num_tags))
        if min_tag > max_tag:
            raise TRexError("Min Tag {} must be less than Max Tag {}".format(min_tag, max_tag))
        params = {}
        if port is None:
            params = {"username": self.ctx.username if username is None else username}
        else:
            self.psv.validate('get_tpg_tags', [port])
            if username is not None:
                raise TRexError("Should provide only one between port and username for get_tpg_tags.")
            params = {"port_id": port}
        # Fetch [min_tag, max_tag) in CHUNK_SIZE windows and concatenate.
        tpg_tags = []
        current_max_tag = 0
        while current_max_tag != max_tag:
            current_max_tag = min(max_tag, min_tag + CHUNK_SIZE)
            params["min_tag"], params["max_tag"] = min_tag, current_max_tag
            tpg_tags += get_tpg_tags_chunk(self, params)
            min_tag = current_max_tag
        return tpg_tags
    @client_api('getter', True)
    def get_tpg_stats(self, port, tpgid, min_tag, max_tag, max_sections = 50, unknown_tag = False, untagged = False):
        """Fetch TPG RX stats of (port, tpgid) for tags [min_tag, max_tag).

        The server returns at most a limited number of sections per RPC, so
        this method iterates, merging sections, until max_sections is reached
        or the tag range is exhausted.

        :parameters:
            port: int - port to query.
            tpgid: int - tagged packet group identifier.
            min_tag / max_tag: int - tag range, [min_tag, max_tag).
            max_sections: int - upper bound of sections to collect.
            unknown_tag: bool - also fetch the unknown-tag section.
            untagged: bool - also fetch the untagged section.

        :returns:
            tuple (stats dict, next min_tag or None when exhausted).

        :raises:
            + :exc:`TRexError`
        """
        self.psv.validate('get_tpg_stats', [port])
        validate_type("tpgid", tpgid, int)
        validate_type("min_tag", min_tag, int)
        validate_type("max_tag", max_tag, int)
        validate_type("max_sections", max_sections, int)
        validate_type("unknown_tag", unknown_tag, bool)
        validate_type("untagged", untagged, bool)
        if min_tag > max_tag:
            raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
        if min_tag == max_tag and not untagged and not unknown_tag:
            raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
        def get_tpg_stats_section(self, port, tpgid, min_tag, max_tag, unknown_tag, untagged):
            # Single RPC returning one batch of tag sections.
            params = {
                "port_id": port,
                "tpgid": tpgid,
                "min_tag": min_tag,
                "max_tag": max_tag,
                "unknown_tag": unknown_tag,
                "untagged": untagged
            }
            rc = self._transmit("get_tpg_stats", params=params)
            if not rc:
                raise TRexError(rc)
            return rc.data()
        def _get_next_min_tag(section_stats, port, tpgid):
            # Derive where the next fetch should start from the keys of the
            # last reply. Keys are either "a-b" ranges or single tag numbers;
            # unknown/untagged sections are skipped. None = range exhausted.
            tpgid_stats = section_stats[str(port)][str(tpgid)]
            for key in tpgid_stats.keys():
                if "unknown" in key or "untagged" in key:
                    continue
                elif "-" in key:
                    return int(key.split("-")[1]) + 1
                else:
                    return (int(key)) + 1
            return None
        stats = {}
        sections = 0
        done = False
        _min_tag = min_tag
        while not done and sections < max_sections:
            section_stats = get_tpg_stats_section(self, port, tpgid, _min_tag, max_tag, unknown_tag, untagged)
            _min_tag = _get_next_min_tag(section_stats, port, tpgid)
            if _min_tag is None or _min_tag == max_tag:
                done = True
            # Merge this batch into the accumulated stats.
            if not stats:
                stats = section_stats
            else:
                tpgid_stats = stats[str(port)][str(tpgid)]
                new_tpgid_stats = section_stats[str(port)][str(tpgid)]
                tpgid_stats.update(new_tpgid_stats)
            # unknown/untagged sections are requested only on the first RPC.
            unknown_tag = False
            untagged = False
            sections += 1
        return (stats, _min_tag)
@client_api('command', True)
def clear_tpg_stats(self, port, tpgid, min_tag = 0, max_tag = None, tag_list = None, unknown_tag = False, untagged = False):
self.ctx.logger.pre_cmd("Clearing TPG stats")
self.psv.validate('clear_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
validate_type("min_tag", min_tag, int)
validate_type("max_tag", max_tag, (int, type(None)))
validate_type("tag_list", tag_list, (list, type(None)))
validate_type("unknown_tag", unknown_tag, bool)
validate_type("untagged", untagged, bool)
if (max_tag is None and not tag_list) or (max_tag is not None and tag_list):
raise TRexError("One between max_tag and tag_list must be provided.")
if max_tag is not None:
if min_tag > max_tag:
raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
if min_tag == max_tag and not untagged and not unknown_tag:
raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
if tag_list:
for tag in tag_list:
validate_type("tag", tag, int)
if tag < 0:
raise TRexError("Invalid tag {}. Tag must be positive.".format(tag))
params = {
"port_id": port,
"tpgid": tpgid,
"min_tag": min_tag,
"max_tag": max_tag,
"tag_list": tag_list if tag_list else None,
"unknown_tag": unknown_tag,
"untagged": untagged,
}
rc = self._transmit("clear_tpg_stats", params=params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('getter', True)
def get_tpg_tx_stats(self, port, tpgid):
self.psv.validate('get_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("get_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_tx_stats(self, port, tpgid):
self.ctx.logger.pre_cmd("Clearing TPG Tx stats")
self.psv.validate('clear_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("clear_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('getter', True)
def get_tpg_unknown_tags(self, port):
self.psv.validate('get_tpg_unknown_tags', [port])
rc = self._transmit("get_tpg_unknown_tags", params={"port_id": port})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_unknown_tags(self, port):
    """Clear the unknown Tagged Packet Group tags collected on *port*."""
    logger = self.ctx.logger
    logger.pre_cmd("Clearing TPG unknown tags")
    self.psv.validate('clear_tpg_unknown_tags', [port])
    rc = self._transmit("clear_tpg_unknown_tags", params={"port_id": port})
    logger.post_cmd(rc)
    if not rc:
        raise TRexError(rc)
bold", "magenta"))
for port_id, port_profiles_table in profiles_per_port.items():
if port_profiles_table:
text_tables.print_table_with_header(port_profiles_table,
header = 'Port %s:' % port_id)
@console_api('streams', 'STL', True, True)
def streams_line(self, line):
    '''Show loaded streams, optionally exporting them to a Python profile file'''
    parser = parsing_opts.gen_parser(self,
                                     "streams",
                                     self.streams_line.__doc__,
                                     parsing_opts.PORT_LIST_WITH_ALL,
                                     parsing_opts.STREAMS_MASK,
                                     parsing_opts.STREAMS_CODE)
    opts = parser.parse_args(line.split())
    if not opts:
        # Parse failure / help was printed - propagate the parser's RC.
        return opts
    # table_format is False when dumping to code: we need the stream objects,
    # not their pre-rendered table rows.
    streams_per_port = self._get_streams(opts.ports, set(opts.ids), table_format = opts.code is None)
    if not streams_per_port:
        self.logger.info(format_text("No streams found with desired filter.\n", "bold", "magenta"))
    elif opts.code is None:
        # No --code argument at all: just print the per-port summary tables.
        for port_id, port_streams_table in streams_per_port.items():
            if port_streams_table:
                text_tables.print_table_with_header(port_streams_table,
                                                    header = 'Port %s:' % port_id)
    elif opts.code:
        # --code <filename>: export the streams as a Python profile.
        if not opts.code.endswith('.py'):
            raise TRexError('Saved filename should end with .py')
        # With several ports each port goes to its own "<name>_port<id>.py".
        is_several_ports = len(streams_per_port) > 1
        if is_several_ports:
            print(format_text('\nWarning: several ports specified, will save in separate file per port.', 'bold'))
        for port_id, port_streams_data in streams_per_port.items():
            if not port_streams_data:
                print('No streams to save at port %s, skipping.' % port_id)
                continue
            filename = ('%s_port%s.py' % (opts.code[:-3], port_id)) if is_several_ports else opts.code
            if os.path.exists(filename):
                # Interactive overwrite confirmation.
                sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
                ans = user_input().strip()
                if ans.lower() not in ('y', 'yes'):
                    print('Not saving.')
                    continue
            self.logger.pre_cmd('Saving file as: %s' % filename)
            try:
                profile = STLProfile(list(port_streams_data.values()))
                with open(filename, 'w') as f:
                    f.write(profile.dump_to_code())
            except Exception as e:
                self.logger.post_cmd(False)
                print(e)
                print('')
            else:
                self.logger.post_cmd(True)
    else:
        # --code given but empty string: print each stream's code to stdout.
        for port_id, port_streams_data in streams_per_port.items():
            if not port_streams_data:
                continue
            print(format_text('Port: %s' % port_id, 'cyan', 'underline') + '\n')
            for stream_id, stream in port_streams_data.items():
                print(format_text('Stream ID: %s' % stream_id, 'cyan', 'underline'))
                print(' ' + '\n '.join(stream.to_code().splitlines()) + '\n')
@console_api('push', 'STL', True)
def push_line(self, line):
    '''Push a pcap file, replayed either locally or remotely (on the server)'''
    args = [self,
            "push",
            self.push_line.__doc__,
            parsing_opts.REMOTE_FILE,
            parsing_opts.PORT_LIST_WITH_ALL,
            parsing_opts.COUNT,
            parsing_opts.DURATION,
            parsing_opts.IPG,
            parsing_opts.MIN_IPG,
            parsing_opts.SPEEDUP,
            parsing_opts.FORCE,
            parsing_opts.DUAL,
            parsing_opts.SRC_MAC_PCAP,
            parsing_opts.DST_MAC_PCAP]
    # First pass: do NOT verify the file path locally, because with --remote
    # the pcap lives on the server side.
    parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH_NO_CHECK]))
    opts = parser.parse_args(line.split(), verify_acquired = True)
    if not opts:
        return opts
    # Second pass for a local push: re-parse with a parser that checks the
    # file actually exists on this machine.
    if not opts.remote:
        parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH]))
        opts = parser.parse_args(line.split(), verify_acquired = True)
        if not opts:
            return opts
    if opts.remote:
        self.push_remote(opts.file[0],
                         ports          = opts.ports,
                         ipg_usec       = opts.ipg_usec,
                         min_ipg_usec   = opts.min_ipg_usec,
                         speedup        = opts.speedup,
                         count          = opts.count,
                         duration       = opts.duration,
                         force          = opts.force,
                         is_dual        = opts.dual,
                         src_mac_pcap   = opts.src_mac_pcap,
                         dst_mac_pcap   = opts.dst_mac_pcap)
    else:
        self.push_pcap(opts.file[0],
                       ports          = opts.ports,
                       ipg_usec       = opts.ipg_usec,
                       min_ipg_usec   = opts.min_ipg_usec,
                       speedup        = opts.speedup,
                       count          = opts.count,
                       duration       = opts.duration,
                       force          = opts.force,
                       is_dual        = opts.dual,
                       src_mac_pcap   = opts.src_mac_pcap,
                       dst_mac_pcap   = opts.dst_mac_pcap)
    return RC_OK()
@console_api('service', 'STL', True)
def service_line (self, line):
    '''Configure the service mode state of the given port(s)'''
    parser = parsing_opts.gen_parser(self,
                                     "service",
                                     self.service_line.__doc__,
                                     parsing_opts.PORT_LIST_WITH_ALL,
                                     parsing_opts.SERVICE_GROUP)

    opts = parser.parse_args(line.split())
    # Bug fix: every sibling console command bails out on a falsy parse result
    # (parse failure / help). Without this guard a falsy `opts` would crash
    # inside _get_service_params below.
    if not opts:
        return opts

    enabled, filtered, mask = self._get_service_params(opts)
    self.set_service_mode(ports = opts.ports, enabled = enabled, filtered = filtered, mask = mask)
    return True
@console_api('start', 'STL', True)
def start_line (self, line):
    '''Start traffic from a profile file on the specified ports'''
    parser = parsing_opts.gen_parser(self,
                                     "start",
                                     self.start_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.FILE_PATH,
                                     parsing_opts.DURATION,
                                     parsing_opts.ARGPARSE_TUNABLES,
                                     parsing_opts.MULTIPLIER_STRICT,
                                     parsing_opts.DRY_RUN,
                                     parsing_opts.CORE_MASK_GROUP,
                                     parsing_opts.SYNCHRONIZED)
    opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
    help_flags = ('-h', '--help')
    tunable_dict = {}
    # Legacy tunables ("-t key=val,key=val") are decoded and also converted
    # to the new argparse-style tunable list.
    if "-t" in line and '=' in line:
        tun_list = opts.tunables
        tunable_dict = parsing_opts.decode_tunables(tun_list[0])
        opts.tunables = parsing_opts.convert_old_tunables_to_new_tunables(tun_list[0])
        opts.tunables.extend(tun_list[1:])
    tunable_dict["tunables"] = opts.tunables
    # Normalize every port argument to a PortProfileID.
    ports = []
    for port in opts.ports:
        if not isinstance(port, PortProfileID):
            port = PortProfileID(port)
        ports.append(port)
    port_id_list = parse_ports_from_profiles(ports)
    # Resolve the core mask: an explicit mask wins, otherwise pin/split policy.
    if opts.core_mask is not None:
        core_mask = opts.core_mask
    else:
        core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
    self.__decode_core_mask(port_id_list, core_mask)
    streams_per_profile = {}
    streams_per_port = {}
    try:
        # Load the profile once per port/profile; traffic direction alternates
        # with port parity.
        for profile in ports:
            profile_name = str(profile)
            port_id = int(profile)
            profile = STLProfile.load(opts.file[0],
                                      direction = port_id % 2,
                                      port_id = port_id,
                                      **tunable_dict)
            # Tunable help requested - the load above already printed it.
            if any(h in opts.tunables for h in help_flags):
                return True
            if profile is None:
                print('Failed to convert STL profile')
                return False
            stream_list = profile.get_streams()
            streams_per_profile[profile_name] = stream_list
            if port_id not in streams_per_port:
                streams_per_port[port_id] = list(stream_list)
            else:
                streams_per_port[port_id].extend(list(stream_list))
    except TRexError as e:
        s = format_text("\nError loading profile '{0}'\n".format(opts.file[0]), 'bold')
        s += "\n" + e.brief()
        raise TRexError(s)
    self.__pre_start_check('START', ports, opts.force, streams_per_port)
    ports = self.validate_profile_input(ports)
    # On --force, stop anything currently active before replacing streams.
    active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
    if active_profiles and opts.force:
        self.stop(active_profiles)
    self.remove_all_streams(ports)
    for profile in ports:
        profile_name = str(profile)
        self.add_streams(streams_per_profile[profile_name], ports = profile)
    if opts.dry:
        # Dry run: validate only, don't transmit.
        self.validate(ports, opts.mult, opts.duration, opts.total)
    else:
        self.start(ports,
                   opts.mult,
                   opts.force,
                   opts.duration,
                   opts.total,
                   core_mask,
                   opts.sync)
    return True
@console_api('stop', 'STL', True)
def stop_line (self, line):
    '''Stop active traffic on the specified ports / profiles'''
    parser = parsing_opts.gen_parser(self,
                                     "stop",
                                     self.stop_line.__doc__,
                                     parsing_opts.PROFILE_LIST_WITH_ALL,
                                     parsing_opts.REMOVE)
    opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("active"), verify_acquired = True, allow_empty = True)
    ports = self.validate_profile_input(opts.ports)
    # NOTE(review): port_id_list looks unused here; parse_ports_from_profiles
    # may be kept for its validation side effect - confirm before removing.
    port_id_list = parse_ports_from_profiles(ports)
    # Stop only what is actually transmitting.
    active_ports = list_intersect(ports, self.get_profiles_with_state("active"))
    if not active_ports:
        if not ports:
            msg = 'no active ports'
        else:
            msg = 'no active traffic on ports {0}'.format(ports)
        print(msg)
    else:
        self.stop(active_ports)
    # --remove additionally deletes the loaded streams from the ports.
    if opts.remove:
        streams_ports = list_intersect(ports, self.get_profiles_with_state("streams"))
        if not streams_ports:
            if not ports:
                msg = 'no ports with streams'
            else:
                msg = 'no streams on ports {0}'.format(ports)
            print(msg)
        else:
            self.remove_all_streams(ports)
    return True
@console_api('update', 'STL', True)
def update_line (self, line):
    '''Update the rate of active traffic on ports / profiles / streams'''
    parser = parsing_opts.gen_parser(self,
                                     "update",
                                     self.update_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.MULTIPLIER,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.STREAMS_MASK)

    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("active"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)

    # Per-stream update path: requires exactly one port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.update_streams(ports[0], opts.mult, opts.force, opts.ids)
        return True

    # Per-profile update path: only profiles that are actually active.
    active = list_intersect(ports, self.get_profiles_with_state("active"))
    if not active:
        raise TRexError('no active ports' if not ports
                        else 'no active traffic on ports {0}'.format(ports))

    self.update(active, opts.mult, opts.total, opts.force)
    return True
@console_api('pause', 'STL', True)
def pause_line (self, line):
    '''Pause active traffic on ports / profiles / streams'''
    parser = parsing_opts.gen_parser(self,
                                     "pause",
                                     self.pause_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)

    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("transmitting"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)

    # Per-stream pause path: requires exactly one port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('pause - must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.pause_streams(ports[0], opts.ids)
        return True

    if ports and is_sub_list(ports, self.get_profiles_with_state("paused")):
        raise TRexError('all of ports(s) {0} are already paused'.format(ports))

    transmitting = list_intersect(ports, self.get_profiles_with_state("transmitting"))
    if not transmitting:
        raise TRexError('no transmitting ports' if not ports
                        else 'none of ports {0} are transmitting'.format(ports))

    self.pause(transmitting)
    return True
@console_api('resume', 'STL', True)
def resume_line (self, line):
    '''Resume paused traffic on ports / profiles / streams'''
    parser = parsing_opts.gen_parser(self,
                                     "resume",
                                     self.resume_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)

    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("paused"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)

    # Per-stream resume path: requires exactly one port.
    if opts.ids:
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.resume_streams(ports[0], opts.ids)
        return True

    paused = list_intersect(ports, self.get_profiles_with_state("paused"))
    if not paused:
        raise TRexError('no paused ports' if not ports
                        else 'none of ports {0} are paused'.format(ports))

    self.resume(paused)
    return True
parsing_opts.TPG_ENABLE
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
tpg_conf = STLTaggedPktGroupTagConf.load(opts.tags_conf, **{"tunables": opts.tunables})
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
if tpg_conf is None:
return None
try:
self.enable_tpg(opts.num_tpgids, tpg_conf, opts.ports)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_disable', 'STL', True)
def tpg_disable(self, line):
    '''Disable Tagged Packet Group'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_disable",
                                     self.tpg_disable.__doc__,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.disable_tpg()
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_status', 'STL', True)
def show_tpg_status(self, line):
    '''Show the Tagged Packet Group status and configuration'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_status",
                                     self.show_tpg_status.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    status = None
    try:
        status = self.get_tpg_status(opts.username, opts.port)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
    if status is None:
        self.logger.info(format_text("Couldn't get status from STL Server.\n", "bold", "magenta"))
        # Bug fix: previously fell through and dereferenced None below.
        return
    enabled = status.get("enabled", None)
    if enabled is None:
        self.logger.info(format_text("Enabled not found in server status response.\n", "bold", "magenta"))
        # Bug fix: previously fell through and reported "disabled" for a
        # malformed response.
        return
    msg = "\nTagged Packet Group is enabled\n" if enabled else "\nTagged Packet Group is disabled\n"
    self.logger.info(format_text(msg, "bold", "yellow"))
    # If Tagged Packet Group is enabled, print its details in a table.
    if enabled:
        data = status.get("data", None)
        if data is None:
            self.logger.info(format_text("Data not found in server status response.\n", "bold", "magenta"))
            # Bug fix: previously passed None on to print_table_by_keys.
            return
        keys_to_headers = [ {'key': 'username',       'header': 'Username'},
                            {'key': 'acquired_ports', 'header': 'Acquired Ports'},
                            {'key': 'rx_ports',       'header': 'Rx Ports'},
                            {'key': 'num_tpgids',     'header': 'Num TPGId'},
                            {'key': 'num_tags',       'header': 'Num Tags'},
        ]
        kwargs = {'title': 'Tagged Packet Group Data',
                  'empty_msg': 'No status found',
                  'keys_to_headers': keys_to_headers}
        text_tables.print_table_by_keys(data, **kwargs)
@console_api('tpg_update', 'STL', True)
def tpg_update(self, line):
    '''Update (or invalidate) a Tagged Packet Group tag definition'''
    # Bug fix: the parser was built with the "tpg_tags" command name and the
    # show_tpg_tags docstring (copy-paste), so usage/help showed the wrong
    # command. It must describe tpg_update itself.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_update",
                                     self.tpg_update.__doc__,
                                     parsing_opts.TPG_UPDATE
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # "Invalidate" removes the tag; represented server-side as a None type.
    tag_type = opts.tag_type if opts.tag_type != "Invalidate" else None
    new_tag = {
        "type": tag_type,
        "tag_id": opts.tag_id
    }
    if tag_type is not None:
        # Not invalidating tag, value is needed
        if opts.value is None:
            raise TRexError(format_text("Value must be present for type {}.".format(tag_type), "red", "bold"))
        if tag_type == "Dot1Q":
            if len(opts.value) != 1:
                raise TRexError(format_text("Only one value must be presented for Dot1Q tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlan": opts.value[0]
            }
        if tag_type == "QinQ":
            if len(opts.value) != 2:
                raise TRexError(format_text("Exactly two values must be presented for QinQ tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlans": opts.value
            }
    try:
        self.update_tpg_tags([new_tag], opts.clear)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
@console_api('tpg_tags', 'STL', True)
def show_tpg_tags(self, line):
    '''Show the Tagged Packet Group tag mapping, paged interactively'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_tags",
                                     self.show_tpg_tags.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ,
                                     parsing_opts.TPG_MIN_TAG,
                                     parsing_opts.TPG_MAX_TAG_NOT_REQ,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # Page size: tags are fetched and printed in chunks of this many entries.
    MAX_TAGS_TO_SHOW = 20
    table_keys_to_headers = [ {'key': 'tag_id', 'header': 'Tag Id'},
                              {'key': 'tag', 'header': 'Tag Type'}
    ]
    table_kwargs = {'empty_msg': '\nNo tags found',
                    'keys_to_headers': table_keys_to_headers}
    tpg_status = self.get_tpg_status(username=opts.username, port=opts.port)
    if not tpg_status["enabled"]:
        raise TRexError(format_text("Tagged Packet Group is not enabled.", "bold", "red"))
    num_tags_total = tpg_status["data"]["num_tags"]
    # Clamp the requested range to the number of tags the server actually has.
    last_tag = num_tags_total if opts.max_tag is None else min(num_tags_total, opts.max_tag)
    current_tag = opts.min_tag
    # Fetch and print one page per iteration until the range is exhausted.
    while current_tag != last_tag:
        next_current_tag = min(current_tag + MAX_TAGS_TO_SHOW, last_tag)
        try:
            tags = self.get_tpg_tags(current_tag, next_current_tag, opts.username, opts.port)
        except TRexError as e:
            s = format_text("{}".format(e.brief()), "bold", "red")
            raise TRexError(s)
        tags_to_print = []
        for i in range(len(tags)):
            tags_to_print.append(
                {
                    "tag_id": current_tag + i,
                    # A None entry means the tag id is unmapped.
                    "tag": '-' if tags[i] is None else STLClient._tpg_tag_value_2str(tags[i]['type'], tags[i]['value'])
                }
            )
        table_kwargs['title'] = "Tags [{}-{})".format(current_tag, next_current_tag)
        text_tables.print_table_by_keys(tags_to_print, **table_kwargs)
        current_tag = next_current_tag
        if current_tag != last_tag:
            # The message should be printed iff there will be another iteration.
            input("Press Enter to see the rest of the tags")
@console_api('tpg_stats', 'STL', True)
def show_tpg_stats(self, line):
    '''Show Tagged Packet Group Rx statistics, paged interactively'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_stats",
                                     self.show_tpg_stats.__doc__,
                                     parsing_opts.TPG_STL_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    if opts.max_tag < opts.min_tag:
        # The client Api checks this as well but our loop logic requires this condition.
        raise TRexError(format_text("Max Tag {} must be greater/equal than Min Tag {}".format(opts.max_tag, opts.min_tag), "bold", "red"))
    if opts.min_tag == opts.max_tag and not opts.untagged and not opts.unknown_tag:
        raise TRexError(format_text("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.", "bold", "red"))
    # Page size: at most this many tag sections are fetched per request.
    MAX_TAGS_TO_SHOW = 20
    current_tag = opts.min_tag
    new_current_tag = current_tag
    first_iteration = True
    table_keys_to_headers = [ {'key': 'tags', 'header': 'Tag Id'},
                              {'key': 'pkts', 'header': 'Packets'},
                              {'key': 'bytes', 'header': 'Bytes'},
                              {'key': 'seq_err', 'header': 'Seq Error'},
                              {'key': 'seq_err_too_big', 'header': 'Seq Too Big'},
                              {'key': 'seq_err_too_small', 'header': 'Seq Too Small'},
                              {'key': 'dup', 'header': 'Duplicates'},
                              {'key': 'ooo', 'header': 'Out of Order'},
    ]
    table_kwargs = {'empty_msg': 'No stats found',
                    'keys_to_headers': table_keys_to_headers}
    # Loop until we get all the tags
    while current_tag != opts.max_tag or first_iteration:
        stats = None
        try:
            # The untagged/unknown-tag sections are requested only once,
            # on the first page.
            unknown_tag = first_iteration and opts.unknown_tag
            untagged = first_iteration and opts.untagged
            stats, new_current_tag = self.get_tpg_stats(opts.port, opts.tpgid, current_tag, opts.max_tag, max_sections=MAX_TAGS_TO_SHOW, unknown_tag=unknown_tag, untagged=untagged)
        except TRexError as e:
            s = format_text("{}".format(e.brief()), "bold", "red")
            raise TRexError(s)
        if stats is None:
            self.logger.info(format_text("\nNo stats found for the provided params.\n", "bold", "yellow"))
            return
        # Server keys the response by stringified port and tpgid.
        port_stats = stats.get(str(opts.port), None)
        if port_stats is None:
            self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
            return
        tpgid_stats = port_stats.get(str(opts.tpgid), None)
        if tpgid_stats is None:
            self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
            return
        stats_list = []
        for tag_id, tag_stats in tpgid_stats.items():
            tag_stats['tags'] = tag_id.replace("_tag", "") # remove _tag keyword when printing
            stats_list.append(tag_stats)
        table_kwargs['title'] = "Port {}, tpgid {}, Tags = [{}, {})".format(opts.port, opts.tpgid, current_tag, new_current_tag)
        text_tables.print_table_by_keys(stats_list, **table_kwargs)
        if new_current_tag is not None and new_current_tag != opts.max_tag:
            # The message should be printed iff there will be another iteration.
            input("Press Enter to see the rest of the stats")
        first_iteration = False # Set this false after the first iteration
        current_tag = new_current_tag if new_current_tag is not None else current_tag # Update the current tag in case it is a new one.
@console_api('tpg_clear_stats', 'STL', True)
def tpg_clear_stats(self, line):
    '''Clear Tagged Packet Group Rx statistics'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_stats",
                                     self.tpg_clear_stats.__doc__,
                                     parsing_opts.TPG_STL_CLEAR_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_stats(opts.port, opts.tpgid, opts.min_tag, opts.max_tag,
                             opts.tag_list, opts.unknown_tag, opts.untagged)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_tx_stats', 'STL', True)
def show_tpg_tx_stats(self, line):
    '''Show Tagged Packet Group Tx statistics'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_tx_stats",
                                     self.show_tpg_tx_stats.__doc__,
                                     parsing_opts.TPG_STL_TX_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts

    keys_to_headers = [ {'key': 'pkts',  'header': 'Packets'},
                        {'key': 'bytes', 'header': 'Bytes'},
    ]
    kwargs = {'empty_msg': 'No stats found',
              'keys_to_headers': keys_to_headers}

    try:
        tx_stats = self.get_tpg_tx_stats(opts.port, opts.tpgid)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))

    # The server keys the response by stringified port and tpgid.
    port_stats = tx_stats.get(str(opts.port), None)
    if port_stats is None:
        self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
        return
    tpgid_stats = port_stats.get(str(opts.tpgid), None)
    if tpgid_stats is None:
        self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
        return

    kwargs['title'] = "Port {}, tpgid {}".format(opts.port, opts.tpgid)
    text_tables.print_table_by_keys(tpgid_stats, **kwargs)
@console_api('tpg_clear_tx_stats', 'STL', True)
def tpg_clear_tx_stats(self, line):
    '''Clear Tagged Packet Group Tx statistics'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_tx_stats",
                                     self.tpg_clear_tx_stats.__doc__,
                                     parsing_opts.TPG_STL_TX_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_tx_stats(opts.port, opts.tpgid)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_show_unknown_tags', 'STL', True)
def show_tpg_unknown_stats(self, line):
    '''Show the unknown Tagged Packet Group tags collected on a port'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_show_unknown_stats",
                                     self.show_tpg_unknown_stats.__doc__,
                                     parsing_opts.TPG_PORT,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    table_keys_to_headers = [{'key': 'tpgid', 'header': 'tpgid'},
                             {'key': 'type', 'header': 'Type'}]
    table_kwargs = {'empty_msg': '\nNo unknown tags found in port {}.'.format(opts.port),
                    'keys_to_headers': table_keys_to_headers}
    unknown_tags = {}
    try:
        unknown_tags = self.get_tpg_unknown_tags(opts.port)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
    # The server keys the response by the stringified port id.
    port_unknown_tags = unknown_tags.get(str(opts.port), None)
    if port_unknown_tags is None:
        self.logger.info(format_text("\nNo unknown tags found in the provided port.\n", "bold", "yellow"))
        return
    unknown_tags_to_print = []
    for val in port_unknown_tags:
        unknown_tag = {
            'tpgid': val['tpgid'],
            'type': STLClient._tpg_tag_value_2str(val['tag']['type'], val['tag']['value'])
        }
        if unknown_tag not in unknown_tags_to_print:
            # This list is at max 10 elements. Dict is not hashable.
            unknown_tags_to_print.append(unknown_tag)
    table_kwargs['title'] = "Port {} unknown tags".format(opts.port)
    text_tables.print_table_by_keys(unknown_tags_to_print, **table_kwargs)
@console_api('tpg_clear_unknown_tags', 'STL', True)
def tpg_clear_unknown_stats(self, line):
    '''Clear the unknown Tagged Packet Group tags of a port'''
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_unknown_stats",
                                     self.tpg_clear_unknown_stats.__doc__,
                                     parsing_opts.TPG_PORT,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_unknown_tags(opts.port)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
| true | true |
f73064bf0edcae1e29ce194797909ff6107a9759 | 22,176 | py | Python | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 2 | 2015-06-18T19:07:20.000Z | 2017-09-27T18:54:29.000Z | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-04-15T22:17:42.000Z | 2016-03-22T08:46:27.000Z | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 4 | 2015-04-16T03:24:08.000Z | 2015-04-22T15:33:28.000Z | # -*- coding: utf-8 -*-
'''
The Saltutil runner is used to sync custom types to the Master. See the
:mod:`saltutil module <salt.modules.saltutil>` for documentation on
managing updates to minions.
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
import salt.utils.extmods
log = logging.getLogger(__name__)
def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync all custom types

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        dictionary of modules to sync based on type

    extmod_blacklist : None
        dictionary of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_all
        salt-run saltutil.sync_all extmod_whitelist={'runners': ['custom_runner'], 'grains': []}
    '''
    log.debug('Syncing all')
    # One (result key, sync function) pair per custom type; order matters for
    # the key order of the returned dict.
    syncers = (
        ('clouds', sync_clouds),
        ('modules', sync_modules),
        ('states', sync_states),
        ('grains', sync_grains),
        ('renderers', sync_renderers),
        ('returners', sync_returners),
        ('output', sync_output),
        ('proxymodules', sync_proxymodules),
        ('runners', sync_runners),
        ('wheel', sync_wheel),
        ('engines', sync_engines),
        ('thorium', sync_thorium),
        ('queues', sync_queues),
        ('pillar', sync_pillar),
        ('utils', sync_utils),
        ('sdb', sync_sdb),
        ('cache', sync_cache),
        ('fileserver', sync_fileserver),
        ('tops', sync_tops),
        ('tokens', sync_eauth_tokens),
        ('serializers', sync_serializers),
        ('auth', sync_auth),
        ('executors', sync_executors),
    )
    ret = {}
    for key, syncer in syncers:
        ret[key] = syncer(saltenv=saltenv,
                          extmod_whitelist=extmod_whitelist,
                          extmod_blacklist=extmod_blacklist)
    return ret
def sync_auth(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync auth modules from ``salt://_auth`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_auth
    '''
    # Doc fix: the docstring previously said "execution modules" (copy-paste
    # from sync_modules); this function syncs auth modules.
    return salt.utils.extmods.sync(__opts__, 'auth', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_modules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync execution modules from ``salt://_modules`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_modules
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'modules',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_states(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync state modules from ``salt://_states`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_states
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'states',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_grains(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync grains modules from ``salt://_grains`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_grains
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'grains',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_renderers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync renderer modules from ``salt://_renderers`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_renderers
    '''
    # Doc fix: removed duplicated word ("from from") in the docstring.
    return salt.utils.extmods.sync(__opts__, 'renderers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_returners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync returner modules from ``salt://_returners`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_returners
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'returners',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_output(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync output modules from ``salt://_output`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_output
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'output',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_proxymodules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync proxy modules from ``salt://_proxy`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_proxymodules
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'proxy',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_runners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync runners from ``salt://_runners`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_runners
    '''
    # sync() returns (synced, touched); only the synced list is reported.
    result = salt.utils.extmods.sync(__opts__,
                                     'runners',
                                     saltenv=saltenv,
                                     extmod_whitelist=extmod_whitelist,
                                     extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_wheel(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync wheel modules from ``salt://_wheel`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_wheel
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'wheel',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_engines(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync engines from ``salt://_engines`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_engines
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'engines',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_thorium(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync Thorium from ``salt://_thorium`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_thorium
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'thorium',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_queues(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync queue modules from ``salt://_queues`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_queues
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'queues',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_pillar(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync pillar modules from ``salt://_pillar`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_pillar
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'pillar',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_utils(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2016.11.0

    Sync utils modules from ``salt://_utils`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_utils
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'utils',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync sdb modules from ``salt://_sdb`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_sdb
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'sdb',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_tops(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2016.3.7,2016.11.4,2017.7.0

    Sync master_tops modules from ``salt://_tops`` to the master.

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_tops
    '''
    # Fix: the docstring previously omitted extmod_whitelist/extmod_blacklist
    # even though the function accepts and forwards them, unlike every
    # sibling sync_* runner.  Only the first element of the extmods.sync()
    # result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'tops',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync cache modules from ``salt://_cache`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_cache
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'cache',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_fileserver(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync fileserver modules from ``salt://_fileserver`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_fileserver
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'fileserver',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_clouds(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync cloud modules from ``salt://_clouds`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_clouds
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'clouds',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync roster modules from ``salt://_roster`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_roster
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'roster',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync eauth token modules from ``salt://_tokens`` to the master.

    saltenv : base
        The fileserver environment from which to sync. Pass a
        comma-separated list to sync from more than one environment.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_eauth_tokens
    '''
    # Only the first element of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'tokens',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2019.2.0

    Sync serializer modules from ``salt://_serializers`` to the master.

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_serializers
    '''
    # Fix: the CLI example previously showed ``saltutil.sync_utils``
    # (copy/paste error); it now names this runner.  Only the first element
    # of the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'serializers',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
def sync_executors(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2019.2.1

    Sync executor modules from ``salt://_executors`` to the master.

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_executors
    '''
    # Fix: docstring previously misspelled "comma-seperated"; now matches
    # the wording of the sibling sync_* runners.  Only the first element of
    # the extmods.sync() result is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'executors',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist,
    )
    return result[0]
| 33.498489 | 128 | 0.689123 |
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.extmods
# Module-level logger; used by sync_all() to trace sync runs.
log = logging.getLogger(__name__)
def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync all custom extension module types to the master in one call.

    Returns a dict mapping each module type name to the list of modules
    that were synced for that type.
    '''
    log.debug('Syncing all')
    sync_kwargs = dict(saltenv=saltenv,
                       extmod_whitelist=extmod_whitelist,
                       extmod_blacklist=extmod_blacklist)
    # Maps each key of the returned dict to the per-type sync runner.
    # The order here fixes the key order of the result.
    syncers = (
        ('clouds', sync_clouds),
        ('modules', sync_modules),
        ('states', sync_states),
        ('grains', sync_grains),
        ('renderers', sync_renderers),
        ('returners', sync_returners),
        ('output', sync_output),
        ('proxymodules', sync_proxymodules),
        ('runners', sync_runners),
        ('wheel', sync_wheel),
        ('engines', sync_engines),
        ('thorium', sync_thorium),
        ('queues', sync_queues),
        ('pillar', sync_pillar),
        ('utils', sync_utils),
        ('sdb', sync_sdb),
        ('cache', sync_cache),
        ('fileserver', sync_fileserver),
        ('tops', sync_tops),
        ('tokens', sync_eauth_tokens),
        ('serializers', sync_serializers),
        ('auth', sync_auth),
        ('executors', sync_executors),
    )
    return {name: syncer(**sync_kwargs) for name, syncer in syncers}
def sync_auth(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync auth modules from ``salt://_auth`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'auth', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_modules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync execution modules from ``salt://_modules`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'modules', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_states(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync state modules from ``salt://_states`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'states', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_grains(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync grains modules from ``salt://_grains`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'grains', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_renderers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync renderer modules from ``salt://_renderers`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'renderers', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_returners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync returner modules from ``salt://_returners`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'returners', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_output(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync output modules from ``salt://_output`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'output', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_proxymodules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync proxy modules from ``salt://_proxy`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'proxy', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_runners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync runner modules from ``salt://_runners`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'runners', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_wheel(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync wheel modules from ``salt://_wheel`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'wheel', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_engines(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync engine modules from ``salt://_engines`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'engines', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_thorium(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync Thorium modules from ``salt://_thorium`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'thorium', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_queues(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync queue modules from ``salt://_queues`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'queues', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_pillar(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync pillar modules from ``salt://_pillar`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'pillar', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_utils(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync utils modules from ``salt://_utils`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'utils', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync sdb modules from ``salt://_sdb`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'sdb', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_tops(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync master_tops modules from ``salt://_tops`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'tops', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync cache modules from ``salt://_cache`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'cache', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_fileserver(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync fileserver modules from ``salt://_fileserver`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'fileserver', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_clouds(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync cloud modules from ``salt://_clouds`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'clouds', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync roster modules from ``salt://_roster`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'roster', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync eauth token modules from ``salt://_tokens`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'tokens', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync serializer modules from ``salt://_serializers`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'serializers', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
def sync_executors(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''Sync executor modules from ``salt://_executors`` to the master.'''
    synced = salt.utils.extmods.sync(
        __opts__, 'executors', saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return synced[0]
| true | true |
f73064f7b7a9daf8dccd16ffa7b8b4d1a6869ec6 | 21,364 | py | Python | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 54 | 2017-06-17T14:07:48.000Z | 2022-03-29T02:11:20.000Z | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 19 | 2021-12-28T12:44:55.000Z | 2022-01-13T08:11:28.000Z | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 11 | 2018-04-19T22:36:01.000Z | 2021-08-02T08:44:43.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
import enum
import math
from typing import List, Optional, Text, Tuple
import numpy as np
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
# Core assignment for a single-core computation: one replica, one logical
# core, placed at topology coordinates (0, 0, 0, 0).
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in range(core_assignment.shape[0]):
for logical_core in range(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer to use the `DeviceAssignment.build()` helper to construct a
  `DeviceAssignment`; it is easier if less flexible than constructing a
  `DeviceAssignment` directly.
  """

  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical to physical core mapping, represented as a
        rank 3 numpy array. See the description of the `core_assignment`
        property for more details.

    Raises:
      ValueError: If `topology` is not `Topology` object.
      ValueError: If `core_assignment` is not a rank 3 numpy array.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))
    core_assignment = np.asarray(core_assignment, dtype=np.int32)

    self._topology = topology

    if core_assignment.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       f"got shape {core_assignment.shape}")

    # Shape is [num_replicas, num_cores_per_replica, topology_rank].
    self._num_replicas = core_assignment.shape[0]
    self._num_cores_per_replica = core_assignment.shape[1]

    if core_assignment.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "core_assignment.shape[-1] must have size equal to topology "
          f"rank ({topology.mesh_rank}), got "
          f"core_assignment.shape={core_assignment.shape}")

    self._core_assignment = core_assignment
    # Precomputed nested dict: task ordinal -> logical core -> sorted list
    # of replica ids; consulted by `lookup_replicas`.
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        self._core_assignment, topology)

  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology

  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica

  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas

  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`. Maps
      (replica, logical core) pairs to physical topology coordinates.
    """
    return self._core_assignment

  def coordinates(self, replica: int, logical_core: int) -> Tuple:  # pylint:disable=g-bare-generic
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])

  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.
    Returns:
      A sorted list of the replicas that are attached to that task and
      logical_core.
    Raises:
      ValueError: If no replica exists in the task which contains the logical
      core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))

  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_ordinal_at_coordinates(coordinates)

  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[Text] = None) -> Text:
    """Returns the CPU device attached to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)

  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[Text] = None) -> Text:
    """Returns the name of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)

  @staticmethod
  def build(topology: Topology,
            computation_shape: Optional[np.ndarray] = None,
            computation_stride: Optional[np.ndarray] = None,
            num_replicas: int = 1) -> "DeviceAssignment":
    """Builds a `DeviceAssignment`; forwards to `device_assignment()`."""
    return device_assignment(topology, computation_shape, computation_stride,
                             num_replicas)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y mesh, with a fixed Z coordinate.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Note that chip 0 is not included in the output.
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_coord: An integer represents the z-coordinate to use for the chips in the
ring.
Returns:
A list of (x,y,z) triples in ring order.
"""
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
             z_size: int) -> List[Tuple[int, int, int]]:
  """Ring-order of an X by Y by Z mesh.

  Constructs the 3d ring from 2d rings (one per Z layer, via
  `_open_ring_2d`) stacked in the Z dimension and joined in one corner,
  starting at chip (0, 0, 0) and walking back down the (0, 0) column at the
  end. Degenerate meshes with singleton dimensions collapse to lines or
  flat 2d rings, and odd mesh dimensions fall back to row-major order.

  Args:
    x_size: Mesh size in the x-dimension.
    y_size: Mesh size in the y-dimension.
    z_size: Mesh size in the z-dimension.

  Returns:
    A list of (x, y, z) triples in ring order.
  """
  # Two singleton dimensions: the mesh is just a line.
  if x_size == 1 and y_size == 1:
    return [(0, 0, z) for z in range(z_size)]
  if x_size == 1 and z_size == 1:
    return [(0, y, 0) for y in range(y_size)]
  if y_size == 1 and z_size == 1:
    return [(x, 0, 0) for x in range(x_size)]

  # Odd dimensions never happen in practice, so fall back to a plain
  # row-major enumeration rather than building anything optimal.
  if any(size > 1 and size % 2 != 0 for size in (x_size, y_size, z_size)):
    logging.warning("Odd dimension")
    return [(x, y, z)
            for z in range(z_size)
            for y in range(y_size)
            for x in range(x_size)]

  # Exactly one singleton dimension: build a flat 2d ring over the other
  # two, remapping coordinates into (x, y, z) order as needed.
  if z_size == 1:
    return [(0, 0, 0)] + _open_ring_2d(x_size, y_size, 0)
  if y_size == 1:
    return [(0, 0, 0)] + [
        (x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0)]
  if x_size == 1:
    return [(0, 0, 0)] + [
        (x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0)]

  # General case: all dimensions > 1 and even. Stack one 2d ring per z
  # layer, alternating direction so consecutive layers join, then close
  # the loop by walking back down the z axis at (0, 0).
  ring = [(0, 0, 0)]
  for z in range(z_size):
    layer = _open_ring_2d(x_size, y_size, z)
    ring.extend(layer if z % 2 == 0 else reversed(layer))
  ring.extend((0, 0, z) for z in range(z_size - 1, 0, -1))
  return ring
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # The default: the library chooses ring order when the topology permits it
  # and otherwise falls back to mesh order.
  AUTO = 0
  # Form rings for replicas and model-parallel cores; rejected with a
  # ValueError when the topology cannot support ring (3d-tiling) order.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
def device_assignment(
topology: Topology,
computation_shape: Optional[np.ndarray] = None,
computation_stride: Optional[np.ndarray] = None,
num_replicas: int = 1,
device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO
) -> DeviceAssignment:
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology. To
obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor`
here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
device_order_mode: An enum of `DeviceOrderMode` class which indicates
whether to assign devices to form rings or meshes, or let the library to
choose.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError(
f"`topology` is not a Topology object; got {type(topology)}")
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError(
f"computation_shape must have shape [{topology_rank}]; "
f"got {computation_shape.shape}"
)
if computation_stride.shape != (topology_rank,):
raise ValueError(
f"computation_stride must have shape [{topology_rank}]; "
f"got {computation_stride.shape}"
)
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
if topology.missing_devices.size == 0:
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible,
# in order of increasing dimension size. By visiting dimensions
# in increasing size, we assign the most constrained dimension
# first, so we won't make infeasible choices.
#
# As a secondary sort order, visit the last dimension (core index) first,
# then the other dimensions in increasing order. This means we try to use
# both cores on the same chip in preference to two cores on different
# chips. We visit the x dimension first, and the z dimension last, so
# that we prefer to arrange adjacent replicas on the same machine when
# possible.
#
# For example, if num_replicas == 4, we prefer to use a replica_shape of
# (2,1,1,2) over (1,1,2,2).
for x, ni in sorted(((x, ((i + 1) % topology_rank))
for (i, x) in enumerate(replica_counts))):
i = (ni + topology_rank - 1) % topology_rank
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
enable_3d_tiling = (
topology_rank == 4 and
computation_shape[-1] == mesh_shape[-1] # Only handle 3D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
if device_order_mode != DeviceOrderMode.AUTO:
if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
raise ValueError(
"device_order_mode=DeviceOrderMode.RING is not compatible with the "
"3D tiling current topology. Try setting "
"device_order_mode=DeviceOrderMode.AUTO"
)
enable_3d_tiling = device_order_mode == DeviceOrderMode.RING
if enable_3d_tiling:
assignment = []
inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
computation_shape[2])
outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
replica_shape[2])
for replica in range(num_replicas):
outer_x, outer_y, outer_z = outer_ring[replica]
per_replica_assignment = []
for index in range(np.prod(computation_shape)):
inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = outer_z * computation_shape[2] + inner_z
pi = index % mesh_shape[-1]
per_replica_assignment.append([px, py, pz, pi])
assignment.append(per_replica_assignment)
else:
for replica in range(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
# Visit the core number first.
for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
pos.append(t % dim)
t //= dim
replica_pos = np.concatenate([pos[1:], [pos[0]]])
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in range(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
else:
# We have a slice with missing chips. We define a simple assignment by
# ignoring computation stride. This assignment should enable a consistent
# and correct device assignment on degraded slices. It is optimal when
# weights are not sharded. But this device assignment may be sub-optimal for
# other model parallelism scenarios.
assert np.prod(computation_stride) == 1
# Next, we check if we have sufficient devices.
assert num_replicas * np.prod(
computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
# Map replicas to physical devices in task order.
device_coordinates = topology.device_coordinates
assignment = []
devices_per_replica = np.prod(computation_shape)
for rindex in range(num_replicas):
replica_assignment = []
for index in range(devices_per_replica):
logical_id = rindex * devices_per_replica + index
# Pick logical cores in task order
task = logical_id // topology.num_tpus_per_task
device = logical_id % topology.num_tpus_per_task
# Append physical cores to the replica assignment
replica_assignment.append(device_coordinates[task, device, :])
assignment.append(replica_assignment)
return DeviceAssignment(topology, core_assignment=assignment)
| 38.15 | 99 | 0.662236 |
import enum
import math
from typing import List, Optional, Text, Tuple
import numpy as np
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
# Default assignment for a single-core computation: one replica with one
# logical core, placed at topology coordinates (0, 0, 0, 0).
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
task_and_cores_to_replicas = {}
for replica in range(core_assignment.shape[0]):
for logical_core in range(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer the `DeviceAssignment.build()` helper or the module-level
  `device_assignment()` function over constructing a `DeviceAssignment`
  directly.
  """
  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical-to-physical core mapping, represented as a
        rank-3 numpy array of shape
        `[num_replicas, num_cores_per_replica, topology_rank]`.

    Raises:
      ValueError: If `topology` is not a `Topology` object, if
        `core_assignment` is not rank 3, or if its last dimension does not
        match the topology's mesh rank.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))
    core_assignment = np.asarray(core_assignment, dtype=np.int32)
    self._topology = topology
    if core_assignment.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       f"got shape {core_assignment.shape}")
    self._num_replicas = core_assignment.shape[0]
    self._num_cores_per_replica = core_assignment.shape[1]
    if core_assignment.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "core_assignment.shape[-1] must have size equal to topology "
          f"rank ({topology.mesh_rank}), got "
          f"core_assignment.shape={core_assignment.shape}")
    self._core_assignment = core_assignment
    # Precomputed task -> logical core -> sorted replica ids, used by
    # lookup_replicas() to avoid scanning the full assignment per query.
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        self._core_assignment, topology)
  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology
  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica
  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas
  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`.
    """
    return self._core_assignment
  def coordinates(self, replica: int, logical_core: int) -> Tuple:
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])
  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.
    Returns:
      A sorted list of the replicas that are attached to that task and
      logical core.
    Raises:
      ValueError: If no replica exists in the task which contains the
        logical core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))
  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[Text] = None) -> Text:
    """Returns the CPU device attached to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[Text] = None) -> Text:
    """Returns the name of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
  @staticmethod
  def build(topology: Topology,
            computation_shape: Optional[np.ndarray] = None,
            computation_stride: Optional[np.ndarray] = None,
            num_replicas: int = 1) -> "DeviceAssignment":
    """Convenience wrapper around the module-level `device_assignment()`."""
    return device_assignment(topology, computation_shape, computation_stride,
                             num_replicas)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
z_size: int) -> List[Tuple[int, int, int]]:
if x_size == 1 and y_size == 1:
return [(0, 0, i) for i in range(z_size)]
if x_size == 1 and z_size == 1:
return [(0, i, 0) for i in range(y_size)]
if y_size == 1 and z_size == 1:
return [(i, 0, 0) for i in range(x_size)]
# bother to try building something optimal.
if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
y_size % 2 != 0) or (z_size > 1 and
z_size % 2 != 0):
logging.warning("Odd dimension")
ret = []
for z in range(z_size):
for y in range(y_size):
ret.extend((x, y, z) for x in range(x_size))
return ret
# Always start with chip 0.
ret = [(0, 0, 0)]
# Handle the case where one dimension is size 1. We just build a flat, 2d
# ring.
if z_size == 1:
ret.extend(_open_ring_2d(x_size, y_size, 0))
return ret
if y_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
return ret
if x_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
return ret
# Handle the case where all dimensions have size > 1 and even.
ret = [(0, 0, 0)]
for i in range(0, z_size):
r = _open_ring_2d(x_size, y_size, i)
if i % 2 == 0:
ret.extend(r)
else:
ret.extend(reversed(r))
for i in range(z_size - 1, 0, -1):
ret.append((0, 0, i))
return ret
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # By default the mode is set to AUTO, the library will choose to form rings
  # when that is possible.
  AUTO = 0
  # Form rings for replicas and model-parallel cores.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
def device_assignment(
    topology: Topology,
    computation_shape: Optional[np.ndarray] = None,
    computation_stride: Optional[np.ndarray] = None,
    num_replicas: int = 1,
    device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO
) -> DeviceAssignment:
  """Computes a device_assignment of a computation across a TPU topology.

  Args:
    topology: A `Topology` object (or its serialized proto as `bytes`) that
      describes the TPU cluster topology.
    computation_shape: A rank-1 int32 numpy array with size equal to the
      topology rank, describing the shape of one computation instance in the
      TPU mesh. If None, the computation is assumed to occupy a single core.
    computation_stride: A rank-1 int32 numpy array with size equal to the
      topology rank, describing the spacing between adjacent computation
      instances in the TPU mesh. If None, defaults to all ones.
    num_replicas: The number of computation replicas to run.
    device_order_mode: Whether to tile replicas as rings or meshes (or let
      the library decide with AUTO).

  Returns:
    A `DeviceAssignment` mapping each (replica, logical core) pair to
    physical topology coordinates.

  Raises:
    ValueError: If `topology` is not a Topology object, if the shape/stride
      arrays are malformed or non-positive, or if the requested replicas do
      not fit in the topology.
  """
  # Deserialize the Topology proto, if it is a string.
  if isinstance(topology, bytes):
    topology = Topology(serialized=topology)
  if not isinstance(topology, Topology):
    raise ValueError(
        f"`topology` is not a Topology object; got {type(topology)}")
  topology_rank = len(topology.mesh_shape)
  mesh_shape = topology.mesh_shape
  # Default shape/stride are all-ones vectors of the topology rank.
  if computation_shape is None:
    computation_shape = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_shape = np.asarray(computation_shape, dtype=np.int32)
  if computation_stride is None:
    computation_stride = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_stride = np.asarray(computation_stride, dtype=np.int32)
  if computation_shape.shape != (topology_rank,):
    raise ValueError(
        f"computation_shape must have shape [{topology_rank}]; "
        f"got {computation_shape.shape}"
    )
  if computation_stride.shape != (topology_rank,):
    raise ValueError(
        f"computation_stride must have shape [{topology_rank}]; "
        f"got {computation_stride.shape}"
    )
  if any(computation_shape < 1):
    raise ValueError(
        "computation_shape must be positive; got computation_shape={}".format(
            computation_shape))
  if any(computation_stride < 1):
    raise ValueError(
        "computation_stride must be positive; got computation_stride={}".format(
            computation_stride))
  # Computes the physical size of one computation instance.
  computation_footprint = computation_shape * computation_stride
  if any(computation_footprint > mesh_shape):
    raise ValueError(
        "computation footprint {} does not fit in TPU topology shape {}".format(
            computation_footprint, mesh_shape))
  # Computes how many copies of the computation footprint fit in the mesh.
  block_counts = mesh_shape // computation_footprint
  replica_counts = block_counts * computation_stride
  max_replicas = np.prod(replica_counts)
  if num_replicas > max_replicas:
    raise ValueError(
        "requested {} replicas but only {} replicas with shape {} and "
        "computation_stride {} fit in a TPU mesh of shape {}".format(
            num_replicas, max_replicas, computation_shape, computation_stride,
            mesh_shape))
  def ceil_of_ratio(n, m):
    # Integer ceiling division.
    return (n + m - 1) // m
  if topology.missing_devices.size == 0:
    replica_shape = [0] * topology_rank
    if num_replicas > 0:
      remaining_replicas = num_replicas
      remaining_dims = topology_rank
      # Choose dimensions as close to an equal cube as possible,
      # in order of increasing dimension size. By visiting dimensions
      # in increasing size, we assign the most constrained dimension
      # first, so we won't make infeasible choices.
      #
      # As a secondary sort order, visit the last dimension (core index)
      # first, then the other dimensions in increasing order; this prefers
      # using both cores on one chip over two cores on different chips, and
      # prefers arranging adjacent replicas on the same machine.
      for x, ni in sorted(((x, ((i + 1) % topology_rank))
                           for (i, x) in enumerate(replica_counts))):
        i = (ni + topology_rank - 1) % topology_rank
        target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
        replica_shape[i] = min(target_size, x)
        remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
        remaining_dims -= 1
      assert remaining_replicas == 1 and remaining_dims == 0
    # Assigns an offset to each replica such that no two replicas overlap.
    replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
    enable_3d_tiling = (
        topology_rank == 4 and
        computation_shape[-1] == mesh_shape[-1]  # Only handle 3D case.
        and np.prod(computation_stride) == 1  # Ensure no stride.
        and num_replicas == max_replicas)  # Full replication.
    if device_order_mode != DeviceOrderMode.AUTO:
      if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
        raise ValueError(
            "device_order_mode=DeviceOrderMode.RING is not compatible with the "
            "3D tiling current topology. Try setting "
            "device_order_mode=DeviceOrderMode.AUTO"
        )
      enable_3d_tiling = device_order_mode == DeviceOrderMode.RING
    if enable_3d_tiling:
      # Ring order: place replicas along a Hamiltonian ring of the mesh so
      # that neighboring replicas are physically adjacent.
      assignment = []
      inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
                            computation_shape[2])
      outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
                            replica_shape[2])
      for replica in range(num_replicas):
        outer_x, outer_y, outer_z = outer_ring[replica]
        per_replica_assignment = []
        for index in range(np.prod(computation_shape)):
          inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
          px = outer_x * computation_shape[0] + inner_x
          py = outer_y * computation_shape[1] + inner_y
          pz = outer_z * computation_shape[2] + inner_z
          pi = index % mesh_shape[-1]
          per_replica_assignment.append([px, py, pz, pi])
        assignment.append(per_replica_assignment)
    else:
      for replica in range(num_replicas):
        # Chooses a replica number in each axis.
        t = replica
        pos = []
        # Visit the core number first.
        for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
          pos.append(t % dim)
          t //= dim
        replica_pos = np.concatenate([pos[1:], [pos[0]]])
        # Determines where that replica starts in each axis.
        outer = replica_pos // computation_stride
        inner = replica_pos % computation_stride
        replica_offsets[replica, :] = outer * computation_footprint + inner
      # Computes a logical core -> physical core mapping for each replica.
      indices = [
          np.arange(0, computation_shape[i] * computation_stride[i],
                    computation_stride[i]) for i in range(topology_rank)
      ]
      indices = np.concatenate(
          [i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
          axis=-1)
      indices = indices.reshape((-1, topology_rank))
      assignment = indices + replica_offsets[:, np.newaxis, :]
  else:
    # We have a slice with missing chips. We define a simple assignment by
    # ignoring computation stride. This assignment should enable a consistent
    # and correct device assignment on degraded slices. It is optimal when
    # weights are not sharded, but may be sub-optimal for other model
    # parallelism scenarios.
    assert np.prod(computation_stride) == 1
    # Next, we check if we have sufficient devices.
    assert num_replicas * np.prod(
        computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
    # Map replicas to physical devices in task order.
    device_coordinates = topology.device_coordinates
    assignment = []
    devices_per_replica = np.prod(computation_shape)
    for rindex in range(num_replicas):
      replica_assignment = []
      for index in range(devices_per_replica):
        logical_id = rindex * devices_per_replica + index
        # Pick logical cores in task order.
        task = logical_id // topology.num_tpus_per_task
        device = logical_id % topology.num_tpus_per_task
        # Append physical cores to the replica assignment.
        replica_assignment.append(device_coordinates[task, device, :])
      assignment.append(replica_assignment)
  return DeviceAssignment(topology, core_assignment=assignment)
| true | true |
f730660ea29c7c979e17a2993b7cf5dd2dd52d57 | 7,981 | py | Python | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=abstract-class-instantiated,protected-access,attribute-defined-outside-init
from mock import Mock, patch, MagicMock
from moto import mock_kms, mock_ssm
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_not_none,
assert_is_none,
assert_count_equal
)
from requests.exceptions import Timeout as ReqTimeout
from streamalert.alert_processor.outputs.output_base import (
OutputDispatcher,
OutputProperty,
OutputRequestFailure,
StreamAlertOutput
)
from streamalert.alert_processor.outputs.aws import S3Output
from tests.unit.streamalert.alert_processor import (
CONFIG,
KMS_ALIAS,
MOCK_ENV,
REGION,
PREFIX
)
from tests.unit.streamalert.alert_processor.helpers import (
put_mock_ssm_parameters
)
def test_output_property_default():
    """OutputProperty defaults"""
    default_prop = OutputProperty()
    # Every field of a default-constructed OutputProperty should be empty/False
    expected_values = {
        'description': '',
        'value': '',
        'input_restrictions': {' ', ':'},
        'mask_input': False,
        'cred_requirement': False,
    }
    for attribute, expected in expected_values.items():
        assert_equal(getattr(default_prop, attribute), expected)
def test_get_dispatcher_good():
    """StreamAlertOutput - Get Valid Dispatcher"""
    # A registered service name must resolve to a dispatcher class
    assert_is_not_none(StreamAlertOutput.get_dispatcher('aws-s3'))
@patch('logging.Logger.error')
def test_get_dispatcher_bad(log_mock):
    """StreamAlertOutput - Get Invalid Dispatcher"""
    missing_dispatcher = StreamAlertOutput.get_dispatcher('aws-s4')
    assert_is_none(missing_dispatcher)
    # An unknown service should be logged as an error, not raised
    log_mock.assert_called_with('Designated output service [%s] does not exist', 'aws-s4')
@patch.dict('os.environ', MOCK_ENV)
def test_create_dispatcher():
    """StreamAlertOutput - Create Dispatcher"""
    created = StreamAlertOutput.create_dispatcher('aws-s3', CONFIG)
    assert_is_instance(created, S3Output)
def test_user_defined_properties():
    """OutputDispatcher - User Defined Properties"""
    for output in StreamAlertOutput.get_all_outputs().values():
        # The user defined properties should at a minimum contain a descriptor
        user_props = output.get_user_defined_properties()
        assert_is_not_none(user_props.get('descriptor'))
def test_output_loading():
    """OutputDispatcher - Loading Output Classes"""
    # Add new outputs to this collection to make sure they're loaded properly
    expected_outputs = (
        'aws-firehose',
        'aws-lambda',
        'aws-s3',
        'aws-ses',
        'aws-sns',
        'aws-sqs',
        'aws-cloudwatch-log',
        'carbonblack',
        'demisto',
        'github',
        'jira',
        'komand',
        'pagerduty',
        'pagerduty-v2',
        'pagerduty-incident',
        'phantom',
        'slack',
        'teams',
    )
    assert_count_equal(set(StreamAlertOutput.get_all_outputs()), expected_outputs)
@patch.object(OutputDispatcher, '__service__', 'test_service')
class TestOutputDispatcher:
    """Test class for OutputDispatcher"""
    # OutputDispatcher is abstract; tests clear __abstractmethods__ so the
    # base class can be instantiated directly.
    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch.dict('os.environ', MOCK_ENV)
    def setup(self):
        """Setup before each method"""
        self._dispatcher = OutputDispatcher(CONFIG)
        self._descriptor = 'desc_test'
    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch('streamalert.alert_processor.outputs.output_base.OutputCredentialsProvider')
    def test_credentials_provider(self, provider_constructor):
        """OutputDispatcher - Constructor"""
        provider = MagicMock()
        provider_constructor.return_value = provider
        # Constructing a dispatcher should build a credentials provider
        # for this service/region.
        _ = OutputDispatcher(CONFIG)
        provider_constructor.assert_called_with('test_service',
                                                config=CONFIG, defaults=None, region=REGION)
        assert_equal(self._dispatcher._credentials_provider._service_name, 'test_service')
    @patch('logging.Logger.info')
    def test_log_status_success(self, log_mock):
        """OutputDispatcher - Log status success"""
        self._dispatcher._log_status(True, self._descriptor)
        log_mock.assert_called_with('Successfully sent alert to %s:%s',
                                    'test_service', self._descriptor)
    @patch('logging.Logger.error')
    def test_log_status_failed(self, log_mock):
        """OutputDispatcher - Log status failed"""
        self._dispatcher._log_status(False, self._descriptor)
        log_mock.assert_called_with('Failed to send alert to %s:%s',
                                    'test_service', self._descriptor)
    @patch('requests.Response')
    def test_check_http_response(self, mock_response):
        """OutputDispatcher - Check HTTP Response"""
        # Test with a good response code
        mock_response.status_code = 200
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, True)
        # Test with a bad response code
        mock_response.status_code = 440
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, False)
    @mock_ssm
    @mock_kms
    def test_load_creds(self):
        """OutputDispatcher - Load Credentials"""
        # Store encrypted credentials in mocked SSM, then verify the
        # dispatcher can load and decrypt them by descriptor.
        param_name = '/{}/streamalert/outputs/test_service/desc_test'.format(PREFIX)
        creds = {
            'url': 'http://www.foo.bar/test',
            'token': 'token_to_encrypt'
        }
        put_mock_ssm_parameters(param_name, creds, KMS_ALIAS, region=REGION)
        loaded_creds = self._dispatcher._load_creds(self._descriptor)
        assert_is_not_none(loaded_creds)
        assert_equal(len(loaded_creds), 2)
        assert_equal(loaded_creds['url'], creds['url'])
        assert_equal(loaded_creds['token'], creds['token'])
    def test_format_output_config(self):
        """OutputDispatcher - Format Output Config"""
        with patch.object(OutputDispatcher, '__service__', 'slack'):
            props = {'descriptor': OutputProperty('test_desc', 'test_channel')}
            formatted = self._dispatcher.format_output_config(CONFIG, props)
            assert_equal(len(formatted), 2)
            assert_equal(formatted[0], 'unit_test_channel')
            assert_equal(formatted[1], 'test_channel')
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=(ValueError)))
    def test_catch_exceptions_non_default(self):
        """OutputDispatcher - Catch Non Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError))
    @patch.object(OutputDispatcher,
                  '_get_exceptions_to_catch', Mock(return_value=(ValueError, TypeError)))
    def test_catch_exceptions_non_default_tuple(self):
        """OutputDispatcher - Catch Non Default Exceptions Tuple"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError, TypeError))
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=()))
    def test_catch_exceptions_default(self):
        """OutputDispatcher - Catch Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout))
| 36.610092 | 96 | 0.699035 |
from mock import Mock, patch, MagicMock
from moto import mock_kms, mock_ssm
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_not_none,
assert_is_none,
assert_count_equal
)
from requests.exceptions import Timeout as ReqTimeout
from streamalert.alert_processor.outputs.output_base import (
OutputDispatcher,
OutputProperty,
OutputRequestFailure,
StreamAlertOutput
)
from streamalert.alert_processor.outputs.aws import S3Output
from tests.unit.streamalert.alert_processor import (
CONFIG,
KMS_ALIAS,
MOCK_ENV,
REGION,
PREFIX
)
from tests.unit.streamalert.alert_processor.helpers import (
put_mock_ssm_parameters
)
def test_output_property_default():
    """OutputProperty defaults"""
    prop = OutputProperty()
    assert_equal(prop.description, '')
    assert_equal(prop.value, '')
    assert_equal(prop.input_restrictions, {' ', ':'})
    assert_equal(prop.mask_input, False)
    assert_equal(prop.cred_requirement, False)
def test_get_dispatcher_good():
    """StreamAlertOutput - Get Valid Dispatcher"""
    dispatcher = StreamAlertOutput.get_dispatcher('aws-s3')
    assert_is_not_none(dispatcher)
@patch('logging.Logger.error')
def test_get_dispatcher_bad(log_mock):
    """StreamAlertOutput - Get Invalid Dispatcher"""
    dispatcher = StreamAlertOutput.get_dispatcher('aws-s4')
    assert_is_none(dispatcher)
    log_mock.assert_called_with('Designated output service [%s] does not exist', 'aws-s4')
@patch.dict('os.environ', MOCK_ENV)
def test_create_dispatcher():
    """StreamAlertOutput - Create Dispatcher"""
    dispatcher = StreamAlertOutput.create_dispatcher('aws-s3', CONFIG)
    assert_is_instance(dispatcher, S3Output)
def test_user_defined_properties():
    """OutputDispatcher - User Defined Properties"""
    for output in list(StreamAlertOutput.get_all_outputs().values()):
        # The user defined properties should at a minimum contain a descriptor
        assert_is_not_none(output.get_user_defined_properties().get('descriptor'))
def test_output_loading():
    """OutputDispatcher - Loading Output Classes"""
    loaded_outputs = set(StreamAlertOutput.get_all_outputs())
    # Add new outputs to this list to make sure they're loaded properly
    expected_outputs = {
        'aws-firehose',
        'aws-lambda',
        'aws-s3',
        'aws-ses',
        'aws-sns',
        'aws-sqs',
        'aws-cloudwatch-log',
        'carbonblack',
        'demisto',
        'github',
        'jira',
        'komand',
        'pagerduty',
        'pagerduty-v2',
        'pagerduty-incident',
        'phantom',
        'slack',
        'teams'
    }
    assert_count_equal(loaded_outputs, expected_outputs)
@patch.object(OutputDispatcher, '__service__', 'test_service')
class TestOutputDispatcher:
    """Test class for OutputDispatcher"""
    # OutputDispatcher is abstract; tests clear __abstractmethods__ so the
    # base class can be instantiated directly.
    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch.dict('os.environ', MOCK_ENV)
    def setup(self):
        """Setup before each method"""
        self._dispatcher = OutputDispatcher(CONFIG)
        self._descriptor = 'desc_test'
    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch('streamalert.alert_processor.outputs.output_base.OutputCredentialsProvider')
    def test_credentials_provider(self, provider_constructor):
        """OutputDispatcher - Constructor"""
        provider = MagicMock()
        provider_constructor.return_value = provider
        _ = OutputDispatcher(CONFIG)
        provider_constructor.assert_called_with('test_service',
                                                config=CONFIG, defaults=None, region=REGION)
        assert_equal(self._dispatcher._credentials_provider._service_name, 'test_service')
    @patch('logging.Logger.info')
    def test_log_status_success(self, log_mock):
        """OutputDispatcher - Log status success"""
        self._dispatcher._log_status(True, self._descriptor)
        log_mock.assert_called_with('Successfully sent alert to %s:%s',
                                    'test_service', self._descriptor)
    @patch('logging.Logger.error')
    def test_log_status_failed(self, log_mock):
        """OutputDispatcher - Log status failed"""
        self._dispatcher._log_status(False, self._descriptor)
        log_mock.assert_called_with('Failed to send alert to %s:%s',
                                    'test_service', self._descriptor)
    @patch('requests.Response')
    def test_check_http_response(self, mock_response):
        """OutputDispatcher - Check HTTP Response"""
        # Test with a good response code
        mock_response.status_code = 200
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, True)
        # Test with a bad response code
        mock_response.status_code = 440
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, False)
    @mock_ssm
    @mock_kms
    def test_load_creds(self):
        """OutputDispatcher - Load Credentials"""
        # Store encrypted credentials in mocked SSM, then verify the
        # dispatcher can load and decrypt them by descriptor.
        param_name = '/{}/streamalert/outputs/test_service/desc_test'.format(PREFIX)
        creds = {
            'url': 'http://www.foo.bar/test',
            'token': 'token_to_encrypt'
        }
        put_mock_ssm_parameters(param_name, creds, KMS_ALIAS, region=REGION)
        loaded_creds = self._dispatcher._load_creds(self._descriptor)
        assert_is_not_none(loaded_creds)
        assert_equal(len(loaded_creds), 2)
        assert_equal(loaded_creds['url'], creds['url'])
        assert_equal(loaded_creds['token'], creds['token'])
    def test_format_output_config(self):
        """OutputDispatcher - Format Output Config"""
        with patch.object(OutputDispatcher, '__service__', 'slack'):
            props = {'descriptor': OutputProperty('test_desc', 'test_channel')}
            formatted = self._dispatcher.format_output_config(CONFIG, props)
            assert_equal(len(formatted), 2)
            assert_equal(formatted[0], 'unit_test_channel')
            assert_equal(formatted[1], 'test_channel')
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=(ValueError)))
    def test_catch_exceptions_non_default(self):
        """OutputDispatcher - Catch Non Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError))
    @patch.object(OutputDispatcher,
                  '_get_exceptions_to_catch', Mock(return_value=(ValueError, TypeError)))
    def test_catch_exceptions_non_default_tuple(self):
        """OutputDispatcher - Catch Non Default Exceptions Tuple"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError, TypeError))
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=()))
    def test_catch_exceptions_default(self):
        """OutputDispatcher - Catch Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout))
| true | true |
f7306785f709b39146aac848874a28763d800c0c | 20,872 | py | Python | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Money developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-bitcoinlib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
# Python 2/3 compatibility shims: ``bchr`` builds a single-byte bytes value
# from an int and ``bord`` converts a bytes element to an int; ``long`` only
# exists on Python 2. Use version_info rather than a string comparison on
# sys.version, which breaks lexicographically (e.g. "10" < "3").
bchr = chr
bord = ord
if sys.version_info[0] > 2:
    long = int
    bchr = lambda x: bytes([x])  # bytes([n]) is the py3 single-byte constructor
    bord = lambda x: x  # indexing bytes already yields an int on py3
import struct
from .bignum import bn2vch
# Maximum size in bytes of a single pushed stack element.
MAX_SCRIPT_ELEMENT_SIZE = 520
# Maps CScriptOp values to their mnemonic names; presumably populated later
# in the file alongside the opcode definitions — not visible here.
OPCODE_NAMES = {}
def hash160(s):
    """Return RIPEMD-160(SHA-256(s)), the standard Bitcoin HASH160."""
    sha_digest = sha256(s)
    return hashlib.new('ripemd160', sha_digest).digest()
# Cache of interned CScriptOp singletons, indexed by opcode value; filled
# lazily by CScriptOp.__new__.
_opcode_instances = []
class CScriptOp(int):
    """A single script opcode"""
    __slots__ = []
    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op, returning bytes"""
        # The opcode form depends only on the payload length: direct push
        # for < 0x4c bytes, otherwise OP_PUSHDATA1/2/4 with a length prefix.
        if len(d) < 0x4c:
            return b'' + bchr(len(d)) + d # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")
    @staticmethod
    def encode_op_n(n):
        """Encode a small integer op, returning an opcode"""
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n-1)
    def decode_op_n(self):
        """Decode a small integer opcode, returning an integer"""
        if self == OP_0:
            return 0
        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)
        return int(self - OP_1+1)
    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        # 0x51-0x60 are OP_1 through OP_16; 0 is OP_0.
        if 0x51 <= self <= 0x60 or self == 0:
            return True
        else:
            return False
    def __str__(self):
        return repr(self)
    def __repr__(self):
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self
    # Opcodes are interned: CScriptOp(n) always returns the same instance,
    # backed by the module-level _opcode_instances cache (filled in order,
    # hence the len() == n assertion).
    def __new__(cls, n):
        try:
            return _opcode_instances[n]
        except IndexError:
            assert len(_opcode_instances) == n
            _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
            return _opcode_instances[n]
# Populate opcode instance table up front so that every CScriptOp(n) below
# (and elsewhere) returns an already-interned singleton; CScriptOp.__new__
# requires creation in strict 0..0xff order.
for n in range(0xff+1):
    CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
    """Base class for exceptions raised while decoding a CScript."""
class CScriptTruncatedPushDataError(CScriptInvalidError):
    """Invalid pushdata due to truncation.

    The partial push payload that was read is kept in ``self.data``.
    """

    def __init__(self, msg, data):
        super(CScriptTruncatedPushDataError, self).__init__(msg)
        self.data = data
class CScriptNum():
    """Minimal CScriptNum: a script-format number wrapper.

    Used, e.g., for blockchain heights in coinbase scripts (BIP34).
    """

    def __init__(self, d=0):
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize ``obj.value`` as a length-prefixed push of its
        little-endian sign-and-magnitude script encoding."""
        value = obj.value
        if value == 0:
            # Zero encodes as an empty push.
            return b''
        negative = value < 0
        magnitude = abs(value)
        payload = bytearray()
        while magnitude:
            payload.append(magnitude & 0xff)
            magnitude >>= 8
        if payload[-1] & 0x80:
            # High bit of the top byte is taken by the magnitude: append a
            # dedicated sign byte.
            payload.append(0x80 if negative else 0x00)
        elif negative:
            # High bit is free: fold the sign into the top byte.
            payload[-1] |= 0x80
        return bytes(bchr(len(payload)) + payload)
class CScript(bytes):
    """Serialized script

    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.

    iter(script) however does iterate by opcode.
    """
    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes: opcodes become a single byte, CScriptNums
        # and ints become minimally-encoded pushes, and raw bytes become a
        # PUSHDATA of the appropriate form.
        if isinstance(other, CScriptOp):
            other = bchr(other)
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                # Zero is pushed with its dedicated opcode, not as data.
                other = bchr(CScriptOp(OP_0))
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # Small ints use the OP_0..OP_16 opcodes.
                other = bytes(bchr(CScriptOp.encode_op_n(other)))
            elif other == -1:
                other = bytes(bchr(OP_1NEGATE))
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            other = CScriptOp.encode_op_pushdata(other)
        return other
    def __add__(self, other):
        # Do the coercion outside of the try block so that errors in it are
        # noticed.
        other = self.__coerce_instance(other)
        try:
            # bytes.__add__ always returns bytes instances unfortunately
            return CScript(super(CScript, self).__add__(other))
        except TypeError:
            raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError
    def __new__(cls, value=b''):
        # bytes/bytearray are taken verbatim; any other iterable has each
        # element coerced into its script encoding.
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super(CScript, cls).__new__(cls, value)
        else:
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
    def raw_iter(self):
        """Raw iteration

        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)

        Raises CScriptInvalidError (or its CScriptTruncatedPushDataError
        subclass) on malformed pushes.
        """
        i = 0
        while i < len(self):
            sop_idx = i  # byte index where this op starts
            opcode = bord(self[i])
            i += 1
            if opcode > OP_PUSHDATA4:
                # Plain (non-push) opcode: no data payload.
                yield (opcode, None, sop_idx)
            else:
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Opcode byte itself is the payload length (0..0x4b).
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    # 1-byte length follows.
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = bord(self[i])
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    # 2-byte little-endian length follows.
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    # 4-byte little-endian length follows.
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
                    i += 4
                else:
                    assert False # shouldn't happen
                data = bytes(self[i:i+datasize])
                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize
                yield (opcode, data, sop_idx)
    def __iter__(self):
        """'Cooked' iteration

        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.

        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)
                if opcode.is_small_int():
                    # Small-int opcodes are decoded to their integer value.
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)
    def __repr__(self):
        def _repr(o):
            if isinstance(o, bytes):
                return "x('%s')" % hexlify(o).decode('ascii')
            else:
                return repr(o)
        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                # Include the partial data so the repr shows what was read.
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                # Append the (possibly error-marked) op even when breaking out.
                if op is not None:
                    ops.append(op)
        return "CScript([%s])" % ', '.join(ops)
    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.

        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.

        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    # Preceding small-int opcode gives the real key count.
                    n += opcode.decode_op_n()
                else:
                    # Conservative worst case: 20 keys per CHECKMULTISIG.
                    n += 20
            lastOpcode = opcode
        return n
# Signature hash types (nHashType): the low 5 bits select which outputs the
# signature commits to; SIGHASH_ANYONECANPAY is a flag OR'd on top.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase

    Returns a copy of ``script`` with every opcode-aligned occurrence of the
    byte sequence ``sig`` removed.
    """
    result = b''
    emit_from = 0
    matched_prev = True  # nothing to flush before the first opcode
    for (opcode, data, op_start) in script.raw_iter():
        if not matched_prev:
            # Previous op was kept: flush everything up to this op's start.
            result += script[emit_from:op_start]
            emit_from = op_start
        # Skip (i.e. delete) this op if sig starts exactly at its first byte.
        matched_prev = script[op_start:op_start + len(sig)] == sig
    if not matched_prev:
        result += script[emit_from:]
    return CScript(result)
def SignatureHash(script, txTo, inIdx, hashtype):
    """Consensus-correct SignatureHash

    Computes the legacy (pre-segwit) signature hash for input ``inIdx`` of
    ``txTo``, signed against scriptCode ``script`` with the given ``hashtype``.

    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug: out-of-range indexes yield the magic "hash of one"
    value plus an error string rather than raising. (inIdx is *not* checked
    for validity)
    """
    # uint256 value 1, little-endian: returned for out-of-range indexes.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    # Work on a copy so the caller's transaction is left untouched.
    txtmp = CTransaction(txTo)
    # Blank every scriptSig, then place the scriptCode (with any
    # OP_CODESEPARATORs removed) into the input being signed.
    for txin in txtmp.vin:
        txin.scriptSig = b''
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # Commit to no outputs; other inputs' sequence numbers are zeroed so
        # they are not committed to either.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        # Commit only to the output whose index matches this input's.
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        for i in range(outIdx):
            # Preceding outputs become "null" placeholders (value -1).
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # Commit only to this input; others can be added or changed freely.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    s = txtmp.serialize_without_witness()
    s += struct.pack(b"<I", hashtype)
    # Renamed from `hash`, which shadowed the builtin of the same name.
    sighash = hash256(s)
    return (sighash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
    """Compute the BIP143 (segwit v0) signature hash for input ``inIdx``.

    ``script`` is the scriptCode being signed and ``amount`` the value of the
    output being spent, both of which the BIP143 digest commits to.
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    # hashPrevouts commits to every outpoint unless ANYONECANPAY is set.
    if not (hashtype & SIGHASH_ANYONECANPAY):
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
    # hashSequence commits to all nSequence values only for plain SIGHASH_ALL
    # (no ANYONECANPAY, not SINGLE, not NONE).
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))
    # hashOutputs: all outputs for ALL; only the matching-index output for
    # SINGLE; stays 0 for NONE or SINGLE with no matching output.
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    # Assemble the BIP143 preimage in its fixed field order, then double-SHA256.
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)  # value of the output being spent
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
| 30.118326 | 146 | 0.613549 |
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
for n in range(0xff+1):
CScriptOp(n)
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
class CScriptNum():
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
class CScript(bytes):
@classmethod
def __coerce_instance(cls, other):
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
other = self.__coerce_instance(other)
try:
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
| true | true |
f730697874d74284e3f36d2b3355caa3e355cf54 | 2,770 | py | Python | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
    """Represents an internal wikilink, like ``[[Foo|Bar]]``."""

    def __init__(self, title, text=None):
        super(Wikilink, self).__init__()
        self._title = title
        self._text = text

    def __unicode__(self):
        if self.text is None:
            return "[[" + str(self.title) + "]]"
        return "[[" + str(self.title) + "|" + str(self.text) + "]]"

    def __children__(self):
        yield self.title
        if self.text is not None:
            yield self.text

    def __strip__(self, normalize, collapse):
        # The displayed text takes precedence over the title when present.
        target = self.title if self.text is None else self.text
        return target.strip_code(normalize, collapse)

    def __showtree__(self, write, get, mark):
        write("[[")
        get(self.title)
        if self.text is not None:
            write("    | ")
            mark()
            get(self.text)
        write("]]")

    @property
    def title(self):
        """The title of the linked page, as a :py:class:`~.Wikicode` object."""
        return self._title

    @title.setter
    def title(self, value):
        self._title = parse_anything(value)

    @property
    def text(self):
        """The text to display (if any), as a :py:class:`~.Wikicode` object."""
        return self._text

    @text.setter
    def text(self, value):
        self._text = None if value is None else parse_anything(value)
| 33.373494 | 79 | 0.654152 |
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
    """Node for an internal wikilink such as ``[[Foo|Bar]]``."""

    def __init__(self, title, text=None):
        """Initialize with a link target and optional display text."""
        super(Wikilink, self).__init__()
        self._title = title
        self._text = text

    def __unicode__(self):
        """Serialize the link back to ``[[title]]`` or ``[[title|text]]``."""
        if self.text is None:
            return "[[" + str(self.title) + "]]"
        return "[[" + str(self.title) + "|" + str(self.text) + "]]"

    def __children__(self):
        """Yield the title, followed by the display text when present."""
        yield self.title
        if self.text is not None:
            yield self.text

    def __strip__(self, normalize, collapse):
        """Strip code from the display text, falling back to the title."""
        source = self.title if self.text is None else self.text
        return source.strip_code(normalize, collapse)

    def __showtree__(self, write, get, mark):
        """Write this link's debug tree via the supplied callbacks."""
        write("[[")
        get(self.title)
        if self.text is not None:
            write(" | ")
            mark()
            get(self.text)
        write("]]")

    @property
    def title(self):
        """The linked page's title, as a Wikicode object."""
        return self._title

    @title.setter
    def title(self, value):
        self._title = parse_anything(value)

    @property
    def text(self):
        """The optional display text, as a Wikicode object or None."""
        return self._text

    @text.setter
    def text(self, value):
        self._text = None if value is None else parse_anything(value)
| true | true |
f7306a7014d8a082178a163194f35de56aafd413 | 1,195 | py | Python | addons/log2report_csv.py | geoffrey0822/multilevellabel_NN | faec3303dac2376d6e8a761632aca31bc3868413 | [
"Apache-2.0"
] | null | null | null | addons/log2report_csv.py | geoffrey0822/multilevellabel_NN | faec3303dac2376d6e8a761632aca31bc3868413 | [
"Apache-2.0"
] | null | null | null | addons/log2report_csv.py | geoffrey0822/multilevellabel_NN | faec3303dac2376d6e8a761632aca31bc3868413 | [
"Apache-2.0"
] | null | null | null | import os,sys
import numpy as np
import csv
input_file=sys.argv[1]
output_file=sys.argv[2]
header='I0315'
if len(sys.argv)>3:
header=sys.argv[3]
keys=[]
epoch_data=[]
epoch_row=[]
with open(input_file,'r') as f:
for ln in f:
line=ln.rstrip('\n')
if line.startswith(header) and 'Test net output' in line:
fields=line.split(':')
spaces=fields[len(fields)-2].split(' ')
num=int(spaces[len(spaces)-1][1:])
attr=fields[len(fields)-1]
if num==0 and epoch_row!=[]:
epoch_data.append(epoch_row)
epoch_row=[]
key=''
if 'loss' in attr:
key=attr.split('(')[0].split('=')[0]
loss=float(attr.split('(')[0].split('=')[1])
epoch_row.append(loss)
else:
key=attr.split('=')[0]
epoch_row.append(float(attr.split('=')[1]))
if len(epoch_data)==0 or epoch_data==[]:
keys.append(key)
print keys
with open(output_file,'wb')as f:
csv_writer=csv.writer(f,delimiter=',')
csv_writer.writerow(keys)
csv_writer.writerows(epoch_data)
print 'finished' | 29.146341 | 65 | 0.543096 | import os,sys
import numpy as np
import csv
input_file=sys.argv[1]
output_file=sys.argv[2]
header='I0315'
if len(sys.argv)>3:
header=sys.argv[3]
keys=[]
epoch_data=[]
epoch_row=[]
with open(input_file,'r') as f:
for ln in f:
line=ln.rstrip('\n')
if line.startswith(header) and 'Test net output' in line:
fields=line.split(':')
spaces=fields[len(fields)-2].split(' ')
num=int(spaces[len(spaces)-1][1:])
attr=fields[len(fields)-1]
if num==0 and epoch_row!=[]:
epoch_data.append(epoch_row)
epoch_row=[]
key=''
if 'loss' in attr:
key=attr.split('(')[0].split('=')[0]
loss=float(attr.split('(')[0].split('=')[1])
epoch_row.append(loss)
else:
key=attr.split('=')[0]
epoch_row.append(float(attr.split('=')[1]))
if len(epoch_data)==0 or epoch_data==[]:
keys.append(key)
print keys
with open(output_file,'wb')as f:
csv_writer=csv.writer(f,delimiter=',')
csv_writer.writerow(keys)
csv_writer.writerows(epoch_data)
print 'finished' | false | true |
f7306a936639c0c25548020fae55cc2780443200 | 935 | py | Python | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | 2 | 2021-05-21T10:57:44.000Z | 2021-05-29T17:02:54.000Z | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | null | null | null | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | 1 | 2021-05-28T12:45:38.000Z | 2021-05-28T12:45:38.000Z |
#i/o
import requests, os, random
from glob import glob
from collections import Counter
from collections import defaultdict
from PIL import Image
from skimage.io import imread
import pickle
#numerica
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras import layers
from skimage.transform import resize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA #for data dimensionality reduction / viz.
# plots
# from sklearn.metrics import ConfusionMatrixDisplay
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns #extended functionality / style to matplotlib plots
from matplotlib.offsetbox import OffsetImage, AnnotationBbox #for visualizing image thumbnails plotted as markers | 29.21875 | 113 | 0.840642 |
import requests, os, random
from glob import glob
from collections import Counter
from collections import defaultdict
from PIL import Image
from skimage.io import imread
import pickle
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras import layers
from skimage.transform import resize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.offsetbox import OffsetImage, AnnotationBbox | true | true |
f7306be7655fa4665aa16310d89615d9df796e9f | 207 | py | Python | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 61 | 2020-09-26T19:57:44.000Z | 2022-03-09T18:51:44.000Z | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 88 | 2020-09-19T20:00:27.000Z | 2021-10-31T09:41:57.000Z | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 218 | 2020-09-20T08:18:03.000Z | 2022-01-30T23:13:16.000Z | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
count = 0
diff = x ^ y
while diff != 0:
count += 1
diff &= (diff-1)
return count
| 23 | 53 | 0.454106 | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
count = 0
diff = x ^ y
while diff != 0:
count += 1
diff &= (diff-1)
return count
| true | true |
f7306cae98bbc792ae9e49ea47e2232347c28149 | 3,218 | py | Python | homeassistant/util/logging.py | GotoCode/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 11 | 2017-09-25T13:11:33.000Z | 2020-05-16T21:54:28.000Z | homeassistant/util/logging.py | GotoCode/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 125 | 2018-12-11T07:31:20.000Z | 2021-07-27T08:20:03.000Z | homeassistant/util/logging.py | y1ngyang/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 3 | 2018-05-22T18:52:01.000Z | 2019-07-18T21:30:45.000Z | """Logging utilities."""
import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
    """Logging filter that masks a sensitive substring in log messages."""

    def __init__(self, text):
        """Remember the sensitive string that must be masked."""
        super().__init__()
        self.text = text

    def filter(self, record):
        """Mask the sensitive string in the record; never drop records."""
        masked = record.msg.replace(self.text, '*******')
        record.msg = masked
        return True
# pylint: disable=invalid-name
class AsyncHandler(object):
    """Logging handler wrapper that runs a synchronous handler off-loop.

    Records emitted from the event loop (or any other thread) are queued
    and drained by a dedicated worker thread, so the wrapped handler's
    potentially blocking I/O never stalls the asyncio event loop.
    """

    def __init__(self, loop, handler):
        """Initialize async logging handler wrapper.

        loop: the asyncio event loop that owns the internal queue.
        handler: the synchronous logging handler doing the real work.
        """
        self.handler = handler
        self.loop = loop
        self._queue = asyncio.Queue(loop=loop)
        # Worker thread that drains the queue into the wrapped handler.
        self._thread = threading.Thread(target=self._process)

        # Delegate from handler
        self.setLevel = handler.setLevel
        self.setFormatter = handler.setFormatter
        self.addFilter = handler.addFilter
        self.removeFilter = handler.removeFilter
        self.filter = handler.filter
        self.flush = handler.flush
        self.handle = handler.handle
        self.handleError = handler.handleError
        self.format = handler.format

        self._thread.start()

    def close(self):
        """Wrap close to handler."""
        # A None record is the sentinel telling the worker to shut down.
        self.emit(None)

    @asyncio.coroutine
    def async_close(self, blocking=False):
        """Close the handler.

        When blocking=True, will wait till closed.
        """
        yield from self._queue.put(None)

        if blocking:
            # Cooperatively yield until the worker has drained the queue
            # and exited after consuming the sentinel.
            while self._thread.is_alive():
                yield from asyncio.sleep(0, loop=self.loop)

    def emit(self, record):
        """Queue a record, whether called on or off the event loop thread."""
        ident = self.loop.__dict__.get("_thread_ident")

        # inside eventloop
        if ident is not None and ident == threading.get_ident():
            self._queue.put_nowait(record)
        # from a thread/executor
        else:
            # Queue.put_nowait is not thread-safe; marshal onto the loop.
            self.loop.call_soon_threadsafe(self._queue.put_nowait, record)

    def __repr__(self):
        """Return the string names."""
        return str(self.handler)

    def _process(self):
        """Worker-thread loop: drain the queue into the wrapped handler."""
        while True:
            record = run_coroutine_threadsafe(
                self._queue.get(), self.loop).result()

            if record is None:
                # Shutdown sentinel: close the wrapped handler and exit.
                self.handler.close()
                return

            self.handler.emit(record)

    def createLock(self):
        """Ignore lock stuff."""
        # The single-consumer queue hand-off already serializes access,
        # so the standard logging.Handler locking is unnecessary.
        pass

    def acquire(self):
        """Ignore lock stuff."""
        pass

    def release(self):
        """Ignore lock stuff."""
        pass

    @property
    def level(self):
        """Wrap property level to handler."""
        return self.handler.level

    @property
    def formatter(self):
        """Wrap property formatter to handler."""
        return self.handler.formatter

    @property
    def name(self):
        """Wrap property get_name to handler."""
        return self.handler.get_name()

    @name.setter
    def name(self, name):
        """Wrap property set_name to handler."""
        self.handler.name = name
| 26.377049 | 74 | 0.598198 | import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
    """Scrub a configured secret substring out of every log message."""

    def __init__(self, text):
        """Store the secret text to scrub from messages."""
        super().__init__()
        self.text = text

    def filter(self, record):
        """Replace the secret with asterisks and always keep the record."""
        sanitized = record.msg.replace(self.text, '*******')
        record.msg = sanitized
        return True
class AsyncHandler(object):
    """Wrap a synchronous logging handler so emitting never blocks the loop.

    Records are queued on the event loop and drained by a worker thread,
    which performs the (potentially blocking) handler I/O.
    """

    def __init__(self, loop, handler):
        # loop: asyncio event loop that owns the internal queue.
        # handler: the synchronous logging handler doing the real work.
        self.handler = handler
        self.loop = loop
        self._queue = asyncio.Queue(loop=loop)
        # Worker thread draining the queue into the wrapped handler.
        self._thread = threading.Thread(target=self._process)

        # Expose the wrapped handler's standard logging.Handler API directly.
        self.setLevel = handler.setLevel
        self.setFormatter = handler.setFormatter
        self.addFilter = handler.addFilter
        self.removeFilter = handler.removeFilter
        self.filter = handler.filter
        self.flush = handler.flush
        self.handle = handler.handle
        self.handleError = handler.handleError
        self.format = handler.format

        self._thread.start()

    def close(self):
        """Queue the None sentinel that makes the worker shut down."""
        self.emit(None)

    @asyncio.coroutine
    def async_close(self, blocking=False):
        """Close the handler; with blocking=True wait until the worker exits."""
        yield from self._queue.put(None)

        if blocking:
            # Cooperatively yield until the worker thread has finished.
            while self._thread.is_alive():
                yield from asyncio.sleep(0, loop=self.loop)

    def emit(self, record):
        """Queue a record from the event-loop thread or any other thread."""
        ident = self.loop.__dict__.get("_thread_ident")

        # Called from inside the event loop: safe to touch the queue directly.
        if ident is not None and ident == threading.get_ident():
            self._queue.put_nowait(record)
        # Called from another thread/executor: Queue.put_nowait is not
        # thread-safe, so marshal the call onto the loop first.
        else:
            self.loop.call_soon_threadsafe(self._queue.put_nowait, record)

    def __repr__(self):
        """Represent the wrapper by the handler it delegates to."""
        return str(self.handler)

    def _process(self):
        """Worker-thread loop: drain the queue into the wrapped handler."""
        while True:
            record = run_coroutine_threadsafe(
                self._queue.get(), self.loop).result()

            if record is None:
                # Shutdown sentinel: close the wrapped handler and exit.
                self.handler.close()
                return

            self.handler.emit(record)

    def createLock(self):
        """No-op: the queue hand-off already serializes handler access."""
        pass

    def acquire(self):
        """No-op: see createLock."""
        pass

    def release(self):
        """No-op: see createLock."""
        pass

    @property
    def level(self):
        """The wrapped handler's level."""
        return self.handler.level

    @property
    def formatter(self):
        """The wrapped handler's formatter."""
        return self.handler.formatter

    @property
    def name(self):
        """The wrapped handler's name."""
        return self.handler.get_name()

    @name.setter
    def name(self, name):
        self.handler.name = name
| true | true |
f7306e7cc53d34293df4261830687777126b1853 | 107,459 | py | Python | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HUAWEI-CLOCK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-CLOCK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:43:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
PhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "PhysicalIndex")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier, Unsigned32, Gauge32, IpAddress, ObjectIdentity, NotificationType, ModuleIdentity, Counter32, Counter64, Integer32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Counter32", "Counter64", "Integer32", "Bits", "iso")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
hwClockMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186))
hwClockMIB.setRevisions(('2014-11-29 00:00', '2014-11-03 00:00', '2014-08-13 00:00', '2014-04-21 00:00', '2014-01-07 00:00', '2013-11-12 00:00', '2013-10-31 00:00', '2013-05-23 00:00', '2013-05-14 00:00', '2013-03-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hwClockMIB.setRevisionsDescriptions(('Modify alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockCesDcrMasterPwChange, hwClockCesDcrLockFail,hwClockCesDcrLockFailResume,hwClockSsmPktLos,hwClockSsmPktLosResume and add mib hwClockCesDcrSlot,hwClockCesDcrCard,hwClockCesDcrDomain,hwClockCesDcrOldMasterPwName,hwClockCesDcrNewMasterPwName,hwClockCesDcrLockState,hwClockCesMode', 'Add alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockClusterTopoFail, hwClockClusterTopoFailResume and table hwClockClusterTopoTable.', 'Edit the range of hwClockCesAcrDomianInfoDomain.', 'Add mib hwClockBitsCfgFrameFormat, hwClockAttributeLtiSquelch and hwClockAttributeInputThreshold.', 'Edit the range of hwClockCesAcrRecoveryDomain.', 'Re-edit the range of some nodes.', 'Re-edit the default values of hwClockAttributeTodProtocol node.', 'Some errors have been modified in current version and some nodes have been added into the current version.',))
if mibBuilder.loadTexts: hwClockMIB.setLastUpdated('201411290000Z')
if mibBuilder.loadTexts: hwClockMIB.setOrganization('Huawei Technologies Co.,Ltd. ')
if mibBuilder.loadTexts: hwClockMIB.setContactInfo("Huawei Industrial Base Bantian, Longgang Shenzhen 518129 People's Republic of China Website: http://www.huawei.com Email: support@huawei.com ")
if mibBuilder.loadTexts: hwClockMIB.setDescription('The MIB contains objects of module clock management and 1588 interface.')
hwClockManageObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1))
hwClockGlobalObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1))
hwClockSourceEthClkEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setDescription('The flag indicates that the ethernet clock is globally enabled.')
hwClockSourceSsmUnknown = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone('dnu')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setDescription('The quality level of unknown SSM.')
hwClockSourceSysClkWorkMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trace", 1), ("hold", 2), ("freeoscillate", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setDescription('The work mode of system clock.')
hwClockSourceForceCloseEnableStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setDescription('The enable status of export forced close.')
hwClockSourceSsmControl = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("extend", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockSourceHoldMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hold24Hours", 1), ("holdForever", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceHoldMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceHoldMode.setDescription('The hold mode of clock source.')
hwClockSourceFreqCheckEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 7), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setDescription('The enable flag of frequency check.')
hwClockSourceFreqCheckLeftRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setDescription('The left range of frequency check, unit in 0.01ppm.')
hwClockSourceFreqCheckRightRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setDescription('The right range of frequency check, unit in 0.01ppm.')
hwClockSourceRetrieveMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("retrieve", 1), ("noRetrieve", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setDescription('The retrieve mode of clock source.')
hwClockTimeUsedSource = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("srcDclsTimeBit0", 1), ("srcDclsTimeBit1", 2), ("src1ppsTodBit0", 3), ("src1ppsTodBit1", 4), ("srcPtp", 5), ("srcFreeRun", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockTimeUsedSource.setStatus('current')
if mibBuilder.loadTexts: hwClockTimeUsedSource.setDescription('The clock time used source.')
hwClockExtTimeInputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeInputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeInputType.setDescription('The input time type of clock extern time.')
hwClockExtTimeOutputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setDescription('The output time type of clock extern time.')
hwClockAlarmThresholdFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 92)).clone(92)).setUnits('100ppb').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setDescription('The Threshold of clock alarm.')
hwClockFrequencyOffsetMax = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 15), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setDescription('The max offset of clock frequency.')
hwClockFrequencyOffsetMin = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 16), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setDescription('The min offset of clock frequency.')
hwClockFrequencyOffsetMean = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 17), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setDescription('The mean offset of clock frequency.')
hwClockFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 18), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffset.setDescription('The current offset of clock frequency.')
hwClockSourceSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2), )
if mibBuilder.loadTexts: hwClockSourceSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelTable.setDescription('The system clock source selection table.')
hwClockSourceSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelType"))
if mibBuilder.loadTexts: hwClockSourceSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelEntry.setDescription('The entry of system clock source selection table.')
hwClockSourceSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setDescription('The chassis index.')
hwClockSourceSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)))
if mibBuilder.loadTexts: hwClockSourceSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelType.setDescription('The select type.')
hwClockSourceSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auto", 1), ("manual", 2), ("force", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelMode.setDescription('The mode of clock source selection.')
hwClockSourceSelSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setDescription('The source ID of the clock traced.')
hwClockSourceCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3), )
if mibBuilder.loadTexts: hwClockSourceCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgTable.setDescription('The clock source config table.')
hwClockSourceCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setDescription('The entry of clock source config table.')
hwClockCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)))
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setDescription('The clock source index.')
hwClockCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceId.setDescription('The clock source ID.')
hwClockCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setDescription('The clock source description.')
hwClockCfgWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgWtrTime.setDescription('The waiting for restore time of clock source.')
hwClockCfgBadDetect = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBadDetect.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBadDetect.setDescription('The enable status of clock source bad detecting.')
hwClockCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockCfgBits0Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setDescription('The priority of BITS0 clock source.')
hwClockCfgBits1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setDescription('The priority of BITS1 clock source.')
hwClockCfgSystemLockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setDescription('The lock out of system clock source.')
hwClockCfgBits0LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setDescription('The lock out of BITS0 clock source.')
hwClockCfgBits1LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setDescription('The lock out of BITS1 clock source.')
hwClockCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ssmPrc", 1), ("ssmSsut", 2), ("ssmSsul", 3), ("ssmSec", 4), ("ssmDnu", 5), ("ssmUnknown", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockCfgSourceSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setDescription('The set mode of SSM.')
hwClockCfgExportEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setDescription('The enable status of clock source export.')
hwClockCfgSwiEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 16), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setDescription('he enable status of clock source switch.')
hwClockCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("abnormal", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceState.setDescription('The state of clock source.')
hwClockCfgSsmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("qlDnu", 1), ("qlSec", 2), ("qlSsub", 3), ("qlSsua", 4), ("qlPrc", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setDescription('The SSM quality level threshold of clock source.')
hwClockCfgSourceS1Id = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setDescription('The S1 byte of the clock.')
hwClockCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setDescription('The result of frequency check, unit in 0.01ppm.')
hwClockCfgHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 18))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setDescription('The hold off time of clock, unit in 100ms.')
hwClockCfgPriRvtEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 22), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setDescription('The enable status of switch according priority.')
hwClockCfgSwitchCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noSwitch", 1), ("switch", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setDescription('The condition of clock switch.')
hwClockCfgClkSourceType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("bits", 1), ("line", 2), ("inner", 3), ("system", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setDescription('The type of clock source.')
hwClockBitsCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4), )
if mibBuilder.loadTexts: hwClockBitsCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTable.setDescription('The clock bits congfig table.')
hwClockBitsCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsIndex"))
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setDescription('The entry of clock bits congfig table.')
hwClockBitsCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockBitsCfgBitsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setDescription('The index of BITS clock.')
hwClockBitsCfgName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgName.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgName.setDescription('The name of clock.')
hwClockBitsCfgBitsPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("portRj45", 1), ("portSMB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setDescription('The BITS port type.')
hwClockBitsCfgBitsType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("type2Mbps", 0), ("type2Mhz", 1), ("typeDclsTime", 2), ("type1ppsTod", 3), ("none", 4), ("type1544Mbps", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setDescription('The BITS type.')
hwClockBitsCfgDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inAndOut", 3), ("none", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setDescription('The direction of BITS.')
hwClockBitsCfgRecvSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setDescription('The received SA bit.')
hwClockBitsCfgSendSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setDescription('The sent SA bit.')
hwClockBitsCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setDescription('The S1 byte of forcing out.')
hwClockBitsCfgSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setDescription('The SA bit of SSM information.')
hwClockBitsCfgInputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setDescription('The input mode of clock source.')
hwClockBitsCfgOutputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setDescription('The output mode of clock source.')
hwClockBitsCfgInvalidCond = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("no", 1), ("ais", 2), ("lof", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setDescription('The invalid condition of clock source.')
hwClockBitsCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setDescription('The clock source ID.')
hwClockBitsCfgTodSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setDescription('The tod signal of clock source.')
hwClockBitsCfgFrameFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("pcm30nocrc", 1), ("pcm30crc", 2), ("pcm31nocrc", 3), ("pcm31crc", 4))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setDescription('Encoding type and frame check format of the extern clock port.')
# hwClockPortCfgTable (...186.1.5): per-interface clock port configuration,
# one row per interface, indexed by hwClockPortCfgIfIndex.
hwClockPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5), )
if mibBuilder.loadTexts: hwClockPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgTable.setDescription('The clock port config table.')
hwClockPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockPortCfgIfIndex"))
if mibBuilder.loadTexts: hwClockPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgEntry.setDescription('The entry of clock port config table.')
# Index column: ifIndex of the clock port.
hwClockPortCfgIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setDescription('The interface index.')
# Read-write clock priorities (0..255) for the left/right frames.
hwClockPortCfgLeftFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setDescription('The clock priority of left frame.')
hwClockPortCfgRightFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setDescription('The clock priority of right frame.')
# Forced output S1 byte; range includes -1 (presumably "not forced" — vendor
# MIB does not say; TODO confirm against HUAWEI-CLOCK-MIB text).
hwClockPortCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setDescription('The S1 byte of forcing out.')
# hwClockLineClkCfgTable (...186.1.6): line-clock source selection per
# chassis/slot, indexed by (ChassisIndex, SlotIndex).
hwClockLineClkCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6), )
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setDescription('The line clock config table.')
hwClockLineClkCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSlotIndex"))
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setDescription('The entry of line clock config table.')
# Index columns: chassis physical index and slot number (1..200).
hwClockLineClkCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setDescription('The chassis index.')
hwClockLineClkCfgSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200)))
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setDescription('The slot index of the line clock.')
# Card/port selected to supply the line clock. The description strings below
# contain vendor-MIB typos ("witch is seleced") preserved verbatim — they are
# runtime DESCRIPTION text from HUAWEI-CLOCK-MIB, not code comments.
hwClockLineClkCfgCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setDescription('The card index witch is seleced to provide line clock.')
hwClockLineClkCfgPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setDescription('The port index witch is seleced to provide line clock.')
# S1 (SSM) byte: received value is read-only, sent value is configurable.
hwClockLineClkCfgRecvS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setDescription('The S1 byte value received.')
hwClockLineClkCfgSendS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setDescription('The S1 byte value sent.')
# 'Description.' below is a vendor-MIB placeholder; kept verbatim.
hwClockLineCfgSoureId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setDescription('Description.')
# hwClockTrapOid (...186.1.7): subtree of objects that exist only as varbind
# payloads of the notifications defined under hwClockNotifications below —
# every scalar here has max-access "accessiblefornotify".
hwClockTrapOid = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7))
# Generic clock-source/trap payload objects (source names, lock mode, state).
hwClockLastSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockLastSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockLastSourceName.setDescription('The last clock source name.')
hwClockCurSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCurSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockCurSourceName.setDescription('The current clock source name.')
hwClockSourceOldLockMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setDescription('The old lock mode of clock source.')
hwClockChassisId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockChassisId.setStatus('current')
if mibBuilder.loadTexts: hwClockChassisId.setDescription('The chassis ID.')
hwClockOldSourceState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("wtr", 3), ("holdoff", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockOldSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockOldSourceState.setDescription('The old state of clock source.')
hwClockPllId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockPllId.setStatus('current')
if mibBuilder.loadTexts: hwClockPllId.setDescription('The id of pll.')
hwClockAttributeOutValue = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockAttributeOutValue.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutValue.setDescription('The current output value.')
# CES ACR (adaptive clock recovery) trap payload objects (sub-ids 8..13).
hwClockCesAcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 8), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 9), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 10), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 11), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setDescription('The master pw SerialPort name of CES ACR old clock source.')
hwClockCesAcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 12), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setDescription('The master pw SerialPort name of CES ACR new clock source.')
hwClockCesAcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 13), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockState.setDescription('The lock state of the CES ACR.')
# CES DCR (differential clock recovery) trap payload objects (sub-ids 14..19),
# mirroring the ACR set above.
hwClockCesDcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 14), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrSlot.setDescription('The slot ID of CES DCR clock source.')
hwClockCesDcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 15), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrCard.setDescription('The card ID of CES DCR clock source.')
hwClockCesDcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 16), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrDomain.setDescription('The recovery domain value of CES DCR clock source.')
hwClockCesDcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 17), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setDescription('The master pw SerialPort name of CES DCR old clock source.')
hwClockCesDcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 18), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setDescription('The master pw SerialPort name of CES DCR new clock source.')
hwClockCesDcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 19), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockState.setDescription('The lock state of the CES DCR.')
# hwClockNotifications (...186.1.8): NOTIFICATION-TYPE definitions. Each
# .setObjects(...) lists the varbinds carried in the trap, resolved by
# (module, symbol) name. Sub-ids are non-contiguous (9, 10, 18..21 absent
# here); gaps come from the vendor MIB, not from truncation of this file.
hwClockNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8))
hwClockSourceSwitch = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcSelMode"))
if mibBuilder.loadTexts: hwClockSourceSwitch.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSwitch.setDescription('Clock source switch notification.')
hwClockSourceSysClkLockModeChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 2)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setDescription('The lock mode of system clock source change notification.')
# State-change / resume pairs (3/4) share identical varbind lists.
hwClockSourceStateChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 3)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateChange.setDescription('The state of clock source change notification.')
hwClockSourceStateResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 4)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateResume.setDescription('The state of clock source resume notification.')
# "frequnce" typos below are vendor DESCRIPTION text, preserved verbatim.
hwClockSourceFreqCheck = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 5)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setDescription('The result of clock source frequnce check abnormal notification.')
hwClockSourceOutputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 6)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setDescription('The SSM of output below threshold notification.')
hwClockNotInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 7)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockNotInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockNotInLockedMode.setDescription('The work mode of system clock is not in locked mode.')
hwClockInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockInLockedMode.setDescription('The work mode of system clock is in locked mode.')
hwClockSourceFailed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 11)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceFailed.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFailed.setDescription('The state of clock source is failed.')
hwClockSourceValid = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 12)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceValid.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceValid.setDescription('The state of clock source is valid.')
hwClockSourceFreqCheckResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setDescription('The result of clock source frequnce check normal notification.')
# NOTE(review): varbind order here differs from hwClockSourceOutputBelowThreshold
# (hwClockCurSourceName last instead of third) — as emitted from the vendor MIB.
hwClockSourceOutputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setDescription('The SSM of output above threshold notification.')
# CES ACR notifications (15..17).
hwClockCesAcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setDescription('CES ACR master PW status change.')
hwClockCesAcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setDescription('CES ACR clock source lock fail.')
hwClockCesAcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setDescription('CES ACR clock source lock fail resume.')
# Cluster topology notifications (22/23).
hwClockClusterTopoFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 22)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFail.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFail.setDescription('Clock cluster inter-chassis synchronization topo compute failed.')
hwClockClusterTopoFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 23)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setDescription('Clock inter-chassis synchronization topo compute successfully.')
# Input-SSM threshold and SSM-packet-loss notifications (24..27).
hwClockSourceInputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 24)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setDescription('The SSM of input below threshold notification.')
hwClockSourceInputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 25)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setDescription('The SSM of input above or equal threshold notification.')
hwClockSsmPktLos = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 26)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLos.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLos.setDescription('The ssm packet of clock source is lost.')
hwClockSsmPktLosResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 27)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setDescription('The ssm packet of clock source is normal.')
# CES DCR notifications (28..30), mirroring the ACR set.
hwClockCesDcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 28)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setDescription('CES DCR master PW status change.')
hwClockCesDcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 29)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setDescription('CES DCR clock source lock fail.')
hwClockCesDcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 30)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setDescription('CES DCR clock source lock fail resume.')
# hwClockAttributeTable (...186.1.9): per-chassis global clock attributes
# (run mode, SSM control, WTR/hold-off timers, SSM thresholds), indexed by
# hwClockAttributeChassisIndex. Trailing .clone(X) on a syntax object encodes
# the SMIv2 DEFVAL.
hwClockAttributeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9), )
if mibBuilder.loadTexts: hwClockAttributeTable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTable.setDescription('The clock Attribute table.')
hwClockAttributeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockAttributeChassisIndex"))
if mibBuilder.loadTexts: hwClockAttributeEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeEntry.setDescription('The entry of clock Attribute table.')
hwClockAttributeChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setDescription('The chassis index.')
hwClockAttributeSysClkRunMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("freeRun", 1), ("hold", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setDescription('The run mode of system clock.')
# NOTE(review): on/off enumerations in this MIB use on=0, off=1.
hwClockAttributeSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockAttributeFreqCheckEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setDescription('The enable flag of frequency check.')
# DEFVAL: retrieve (0).
hwClockAttributeRetrieveMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("retrieve", 0), ("noRetrieve", 1))).clone('retrieve')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setDescription('The retrieve mode of system clock.')
# Wait-to-restore time 0..12 (DEFVAL 5) and hold-off 300..1800 (DEFVAL 1000);
# units are not stated in this generated code — see the vendor MIB.
hwClockAttributeWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setDescription('The time waiting for retrieve.')
hwClockAttributeHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(300, 1800)).clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setDescription('The holdoff-time when the system source is lost.')
# SSM quality-level enumerations (prc/ssua/ssub/sec/dnu etc.).
hwClockAttributeOutThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setDescription('The Threshold of out put.')
hwClockAttributeSysMaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setDescription('The max ssm of system out put.')
hwClockAttribute2M1MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setDescription('The max ssm of 2msync-1 out put.')
hwClockAttribute2M2MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setDescription('The max ssm of 2msync-2 out put.')
# Read-only: current lock mode of the system clock (same enum as the
# hwClockSourceOldLockMode trap object).
hwClockAttributeSysClkLockMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setDescription('The Lock mode of system clock.')
hwClockAttributeExtendSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setDescription('The flag whether Extend SSM is concerned with the clock source selection.')
hwClockAttributeInternalClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setDescription('The internal clockid of the device.')
# 1PPS+TOD protocol selection; DEFVAL ubx (2).
hwClockAttributeTodProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3), ("ccsa", 4))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setDescription('1pps bits tod protocol.')
# EnabledStatus with DEFVAL 2 (conventionally "disabled" — TODO confirm
# against the EnabledStatus TC imported earlier in this file).
hwClockAttributeLtiSquelch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 16), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setDescription('The frequency signal output squelch flag upon the frequency loss.')
# Input squelch threshold; DEFVAL dnu (15).
hwClockAttributeInputThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setDescription('The squelch threshold of the external input source.')
# hwClockSrcSelTable (...186.1.10): clock source selection per chassis and
# per PLL (system / sync2M1 / sync2M2), indexed by (ChassisIndex, SelType).
hwClockSrcSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10), )
if mibBuilder.loadTexts: hwClockSrcSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelTable.setDescription('The system clock source selection table.')
hwClockSrcSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelType"))
if mibBuilder.loadTexts: hwClockSrcSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelEntry.setDescription('The entry of system clock source selection table.')
hwClockSrcSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setDescription('The chassis index.')
# Second index: which PLL the row configures (same enum as hwClockPllId).
hwClockSrcSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3))))
if mibBuilder.loadTexts: hwClockSrcSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelType.setDescription('The PLL Id.')
# Selection mode (auto/manual/force) and the source name used for
# manual/force selection; trace-source name is read-only status.
hwClockSrcSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("auto", 0), ("manual", 1), ("force", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelMode.setDescription('The mode of clock source selection.')
hwClockSrcSelSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setDescription('The name of clock source for selection.')
hwClockSrcTraceSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setDescription('The name of trace source.')
# --- hwClockSrcCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.11) ---
# Per-clock-source configuration/state table, indexed by
# (chassis, source type [bits/ptp/interface], source index).
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); regenerate from the
# MIB rather than hand-editing — identifiers must match exportSymbols().
hwClockSrcCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11), )
if mibBuilder.loadTexts: hwClockSrcCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgTable.setDescription('The clock source config table.')
hwClockSrcCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceTypeIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setDescription('The entry of clock source config table.')
# Index columns (not directly accessible except where noted).
hwClockSrcCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setDescription('The chassis index.')
hwClockSrcCfgSourceTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("bits", 1), ("ptp", 2), ("interface", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setDescription('The type of clock source.')
hwClockSrcCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 3), Integer32())
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setDescription('The index of clock source.')
# Data columns: description, enable flag, priorities, SSM quality, state,
# frequency-check result, SSM timing, SA-bit, clock-id and row status.
hwClockSrcCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setDescription('The description of clock source.')
hwClockSrcCfgClkEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setDescription('The enable flag of clock source.')
hwClockSrcCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockSrcCfg2M1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setDescription('The priority of 2msync-1 clock source.')
hwClockSrcCfg2M2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setDescription('The priority of 2msync-2 clock source.')
hwClockSrcCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockSrcCfgSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setDescription('The set mode of SSM.')
hwClockSrcCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setDescription('The state of clock source.')
hwClockSrcCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setDescription('The result of frequency check.')
# 'Description.' texts below are placeholders inherited from the MIB itself.
hwClockSrcCfgSsmInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(512, 8000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setDescription('Description.')
hwClockSrcCfgSsmTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 32000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setDescription('Description.')
hwClockSrcCfgSabit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 99))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8), ("invalid", 99))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setDescription('The SA bit of E1 Port SSM information.')
hwClockSrcCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setDescription('The clockid of clock source.')
hwClockSrcCfgClockIdSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setDescription('The set mode of clockid.')
hwClockSrcCfgOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16, 99))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16), ("invalid", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setDescription('Current output ssm.')
hwClockSrcCfgOutClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 99))).clone(namedValues=NamedValues(("clockid0", 0), ("clockid1", 1), ("clockid2", 2), ("clockid3", 3), ("clockid4", 4), ("clockid5", 5), ("clockid6", 6), ("clockid7", 7), ("clockid8", 8), ("clockid9", 9), ("clockid10", 10), ("clockid11", 11), ("clockid12", 12), ("clockid13", 13), ("clockid14", 14), ("clockid15", 15), ("notsupport", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setDescription('Current output clockid.')
hwClockSrcCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 20), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setDescription('The row status.')
hwClockSrcCfgFreqDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 21), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setDescription('Freqdeviation value of clock source.')
hwClockSrcCfgPhyState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("cardTypeNotSupport", 0), ("slave", 1), ("master", 2), ("speedNotSupport", 3), ("portDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setDescription('The PHY clock state of ports.')
hwClockSrcCfgNegotiationSlave = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notSupport", 0), ("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setDescription('Set PHY clock state to slave.')
# --- hwClockCesAcrPortCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.12) ---
# Per-port CES ACR/DCR clock configuration, indexed by
# (parent ifIndex, channel id, ifIndex).
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); regenerate from the
# MIB rather than hand-editing — identifiers must match exportSymbols().
hwClockCesAcrPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12), )
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setDescription('The CES ACR clock port config table.')
hwClockCesAcrPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrParentIfIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrChannelId"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrIfIndex"))
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setDescription('The entry of CES ACR clock port config table.')
hwClockCesAcrParentIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setDescription('Indicates the index of the parent interface.')
hwClockCesAcrChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setDescription('Indicates the interface index.')
hwClockCesAcrPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrPortName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortName.setDescription('Port name.')
hwClockCesAcrChannelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setDescription('Indicates the interface type. The type can be E1/CE1 or T1/CT1.')
hwClockCesAcrSourceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("slave", 2), ("recoveryDomain", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setDescription('Indicates the clock mode of the interface. master: indicates that the clock works in master mode and uses the internal clock signal. slave: indicates that the clock works in slave mode and uses the line clock signal. recovery-domain: indicates that the clock works in slave mode and uses the recovery domain clock signal. ')
hwClockCesAcrRecoveryDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setDescription('Indicates the clock recovery domain of the interface. DEFVAL is 0.')
hwClockCesAcrPwDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setDescription('Indicates the clock PW domain of the interface. DEFVAL is 0.')
hwClockCesAcrPortCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setDescription('The row status.')
hwClockCesAcrMasterDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setDescription('Indicates the clock master domain of the interface. DEFVAL is 0.')
hwClockCesMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acr", 1), ("dcr", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesMode.setDescription('Indicates the clock CES recovery mode of the interface. DEFVAL is 0.')
# --- hwClockCesAcrCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.13) ---
# CES ACR recovered-clock-source configuration, indexed by
# (slot, card, recovery domain 1..8).
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); regenerate from the
# MIB rather than hand-editing — identifiers must match exportSymbols().
hwClockCesAcrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13), )
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setDescription('The CES ACR clock source config table.')
hwClockCesAcrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgDomain"))
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setDescription('The entry of CES ACR clock source config table.')
hwClockCesAcrCfgSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCfgCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrCfgDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrCfgDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setDescription('The description of clock source.')
hwClockCesAcrCfgSyncEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setDescription('The enable flag of CES ACR clock source.')
hwClockCesAcrCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setDescription('The priority of system CES ACR clock source. DEFVAL is 0.')
hwClockCesAcrCfgSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setDescription('The SSM quality of CES ACR clock source.')
hwClockCesAcrCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setDescription('The clockid of clock source. DEFVAL is 0.')
hwClockCesAcrCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setDescription('The state of CES ACR clock source.')
hwClockCesAcrCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setDescription('The result of CES ACR clock source frequency check.')
hwClockCesAcrCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setDescription('The row status.')
# --- hwClockCesAcrDomainInfoTable (OID 1.3.6.1.4.1.2011.5.25.186.1.14) ---
# Read-only CES ACR recovery-domain status, indexed by (slot, card, domain).
# NOTE: column identifiers spell "Domian" — that misspelling comes from the
# MIB definition and must be kept; it is referenced by setIndexNames() and
# by the module's exportSymbols() call.
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); do not hand-edit.
hwClockCesAcrDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14), )
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setDescription('The CES ACR domain infomation table.')
hwClockCesAcrDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoDomain"))
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setDescription('The entry of CES ACR domain infomation table.')
hwClockCesAcrDomianInfoSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrDomianInfoCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomianInfoDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)))
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrDomianInfoMasterPwName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setDescription('Port name.')
hwClockCesAcrDomianInfoChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrDomianInfoState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wait", 2), ("lock", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setDescription('The state of CES ACR clock source.')
# --- hwClockClusterTopoTable (OID 1.3.6.1.4.1.2011.5.25.186.1.15) ---
# Inter-chassis clock-sync topology status, indexed by
# (sync type, topo type, link type); all columns are read-only.
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); do not hand-edit.
hwClockClusterTopoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15), )
if mibBuilder.loadTexts: hwClockClusterTopoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoTable.setDescription('The CES ACR domain infomation table.')
hwClockClusterTopoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"))
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setDescription('Description.')
hwClockClusterSyncType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("frequency", 1), ("time", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterSyncType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterSyncType.setDescription('The type of clock inter-chassis sync.')
hwClockClusterTopoType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("interlink", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoType.setDescription('The type of clock inter-chassis topo..')
hwClockClusterTopoLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("bits", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setDescription('The type of clock inter-chassis link.')
hwClockClusterTopoStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fail", 1), ("success", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setDescription('The status of clock inter-chassis topo.')
# --- Conformance section (OID 1.3.6.1.4.1.2011.5.25.186.10) ---
# Module compliance plus object/notification groups. The version guards exist
# because pysnmp >= 4.4.1 makes setStatus() return the object (fluent API),
# while older releases return None, so reassignment is only safe on new pysnmp.
# NOTE: machine-generated pysnmp code (HUAWEI-CLOCK-MIB); do not hand-edit.
hwClockConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10))
hwClockSourceCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1))
hwClockSourceCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockManageSysGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSourceCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockNotificationsGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSysSelGroup"), ("HUAWEI-CLOCK-MIB", "hwClockTrapOidGroup"), ("HUAWEI-CLOCK-MIB", "hwClockLineCfgGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSourceCompliance = hwClockSourceCompliance.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCompliance.setDescription('The compliance of clock MIB.')
hwClockSourceGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2))
hwClockManageSysGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkWorkMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckEnable"), ("HUAWEI-CLOCK-MIB", "hwClockSourceHoldMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmControl"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckRightRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckLeftRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceRetrieveMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceForceCloseEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmUnknown"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeOutputType"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeInputType"), ("HUAWEI-CLOCK-MIB", "hwClockTimeUsedSource"), ("HUAWEI-CLOCK-MIB", "hwClockSourceEthClkEnable"), ("HUAWEI-CLOCK-MIB", "hwClockAlarmThresholdFrequencyOffset"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMax"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMin"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMean"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffset"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockManageSysGroup = hwClockManageSysGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockManageSysGroup.setDescription('The manage group.')
hwClockSysSelGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 9)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSelMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSelSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSysSelGroup = hwClockSysSelGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSysSelGroup.setDescription('The system selection group.')
hwClockSourceCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 10)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCfgPriRvtEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwitchCondition"), ("HUAWEI-CLOCK-MIB", "hwClockCfgWtrTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBadDetect"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsm"), ("HUAWEI-CLOCK-MIB", "hwClockCfgExportEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwiEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockCfgFreqCheckResult"), ("HUAWEI-CLOCK-MIB", "hwClockCfgHoldOffTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemPriority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsmSetMode"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceS1Id"), ("HUAWEI-CLOCK-MIB", "hwClockCfgClkSourceType"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSsmThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemLockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgTodSignal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSourceCfgGroup = hwClockSourceCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgGroup.setDescription('The clock source group.')
hwClockPortCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockPortCfgLeftFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgRightFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgForceOutS1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockPortCfgGroup = hwClockPortCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgGroup.setDescription('The port config of clock source group.')
hwClockBitsCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockBitsCfgRecvSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSendSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgForceOutS1"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgName"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsType"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgDirection"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgOutputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInvalidCond"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsPortType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockBitsCfgGroup = hwClockBitsCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgGroup.setDescription('The BITS clock source group.')
hwClockTrapOidGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockTrapOidGroup = hwClockTrapOidGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockTrapOidGroup.setDescription('The clock trap group.')
hwClockNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSwitch"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheck"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceValid"), ("HUAWEI-CLOCK-MIB", "hwClockInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFail"), ("HUAWEI-CLOCK-MIB", "hwClockNotInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkLockModeChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFailed"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLos"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLosResume"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockNotificationsGroup = hwClockNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockNotificationsGroup.setDescription('This is the group of clock notification.')
hwClockLineCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgRecvS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSendS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgCardId"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgPortId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockLineCfgGroup = hwClockLineCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgGroup.setDescription('The line clock group..')
mibBuilder.exportSymbols("HUAWEI-CLOCK-MIB", PYSNMP_MODULE_ID=hwClockMIB, hwClockCfgSourceS1Id=hwClockCfgSourceS1Id, hwClockBitsCfgFrameFormat=hwClockBitsCfgFrameFormat, hwClockCfgSourceId=hwClockCfgSourceId, hwClockAttributeSsmControl=hwClockAttributeSsmControl, hwClockCesAcrDomianInfoDomain=hwClockCesAcrDomianInfoDomain, hwClockSrcCfgNegotiationSlave=hwClockSrcCfgNegotiationSlave, hwClockCurSourceName=hwClockCurSourceName, hwClockSourceInputBelowThresholdResume=hwClockSourceInputBelowThresholdResume, hwClockSrcCfgFreqCheckResult=hwClockSrcCfgFreqCheckResult, hwClockBitsCfgTodSignal=hwClockBitsCfgTodSignal, hwClockSrcCfgSabit=hwClockSrcCfgSabit, hwClockSrcSelSrcName=hwClockSrcSelSrcName, hwClockCesAcrCfgCard=hwClockCesAcrCfgCard, hwClockCesAcrCfgSystemPriority=hwClockCesAcrCfgSystemPriority, hwClockAttributeSysClkRunMode=hwClockAttributeSysClkRunMode, hwClockCesAcrParentIfIndex=hwClockCesAcrParentIfIndex, hwClockCesAcrPortCfgTable=hwClockCesAcrPortCfgTable, hwClockSourceEthClkEnable=hwClockSourceEthClkEnable, hwClockLineClkCfgSlotIndex=hwClockLineClkCfgSlotIndex, hwClockInLockedMode=hwClockInLockedMode, hwClockCesAcrMasterDomain=hwClockCesAcrMasterDomain, hwClockCesAcrCfgSyncEnable=hwClockCesAcrCfgSyncEnable, hwClockPortCfgLeftFramePri=hwClockPortCfgLeftFramePri, hwClockCfgBadDetect=hwClockCfgBadDetect, hwClockSrcCfgSourceTypeIndex=hwClockSrcCfgSourceTypeIndex, hwClockSrcCfgSystemPriority=hwClockSrcCfgSystemPriority, hwClockCesAcrCfgRowStatus=hwClockCesAcrCfgRowStatus, hwClockCfgSourceIndex=hwClockCfgSourceIndex, hwClockSrcCfgClockId=hwClockSrcCfgClockId, hwClockSourceSwitch=hwClockSourceSwitch, hwClockLineClkCfgTable=hwClockLineClkCfgTable, hwClockSrcCfg2M2Priority=hwClockSrcCfg2M2Priority, hwClockSourceValid=hwClockSourceValid, hwClockCesMode=hwClockCesMode, hwClockCfgClkSourceType=hwClockCfgClkSourceType, hwClockBitsCfgDirection=hwClockBitsCfgDirection, hwClockBitsCfgInvalidCond=hwClockBitsCfgInvalidCond, hwClockCfgSwitchCondition=hwClockCfgSwitchCondition, 
hwClockCesAcrCfgDescr=hwClockCesAcrCfgDescr, hwClockAttribute2M1MaxOutSsm=hwClockAttribute2M1MaxOutSsm, hwClockCesAcrDomianInfoMasterPwName=hwClockCesAcrDomianInfoMasterPwName, hwClockAlarmThresholdFrequencyOffset=hwClockAlarmThresholdFrequencyOffset, hwClockCesAcrCfgSlot=hwClockCesAcrCfgSlot, hwClockChassisId=hwClockChassisId, hwClockGlobalObjects=hwClockGlobalObjects, hwClockBitsCfgSendSaBit=hwClockBitsCfgSendSaBit, hwClockSourceFreqCheckLeftRange=hwClockSourceFreqCheckLeftRange, hwClockSrcCfgFreqDeviation=hwClockSrcCfgFreqDeviation, hwClockSourceCompliances=hwClockSourceCompliances, hwClockClusterTopoType=hwClockClusterTopoType, hwClockSrcCfgSourceSsm=hwClockSrcCfgSourceSsm, hwClockCesAcrDomianInfoSlot=hwClockCesAcrDomianInfoSlot, hwClockSourceCfgGroup=hwClockSourceCfgGroup, hwClockCesDcrOldMasterPwName=hwClockCesDcrOldMasterPwName, hwClockOldSourceState=hwClockOldSourceState, hwClockSourceCompliance=hwClockSourceCompliance, hwClockMIB=hwClockMIB, hwClockLineClkCfgRecvS1=hwClockLineClkCfgRecvS1, hwClockPortCfgIfIndex=hwClockPortCfgIfIndex, hwClockCfgSourceDescr=hwClockCfgSourceDescr, hwClockExtTimeInputType=hwClockExtTimeInputType, hwClockCfgSwiEnableStatus=hwClockCfgSwiEnableStatus, hwClockLineCfgGroup=hwClockLineCfgGroup, hwClockManageObjects=hwClockManageObjects, hwClockBitsCfgSaBit=hwClockBitsCfgSaBit, hwClockSourceFreqCheckRightRange=hwClockSourceFreqCheckRightRange, hwClockSrcSelMode=hwClockSrcSelMode, hwClockClusterTopoTable=hwClockClusterTopoTable, hwClockFrequencyOffset=hwClockFrequencyOffset, hwClockManageSysGroup=hwClockManageSysGroup, hwClockSourceFreqCheckEnable=hwClockSourceFreqCheckEnable, hwClockAttribute2M2MaxOutSsm=hwClockAttribute2M2MaxOutSsm, hwClockCesAcrCfgFreqCheckResult=hwClockCesAcrCfgFreqCheckResult, hwClockCesAcrDomainInfoTable=hwClockCesAcrDomainInfoTable, hwClockCesAcrDomianInfoChannelId=hwClockCesAcrDomianInfoChannelId, hwClockSrcCfgClockIdSetMode=hwClockSrcCfgClockIdSetMode, hwClockSourceSelType=hwClockSourceSelType, 
hwClockCfgBits0Priority=hwClockCfgBits0Priority, hwClockSrcCfgSsmSetMode=hwClockSrcCfgSsmSetMode, hwClockClusterTopoFail=hwClockClusterTopoFail, hwClockPllId=hwClockPllId, hwClockSrcCfg2M1Priority=hwClockSrcCfg2M1Priority, hwClockSourceHoldMode=hwClockSourceHoldMode, hwClockSrcSelTable=hwClockSrcSelTable, hwClockLineClkCfgCardId=hwClockLineClkCfgCardId, hwClockSsmPktLosResume=hwClockSsmPktLosResume, hwClockSourceSelChassisIndex=hwClockSourceSelChassisIndex, hwClockAttributeExtendSsmControl=hwClockAttributeExtendSsmControl, hwClockSourceOldLockMode=hwClockSourceOldLockMode, hwClockPortCfgRightFramePri=hwClockPortCfgRightFramePri, hwClockCesAcrChannelId=hwClockCesAcrChannelId, hwClockCesAcrCfgSsm=hwClockCesAcrCfgSsm, hwClockSourceSelMode=hwClockSourceSelMode, hwClockSrcCfgSourceDescr=hwClockSrcCfgSourceDescr, hwClockTrapOid=hwClockTrapOid, hwClockAttributeEntry=hwClockAttributeEntry, hwClockCesAcrRecoveryDomain=hwClockCesAcrRecoveryDomain, hwClockCesAcrSlot=hwClockCesAcrSlot, hwClockFrequencyOffsetMax=hwClockFrequencyOffsetMax, hwClockSrcCfgRowStatus=hwClockSrcCfgRowStatus, hwClockCfgSourceState=hwClockCfgSourceState, hwClockBitsCfgOutputMode=hwClockBitsCfgOutputMode, hwClockBitsCfgBitsIndex=hwClockBitsCfgBitsIndex, hwClockFrequencyOffsetMin=hwClockFrequencyOffsetMin, hwClockCfgChassisIndex=hwClockCfgChassisIndex, hwClockLastSourceName=hwClockLastSourceName, hwClockCesAcrNewMasterPwName=hwClockCesAcrNewMasterPwName, hwClockAttributeHoldOffTime=hwClockAttributeHoldOffTime, hwClockClusterTopoLinkType=hwClockClusterTopoLinkType, hwClockCesAcrPortName=hwClockCesAcrPortName, hwClockPortCfgForceOutS1=hwClockPortCfgForceOutS1, hwClockSourceInputBelowThreshold=hwClockSourceInputBelowThreshold, hwClockSrcCfgTable=hwClockSrcCfgTable, hwClockCesAcrChannelType=hwClockCesAcrChannelType, hwClockBitsCfgSourceId=hwClockBitsCfgSourceId, hwClockSourceSelSourceId=hwClockSourceSelSourceId, hwClockAttributeLtiSquelch=hwClockAttributeLtiSquelch, 
hwClockSourceSysClkWorkMode=hwClockSourceSysClkWorkMode, hwClockCesDcrSlot=hwClockCesDcrSlot, hwClockCfgBits1LockOut=hwClockCfgBits1LockOut, hwClockSrcCfgClkEnable=hwClockSrcCfgClkEnable, hwClockConformance=hwClockConformance, hwClockSysSelGroup=hwClockSysSelGroup, hwClockNotifications=hwClockNotifications, hwClockSourceSelEntry=hwClockSourceSelEntry, hwClockCesAcrDomain=hwClockCesAcrDomain, hwClockCesDcrMasterPwChange=hwClockCesDcrMasterPwChange, hwClockCesAcrCard=hwClockCesAcrCard, hwClockSrcCfgPhyState=hwClockSrcCfgPhyState, hwClockSourceCfgTable=hwClockSourceCfgTable, hwClockNotInLockedMode=hwClockNotInLockedMode, hwClockSourceSsmUnknown=hwClockSourceSsmUnknown, hwClockBitsCfgChassisIndex=hwClockBitsCfgChassisIndex, hwClockCesDcrLockFail=hwClockCesDcrLockFail, hwClockCesAcrPortCfgEntry=hwClockCesAcrPortCfgEntry, hwClockPortCfgTable=hwClockPortCfgTable, hwClockSourceSsmControl=hwClockSourceSsmControl, hwClockCesDcrCard=hwClockCesDcrCard, hwClockSrcTraceSrcName=hwClockSrcTraceSrcName, hwClockSrcCfgSourceState=hwClockSrcCfgSourceState, hwClockBitsCfgForceOutS1=hwClockBitsCfgForceOutS1, hwClockCfgSourceSsm=hwClockCfgSourceSsm, hwClockBitsCfgBitsPortType=hwClockBitsCfgBitsPortType, hwClockLineClkCfgPortId=hwClockLineClkCfgPortId, hwClockCesAcrLockFail=hwClockCesAcrLockFail, hwClockSrcSelChassisIndex=hwClockSrcSelChassisIndex, hwClockAttributeWtrTime=hwClockAttributeWtrTime, hwClockAttributeFreqCheckEnable=hwClockAttributeFreqCheckEnable, hwClockCfgPriRvtEnableStatus=hwClockCfgPriRvtEnableStatus, hwClockLineClkCfgSendS1=hwClockLineClkCfgSendS1, hwClockSourceStateResume=hwClockSourceStateResume, hwClockSrcCfgChassisIndex=hwClockSrcCfgChassisIndex, hwClockCesAcrLockFailResume=hwClockCesAcrLockFailResume, hwClockCesAcrDomianInfoState=hwClockCesAcrDomianInfoState, hwClockExtTimeOutputType=hwClockExtTimeOutputType, hwClockSourceOutputBelowThreshold=hwClockSourceOutputBelowThreshold, hwClockCesAcrMasterPwChange=hwClockCesAcrMasterPwChange, 
hwClockAttributeInputThreshold=hwClockAttributeInputThreshold, hwClockCesAcrCfgSourceState=hwClockCesAcrCfgSourceState, hwClockSrcCfgEntry=hwClockSrcCfgEntry, hwClockCfgHoldOffTime=hwClockCfgHoldOffTime, hwClockSourceCfgEntry=hwClockSourceCfgEntry, hwClockPortCfgEntry=hwClockPortCfgEntry, hwClockAttributeRetrieveMode=hwClockAttributeRetrieveMode, hwClockCfgSsmThreshold=hwClockCfgSsmThreshold, hwClockSourceFreqCheck=hwClockSourceFreqCheck, hwClockSourceFailed=hwClockSourceFailed, hwClockClusterSyncType=hwClockClusterSyncType, hwClockCesAcrDomianInfoCard=hwClockCesAcrDomianInfoCard, hwClockCfgSystemLockOut=hwClockCfgSystemLockOut, hwClockCesAcrLockState=hwClockCesAcrLockState, hwClockCesAcrCfgClockId=hwClockCesAcrCfgClockId, hwClockLineClkCfgEntry=hwClockLineClkCfgEntry, hwClockSrcSelEntry=hwClockSrcSelEntry, hwClockAttributeSysMaxOutSsm=hwClockAttributeSysMaxOutSsm, hwClockCesAcrPortCfgRowStatus=hwClockCesAcrPortCfgRowStatus, hwClockSourceSysClkLockModeChange=hwClockSourceSysClkLockModeChange, hwClockTrapOidGroup=hwClockTrapOidGroup, hwClockSsmPktLos=hwClockSsmPktLos, hwClockAttributeTable=hwClockAttributeTable, hwClockSourceOutputBelowThresholdResume=hwClockSourceOutputBelowThresholdResume, hwClockSrcCfgOutClockId=hwClockSrcCfgOutClockId, hwClockLineClkCfgChassisIndex=hwClockLineClkCfgChassisIndex, hwClockSrcCfgSsmTimeout=hwClockSrcCfgSsmTimeout, hwClockCesAcrCfgDomain=hwClockCesAcrCfgDomain, hwClockBitsCfgGroup=hwClockBitsCfgGroup, hwClockCfgSourceSsmSetMode=hwClockCfgSourceSsmSetMode, hwClockCfgBits1Priority=hwClockCfgBits1Priority, hwClockBitsCfgRecvSaBit=hwClockBitsCfgRecvSaBit, hwClockSourceStateChange=hwClockSourceStateChange, hwClockAttributeOutThreshold=hwClockAttributeOutThreshold, hwClockClusterTopoStatus=hwClockClusterTopoStatus, hwClockLineCfgSoureId=hwClockLineCfgSoureId, hwClockAttributeOutValue=hwClockAttributeOutValue, hwClockAttributeSysClkLockMode=hwClockAttributeSysClkLockMode, hwClockCesAcrOldMasterPwName=hwClockCesAcrOldMasterPwName, 
hwClockCesDcrLockState=hwClockCesDcrLockState, hwClockCfgSystemPriority=hwClockCfgSystemPriority, hwClockClusterTopoEntry=hwClockClusterTopoEntry, hwClockCesAcrCfgTable=hwClockCesAcrCfgTable, hwClockClusterTopoFailResume=hwClockClusterTopoFailResume, hwClockCfgFreqCheckResult=hwClockCfgFreqCheckResult, hwClockSrcSelType=hwClockSrcSelType, hwClockBitsCfgInputMode=hwClockBitsCfgInputMode, hwClockAttributeInternalClockId=hwClockAttributeInternalClockId, hwClockSrcCfgOutSsm=hwClockSrcCfgOutSsm, hwClockAttributeChassisIndex=hwClockAttributeChassisIndex, hwClockNotificationsGroup=hwClockNotificationsGroup, hwClockSrcCfgSsmInterval=hwClockSrcCfgSsmInterval, hwClockCesAcrIfIndex=hwClockCesAcrIfIndex, hwClockSourceForceCloseEnableStatus=hwClockSourceForceCloseEnableStatus, hwClockSourceFreqCheckResume=hwClockSourceFreqCheckResume, hwClockSourceGroups=hwClockSourceGroups, hwClockCfgBits0LockOut=hwClockCfgBits0LockOut, hwClockCesDcrDomain=hwClockCesDcrDomain, hwClockTimeUsedSource=hwClockTimeUsedSource, hwClockCfgWtrTime=hwClockCfgWtrTime, hwClockCfgExportEnableStatus=hwClockCfgExportEnableStatus, hwClockBitsCfgEntry=hwClockBitsCfgEntry, hwClockCesAcrDomainInfoEntry=hwClockCesAcrDomainInfoEntry, hwClockFrequencyOffsetMean=hwClockFrequencyOffsetMean, hwClockBitsCfgName=hwClockBitsCfgName, hwClockBitsCfgBitsType=hwClockBitsCfgBitsType, hwClockSrcCfgSourceIndex=hwClockSrcCfgSourceIndex, hwClockCesDcrLockFailResume=hwClockCesDcrLockFailResume, hwClockBitsCfgTable=hwClockBitsCfgTable, hwClockAttributeTodProtocol=hwClockAttributeTodProtocol, hwClockCesAcrSourceMode=hwClockCesAcrSourceMode, hwClockSourceRetrieveMode=hwClockSourceRetrieveMode, hwClockCesDcrNewMasterPwName=hwClockCesDcrNewMasterPwName, hwClockCesAcrCfgEntry=hwClockCesAcrCfgEntry, hwClockSourceSelTable=hwClockSourceSelTable, hwClockPortCfgGroup=hwClockPortCfgGroup, hwClockCesAcrPwDomain=hwClockCesAcrPwDomain)
# --- dataset-join metadata row removed here (extraction artifact between two concatenated generated modules) ---
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
PhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "PhysicalIndex")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier, Unsigned32, Gauge32, IpAddress, ObjectIdentity, NotificationType, ModuleIdentity, Counter32, Counter64, Integer32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Counter32", "Counter64", "Integer32", "Bits", "iso")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
# Module identity for HUAWEI-CLOCK-MIB, rooted at the Huawei enterprise arc
# 1.3.6.1.4.1.2011.5.25.186.  This is pysnmp-compiler output; the MIB builder
# relies on the exact statement order, so only comments are added here.
hwClockMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186))
# Revision history, newest first.
hwClockMIB.setRevisions(('2014-11-29 00:00', '2014-11-03 00:00', '2014-08-13 00:00', '2014-04-21 00:00', '2014-01-07 00:00', '2013-11-12 00:00', '2013-10-31 00:00', '2013-05-23 00:00', '2013-05-14 00:00', '2013-03-20 00:00',))
# setRevisionsDescriptions() only exists in pysnmp builds newer than 4.4.0,
# hence the version guard; loadTexts gates all human-readable text loading.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hwClockMIB.setRevisionsDescriptions(('Modify alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockCesDcrMasterPwChange, hwClockCesDcrLockFail,hwClockCesDcrLockFailResume,hwClockSsmPktLos,hwClockSsmPktLosResume and add mib hwClockCesDcrSlot,hwClockCesDcrCard,hwClockCesDcrDomain,hwClockCesDcrOldMasterPwName,hwClockCesDcrNewMasterPwName,hwClockCesDcrLockState,hwClockCesMode', 'Add alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockClusterTopoFail, hwClockClusterTopoFailResume and table hwClockClusterTopoTable.', 'Edit the range of hwClockCesAcrDomianInfoDomain.', 'Add mib hwClockBitsCfgFrameFormat, hwClockAttributeLtiSquelch and hwClockAttributeInputThreshold.', 'Edit the range of hwClockCesAcrRecoveryDomain.', 'Re-edit the range of some nodes.', 'Re-edit the default values of hwClockAttributeTodProtocol node.', 'Some errors have been modified in current version and some nodes have been added into the current version.',))
if mibBuilder.loadTexts: hwClockMIB.setLastUpdated('201411290000Z')
if mibBuilder.loadTexts: hwClockMIB.setOrganization('Huawei Technologies Co.,Ltd. ')
if mibBuilder.loadTexts: hwClockMIB.setContactInfo("Huawei Industrial Base Bantian, Longgang Shenzhen 518129 People's Republic of China Website: http://www.huawei.com Email: support@huawei.com ")
if mibBuilder.loadTexts: hwClockMIB.setDescription('The MIB contains objects of module clock management and 1588 interface.')
# Subtree anchors: ...186.1 holds all clock-management objects, ...186.1.1 the
# global (non-tabular) scalars defined below.
hwClockManageObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1))
hwClockGlobalObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1))
# Global enable for Ethernet clock (read-write scalar).
hwClockSourceEthClkEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setDescription('The flag indicates that the ethernet clock is globally enabled.')
# SSM quality level assigned to sources whose SSM is unknown; only the listed
# ITU-T quality codes (2,4,8,11,15) are accepted, default 'dnu' (do-not-use).
hwClockSourceSsmUnknown = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone('dnu')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setDescription('The quality level of unknown SSM.')
# Read-only: current working mode of the system clock (trace/hold/free-run).
hwClockSourceSysClkWorkMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trace", 1), ("hold", 2), ("freeoscillate", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setDescription('The work mode of system clock.')
hwClockSourceForceCloseEnableStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setDescription('The enable status of export forced close.')
# Whether SSM participates in clock-source selection (on/off/extended mode).
hwClockSourceSsmControl = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("extend", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockSourceHoldMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hold24Hours", 1), ("holdForever", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceHoldMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceHoldMode.setDescription('The hold mode of clock source.')
# Frequency-check enable plus the allowed deviation window; ranges are in
# units of 0.01 ppm per the descriptions below.
hwClockSourceFreqCheckEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 7), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setDescription('The enable flag of frequency check.')
hwClockSourceFreqCheckLeftRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setDescription('The left range of frequency check, unit in 0.01ppm.')
hwClockSourceFreqCheckRightRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setDescription('The right range of frequency check, unit in 0.01ppm.')
hwClockSourceRetrieveMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("retrieve", 1), ("noRetrieve", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setDescription('The retrieve mode of clock source.')
# Selected time-of-day source and external time input/output signal types
# (DCLS / 1PPS+TOD variants).
hwClockTimeUsedSource = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("srcDclsTimeBit0", 1), ("srcDclsTimeBit1", 2), ("src1ppsTodBit0", 3), ("src1ppsTodBit1", 4), ("srcPtp", 5), ("srcFreeRun", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockTimeUsedSource.setStatus('current')
if mibBuilder.loadTexts: hwClockTimeUsedSource.setDescription('The clock time used source.')
hwClockExtTimeInputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeInputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeInputType.setDescription('The input time type of clock extern time.')
hwClockExtTimeOutputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setDescription('The output time type of clock extern time.')
# Alarm threshold (range 10..92, default 92, units 100 ppb) and the
# read-only max/min/mean/current frequency-offset statistics in ppb.
hwClockAlarmThresholdFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 92)).clone(92)).setUnits('100ppb').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setDescription('The Threshold of clock alarm.')
hwClockFrequencyOffsetMax = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 15), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setDescription('The max offset of clock frequency.')
hwClockFrequencyOffsetMin = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 16), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setDescription('The min offset of clock frequency.')
hwClockFrequencyOffsetMean = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 17), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setDescription('The mean offset of clock frequency.')
hwClockFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 18), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffset.setDescription('The current offset of clock frequency.')
# System clock-source selection table (...186.1.2), indexed by chassis and
# selection type.  Rows expose the selection mode and the traced source id.
hwClockSourceSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2), )
if mibBuilder.loadTexts: hwClockSourceSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelTable.setDescription('The system clock source selection table.')
# Conceptual row: index is (hwClockSourceSelChassisIndex, hwClockSourceSelType).
hwClockSourceSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelType"))
if mibBuilder.loadTexts: hwClockSourceSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelEntry.setDescription('The entry of system clock source selection table.')
# Index columns have no MAX-ACCESS (not-accessible in the MIB).
hwClockSourceSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setDescription('The chassis index.')
hwClockSourceSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)))
if mibBuilder.loadTexts: hwClockSourceSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelType.setDescription('The select type.')
# Writable columns (read-create): selection mode and traced source ID.
hwClockSourceSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auto", 1), ("manual", 2), ("force", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelMode.setDescription('The mode of clock source selection.')
hwClockSourceSelSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setDescription('The source ID of the clock traced.')
# Clock-source configuration table (...186.1.3), indexed by chassis and
# source index (1..20).  Columns 1-15 are defined here; columns 16-24 follow.
hwClockSourceCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3), )
if mibBuilder.loadTexts: hwClockSourceCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgTable.setDescription('The clock source config table.')
hwClockSourceCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setDescription('The entry of clock source config table.')
# Index columns (not-accessible in the MIB, hence no setMaxAccess).
hwClockCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)))
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setDescription('The clock source index.')
# Read-only identity columns.
hwClockCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceId.setDescription('The clock source ID.')
hwClockCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setDescription('The clock source description.')
# Wait-to-restore time (0..12) and bad-signal detection enable.
hwClockCfgWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgWtrTime.setDescription('The waiting for restore time of clock source.')
hwClockCfgBadDetect = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBadDetect.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBadDetect.setDescription('The enable status of clock source bad detecting.')
# Per-output priorities; -1..255 (presumably -1 means "not configured" --
# TODO confirm against device documentation).
hwClockCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockCfgBits0Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setDescription('The priority of BITS0 clock source.')
hwClockCfgBits1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setDescription('The priority of BITS1 clock source.')
# Lock-out flags for the system / BITS0 / BITS1 outputs.
hwClockCfgSystemLockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setDescription('The lock out of system clock source.')
hwClockCfgBits0LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setDescription('The lock out of BITS0 clock source.')
hwClockCfgBits1LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setDescription('The lock out of BITS1 clock source.')
# SSM quality of the source and whether it is set manually or learned.
hwClockCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ssmPrc", 1), ("ssmSsut", 2), ("ssmSsul", 3), ("ssmSec", 4), ("ssmDnu", 5), ("ssmUnknown", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockCfgSourceSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setDescription('The set mode of SSM.')
hwClockCfgExportEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setDescription('The enable status of clock source export.')
# Column 16: enable flag for clock-source switching (read-write).
hwClockCfgSwiEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 16), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setStatus('current')
# Fixed truncated description text ('he enable ...' -> 'The enable ...').
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setDescription('The enable status of clock source switch.')
# Remaining columns (17-24) of hwClockSourceCfgTable.
hwClockCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("abnormal", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceState.setDescription('The state of clock source.')
# SSM quality-level threshold below which a source is not selected.
hwClockCfgSsmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("qlDnu", 1), ("qlSec", 2), ("qlSsub", 3), ("qlSsua", 4), ("qlPrc", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setDescription('The SSM quality level threshold of clock source.')
# Raw S1 byte value (0..255) carried by the source, read-only.
hwClockCfgSourceS1Id = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setDescription('The S1 byte of the clock.')
hwClockCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setDescription('The result of frequency check, unit in 0.01ppm.')
# Hold-off time 3..18, in units of 100 ms per the description.
hwClockCfgHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 18))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setDescription('The hold off time of clock, unit in 100ms.')
hwClockCfgPriRvtEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 22), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setDescription('The enable status of switch according priority.')
hwClockCfgSwitchCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noSwitch", 1), ("switch", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setDescription('The condition of clock switch.')
# Physical kind of the source (BITS port, line interface, internal, system).
hwClockCfgClkSourceType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("bits", 1), ("line", 2), ("inner", 3), ("system", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setDescription('The type of clock source.')
# --- hwClockBitsCfg table: per-BITS-port clock configuration ---
# Fix: original generated description misspelled 'config' as 'congfig'.
hwClockBitsCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4), )
if mibBuilder.loadTexts: hwClockBitsCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTable.setDescription('The clock bits config table.')
# Row of hwClockBitsCfgTable, indexed by (chassis index, BITS index).
# Fix: original generated description misspelled 'config' as 'congfig'.
hwClockBitsCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsIndex"))
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setDescription('The entry of clock bits config table.')
# Index column 1: chassis that owns the BITS clock source.
# Fix: original generated description misspelled 'which' as 'whitch'.
hwClockBitsCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setDescription('The index of the chassis which the clock source belongs to.')
# Index column 2: BITS clock index (1..10).
hwClockBitsCfgBitsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setDescription('The index of BITS clock.')
# Column 3: read-only clock name.
hwClockBitsCfgName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgName.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgName.setDescription('The name of clock.')
# Column 4: physical BITS port type (RJ45 or SMB), read-only.
hwClockBitsCfgBitsPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("portRj45", 1), ("portSMB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setDescription('The BITS port type.')
# Column 5: BITS signal type (2Mbps/2MHz/DCLS time/1PPS+TOD/none/1544Mbps).
hwClockBitsCfgBitsType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("type2Mbps", 0), ("type2Mhz", 1), ("typeDclsTime", 2), ("type1ppsTod", 3), ("none", 4), ("type1544Mbps", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setDescription('The BITS type.')
# Column 6: signal direction for the BITS port.
hwClockBitsCfgDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inAndOut", 3), ("none", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setDescription('The direction of BITS.')
# Columns 7-8: which Sa bit (sa4..sa8) carries SSM on receive / transmit.
hwClockBitsCfgRecvSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setDescription('The received SA bit.')
hwClockBitsCfgSendSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setDescription('The sent SA bit.')
# Column 9: forced outgoing S1 quality value (unk/prc/ssua/ssub/sec/dnu).
hwClockBitsCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setDescription('The S1 byte of forcing out.')
# Column 10: Sa bit carrying SSM information.
hwClockBitsCfgSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setDescription('The SA bit of SSM information.')
# Columns 11-12: input / output signal mode of the clock source.
hwClockBitsCfgInputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setDescription('The input mode of clock source.')
hwClockBitsCfgOutputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setDescription('The output mode of clock source.')
# Column 13: condition that declares the source invalid (none/AIS/LOF).
hwClockBitsCfgInvalidCond = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("no", 1), ("ais", 2), ("lof", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setDescription('The invalid condition of clock source.')
# Column 14: read-only clock source ID.
hwClockBitsCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setDescription('The clock source ID.')
# Column 15: time-of-day protocol on the port (NMEA/UBX/none).
hwClockBitsCfgTodSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setDescription('The tod signal of clock source.')
# Column 16: E1 frame format / CRC mode; default is pcm31crc (clone(4)).
hwClockBitsCfgFrameFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("pcm30nocrc", 1), ("pcm30crc", 2), ("pcm31nocrc", 3), ("pcm31crc", 4))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setDescription('Encoding type and frame check format of the extern clock port.')
# --- hwClockPortCfg table: per-interface clock configuration, indexed by ifIndex ---
hwClockPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5), )
if mibBuilder.loadTexts: hwClockPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgTable.setDescription('The clock port config table.')
hwClockPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockPortCfgIfIndex"))
if mibBuilder.loadTexts: hwClockPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgEntry.setDescription('The entry of clock port config table.')
# Index column 1: the interface this row configures.
hwClockPortCfgIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setDescription('The interface index.')
# Columns 2-3: clock priority (0..255) for the left / right frame.
hwClockPortCfgLeftFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setDescription('The clock priority of left frame.')
hwClockPortCfgRightFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setDescription('The clock priority of right frame.')
# Column 4: forced outgoing S1 byte; -1 presumably means 'not forced' -- TODO confirm against the MIB.
hwClockPortCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setDescription('The S1 byte of forcing out.')
# --- hwClockLineClkCfg table: line-clock configuration, indexed by (chassis, slot) ---
hwClockLineClkCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6), )
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setDescription('The line clock config table.')
hwClockLineClkCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSlotIndex"))
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setDescription('The entry of line clock config table.')
# Index column 1: chassis index.
hwClockLineClkCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setDescription('The chassis index.')
# Index column 2: slot index (1..200) providing the line clock.
hwClockLineClkCfgSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200)))
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setDescription('The slot index of the line clock.')
# Columns 3-4: card / port selected to provide the line clock (read-create).
# Fix: original generated descriptions read 'witch is seleced' for 'which is selected'.
hwClockLineClkCfgCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setDescription('The card index which is selected to provide line clock.')
hwClockLineClkCfgPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setDescription('The port index which is selected to provide line clock.')
# Columns 5-6: S1 byte value received (read-only) / sent (read-create).
hwClockLineClkCfgRecvS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setDescription('The S1 byte value received.')
hwClockLineClkCfgSendS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setDescription('The S1 byte value sent.')
# Column 7: NOTE(review) - the source MIB only supplies the placeholder text
# 'Description.' here ('Soure' typo is in the published object name and must
# stay for interface compatibility); presumably this is the source ID -- confirm against the MIB.
hwClockLineCfgSoureId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setDescription('Description.')
# --- hwClockTrapOid subtree: accessible-for-notify scalars carried as trap payload ---
hwClockTrapOid = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7))
# Previous / current clock source names.
hwClockLastSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockLastSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockLastSourceName.setDescription('The last clock source name.')
hwClockCurSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCurSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockCurSourceName.setDescription('The current clock source name.')
# Lock mode held before a lock-mode-change notification.
hwClockSourceOldLockMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setDescription('The old lock mode of clock source.')
hwClockChassisId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockChassisId.setStatus('current')
if mibBuilder.loadTexts: hwClockChassisId.setDescription('The chassis ID.')
# Source state held before a state-change notification.
hwClockOldSourceState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("wtr", 3), ("holdoff", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockOldSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockOldSourceState.setDescription('The old state of clock source.')
# PLL identifier (system / sync2M-1 / sync2M-2).
hwClockPllId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockPllId.setStatus('current')
if mibBuilder.loadTexts: hwClockPllId.setDescription('The id of pll.')
# Current output SSM value (same value set as the S1 quality levels).
hwClockAttributeOutValue = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockAttributeOutValue.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutValue.setDescription('The current output value.')
# CES ACR (adaptive clock recovery) payload objects: slot/card/domain,
# old/new master PW names, and lock state.
hwClockCesAcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 8), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 9), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 10), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 11), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setDescription('The master pw SerialPort name of CES ACR old clock source.')
hwClockCesAcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 12), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setDescription('The master pw SerialPort name of CES ACR new clock source.')
hwClockCesAcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 13), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockState.setDescription('The lock state of the CES ACR.')
# CES DCR (differential clock recovery) payload objects, mirroring the ACR set.
hwClockCesDcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 14), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrSlot.setDescription('The slot ID of CES DCR clock source.')
hwClockCesDcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 15), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrCard.setDescription('The card ID of CES DCR clock source.')
hwClockCesDcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 16), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrDomain.setDescription('The recovery domain value of CES DCR clock source.')
hwClockCesDcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 17), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setDescription('The master pw SerialPort name of CES DCR old clock source.')
hwClockCesDcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 18), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setDescription('The master pw SerialPort name of CES DCR new clock source.')
hwClockCesDcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 19), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockState.setDescription('The lock state of the CES DCR.')
# --- hwClockNotifications subtree: trap definitions; each lists its varbind objects ---
hwClockNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8))
# Trap 1: active clock source switched.
hwClockSourceSwitch = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcSelMode"))
if mibBuilder.loadTexts: hwClockSourceSwitch.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSwitch.setDescription('Clock source switch notification.')
# Trap 2: system clock lock mode changed (old mode carried in payload).
hwClockSourceSysClkLockModeChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 2)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setDescription('The lock mode of system clock source change notification.')
# Traps 3/4: clock source state changed / resumed (same varbind set).
hwClockSourceStateChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 3)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateChange.setDescription('The state of clock source change notification.')
hwClockSourceStateResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 4)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateResume.setDescription('The state of clock source resume notification.')
# Trap 5: frequency check of a clock source found abnormal.
# Fix: original generated description misspelled 'frequency' as 'frequnce'.
hwClockSourceFreqCheck = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 5)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setDescription('The result of clock source frequency check abnormal notification.')
# Trap 6: output SSM quality fell below the configured threshold.
hwClockSourceOutputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 6)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setDescription('The SSM of output below threshold notification.')
# Traps 7/8: system clock left / entered locked mode.
hwClockNotInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 7)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockNotInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockNotInLockedMode.setDescription('The work mode of system clock is not in locked mode.')
hwClockInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockInLockedMode.setDescription('The work mode of system clock is in locked mode.')
# Traps 11/12: clock source failed / became valid again. (Sub-ids 9-10 are
# not defined in this chunk.)
hwClockSourceFailed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 11)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceFailed.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFailed.setDescription('The state of clock source is failed.')
hwClockSourceValid = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 12)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceValid.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceValid.setDescription('The state of clock source is valid.')
# Trap 13: frequency check of a clock source returned to normal.
# Fix: original generated description misspelled 'frequency' as 'frequnce'.
hwClockSourceFreqCheckResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setDescription('The result of clock source frequency check normal notification.')
# Trap 14: output SSM quality rose back above the threshold.
hwClockSourceOutputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setDescription('The SSM of output above threshold notification.')
# Traps 15-17: CES ACR master-PW change, lock failure, and lock recovery.
hwClockCesAcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setDescription('CES ACR master PW status change.')
hwClockCesAcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setDescription('CES ACR clock source lock fail.')
hwClockCesAcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setDescription('CES ACR clock source lock fail resume.')
# Traps 22/23: inter-chassis sync topology compute failed / recovered.
# (Sub-ids 18-21 are not defined in this chunk.)
hwClockClusterTopoFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 22)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFail.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFail.setDescription('Clock cluster inter-chassis synchronization topo compute failed.')
hwClockClusterTopoFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 23)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setDescription('Clock inter-chassis synchronization topo compute successfully.')
# Traps 24/25: input SSM quality fell below / returned to the threshold.
hwClockSourceInputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 24)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setDescription('The SSM of input below threshold notification.')
hwClockSourceInputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 25)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setDescription('The SSM of input above or equal threshold notification.')
# Traps 26/27: SSM packet loss detected / recovered.
hwClockSsmPktLos = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 26)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLos.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLos.setDescription('The ssm packet of clock source is lost.')
hwClockSsmPktLosResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 27)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setDescription('The ssm packet of clock source is normal.')
# Traps 28-30: CES DCR master-PW change, lock failure, and lock recovery
# (mirror of the ACR traps 15-17).
hwClockCesDcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 28)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setDescription('CES DCR master PW status change.')
hwClockCesDcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 29)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setDescription('CES DCR clock source lock fail.')
hwClockCesDcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 30)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setDescription('CES DCR clock source lock fail resume.')
# --- hwClockAttributeTable (1.3.6.1.4.1.2011.5.25.186.1.9) ---
# Per-chassis clock attribute table, indexed by hwClockAttributeChassisIndex.
# Columns 2-17 hold run-mode, SSM control, squelch and threshold settings.
hwClockAttributeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9), )
if mibBuilder.loadTexts: hwClockAttributeTable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTable.setDescription('The clock Attribute table.')
hwClockAttributeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockAttributeChassisIndex"))
if mibBuilder.loadTexts: hwClockAttributeEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeEntry.setDescription('The entry of clock Attribute table.')
# Index column (no MAX-ACCESS set: not-accessible table index).
hwClockAttributeChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setDescription('The chassis index.')
# System clock run mode: normal(0)/freeRun(1)/hold(2), read-write.
hwClockAttributeSysClkRunMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("freeRun", 1), ("hold", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setDescription('The run mode of system clock.')
# Whether SSM quality participates in source selection: on(0)/off(1).
hwClockAttributeSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockAttributeFreqCheckEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setDescription('The enable flag of frequency check.')
# Retrieve (revertive) mode; DEFVAL retrieve(0) via .clone('retrieve').
hwClockAttributeRetrieveMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("retrieve", 0), ("noRetrieve", 1))).clone('retrieve')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setDescription('The retrieve mode of system clock.')
# Wait-to-restore time, range 0..12 (units not stated here), default 5.
hwClockAttributeWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setDescription('The time waiting for retrieve.')
# Hold-off time, range 300..1800 (units not stated here), default 1000.
hwClockAttributeHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(300, 1800)).clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setDescription('The holdoff-time when the system source is lost.')
# SSM output threshold; values are ITU-T SSM codes: prc(2)/ssua(4)/ssub(8)/
# sec(11)/dnu(15).
hwClockAttributeOutThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setDescription('The Threshold of out put.')
# Max SSM emitted on system output / 2Msync-1 / 2Msync-2 respectively.
hwClockAttributeSysMaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setDescription('The max ssm of system out put.')
hwClockAttribute2M1MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setDescription('The max ssm of 2msync-1 out put.')
hwClockAttribute2M2MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setDescription('The max ssm of 2msync-2 out put.')
# Read-only lock state of the system PLL.
hwClockAttributeSysClkLockMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setDescription('The Lock mode of system clock.')
hwClockAttributeExtendSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setDescription('The flag whether Extend SSM is concerned with the clock source selection.')
hwClockAttributeInternalClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setDescription('The internal clockid of the device.')
# 1PPS+TOD protocol selector; default ubx(2).
hwClockAttributeTodProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3), ("ccsa", 4))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setDescription('1pps bits tod protocol.')
# Squelch frequency output on loss; EnabledStatus default cloned to 2
# (presumably disabled — confirm against EnabledStatus TC defined earlier).
hwClockAttributeLtiSquelch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 16), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setDescription('The frequency signal output squelch flag upon the frequency loss.')
# Input squelch threshold; default dnu(15).
hwClockAttributeInputThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setDescription('The squelch threshold of the external input source.')
# --- hwClockSrcSelTable (1.3.6.1.4.1.2011.5.25.186.1.10) ---
# Clock-source selection per chassis and per PLL (system / 2Msync-1 /
# 2Msync-2); indexed by chassis index + selection type.
hwClockSrcSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10), )
if mibBuilder.loadTexts: hwClockSrcSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelTable.setDescription('The system clock source selection table.')
hwClockSrcSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelType"))
if mibBuilder.loadTexts: hwClockSrcSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelEntry.setDescription('The entry of system clock source selection table.')
# Index columns (not-accessible).
hwClockSrcSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setDescription('The chassis index.')
hwClockSrcSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3))))
if mibBuilder.loadTexts: hwClockSrcSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelType.setDescription('The PLL Id.')
# Selection mode: auto(0)/manual(1)/force(2), read-write.
hwClockSrcSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("auto", 0), ("manual", 1), ("force", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelMode.setDescription('The mode of clock source selection.')
# Name of the source to select (settable) vs. name currently traced (read-only).
hwClockSrcSelSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setDescription('The name of clock source for selection.')
hwClockSrcTraceSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setDescription('The name of trace source.')
# --- hwClockSrcCfgTable (1.3.6.1.4.1.2011.5.25.186.1.11) ---
# Per-source configuration/state, indexed by chassis + source type
# (bits/ptp/interface) + source index. Row creation governed by
# hwClockSrcCfgRowStatus.
hwClockSrcCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11), )
if mibBuilder.loadTexts: hwClockSrcCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgTable.setDescription('The clock source config table.')
hwClockSrcCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceTypeIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setDescription('The entry of clock source config table.')
# Index columns.
hwClockSrcCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setDescription('The chassis index.')
# NOTE(review): an index column with "accessiblefornotify" is unusual —
# presumably so the type can appear as a trap varbind; confirm against the
# vendor MIB.
hwClockSrcCfgSourceTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("bits", 1), ("ptp", 2), ("interface", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setDescription('The type of clock source.')
hwClockSrcCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 3), Integer32())
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setDescription('The index of clock source.')
hwClockSrcCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setDescription('The description of clock source.')
hwClockSrcCfgClkEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setDescription('The enable flag of clock source.')
# Per-PLL priorities (0..255): system, 2Msync-1, 2Msync-2.
hwClockSrcCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockSrcCfg2M1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setDescription('The priority of 2msync-1 clock source.')
hwClockSrcCfg2M2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setDescription('The priority of 2msync-2 clock source.')
# Configured SSM quality (ITU-T codes plus unk(0)/unknown(16)).
hwClockSrcCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockSrcCfgSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setDescription('The set mode of SSM.')
# Operational state / frequency-check result (read-only).
hwClockSrcCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setDescription('The state of clock source.')
hwClockSrcCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setDescription('The result of frequency check.')
# SSM packet interval/timeout (placeholder 'Description.' text comes from the
# vendor MIB; left as-is).
hwClockSrcCfgSsmInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(512, 8000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setDescription('Description.')
hwClockSrcCfgSsmTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 32000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setDescription('Description.')
# E1 SA bit carrying SSM; default sa4(4).
hwClockSrcCfgSabit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 99))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8), ("invalid", 99))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setDescription('The SA bit of E1 Port SSM information.')
hwClockSrcCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setDescription('The clockid of clock source.')
hwClockSrcCfgClockIdSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setDescription('The set mode of clockid.')
# Current outgoing SSM / clock-id as observed on the wire (read-only).
hwClockSrcCfgOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16, 99))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16), ("invalid", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setDescription('Current output ssm.')
hwClockSrcCfgOutClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 99))).clone(namedValues=NamedValues(("clockid0", 0), ("clockid1", 1), ("clockid2", 2), ("clockid3", 3), ("clockid4", 4), ("clockid5", 5), ("clockid6", 6), ("clockid7", 7), ("clockid8", 8), ("clockid9", 9), ("clockid10", 10), ("clockid11", 11), ("clockid12", 12), ("clockid13", 13), ("clockid14", 14), ("clockid15", 15), ("notsupport", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setDescription('Current output clockid.')
# Standard SMIv2 row-status column for row creation/deletion.
hwClockSrcCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 20), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setDescription('The row status.')
hwClockSrcCfgFreqDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 21), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setDescription('Freqdeviation value of clock source.')
# PHY-level clock role/state of the port.
hwClockSrcCfgPhyState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("cardTypeNotSupport", 0), ("slave", 1), ("master", 2), ("speedNotSupport", 3), ("portDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setDescription('The PHY clock state of ports.')
hwClockSrcCfgNegotiationSlave = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notSupport", 0), ("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setDescription('Set PHY clock state to slave.')
# --- hwClockCesAcrPortCfgTable (1.3.6.1.4.1.2011.5.25.186.1.12) ---
# CES ACR per-port clock configuration, indexed by parent ifIndex + channel
# id + ifIndex. Rows managed via hwClockCesAcrPortCfgRowStatus.
hwClockCesAcrPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12), )
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setDescription('The CES ACR clock port config table.')
hwClockCesAcrPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrParentIfIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrChannelId"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrIfIndex"))
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setDescription('The entry of CES ACR clock port config table.')
# Index columns.
hwClockCesAcrParentIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setDescription('Indicates the index of the parent interface.')
hwClockCesAcrChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setDescription('Indicates the interface index.')
hwClockCesAcrPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrPortName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortName.setDescription('Port name.')
hwClockCesAcrChannelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setDescription('Indicates the interface type. The type can be E1/CE1 or T1/CT1.')
# Interface clock role: internal (master), line (slave), or recovery-domain.
hwClockCesAcrSourceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("slave", 2), ("recoveryDomain", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setDescription('Indicates the clock mode of the interface. master: indicates that the clock works in master mode and uses the internal clock signal. slave: indicates that the clock works in slave mode and uses the line clock signal. recovery-domain: indicates that the clock works in slave mode and uses the recovery domain clock signal. ')
# Domain assignments (recovery 0..16, PW 0..8, master 0..32; DEFVAL 0 per
# the description text — note no .clone() default is actually set here).
hwClockCesAcrRecoveryDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setDescription('Indicates the clock recovery domain of the interface. DEFVAL is 0.')
hwClockCesAcrPwDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setDescription('Indicates the clock PW domain of the interface. DEFVAL is 0.')
hwClockCesAcrPortCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setDescription('The row status.')
hwClockCesAcrMasterDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setDescription('Indicates the clock master domain of the interface. DEFVAL is 0.')
# CES recovery mode: adaptive (ACR) vs. differential (DCR).
hwClockCesMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acr", 1), ("dcr", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesMode.setDescription('Indicates the clock CES recovery mode of the interface. DEFVAL is 0.')
# --- hwClockCesAcrCfgTable (1.3.6.1.4.1.2011.5.25.186.1.13) ---
# CES ACR clock-source configuration per slot/card/recovery-domain (1..8).
# Rows managed via hwClockCesAcrCfgRowStatus.
hwClockCesAcrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13), )
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setDescription('The CES ACR clock source config table.')
hwClockCesAcrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgDomain"))
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setDescription('The entry of CES ACR clock source config table.')
# Index columns.
hwClockCesAcrCfgSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCfgCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrCfgDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrCfgDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setDescription('The description of clock source.')
hwClockCesAcrCfgSyncEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setDescription('The enable flag of CES ACR clock source.')
hwClockCesAcrCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setDescription('The priority of system CES ACR clock source. DEFVAL is 0.')
# SSM quality (ITU-T codes plus unk(0)/unknown(16)).
hwClockCesAcrCfgSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setDescription('The SSM quality of CES ACR clock source.')
hwClockCesAcrCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setDescription('The clockid of clock source. DEFVAL is 0.')
# Operational state / frequency-check result (read-only).
hwClockCesAcrCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setDescription('The state of CES ACR clock source.')
hwClockCesAcrCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setDescription('The result of CES ACR clock source frequency check.')
hwClockCesAcrCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setDescription('The row status.')
# --- hwClockCesAcrDomainInfoTable (1.3.6.1.4.1.2011.5.25.186.1.14) ---
# Read-only CES ACR recovery-domain status, indexed by slot/card/domain
# (1..16). NOTE: the 'Domian' misspelling below is baked into the exported
# column identifiers (they mirror the vendor MIB) and must not be renamed.
hwClockCesAcrDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14), )
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setDescription('The CES ACR domain infomation table.')
hwClockCesAcrDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoDomain"))
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setDescription('The entry of CES ACR domain infomation table.')
# Index columns.
hwClockCesAcrDomianInfoSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrDomianInfoCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomianInfoDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)))
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setDescription('The recovery domain value of CES ACR clock source.')
# Master PW name and channel for the domain, plus the lock state.
hwClockCesAcrDomianInfoMasterPwName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setDescription('Port name.')
hwClockCesAcrDomianInfoChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrDomianInfoState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wait", 2), ("lock", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setDescription('The state of CES ACR clock source.')
# --- hwClockClusterTopoTable (1.3.6.1.4.1.2011.5.25.186.1.15) ---
# Inter-chassis (cluster) clock topology, indexed by sync type + topo type +
# link type; all columns read-only.
# NOTE(review): the table description below reads 'The CES ACR domain
# infomation table.' — presumably a copy-paste error in the vendor MIB text;
# description strings mirror the compiled MIB, so it is left unchanged here.
hwClockClusterTopoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15), )
if mibBuilder.loadTexts: hwClockClusterTopoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoTable.setDescription('The CES ACR domain infomation table.')
hwClockClusterTopoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"))
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setDescription('Description.')
# Index columns: sync type (frequency/time), topo type (interlink only),
# link type (bits only).
hwClockClusterSyncType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("frequency", 1), ("time", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterSyncType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterSyncType.setDescription('The type of clock inter-chassis sync.')
hwClockClusterTopoType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("interlink", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoType.setDescription('The type of clock inter-chassis topo..')
hwClockClusterTopoLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("bits", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setDescription('The type of clock inter-chassis link.')
hwClockClusterTopoStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fail", 1), ("success", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setDescription('The status of clock inter-chassis topo.')
hwClockConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10))
hwClockSourceCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1))
hwClockSourceCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockManageSysGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSourceCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockNotificationsGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSysSelGroup"), ("HUAWEI-CLOCK-MIB", "hwClockTrapOidGroup"), ("HUAWEI-CLOCK-MIB", "hwClockLineCfgGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSourceCompliance = hwClockSourceCompliance.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCompliance.setDescription('The compliance of clock MIB.')
hwClockSourceGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2))
hwClockManageSysGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkWorkMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckEnable"), ("HUAWEI-CLOCK-MIB", "hwClockSourceHoldMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmControl"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckRightRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckLeftRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceRetrieveMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceForceCloseEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmUnknown"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeOutputType"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeInputType"), ("HUAWEI-CLOCK-MIB", "hwClockTimeUsedSource"), ("HUAWEI-CLOCK-MIB", "hwClockSourceEthClkEnable"), ("HUAWEI-CLOCK-MIB", "hwClockAlarmThresholdFrequencyOffset"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMax"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMin"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMean"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffset"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockManageSysGroup = hwClockManageSysGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockManageSysGroup.setDescription('The manage group.')
hwClockSysSelGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 9)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSelMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSelSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSysSelGroup = hwClockSysSelGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSysSelGroup.setDescription('The system selection group.')
hwClockSourceCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 10)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCfgPriRvtEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwitchCondition"), ("HUAWEI-CLOCK-MIB", "hwClockCfgWtrTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBadDetect"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsm"), ("HUAWEI-CLOCK-MIB", "hwClockCfgExportEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwiEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockCfgFreqCheckResult"), ("HUAWEI-CLOCK-MIB", "hwClockCfgHoldOffTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemPriority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsmSetMode"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceS1Id"), ("HUAWEI-CLOCK-MIB", "hwClockCfgClkSourceType"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSsmThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemLockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgTodSignal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSourceCfgGroup = hwClockSourceCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgGroup.setDescription('The clock source group.')
hwClockPortCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockPortCfgLeftFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgRightFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgForceOutS1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockPortCfgGroup = hwClockPortCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgGroup.setDescription('The port config of clock source group.')
hwClockBitsCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockBitsCfgRecvSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSendSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgForceOutS1"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgName"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsType"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgDirection"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgOutputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInvalidCond"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsPortType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockBitsCfgGroup = hwClockBitsCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgGroup.setDescription('The BITS clock source group.')
hwClockTrapOidGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockTrapOidGroup = hwClockTrapOidGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockTrapOidGroup.setDescription('The clock trap group.')
hwClockNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSwitch"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheck"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceValid"), ("HUAWEI-CLOCK-MIB", "hwClockInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFail"), ("HUAWEI-CLOCK-MIB", "hwClockNotInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkLockModeChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFailed"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLos"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLosResume"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockNotificationsGroup = hwClockNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockNotificationsGroup.setDescription('This is the group of clock notification.')
hwClockLineCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgRecvS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSendS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgCardId"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgPortId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockLineCfgGroup = hwClockLineCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgGroup.setDescription('The line clock group..')
mibBuilder.exportSymbols("HUAWEI-CLOCK-MIB", PYSNMP_MODULE_ID=hwClockMIB, hwClockCfgSourceS1Id=hwClockCfgSourceS1Id, hwClockBitsCfgFrameFormat=hwClockBitsCfgFrameFormat, hwClockCfgSourceId=hwClockCfgSourceId, hwClockAttributeSsmControl=hwClockAttributeSsmControl, hwClockCesAcrDomianInfoDomain=hwClockCesAcrDomianInfoDomain, hwClockSrcCfgNegotiationSlave=hwClockSrcCfgNegotiationSlave, hwClockCurSourceName=hwClockCurSourceName, hwClockSourceInputBelowThresholdResume=hwClockSourceInputBelowThresholdResume, hwClockSrcCfgFreqCheckResult=hwClockSrcCfgFreqCheckResult, hwClockBitsCfgTodSignal=hwClockBitsCfgTodSignal, hwClockSrcCfgSabit=hwClockSrcCfgSabit, hwClockSrcSelSrcName=hwClockSrcSelSrcName, hwClockCesAcrCfgCard=hwClockCesAcrCfgCard, hwClockCesAcrCfgSystemPriority=hwClockCesAcrCfgSystemPriority, hwClockAttributeSysClkRunMode=hwClockAttributeSysClkRunMode, hwClockCesAcrParentIfIndex=hwClockCesAcrParentIfIndex, hwClockCesAcrPortCfgTable=hwClockCesAcrPortCfgTable, hwClockSourceEthClkEnable=hwClockSourceEthClkEnable, hwClockLineClkCfgSlotIndex=hwClockLineClkCfgSlotIndex, hwClockInLockedMode=hwClockInLockedMode, hwClockCesAcrMasterDomain=hwClockCesAcrMasterDomain, hwClockCesAcrCfgSyncEnable=hwClockCesAcrCfgSyncEnable, hwClockPortCfgLeftFramePri=hwClockPortCfgLeftFramePri, hwClockCfgBadDetect=hwClockCfgBadDetect, hwClockSrcCfgSourceTypeIndex=hwClockSrcCfgSourceTypeIndex, hwClockSrcCfgSystemPriority=hwClockSrcCfgSystemPriority, hwClockCesAcrCfgRowStatus=hwClockCesAcrCfgRowStatus, hwClockCfgSourceIndex=hwClockCfgSourceIndex, hwClockSrcCfgClockId=hwClockSrcCfgClockId, hwClockSourceSwitch=hwClockSourceSwitch, hwClockLineClkCfgTable=hwClockLineClkCfgTable, hwClockSrcCfg2M2Priority=hwClockSrcCfg2M2Priority, hwClockSourceValid=hwClockSourceValid, hwClockCesMode=hwClockCesMode, hwClockCfgClkSourceType=hwClockCfgClkSourceType, hwClockBitsCfgDirection=hwClockBitsCfgDirection, hwClockBitsCfgInvalidCond=hwClockBitsCfgInvalidCond, hwClockCfgSwitchCondition=hwClockCfgSwitchCondition, 
hwClockCesAcrCfgDescr=hwClockCesAcrCfgDescr, hwClockAttribute2M1MaxOutSsm=hwClockAttribute2M1MaxOutSsm, hwClockCesAcrDomianInfoMasterPwName=hwClockCesAcrDomianInfoMasterPwName, hwClockAlarmThresholdFrequencyOffset=hwClockAlarmThresholdFrequencyOffset, hwClockCesAcrCfgSlot=hwClockCesAcrCfgSlot, hwClockChassisId=hwClockChassisId, hwClockGlobalObjects=hwClockGlobalObjects, hwClockBitsCfgSendSaBit=hwClockBitsCfgSendSaBit, hwClockSourceFreqCheckLeftRange=hwClockSourceFreqCheckLeftRange, hwClockSrcCfgFreqDeviation=hwClockSrcCfgFreqDeviation, hwClockSourceCompliances=hwClockSourceCompliances, hwClockClusterTopoType=hwClockClusterTopoType, hwClockSrcCfgSourceSsm=hwClockSrcCfgSourceSsm, hwClockCesAcrDomianInfoSlot=hwClockCesAcrDomianInfoSlot, hwClockSourceCfgGroup=hwClockSourceCfgGroup, hwClockCesDcrOldMasterPwName=hwClockCesDcrOldMasterPwName, hwClockOldSourceState=hwClockOldSourceState, hwClockSourceCompliance=hwClockSourceCompliance, hwClockMIB=hwClockMIB, hwClockLineClkCfgRecvS1=hwClockLineClkCfgRecvS1, hwClockPortCfgIfIndex=hwClockPortCfgIfIndex, hwClockCfgSourceDescr=hwClockCfgSourceDescr, hwClockExtTimeInputType=hwClockExtTimeInputType, hwClockCfgSwiEnableStatus=hwClockCfgSwiEnableStatus, hwClockLineCfgGroup=hwClockLineCfgGroup, hwClockManageObjects=hwClockManageObjects, hwClockBitsCfgSaBit=hwClockBitsCfgSaBit, hwClockSourceFreqCheckRightRange=hwClockSourceFreqCheckRightRange, hwClockSrcSelMode=hwClockSrcSelMode, hwClockClusterTopoTable=hwClockClusterTopoTable, hwClockFrequencyOffset=hwClockFrequencyOffset, hwClockManageSysGroup=hwClockManageSysGroup, hwClockSourceFreqCheckEnable=hwClockSourceFreqCheckEnable, hwClockAttribute2M2MaxOutSsm=hwClockAttribute2M2MaxOutSsm, hwClockCesAcrCfgFreqCheckResult=hwClockCesAcrCfgFreqCheckResult, hwClockCesAcrDomainInfoTable=hwClockCesAcrDomainInfoTable, hwClockCesAcrDomianInfoChannelId=hwClockCesAcrDomianInfoChannelId, hwClockSrcCfgClockIdSetMode=hwClockSrcCfgClockIdSetMode, hwClockSourceSelType=hwClockSourceSelType, 
hwClockCfgBits0Priority=hwClockCfgBits0Priority, hwClockSrcCfgSsmSetMode=hwClockSrcCfgSsmSetMode, hwClockClusterTopoFail=hwClockClusterTopoFail, hwClockPllId=hwClockPllId, hwClockSrcCfg2M1Priority=hwClockSrcCfg2M1Priority, hwClockSourceHoldMode=hwClockSourceHoldMode, hwClockSrcSelTable=hwClockSrcSelTable, hwClockLineClkCfgCardId=hwClockLineClkCfgCardId, hwClockSsmPktLosResume=hwClockSsmPktLosResume, hwClockSourceSelChassisIndex=hwClockSourceSelChassisIndex, hwClockAttributeExtendSsmControl=hwClockAttributeExtendSsmControl, hwClockSourceOldLockMode=hwClockSourceOldLockMode, hwClockPortCfgRightFramePri=hwClockPortCfgRightFramePri, hwClockCesAcrChannelId=hwClockCesAcrChannelId, hwClockCesAcrCfgSsm=hwClockCesAcrCfgSsm, hwClockSourceSelMode=hwClockSourceSelMode, hwClockSrcCfgSourceDescr=hwClockSrcCfgSourceDescr, hwClockTrapOid=hwClockTrapOid, hwClockAttributeEntry=hwClockAttributeEntry, hwClockCesAcrRecoveryDomain=hwClockCesAcrRecoveryDomain, hwClockCesAcrSlot=hwClockCesAcrSlot, hwClockFrequencyOffsetMax=hwClockFrequencyOffsetMax, hwClockSrcCfgRowStatus=hwClockSrcCfgRowStatus, hwClockCfgSourceState=hwClockCfgSourceState, hwClockBitsCfgOutputMode=hwClockBitsCfgOutputMode, hwClockBitsCfgBitsIndex=hwClockBitsCfgBitsIndex, hwClockFrequencyOffsetMin=hwClockFrequencyOffsetMin, hwClockCfgChassisIndex=hwClockCfgChassisIndex, hwClockLastSourceName=hwClockLastSourceName, hwClockCesAcrNewMasterPwName=hwClockCesAcrNewMasterPwName, hwClockAttributeHoldOffTime=hwClockAttributeHoldOffTime, hwClockClusterTopoLinkType=hwClockClusterTopoLinkType, hwClockCesAcrPortName=hwClockCesAcrPortName, hwClockPortCfgForceOutS1=hwClockPortCfgForceOutS1, hwClockSourceInputBelowThreshold=hwClockSourceInputBelowThreshold, hwClockSrcCfgTable=hwClockSrcCfgTable, hwClockCesAcrChannelType=hwClockCesAcrChannelType, hwClockBitsCfgSourceId=hwClockBitsCfgSourceId, hwClockSourceSelSourceId=hwClockSourceSelSourceId, hwClockAttributeLtiSquelch=hwClockAttributeLtiSquelch, 
hwClockSourceSysClkWorkMode=hwClockSourceSysClkWorkMode, hwClockCesDcrSlot=hwClockCesDcrSlot, hwClockCfgBits1LockOut=hwClockCfgBits1LockOut, hwClockSrcCfgClkEnable=hwClockSrcCfgClkEnable, hwClockConformance=hwClockConformance, hwClockSysSelGroup=hwClockSysSelGroup, hwClockNotifications=hwClockNotifications, hwClockSourceSelEntry=hwClockSourceSelEntry, hwClockCesAcrDomain=hwClockCesAcrDomain, hwClockCesDcrMasterPwChange=hwClockCesDcrMasterPwChange, hwClockCesAcrCard=hwClockCesAcrCard, hwClockSrcCfgPhyState=hwClockSrcCfgPhyState, hwClockSourceCfgTable=hwClockSourceCfgTable, hwClockNotInLockedMode=hwClockNotInLockedMode, hwClockSourceSsmUnknown=hwClockSourceSsmUnknown, hwClockBitsCfgChassisIndex=hwClockBitsCfgChassisIndex, hwClockCesDcrLockFail=hwClockCesDcrLockFail, hwClockCesAcrPortCfgEntry=hwClockCesAcrPortCfgEntry, hwClockPortCfgTable=hwClockPortCfgTable, hwClockSourceSsmControl=hwClockSourceSsmControl, hwClockCesDcrCard=hwClockCesDcrCard, hwClockSrcTraceSrcName=hwClockSrcTraceSrcName, hwClockSrcCfgSourceState=hwClockSrcCfgSourceState, hwClockBitsCfgForceOutS1=hwClockBitsCfgForceOutS1, hwClockCfgSourceSsm=hwClockCfgSourceSsm, hwClockBitsCfgBitsPortType=hwClockBitsCfgBitsPortType, hwClockLineClkCfgPortId=hwClockLineClkCfgPortId, hwClockCesAcrLockFail=hwClockCesAcrLockFail, hwClockSrcSelChassisIndex=hwClockSrcSelChassisIndex, hwClockAttributeWtrTime=hwClockAttributeWtrTime, hwClockAttributeFreqCheckEnable=hwClockAttributeFreqCheckEnable, hwClockCfgPriRvtEnableStatus=hwClockCfgPriRvtEnableStatus, hwClockLineClkCfgSendS1=hwClockLineClkCfgSendS1, hwClockSourceStateResume=hwClockSourceStateResume, hwClockSrcCfgChassisIndex=hwClockSrcCfgChassisIndex, hwClockCesAcrLockFailResume=hwClockCesAcrLockFailResume, hwClockCesAcrDomianInfoState=hwClockCesAcrDomianInfoState, hwClockExtTimeOutputType=hwClockExtTimeOutputType, hwClockSourceOutputBelowThreshold=hwClockSourceOutputBelowThreshold, hwClockCesAcrMasterPwChange=hwClockCesAcrMasterPwChange, 
hwClockAttributeInputThreshold=hwClockAttributeInputThreshold, hwClockCesAcrCfgSourceState=hwClockCesAcrCfgSourceState, hwClockSrcCfgEntry=hwClockSrcCfgEntry, hwClockCfgHoldOffTime=hwClockCfgHoldOffTime, hwClockSourceCfgEntry=hwClockSourceCfgEntry, hwClockPortCfgEntry=hwClockPortCfgEntry, hwClockAttributeRetrieveMode=hwClockAttributeRetrieveMode, hwClockCfgSsmThreshold=hwClockCfgSsmThreshold, hwClockSourceFreqCheck=hwClockSourceFreqCheck, hwClockSourceFailed=hwClockSourceFailed, hwClockClusterSyncType=hwClockClusterSyncType, hwClockCesAcrDomianInfoCard=hwClockCesAcrDomianInfoCard, hwClockCfgSystemLockOut=hwClockCfgSystemLockOut, hwClockCesAcrLockState=hwClockCesAcrLockState, hwClockCesAcrCfgClockId=hwClockCesAcrCfgClockId, hwClockLineClkCfgEntry=hwClockLineClkCfgEntry, hwClockSrcSelEntry=hwClockSrcSelEntry, hwClockAttributeSysMaxOutSsm=hwClockAttributeSysMaxOutSsm, hwClockCesAcrPortCfgRowStatus=hwClockCesAcrPortCfgRowStatus, hwClockSourceSysClkLockModeChange=hwClockSourceSysClkLockModeChange, hwClockTrapOidGroup=hwClockTrapOidGroup, hwClockSsmPktLos=hwClockSsmPktLos, hwClockAttributeTable=hwClockAttributeTable, hwClockSourceOutputBelowThresholdResume=hwClockSourceOutputBelowThresholdResume, hwClockSrcCfgOutClockId=hwClockSrcCfgOutClockId, hwClockLineClkCfgChassisIndex=hwClockLineClkCfgChassisIndex, hwClockSrcCfgSsmTimeout=hwClockSrcCfgSsmTimeout, hwClockCesAcrCfgDomain=hwClockCesAcrCfgDomain, hwClockBitsCfgGroup=hwClockBitsCfgGroup, hwClockCfgSourceSsmSetMode=hwClockCfgSourceSsmSetMode, hwClockCfgBits1Priority=hwClockCfgBits1Priority, hwClockBitsCfgRecvSaBit=hwClockBitsCfgRecvSaBit, hwClockSourceStateChange=hwClockSourceStateChange, hwClockAttributeOutThreshold=hwClockAttributeOutThreshold, hwClockClusterTopoStatus=hwClockClusterTopoStatus, hwClockLineCfgSoureId=hwClockLineCfgSoureId, hwClockAttributeOutValue=hwClockAttributeOutValue, hwClockAttributeSysClkLockMode=hwClockAttributeSysClkLockMode, hwClockCesAcrOldMasterPwName=hwClockCesAcrOldMasterPwName, 
hwClockCesDcrLockState=hwClockCesDcrLockState, hwClockCfgSystemPriority=hwClockCfgSystemPriority, hwClockClusterTopoEntry=hwClockClusterTopoEntry, hwClockCesAcrCfgTable=hwClockCesAcrCfgTable, hwClockClusterTopoFailResume=hwClockClusterTopoFailResume, hwClockCfgFreqCheckResult=hwClockCfgFreqCheckResult, hwClockSrcSelType=hwClockSrcSelType, hwClockBitsCfgInputMode=hwClockBitsCfgInputMode, hwClockAttributeInternalClockId=hwClockAttributeInternalClockId, hwClockSrcCfgOutSsm=hwClockSrcCfgOutSsm, hwClockAttributeChassisIndex=hwClockAttributeChassisIndex, hwClockNotificationsGroup=hwClockNotificationsGroup, hwClockSrcCfgSsmInterval=hwClockSrcCfgSsmInterval, hwClockCesAcrIfIndex=hwClockCesAcrIfIndex, hwClockSourceForceCloseEnableStatus=hwClockSourceForceCloseEnableStatus, hwClockSourceFreqCheckResume=hwClockSourceFreqCheckResume, hwClockSourceGroups=hwClockSourceGroups, hwClockCfgBits0LockOut=hwClockCfgBits0LockOut, hwClockCesDcrDomain=hwClockCesDcrDomain, hwClockTimeUsedSource=hwClockTimeUsedSource, hwClockCfgWtrTime=hwClockCfgWtrTime, hwClockCfgExportEnableStatus=hwClockCfgExportEnableStatus, hwClockBitsCfgEntry=hwClockBitsCfgEntry, hwClockCesAcrDomainInfoEntry=hwClockCesAcrDomainInfoEntry, hwClockFrequencyOffsetMean=hwClockFrequencyOffsetMean, hwClockBitsCfgName=hwClockBitsCfgName, hwClockBitsCfgBitsType=hwClockBitsCfgBitsType, hwClockSrcCfgSourceIndex=hwClockSrcCfgSourceIndex, hwClockCesDcrLockFailResume=hwClockCesDcrLockFailResume, hwClockBitsCfgTable=hwClockBitsCfgTable, hwClockAttributeTodProtocol=hwClockAttributeTodProtocol, hwClockCesAcrSourceMode=hwClockCesAcrSourceMode, hwClockSourceRetrieveMode=hwClockSourceRetrieveMode, hwClockCesDcrNewMasterPwName=hwClockCesDcrNewMasterPwName, hwClockCesAcrCfgEntry=hwClockCesAcrCfgEntry, hwClockSourceSelTable=hwClockSourceSelTable, hwClockPortCfgGroup=hwClockPortCfgGroup, hwClockCesAcrPwDomain=hwClockCesAcrPwDomain)
| true | true |
f7306fd1c3e0405e0d613485262300727fcf5a80 | 340 | py | Python | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='gfracture',
version='0.1',
description='Fracture segmentation and trace analysis',
url='https://github.com/ScottHMcKean/gfracture',
author='Scott McKean',
author_email='scott.mckean@ucalgary.ca',
license='MIT',
packages=['gfracture'],
zip_safe=False) | 30.909091 | 61 | 0.667647 | from setuptools import setup
setup(name='gfracture',
version='0.1',
description='Fracture segmentation and trace analysis',
url='https://github.com/ScottHMcKean/gfracture',
author='Scott McKean',
author_email='scott.mckean@ucalgary.ca',
license='MIT',
packages=['gfracture'],
zip_safe=False) | true | true |
f7307105658f4dd57c433ca83570b1d9ad3a1448 | 79 | py | Python | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 1,929 | 2016-02-14T08:30:38.000Z | 2022-03-31T03:00:35.000Z | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 304 | 2016-02-18T15:52:22.000Z | 2022-03-31T18:06:54.000Z | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 285 | 2016-03-20T04:25:08.000Z | 2022-03-24T11:31:17.000Z | from .char_ngrams import CharNgramsTokenizer
from .terms import TermsTokenizer
| 26.333333 | 44 | 0.873418 | from .char_ngrams import CharNgramsTokenizer
from .terms import TermsTokenizer
| true | true |
f73071c7b77b19c3949f7425141f76374ee2f03f | 1,667 | py | Python | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | null | null | null | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 23 | 2020-07-16T15:40:35.000Z | 2021-12-13T13:59:30.000Z | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 1 | 2021-02-24T04:58:40.000Z | 2021-02-24T04:58:40.000Z | # Generated by Django 3.1 on 2020-12-02 10:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("selfswab", "0011_selfswabtest_should_sync"),
]
operations = [
migrations.AddField(
model_name="selfswabregistration",
name="age",
field=models.CharField(
choices=[
("<18", "<18"),
("18-39", "18-39"),
("40-65", "40-65"),
(">65", ">65"),
],
max_length=5,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="gender",
field=models.CharField(
choices=[
("Male", "Male"),
("Female", "Female"),
("Other", "Other"),
("not_say", "not_say"),
],
max_length=10,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="should_sync",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="selfswabregistration",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="selfswabregistration",
name="updated_at",
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| 28.741379 | 74 | 0.467307 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("selfswab", "0011_selfswabtest_should_sync"),
]
operations = [
migrations.AddField(
model_name="selfswabregistration",
name="age",
field=models.CharField(
choices=[
("<18", "<18"),
("18-39", "18-39"),
("40-65", "40-65"),
(">65", ">65"),
],
max_length=5,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="gender",
field=models.CharField(
choices=[
("Male", "Male"),
("Female", "Female"),
("Other", "Other"),
("not_say", "not_say"),
],
max_length=10,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="should_sync",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="selfswabregistration",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="selfswabregistration",
name="updated_at",
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| true | true |
f7307275427014a14072d16f4f9a637d0116fbc9 | 19,509 | py | Python | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for db.api layer."""
from six import moves
from rally.common import db
from rally import consts
from rally import exceptions
from tests.unit import test
class TasksTestCase(test.DBTestCase):
def setUp(self):
super(TasksTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _get_task(self, uuid):
return db.task_get(uuid)
def _create_task(self, values=None):
values = values or {}
if "deployment_uuid" not in values:
values["deployment_uuid"] = self.deploy["uuid"]
return db.task_create(values)
def test_task_get_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_get, "f885f435-f6ca-4f3e-9b3e-aeb6837080f2")
def test_task_create(self):
task = self._create_task()
db_task = self._get_task(task["uuid"])
self.assertIsNotNone(db_task["uuid"])
self.assertIsNotNone(db_task["id"])
self.assertEqual(db_task["status"], consts.TaskStatus.INIT)
def test_task_create_without_uuid(self):
_uuid = "19be8589-48b0-4af1-a369-9bebaaa563ab"
task = self._create_task({"uuid": _uuid})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["uuid"], _uuid)
def test_task_update(self):
task = self._create_task({})
db.task_update(task["uuid"], {"status": consts.TaskStatus.FAILED})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["status"], consts.TaskStatus.FAILED)
def test_task_update_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_update,
"7ae1da26-feaa-4213-8208-76af2857a5ab", {})
def test_task_update_all_stats(self):
    """Every status defined in consts.TaskStatus can be stored and read back."""
    _uuid = self._create_task({})["uuid"]
    for status in consts.TaskStatus:
        db.task_update(_uuid, {"status": status})
        db_task = self._get_task(_uuid)
        self.assertEqual(db_task["status"], status)

def test_task_list_empty(self):
    """task_list() on a fresh database returns an empty list."""
    self.assertEqual([], db.task_list())

def test_task_list(self):
    """task_list() honours the status and deployment filters."""
    INIT = consts.TaskStatus.INIT
    task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
    FINISHED = consts.TaskStatus.FINISHED
    task_finished = sorted(self._create_task(
        {"status": FINISHED,
         "deployment_uuid": self.deploy["uuid"]}
    )["uuid"] for i in moves.range(3))
    task_all = sorted(task_init + task_finished)

    def get_uuids(status=None, deployment=None):
        # Helper: sorted uuids of the tasks matching the given filters.
        tasks = db.task_list(status=status, deployment=deployment)
        return sorted(task["uuid"] for task in tasks)

    self.assertEqual(task_all, get_uuids(None))
    self.assertEqual(task_init, get_uuids(status=INIT))
    self.assertEqual(task_finished, get_uuids(status=FINISHED))
    self.assertRaises(exceptions.DeploymentNotFound,
                      get_uuids, deployment="non-existing-deployment")
    # A deleted task must no longer show up in listings.
    deleted_task_uuid = task_finished.pop()
    db.task_delete(deleted_task_uuid)
    self.assertEqual(task_init, get_uuids(INIT))
    self.assertEqual(sorted(task_finished), get_uuids(FINISHED))

def test_task_delete(self):
    """Deleting one task leaves other tasks untouched."""
    task1, task2 = self._create_task()["uuid"], self._create_task()["uuid"]
    db.task_delete(task1)
    self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
    self.assertEqual(task2, self._get_task(task2)["uuid"])

def test_task_delete_not_found(self):
    """Deleting an unknown uuid raises TaskNotFound."""
    self.assertRaises(exceptions.TaskNotFound,
                      db.task_delete,
                      "da6f820c-b133-4b9f-8534-4c3bcc40724b")

def test_task_delete_with_results(self):
    """Deleting a task also removes its stored results."""
    task_id = self._create_task()["uuid"]
    db.task_result_create(task_id,
                          {task_id: task_id},
                          {task_id: task_id})
    res = db.task_result_get_all_by_uuid(task_id)
    self.assertEqual(len(res), 1)
    db.task_delete(task_id)
    res = db.task_result_get_all_by_uuid(task_id)
    self.assertEqual(len(res), 0)

def test_task_delete_by_uuid_and_status(self):
    """task_delete with a matching status filter removes only that task."""
    values = {
        "status": consts.TaskStatus.FINISHED,
    }
    task1 = self._create_task(values=values)["uuid"]
    task2 = self._create_task(values=values)["uuid"]
    db.task_delete(task1, status=consts.TaskStatus.FINISHED)
    self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
    self.assertEqual(task2, self._get_task(task2)["uuid"])

def test_task_delete_by_uuid_and_status_invalid(self):
    """task_delete with a non-matching status raises TaskInvalidStatus."""
    task = self._create_task(
        values={"status": consts.TaskStatus.INIT})["uuid"]
    self.assertRaises(exceptions.TaskInvalidStatus, db.task_delete, task,
                      status=consts.TaskStatus.FINISHED)

def test_task_delete_by_uuid_and_status_not_found(self):
    """task_delete with status filter on an unknown uuid raises TaskNotFound."""
    self.assertRaises(exceptions.TaskNotFound,
                      db.task_delete,
                      "fcd0483f-a405-44c4-b712-99c9e52254eb",
                      status=consts.TaskStatus.FINISHED)

def test_task_result_get_all_by_uuid(self):
    """Results are stored and fetched per task uuid."""
    task1 = self._create_task()["uuid"]
    task2 = self._create_task()["uuid"]
    for task_id in (task1, task2):
        db.task_result_create(task_id,
                              {task_id: task_id},
                              {task_id: task_id})
    for task_id in (task1, task2):
        res = db.task_result_get_all_by_uuid(task_id)
        data = {task_id: task_id}
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0]["key"], data)
        self.assertEqual(res[0]["data"], data)

def test_task_get_detailed(self):
    """task_get_detailed embeds the task's results."""
    task1 = self._create_task()
    key = {"name": "atata"}
    data = {"a": "b", "c": "d"}
    db.task_result_create(task1["uuid"], key, data)
    task1_full = db.task_get_detailed(task1["uuid"])
    results = task1_full["results"]
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]["key"], key)
    self.assertEqual(results[0]["data"], data)

def test_task_get_detailed_last(self):
    """task_get_detailed_last returns the most recent task with results."""
    task1 = self._create_task()
    key = {"name": "atata"}
    data = {"a": "b", "c": "d"}
    db.task_result_create(task1["uuid"], key, data)
    task1_full = db.task_get_detailed_last()
    results = task1_full["results"]
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]["key"], key)
    self.assertEqual(results[0]["data"], data)
class DeploymentTestCase(test.DBTestCase):
    """Unit tests for the deployment CRUD layer of rally.common.db."""

    def test_deployment_create(self):
        """A new deployment starts in DEPLOY_INIT with the given config."""
        deploy = db.deployment_create({"config": {"opt": "val"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploy["uuid"], deploys[0]["uuid"])
        self.assertEqual(deploy["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy["config"], {"opt": "val"})

    def test_deployment_create_several(self):
        """Two deployments get distinct uuids and keep separate configs."""
        # Create a deployment
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 0)
        deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploy_one["uuid"], deploys[0]["uuid"])
        self.assertEqual(deploy_one["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy_one["config"], {"opt1": "val1"})
        # Create another deployment and make sure that they are different
        deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 2)
        self.assertEqual(set([deploy_one["uuid"], deploy_two["uuid"]]),
                         set([deploy["uuid"] for deploy in deploys]))
        self.assertNotEqual(deploy_one["uuid"], deploy_two["uuid"])
        self.assertEqual(deploy_two["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy_two["config"], {"opt2": "val2"})

    def test_deployment_update(self):
        """deployment_update replaces the config and persists it."""
        deploy = db.deployment_create({})
        self.assertEqual(deploy["config"], {})
        update_deploy = db.deployment_update(deploy["uuid"],
                                             {"config": {"opt": "val"}})
        self.assertEqual(update_deploy["uuid"], deploy["uuid"])
        self.assertEqual(update_deploy["config"], {"opt": "val"})
        get_deploy = db.deployment_get(deploy["uuid"])
        self.assertEqual(get_deploy["uuid"], deploy["uuid"])
        self.assertEqual(get_deploy["config"], {"opt": "val"})

    def test_deployment_update_several(self):
        """Updating one deployment does not leak into another."""
        # Create a deployment and update it
        deploy_one = db.deployment_create({})
        self.assertEqual(deploy_one["config"], {})
        update_deploy_one = db.deployment_update(
            deploy_one["uuid"], {"config": {"opt1": "val1"}})
        self.assertEqual(update_deploy_one["uuid"], deploy_one["uuid"])
        self.assertEqual(update_deploy_one["config"], {"opt1": "val1"})
        get_deploy_one = db.deployment_get(deploy_one["uuid"])
        self.assertEqual(get_deploy_one["uuid"], deploy_one["uuid"])
        self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
        # Create another deployment
        deploy_two = db.deployment_create({})
        update_deploy_two = db.deployment_update(
            deploy_two["uuid"], {"config": {"opt2": "val2"}})
        self.assertEqual(update_deploy_two["uuid"], deploy_two["uuid"])
        self.assertEqual(update_deploy_two["config"], {"opt2": "val2"})
        get_deploy_one_again = db.deployment_get(deploy_one["uuid"])
        self.assertEqual(get_deploy_one_again["uuid"], deploy_one["uuid"])
        self.assertEqual(get_deploy_one_again["config"], {"opt1": "val1"})

    def test_deployment_get(self):
        """deployment_get returns the right record for each uuid."""
        deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
        deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
        get_deploy_one = db.deployment_get(deploy_one["uuid"])
        get_deploy_two = db.deployment_get(deploy_two["uuid"])
        self.assertNotEqual(get_deploy_one["uuid"], get_deploy_two["uuid"])
        self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
        self.assertEqual(get_deploy_two["config"], {"opt2": "val2"})

    def test_deployment_get_not_found(self):
        """deployment_get on an unknown uuid raises DeploymentNotFound."""
        self.assertRaises(exceptions.DeploymentNotFound,
                          db.deployment_get,
                          "852e932b-9552-4b2d-89e3-a5915780a5e3")

    def test_deployment_list(self):
        """deployment_list returns every deployment."""
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({})
        deploys = db.deployment_list()
        self.assertEqual(sorted([deploy_one["uuid"], deploy_two["uuid"]]),
                         sorted([deploy["uuid"] for deploy in deploys]))

    def test_deployment_list_with_status_and_name(self):
        """deployment_list honours the status and name filters."""
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({
            "config": {},
            "status": consts.DeployStatus.DEPLOY_FAILED,
        })
        deploy_three = db.deployment_create({"name": "deployment_name"})
        deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_INIT)
        deploys.sort(key=lambda x: x["id"])
        self.assertEqual(len(deploys), 2)
        self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
        deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_FAILED)
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploys[0]["uuid"], deploy_two["uuid"])
        deploys = db.deployment_list(
            status=consts.DeployStatus.DEPLOY_FINISHED)
        self.assertEqual(len(deploys), 0)
        deploys = db.deployment_list(name="deployment_name")
        self.assertEqual(deploys[0]["uuid"], deploy_three["uuid"])
        self.assertEqual(len(deploys), 1)

    def test_deployment_list_parent(self):
        """Sub-deployments are hidden from the default list and reachable
        via the parent_uuid filter."""
        deploy = db.deployment_create({})
        subdeploy1 = db.deployment_create({"parent_uuid": deploy.uuid})
        subdeploy2 = db.deployment_create({"parent_uuid": deploy.uuid})
        self.assertEqual([deploy.uuid], [d.uuid for d in db.deployment_list()])
        subdeploys = db.deployment_list(parent_uuid=deploy.uuid)
        self.assertEqual(set([subdeploy1.uuid, subdeploy2.uuid]),
                         set([d.uuid for d in subdeploys]))

    def test_deployment_delete(self):
        """Deleting one deployment leaves the others intact."""
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({})
        db.deployment_delete(deploy_two["uuid"])
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])

    def test_deployment_delete_not_found(self):
        """deployment_delete on an unknown uuid raises DeploymentNotFound."""
        self.assertRaises(exceptions.DeploymentNotFound,
                          db.deployment_delete,
                          "5f2883be-46c8-4c4b-a4fe-988ad0c6b20a")

    def test_deployment_delete_is_busy(self):
        """A deployment that still owns resources cannot be deleted."""
        deployment = db.deployment_create({})
        db.resource_create({"deployment_uuid": deployment["uuid"]})
        db.resource_create({"deployment_uuid": deployment["uuid"]})
        self.assertRaises(exceptions.DeploymentIsBusy, db.deployment_delete,
                          deployment["uuid"])
class ResourceTestCase(test.DBTestCase):
    """Unit tests for the resource CRUD layer of rally.common.db."""

    def test_create(self):
        """resource_create stores provider name, type and deployment link."""
        deployment = db.deployment_create({})
        resource = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "fakeprovider",
            "type": "faketype",
        })
        resources = db.resource_get_all(deployment["uuid"])
        self.assertTrue(resource["id"])
        self.assertEqual(len(resources), 1)
        self.assertTrue(resource["id"], resources[0]["id"])
        self.assertEqual(resource["deployment_uuid"], deployment["uuid"])
        self.assertEqual(resource["provider_name"], "fakeprovider")
        self.assertEqual(resource["type"], "faketype")

    def test_delete(self):
        """resource_delete removes the resource from its deployment."""
        deployment = db.deployment_create({})
        res = db.resource_create({"deployment_uuid": deployment["uuid"]})
        db.resource_delete(res["id"])
        resources = db.resource_get_all(deployment["uuid"])
        self.assertEqual(len(resources), 0)

    def test_delete_not_found(self):
        """resource_delete on an unknown id raises ResourceNotFound."""
        self.assertRaises(exceptions.ResourceNotFound,
                          db.resource_delete, 123456789)

    def test_get_all(self):
        """resource_get_all returns only resources of the given deployment."""
        deployment0 = db.deployment_create({})
        deployment1 = db.deployment_create({})
        res0 = db.resource_create({"deployment_uuid": deployment0["uuid"]})
        res1 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        res2 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        resources = db.resource_get_all(deployment1["uuid"])
        self.assertEqual(sorted([res1["id"], res2["id"]]),
                         sorted([r["id"] for r in resources]))
        resources = db.resource_get_all(deployment0["uuid"])
        self.assertEqual(len(resources), 1)
        self.assertEqual(res0["id"], resources[0]["id"])

    def test_get_all_by_provider_name(self):
        """resource_get_all honours the provider_name filter."""
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "two",
        })
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])

    def test_get_all_by_provider_type(self):
        """resource_get_all honours the type filter."""
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "two",
        })
        resources = db.resource_get_all(deployment["uuid"], type="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"], type="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])
class VerificationTestCase(test.DBTestCase):
    """Unit tests for the verification records of rally.common.db."""

    def setUp(self):
        super(VerificationTestCase, self).setUp()
        # Verifications are always attached to a deployment.
        self.deploy = db.deployment_create({})

    def _create_verification(self):
        # Helper: create a verification bound to the fixture deployment.
        deployment_uuid = self.deploy["uuid"]
        return db.verification_create(deployment_uuid)

    def test_creation_of_verification(self):
        """A created verification round-trips through verification_get."""
        verification = self._create_verification()
        db_verification = db.verification_get(verification["uuid"])
        self.assertEqual(verification["tests"], db_verification["tests"])
        self.assertEqual(verification["time"], db_verification["time"])
        self.assertEqual(verification["errors"], db_verification["errors"])
        self.assertEqual(verification["failures"], db_verification["failures"])
class WorkerTestCase(test.DBTestCase):
    """Unit tests for the worker registry of rally.common.db."""

    def setUp(self):
        super(WorkerTestCase, self).setUp()
        self.worker = db.register_worker({"hostname": "test"})

    def test_register_worker_duplicate(self):
        """Registering the same hostname twice raises WorkerAlreadyRegistered."""
        self.assertRaises(exceptions.WorkerAlreadyRegistered,
                          db.register_worker, {"hostname": "test"})

    def test_get_worker(self):
        """get_worker returns the registered record by hostname."""
        worker = db.get_worker("test")
        self.assertEqual(self.worker["id"], worker["id"])
        self.assertEqual(self.worker["hostname"], worker["hostname"])

    def test_get_worker_not_found(self):
        """get_worker on an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "notfound")

    def test_unregister_worker(self):
        """unregister_worker removes the record."""
        db.unregister_worker("test")
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "test")

    def test_unregister_worker_not_found(self):
        """unregister_worker on an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound,
                          db.unregister_worker, "fake")

    def test_update_worker(self):
        """update_worker bumps the updated_at timestamp."""
        db.update_worker("test")
        worker = db.get_worker("test")
        self.assertNotEqual(self.worker["updated_at"], worker["updated_at"])

    def test_update_worker_not_found(self):
        """update_worker on an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound, db.update_worker, "fake")
| 43.066225 | 79 | 0.634579 |
from six import moves
from rally.common import db
from rally import consts
from rally import exceptions
from tests.unit import test
class TasksTestCase(test.DBTestCase):
def setUp(self):
super(TasksTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _get_task(self, uuid):
return db.task_get(uuid)
def _create_task(self, values=None):
values = values or {}
if "deployment_uuid" not in values:
values["deployment_uuid"] = self.deploy["uuid"]
return db.task_create(values)
def test_task_get_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_get, "f885f435-f6ca-4f3e-9b3e-aeb6837080f2")
def test_task_create(self):
task = self._create_task()
db_task = self._get_task(task["uuid"])
self.assertIsNotNone(db_task["uuid"])
self.assertIsNotNone(db_task["id"])
self.assertEqual(db_task["status"], consts.TaskStatus.INIT)
def test_task_create_without_uuid(self):
_uuid = "19be8589-48b0-4af1-a369-9bebaaa563ab"
task = self._create_task({"uuid": _uuid})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["uuid"], _uuid)
def test_task_update(self):
task = self._create_task({})
db.task_update(task["uuid"], {"status": consts.TaskStatus.FAILED})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["status"], consts.TaskStatus.FAILED)
def test_task_update_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_update,
"7ae1da26-feaa-4213-8208-76af2857a5ab", {})
def test_task_update_all_stats(self):
_uuid = self._create_task({})["uuid"]
for status in consts.TaskStatus:
db.task_update(_uuid, {"status": status})
db_task = self._get_task(_uuid)
self.assertEqual(db_task["status"], status)
def test_task_list_empty(self):
self.assertEqual([], db.task_list())
def test_task_list(self):
INIT = consts.TaskStatus.INIT
task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
FINISHED = consts.TaskStatus.FINISHED
task_finished = sorted(self._create_task(
{"status": FINISHED,
"deployment_uuid": self.deploy["uuid"]}
)["uuid"] for i in moves.range(3))
task_all = sorted(task_init + task_finished)
def get_uuids(status=None, deployment=None):
tasks = db.task_list(status=status, deployment=deployment)
return sorted(task["uuid"] for task in tasks)
self.assertEqual(task_all, get_uuids(None))
self.assertEqual(task_init, get_uuids(status=INIT))
self.assertEqual(task_finished, get_uuids(status=FINISHED))
self.assertRaises(exceptions.DeploymentNotFound,
get_uuids, deployment="non-existing-deployment")
deleted_task_uuid = task_finished.pop()
db.task_delete(deleted_task_uuid)
self.assertEqual(task_init, get_uuids(INIT))
self.assertEqual(sorted(task_finished), get_uuids(FINISHED))
def test_task_delete(self):
task1, task2 = self._create_task()["uuid"], self._create_task()["uuid"]
db.task_delete(task1)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"da6f820c-b133-4b9f-8534-4c3bcc40724b")
def test_task_delete_with_results(self):
task_id = self._create_task()["uuid"]
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 1)
db.task_delete(task_id)
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 0)
def test_task_delete_by_uuid_and_status(self):
values = {
"status": consts.TaskStatus.FINISHED,
}
task1 = self._create_task(values=values)["uuid"]
task2 = self._create_task(values=values)["uuid"]
db.task_delete(task1, status=consts.TaskStatus.FINISHED)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_by_uuid_and_status_invalid(self):
task = self._create_task(
values={"status": consts.TaskStatus.INIT})["uuid"]
self.assertRaises(exceptions.TaskInvalidStatus, db.task_delete, task,
status=consts.TaskStatus.FINISHED)
def test_task_delete_by_uuid_and_status_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"fcd0483f-a405-44c4-b712-99c9e52254eb",
status=consts.TaskStatus.FINISHED)
def test_task_result_get_all_by_uuid(self):
task1 = self._create_task()["uuid"]
task2 = self._create_task()["uuid"]
for task_id in (task1, task2):
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
for task_id in (task1, task2):
res = db.task_result_get_all_by_uuid(task_id)
data = {task_id: task_id}
self.assertEqual(len(res), 1)
self.assertEqual(res[0]["key"], data)
self.assertEqual(res[0]["data"], data)
def test_task_get_detailed(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed(task1["uuid"])
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
def test_task_get_detailed_last(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed_last()
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
class DeploymentTestCase(test.DBTestCase):
def test_deployment_create(self):
deploy = db.deployment_create({"config": {"opt": "val"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy["config"], {"opt": "val"})
def test_deployment_create_several(self):
deploys = db.deployment_list()
self.assertEqual(len(deploys), 0)
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy_one["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy_one["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_one["config"], {"opt1": "val1"})
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 2)
self.assertEqual(set([deploy_one["uuid"], deploy_two["uuid"]]),
set([deploy["uuid"] for deploy in deploys]))
self.assertNotEqual(deploy_one["uuid"], deploy_two["uuid"])
self.assertEqual(deploy_two["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_two["config"], {"opt2": "val2"})
def test_deployment_update(self):
deploy = db.deployment_create({})
self.assertEqual(deploy["config"], {})
update_deploy = db.deployment_update(deploy["uuid"],
{"config": {"opt": "val"}})
self.assertEqual(update_deploy["uuid"], deploy["uuid"])
self.assertEqual(update_deploy["config"], {"opt": "val"})
get_deploy = db.deployment_get(deploy["uuid"])
self.assertEqual(get_deploy["uuid"], deploy["uuid"])
self.assertEqual(get_deploy["config"], {"opt": "val"})
def test_deployment_update_several(self):
deploy_one = db.deployment_create({})
self.assertEqual(deploy_one["config"], {})
update_deploy_one = db.deployment_update(
deploy_one["uuid"], {"config": {"opt1": "val1"}})
self.assertEqual(update_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(update_deploy_one["config"], {"opt1": "val1"})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
deploy_two = db.deployment_create({})
update_deploy_two = db.deployment_update(
deploy_two["uuid"], {"config": {"opt2": "val2"}})
self.assertEqual(update_deploy_two["uuid"], deploy_two["uuid"])
self.assertEqual(update_deploy_two["config"], {"opt2": "val2"})
get_deploy_one_again = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["config"], {"opt1": "val1"})
def test_deployment_get(self):
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
get_deploy_two = db.deployment_get(deploy_two["uuid"])
self.assertNotEqual(get_deploy_one["uuid"], get_deploy_two["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
self.assertEqual(get_deploy_two["config"], {"opt2": "val2"})
def test_deployment_get_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_get,
"852e932b-9552-4b2d-89e3-a5915780a5e3")
def test_deployment_list(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
deploys = db.deployment_list()
self.assertEqual(sorted([deploy_one["uuid"], deploy_two["uuid"]]),
sorted([deploy["uuid"] for deploy in deploys]))
def test_deployment_list_with_status_and_name(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({
"config": {},
"status": consts.DeployStatus.DEPLOY_FAILED,
})
deploy_three = db.deployment_create({"name": "deployment_name"})
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_INIT)
deploys.sort(key=lambda x: x["id"])
self.assertEqual(len(deploys), 2)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_FAILED)
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_two["uuid"])
deploys = db.deployment_list(
status=consts.DeployStatus.DEPLOY_FINISHED)
self.assertEqual(len(deploys), 0)
deploys = db.deployment_list(name="deployment_name")
self.assertEqual(deploys[0]["uuid"], deploy_three["uuid"])
self.assertEqual(len(deploys), 1)
def test_deployment_list_parent(self):
deploy = db.deployment_create({})
subdeploy1 = db.deployment_create({"parent_uuid": deploy.uuid})
subdeploy2 = db.deployment_create({"parent_uuid": deploy.uuid})
self.assertEqual([deploy.uuid], [d.uuid for d in db.deployment_list()])
subdeploys = db.deployment_list(parent_uuid=deploy.uuid)
self.assertEqual(set([subdeploy1.uuid, subdeploy2.uuid]),
set([d.uuid for d in subdeploys]))
def test_deployment_delete(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
db.deployment_delete(deploy_two["uuid"])
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
def test_deployment_delete_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_delete,
"5f2883be-46c8-4c4b-a4fe-988ad0c6b20a")
def test_deployment_delete_is_busy(self):
deployment = db.deployment_create({})
db.resource_create({"deployment_uuid": deployment["uuid"]})
db.resource_create({"deployment_uuid": deployment["uuid"]})
self.assertRaises(exceptions.DeploymentIsBusy, db.deployment_delete,
deployment["uuid"])
class ResourceTestCase(test.DBTestCase):
def test_create(self):
deployment = db.deployment_create({})
resource = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "fakeprovider",
"type": "faketype",
})
resources = db.resource_get_all(deployment["uuid"])
self.assertTrue(resource["id"])
self.assertEqual(len(resources), 1)
self.assertTrue(resource["id"], resources[0]["id"])
self.assertEqual(resource["deployment_uuid"], deployment["uuid"])
self.assertEqual(resource["provider_name"], "fakeprovider")
self.assertEqual(resource["type"], "faketype")
def test_delete(self):
deployment = db.deployment_create({})
res = db.resource_create({"deployment_uuid": deployment["uuid"]})
db.resource_delete(res["id"])
resources = db.resource_get_all(deployment["uuid"])
self.assertEqual(len(resources), 0)
def test_delete_not_found(self):
self.assertRaises(exceptions.ResourceNotFound,
db.resource_delete, 123456789)
def test_get_all(self):
deployment0 = db.deployment_create({})
deployment1 = db.deployment_create({})
res0 = db.resource_create({"deployment_uuid": deployment0["uuid"]})
res1 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
res2 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
resources = db.resource_get_all(deployment1["uuid"])
self.assertEqual(sorted([res1["id"], res2["id"]]),
sorted([r["id"] for r in resources]))
resources = db.resource_get_all(deployment0["uuid"])
self.assertEqual(len(resources), 1)
self.assertEqual(res0["id"], resources[0]["id"])
def test_get_all_by_provider_name(self):
deployment = db.deployment_create({})
res_one = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "one",
})
res_two = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "two",
})
resources = db.resource_get_all(deployment["uuid"],
provider_name="one")
self.assertEqual(len(resources), 1)
self.assertEqual(res_one["id"], resources[0]["id"])
resources = db.resource_get_all(deployment["uuid"],
provider_name="two")
self.assertEqual(len(resources), 1)
self.assertEqual(res_two["id"], resources[0]["id"])
def test_get_all_by_provider_type(self):
deployment = db.deployment_create({})
res_one = db.resource_create({
"deployment_uuid": deployment["uuid"],
"type": "one",
})
res_two = db.resource_create({
"deployment_uuid": deployment["uuid"],
"type": "two",
})
resources = db.resource_get_all(deployment["uuid"], type="one")
self.assertEqual(len(resources), 1)
self.assertEqual(res_one["id"], resources[0]["id"])
resources = db.resource_get_all(deployment["uuid"], type="two")
self.assertEqual(len(resources), 1)
self.assertEqual(res_two["id"], resources[0]["id"])
class VerificationTestCase(test.DBTestCase):
def setUp(self):
super(VerificationTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _create_verification(self):
deployment_uuid = self.deploy["uuid"]
return db.verification_create(deployment_uuid)
def test_creation_of_verification(self):
verification = self._create_verification()
db_verification = db.verification_get(verification["uuid"])
self.assertEqual(verification["tests"], db_verification["tests"])
self.assertEqual(verification["time"], db_verification["time"])
self.assertEqual(verification["errors"], db_verification["errors"])
self.assertEqual(verification["failures"], db_verification["failures"])
class WorkerTestCase(test.DBTestCase):
def setUp(self):
super(WorkerTestCase, self).setUp()
self.worker = db.register_worker({"hostname": "test"})
def test_register_worker_duplicate(self):
self.assertRaises(exceptions.WorkerAlreadyRegistered,
db.register_worker, {"hostname": "test"})
def test_get_worker(self):
worker = db.get_worker("test")
self.assertEqual(self.worker["id"], worker["id"])
self.assertEqual(self.worker["hostname"], worker["hostname"])
def test_get_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "notfound")
def test_unregister_worker(self):
db.unregister_worker("test")
self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "test")
def test_unregister_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound,
db.unregister_worker, "fake")
def test_update_worker(self):
db.update_worker("test")
worker = db.get_worker("test")
self.assertNotEqual(self.worker["updated_at"], worker["updated_at"])
def test_update_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound, db.update_worker, "fake")
| true | true |
f73072bb0fa62e6519596da6e47d9a8d23c2a32a | 1,146 | py | Python | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | """
WSGI config for src project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at the settings module before the application object is built.
# NOTE(review): the module is named just "settings" (not "<project>.settings"),
# so the project directory must be on sys.path — confirm against deployment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 38.2 | 79 | 0.790576 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"settings")
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true | true |
f73073b29e40f88ca0b69b779adcab0c43322bfc | 10,040 | py | Python | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
# Default compute device: first CUDA GPU when available, otherwise CPU.
# NOTE(review): `device` is not referenced in the visible portion of this
# module — confirm it is used by callers before removing.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class VGG_FeatureExtractor(nn.Module):
    """VGG-style convolutional feature extractor (as used in CRNN).

    Maps an image batch of shape (B, input_channel, 32, W) to a feature map
    of shape (B, output_channel, 1, W/4 - 1); the height is collapsed so the
    width axis can be read as a sequence by a downstream recurrent head.

    Args:
        input_channel: number of channels of the input image (1 or 3).
        output_channel: width of the last conv stage; earlier stages use
            output_channel / 8, / 4 and / 2 (default 512 -> [64,128,256,512]).
    """

    def __init__(self, input_channel, output_channel=512):
        super(VGG_FeatureExtractor, self).__init__()
        # Per-stage channel widths: [64, 128, 256, 512] for the default.
        self.output_channel = [int(output_channel / 8), int(output_channel / 4),
                               int(output_channel / 2), output_channel]
        self.ConvNet = nn.Sequential(
            nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 64x16x50
            nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 128x8x25
            nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True),  # 256x8x25
            nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
            # Pool height only (kernel/stride (2,1)) to preserve sequence width.
            nn.MaxPool2d((2, 1), (2, 1)),  # 256x4x25
            nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),  # 512x4x25
            nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
            nn.MaxPool2d((2, 1), (2, 1)),  # 512x2x25
            nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True))  # 512x1x24

    def forward(self, input):
        """Run the conv stack; see class docstring for shapes."""
        return self.ConvNet(input)
class RCNN_FeatureExtractor(nn.Module):
    """Gated recurrent CNN feature extractor (GRCNN) for text recognition.

    Same role as VGG_FeatureExtractor but with GRCL blocks (defined below in
    this module) in place of plain conv stages.

    Args:
        input_channel: number of channels of the input image.
        output_channel: width of the final stage; earlier stages use
            output_channel / 8, / 4 and / 2 (default 512 -> [64,128,256,512]).
    """

    def __init__(self, input_channel, output_channel=512):
        super(RCNN_FeatureExtractor, self).__init__()
        self.output_channel = [int(output_channel / 8), int(output_channel / 4),
                               int(output_channel / 2), output_channel]
        self.ConvNet = nn.Sequential(
            nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 64 x 16 x 50
            GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
            nn.MaxPool2d(2, 2),  # 64 x 8 x 25
            GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
            # Halve height only; padding (0,1) widens the sequence axis by one.
            nn.MaxPool2d(2, (2, 1), (0, 1)),  # 128 x 4 x 26
            GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
            nn.MaxPool2d(2, (2, 1), (0, 1)),  # 256 x 2 x 27
            nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True))  # 512 x 1 x 26

    def forward(self, input):
        """Run the conv/GRCL stack; see inline shape comments."""
        return self.ConvNet(input)
class ResNet_FeatureExtractor(nn.Module):
    """ResNet-based feature extractor: thin wrapper around the ResNet module
    defined below, configured with BasicBlock and [1, 2, 5, 3] blocks per stage."""

    def __init__(self, input_channel, output_channel=512):
        super(ResNet_FeatureExtractor, self).__init__()
        # Delegate the whole architecture to ResNet; this class only fixes the
        # block type and the per-stage depths.
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        """Return the ResNet feature map for a batch of images."""
        return self.ConvNet(input)
class GRCL(nn.Module):
    """Gated Recurrent Convolution Layer.

    Applies a recurrent convolution ``num_iteration`` times; at each step a
    sigmoid gate (computed inside GRCL_unit) controls how much of the
    recurrent response is mixed into the feed-forward response.
    """
    def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
        super(GRCL, self).__init__()
        # 1x1 convolutions produce the gate terms; kxk convolutions produce
        # the responses. Naming: w{g,}{f,r}_{u,x} = (gate?)(feed-fwd/recurrent).
        self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
        self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
        self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
        self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
        self.BN_x_init = nn.BatchNorm2d(output_channel)
        self.num_iteration = num_iteration
        # One GRCL_unit (each with its own BatchNorms) per iteration. The
        # nn.Sequential wrapper registers the units as submodules; forward
        # indexes them individually rather than calling the Sequential.
        self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
        self.GRCL = nn.Sequential(*self.GRCL)
    def forward(self, input):
        """The input of GRCL is constant over time t (denoted u(0)),
        thus wgf_u / wf_u are also constant over time t.
        """
        wgf_u = self.wgf_u(input)
        wf_u = self.wf_u(input)
        x = F.relu(self.BN_x_init(wf_u))  # x(0): feed-forward response only
        # Recurrence: only the wgr_x / wr_x terms depend on the current state.
        for i in range(self.num_iteration):
            x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
        return x
class GRCL_unit(nn.Module):
    """One iteration of the gated recurrent convolution.

    Each of the four pre-computed convolution terms gets its own BatchNorm.
    The gate G = sigmoid(BN(wgf_u) + BN(wgr_x)) scales the normalized
    recurrent response before it is added to the feed-forward response.
    """
    def __init__(self, output_channel):
        super(GRCL_unit, self).__init__()
        self.BN_gfu = nn.BatchNorm2d(output_channel)  # gate, feed-forward term
        self.BN_grx = nn.BatchNorm2d(output_channel)  # gate, recurrent term
        self.BN_fu = nn.BatchNorm2d(output_channel)   # response, feed-forward term
        self.BN_rx = nn.BatchNorm2d(output_channel)   # response, recurrent term
        self.BN_Gx = nn.BatchNorm2d(output_channel)   # gated recurrent response
    def forward(self, wgf_u, wgr_x, wf_u, wr_x):
        """Combine the gate/response terms into the next hidden state."""
        G_first_term = self.BN_gfu(wgf_u)
        G_second_term = self.BN_grx(wgr_x)
        # torch.sigmoid replaces the deprecated F.sigmoid
        # (torch.nn.functional.sigmoid); numerically identical.
        G = torch.sigmoid(G_first_term + G_second_term)
        x_first_term = self.BN_fu(wf_u)
        x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
        x = F.relu(x_first_term + x_second_term)
        return x
class BasicBlock(nn.Module):
    """Two-layer residual block: 3x3 conv -> BN -> ReLU -> 3x3 conv -> BN,
    plus an identity (or projected) shortcut, followed by a final ReLU.

    Fix: ``stride`` was previously stored but never applied to the first
    convolution, so any ``stride != 1`` block produced a main-branch output
    whose spatial size mismatched its downsampled shortcut. The first conv
    now uses ``stride`` (standard ResNet behaviour). All call sites in this
    file pass ``stride=1`` (or the default) and are therefore unaffected.
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = self._conv3x3(inplanes, planes, stride)  # downsamples when stride > 1
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = self._conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # optional projection for the shortcut branch
        self.stride = stride
    def _conv3x3(self, in_planes, out_planes, stride=1):
        """3x3 convolution with padding=1 and no bias (BatchNorm follows)."""
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=1, bias=False)
    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return self.relu(out)
class ResNet(nn.Module):
    """Configurable ResNet backbone used by ResNet_FeatureExtractor above.

    Unlike a torchvision ResNet, later downsampling uses asymmetric
    pooling/strides of (2, 1), so height is reduced far more aggressively
    than width (padding (0, 1) even adds a column at those steps).
    """
    def __init__(self, input_channel, output_channel, block, layers):
        # block: residual block class (e.g. BasicBlock); layers: number of
        # blocks per stage, e.g. [1, 2, 5, 3].
        super(ResNet, self).__init__()
        # Stage widths, e.g. [128, 256, 512, 512] for output_channel=512.
        self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
        # Running input width for _make_layer (updated as stages are built).
        self.inplanes = int(output_channel / 8)
        # Stem: two 3x3 convs (output_channel/16 -> output_channel/8).
        self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
        self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_2 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: symmetric 2x2 downsampling, then residual blocks + conv.
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
        self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
            0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
        # Stage 2: symmetric 2x2 downsampling.
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
        self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
            1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
        # Stage 3: asymmetric pooling — halves height, keeps (and pads) width.
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
        self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
        self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
            2], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
        # Stage 4: final blocks plus two 2x2 convs that collapse the height
        # to 1 while (mostly) preserving width.
        self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
        self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
        self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
        self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=1, padding=0, bias=False)
        self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks of width ``planes``; the first
        carries a 1x1 projection shortcut when stride or width changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion  # subsequent blocks keep this width
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem.
        x = self.conv0_1(x)
        x = self.bn0_1(x)
        x = self.relu(x)
        x = self.conv0_2(x)
        x = self.bn0_2(x)
        x = self.relu(x)
        # Stage 1.
        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # Stage 2.
        x = self.maxpool2(x)
        x = self.layer2(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        # Stage 3 (height-only downsampling).
        x = self.maxpool3(x)
        x = self.layer3(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        # Stage 4 (collapse height to 1).
        x = self.layer4(x)
        x = self.conv4_1(x)
        x = self.bn4_1(x)
        x = self.relu(x)
        x = self.conv4_2(x)
        x = self.bn4_2(x)
        x = self.relu(x)
        return x
| 41.659751 | 118 | 0.612251 | import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class VGG_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(VGG_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)),
nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)),
nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True))
def forward(self, input):
return self.ConvNet(input)
class RCNN_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(RCNN_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, 2),
GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)),
GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)),
nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True))
def forward(self, input):
return self.ConvNet(input)
class ResNet_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(ResNet_FeatureExtractor, self).__init__()
self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])
def forward(self, input):
return self.ConvNet(input)
class GRCL(nn.Module):
def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
super(GRCL, self).__init__()
self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
self.BN_x_init = nn.BatchNorm2d(output_channel)
self.num_iteration = num_iteration
self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
self.GRCL = nn.Sequential(*self.GRCL)
def forward(self, input):
wgf_u = self.wgf_u(input)
wf_u = self.wf_u(input)
x = F.relu(self.BN_x_init(wf_u))
for i in range(self.num_iteration):
x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
return x
class GRCL_unit(nn.Module):
def __init__(self, output_channel):
super(GRCL_unit, self).__init__()
self.BN_gfu = nn.BatchNorm2d(output_channel)
self.BN_grx = nn.BatchNorm2d(output_channel)
self.BN_fu = nn.BatchNorm2d(output_channel)
self.BN_rx = nn.BatchNorm2d(output_channel)
self.BN_Gx = nn.BatchNorm2d(output_channel)
def forward(self, wgf_u, wgr_x, wf_u, wr_x):
G_first_term = self.BN_gfu(wgf_u)
G_second_term = self.BN_grx(wgr_x)
G = F.sigmoid(G_first_term + G_second_term)
x_first_term = self.BN_fu(wf_u)
x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
x = F.relu(x_first_term + x_second_term)
return x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = self._conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = self._conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def _conv3x3(self, in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, input_channel, output_channel, block, layers):
super(ResNet, self).__init__()
self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
self.inplanes = int(output_channel / 8)
self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
1], kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
2], kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=1, padding=0, bias=False)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
| true | true |
f73075b082a65a62398c6c4d6ab5bdf795ac00ee | 8,704 | py | Python | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 2 | 2020-04-12T06:12:17.000Z | 2020-04-12T15:34:01.000Z | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | null | null | null | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 1 | 2020-04-12T15:34:12.000Z | 2020-04-12T15:34:12.000Z | # coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# --------------------Grouped random effects model: single-level random effect----------------
# Simulate data
n = 100  # number of samples
m = 25  # number of categories / levels for grouping variable
group = np.arange(n)  # grouping variable (values overwritten below)
for i in range(m):
    group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
    Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2  # random effect variance
sigma2 = 0.5 ** 2  # error variance
np.random.seed(1)
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m)  # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi  # observed data
# Define and fit model
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
# Make predictions
group_test = np.arange(m)
pred = gp_model.predict(group_data_pred=group_test)
# Compare true and predicted random effects
plt.scatter(b1, pred['mu'])
plt.title("Comparison of true and predicted random effects")
plt.xlabel("truth")
plt.ylabel("predicted")
plt.show()
# Other optimization specifications (gradient descent with Nesterov acceleration)
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent", "lr_cov": 0.1,
                                        "use_nesterov_acc": True})
gp_model.summary()
# --------------------Two crossed random effects and a random slope----------------
# NOTE: run the above example first to create the first random effect
# Simulate data
np.random.seed(1)
x = np.random.uniform(size=n)  # covariate data for random slope
n_obs_gr = int(n / m)  # number of samples per group
group2 = np.arange(n)  # grouping variable for second random effect
for i in range(m):
    group2[(n_obs_gr * i):(n_obs_gr * (i + 1))] = np.arange(n_obs_gr)
# incidence matrix relating grouped random effects to samples
Z2 = np.zeros((n, n_obs_gr))
for i in range(n_obs_gr):
    Z2[np.where(group2 == i), i] = 1
Z3 = np.diag(x).dot(Z1)
sigma2_2 = 0.5 ** 2  # variance of second random effect
sigma2_3 = 0.75 ** 2  # variance of random slope for first random effect
b2 = np.sqrt(sigma2_2) * np.random.normal(size=n_obs_gr)  # simulate random effects
b3 = np.sqrt(sigma2_3) * np.random.normal(size=m)
eps2 = Z1.dot(b1) + Z2.dot(b2) + Z3.dot(b3)
y = eps2 + xi  # observed data
# Define and fit model
group_data = np.column_stack((group, group2))
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
# --------------------Mixed effects model: random effects and linear fixed effects----------------
# NOTE: run the above example first to create the random effects part
# Simulate data
np.random.seed(1)
X = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # design matrix / covariate data for fixed effect
beta = np.array([3, 3])  # regression coefficients
y = eps2 + xi + X.dot(beta)  # add fixed effect to observed data
# Define and fit model
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, X=X, std_dev=True)
gp_model.summary()
# --------------------Gaussian process model----------------
# Simulate data
n = 200  # number of samples
np.random.seed(2)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # marginal variance of GP
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
# NOTE(review): this O(n^2) pairwise-distance loop (upper triangle mirrored)
# is repeated in several sections below; it could be factored into a helper.
D = np.zeros((n, n))  # distance matrix
for i in range(0, n):
    for j in range(i + 1, n):
        D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
        D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=n)  # simulate random effects
eps = C.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Define and fit model
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential")
## Other covariance functions:
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="gaussian")
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="matern", cov_fct_shape=1.5)
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="powered_exponential", cov_fct_shape=1.1)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.1})
gp_model.summary()
# Make predictions
np.random.seed(1)
ntest = 5
# prediction locations (=features) for Gaussian process
coords_test = np.column_stack(
    (np.random.uniform(size=ntest), np.random.uniform(size=ntest))) / 10.
pred = gp_model.predict(gp_coords_pred=coords_test, predict_cov_mat=True)
print("Predicted (posterior/conditional) mean of GP")
pred['mu']
print("Predicted (posterior/conditional) covariance matrix of GP")
pred['cov']
# --------------------Gaussian process model with Vecchia approximation----------------
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential",
                       vecchia_approx=True, num_neighbors=30)
gp_model.fit(y=y, params={"optimizer_cov": "gradient_descent",
                          "lr_cov": 0.1})
gp_model.summary()
# --------------------Gaussian process model with random coefficents----------------
# Simulate data
n = 500  # number of samples
np.random.seed(1)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # marginal variance of GP (for simplicity, all GPs have the same parameters)
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
D = np.zeros((n, n))  # distance matrix
for i in range(0, n):
    for j in range(i + 1, n):
        D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
        D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
X_SVC = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # covariate data for random coefficients
b1 = np.random.normal(size=n)  # simulate random effect
b2 = np.random.normal(size=n)
b3 = np.random.normal(size=n)
eps = C.dot(b1) + X_SVC[:, 0] * C.dot(b2) + X_SVC[:, 1] * C.dot(b3)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Define and fit model (takes a few seconds)
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential", gp_rand_coef_data=X_SVC)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.05,
                                        "use_nesterov_acc": True,
                                        "acc_rate_cov": 0.5})
gp_model.summary()
# --------------------Combine Gaussian process with grouped random effects----------------
n = 200  # number of samples
m = 25  # number of categories / levels for grouping variable
group = np.arange(n)  # grouping variable
for i in range(m):
    group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
    Z1[np.where(group == i), i] = 1
np.random.seed(1)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # random effect variance
sigma2_2 = 1 ** 2  # marginal variance of GP
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
D = np.zeros((n, n))  # distance matrix
for i in range(0, n):
    for j in range(i + 1, n):
        D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
        D[j, i] = D[i, j]
Sigma = sigma2_2 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=m)  # simulate random effect
b2 = np.random.normal(size=n)
eps = Z1.dot(b1) + C.dot(b2)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Create Gaussian process model
gp_model = gpb.GPModel(group_data=group, gp_coords=coords, cov_function="exponential")
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.05,
                                        "use_nesterov_acc": True,
                                        "acc_rate_cov": 0.5})
gp_model.summary()
| 42.048309 | 110 | 0.654756 |
import gpboost as gpb
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
n = 100
m = 25
group = np.arange(n)
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2
sigma2 = 0.5 ** 2
np.random.seed(1)
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m)
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
group_test = np.arange(m)
pred = gp_model.predict(group_data_pred=group_test)
plt.scatter(b1, pred['mu'])
plt.title("Comparison of true and predicted random effects")
plt.xlabel("truth")
plt.ylabel("predicted")
plt.show()
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent", "lr_cov": 0.1,
"use_nesterov_acc": True})
gp_model.summary()
np.random.seed(1)
x = np.random.uniform(size=n)
n_obs_gr = int(n / m)
group2 = np.arange(n)
for i in range(m):
group2[(n_obs_gr * i):(n_obs_gr * (i + 1))] = np.arange(n_obs_gr)
Z2 = np.zeros((n, n_obs_gr))
for i in range(n_obs_gr):
Z2[np.where(group2 == i), i] = 1
Z3 = np.diag(x).dot(Z1)
sigma2_2 = 0.5 ** 2
sigma2_3 = 0.75 ** 2
b2 = np.sqrt(sigma2_2) * np.random.normal(size=n_obs_gr)
b3 = np.sqrt(sigma2_3) * np.random.normal(size=m)
eps2 = Z1.dot(b1) + Z2.dot(b2) + Z3.dot(b3)
y = eps2 + xi
group_data = np.column_stack((group, group2))
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
np.random.seed(1)
X = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
beta = np.array([3, 3])
y = eps2 + xi + X.dot(beta)
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, X=X, std_dev=True)
gp_model.summary()
n = 200
np.random.seed(2)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=n)
eps = C.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential")
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.1})
gp_model.summary()
np.random.seed(1)
ntest = 5
coords_test = np.column_stack(
(np.random.uniform(size=ntest), np.random.uniform(size=ntest))) / 10.
pred = gp_model.predict(gp_coords_pred=coords_test, predict_cov_mat=True)
print("Predicted (posterior/conditional) mean of GP")
pred['mu']
print("Predicted (posterior/conditional) covariance matrix of GP")
pred['cov']
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential",
vecchia_approx=True, num_neighbors=30)
gp_model.fit(y=y, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.1})
gp_model.summary()
n = 500
np.random.seed(1)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
X_SVC = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
b1 = np.random.normal(size=n)
b2 = np.random.normal(size=n)
b3 = np.random.normal(size=n)
eps = C.dot(b1) + X_SVC[:, 0] * C.dot(b2) + X_SVC[:, 1] * C.dot(b3)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential", gp_rand_coef_data=X_SVC)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.05,
"use_nesterov_acc": True,
"acc_rate_cov": 0.5})
gp_model.summary()
n = 200
m = 25
group = np.arange(n)
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
np.random.seed(1)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
sigma2_2 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_2 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=m)
b2 = np.random.normal(size=n)
eps = Z1.dot(b1) + C.dot(b2)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(group_data=group, gp_coords=coords, cov_function="exponential")
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.05,
"use_nesterov_acc": True,
"acc_rate_cov": 0.5})
gp_model.summary()
| true | true |
f73075bef3ed405f4ed01eba617bcacbd18b5ea5 | 10,394 | py | Python | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | """Test cases for the Renault client API keys."""
from datetime import datetime
from typing import List
import aiohttp
import pytest
from aioresponses import aioresponses
from tests import get_file_content
from tests.const import TEST_ACCOUNT_ID
from tests.const import TEST_COUNTRY
from tests.const import TEST_KAMEREON_URL
from tests.const import TEST_LOCALE_DETAILS
from tests.const import TEST_VIN
from tests.test_credential_store import get_logged_in_credential_store
from tests.test_renault_session import get_logged_in_session
from renault_api.kamereon.enums import ChargeMode
from renault_api.kamereon.models import ChargeSchedule
from renault_api.renault_vehicle import RenaultVehicle
# URL templates for the mocked Kamereon endpoints: the car-adapter API exists
# in two versions (v1 and v2); individual tests pick whichever version the
# endpoint under test uses.
TEST_KAMEREON_BASE_URL = f"{TEST_KAMEREON_URL}/commerce/v1"
TEST_KAMEREON_ACCOUNT_URL = f"{TEST_KAMEREON_BASE_URL}/accounts/{TEST_ACCOUNT_ID}"
TEST_KAMEREON_VEHICLE_URL1 = (
    f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v1/cars/{TEST_VIN}"
)
TEST_KAMEREON_VEHICLE_URL2 = (
    f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v2/cars/{TEST_VIN}"
)
# Canned JSON responses live under this directory.
FIXTURE_PATH = "tests/fixtures/kamereon/"
# Query string appended to every endpoint URL.
QUERY_STRING = f"country={TEST_COUNTRY}"
@pytest.fixture
def vehicle(websession: aiohttp.ClientSession) -> RenaultVehicle:
    """Fixture for testing RenaultVehicle."""
    # Build the vehicle on top of an already-authenticated session so the
    # tests below only need to mock the Kamereon data endpoints.
    return RenaultVehicle(
        account_id=TEST_ACCOUNT_ID,
        vin=TEST_VIN,
        session=get_logged_in_session(websession),
    )
# NOTE(review): the name uses a "tests_" prefix while every other test in this
# module uses "test_". pytest's default discovery prefix ("test") presumably
# still collects it, but renaming to test_init would be more consistent —
# verify collection before relying on it.
def tests_init(websession: aiohttp.ClientSession) -> None:
    """Test RenaultVehicle initialisation."""
    # Construction from an existing (already logged-in) session.
    assert RenaultVehicle(
        account_id=TEST_ACCOUNT_ID,
        vin=TEST_VIN,
        session=get_logged_in_session(websession),
    )
    # Construction from raw settings (websession, country, locale, credentials).
    assert RenaultVehicle(
        account_id=TEST_ACCOUNT_ID,
        vin=TEST_VIN,
        websession=websession,
        country=TEST_COUNTRY,
        locale_details=TEST_LOCALE_DETAILS,
        credential_store=get_logged_in_credential_store(),
    )
@pytest.mark.asyncio
async def test_get_battery_status(vehicle: RenaultVehicle) -> None:
    """Test get_battery_status."""
    # Serve the canned battery-status payload for the v2 endpoint, then check
    # the accessor returns truthy parsed data.
    url = f"{TEST_KAMEREON_VEHICLE_URL2}/battery-status?{QUERY_STRING}"
    fixture = f"{FIXTURE_PATH}/vehicle_data/battery-status.1.json"
    with aioresponses() as mocked_responses:
        mocked_responses.get(url, status=200, body=get_file_content(fixture))
        assert await vehicle.get_battery_status()
# The remaining endpoint tests share one shape: mock the Kamereon endpoint
# with a canned JSON fixture, then assert the corresponding RenaultVehicle
# accessor returns truthy parsed data.
@pytest.mark.asyncio
async def test_get_location(vehicle: RenaultVehicle) -> None:
    """Test get_location."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/location?{QUERY_STRING}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/location.json"),
        )
        assert await vehicle.get_location()
@pytest.mark.asyncio
async def test_get_hvac_status(vehicle: RenaultVehicle) -> None:
    """Test get_hvac_status."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-status?{QUERY_STRING}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-status.json"),
        )
        assert await vehicle.get_hvac_status()
@pytest.mark.asyncio
async def test_get_charge_mode(vehicle: RenaultVehicle) -> None:
    """Test get_charge_mode."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/charge-mode?{QUERY_STRING}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-mode.json"),
        )
        assert await vehicle.get_charge_mode()
@pytest.mark.asyncio
async def test_get_cockpit(vehicle: RenaultVehicle) -> None:
    """Test get_cockpit."""
    # Cockpit data uses the v2 car-adapter endpoint.
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL2}/cockpit?{QUERY_STRING}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/cockpit.zoe.json"),
        )
        assert await vehicle.get_cockpit()
@pytest.mark.asyncio
async def test_get_lock_status(vehicle: RenaultVehicle) -> None:
    """Test get_lock_status."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/lock-status?{QUERY_STRING}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/lock-status.json"),
        )
        assert await vehicle.get_lock_status()
@pytest.mark.asyncio
async def test_get_charging_settings(vehicle: RenaultVehicle) -> None:
    """Test get_charging_settings."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/charging-settings?{QUERY_STRING}",
            status=200,
            body=get_file_content(
                f"{FIXTURE_PATH}/vehicle_data/charging-settings.json"
            ),
        )
        assert await vehicle.get_charging_settings()
@pytest.mark.asyncio
async def test_get_notification_settings(vehicle: RenaultVehicle) -> None:
    """Test get_notification_settings."""
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/notification-settings?{QUERY_STRING}",
            status=200,
            body=get_file_content(
                f"{FIXTURE_PATH}/vehicle_data/notification-settings.json"
            ),
        )
        assert await vehicle.get_notification_settings()
@pytest.mark.asyncio
async def test_get_charge_history(vehicle: RenaultVehicle) -> None:
    """Test get_charge_history."""
    # Month-granularity history: start/end are serialised as YYYYMM in the
    # query string (2020-10-01 -> 202010, 2020-11-15 -> 202011).
    query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
    with aioresponses() as mocked_responses:
        mocked_responses.get(
            f"{TEST_KAMEREON_VEHICLE_URL1}/charge-history?{query_string}",
            status=200,
            body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-history.json"),
        )
        assert await vehicle.get_charge_history(
            start=datetime(2020, 10, 1),
            end=datetime(2020, 11, 15),
        )
@pytest.mark.asyncio
async def test_get_charges(vehicle: RenaultVehicle) -> None:
"""Test get_charges."""
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charges?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charges.json"),
)
assert await vehicle.get_charges(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_history(vehicle: RenaultVehicle) -> None:
"""Test get_hvac_history."""
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-history.json"),
)
assert await vehicle.get_hvac_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_sessions(vehicle: RenaultVehicle) -> None:
"""Test get_hvac_sessions."""
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-sessions?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-sessions.json"),
)
assert await vehicle.get_hvac_sessions(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_set_ac_start(vehicle: RenaultVehicle) -> None:
"""Test set_ac_start."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.start.json"
),
)
assert await vehicle.set_ac_start(21, datetime(2020, 11, 24))
@pytest.mark.asyncio
async def test_set_ac_stop(vehicle: RenaultVehicle) -> None:
"""Test set_ac_stop."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.cancel.json"
),
)
assert await vehicle.set_ac_stop()
@pytest.mark.asyncio
async def test_set_charge_mode(vehicle: RenaultVehicle) -> None:
"""Test set_charge_mode."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-mode.schedule_mode.json"
),
)
assert await vehicle.set_charge_mode(ChargeMode.SCHEDULE_MODE)
@pytest.mark.asyncio
async def test_set_charge_schedules(vehicle: RenaultVehicle) -> None:
"""Test set_charge_schedules."""
schedules: List[ChargeSchedule] = []
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL2}/actions/charge-schedule?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-schedule.schedules.json"
),
)
assert await vehicle.set_charge_schedules(schedules)
@pytest.mark.asyncio
async def test_set_charge_start(vehicle: RenaultVehicle) -> None:
"""Test set_charge_start."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charging-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charging-start.start.json"
),
)
assert await vehicle.set_charge_start()
| 35.233898 | 88 | 0.681547 | from datetime import datetime
from typing import List
import aiohttp
import pytest
from aioresponses import aioresponses
from tests import get_file_content
from tests.const import TEST_ACCOUNT_ID
from tests.const import TEST_COUNTRY
from tests.const import TEST_KAMEREON_URL
from tests.const import TEST_LOCALE_DETAILS
from tests.const import TEST_VIN
from tests.test_credential_store import get_logged_in_credential_store
from tests.test_renault_session import get_logged_in_session
from renault_api.kamereon.enums import ChargeMode
from renault_api.kamereon.models import ChargeSchedule
from renault_api.renault_vehicle import RenaultVehicle
TEST_KAMEREON_BASE_URL = f"{TEST_KAMEREON_URL}/commerce/v1"
TEST_KAMEREON_ACCOUNT_URL = f"{TEST_KAMEREON_BASE_URL}/accounts/{TEST_ACCOUNT_ID}"
TEST_KAMEREON_VEHICLE_URL1 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v1/cars/{TEST_VIN}"
)
TEST_KAMEREON_VEHICLE_URL2 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v2/cars/{TEST_VIN}"
)
FIXTURE_PATH = "tests/fixtures/kamereon/"
QUERY_STRING = f"country={TEST_COUNTRY}"
@pytest.fixture
def vehicle(websession: aiohttp.ClientSession) -> RenaultVehicle:
return RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
def tests_init(websession: aiohttp.ClientSession) -> None:
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
websession=websession,
country=TEST_COUNTRY,
locale_details=TEST_LOCALE_DETAILS,
credential_store=get_logged_in_credential_store(),
)
@pytest.mark.asyncio
async def test_get_battery_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/battery-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/battery-status.1.json"),
)
assert await vehicle.get_battery_status()
@pytest.mark.asyncio
async def test_get_location(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/location?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/location.json"),
)
assert await vehicle.get_location()
@pytest.mark.asyncio
async def test_get_hvac_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-status.json"),
)
assert await vehicle.get_hvac_status()
@pytest.mark.asyncio
async def test_get_charge_mode(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-mode.json"),
)
assert await vehicle.get_charge_mode()
@pytest.mark.asyncio
async def test_get_cockpit(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/cockpit?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/cockpit.zoe.json"),
)
assert await vehicle.get_cockpit()
@pytest.mark.asyncio
async def test_get_lock_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/lock-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/lock-status.json"),
)
assert await vehicle.get_lock_status()
@pytest.mark.asyncio
async def test_get_charging_settings(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charging-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/charging-settings.json"
),
)
assert await vehicle.get_charging_settings()
@pytest.mark.asyncio
async def test_get_notification_settings(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/notification-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/notification-settings.json"
),
)
assert await vehicle.get_notification_settings()
@pytest.mark.asyncio
async def test_get_charge_history(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-history.json"),
)
assert await vehicle.get_charge_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_charges(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charges?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charges.json"),
)
assert await vehicle.get_charges(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_history(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-history.json"),
)
assert await vehicle.get_hvac_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_sessions(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-sessions?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-sessions.json"),
)
assert await vehicle.get_hvac_sessions(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_set_ac_start(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.start.json"
),
)
assert await vehicle.set_ac_start(21, datetime(2020, 11, 24))
@pytest.mark.asyncio
async def test_set_ac_stop(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.cancel.json"
),
)
assert await vehicle.set_ac_stop()
@pytest.mark.asyncio
async def test_set_charge_mode(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-mode.schedule_mode.json"
),
)
assert await vehicle.set_charge_mode(ChargeMode.SCHEDULE_MODE)
@pytest.mark.asyncio
async def test_set_charge_schedules(vehicle: RenaultVehicle) -> None:
schedules: List[ChargeSchedule] = []
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL2}/actions/charge-schedule?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-schedule.schedules.json"
),
)
assert await vehicle.set_charge_schedules(schedules)
@pytest.mark.asyncio
async def test_set_charge_start(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charging-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charging-start.start.json"
),
)
assert await vehicle.set_charge_start()
| true | true |
f73076c02920003b04f8d7e5599366f2dce13c0e | 1,277 | py | Python | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | import requests, json, random, sys, configparser
#def getData(auth_string, id):
def getData():
global auth_token
s = random.randint(100000, 1000000)
id_str = str(s)
request_url = "http://api.genius.com/songs/" + id_str
headersMap = {
"User-Agent": "CompuServe Classic/1.22",
"Accept": "application/json",
"Authorization": "Bearer " + auth_token
}
response = requests.get(request_url, headers=headersMap)
### Output the HTTP status code and reason text...
#print response.status, response.reason
result = json.loads(response.content)
output = "[" + id_str + "] "
if response.status_code == 200:
title = result["response"]["song"]["full_title"]
song_uri = result["response"]["song"]["path"]
if not title:
return getData()
return title
else:
return getData()
#Copy auth.cfg.sample to auth.cfg and fill in your auth token
Config = configparser.ConfigParser()
Config.read('auth.cfg')
auth_token = Config.get('Auth', 'Token')
#if no argument (count) is given, set default to 1
c = 0
if (len(sys.argv) == 1):
c = 1
else:
c = int(sys.argv[1])
#Concerning the docs, the song id is a 6-digit number
for i in range(0, c):
getData()
| 29.697674 | 61 | 0.628034 | import requests, json, random, sys, configparser
def getData():
global auth_token
s = random.randint(100000, 1000000)
id_str = str(s)
request_url = "http://api.genius.com/songs/" + id_str
headersMap = {
"User-Agent": "CompuServe Classic/1.22",
"Accept": "application/json",
"Authorization": "Bearer " + auth_token
}
response = requests.get(request_url, headers=headersMap)
status_code == 200:
title = result["response"]["song"]["full_title"]
song_uri = result["response"]["song"]["path"]
if not title:
return getData()
return title
else:
return getData()
Config = configparser.ConfigParser()
Config.read('auth.cfg')
auth_token = Config.get('Auth', 'Token')
c = 0
if (len(sys.argv) == 1):
c = 1
else:
c = int(sys.argv[1])
for i in range(0, c):
getData()
| true | true |
f73077be389c6681ad96c3ab9228da1f85632f08 | 2,263 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class SetHttpHeaderConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetHttpHeaderConfig')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_HeaderValue(self):
return self.get_query_params().get('HeaderValue')
def set_HeaderValue(self,HeaderValue):
self.add_query_param('HeaderValue',HeaderValue)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ConfigId(self):
return self.get_query_params().get('ConfigId')
def set_ConfigId(self,ConfigId):
self.add_query_param('ConfigId',ConfigId)
def get_HeaderKey(self):
return self.get_query_params().get('HeaderKey')
def set_HeaderKey(self,HeaderKey):
self.add_query_param('HeaderKey',HeaderKey) | 33.776119 | 74 | 0.764914 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class SetHttpHeaderConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetHttpHeaderConfig')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_HeaderValue(self):
return self.get_query_params().get('HeaderValue')
def set_HeaderValue(self,HeaderValue):
self.add_query_param('HeaderValue',HeaderValue)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ConfigId(self):
return self.get_query_params().get('ConfigId')
def set_ConfigId(self,ConfigId):
self.add_query_param('ConfigId',ConfigId)
def get_HeaderKey(self):
return self.get_query_params().get('HeaderKey')
def set_HeaderKey(self,HeaderKey):
self.add_query_param('HeaderKey',HeaderKey) | true | true |
f7307823c40ee64d03118ec461eb3103c01b9c29 | 5,544 | py | Python | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 4 | 2016-11-13T20:49:33.000Z | 2017-12-20T20:03:03.000Z | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 5 | 2016-12-26T19:14:46.000Z | 2022-02-11T03:44:39.000Z | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 2 | 2016-12-29T12:03:15.000Z | 2017-02-16T15:51:02.000Z | startHTML = '''
<html>
<head>
<style>
table {
border-collapse: collapse;
height: 100%;
width: 100%;
}
table, th, td {
border: 3px solid black;
}
@media print {
table {
page-break-after: always;
}
}
.cutoffs td {
border: 0;
font-weight: bold;
}
.compName {
font-size: 48pt;
font-weight: bold;
}
.labels {
font-size: 24pt;
font-weight: bold;
}
.attempt {
font-size: 36pt;
font-weight: bold;
text-align: center;
}
.event, .personID, .scrambler {
font-size: 24pt;
font-weight: bold;
width: 60px;
}
.round, .heat {
font-size: 24pt;
font-weight: bold;
}
.personName {
font-size: 40pt;
font-weight: bold;
}
.attemptNumber {
width: 60px;
}
.initial {
width: 100px;
}
</style>
</head>
<body>
'''
ao5Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">4</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">5</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
mo3Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
endHTML = '''
</body>
</html>
''' | 25.906542 | 64 | 0.418831 | startHTML = '''
<html>
<head>
<style>
table {
border-collapse: collapse;
height: 100%;
width: 100%;
}
table, th, td {
border: 3px solid black;
}
@media print {
table {
page-break-after: always;
}
}
.cutoffs td {
border: 0;
font-weight: bold;
}
.compName {
font-size: 48pt;
font-weight: bold;
}
.labels {
font-size: 24pt;
font-weight: bold;
}
.attempt {
font-size: 36pt;
font-weight: bold;
text-align: center;
}
.event, .personID, .scrambler {
font-size: 24pt;
font-weight: bold;
width: 60px;
}
.round, .heat {
font-size: 24pt;
font-weight: bold;
}
.personName {
font-size: 40pt;
font-weight: bold;
}
.attemptNumber {
width: 60px;
}
.initial {
width: 100px;
}
</style>
</head>
<body>
'''
ao5Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">4</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">5</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
mo3Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
endHTML = '''
</body>
</html>
''' | true | true |
f7307828ee10c47bd1ad7591cd1e3358963ef61f | 1,063 | py | Python | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 42 | 2018-10-24T08:21:07.000Z | 2021-06-10T20:54:39.000Z | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 2 | 2018-11-05T08:56:42.000Z | 2019-05-03T12:40:43.000Z | from rest_framework import status
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from .basetest import BaseTestCase
User = get_user_model()
signup_url = reverse("authentication:signup")
login_url = reverse("authentication:login")
class UserApiTestCase(BaseTestCase):
def test_login_user(self):
# Test user login
self.login_response = self.client.post(
login_url, self.login_data, format="json")
self.assertEqual(self.login_response.status_code, status.HTTP_200_OK)
login_token = self.login_response.data['token']
self.assertEqual(
self.login_response.data, {"email": "daniel@test.com",
"username": "daniel",
"token": login_token}
)
def test_get_user_email(self):
""" Test model method to get user's email """
self.email = self.login_data["user"]["email"]
email = self.email.__str__()
self.assertIn(email, "daniel@test.com")
| 34.290323 | 77 | 0.643462 | from rest_framework import status
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from .basetest import BaseTestCase
User = get_user_model()
signup_url = reverse("authentication:signup")
login_url = reverse("authentication:login")
class UserApiTestCase(BaseTestCase):
def test_login_user(self):
self.login_response = self.client.post(
login_url, self.login_data, format="json")
self.assertEqual(self.login_response.status_code, status.HTTP_200_OK)
login_token = self.login_response.data['token']
self.assertEqual(
self.login_response.data, {"email": "daniel@test.com",
"username": "daniel",
"token": login_token}
)
def test_get_user_email(self):
self.email = self.login_data["user"]["email"]
email = self.email.__str__()
self.assertIn(email, "daniel@test.com")
| true | true |
f73078ac1b77d76adea8e2e058894be4fc7fa673 | 4,504 | py | Python | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | import subprocess
import os
def read_list(fname="lista.csv"):
    """Yield raw lines from the competitor list file *fname*.

    Lines are yielded exactly as read from disk (trailing newline
    included); callers are expected to split/parse them.
    """
    # Context manager closes the handle deterministically; the original
    # `for row in open(fname)` leaked the descriptor until GC.
    with open(fname) as fh:
        for row in fh:
            yield row
def write_preamble(fname):
    """Create *fname* (truncating any previous content) and write the
    LaTeX document preamble plus the shared badge header graphic.

    All the other ``write_*`` helpers append to this file afterwards,
    so this must be called first for each output document.
    """
    tex = r"""%25 EIC technology, (C) LM 2018
\documentclass{article}
\usepackage{graphicx}
\usepackage[space]{grffile}
\usepackage{anyfontsize}
\usepackage{fontspec}
\usepackage{color}
\setromanfont[
BoldFont=NexaB.otf,
]{NexaL.otf}
\usepackage{multicol}
\usepackage[english]{babel}%
\usepackage[a6paper,left=1cm,right=1cm,top=1cm,bottom=1cm]{geometry}
\usepackage{lipsum}% http://ctan.org/pkg/lipsum
\begin{document}
\pagestyle{empty}
% top
\centering{\includegraphics[width=0.9\textwidth,keepaspectratio]{top}}\\
\vspace*{0.3cm}
"""
    # Mode "w": start a fresh file for this badge document.
    with open(fname, "w") as out:
        out.write(tex)
def write_player(p, f, photo):
country = p[0].lower()
name = p[1].lower()
surname = p[2].lower()
dan = p[3].lower()
role = p[4].lower()
spec = p[8].lower()
Name = name.capitalize()
Surname = surname.capitalize()
if photo == None:
photo = "nophoto"
if dan == "-1":
dan = "{\color{white}1}"
elif dan.lower() == "mudan":
dan = "mudan"
elif "renshi" in dan:
dan = "renshi " + dan[0] + " dan"
elif "kyoshi" in dan:
dan = "kyoshi " + dan[0] + " dan"
elif "hanshi" in dan:
dan = "hanshi " + dan[0] + " dan"
else:
dan += " dan"
name_size = "huge"
if len(Name) > 8:
name_size = "Large"
surname_size = "huge"
if len(Surname) > 8:
surname_size = "Large"
string = "%"
string += r""" pic name role
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=3cm]{foto/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{flushright}
{\%s \textbf{%s}}\\
{\%s \textbf{%s}}\\
\vspace*{0.3cm}
{\Large %s}\\{\Large %s}
\end{flushright}
\end{minipage}\\
\vspace*{0.3cm}
""" % (photo, name_size, Name.split(" ")[0],
surname_size, Surname, dan, spec)
with open(f, "a") as f:
f.write(string)
def write_country(p, f, C):
country = p[0].lower()
string = "%"
string += r""" country
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=1.5cm]{flags/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{center}
{\fontsize{30}{40}\selectfont %s}
\end{center}
\end{minipage}
""" % (country, C)
with open(f, "a") as f:
f.write(string)
def write_role(p, f):
role = p[4].lower()
size = "huge"
if len(role) > len("manager + competitor"):
size = "Large"
string = "%"
string += r""" role
\vspace*{0.25cm}
\begin{center}
{\%s %s}
\end{center}
\vspace*{0.25cm}
""" % (size, role)
with open(f, "a") as f:
f.write(string)
def write_footer(f):
foot = "%"
foot += r""" foot
\centering{\includegraphics[width=\textwidth,keepaspectratio]{footer}}
\end{document}
"""
with open(f, "a") as f:
f.write(foot)
def is_in_foto_dir(ekf, path="./foto"):
for element in os.listdir(path):
if ekf.lower() in element.lower():
return element
return None
if __name__ == "__main__":
DIR = "./zawieszki/"
COUNTRIES = "./countries/"
country = {'pol': 'Poland'}
rl = read_list()
for row in rl:
player = row.split(",")
country = player[0]
cou = country[:3].lower()
fname = player[1]
lname = player[2]
dan = player[3]
role = player[4]
ekf = player[5]
pic = player[6]
fn = fname.lower()
ln = lname.lower()
fname = "{}_{}_{}".format(cou, ln, fn)
fname_tex = fname + ".tex"
write_preamble(fname_tex)
photo = None if ekf == "NA" else is_in_foto_dir(ekf)
write_player(player, fname_tex, photo)
write_country(player, fname_tex, country) #[player[0]])
write_role(player, fname_tex)
write_footer(fname_tex)
subprocess.call(["ls", fname])
subprocess.call(["xelatex", fname_tex])
subprocess.call(["rm", fname_tex, fname + ".log", fname + ".aux"])
subprocess.call(["mv", fname + ".pdf", DIR])
| 23.458333 | 76 | 0.552398 | import subprocess
import os
def read_list(fname="lista.csv"):
for row in open(fname):
yield row
def write_preamble(fname):
preamble = r"""%25 EIC technology, (C) LM 2018
\documentclass{article}
\usepackage{graphicx}
\usepackage[space]{grffile}
\usepackage{anyfontsize}
\usepackage{fontspec}
\usepackage{color}
\setromanfont[
BoldFont=NexaB.otf,
]{NexaL.otf}
\usepackage{multicol}
\usepackage[english]{babel}%
\usepackage[a6paper,left=1cm,right=1cm,top=1cm,bottom=1cm]{geometry}
\usepackage{lipsum}% http://ctan.org/pkg/lipsum
\begin{document}
\pagestyle{empty}
% top
\centering{\includegraphics[width=0.9\textwidth,keepaspectratio]{top}}\\
\vspace*{0.3cm}
"""
with open(fname, "w") as f:
f.write(preamble)
def write_player(p, f, photo):
country = p[0].lower()
name = p[1].lower()
surname = p[2].lower()
dan = p[3].lower()
role = p[4].lower()
spec = p[8].lower()
Name = name.capitalize()
Surname = surname.capitalize()
if photo == None:
photo = "nophoto"
if dan == "-1":
dan = "{\color{white}1}"
elif dan.lower() == "mudan":
dan = "mudan"
elif "renshi" in dan:
dan = "renshi " + dan[0] + " dan"
elif "kyoshi" in dan:
dan = "kyoshi " + dan[0] + " dan"
elif "hanshi" in dan:
dan = "hanshi " + dan[0] + " dan"
else:
dan += " dan"
name_size = "huge"
if len(Name) > 8:
name_size = "Large"
surname_size = "huge"
if len(Surname) > 8:
surname_size = "Large"
string = "%"
string += r""" pic name role
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=3cm]{foto/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{flushright}
{\%s \textbf{%s}}\\
{\%s \textbf{%s}}\\
\vspace*{0.3cm}
{\Large %s}\\{\Large %s}
\end{flushright}
\end{minipage}\\
\vspace*{0.3cm}
""" % (photo, name_size, Name.split(" ")[0],
surname_size, Surname, dan, spec)
with open(f, "a") as f:
f.write(string)
def write_country(p, f, C):
country = p[0].lower()
string = "%"
string += r""" country
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=1.5cm]{flags/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{center}
{\fontsize{30}{40}\selectfont %s}
\end{center}
\end{minipage}
""" % (country, C)
with open(f, "a") as f:
f.write(string)
def write_role(p, f):
role = p[4].lower()
size = "huge"
if len(role) > len("manager + competitor"):
size = "Large"
string = "%"
string += r""" role
\vspace*{0.25cm}
\begin{center}
{\%s %s}
\end{center}
\vspace*{0.25cm}
""" % (size, role)
with open(f, "a") as f:
f.write(string)
def write_footer(f):
foot = "%"
foot += r""" foot
\centering{\includegraphics[width=\textwidth,keepaspectratio]{footer}}
\end{document}
"""
with open(f, "a") as f:
f.write(foot)
def is_in_foto_dir(ekf, path="./foto"):
for element in os.listdir(path):
if ekf.lower() in element.lower():
return element
return None
if __name__ == "__main__":
DIR = "./zawieszki/"
COUNTRIES = "./countries/"
country = {'pol': 'Poland'}
rl = read_list()
for row in rl:
player = row.split(",")
country = player[0]
cou = country[:3].lower()
fname = player[1]
lname = player[2]
dan = player[3]
role = player[4]
ekf = player[5]
pic = player[6]
fn = fname.lower()
ln = lname.lower()
fname = "{}_{}_{}".format(cou, ln, fn)
fname_tex = fname + ".tex"
write_preamble(fname_tex)
photo = None if ekf == "NA" else is_in_foto_dir(ekf)
write_player(player, fname_tex, photo)
write_country(player, fname_tex, country)
write_role(player, fname_tex)
write_footer(fname_tex)
subprocess.call(["ls", fname])
subprocess.call(["xelatex", fname_tex])
subprocess.call(["rm", fname_tex, fname + ".log", fname + ".aux"])
subprocess.call(["mv", fname + ".pdf", DIR])
| true | true |
f73079477a3e239cd6a9759ffd6c3d612f6a5ff4 | 9,121 | py | Python | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mixin class to store shared functionality for all the gRPC Interceptors.
This mixin class centralizes sets of functionality that are common across all
Interceptors, including retrieving data from gRPC metadata and initializing
instances of grpc.ClientCallDetails.
"""
from collections import namedtuple
from importlib import import_module
import json
from google.protobuf.message import DecodeError
from grpc import ClientCallDetails, StatusCode
from google.ads.google_ads.errors import GoogleAdsException
_REQUEST_ID_KEY = 'request-id'
# Codes that are retried upon by google.api_core.
_RETRY_STATUS_CODES = (StatusCode.INTERNAL, StatusCode.RESOURCE_EXHAUSTED)
_SENSITIVE_INFO_MASK = 'REDACTED'
class Interceptor:
@classmethod
def get_request_id_from_metadata(cls, trailing_metadata):
"""Gets the request ID for the Google Ads API request.
Args:
trailing_metadata: a tuple of metadatum from the service response.
Returns:
A str request ID associated with the Google Ads API request, or None
if it doesn't exist.
"""
for kv in trailing_metadata:
if kv[0] == _REQUEST_ID_KEY:
return kv[1] # Return the found request ID.
return None
@classmethod
def parse_metadata_to_json(cls, metadata):
"""Parses metadata from gRPC request and response messages to a JSON str.
Obscures the value for "developer-token".
Args:
metadata: a tuple of metadatum.
Returns:
A str of metadata formatted as JSON key/value pairs.
"""
metadata_dict = {}
if metadata is None:
return '{}'
for datum in metadata:
key = datum[0]
if key == 'developer-token':
metadata_dict[key] = _SENSITIVE_INFO_MASK
else:
value = datum[1]
metadata_dict[key] = value
return cls.format_json_object(metadata_dict)
@classmethod
def format_json_object(cls, obj):
"""Parses a serializable object into a consistently formatted JSON string.
Returns:
A str of formatted JSON serialized from the given object.
Args:
obj: an object or dict.
Returns:
A str of metadata formatted as JSON key/value pairs.
"""
def default_serializer(value):
if isinstance(value, bytes):
return value.decode(errors='ignore')
else:
return None
return str(json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False,
default=default_serializer,
separators=(',', ': ')))
@classmethod
def get_trailing_metadata_from_interceptor_exception(cls, exception):
"""Retrieves trailing metadata from an exception object.
Args:
exception: an instance of grpc.Call.
Returns:
A tuple of trailing metadata key value pairs.
"""
try:
# GoogleAdsFailure exceptions will contain trailing metadata on the
# error attribute.
return exception.error.trailing_metadata()
except AttributeError:
try:
# Transport failures, i.e. issues at the gRPC layer, will contain
# trailing metadata on the exception itself.
return exception.trailing_metadata()
except AttributeError:
# if trailing metadata is not found in either location then
# return an empty tuple
return tuple()
@classmethod
def get_client_call_details_instance(cls, method, timeout, metadata,
credentials=None):
"""Initializes an instance of the ClientCallDetails with the given data.
Args:
method: A str of the service method being invoked.
timeout: A float of the request timeout
metadata: A list of metadata tuples
credentials: An optional grpc.CallCredentials instance for the RPC
Returns:
An instance of _ClientCallDetails that wraps grpc.ClientCallDetails.
"""
class _ClientCallDetails(
namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
ClientCallDetails):
"""Wrapper class for initializing a new ClientCallDetails instance.
"""
pass
return _ClientCallDetails(method, timeout, metadata, credentials)
def __init__(self, api_version):
self._error_protos = import_module(
f'google.ads.google_ads.{api_version}.proto.errors.errors_pb2')
self._failure_key = (
f'google.ads.googleads.{api_version}.errors.googleadsfailure-bin')
self._exception = None
def _get_error_from_response(self, response):
"""Attempts to wrap failed responses as GoogleAdsException instances.
Handles failed gRPC responses of by attempting to convert them
to a more readable GoogleAdsException. Certain types of exceptions are
not converted; if the object's trailing metadata does not indicate that
it is a GoogleAdsException, or if it falls under a certain category of
status code, (INTERNAL or RESOURCE_EXHAUSTED). See documentation for
more information about gRPC status codes:
https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
Args:
response: a grpc.Call/grpc.Future instance.
Returns:
GoogleAdsException: If the exception's trailing metadata
indicates that it is a GoogleAdsException.
RpcError: If the exception's is a gRPC exception but the trailing
metadata is empty or is not indicative of a GoogleAdsException,
or if the exception has a status code of INTERNAL or
RESOURCE_EXHAUSTED.
Exception: If not a GoogleAdsException or RpcException the error
will be raised as-is.
"""
if self._exception:
return self._exception
status_code = response.code()
response_exception = response.exception()
if status_code not in _RETRY_STATUS_CODES:
trailing_metadata = response.trailing_metadata()
google_ads_failure = self._get_google_ads_failure(trailing_metadata)
if google_ads_failure:
request_id = self.get_request_id_from_metadata(
trailing_metadata)
# If exception is a GoogleAdsFailure then it gets wrapped in a
# library-specific Error type for easy handling. These errors
# originate from the Google Ads API and are often caused by
# invalid requests.
self._exception = GoogleAdsException(
response_exception, response, google_ads_failure,
request_id)
else:
# Raise the original exception if not a GoogleAdsFailure. This
# type of error is generally caused by problems at the request
# level, such as when an invalid endpoint is given.
self._exception = response_exception
else:
# Raise the original exception if error has status code
# INTERNAL or RESOURCE_EXHAUSTED, meaning that
self._exception = response_exception
return self._exception
def _get_google_ads_failure(self, trailing_metadata):
"""Gets the Google Ads failure details if they exist.
Args:
trailing_metadata: a tuple of metadatum from the service response.
Returns:
A GoogleAdsFailure that describes how a GoogleAds API call failed.
Returns None if either the trailing metadata of the request did not
return the failure details, or if the GoogleAdsFailure fails to
parse.
"""
if trailing_metadata is not None:
for kv in trailing_metadata:
if kv[0] == self._failure_key:
try:
ga_failure = self._error_protos.GoogleAdsFailure()
ga_failure.ParseFromString(kv[1])
return ga_failure
except DecodeError:
return None
return None
| 37.846473 | 82 | 0.63129 |
from collections import namedtuple
from importlib import import_module
import json
from google.protobuf.message import DecodeError
from grpc import ClientCallDetails, StatusCode
from google.ads.google_ads.errors import GoogleAdsException
_REQUEST_ID_KEY = 'request-id'
_RETRY_STATUS_CODES = (StatusCode.INTERNAL, StatusCode.RESOURCE_EXHAUSTED)
_SENSITIVE_INFO_MASK = 'REDACTED'
class Interceptor:
@classmethod
def get_request_id_from_metadata(cls, trailing_metadata):
for kv in trailing_metadata:
if kv[0] == _REQUEST_ID_KEY:
return kv[1]
return None
@classmethod
def parse_metadata_to_json(cls, metadata):
metadata_dict = {}
if metadata is None:
return '{}'
for datum in metadata:
key = datum[0]
if key == 'developer-token':
metadata_dict[key] = _SENSITIVE_INFO_MASK
else:
value = datum[1]
metadata_dict[key] = value
return cls.format_json_object(metadata_dict)
@classmethod
def format_json_object(cls, obj):
def default_serializer(value):
if isinstance(value, bytes):
return value.decode(errors='ignore')
else:
return None
return str(json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False,
default=default_serializer,
separators=(',', ': ')))
@classmethod
def get_trailing_metadata_from_interceptor_exception(cls, exception):
try:
return exception.error.trailing_metadata()
except AttributeError:
try:
return exception.trailing_metadata()
except AttributeError:
return tuple()
@classmethod
def get_client_call_details_instance(cls, method, timeout, metadata,
credentials=None):
class _ClientCallDetails(
namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
ClientCallDetails):
pass
return _ClientCallDetails(method, timeout, metadata, credentials)
def __init__(self, api_version):
self._error_protos = import_module(
f'google.ads.google_ads.{api_version}.proto.errors.errors_pb2')
self._failure_key = (
f'google.ads.googleads.{api_version}.errors.googleadsfailure-bin')
self._exception = None
def _get_error_from_response(self, response):
if self._exception:
return self._exception
status_code = response.code()
response_exception = response.exception()
if status_code not in _RETRY_STATUS_CODES:
trailing_metadata = response.trailing_metadata()
google_ads_failure = self._get_google_ads_failure(trailing_metadata)
if google_ads_failure:
request_id = self.get_request_id_from_metadata(
trailing_metadata)
self._exception = GoogleAdsException(
response_exception, response, google_ads_failure,
request_id)
else:
self._exception = response_exception
else:
self._exception = response_exception
return self._exception
def _get_google_ads_failure(self, trailing_metadata):
if trailing_metadata is not None:
for kv in trailing_metadata:
if kv[0] == self._failure_key:
try:
ga_failure = self._error_protos.GoogleAdsFailure()
ga_failure.ParseFromString(kv[1])
return ga_failure
except DecodeError:
return None
return None
| true | true |
f7307a29a25257c1fb187abf7a58e0c31eda5993 | 17,968 | py | Python | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | null | null | null | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 1 | 2020-04-23T02:34:53.000Z | 2020-04-23T02:34:53.000Z | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 1 | 2020-04-27T15:07:54.000Z | 2020-04-27T15:07:54.000Z | from unittest import mock
import os
import platform
import subprocess
import sys
import sysconfig
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self): # issue3762
if sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
real = _winapi.GetModuleFileName(0)
else:
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
# On Windows, the EXE needs to know where pythonXY.dll and *.pyd is at
# so we add the directory to the path, PYTHONHOME and PYTHONPATH.
env = None
if sys.platform == "win32":
env = {k.upper(): os.environ[k] for k in os.environ}
env["PATH"] = "{};{}".format(
os.path.dirname(real), env.get("PATH", ""))
env["PYTHONHOME"] = os.path.dirname(real)
if sysconfig.is_python_build(True):
env["PYTHONPATH"] = os.path.dirname(os.__file__)
def get(python, env=None):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
r = p.communicate()
if p.returncode:
print(repr(r[0]))
print(repr(r[1]), file=sys.stderr)
self.fail('unexpected return code: {0} (0x{0:08X})'
.format(p.returncode))
return r
try:
self.assertEqual(get(sys.executable), get(link, env=env))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
# branch and revision are not "parsed", but fetched
# from sys._git. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, scm, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if scm is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = scm
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
elif sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
executable = _winapi.GetModuleFileName(0)
else:
executable = sys.executable
res = platform.libc_ver(executable)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as f:
f.write(b'x'*(16384-10))
f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
self.assertEqual(platform.libc_ver(support.TESTFN),
('glibc', '1.23.4'))
@support.cpython_only
def test__comparable_version(self):
from platform import _comparable_version as V
self.assertEqual(V('1.2.3'), V('1.2.3'))
self.assertLess(V('1.2.3'), V('1.2.10'))
self.assertEqual(V('1.2.3.4'), V('1_2-3+4'))
self.assertLess(V('1.2spam'), V('1.2dev'))
self.assertLess(V('1.2dev'), V('1.2alpha'))
self.assertLess(V('1.2dev'), V('1.2a'))
self.assertLess(V('1.2alpha'), V('1.2beta'))
self.assertLess(V('1.2a'), V('1.2b'))
self.assertLess(V('1.2beta'), V('1.2c'))
self.assertLess(V('1.2b'), V('1.2c'))
self.assertLess(V('1.2c'), V('1.2RC'))
self.assertLess(V('1.2c'), V('1.2rc'))
self.assertLess(V('1.2RC'), V('1.2.0'))
self.assertLess(V('1.2rc'), V('1.2.0'))
self.assertLess(V('1.2.0'), V('1.2pl'))
self.assertLess(V('1.2.0'), V('1.2p'))
self.assertLess(V('1.5.1'), V('1.5.2b2'))
self.assertLess(V('3.10a'), V('161'))
self.assertEqual(V('8.02'), V('8.02'))
self.assertLess(V('3.4j'), V('1996.07.12'))
self.assertLess(V('3.1.1.6'), V('3.2.pl0'))
self.assertLess(V('2g6'), V('11g'))
self.assertLess(V('0.9'), V('2.2'))
self.assertLess(V('1.2'), V('1.2.1'))
self.assertLess(V('1.1'), V('1.2.2'))
self.assertLess(V('1.1'), V('1.2'))
self.assertLess(V('1.2.1'), V('1.2.2'))
self.assertLess(V('1.2'), V('1.2.2'))
self.assertLess(V('0.4'), V('0.4.0'))
self.assertLess(V('1.13++'), V('5.5.kw'))
self.assertLess(V('0.960923'), V('2.2beta29'))
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
# Issue #17429
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
| 41.689095 | 114 | 0.512077 | from unittest import mock
import os
import platform
import subprocess
import sys
import sysconfig
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self):
if sys.platform == "win32" and not os.path.exists(sys.executable):
import _winapi
real = _winapi.GetModuleFileName(0)
else:
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
env = None
if sys.platform == "win32":
env = {k.upper(): os.environ[k] for k in os.environ}
env["PATH"] = "{};{}".format(
os.path.dirname(real), env.get("PATH", ""))
env["PYTHONHOME"] = os.path.dirname(real)
if sysconfig.is_python_build(True):
env["PYTHONPATH"] = os.path.dirname(os.__file__)
def get(python, env=None):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
r = p.communicate()
if p.returncode:
print(repr(r[0]))
print(repr(r[1]), file=sys.stderr)
self.fail('unexpected return code: {0} (0x{0:08X})'
.format(p.returncode))
return r
try:
self.assertEqual(get(sys.executable), get(link, env=env))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, scm, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if scm is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = scm
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
elif sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
executable = _winapi.GetModuleFileName(0)
else:
executable = sys.executable
res = platform.libc_ver(executable)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as f:
f.write(b'x'*(16384-10))
f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
self.assertEqual(platform.libc_ver(support.TESTFN),
('glibc', '1.23.4'))
@support.cpython_only
def test__comparable_version(self):
from platform import _comparable_version as V
self.assertEqual(V('1.2.3'), V('1.2.3'))
self.assertLess(V('1.2.3'), V('1.2.10'))
self.assertEqual(V('1.2.3.4'), V('1_2-3+4'))
self.assertLess(V('1.2spam'), V('1.2dev'))
self.assertLess(V('1.2dev'), V('1.2alpha'))
self.assertLess(V('1.2dev'), V('1.2a'))
self.assertLess(V('1.2alpha'), V('1.2beta'))
self.assertLess(V('1.2a'), V('1.2b'))
self.assertLess(V('1.2beta'), V('1.2c'))
self.assertLess(V('1.2b'), V('1.2c'))
self.assertLess(V('1.2c'), V('1.2RC'))
self.assertLess(V('1.2c'), V('1.2rc'))
self.assertLess(V('1.2RC'), V('1.2.0'))
self.assertLess(V('1.2rc'), V('1.2.0'))
self.assertLess(V('1.2.0'), V('1.2pl'))
self.assertLess(V('1.2.0'), V('1.2p'))
self.assertLess(V('1.5.1'), V('1.5.2b2'))
self.assertLess(V('3.10a'), V('161'))
self.assertEqual(V('8.02'), V('8.02'))
self.assertLess(V('3.4j'), V('1996.07.12'))
self.assertLess(V('3.1.1.6'), V('3.2.pl0'))
self.assertLess(V('2g6'), V('11g'))
self.assertLess(V('0.9'), V('2.2'))
self.assertLess(V('1.2'), V('1.2.1'))
self.assertLess(V('1.1'), V('1.2.2'))
self.assertLess(V('1.1'), V('1.2'))
self.assertLess(V('1.2.1'), V('1.2.2'))
self.assertLess(V('1.2'), V('1.2.2'))
self.assertLess(V('0.4'), V('0.4.0'))
self.assertLess(V('1.13++'), V('5.5.kw'))
self.assertLess(V('0.960923'), V('2.2beta29'))
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
| true | true |
f7307a51d6578e0dff654d9626eca5b71b10855a | 2,435 | py | Python | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | 1 | 2016-11-17T07:15:00.000Z | 2016-11-17T07:15:00.000Z | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | 6 | 2016-11-17T10:27:38.000Z | 2016-11-18T13:20:05.000Z | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | null | null | null | """
Implementation of an auxiliary class that as an intermediate layer of Monad and Prototype
A Function Class
"""
import functools as F
from __init__ import utils
from __init__ import proto
# a magica determines (A) the way to distribute the difference to each prototype
# (B) the way to measure over the prototypes
# By generating functions when invoking the mix functional
# In such a way, magica more like a static class
# constaints should not be contained in this file
# never try to modify the prototype information even it seems necessary
class Magica(object):
    """Abstract mixing strategy over a list of prototypes.

    A magica determines (A) the way to distribute a difference signal to
    each prototype and (B) the way to combine measurements taken over the
    prototypes.  Concrete behaviour comes from subclasses overriding
    ``_measure``, ``_distribute`` and ``_allocate``; ``mix`` then binds that
    behaviour to a concrete prototype list via partial application, so a
    Magica behaves much like a static strategy class.  Prototype state is
    never modified here.
    """

    def __init__(self):
        super(Magica, self).__init__()

    def mix(self, prototypes):
        """Bind this magica to *prototypes*.

        Returns a triple ``(measure_fn, propagate_fn, parameters)`` where the
        first two are this instance's methods with *prototypes* pre-bound and
        *parameters* is whatever ``_allocate`` produced for them.
        """
        utils.check_types(prototypes, proto.Prototype)
        # Bug fix: the original partially applied the bare names ``measure``
        # and ``propagate`` (NameError at call time -- they are methods, not
        # module-level functions); bind the bound methods instead.
        return (F.partial(self.measure, prototypes),
                F.partial(self.propagate, prototypes),
                self._allocate(prototypes))

    def measure(self, prototypes, options):
        """Measure every prototype and combine the results via ``_measure``.

        ``options`` may be shorter than ``prototypes``; missing entries are
        padded with None.  NOTE: the padding mutates the caller's list.
        """
        assert len(options) <= len(prototypes)  # options never outnumber prototypes
        while len(options) != len(prototypes):
            options.append(None)
        # NOTE(review): _measure is declared with a ``parameters`` argument
        # that is not supplied here -- confirm against concrete subclasses.
        self._measure([prototypes[i].measure(options[i])
                       for i in range(len(options))])

    def propagate(self, prototypes, diff, parameters):
        """Split *diff* with ``_distribute`` and push each share down to its
        corresponding prototype."""
        sub_diff_list = self._distribute(prototypes, diff, parameters)
        # Bug fix: the original called the non-existent ``functools.map``
        # (AttributeError), so no propagation ever ran; use a plain loop.
        for i in range(len(sub_diff_list)):
            prototypes[i].propagate(sub_diff_list[i])

    def _measure(self, sub_measurements, parameters):
        # <override> required: combine the per-prototype measurements.
        utils.require_override()

    def _distribute(self, prototypes, diff, parameters):
        # <override> required: return one difference share per prototype.
        # ``diff`` may be a batch, in which case each share is arranged along
        # axis 0; ``parameters`` may be batched to match.
        utils.require_override()

    def _allocate(self, prototypes):
        # <override> required: return a name->parameter dictionary holding
        # this magica's per-monad parameters.
        utils.require_override()
| true | true |
f7307ab549cd6a961231f29a99a04cc4dd37c4f8 | 2,822 | py | Python | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | AnomalyInc/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | 2 | 2017-04-27T11:04:27.000Z | 2019-02-07T21:03:32.000Z | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | """This module contains examples of stream_func where f_type
is 'element' and stream_func has a list of multiple input streams,
a single output stream, and the operation is stateless. These
examples must have a LIST of input streams and not a single
input stream.
The functions on static Python data structures are of the form:
list -> element
"""
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from functools import partial
from Stream import Stream, _no_value
from Operators import stream_func
import numpy as np
from stream_test import *
def inrange_and_outlier_streams(x_and_y_streams, a, b, delta):
    """Split paired (x, y) streams around the line y = a*x + b.

    Returns two output streams: points whose vertical distance from the
    line is within ``delta`` (in range) and points beyond it (outliers).
    Each output element is ``((x, y), percentage_outliers)`` where the
    percentage is the running fraction of outliers seen so far.
    """
    def inrange_and_outlier(x_and_y_lists, state):
        # state carries the running (num_inrange, num_outliers) counters
        # between successive calls made by stream_func.
        num_inrange, num_outliers = state
        # Pair up the i-th elements of the x list and the y list.
        z_list = zip(*x_and_y_lists)
        inrange_list, outliers_list = [], []
        for v in z_list:
            if abs(a*v[0] + b -v[1]) > delta:
                # outlier: farther than delta from the line y = a*x + b
                num_outliers += 1
                percentage_outliers = num_outliers/float(num_outliers+num_inrange)
                outliers_list.append((v, percentage_outliers))
            else:
                # in range: within delta of the line
                num_inrange += 1
                percentage_outliers = num_outliers/float(num_outliers+num_inrange)
                inrange_list.append((v, percentage_outliers))
        state = num_inrange, num_outliers
        # First return value: one list per output stream; second: new state.
        return ([inrange_list, outliers_list], state)
    return stream_func(
        inputs=x_and_y_streams, f_type='list',
        f=inrange_and_outlier, num_outputs=2,
        state=(0,0))
def test():
    """Drive the in-range/outlier split with two hand-built input streams
    and check the expected (point, running-outlier-percentage) outputs
    against the registered expectations."""
    x = Stream('input_0')
    y = Stream('input_1')
    # Split around the line y = x (a=1, b=0) with tolerance delta=3.
    inrange_stream, outlier_stream = inrange_and_outlier_streams(
        x_and_y_streams=[x,y], a=1, b=0, delta=3)
    inrange_stream.set_name('inrange')
    outlier_stream.set_name('outlier')
    # Register the expected final contents of each output stream.
    check(inrange_stream, [((3, 4), 0.0), ((8, 8), 1.0 / 3.0), ((12, 12), 0.4)])
    check(outlier_stream, [((5, 9), 0.5), ((10, 15), 0.5), ((21, 11), 0.5)])
    print
    # Add values to the tail of stream x.
    x.extend([3, 5, 8, 10])
    y.extend([4, 9, 8, 15])
    # Print recent values of the streams
    print 'recent values of input streams'
    x.print_recent()
    y.print_recent()
    print 'recent values of output streams'
    inrange_stream.print_recent()
    outlier_stream.print_recent()
    print
    # Add more values to the tail of stream x.
    # NOTE(review): x receives three values here but y only two; presumably
    # the unmatched 13 stays pending until y grows -- confirm against the
    # 'list' f_type semantics of stream_func.
    x.extend([12, 21, 13])
    y.extend([12, 11])
    # Print recent values of the streams
    print 'recent values of input streams'
    x.print_recent()
    y.print_recent()
    print 'recent values of output streams'
    inrange_stream.print_recent()
    outlier_stream.print_recent()
    check_empty()
test()
| 28.795918 | 82 | 0.642807 | """This module contains examples of stream_func where f_type
is 'element' and stream_func has a list of multiple input streams,
a single output stream, and the operation is stateless. These
examples must have a LIST of input streams and not a single
input stream.
The functions on static Python data structures are of the form:
list -> element
"""
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from functools import partial
from Stream import Stream, _no_value
from Operators import stream_func
import numpy as np
from stream_test import *
def inrange_and_outlier_streams(x_and_y_streams, a, b, delta):
def inrange_and_outlier(x_and_y_lists, state):
num_inrange, num_outliers = state
z_list = zip(*x_and_y_lists)
inrange_list, outliers_list = [], []
for v in z_list:
if abs(a*v[0] + b -v[1]) > delta:
num_outliers += 1
percentage_outliers = num_outliers/float(num_outliers+num_inrange)
outliers_list.append((v, percentage_outliers))
else:
num_inrange += 1
percentage_outliers = num_outliers/float(num_outliers+num_inrange)
inrange_list.append((v, percentage_outliers))
state = num_inrange, num_outliers
return ([inrange_list, outliers_list], state)
return stream_func(
inputs=x_and_y_streams, f_type='list',
f=inrange_and_outlier, num_outputs=2,
state=(0,0))
def test():
x = Stream('input_0')
y = Stream('input_1')
inrange_stream, outlier_stream = inrange_and_outlier_streams(
x_and_y_streams=[x,y], a=1, b=0, delta=3)
inrange_stream.set_name('inrange')
outlier_stream.set_name('outlier')
check(inrange_stream, [((3, 4), 0.0), ((8, 8), 1.0 / 3.0), ((12, 12), 0.4)])
check(outlier_stream, [((5, 9), 0.5), ((10, 15), 0.5), ((21, 11), 0.5)])
print
x.extend([3, 5, 8, 10])
y.extend([4, 9, 8, 15])
print 'recent values of input streams'
x.print_recent()
y.print_recent()
print 'recent values of output streams'
inrange_stream.print_recent()
outlier_stream.print_recent()
print
x.extend([12, 21, 13])
y.extend([12, 11])
print 'recent values of input streams'
x.print_recent()
y.print_recent()
print 'recent values of output streams'
inrange_stream.print_recent()
outlier_stream.print_recent()
check_empty()
if __name__ == '__main__':
test()
| false | true |
f7307f0a3175323d41f8181d3a53d0ce5b8e591e | 3,465 | py | Python | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | """
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
import subprocess
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.attrib import TemplateAttribNode
class TemplateSystemNode(TemplateAttribNode):
    """AIML <system> template node.

    Resolves its children to a text string, executes that string as a shell
    command, and returns the command's combined stdout/stderr as the node's
    resolved text.  Execution is gated by the brain configuration override
    ``allow_system_aiml``.
    """

    def __init__(self):
        TemplateAttribNode.__init__(self)
        self._timeout = 0  # parsed from the timeout attribute; currently unused

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        self._timeout = timeout

    def resolve_to_string(self, client_context):
        """Run the child-resolved text as a shell command and return its
        output lines joined by single spaces; empty string when disabled."""
        if client_context.brain.configuration.overrides.allow_system_aiml is True:
            command = self.resolve_children_to_string(client_context)
            # SECURITY NOTE: shell=True executes grammar-supplied text
            # directly; system nodes must only be enabled for trusted AIML.
            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            result = []
            for line in process.stdout.readlines():
                byte_string = line.decode("utf-8")
                result.append(byte_string.strip())
            process.wait()
            resolved = " ".join(result)
        else:
            YLogger.warning(client_context, "System command node disabled in config")
            resolved = ""
        YLogger.debug(client_context, "[%s] resolved to [%s]", self.to_string(), resolved)
        return resolved

    def to_string(self):
        return "[SYSTEM timeout=%s]" % (self._timeout)

    def set_attrib(self, attrib_name, attrib_value):
        """Accept only the 'timeout' attribute; reject anything else."""
        if attrib_name != 'timeout':
            # Bug fix: the placeholder was passed as a separate exception
            # argument and never formatted, so the message omitted the
            # offending attribute name; interpolate it explicitly.
            raise ParserException("Invalid attribute name %s for this node" % attrib_name)
        YLogger.warning(self, "System node timeout attrib currently ignored")
        self._timeout = attrib_value

    def to_xml(self, client_context):
        """Re-serialise this node (and children) back to <system> XML."""
        xml = "<system"
        if self._timeout != 0:
            xml += ' timeout="%d"' % self._timeout
        xml += ">"
        xml += self.children_to_xml(client_context)
        xml += "</system>"
        return xml

    #######################################################################################################
    # SYSTEM_EXPRESSION ::==
    # <system( TIMEOUT_ATTRIBUTE)>TEMPLATE_EXPRESSION</system> |
    # <system><timeout>TEMPLATE_EXPRESSION</timeout></system>
    # TIMEOUT_ATTRIBUTE :== timeout="NUMBER"

    def parse_expression(self, graph, expression):
        self._parse_node_with_attrib(graph, expression, "timeout", "0")
| 42.777778 | 120 | 0.679076 |
from programy.utils.logging.ylogger import YLogger
import subprocess
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.attrib import TemplateAttribNode
class TemplateSystemNode(TemplateAttribNode):
def __init__(self):
TemplateAttribNode.__init__(self)
self._timeout = 0
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, timeout):
self._timeout = timeout
def resolve_to_string(self, client_context):
if client_context.brain.configuration.overrides.allow_system_aiml is True:
command = self.resolve_children_to_string(client_context)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = []
for line in process.stdout.readlines():
byte_string = line.decode("utf-8")
result.append(byte_string.strip())
process.wait()
resolved = " ".join(result)
else:
YLogger.warning(client_context, "System command node disabled in config")
resolved = ""
YLogger.debug(client_context, "[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
def to_string(self):
return "[SYSTEM timeout=%s]" % (self._timeout)
def set_attrib(self, attrib_name, attrib_value):
if attrib_name != 'timeout':
raise ParserException("Invalid attribute name %s for this node", attrib_name)
YLogger.warning(self, "System node timeout attrib currently ignored")
self._timeout = attrib_value
def to_xml(self, client_context):
xml = "<system"
if self._timeout != 0:
xml += ' timeout="%d"' % self._timeout
xml += ">"
xml += self.children_to_xml(client_context)
xml += "</system>"
return xml
| true | true |
f73080eed9178010cd0131e215e25b70104b6075 | 1,483 | py | Python | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 2 | 2018-11-14T17:02:02.000Z | 2019-07-19T07:13:41.000Z | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 13 | 2018-11-06T09:29:50.000Z | 2019-12-23T07:36:07.000Z | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 1 | 2018-11-05T04:03:20.000Z | 2018-11-05T04:03:20.000Z | # coding = utf-8
# Create date: 2018-11-20
# Author :Hailong
from utils.connect_to_os import executor, connection
def test_cloud_init(ros_kvm_init, cloud_config_url):
    """Boot RancherOS with a cloud-config, install to disk, register a
    remote cloud-init datasource, reboot, and verify rancher.log is true.

    Args:
        ros_kvm_init: fixture that boots a KVM instance and returns a
            (ssh_client, ip) tuple.
        cloud_config_url: base URL hosting the test cloud-config files.
    """
    kwargs = dict(cloud_config='{url}test_cloud_init.yml'.format(url=cloud_config_url),
                  is_install_to_hard_drive=True)
    tuple_return = ros_kvm_init(**kwargs)
    client = tuple_return[0]
    ip = tuple_return[1]

    # The exported config should contain the 'debug' key set by the
    # cloud-config used at boot.
    c_export_config = 'sudo ros c export'
    output_export_config = executor(client, c_export_config)
    assert ('debug' in output_export_config)

    # Register a remote datasource whose payload is:
    #   #cloud-config
    #   rancher:
    #     log: true
    # NOTE(review): the backslash continuations inside this string literal
    # collapse the heredoc onto a single line; confirm the target file is
    # actually written as intended on the instance.
    c_create_ds = 'sudo tee /var/lib/rancher/conf/cloud-config.d/datasources.yml << EOF \
    rancher: \
    cloud_init: \
    datasources: \
    - url:https://gist.githubusercontent.com/Aisuko/4914974de1cf2a3d5127fd482e2c001a/raw/\
    ed1e30a8a096c6e10d485d02092eaaf8ca8871bd/test_cloud_init.txt \
    EOF'
    c_reboot = 'sudo reboot'
    # Bug fix: the original ran executor(client, c_create_ds + c_reboot),
    # which glued 'sudo reboot' onto the heredoc terminator line
    # ('EOFsudo reboot') so the heredoc never closed and the reboot never
    # executed.  Run the two commands separately.
    executor(client, c_create_ds)
    executor(client, c_reboot)

    # Reconnect after the reboot and confirm the datasource took effect.
    second_client = connection(ip, None)
    c_ros_log = 'sudo ros config get rancher.log'
    output_ros_log = executor(second_client, c_ros_log)
    if output_ros_log:
        output_ros_log = output_ros_log.replace('\n', '')
    second_client.close()
    assert ('true' == output_ros_log)
| 35.309524 | 110 | 0.647336 |
from utils.connect_to_os import executor, connection
def test_cloud_init(ros_kvm_init, cloud_config_url):
kwargs = dict(cloud_config='{url}test_cloud_init.yml'.format(url=cloud_config_url),
is_install_to_hard_drive=True)
tuple_return = ros_kvm_init(**kwargs)
client = tuple_return[0]
ip = tuple_return[1]
c_export_config = 'sudo ros c export'
output_export_config = executor(client, c_export_config)
assert ('debug' in output_export_config)
c_create_ds = 'sudo tee /var/lib/rancher/conf/cloud-config.d/datasources.yml << EOF \
rancher: \
cloud_init: \
datasources: \
- url:https://gist.githubusercontent.com/Aisuko/4914974de1cf2a3d5127fd482e2c001a/raw/\
ed1e30a8a096c6e10d485d02092eaaf8ca8871bd/test_cloud_init.txt \
EOF'
c_reboot = 'sudo reboot'
executor(client, c_create_ds + c_reboot)
second_client = connection(ip, None)
c_ros_log = 'sudo ros config get rancher.log'
output_ros_log = executor(second_client, c_ros_log)
if output_ros_log:
output_ros_log = output_ros_log.replace('\n', '')
second_client.close()
assert ('true' == output_ros_log)
| true | true |
f730815c16e59fb5fe9c61f2d195fe9da671f9b0 | 1,023 | py | Python | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(m * n)
# Space: O(m * n)
import collections
class Solution(object):
    """BFS solution to 'As Far from Land as Possible' (LeetCode 1162)."""

    def maxDistance(self, grid):
        """Return the maximum Manhattan distance from any water cell to its
        nearest land cell, or -1 when the grid is all land or all water.

        :type grid: List[List[int]]
        :rtype: int

        Runs a multi-source BFS seeded with every land cell; the number of
        layers expanded is the answer.  Visited water cells are marked by
        writing 1 into ``grid`` (the input is mutated, as in the original).
        """
        rows, cols = len(grid), len(grid[0])
        frontier = collections.deque((r, c)
                                     for r in range(rows)
                                     for c in range(cols)
                                     if grid[r][c] == 1)
        if len(frontier) == rows * cols:
            return -1  # no water anywhere
        distance = -1
        while frontier:
            distance += 1
            # Expand exactly one BFS layer per outer iteration.
            for _ in range(len(frontier)):
                r, c = frontier.popleft()
                for dr, dc in ((0, 1), (1, 0), (0, -1), (-1, 0)):
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 0:
                        grid[nr][nc] = 1  # mark as visited
                        frontier.append((nr, nc))
        return distance
| 30.088235 | 87 | 0.379277 |
import collections
class Solution(object):
def maxDistance(self, grid):
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
q = collections.deque([(i, j) for i in range(len(grid))
for j in range(len(grid[0])) if grid[i][j] == 1])
if len(q) == len(grid)*len(grid[0]):
return -1
level = -1
while q:
next_q = collections.deque()
while q:
x, y = q.popleft()
for dx, dy in directions:
nx, ny = x+dx, y+dy
if not (0 <= nx < len(grid) and
0 <= ny < len(grid[0]) and
grid[nx][ny] == 0):
continue
next_q.append((nx, ny))
grid[nx][ny] = 1
q = next_q
level += 1
return level
| true | true |
f73081bc0d5f8d3350d07a2150f753fd344a22a7 | 623 | py | Python | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | # PRIMEIROS PASSOS
"""
Imprime OLÁ, MUNDO na tela -- obviamente!
Pede nome do usuário e dois números.
Exibe nome do usuário e a soma dos números inseridos
"""
print('Hello, world!!!')
nome = input('Qual é o seu nome? ')
print('Olá, {}, é um prazer te conhecer!'.format(nome))
#
idade = int(input('Qual é a sua idade? '))
print('Você tem {} anos e ano que vem você terá {} anos' .format(idade, idade + 1))
n1 = int(input('Aproveitando a brincadeira, {}, digita um número aí: ' .format(nome)))
n2 = int(input('Digita outro número, por obséquio: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}' .format(n1, n2, s))
| 32.789474 | 86 | 0.659711 |
print('Hello, world!!!')
nome = input('Qual é o seu nome? ')
print('Olá, {}, é um prazer te conhecer!'.format(nome))
idade = int(input('Qual é a sua idade? '))
print('Você tem {} anos e ano que vem você terá {} anos' .format(idade, idade + 1))
n1 = int(input('Aproveitando a brincadeira, {}, digita um número aí: ' .format(nome)))
n2 = int(input('Digita outro número, por obséquio: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}' .format(n1, n2, s))
| true | true |
f73081bcbbb55a3e2f6093ea3985232180c04f1a | 1,812 | py | Python | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | null | null | null | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | null | null | null | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | 1 | 2021-09-21T16:28:51.000Z | 2021-09-21T16:28:51.000Z | import numpy as np
import matplotlib.pyplot as plt
# Part A: Numerical Differentiation Closure
def numerical_diff(f,h):
def inner(x):
return (f(x+h) - f(x))/h
return inner
# Part B:
f = np.log
x = np.linspace(0.2, 0.4, 500)
h = [1e-1, 1e-7, 1e-15]
y_analytical = 1/x
result = {}
for i in h:
y = numerical_diff(f,i)(x)
result[i] = y
# Plotting
plt.figure(figsize = (8,5))
plt.plot(x, y_analytical, 'x-', label='Analytical Derivative')
for i in h:
plt.plot(x, result[i], label='Estimated derivative h = '+str(i))
plt.xlabel("X value")
plt.ylabel("Derivative Value at X")
plt.title("Differentiation Value at X on various h value")
plt.legend()
# Part C:
print("Answer to Q-a: When h value is 1e-7, it most closely approximates the true derivative. \n",
"When h value is too small: The approximation is jumping around stepwise and not displaying a smooth curve approximation, it amplifies floating point errors in numerical operation such as rounding and division\n",
"When h value is too large: The approximation is lower than the true value, it doesn't provide a good approximation to the derivative\n")
print("Answer to Q-b: Automatic differentiation avoids the problem of not choosing a good h value. \n"
"The finite difference approach is quick and easy but suffers from accuracy and stability problems.\n"
"Symbolic derivatives can be evaluated to machine precision, but can be costly to evaluate.\n"
"Automatic differentiation (AD) overcomes both of these deficiencies. It is less costly than symbolic differentiation while evaluating derivatives to machine precision.\n"
"AD uses forward or backward modes to differentiate, via Computational Graph, chain rule and evaluation trace.")
# Show plot
plt.show()
# plt.savefig('P1_fig.png')
| 38.553191 | 219 | 0.724614 | import numpy as np
import matplotlib.pyplot as plt
def numerical_diff(f,h):
def inner(x):
return (f(x+h) - f(x))/h
return inner
f = np.log
x = np.linspace(0.2, 0.4, 500)
h = [1e-1, 1e-7, 1e-15]
y_analytical = 1/x
result = {}
for i in h:
y = numerical_diff(f,i)(x)
result[i] = y
plt.figure(figsize = (8,5))
plt.plot(x, y_analytical, 'x-', label='Analytical Derivative')
for i in h:
plt.plot(x, result[i], label='Estimated derivative h = '+str(i))
plt.xlabel("X value")
plt.ylabel("Derivative Value at X")
plt.title("Differentiation Value at X on various h value")
plt.legend()
print("Answer to Q-a: When h value is 1e-7, it most closely approximates the true derivative. \n",
"When h value is too small: The approximation is jumping around stepwise and not displaying a smooth curve approximation, it amplifies floating point errors in numerical operation such as rounding and division\n",
"When h value is too large: The approximation is lower than the true value, it doesn't provide a good approximation to the derivative\n")
print("Answer to Q-b: Automatic differentiation avoids the problem of not choosing a good h value. \n"
"The finite difference approach is quick and easy but suffers from accuracy and stability problems.\n"
"Symbolic derivatives can be evaluated to machine precision, but can be costly to evaluate.\n"
"Automatic differentiation (AD) overcomes both of these deficiencies. It is less costly than symbolic differentiation while evaluating derivatives to machine precision.\n"
"AD uses forward or backward modes to differentiate, via Computational Graph, chain rule and evaluation trace.")
# Show plot
plt.show()
# plt.savefig('P1_fig.png')
| true | true |
f730824ede58439b2808be47b15c08761386514e | 10,409 | py | Python | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def initialize():
db.configure_db()
def _remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids):
if physical_network in allocations:
for entry in allocations[physical_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(entry.segmentation_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not entry.allocated:
# it's not, so remove it from table
LOG.debug(_(
"Removing vlan %(seg_id)s on "
"physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': physical_network})
session.delete(entry)
del allocations[physical_network]
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
for vlan_id in sorted(vlan_ids):
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
vlan_id)
session.add(entry)
def _remove_unconfigured_vlans(session, allocations):
for entries in allocations.itervalues():
for entry in entries:
if not entry.allocated:
LOG.debug(_("Removing vlan %(seg_id)s on physical "
"network %(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
session.delete(entry)
def sync_network_states(network_vlan_ranges):
"""Synchronize network_states table with current configured VLAN ranges."""
session = db.get_session()
with session.begin():
# get existing allocations for all physical networks
allocations = dict()
entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
all())
for entry in entries:
allocations.setdefault(entry.physical_network, set()).add(entry)
# process vlan ranges for each configured physical network
for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
# determine current configured allocatable vlans for this
# physical network
vlan_ids = set()
for vlan_range in vlan_ranges:
vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
# remove from table unallocated vlans not currently allocatable
_remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids)
# add missing allocatable vlans to table
_add_missing_allocatable_vlans(session, physical_network, vlan_ids)
# remove from table unallocated vlans for any unconfigured physical
# networks
_remove_unconfigured_vlans(session, allocations)
def get_network_state(physical_network, segmentation_id):
"""Get entry of specified network."""
session = db.get_session()
qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
qry = qry.filter_by(physical_network=physical_network,
segmentation_id=segmentation_id)
return qry.first()
def reserve_network(session):
with session.begin(subtransactions=True):
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if not entry:
raise q_exc.NoNetworkAvailable()
LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
entry.allocated = True
return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
if entry.allocated:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
LOG.debug(_("Reserving specific vlan %(seg_id)s "
"on physical network %(phy_net)s from pool"),
log_args)
entry.allocated = True
except exc.NoResultFound:
LOG.debug(_("Reserving specific vlan %(seg_id)s on "
"physical network %(phy_net)s outside pool"),
log_args)
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
segmentation_id)
entry.allocated = True
session.add(entry)
def release_network(session, physical_network,
segmentation_id, network_vlan_ranges):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
state.allocated = False
inside = False
for vlan_range in network_vlan_ranges.get(physical_network, []):
if (segmentation_id >= vlan_range[0] and
segmentation_id <= vlan_range[1]):
inside = True
break
if inside:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s to pool"),
log_args)
else:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s outside pool"),
log_args)
session.delete(state)
except exc.NoResultFound:
LOG.warning(_("vlan_id %(seg_id)s on physical network "
"%(phy_net)s not found"),
log_args)
def add_network_binding(session, network_id, network_type,
physical_network, vlan_id):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
physical_network, vlan_id)
session.add(binding)
def get_network_binding(session, network_id):
return (session.query(mlnx_models_v2.NetworkBinding).
filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
session.add(binding)
def get_port_profile_binding(session, port_id):
return (session.query(mlnx_models_v2.PortProfileBinding).
filter_by(port_id=port_id).first())
def get_port_from_device(device):
"""Get port from database."""
LOG.debug(_("get_port_from_device() called"))
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(device))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_in_db, sg_id in port_and_sgs if sg_id
]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_from_device_mac(device_mac):
"""Get port from database."""
LOG.debug(_("Get_port_from_device_mac() called"))
session = db.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def set_port_status(port_id, status):
"""Set the port status."""
LOG.debug(_("Set_port_status as %s called"), status)
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
| 39.729008 | 79 | 0.614372 |
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def initialize():
db.configure_db()
def _remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids):
if physical_network in allocations:
for entry in allocations[physical_network]:
try:
vlan_ids.remove(entry.segmentation_id)
except KeyError:
if not entry.allocated:
# it's not, so remove it from table
LOG.debug(_(
"Removing vlan %(seg_id)s on "
"physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': physical_network})
session.delete(entry)
del allocations[physical_network]
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
for vlan_id in sorted(vlan_ids):
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
vlan_id)
session.add(entry)
def _remove_unconfigured_vlans(session, allocations):
for entries in allocations.itervalues():
for entry in entries:
if not entry.allocated:
LOG.debug(_("Removing vlan %(seg_id)s on physical "
"network %(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
session.delete(entry)
def sync_network_states(network_vlan_ranges):
session = db.get_session()
with session.begin():
allocations = dict()
entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
all())
for entry in entries:
allocations.setdefault(entry.physical_network, set()).add(entry)
for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
vlan_ids = set()
for vlan_range in vlan_ranges:
vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
_remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids)
_add_missing_allocatable_vlans(session, physical_network, vlan_ids)
_remove_unconfigured_vlans(session, allocations)
def get_network_state(physical_network, segmentation_id):
session = db.get_session()
qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
qry = qry.filter_by(physical_network=physical_network,
segmentation_id=segmentation_id)
return qry.first()
def reserve_network(session):
with session.begin(subtransactions=True):
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if not entry:
raise q_exc.NoNetworkAvailable()
LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
entry.allocated = True
return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
if entry.allocated:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
LOG.debug(_("Reserving specific vlan %(seg_id)s "
"on physical network %(phy_net)s from pool"),
log_args)
entry.allocated = True
except exc.NoResultFound:
LOG.debug(_("Reserving specific vlan %(seg_id)s on "
"physical network %(phy_net)s outside pool"),
log_args)
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
segmentation_id)
entry.allocated = True
session.add(entry)
def release_network(session, physical_network,
segmentation_id, network_vlan_ranges):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
state.allocated = False
inside = False
for vlan_range in network_vlan_ranges.get(physical_network, []):
if (segmentation_id >= vlan_range[0] and
segmentation_id <= vlan_range[1]):
inside = True
break
if inside:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s to pool"),
log_args)
else:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s outside pool"),
log_args)
session.delete(state)
except exc.NoResultFound:
LOG.warning(_("vlan_id %(seg_id)s on physical network "
"%(phy_net)s not found"),
log_args)
def add_network_binding(session, network_id, network_type,
physical_network, vlan_id):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
physical_network, vlan_id)
session.add(binding)
def get_network_binding(session, network_id):
return (session.query(mlnx_models_v2.NetworkBinding).
filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
session.add(binding)
def get_port_profile_binding(session, port_id):
return (session.query(mlnx_models_v2.PortProfileBinding).
filter_by(port_id=port_id).first())
def get_port_from_device(device):
LOG.debug(_("get_port_from_device() called"))
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(device))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_in_db, sg_id in port_and_sgs if sg_id
]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_from_device_mac(device_mac):
LOG.debug(_("Get_port_from_device_mac() called"))
session = db.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def set_port_status(port_id, status):
LOG.debug(_("Set_port_status as %s called"), status)
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
| true | true |
f7308345fe160b2a1f7b7c825a95a3b7f2aa10bc | 6,872 | py | Python | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# PyTorch Tutorial 07 - Linear Regression
# https://www.youtube.com/watch?v=YAJ5XBwlN4o&list=PLqnslRFeH2UrcDBWF5mfPGpqQDSta6VK4&index=7
#from __future__ import print_function
import torch
print("\n" * 20)
print("-" * 80)
print("-" * 80)
print("\n" * 2)
#### Steps in Torch ML pipeline
# 1) Design Model (input, output size, forward pass)
# 2) Construct the loss & optimiser
# 3) Training Loop
# - forward pass: compute prediction
# - backward pass: gradients
# - update weights
# 0m - review Steps in Torch ML pipeline
# 1m - library imports
# 2m - coding starts - prepare data
# 4m30 - 1) Design Model (input, output size, forward pass)
# 5m40 - 2) Construct the loss & optimiser
# 7m - 3) Training Loop
# 10m - plot
import torch
import torch.nn as nn # PyTorch nn module has high-level APIs to build a neural network.
# Torch. nn module uses Tensors and Automatic differentiation modules for training and building layers such as input,
# hidden, and output layers - DOCS: https://pytorch.org/docs/stable/nn.html
import numpy as np # NumPy is a library for the Python programming language, adding support for large,
# multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate
# on these arrays - DOCS: https://numpy.org/doc/stable/user/whatisnumpy.html
from sklearn import datasets # to generate a regression dataset
# Scikit-learn is a library in Python that provides many unsupervised and supervised
# learning algorithms. It contains a lot of efficient tools for machine learning and statistical modeling including
# classification, regression, clustering and dimensionality reduction. Built upon some of the technology you might
# already be familiar with, like NumPy, pandas, and Matplotlib!
# DOCS: https://scikit-learn.org/stable/
import matplotlib.pyplot as plt # Matplotlib is a plotting library for the Python programming language. It provides an
# object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter,
# wxPython, Qt, or GTK - DOCS:
# cheatsheets: https://github.com/matplotlib/cheatsheets#cheatsheets
# How to plot & save graph hello world: https://github.com/UnacceptableBehaviour/latex_maths#python---matplotlib-numpy
# 0) prepare data - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
x_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=1)
# returned from ^
# change data type from double to float32 - avoid erros later
X = torch.from_numpy(x_numpy.astype(np.float32)) # create torch tensor from numpy array
Y = torch.from_numpy(y_numpy.astype(np.float32))
print(f"\n Y = torch.from_numpy(y_numpy.astype(np.float32)) \n{ Y }")
# Y = torch.from_numpy(y_numpy.astype(np.float32)) # tensor w a single row - see square brackets
# tensor([-5.5539e+01, -1.0662e+01, 2.2757e+01, 1.0110e+02, 1.4434e+02,
# 3.3289e+01, 3.3015e+01, -2.5887e+01, -9.9639e+01, 2.3803e+01,
# -4.5589e+01, -8.3388e+00, -9.5315e+01, 3.6407e+01, -8.7293e+01,
# 6.7669e+01, -1.3687e+01, -5.5441e+01, -6.5340e+01, -5.4450e+01,
# -2.8835e+01, 1.7884e+02, 6.5084e+01, 2.6668e+01, -1.8546e+01,
# -4.1499e+01, 8.5583e-01, 4.4562e+01, 1.1598e+02, -6.4620e+01,
# -2.5931e+01, -6.0882e+01, 1.8720e+01, 7.5070e+01, 1.1720e+02,
# -2.2698e+01, -5.6363e+01, 1.8084e+02, -1.9257e+02, 6.8503e+01,
# 1.6552e+02, 1.0500e+02, -7.0434e+01, -5.8769e+01, -4.1576e+01,
# 7.3247e+01, 4.0966e+01, 8.0462e+01, -2.8794e+01, 3.4234e+01,
# -4.1715e+01, 1.4355e+01, 7.9336e+01, 2.7129e+01, -3.9487e+01,
# 6.6805e+01, 9.5531e+01, 3.5610e+00, 1.0857e-01, 5.6495e+01,
# 5.1575e+01, -2.0974e+00, -2.6656e+01, 3.9742e+01, 3.6101e+01,
# -7.5602e+01, 1.9713e+01, -7.1601e+01, -1.9904e+01, -7.6708e+01,
# -1.1834e+02, -2.9825e+01, 1.5108e+02, 5.2923e+01, -5.9552e+01,
# 3.0721e+01, -2.9355e+01, -4.4786e+01, 1.0006e+02, 1.5058e+02,
# 1.2200e+02, -1.8186e+02, 3.4739e+00, -2.2980e+01, 4.5184e+01,
# 9.8606e+01, -9.2779e+00, -5.2478e+01, 3.8593e+01, -1.9997e+02,
# -9.5201e+00, -3.4724e+00, -3.5312e+01, 7.5406e+01, 1.7570e+01,
# -2.3960e+01, 1.3209e+02, 2.0608e+01, 5.1111e+01, -2.6306e+01])
print(f"\n Y.shape[0] \n{ Y.shape[0] }") # 100
y = Y.view(Y.shape[0], 1) # reshape to a column tensor Y.view(ROW, COL) Y.view(100, 1)
print(f"\n y = Y.view(y.shape[0], 1) \n{ y }")
# tensor([[-5.5539e+01],
# [-1.0662e+01],
# [ 2.2757e+01],
# [ 1.0110e+02],
# .
# 100 in total
# .
# [ 1.3209e+02],
# [ 2.0608e+01],
# [ 5.1111e+01],
# [-2.6306e+01]])
print(f"\n y.shape \n{ y.shape }") # new little y shape = torch.Size([100, 1]) ROWS, COLS
print(f"\n X.shape \n{ X.shape }")
n_samples, n_features = X.shape
#print(f"\n \n{ }")
# 1) model - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# in LINEAR REGRESSION case this is ONE layer
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size) # built in Linear model
# 2) loss optimizer - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
learning_rate = 0.01
criterion = nn.MSELoss() # for LINEAR REGRESSION - BUILT IN Loss function Mean Squared Error Loss
# nn.MSELoss() creates a criterion - https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) # SGD - Stocastic Gradient Descent
# https://pytorch.org/docs/stable/optim.html?highlight=torch%20optim%20sgd#torch.optim.SGD
# w/ optional Nesterov momentum :o
# 3) training loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
num_epochs = 100
for epoch in range(num_epochs):
# - forward pass: compute prediction
y_predicted = model(X) # call model passing in data X
loss = criterion(y_predicted, y) # actual labels & predicted - output = criterion(input, target)
# - backward pass: gradients
loss.backward()
# - update weights
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
# plot
predicted = model(X).detach().numpy() # prevent gradient tracking?
label_data = plt.plot(x_numpy, y_numpy, 'ro')
label_model = plt.plot(x_numpy, predicted, 'b')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['data','model'])
plt.show()
print('plt.show')
print(f"\n x_numpy \n{ x_numpy }")
print(f"\n y_numpy \n{ y_numpy }")
print(f"\n predicted \n{ predicted }")
#print(f"\n \n{ }")
#print(f"\n \n{ }")
print('\n')
| 42.95 | 120 | 0.625437 |
import torch
print("\n" * 20)
print("-" * 80)
print("-" * 80)
print("\n" * 2)
from sklearn import datasets
import matplotlib.pyplot as plt
e_regression(n_samples=100, n_features=1, noise=20, random_state=1)
X = torch.from_numpy(x_numpy.astype(np.float32))
Y = torch.from_numpy(y_numpy.astype(np.float32))
print(f"\n Y = torch.from_numpy(y_numpy.astype(np.float32)) \n{ Y }")
n{ Y.shape[0] }")
y = Y.view(Y.shape[0], 1)
print(f"\n y = Y.view(y.shape[0], 1) \n{ y }")
print(f"\n y.shape \n{ y.shape }")
print(f"\n X.shape \n{ X.shape }")
n_samples, n_features = X.shape
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size)
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
= 100
for epoch in range(num_epochs):
y_predicted = model(X)
loss = criterion(y_predicted, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
predicted = model(X).detach().numpy()
label_data = plt.plot(x_numpy, y_numpy, 'ro')
label_model = plt.plot(x_numpy, predicted, 'b')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['data','model'])
plt.show()
print('plt.show')
print(f"\n x_numpy \n{ x_numpy }")
print(f"\n y_numpy \n{ y_numpy }")
print(f"\n predicted \n{ predicted }")
print('\n')
| true | true |
f73083901e3441f13ab867dc840908e3619b3a39 | 18,332 | py | Python | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that cause horizontal scrolling
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
from typing import Any, Callable, Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence
import lxml.html
from django.utils.html import escape
from markdown import Markdown
from markdown.extensions import Extension, codehilite
from markdown.extensions.codehilite import CodeHiliteExtension, parse_hl_lines
from markdown.preprocessors import Preprocessor
from pygments.lexers import find_lexer_class_by_name
from pygments.util import ClassNotFound
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
from zerver.lib.tex import render_tex
# Global vars
FENCE_RE = re.compile(
r"""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(?:
# language, like ".py" or "{javascript}"
\{?\.?
(?P<lang>
[a-zA-Z0-9_+-./#]+
) # "py" or "javascript"
[ ]* # spaces
# header for features that use fenced block header syntax (like spoilers)
(?P<header>
[^ ~`][^~`]*
)?
\}?
)?
$
""",
re.VERBOSE,
)
CODE_WRAP = "<pre><code{}>{}\n</code></pre>"
LANG_TAG = ' class="{}"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith("curl"):
if re.search(regex, line) is None:
raise MarkdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS: Dict[Optional[str], Callable[[List[str]], None]] = {
"curl": validate_curl_content,
}
class FencedCodeExtension(Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
"run_content_validators": [
config.get("run_content_validators", False),
"Boolean specifying whether to run content validation code in CodeHandler",
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: Markdown) -> None:
"""Add FencedBlockPreprocessor to the Markdown instance."""
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config["run_content_validators"][0]
)
md.preprocessors.register(
processor, "fenced_code_block", PREPROCESSOR_PRIORITES["fenced_code_block"]
)
class ZulipBaseHandler:
    """State machine for one fenced block.

    Collects lines until the closing fence, then formats and emits the
    accumulated contents via the subclass's format_text().
    """

    def __init__(
        self,
        processor: "FencedBlockPreprocessor",
        output: MutableSequence[str],
        fence: Optional[str] = None,
        process_contents: bool = False,
    ) -> None:
        self.processor = processor
        self.output = output
        self.fence = fence
        self.process_contents = process_contents
        self.lines: List[str] = []

    def handle_line(self, line: str) -> None:
        stripped = line.rstrip()
        if stripped == self.fence:
            self.done()
        else:
            self.lines.append(stripped)

    def done(self) -> None:
        if self.lines:
            text = self.format_text("\n".join(self.lines))
            # Code blocks must not receive further Markdown processing, so
            # their output is stashed as a placeholder. Quote and spoiler
            # blocks set process_contents=True because their contents should
            # explicitly get Markdown formatting.
            if not self.process_contents:
                text = self.processor.placeholder(text)
            self.output.append("")
            self.output.extend(text.split("\n"))
            self.output.append("")
        self.processor.pop()

    def format_text(self, text: str) -> str:
        """Return the formatted block contents; subclasses must override."""
        raise NotImplementedError()
def generic_handler(
    processor: "FencedBlockPreprocessor",
    output: MutableSequence[str],
    fence: str,
    lang: Optional[str],
    header: Optional[str],
    run_content_validators: bool = False,
    default_language: Optional[str] = None,
) -> ZulipBaseHandler:
    """Select the handler implementation matching the fence's language tag.

    Unrecognized (or absent) tags fall through to CodeHandler.
    """
    normalized = lang.lower() if lang is not None else None
    if normalized in ("quote", "quoted"):
        return QuoteHandler(processor, output, fence, default_language)
    if normalized == "math":
        return TexHandler(processor, output, fence)
    if normalized == "spoiler":
        return SpoilerHandler(processor, output, fence, header)
    return CodeHandler(processor, output, fence, normalized, run_content_validators)
def check_for_new_fence(
    processor: "FencedBlockPreprocessor",
    output: MutableSequence[str],
    line: str,
    run_content_validators: bool = False,
    default_language: Optional[str] = None,
) -> None:
    """Open a new fence handler if *line* starts one; otherwise pass it through.

    Non-fence lines are appended to *output* unchanged.
    """
    match = FENCE_RE.match(line)
    if match is None:
        output.append(line)
        return
    fence = match.group("fence")
    lang: Optional[str] = match.group("lang")
    header: Optional[str] = match.group("header")
    # An untagged fence inherits the realm's default code block language.
    if not lang and default_language:
        lang = default_language
    processor.push(
        generic_handler(
            processor, output, fence, lang, header, run_content_validators, default_language
        )
    )
class OuterHandler(ZulipBaseHandler):
    """Top-level handler: scans ordinary document lines for opening fences."""

    def __init__(
        self,
        processor: "FencedBlockPreprocessor",
        output: MutableSequence[str],
        run_content_validators: bool = False,
        default_language: Optional[str] = None,
    ) -> None:
        super().__init__(processor, output)
        self.run_content_validators = run_content_validators
        self.default_language = default_language

    def handle_line(self, line: str) -> None:
        # Every line either opens a new fence or is emitted verbatim.
        check_for_new_fence(
            self.processor, self.output, line, self.run_content_validators, self.default_language
        )
class CodeHandler(ZulipBaseHandler):
    """Handler for code fences; output is highlighted and stashed."""

    def __init__(
        self,
        processor: "FencedBlockPreprocessor",
        output: MutableSequence[str],
        fence: str,
        lang: Optional[str],
        run_content_validators: bool = False,
    ) -> None:
        super().__init__(processor, output, fence)
        self.lang = lang
        self.run_content_validators = run_content_validators

    def done(self) -> None:
        # Run the content validator registered for this language (if any)
        # before the block is formatted and stashed.
        if self.run_content_validators:
            validator = CODE_VALIDATORS.get(self.lang, lambda lines: None)
            validator(self.lines)
        super().done()

    def format_text(self, text: str) -> str:
        return self.processor.format_code(self.lang, text)
class QuoteHandler(ZulipBaseHandler):
    """Handler for ```quote fences; contents may contain nested fences."""

    def __init__(
        self,
        processor: "FencedBlockPreprocessor",
        output: MutableSequence[str],
        fence: str,
        default_language: Optional[str] = None,
    ) -> None:
        super().__init__(processor, output, fence, process_contents=True)
        self.default_language = default_language

    def handle_line(self, line: str) -> None:
        if line.rstrip() == self.fence:
            self.done()
            return
        # Nested fences inside the quote are collected into self.lines,
        # not the outer output.
        check_for_new_fence(
            self.processor, self.lines, line, default_language=self.default_language
        )

    def format_text(self, text: str) -> str:
        return self.processor.format_quote(text)
class SpoilerHandler(ZulipBaseHandler):
    """Handler for ```spoiler fences; contents may contain nested fences."""

    def __init__(
        self,
        processor: "FencedBlockPreprocessor",
        output: MutableSequence[str],
        fence: str,
        spoiler_header: Optional[str],
    ) -> None:
        super().__init__(processor, output, fence, process_contents=True)
        self.spoiler_header = spoiler_header

    def handle_line(self, line: str) -> None:
        if line.rstrip() == self.fence:
            self.done()
            return
        # Nested fences inside the spoiler are collected into self.lines.
        check_for_new_fence(self.processor, self.lines, line)

    def format_text(self, text: str) -> str:
        return self.processor.format_spoiler(self.spoiler_header, text)
class TexHandler(ZulipBaseHandler):
    """Handler for ```math fences; contents are rendered as TeX."""

    def format_text(self, text: str) -> str:
        return self.processor.format_tex(text)
class CodeHilite(codehilite.CodeHilite):
    def _parseHeader(self) -> None:
        """Parse the block's first line for a language/hl_lines header.

        Overrides the upstream implementation solely to disable its
        "pop the shebang line" behavior; everything else mirrors upstream.
        """
        # Python-Markdown has a feature to parse-and-hide shebang
        # lines present in code blocks:
        #
        # https://python-markdown.github.io/extensions/code_hilite/#shebang-no-path
        #
        # While using shebang lines for language detection is
        # reasonable, we don't want this feature because it can be
        # really confusing when doing anything else in a one-line code
        # block that starts with `!` (which would then render as an
        # empty code block!). So we disable the feature, by
        # overriding this function, which implements it in CodeHilite
        # upstream.

        # split text into lines
        lines = self.src.split("\n")
        # Python-Markdown pops out the first line which we are avoiding here.
        # Examine first line
        fl = lines[0]

        c = re.compile(
            r"""
            (?:(?:^::+)|(?P<shebang>^[#]!))  # Shebang or 2 or more colons
            (?P<path>(?:/\w+)*[/ ])?         # Zero or 1 path
            (?P<lang>[\w#.+-]*)              # The language
            \s*                              # Arbitrary whitespace
            # Optional highlight lines, single- or double-quote-delimited
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
            """,
            re.VERBOSE,
        )
        # Search first line for shebang
        m = c.search(fl)
        if m:
            # We have a match
            try:
                self.lang = m.group("lang").lower()
            except IndexError:  # nocoverage
                self.lang = None

            if self.options["linenos"] is None and m.group("shebang"):
                # Overridable and Shebang exists - use line numbers
                self.options["linenos"] = True

            self.options["hl_lines"] = parse_hl_lines(m.group("hl_lines"))

        self.src = "\n".join(lines).strip("\n")
class FencedBlockPreprocessor(Preprocessor):
    """Preprocessor that extracts fenced blocks (code/quote/spoiler/math).

    Maintains a stack of handlers so fences can nest (quotes/spoilers may
    contain further fences); formatted HTML is stashed so later Markdown
    stages leave it alone.
    """

    def __init__(self, md: Markdown, run_content_validators: bool = False) -> None:
        super().__init__(md)
        self.checked_for_codehilite = False
        self.run_content_validators = run_content_validators
        self.codehilite_conf: Mapping[str, Sequence[Any]] = {}

    def push(self, handler: ZulipBaseHandler) -> None:
        self.handlers.append(handler)

    def pop(self) -> None:
        self.handlers.pop()

    def run(self, lines: Iterable[str]) -> List[str]:
        """Match and store Fenced Code Blocks in the HtmlStash."""
        output: List[str] = []

        processor = self
        self.handlers: List[ZulipBaseHandler] = []

        # The realm may define a default language for untagged code fences.
        default_language = None
        try:
            default_language = self.md.zulip_realm.default_code_block_language
        except AttributeError:
            pass
        handler = OuterHandler(processor, output, self.run_content_validators, default_language)
        self.push(handler)

        for line in lines:
            self.handlers[-1].handle_line(line)

        # Close any fences still open at end of input (implicit close).
        while self.handlers:
            self.handlers[-1].done()

        # This fiddly handling of new lines at the end of our output was done to make
        # existing tests pass. Markdown is just kind of funny when it comes to new lines,
        # but we could probably remove this hack.
        if len(output) > 2 and output[-2] != "":
            output.append("")
        return output

    def format_code(self, lang: Optional[str], text: str) -> str:
        """Return highlighted (or escaped) HTML for a code block body."""
        if lang:
            langclass = LANG_TAG.format(lang)
        else:
            langclass = ""

        # Check for code hilite extension (lazily, once per processor).
        if not self.checked_for_codehilite:
            for ext in self.md.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.config
                    break
            self.checked_for_codehilite = True

        # If config is not empty, then the codehilite extension
        # is enabled, so we call it to highlight the code.
        if self.codehilite_conf:
            highliter = CodeHilite(
                text,
                linenums=self.codehilite_conf["linenums"][0],
                guess_lang=self.codehilite_conf["guess_lang"][0],
                css_class=self.codehilite_conf["css_class"][0],
                style=self.codehilite_conf["pygments_style"][0],
                use_pygments=self.codehilite_conf["use_pygments"][0],
                lang=(lang or None),
                noclasses=self.codehilite_conf["noclasses"][0],
            )
            code = highliter.hilite().rstrip("\n")
        else:
            code = CODE_WRAP.format(langclass, self._escape(text))

        # To support our "view in playground" feature, the frontend
        # needs to know what Pygments language was used for
        # highlighting this code block. We record this in a data
        # attribute attached to the outer `pre` element.
        # Unfortunately, the pygments API doesn't offer a way to add
        # this, so we need to do it in a post-processing step.
        if lang:
            div_tag = lxml.html.fromstring(code)

            # For the value of our data element, we get the lexer
            # subclass name instead of directly using the language,
            # since that canonicalizes aliases (Eg: `js` and
            # `javascript` will be mapped to `JavaScript`).
            try:
                code_language = find_lexer_class_by_name(lang).name
            except ClassNotFound:
                # If there isn't a Pygments lexer by this name, we
                # still tag it with the user's data-code-language
                # value, since this allows hooking up a "playground"
                # for custom "languages" that aren't known to Pygments.
                code_language = lang

            div_tag.attrib["data-code-language"] = code_language
            code = lxml.html.tostring(div_tag, encoding="unicode")
        return code

    def format_quote(self, text: str) -> str:
        """Prefix every line of the quoted text with "> "."""
        paragraphs = text.split("\n")
        quoted_paragraphs = []
        for paragraph in paragraphs:
            lines = paragraph.split("\n")
            quoted_paragraphs.append("\n".join("> " + line for line in lines))
        return "\n".join(quoted_paragraphs)

    def format_spoiler(self, header: Optional[str], text: str) -> str:
        """Wrap the spoiler body in header/content divs; the HTML scaffolding
        is stashed while the header and body remain visible to Markdown."""
        output = []
        header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
        end_header_start_content_html = '</div><div class="spoiler-content" aria-hidden="true">'
        footer_html = "</div></div>"

        output.append(self.placeholder(header_div_open_html))
        if header is not None:
            output.append(header)
        output.append(self.placeholder(end_header_start_content_html))
        output.append(text)
        output.append(self.placeholder(footer_html))
        return "\n\n".join(output)

    def format_tex(self, text: str) -> str:
        """Render each blank-line-separated paragraph as display-mode TeX,
        falling back to an escaped error span when rendering fails."""
        paragraphs = text.split("\n\n")
        tex_paragraphs = []
        for paragraph in paragraphs:
            html = render_tex(paragraph, is_inline=False)
            if html is not None:
                tex_paragraphs.append(html)
            else:
                tex_paragraphs.append('<span class="tex-error">' + escape(paragraph) + "</span>")
        return "\n\n".join(tex_paragraphs)

    def placeholder(self, code: str) -> str:
        """Stash raw HTML and return the placeholder token for it."""
        return self.md.htmlStash.store(code)

    def _escape(self, txt: str) -> str:
        """Basic HTML escaping.

        Restored the HTML entity strings, which had been decoded back to
        their literal characters (making the replacements no-ops). The
        ampersand must be escaped first so later entities are not mangled.
        """
        txt = txt.replace("&", "&amp;")
        txt = txt.replace("<", "&lt;")
        txt = txt.replace(">", "&gt;")
        txt = txt.replace('"', "&quot;")
        return txt
def makeExtension(*args: Any, **kwargs: Any) -> FencedCodeExtension:
    """Standard python-markdown extension entry point.

    Fixed the keyword-argument annotation: values are arbitrary config
    values, not None.
    """
    return FencedCodeExtension(kwargs)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 32.971223 | 100 | 0.603971 | import re
from typing import Any, Callable, Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence
import lxml.html
from django.utils.html import escape
from markdown import Markdown
from markdown.extensions import Extension, codehilite
from markdown.extensions.codehilite import CodeHiliteExtension, parse_hl_lines
from markdown.preprocessors import Preprocessor
from pygments.lexers import find_lexer_class_by_name
from pygments.util import ClassNotFound
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
from zerver.lib.tex import render_tex
FENCE_RE = re.compile(
r"""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(?:
# language, like ".py" or "{javascript}"
\{?\.?
(?P<lang>
[a-zA-Z0-9_+-./#]+
) # "py" or "javascript"
[ ]* # spaces
# header for features that use fenced block header syntax (like spoilers)
(?P<header>
[^ ~`][^~`]*
)?
\}?
)?
$
""",
re.VERBOSE,
)
CODE_WRAP = "<pre><code{}>{}\n</code></pre>"
LANG_TAG = ' class="{}"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith("curl"):
if re.search(regex, line) is None:
raise MarkdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS: Dict[Optional[str], Callable[[List[str]], None]] = {
"curl": validate_curl_content,
}
class FencedCodeExtension(Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
"run_content_validators": [
config.get("run_content_validators", False),
"Boolean specifying whether to run content validation code in CodeHandler",
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config["run_content_validators"][0]
)
md.preprocessors.register(
processor, "fenced_code_block", PREPROCESSOR_PRIORITES["fenced_code_block"]
)
class ZulipBaseHandler:
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: Optional[str] = None,
process_contents: bool = False,
) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.process_contents = process_contents
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
if self.lines:
text = "\n".join(self.lines)
text = self.format_text(text)
if not self.process_contents:
text = self.processor.placeholder(text)
processed_lines = text.split("\n")
self.output.append("")
self.output.extend(processed_lines)
self.output.append("")
self.processor.pop()
def format_text(self, text: str) -> str:
raise NotImplementedError()
def generic_handler(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
header: Optional[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> ZulipBaseHandler:
if lang is not None:
lang = lang.lower()
if lang in ("quote", "quoted"):
return QuoteHandler(processor, output, fence, default_language)
elif lang == "math":
return TexHandler(processor, output, fence)
elif lang == "spoiler":
return SpoilerHandler(processor, output, fence, header)
else:
return CodeHandler(processor, output, fence, lang, run_content_validators)
def check_for_new_fence(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
line: str,
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group("fence")
lang: Optional[str] = m.group("lang")
header: Optional[str] = m.group("header")
if not lang and default_language:
lang = default_language
handler = generic_handler(
processor, output, fence, lang, header, run_content_validators, default_language
)
processor.push(handler)
else:
output.append(line)
class OuterHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
self.run_content_validators = run_content_validators
self.default_language = default_language
super().__init__(processor, output)
def handle_line(self, line: str) -> None:
check_for_new_fence(
self.processor, self.output, line, self.run_content_validators, self.default_language
)
class CodeHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
run_content_validators: bool = False,
) -> None:
self.lang = lang
self.run_content_validators = run_content_validators
super().__init__(processor, output, fence)
def done(self) -> None:
if self.run_content_validators:
validator = CODE_VALIDATORS.get(self.lang, lambda text: None)
validator(self.lines)
super().done()
def format_text(self, text: str) -> str:
return self.processor.format_code(self.lang, text)
class QuoteHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
default_language: Optional[str] = None,
) -> None:
self.default_language = default_language
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(
self.processor, self.lines, line, default_language=self.default_language
)
def format_text(self, text: str) -> str:
return self.processor.format_quote(text)
class SpoilerHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
spoiler_header: Optional[str],
) -> None:
self.spoiler_header = spoiler_header
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def format_text(self, text: str) -> str:
return self.processor.format_spoiler(self.spoiler_header, text)
class TexHandler(ZulipBaseHandler):
def format_text(self, text: str) -> str:
return self.processor.format_tex(text)
class CodeHilite(codehilite.CodeHilite):
def _parseHeader(self) -> None:
# really confusing when doing anything else in a one-line code
# block that starts with `!` (which would then render as an
# empty code block!). So we disable the feature, by
# overriding this function, which implements it in CodeHilite
# upstream.
# split text into lines
lines = self.src.split("\n")
# Python-Markdown pops out the first line which we are avoiding here.
# Examine first line
fl = lines[0]
c = re.compile(
r"""
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
""",
re.VERBOSE,
)
# Search first line for shebang
m = c.search(fl)
if m:
# We have a match
try:
self.lang = m.group("lang").lower()
except IndexError: # nocoverage
self.lang = None
if self.options["linenos"] is None and m.group("shebang"):
# Overridable and Shebang exists - use line numbers
self.options["linenos"] = True
self.options["hl_lines"] = parse_hl_lines(m.group("hl_lines"))
self.src = "\n".join(lines).strip("\n")
class FencedBlockPreprocessor(Preprocessor):
def __init__(self, md: Markdown, run_content_validators: bool = False) -> None:
super().__init__(md)
self.checked_for_codehilite = False
self.run_content_validators = run_content_validators
self.codehilite_conf: Mapping[str, Sequence[Any]] = {}
def push(self, handler: ZulipBaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
output: List[str] = []
processor = self
self.handlers: List[ZulipBaseHandler] = []
default_language = None
try:
default_language = self.md.zulip_realm.default_code_block_language
except AttributeError:
pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Markdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != "":
output.append("")
return output
def format_code(self, lang: Optional[str], text: str) -> str:
if lang:
langclass = LANG_TAG.format(lang)
else:
langclass = ""
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
# If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlite the code
if self.codehilite_conf:
highliter = CodeHilite(
text,
linenums=self.codehilite_conf["linenums"][0],
guess_lang=self.codehilite_conf["guess_lang"][0],
css_class=self.codehilite_conf["css_class"][0],
style=self.codehilite_conf["pygments_style"][0],
use_pygments=self.codehilite_conf["use_pygments"][0],
lang=(lang or None),
noclasses=self.codehilite_conf["noclasses"][0],
)
code = highliter.hilite().rstrip("\n")
else:
code = CODE_WRAP.format(langclass, self._escape(text))
# To support our "view in playground" feature, the frontend
# needs to know what Pygments language was used for
# highlighting this code block. We record this in a data
# attribute attached to the outer `pre` element.
# Unfortunately, the pygments API doesn't offer a way to add
# this, so we need to do it in a post-processing step.
if lang:
div_tag = lxml.html.fromstring(code)
# For the value of our data element, we get the lexer
# subclass name instead of directly using the language,
# since that canonicalizes aliases (Eg: `js` and
# `javascript` will be mapped to `JavaScript`).
try:
code_language = find_lexer_class_by_name(lang).name
except ClassNotFound:
# If there isn't a Pygments lexer by this name, we
# still tag it with the user's data-code-language
# value, since this allows hooking up a "playground"
# for custom "languages" that aren't known to Pygments.
code_language = lang
div_tag.attrib["data-code-language"] = code_language
code = lxml.html.tostring(div_tag, encoding="unicode")
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines))
return "\n".join(quoted_paragraphs)
def format_spoiler(self, header: Optional[str], text: str) -> str:
output = []
header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
end_header_start_content_html = '</div><div class="spoiler-content" aria-hidden="true">'
footer_html = "</div></div>"
output.append(self.placeholder(header_div_open_html))
if header is not None:
output.append(header)
output.append(self.placeholder(end_header_start_content_html))
output.append(text)
output.append(self.placeholder(footer_html))
return "\n\n".join(output)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' + escape(paragraph) + "</span>")
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.md.htmlStash.store(code)
def _escape(self, txt: str) -> str:
txt = txt.replace("&", "&")
txt = txt.replace("<", "<")
txt = txt.replace(">", ">")
txt = txt.replace('"', """)
return txt
def makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:
return FencedCodeExtension(kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true | true |
f73083f83f4bca4deac6272da174e16b8fa14446 | 3,149 | py | Python | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | null | null | null | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
import mock
from heat.engine.clients.os import glance
from heat.tests import common
from heat.tests import utils
class GlanceUtilsTest(common.HeatTestCase):
    """Basic tests for :module:'heat.engine.resources.clients.os.glance'."""

    def setUp(self):
        super(GlanceUtilsTest, self).setUp()
        self.glance_client = mock.MagicMock()
        con = utils.dummy_context()
        c = con.clients
        self.glance_plugin = c.client_plugin('glance')
        # Route the plugin's client() factory to the mock so no real
        # glance endpoint is contacted.
        self.glance_plugin.client = lambda: self.glance_client
        self.my_image = mock.MagicMock()

    def test_find_image_by_name_or_id(self):
        """Tests the find_image_by_name_or_id function."""
        img_id = str(uuid.uuid4())
        img_name = 'myfakeimage'
        self.my_image.id = img_id
        self.my_image.name = img_name
        # images.get side effects, in call order: lookup by ID succeeds;
        # the three subsequent ID lookups 404, forcing a fall back to
        # images.list for name-based resolution.
        self.glance_client.images.get.side_effect = [
            self.my_image,
            exc.HTTPNotFound(),
            exc.HTTPNotFound(),
            exc.HTTPNotFound()]
        # images.list side effects, in call order: exactly one match (found
        # by name), no match (NotFound), two matches (NoUniqueMatch).
        self.glance_client.images.list.side_effect = [
            [self.my_image],
            [],
            [self.my_image, self.my_image]]
        self.assertEqual(img_id,
                         self.glance_plugin.find_image_by_name_or_id(img_id))
        self.assertEqual(img_id,
                         self.glance_plugin.find_image_by_name_or_id(img_name))
        self.assertRaises(exceptions.NotFound,
                          self.glance_plugin.find_image_by_name_or_id,
                          'noimage')
        self.assertRaises(exceptions.NoUniqueMatch,
                          self.glance_plugin.find_image_by_name_or_id,
                          'myfakeimage')
class ImageConstraintTest(common.HeatTestCase):
    """Tests for the glance ImageConstraint validation wrapper."""

    def setUp(self):
        super(ImageConstraintTest, self).setUp()
        self.ctx = utils.dummy_context()
        # Stub out the plugin's lookup so the constraint's validate() is
        # exercised without a real glance service.
        self.mock_find_image = mock.Mock()
        self.ctx.clients.client_plugin(
            'glance').find_image_by_name_or_id = self.mock_find_image
        self.constraint = glance.ImageConstraint()

    def test_validation(self):
        # Side effects, in call order: found; missing; ambiguous. The
        # constraint should report valid only for the first case.
        self.mock_find_image.side_effect = ["id1",
                                            exceptions.NotFound(),
                                            exceptions.NoUniqueMatch()]
        self.assertTrue(self.constraint.validate("foo", self.ctx))
        self.assertFalse(self.constraint.validate("bar", self.ctx))
        self.assertFalse(self.constraint.validate("baz", self.ctx))
| 38.876543 | 79 | 0.643061 |
import uuid
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
import mock
from heat.engine.clients.os import glance
from heat.tests import common
from heat.tests import utils
class GlanceUtilsTest(common.HeatTestCase):
def setUp(self):
super(GlanceUtilsTest, self).setUp()
self.glance_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.glance_plugin = c.client_plugin('glance')
self.glance_plugin.client = lambda: self.glance_client
self.my_image = mock.MagicMock()
def test_find_image_by_name_or_id(self):
img_id = str(uuid.uuid4())
img_name = 'myfakeimage'
self.my_image.id = img_id
self.my_image.name = img_name
self.glance_client.images.get.side_effect = [
self.my_image,
exc.HTTPNotFound(),
exc.HTTPNotFound(),
exc.HTTPNotFound()]
self.glance_client.images.list.side_effect = [
[self.my_image],
[],
[self.my_image, self.my_image]]
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_id))
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_name))
self.assertRaises(exceptions.NotFound,
self.glance_plugin.find_image_by_name_or_id,
'noimage')
self.assertRaises(exceptions.NoUniqueMatch,
self.glance_plugin.find_image_by_name_or_id,
'myfakeimage')
class ImageConstraintTest(common.HeatTestCase):
def setUp(self):
super(ImageConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_find_image = mock.Mock()
self.ctx.clients.client_plugin(
'glance').find_image_by_name_or_id = self.mock_find_image
self.constraint = glance.ImageConstraint()
def test_validation(self):
self.mock_find_image.side_effect = ["id1",
exceptions.NotFound(),
exceptions.NoUniqueMatch()]
self.assertTrue(self.constraint.validate("foo", self.ctx))
self.assertFalse(self.constraint.validate("bar", self.ctx))
self.assertFalse(self.constraint.validate("baz", self.ctx))
| true | true |
f730842547db0fa4ac95c76cc51c2fc28fb8e9b8 | 7,385 | py | Python | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | 1 | 2019-11-10T18:42:04.000Z | 2019-11-10T18:42:04.000Z | """This module contains the general information for ExportLdapCACertificate ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ExportLdapCACertificateConsts:
PROTOCOL_FTP = "ftp"
PROTOCOL_HTTP = "http"
PROTOCOL_NONE = "none"
PROTOCOL_SCP = "scp"
PROTOCOL_SFTP = "sftp"
PROTOCOL_TFTP = "tftp"
class ExportLdapCACertificate(ManagedObject):
    """This is ExportLdapCACertificate class.

    Managed object describing an export of the LDAP CA certificate to a
    remote server over one of the supported protocols.
    """

    # Enumerations of the allowed values for this MO's properties
    consts = ExportLdapCACertificateConsts()

    # This MO has no naming properties
    naming_props = set([])

    # Managed object metadata, keyed by platform flavor ("classic"/"modular")
    mo_meta = {
        "classic": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"]),
        "modular": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"])
    }

    # Per-property metadata (XML attribute name, type, minimum version,
    # access, mask bit, length/pattern restrictions, allowed values),
    # keyed by platform flavor
    prop_meta = {

        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            # NOTE: classic pattern accepts IPv6, hostnames, and dotted IPv4
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            # NOTE: modular pattern accepts IPv6 forms only (narrower than classic)
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

    }

    # Maps XML attribute names to Python property names, keyed by platform
    prop_map = {

        "classic": {
            "childAction": "child_action",
            "dn": "dn",
            "exportProgress": "export_progress",
            "exportStatus": "export_status",
            "protocol": "protocol",
            "pwd": "pwd",
            "remoteFile": "remote_file",
            "remoteServer": "remote_server",
            "rn": "rn",
            "status": "status",
            "user": "user",
        },

        "modular": {
            "childAction": "child_action",
            "dn": "dn",
            "exportProgress": "export_progress",
            "exportStatus": "export_status",
            "protocol": "protocol",
            "pwd": "pwd",
            "remoteFile": "remote_file",
            "remoteServer": "remote_server",
            "rn": "rn",
            "status": "status",
            "user": "user",
        },

    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initializes an ExportLdapCACertificate MO.

        Args:
            parent_mo_or_dn: the parent ManagedObject or its DN string
            **kwargs: initial values for this MO's properties
        """
        # Bitmask of modified properties (bit values defined in prop_meta)
        self._dirty_mask = 0
        self.child_action = None
        self.export_progress = None
        self.export_status = None
        self.protocol = None
        self.pwd = None
        self.remote_file = None
        self.remote_server = None
        self.status = None
        self.user = None

        ManagedObject.__init__(self, "ExportLdapCACertificate", parent_mo_or_dn, **kwargs)
| 69.018692 | 907 | 0.579282 |
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ExportLdapCACertificateConsts:
    """Allowed values for the ``protocol`` property of
    ExportLdapCACertificate."""
    PROTOCOL_FTP = "ftp"
    PROTOCOL_HTTP = "http"
    PROTOCOL_NONE = "none"
    PROTOCOL_SCP = "scp"
    PROTOCOL_SFTP = "sftp"
    PROTOCOL_TFTP = "tftp"
class ExportLdapCACertificate(ManagedObject):
    """Managed object describing an export of the LDAP CA certificate to a
    remote server over one of the supported protocols."""

    # Enumerations of the allowed values for this MO's properties
    consts = ExportLdapCACertificateConsts()

    # This MO has no naming properties
    naming_props = set([])

    # Managed object metadata, keyed by platform flavor ("classic"/"modular")
    mo_meta = {
        "classic": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"]),
        "modular": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"])
    }

    # Per-property metadata (XML attribute name, type, minimum version,
    # access, mask bit, length/pattern restrictions, allowed values),
    # keyed by platform flavor
    prop_meta = {

        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            # NOTE: classic pattern accepts IPv6, hostnames, and dotted IPv4
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            # NOTE: modular pattern accepts IPv6 forms only (narrower than classic)
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

    }

    # Maps XML attribute names to Python property names, keyed by platform
    prop_map = {

        "classic": {
            "childAction": "child_action",
            "dn": "dn",
            "exportProgress": "export_progress",
            "exportStatus": "export_status",
            "protocol": "protocol",
            "pwd": "pwd",
            "remoteFile": "remote_file",
            "remoteServer": "remote_server",
            "rn": "rn",
            "status": "status",
            "user": "user",
        },

        "modular": {
            "childAction": "child_action",
            "dn": "dn",
            "exportProgress": "export_progress",
            "exportStatus": "export_status",
            "protocol": "protocol",
            "pwd": "pwd",
            "remoteFile": "remote_file",
            "remoteServer": "remote_server",
            "rn": "rn",
            "status": "status",
            "user": "user",
        },

    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initializes an ExportLdapCACertificate MO.

        Args:
            parent_mo_or_dn: the parent ManagedObject or its DN string
            **kwargs: initial values for this MO's properties
        """
        # Bitmask of modified properties (bit values defined in prop_meta)
        self._dirty_mask = 0
        self.child_action = None
        self.export_progress = None
        self.export_status = None
        self.protocol = None
        self.pwd = None
        self.remote_file = None
        self.remote_server = None
        self.status = None
        self.user = None

        ManagedObject.__init__(self, "ExportLdapCACertificate", parent_mo_or_dn, **kwargs)
| true | true |
f730844764dcd79f2f76188b5b0b2bdde3759d30 | 30,151 | py | Python | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | """
Core data structures for working with labels.
Copyright 2017-2022, Voxel51, Inc.
voxel51.com
"""
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
from collections import defaultdict
import logging
import eta.core.serial as etas
import eta.core.utils as etau
logger = logging.getLogger(__name__)
class Labels(etas.Serializable):
    """Abstract base for `eta.core.serial.Serializable` classes that store
    labels of any kind (attributes, objects, frames, events, images, videos,
    etc.).

    Each concrete Labels class has a companion Schema class that describes
    the ontology of the labels it holds.
    """

    def __bool__(self):
        """Returns True if this instance contains any labels."""
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this instance contains no labels whatsoever."""
        raise NotImplementedError("subclasses must implement is_empty")

    @classmethod
    def get_schema_cls(cls):
        """Returns the LabelsSchema class associated with these labels.

        By default, this follows the convention that labels class `<Labels>`
        has schema class `<Labels>Schema` defined in the same module;
        subclasses may override this behavior.

        Returns:
            the LabelsSchema class
        """
        return etau.get_class(etau.get_class_name(cls) + "Schema")

    def get_active_schema(self):
        """Builds a LabelsSchema describing the active schema of these
        labels.

        Returns:
            a LabelsSchema
        """
        return self.get_schema_cls().build_active_schema(self)

    def filter_by_schema(self, schema):
        """Filters these labels by the given schema.

        Args:
            schema: a LabelsSchema
        """
        raise NotImplementedError(
            "subclasses must implement `filter_by_schema()`"
        )
class LabelsSchema(etas.Serializable):
    """Base class for schemas describing Labels classes."""

    def __bool__(self):
        """Returns True if this schema contains labels of any kind."""
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this schema contains no labels of any kind."""
        raise NotImplementedError("subclasses must implement is_empty")

    def add(self, labels):
        """Merges the active schema of the given Labels into this schema.

        Args:
            labels: a Labels instance
        """
        self.merge_schema(self.build_active_schema(labels))

    def add_iterable(self, iterable):
        """Merges each Labels in the given iterable into this schema.

        Args:
            iterable: an iterable of Labels
        """
        for element in iterable:
            self.add(element)

    def validate(self, labels):
        """Validates that the given Labels conform to this schema.

        Args:
            labels: a Labels instance

        Raises:
            LabelsSchemaError: if the labels violate this schema
        """
        raise NotImplementedError("subclasses must implement `validate()`")

    def validate_subset_of_schema(self, schema):
        """Validates that this schema is a subset of the given schema.

        Args:
            schema: a LabelsSchema

        Raises:
            LabelsSchemaError: if this schema is not a subset of the given
                schema
        """
        raise NotImplementedError(
            "subclasses must implement `validate_subset_of_schema()`"
        )

    def validate_schema_type(self, schema):
        """Validates that this schema has the same type as the given schema.

        Args:
            schema: a LabelsSchema

        Raises:
            LabelsSchemaError: if this schema is not of the same type as the
                given schema
        """
        if not isinstance(self, type(schema)):
            raise LabelsSchemaError(
                "Expected `self` to match schema type %s; found %s"
                % (type(self), type(schema))
            )

    def is_valid(self, labels):
        """Returns whether the given Labels conform to this schema.

        Args:
            labels: a Labels instance

        Returns:
            True/False
        """
        try:
            self.validate(labels)
        except LabelsSchemaError:
            return False

        return True

    def is_subset_of_schema(self, schema):
        """Returns whether this schema is a subset of the given schema.

        Args:
            schema: a LabelsSchema

        Returns:
            True/False
        """
        try:
            self.validate_subset_of_schema(schema)
        except LabelsSchemaError:
            return False

        return True

    @classmethod
    def build_active_schema(cls, labels):
        """Builds a LabelsSchema describing the active schema of the given
        labels.

        Args:
            labels: a Labels instance

        Returns:
            a LabelsSchema
        """
        raise NotImplementedError(
            "subclasses must implement `build_active_schema()`"
        )

    def merge_schema(self, schema):
        """Merges the given LabelsSchema into this schema.

        Args:
            schema: a LabelsSchema
        """
        raise NotImplementedError("subclasses must implement `merge_schema()`")
class LabelsSchemaError(Exception):
    """Exception raised when a LabelsSchema is violated."""
class HasLabelsSchema(object):
    """Mixin for Label classes that can optionally store and enforce
    `LabelsSchema`s on their labels.

    For efficiency, schemas are not automatically enforced as labels are
    added; call `validate_schema()` explicitly to check compliance.

    Attributes:
        schema: the enforced LabelsSchema, or None
    """

    def __init__(self, schema=None):
        """Initializes the HasLabelsSchema mixin.

        Args:
            schema: (optional) a LabelsSchema to enforce on the labels. By
                default, no schema is enforced
        """
        self.schema = schema

    @property
    def has_schema(self):
        """Whether a schema is currently being enforced."""
        return self.schema is not None

    def get_schema(self):
        """Returns the currently enforced schema, or None if no schema is
        enforced.

        Returns:
            a LabelsSchema, or None
        """
        return self.schema

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema to the given LabelsSchema.

        Args:
            schema: a LabelsSchema to assign
            filter_by_schema: whether to filter labels that are not
                compliant with the schema. By default, this is False
            validate: whether to validate that the labels (after filtering,
                if applicable) are compliant with the new schema. By
                default, this is False

        Raises:
            LabelsSchemaError: if `validate` was `True` and this object
                contains labels that are not compliant with the schema
        """
        self.schema = schema
        if self.schema is None:
            return

        if filter_by_schema:
            self.filter_by_schema(self.schema)  # pylint: disable=no-member

        if validate:
            self.validate_schema()

    def validate_schema(self):
        """Validates that the labels comply with the current schema, if any.

        Raises:
            LabelsSchemaError: if this object contains labels that are not
                compliant with the schema
        """
        if self.has_schema:
            self.schema.validate(self)

    def freeze_schema(self):
        """Sets the enforced schema to the current active schema."""
        self.set_schema(self.get_active_schema())  # pylint: disable=no-member

    def remove_schema(self):
        """Removes the enforced schema, if any."""
        self.set_schema(None)
class HasLabelsSupport(object):
    """Mixin for Label classes that describe videos and can track their own
    support, i.e., the frames for which they contain labels.

    The support is represented as a `eta.core.frameutils.FrameRanges`
    instance. It is computed on demand each time the `support` property is
    accessed, unless it has been frozen (via `freeze_support()` or an
    explicit `set_support()`), in which case the frozen value is returned.
    """

    def __init__(self, support=None):
        """Initializes the HasLabelsSupport mixin.

        Args:
            support: (optional) a FrameRanges instance describing the frozen
                support of the labels. By default, the support is not frozen
        """
        self._support = support

    @property
    def support(self):
        """A FrameRanges instance describing the frames for which this
        instance contains labels.

        Returns the frozen support if one exists; otherwise the support is
        computed dynamically via `_compute_support()`.
        """
        if self._support is not None:
            return self._support

        return self._compute_support()

    @property
    def is_support_frozen(self):
        """Whether the support is currently frozen."""
        return self._support is not None

    def set_support(self, support):
        """Sets (and freezes) the support to the given value.

        Args:
            support: a FrameRanges
        """
        self._support = support

    def merge_support(self, support):
        """Merges the given support into the current support, freezing the
        result.

        Args:
            support: a FrameRanges
        """
        self.set_support(self.support.merge(support))

    def freeze_support(self):
        """Freezes the support at its current value.

        This optional optimization avoids recomputing the support each time
        `support` is accessed.
        """
        if self._support is None:
            self._support = self._compute_support()

    def clear_support(self):
        """Clears the frozen support, if necessary."""
        self._support = None

    def _compute_support(self):
        """Computes the current support of the labels in this instance.

        Returns:
            a FrameRanges
        """
        raise NotImplementedError(
            "subclasses must implement _compute_support()"
        )
class HasFramewiseView(object):
    """Mixin for video Label classes that support rendering a framewise view
    of themselves via a LabelsFrameRenderer.
    """

    @property
    def framewise_renderer_cls(self):
        """The LabelsFrameRenderer used by this class."""
        raise NotImplementedError(
            "subclasses must implement framewise_renderer_cls()"
        )

    def render_framewise(self, in_place=False):
        """Renders a framewise version of the labels.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a framewise version of the labels
        """
        return self.framewise_renderer_cls(self).render(in_place=in_place)
class HasSpatiotemporalView(object):
    """Mixin for video Label classes that support rendering a spatiotemporal
    view of themselves via a LabelsSpatiotemporalRenderer.
    """

    @property
    def spatiotemporal_renderer_cls(self):
        """The LabelsSpatiotemporalRenderer used by this class."""
        raise NotImplementedError(
            "subclasses must implement spatiotemporal_renderer_cls()"
        )

    def render_spatiotemporal(self, in_place=False):
        """Renders a spatiotemporal version of the labels.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a spatiotemporal version of the labels
        """
        return self.spatiotemporal_renderer_cls(self).render(in_place=in_place)
class LabelsContainer(Labels, HasLabelsSchema, etas.Container):
    """Base class for `eta.core.serial.Container`s of Labels.

    A LabelsContainer may optionally carry a LabelsContainerSchema that
    governs the schema of the labels it contains.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsContainer instance.

        Args:
            schema: an optional LabelsContainerSchema to enforce on the
                labels in this container. By default, no schema is enforced
            **kwargs: valid keyword arguments for `eta.core.serial.Container()`

        Raises:
            LabelsSchemaError: if a schema was provided but the labels added
                to the container violate it
        """
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Container.__init__(self, **kwargs)

    def __bool__(self):
        return etas.Container.__bool__(self)

    @property
    def is_empty(self):
        """Whether this container contains no labels."""
        return etas.Container.is_empty(self)

    def remove_empty_labels(self):
        """Removes all empty Labels from the container."""
        self.filter_elements([lambda element: not element.is_empty])

    def add_container(self, container):
        """Appends the labels in the given LabelsContainer to this container.

        Args:
            container: a LabelsContainer

        Raises:
            LabelsSchemaError: if this container has an enforced schema and
                any labels in the given container violate it
        """
        self.add_iterable(container)

    def attributes(self):
        """Returns the list of class attributes that will be serialized.

        Returns:
            a list of attribute names
        """
        _attrs = ["schema"] if self.has_schema else []
        return _attrs + super(LabelsContainer, self).attributes()

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsContainer from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsContainer
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema = cls.get_schema_cls().from_dict(schema)

        return super(LabelsContainer, cls).from_dict(d, schema=schema)

    def validate_schema(self):
        """Validates that the labels are compliant with the current schema.

        Raises:
            LabelsSchemaError: if the container has labels that are not
                compliant with the schema
        """
        if not self.has_schema:
            return

        for labels in self:
            self._validate_labels(labels)

    def _validate_labels(self, labels):
        # No-op unless a schema is currently enforced
        if self.has_schema:
            self.schema.validate(labels)
class LabelsContainerSchema(LabelsSchema):
    """Base class for schemas of `LabelsContainer`s."""

    def add(self, labels):
        """Incorporates the Labels into the schema.

        Args:
            labels: a Labels instance
        """
        self.merge_schema(labels.get_active_schema())

    def add_container(self, container):
        """Incorporates the given `LabelsContainer`s elements into the
        schema.

        Args:
            container: a LabelsContainer
        """
        self.add_iterable(container)

    def add_iterable(self, iterable):
        """Incorporates the given iterable of Labels into the schema.

        Args:
            iterable: an iterable of Labels
        """
        for labels in iterable:
            self.add(labels)

    @classmethod
    def build_active_schema(cls, container):
        """Builds a LabelsContainerSchema describing the active schema of
        the LabelsContainer.

        Args:
            container: a LabelsContainer

        Returns:
            a LabelsContainerSchema
        """
        schema = cls()
        for labels in container:
            # Bugfix: `add()` expects a Labels instance and computes its
            # active schema internally; previously this passed
            # `labels.get_active_schema()`, which caused `add()` to call
            # `get_active_schema()` on a schema object (AttributeError)
            schema.add(labels)

        return schema
class LabelsContainerSchemaError(LabelsSchemaError):
    """Exception raised when a LabelsContainerSchema is violated."""
class LabelsSet(Labels, HasLabelsSchema, etas.Set):
    """Base class for `eta.core.serial.Set`s of Labels.

    A LabelsSet may optionally carry a LabelsSchema that governs the schemas
    of the Labels it contains.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsSet instance.

        Args:
            schema: an optional LabelsSchema to enforce on each element of
                the set. By default, no schema is enforced
            **kwargs: valid keyword arguments for `eta.core.serial.Set()`

        Raises:
            LabelsSchemaError: if a schema was provided but the labels added
                to the set violate it
        """
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Set.__init__(self, **kwargs)

    def __getitem__(self, key):
        """Gets the Labels for the given key, creating and adding an empty
        Labels for the key if it is not present.

        Args:
            key: the key

        Returns:
            a Labels instance
        """
        if key not in self:
            logger.warning(
                "Key '%s' not found; creating new %s",
                key,
                self._ELE_CLS.__name__,
            )
            # pylint: disable=not-callable
            new_labels = self._ELE_CLS(**{self._ELE_KEY_ATTR: key})
            self.add(new_labels)

        return super(LabelsSet, self).__getitem__(key)

    def __bool__(self):
        return etas.Set.__bool__(self)

    @property
    def is_empty(self):
        """Whether this set contains no labels."""
        return etas.Set.is_empty(self)

    @classmethod
    def get_schema_cls(cls):
        """Gets the schema class for the elements of the set.

        Returns:
            the LabelsSchema class
        """
        return cls._ELE_CLS.get_schema_cls()

    def empty(self):
        """Returns an empty copy of this set, preserving its schema, if any.

        Returns:
            an empty LabelsSet
        """
        return self.__class__(schema=self.schema)

    def remove_empty_labels(self):
        """Removes all empty Labels from the set."""
        self.filter_elements([lambda element: not element.is_empty])

    def add_set(self, labels_set):
        """Adds the elements of the given LabelsSet to this set.

        Args:
            labels_set: a LabelsSet

        Raises:
            LabelsSchemaError: if this set has an enforced schema and any
                labels in the given set violate it
        """
        self.add_iterable(labels_set)

    def get_active_schema(self):
        """Returns a LabelsSchema describing the active schema of the set.

        Returns:
            a LabelsSchema
        """
        schema_cls = self.get_schema_cls()
        schema = schema_cls()
        for element in self:
            schema.merge_schema(schema_cls.build_active_schema(element))

        return schema

    def filter_by_schema(self, schema):
        """Removes labels from the set that are not compliant with the given
        schema.

        Args:
            schema: a LabelsSchema
        """
        for element in self:
            element.filter_by_schema(schema)

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema of this set and of each of its elements.

        Args:
            schema: a LabelsSchema to assign
            filter_by_schema: whether to filter labels that are not
                compliant with the schema. By default, this is False
            validate: whether to validate that the labels (after filtering,
                if applicable) are compliant with the new schema. By
                default, this is False

        Raises:
            LabelsSchemaError: if `validate` was `True` and this object
                contains labels that are not compliant with the schema
        """
        self.schema = schema
        for element in self:
            element.set_schema(
                schema, filter_by_schema=filter_by_schema, validate=validate
            )

    def attributes(self):
        """Returns the list of class attributes that will be serialized.

        Returns:
            a list of attribute names
        """
        _attrs = ["schema"] if self.has_schema else []
        return _attrs + super(LabelsSet, self).attributes()

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsSet from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsSet
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema = cls.get_schema_cls().from_dict(schema)

        return super(LabelsSet, cls).from_dict(d, schema=schema)

    @classmethod
    def from_labels_patt(cls, labels_patt):
        """Creates a LabelsSet from a pattern of Labels files on disk.

        Args:
            labels_patt: a pattern with one or more numeric sequences for
                Labels files on disk

        Returns:
            a LabelsSet
        """
        labels_set = cls()
        for path in etau.get_pattern_matches(labels_patt):
            labels_set.add(cls._ELE_CLS.from_json(path))

        return labels_set

    def validate_schema(self):
        """Validates that the elements of the set are compliant with the
        current schema.

        Raises:
            LabelsSchemaError: if the set has labels that are not compliant
                with the schema
        """
        if not self.has_schema:
            return

        for element in self:
            self._validate_labels(element)

    def _validate_labels(self, labels):
        # No-op unless a schema is currently enforced
        if self.has_schema:
            self.schema.validate(labels)
class LabelsRenderer(object):
    """Interface for classes that render Labels instances in a specified
    format.

    By strict convention, when `in_place == False`, renderers must not
    modify or pass by reference any components of the source labels; any
    labels they produce are deep copies of the source labels.
    """

    # The Labels class that this renderer takes as input.
    # Subclasses MUST set this field
    _LABELS_CLS = None

    @property
    def labels_cls(self):
        """The Labels subclass that this renderer takes as input."""
        return self._LABELS_CLS

    def render(self, in_place=False):
        """Renders the labels in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        raise NotImplementedError("subclasses must implement render()")
class LabelsContainerRenderer(LabelsRenderer):
    """Base class for rendering the labels in a `LabelsContainer` in a
    specified format.

    Subclasses need only define `_LABELS_CLS` and `_ELEMENT_RENDERER_CLS`.
    """

    # The LabelsRenderer class used to render each element of the container.
    # Subclasses MUST set this field.
    _ELEMENT_RENDERER_CLS = None

    def __init__(self, container):
        """Creates a LabelsContainerRenderer instance.

        Args:
            container: a LabelsContainer
        """
        self._container = container

    @property
    def element_renderer_cls(self):
        """The LabelsRenderer class used to render container elements."""
        return self._ELEMENT_RENDERER_CLS

    def render(self, in_place=False):
        """Renders the container in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        return self._render_in_place() if in_place else self._render_copy()

    def _render_in_place(self):
        # Render each element directly into the existing container
        for element in self._container:
            # pylint: disable=not-callable
            self.element_renderer_cls(element).render(in_place=True)
        return self._container

    def _render_copy(self):
        # Build a new container of rendered deep copies of the elements
        output = self._container.empty()
        for element in self._container:
            # pylint: disable=not-callable
            renderer = self.element_renderer_cls(element)
            output.add(renderer.render(in_place=False))
        return output
class LabelsFrameRenderer(LabelsRenderer):
    """Interface for classes that render Labels at the frame-level."""

    # The per-frame Labels class that this renderer outputs.
    # Subclasses MUST set this field.
    _FRAME_LABELS_CLS = None

    @property
    def frame_labels_cls(self):
        """The per-frame Labels class that this renderer outputs."""
        return self._FRAME_LABELS_CLS

    def render_frame(self, frame_number, in_place=False):
        """Renders the labels for the given frame.

        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a `frame_labels_cls` instance, or None if no labels exist for
            the given frame
        """
        raise NotImplementedError("subclasses must implement render_frame()")

    def render_all_frames(self, in_place=False):
        """Renders the labels for all possible frames.

        Args:
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls`
            instances
        """
        raise NotImplementedError(
            "subclasses must implement render_all_frames()"
        )
class LabelsContainerFrameRenderer(
    LabelsFrameRenderer, LabelsContainerRenderer
):
    """Base class for rendering the labels in a `LabelsContainer` at the
    frame-level.

    Subclasses need only define `_LABELS_CLS`, `_FRAME_LABELS_CLS`, and
    `_ELEMENT_RENDERER_CLS`.
    """

    # The LabelsFrameRenderer class used to render container elements.
    # Subclasses MUST set this field.
    _ELEMENT_RENDERER_CLS = None

    @property
    def element_renderer_cls(self):
        """The LabelsFrameRenderer class used to render container
        elements.
        """
        return self._ELEMENT_RENDERER_CLS

    def render_frame(self, frame_number, in_place=False):
        """Renders the container for the given frame.

        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a `frame_labels_cls` instance, which may be empty if no labels
            exist for the specified frame
        """
        rendered = self.frame_labels_cls()  # pylint: disable=not-callable
        for element in self._container:
            # pylint: disable=not-callable
            result = self.element_renderer_cls(element).render_frame(
                frame_number, in_place=in_place
            )
            if result is not None:
                rendered.add(result)
        return rendered

    def render_all_frames(self, in_place=False):
        """Renders the container for all possible frames.

        Args:
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls`
            instances
        """
        # pylint: disable=not-callable
        per_frame = defaultdict(self.frame_labels_cls)
        for element in self._container:
            # pylint: disable=not-callable
            frames = self.element_renderer_cls(element).render_all_frames(
                in_place=in_place
            )
            for num, rendered in iteritems(frames):
                per_frame[num].add(rendered)
        return dict(per_frame)
class LabelsSpatiotemporalRenderer(LabelsRenderer):
    """Interface for classes that render Labels in spatiotemporal format."""

    # Marker interface: adds no methods beyond LabelsRenderer
    pass
class LabelsContainerSpatiotemporalRenderer(
    LabelsSpatiotemporalRenderer, LabelsContainerRenderer
):
    """Base class for rendering labels for `LabelsContainer`s in spatiotemporal
    format.
    The only thing that subclasses need to do to implement this interface is
    to define their `_LABELS_CLS` and `_ELEMENT_RENDERER_CLS`.
    """

    # All rendering machinery is inherited from LabelsContainerRenderer
    pass
| 30.000995 | 79 | 0.629631 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
import logging
import eta.core.serial as etas
import eta.core.utils as etau
logger = logging.getLogger(__name__)
class Labels(etas.Serializable):
    """Base class for `Serializable` classes that hold labels."""

    def __bool__(self):
        # A Labels instance is truthy iff it contains any labels
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this instance has no labels."""
        raise NotImplementedError("subclasses must implement is_empty")

    @classmethod
    def get_schema_cls(cls):
        """Gets the schema class for this labels class.

        By convention, the schema class is named `<LabelsClass>Schema`.
        """
        class_name = etau.get_class_name(cls)
        return etau.get_class(class_name + "Schema")

    def get_active_schema(self):
        """Returns a schema describing the active labels of this instance."""
        schema_cls = self.get_schema_cls()
        return schema_cls.build_active_schema(self)

    def filter_by_schema(self, schema):
        """Filters the labels by the given schema."""
        raise NotImplementedError(
            "subclasses must implement `filter_by_schema()`"
        )
class LabelsSchema(etas.Serializable):
    """Base class for schemas that describe the taxonomy of Labels."""

    def __bool__(self):
        # A schema is truthy iff it contains any entries
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this schema has no labels of any kind."""
        raise NotImplementedError("subclasses must implement is_empty")

    def add(self, labels):
        """Incorporates the active schema of the given Labels into this
        schema.

        Args:
            labels: a Labels instance
        """
        labels_schema = self.build_active_schema(labels)
        self.merge_schema(labels_schema)

    def add_iterable(self, iterable):
        """Incorporates each Labels in the iterable into this schema.

        Args:
            iterable: an iterable of Labels
        """
        for labels in iterable:
            self.add(labels)

    def validate(self, labels):
        """Validates that the Labels are compliant with this schema.

        Raises:
            LabelsSchemaError: if the labels violate the schema
        """
        raise NotImplementedError("subclasses must implement `validate()`")

    def validate_subset_of_schema(self, schema):
        """Validates that this schema is a subset of the given schema.

        Raises:
            LabelsSchemaError: if this schema is not a subset
        """
        raise NotImplementedError(
            "subclasses must implement `validate_subset_of_schema()`"
        )

    def validate_schema_type(self, schema):
        """Validates that the given schema has the same type as this one.

        Raises:
            LabelsSchemaError: if the schema types do not match
        """
        if not isinstance(self, type(schema)):
            raise LabelsSchemaError(
                "Expected `self` to match schema type %s; found %s"
                % (type(self), type(schema))
            )

    def is_valid(self, labels):
        """Whether the Labels are compliant with this schema."""
        try:
            self.validate(labels)
            return True
        except LabelsSchemaError:
            return False

    def is_subset_of_schema(self, schema):
        """Whether this schema is a subset of the given schema."""
        try:
            self.validate_subset_of_schema(schema)
            return True
        except LabelsSchemaError:
            return False

    @classmethod
    def build_active_schema(cls, labels):
        """Builds a schema describing the active labels of the given Labels.

        Args:
            labels: a Labels instance

        Returns:
            a LabelsSchema
        """
        raise NotImplementedError(
            "subclasses must implement `build_active_schema()`"
        )

    def merge_schema(self, schema):
        """Merges the given schema into this schema."""
        raise NotImplementedError("subclasses must implement `merge_schema()`")
class LabelsSchemaError(Exception):
    """Error raised when a LabelsSchema is violated."""

    pass
class HasLabelsSchema(object):
    """Mixin for Labels classes that can optionally carry a schema."""

    def __init__(self, schema=None):
        """Initializes the mixin.

        Args:
            schema: an optional LabelsSchema to enforce. By default, no
                schema is enforced
        """
        # Public attribute holding the (possibly None) enforced schema
        self.schema = schema

    @property
    def has_schema(self):
        """Whether a schema is currently enforced."""
        return self.schema is not None

    def get_schema(self):
        """Returns the current schema, or None if no schema is enforced."""
        return self.schema

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema.

        Args:
            schema: a LabelsSchema, or None to remove the schema
            filter_by_schema: whether to filter existing labels that are not
                compliant with the schema. By default, this is False
            validate: whether to validate existing labels against the schema
                after filtering. By default, this is False
        """
        self.schema = schema
        if not self.has_schema:
            return
        if filter_by_schema:
            self.filter_by_schema(self.schema)
        if validate:
            self.validate_schema()

    def validate_schema(self):
        """Validates this instance against the schema, if one is set."""
        if self.has_schema:
            self.schema.validate(self)

    def freeze_schema(self):
        """Sets the schema to the current active schema of this instance."""
        self.set_schema(self.get_active_schema())

    def remove_schema(self):
        """Removes the enforced schema, if any."""
        self.set_schema(None)
class HasLabelsSupport(object):
    """Mixin for Labels classes whose labels are defined over a support
    (set of frames), which can be computed on-the-fly or frozen.
    """

    def __init__(self, support=None):
        """Initializes the mixin.

        Args:
            support: an optional frozen support. By default, the support is
                computed on-the-fly
        """
        # None means "not frozen"; compute the support on demand
        self._support = support

    @property
    def support(self):
        """The support of the labels; frozen value if set, otherwise
        computed on-the-fly.
        """
        if self.is_support_frozen:
            return self._support
        return self._compute_support()

    @property
    def is_support_frozen(self):
        """Whether the support is currently frozen."""
        return self._support is not None

    def set_support(self, support):
        """Sets (freezes) the support to the given value."""
        self._support = support

    def merge_support(self, support):
        """Merges the given support into the current support and freezes the
        result.
        """
        new_support = self.support.merge(support)
        self.set_support(new_support)

    def freeze_support(self):
        """Freezes the support at its currently computed value, if it is not
        already frozen.
        """
        if not self.is_support_frozen:
            self._support = self._compute_support()

    def clear_support(self):
        """Unfreezes the support so it is computed on-the-fly again."""
        self._support = None

    def _compute_support(self):
        # Subclasses define how the support is derived from the labels
        raise NotImplementedError(
            "subclasses must implement _compute_support()"
        )
class HasFramewiseView(object):
    """Mixin for Labels classes that can be rendered in framewise format."""

    @property
    def framewise_renderer_cls(self):
        """The LabelsRenderer class used to render framewise views."""
        raise NotImplementedError(
            "subclasses must implement framewise_renderer_cls()"
        )

    def render_framewise(self, in_place=False):
        """Renders this instance in framewise format.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        """
        renderer = self.framewise_renderer_cls(self)
        return renderer.render(in_place=in_place)
class HasSpatiotemporalView(object):
    """Mixin for Labels classes that can be rendered in spatiotemporal
    format.
    """

    @property
    def spatiotemporal_renderer_cls(self):
        """The LabelsRenderer class used to render spatiotemporal views."""
        raise NotImplementedError(
            "subclasses must implement spatiotemporal_renderer_cls()"
        )

    def render_spatiotemporal(self, in_place=False):
        """Renders this instance in spatiotemporal format.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        """
        renderer = self.spatiotemporal_renderer_cls(self)
        return renderer.render(in_place=in_place)
class LabelsContainer(Labels, HasLabelsSchema, etas.Container):
    """Base class for containers that hold Labels, with optional schema
    enforcement.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsContainer.

        Args:
            schema: an optional LabelsSchema to enforce
            **kwargs: keyword arguments for `etas.Container.__init__`
        """
        # Explicit chaining: initialize the schema mixin, then the container
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Container.__init__(self, **kwargs)

    def __bool__(self):
        # Truthiness follows the container contents, not the schema
        return etas.Container.__bool__(self)

    @property
    def is_empty(self):
        """Whether this container has no labels."""
        return etas.Container.is_empty(self)

    def remove_empty_labels(self):
        """Removes all empty Labels from the container."""
        self.filter_elements([lambda labels: not labels.is_empty])

    def add_container(self, container):
        """Adds all Labels in the given container to this container."""
        self.add_iterable(container)

    def attributes(self):
        """Returns the list of attributes to serialize; the schema is
        included only when one is set.
        """
        _attrs = []
        if self.has_schema:
            _attrs.append("schema")
        _attrs += super(LabelsContainer, self).attributes()
        return _attrs

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsContainer from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsContainer
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema_cls = cls.get_schema_cls()
            schema = schema_cls.from_dict(schema)
        return super(LabelsContainer, cls).from_dict(d, schema=schema)

    def validate_schema(self):
        """Validates every Labels in the container against the schema, if
        one is set.

        Raises:
            LabelsSchemaError: if any labels violate the schema
        """
        if self.has_schema:
            for labels in self:
                self._validate_labels(labels)

    def _validate_labels(self, labels):
        # Validate a single element against the schema, if any
        if self.has_schema:
            self.schema.validate(labels)
class LabelsContainerSchema(LabelsSchema):
    """Base class for schemas describing the labels in a LabelsContainer."""

    def add(self, labels):
        """Incorporates the active schema of the given Labels."""
        self.merge_schema(labels.get_active_schema())

    def add_container(self, container):
        """Incorporates every Labels in the given container."""
        self.add_iterable(container)

    def add_iterable(self, iterable):
        """Incorporates every Labels in the given iterable."""
        for labels in iterable:
            self.add(labels)

    @classmethod
    def build_active_schema(cls, container):
        """Builds a schema describing the active labels of the given
        container.

        Args:
            container: a LabelsContainer

        Returns:
            a LabelsContainerSchema
        """
        schema = cls()
        for labels in container:
            schema.add(labels.get_active_schema())
        return schema
class LabelsContainerSchemaError(LabelsSchemaError):
    """Error raised when a LabelsContainerSchema is violated."""

    pass
class LabelsSet(Labels, HasLabelsSchema, etas.Set):
    """Base class for keyed sets of Labels, with optional schema
    enforcement.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsSet.

        Args:
            schema: an optional LabelsSchema to enforce
            **kwargs: keyword arguments for `etas.Set.__init__`
        """
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Set.__init__(self, **kwargs)

    def __getitem__(self, key):
        """Returns the Labels for the given key, creating an empty element
        if the key does not yet exist.
        """
        if key not in self:
            logger.warning(
                "Key '%s' not found; creating new %s",
                key,
                self._ELE_CLS.__name__,
            )
            # Construct an empty element whose key attribute is `key`
            labels = self._ELE_CLS(**{self._ELE_KEY_ATTR: key})
            self.add(labels)
        return super(LabelsSet, self).__getitem__(key)

    def __bool__(self):
        return etas.Set.__bool__(self)

    @property
    def is_empty(self):
        """Whether this set has no labels."""
        return etas.Set.is_empty(self)

    @classmethod
    def get_schema_cls(cls):
        """Gets the schema class, which is the schema class of the element
        class.
        """
        return cls._ELE_CLS.get_schema_cls()

    def empty(self):
        """Returns an empty copy of this set with the same schema."""
        return self.__class__(schema=self.schema)

    def remove_empty_labels(self):
        """Removes all empty Labels from the set."""
        self.filter_elements([lambda labels: not labels.is_empty])

    def add_set(self, labels_set):
        """Adds all Labels in the given set to this set."""
        self.add_iterable(labels_set)

    def get_active_schema(self):
        """Returns a schema describing the active labels of all elements."""
        schema_cls = self.get_schema_cls()
        schema = schema_cls()
        for labels in self:
            schema.merge_schema(schema_cls.build_active_schema(labels))
        return schema

    def filter_by_schema(self, schema):
        """Filters every element of the set by the given schema."""
        for labels in self:
            labels.filter_by_schema(schema)

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the schema on the set and propagates it to every element.

        Args:
            schema: a LabelsSchema, or None
            filter_by_schema: whether to filter non-compliant labels
            validate: whether to validate labels against the schema
        """
        self.schema = schema
        for labels in self:
            labels.set_schema(
                schema, filter_by_schema=filter_by_schema, validate=validate
            )

    def attributes(self):
        """Returns the list of attributes to serialize; the schema is
        included only when one is set.
        """
        _attrs = []
        if self.has_schema:
            _attrs.append("schema")
        _attrs += super(LabelsSet, self).attributes()
        return _attrs

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsSet from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsSet
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema_cls = cls.get_schema_cls()
            schema = schema_cls.from_dict(schema)
        return super(LabelsSet, cls).from_dict(d, schema=schema)

    @classmethod
    def from_labels_patt(cls, labels_patt):
        """Creates a LabelsSet from a pattern of Labels files on disk.

        Args:
            labels_patt: a pattern with one or more numeric sequences for
                Labels files on disk

        Returns:
            a LabelsSet
        """
        labels_set = cls()
        for labels_path in etau.get_pattern_matches(labels_patt):
            labels_set.add(cls._ELE_CLS.from_json(labels_path))
        return labels_set

    def validate_schema(self):
        """Validates every element against the schema, if one is set.

        Raises:
            LabelsSchemaError: if any labels violate the schema
        """
        if self.has_schema:
            for labels in self:
                self._validate_labels(labels)

    def _validate_labels(self, labels):
        # Validate a single element against the schema, if any
        if self.has_schema:
            self.schema.validate(labels)
class LabelsRenderer(object):
    """Interface for classes that render Labels instances in a specified
    format.
    """

    # The Labels class that this renderer takes as input.
    # Subclasses MUST set this field.
    _LABELS_CLS = None

    @property
    def labels_cls(self):
        """The Labels subclass that this renderer takes as input."""
        return self._LABELS_CLS

    def render(self, in_place=False):
        """Renders the labels in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        raise NotImplementedError("subclasses must implement render()")
class LabelsContainerRenderer(LabelsRenderer):
    """Base class for rendering the labels in a `LabelsContainer` in a
    specified format.
    """

    # The LabelsRenderer class used to render each element of the container.
    # Subclasses MUST set this field.
    _ELEMENT_RENDERER_CLS = None

    def __init__(self, container):
        """Creates a LabelsContainerRenderer instance.

        Args:
            container: a LabelsContainer
        """
        self._container = container

    @property
    def element_renderer_cls(self):
        """The LabelsRenderer class used to render container elements."""
        return self._ELEMENT_RENDERER_CLS

    def render(self, in_place=False):
        """Renders the container in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        if in_place:
            return self._render_in_place()
        return self._render_copy()

    def _render_in_place(self):
        # Render each element directly into the existing container
        for labels in self._container:
            renderer = self.element_renderer_cls(labels)
            renderer.render(in_place=True)
        return self._container

    def _render_copy(self):
        # Build a new container of rendered copies of the elements
        new_container = self._container.empty()
        for labels in self._container:
            renderer = self.element_renderer_cls(labels)
            element = renderer.render(in_place=False)
            new_container.add(element)
        return new_container
class LabelsFrameRenderer(LabelsRenderer):
    """Interface for classes that render Labels at the frame-level."""

    # The per-frame Labels class that this renderer outputs.
    # Subclasses MUST set this field.
    _FRAME_LABELS_CLS = None

    @property
    def frame_labels_cls(self):
        """The per-frame Labels class that this renderer outputs."""
        return self._FRAME_LABELS_CLS

    def render_frame(self, frame_number, in_place=False):
        """Renders the labels for the given frame.

        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `frame_labels_cls` instance, or None if no labels exist for
            the given frame
        """
        raise NotImplementedError("subclasses must implement render_frame()")

    def render_all_frames(self, in_place=False):
        """Renders the labels for all possible frames.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls`
            instances
        """
        raise NotImplementedError(
            "subclasses must implement render_all_frames()"
        )
class LabelsContainerFrameRenderer(
    LabelsFrameRenderer, LabelsContainerRenderer
):
    """Base class for rendering the labels in a `LabelsContainer` at the
    frame-level.
    """

    # The LabelsFrameRenderer class used to render container elements.
    # Subclasses MUST set this field.
    _ELEMENT_RENDERER_CLS = None

    @property
    def element_renderer_cls(self):
        """The LabelsFrameRenderer class used to render container
        elements.
        """
        return self._ELEMENT_RENDERER_CLS

    def render_frame(self, frame_number, in_place=False):
        """Renders the container for the given frame.

        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `frame_labels_cls` instance, which may be empty if no labels
            exist for the specified frame
        """
        frame_elements = self.frame_labels_cls()
        for labels in self._container:
            renderer = self.element_renderer_cls(labels)
            frame_element = renderer.render_frame(
                frame_number, in_place=in_place
            )
            # Elements with no labels for this frame return None
            if frame_element is not None:
                frame_elements.add(frame_element)
        return frame_elements

    def render_all_frames(self, in_place=False):
        """Renders the container for all possible frames.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls`
            instances
        """
        # Group rendered per-element frames by frame number
        frame_elements_map = defaultdict(self.frame_labels_cls)
        for labels in self._container:
            renderer = self.element_renderer_cls(labels)
            frames_map = renderer.render_all_frames(in_place=in_place)
            for frame_number, frame_element in iteritems(frames_map):
                frame_elements_map[frame_number].add(frame_element)
        return dict(frame_elements_map)
class LabelsSpatiotemporalRenderer(LabelsRenderer):
    """Interface for classes that render Labels in spatiotemporal format."""

    pass
class LabelsContainerSpatiotemporalRenderer(
    LabelsSpatiotemporalRenderer, LabelsContainerRenderer
):
    """Base class for rendering the labels in a `LabelsContainer` in
    spatiotemporal format.
    """

    pass
| true | true |
f73084dd6dc2a8e45a03307bb76941dc0e054034 | 2,458 | py | Python | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | null | null | null | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | 14 | 2021-03-19T04:37:26.000Z | 2022-03-12T00:23:16.000Z | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning-lab | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Classify an image with a TFLite model and print the top-k labels.
# from tflite_runtime.interpreter import Interpreter
from tensorflow.lite.python.interpreter import Interpreter
import numpy as np
import argparse
from PIL import Image

parser = argparse.ArgumentParser(description='Image Classification')
parser.add_argument('--filename', type=str, help='Specify the filename', required=True)
parser.add_argument('--model_path', type=str, help='Specify the model path', required=True)
parser.add_argument('--label_path', type=str, help='Specify the label map', required=True)
parser.add_argument('--top_k', type=int, help='How many top results', default=3)

args = parser.parse_args()
filename = args.filename
model_path = args.model_path
label_path = args.label_path
top_k_results = args.top_k

# Read the class names, one label per line
with open(label_path, 'r') as f:
    labels = list(map(str.strip, f.readlines()))

# Load TFLite model and allocate tensors
interpreter = Interpreter(model_path=model_path)
interpreter.allocate_tensors()

# Get input and output tensor metadata
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Read image
img = Image.open(filename).convert('RGB')

# Get input size; the tensor shape is either (H, W, C) or (N, H, W, C)
input_shape = input_details[0]['shape']
size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]

# Preprocess image.
# NOTE(review): PIL's resize() expects (width, height) while the tensor
# shape gives (height, width); this only matters for non-square inputs,
# which typical classification models do not have — confirm if reused.
img = img.resize(tuple(size))

# Fix: cast to the dtype the model expects (uint8 for quantized models,
# float32 otherwise); the original always fed uint8 pixels, which fails
# for float-input models. Float models may additionally expect mean/std
# normalization — that is model-specific and not applied here.
img = np.array(img, dtype=input_details[0]['dtype'])

# Add a batch dimension
input_data = np.expand_dims(img, axis=0)

# Point the data to be used for testing and run the interpreter
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()

# Obtain results and map them to the classes
predictions = interpreter.get_tensor(output_details[0]['index'])[0]

# Fix: only rescale quantized uint8 scores to [0, 1]; float outputs are
# already probabilities/logits and must not be divided by 255
if predictions.dtype == np.uint8:
    predictions = predictions / 255.0

# Get indices of the top k results; iterating the index array (rather than
# range(top_k_results)) avoids an IndexError when top_k > number of classes
top_k_indices = np.argsort(predictions)[::-1][:top_k_results]
for idx in top_k_indices:
    print(labels[idx], predictions[idx])
| 34.619718 | 91 | 0.767697 |
# Classify an image with a TFLite model and print the top-k labels.
from tensorflow.lite.python.interpreter import Interpreter
import numpy as np
import argparse
from PIL import Image

# Command-line interface: input image, model, label map, and result count
parser = argparse.ArgumentParser(description='Image Classification')
parser.add_argument('--filename', type=str, help='Specify the filename', required=True)
parser.add_argument('--model_path', type=str, help='Specify the model path', required=True)
parser.add_argument('--label_path', type=str, help='Specify the label map', required=True)
parser.add_argument('--top_k', type=int, help='How many top results', default=3)
args = parser.parse_args()
filename = args.filename
model_path = args.model_path
label_path = args.label_path
top_k_results = args.top_k

# Read the class names, one label per line
with open(label_path, 'r') as f:
    labels = list(map(str.strip, f.readlines()))

# Load the TFLite model and allocate its tensors
interpreter = Interpreter(model_path=model_path)
interpreter.allocate_tensors()

# Input/output tensor metadata (shape, dtype, tensor indices)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Load the image as RGB
img = Image.open(filename).convert('RGB')

# Spatial input size; the tensor shape is either (H, W, C) or (N, H, W, C)
input_shape = input_details[0]['shape']
size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]

# Resize to the model's input size and add a batch dimension
img = img.resize(size)
img = np.array(img)
input_data = np.expand_dims(img, axis=0)

# Run inference
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()

# First (only) batch element of the output scores
predictions = interpreter.get_tensor(output_details[0]['index'])[0]

# Indices of the k highest scores, descending
top_k_indices = np.argsort(predictions)[::-1][:top_k_results]
for i in range(top_k_results):
    # NOTE(review): dividing by 255 assumes a quantized uint8 output —
    # confirm the model is quantized before reusing this scaling
    print(labels[top_k_indices[i]], predictions[top_k_indices[i]] / 255.0)
| true | true |
f73086577458af1fa28ed63eff9c394107997af2 | 29,942 | py | Python | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | 17 | 2020-04-22T11:24:11.000Z | 2022-03-14T10:03:02.000Z | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | null | null | null | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | 7 | 2020-06-30T20:30:28.000Z | 2021-07-28T02:25:27.000Z | # coding=utf-8
import json
import threading
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from binance.client import Client
class BinanceClientProtocol(WebSocketClientProtocol):
    """WebSocket protocol that decodes JSON text frames and forwards them to
    the factory's callback.
    """

    def __init__(self):
        # Bug fix: the original called super(WebSocketClientProtocol, self),
        # which starts the MRO lookup *after* WebSocketClientProtocol and so
        # skips the parent's own __init__. Naming this class runs the full
        # initialization chain.
        super(BinanceClientProtocol, self).__init__()

    def onConnect(self, response):
        """Resets the factory's reconnect backoff after a successful
        connection.
        """
        self.factory.resetDelay()

    def onMessage(self, payload, isBinary):
        """Parses a text frame as JSON and passes it to the factory callback.

        Binary frames and frames with malformed JSON are silently ignored.
        """
        if isBinary:
            return
        try:
            payload_obj = json.loads(payload.decode('utf8'))
        except ValueError:
            # Not valid JSON; drop the frame
            pass
        else:
            self.factory.callback(payload_obj)
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
    """Reconnecting factory with retry settings tuned for Binance streams."""

    # set initial delay to a short time
    initialDelay = 0.1

    # cap the exponential backoff between attempts at 10 seconds
    maxDelay = 10

    # give up after this many retries (checked by BinanceClientFactory)
    maxRetries = 5
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
    """WebSocket client factory that retries failed/dropped connections and
    emits an error payload to the callback once the retry budget is
    exhausted.
    """

    protocol = BinanceClientProtocol

    # Payload delivered to the user callback when reconnection gives up
    _reconnect_error_payload = {
        'e': 'error',
        'm': 'Max reconnect retries reached'
    }

    def _retry_or_give_up(self, connector):
        # Shared logic for both failure paths (previously duplicated):
        # schedule a retry and, if the budget is now exhausted, notify the
        # callback with the error payload
        self.retry(connector)
        if self.retries > self.maxRetries:
            self.callback(self._reconnect_error_payload)

    def clientConnectionFailed(self, connector, reason):
        """Called when the initial connection attempt fails."""
        self._retry_or_give_up(connector)

    def clientConnectionLost(self, connector, reason):
        """Called when an established connection drops."""
        self._retry_or_give_up(connector)
class BinanceSocketManager(threading.Thread):
STREAM_URL = 'wss://stream.binance.com:9443/'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
    def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
        """Initialise the BinanceSocketManager

        :param client: Binance API client
        :type client: binance.Client
        :param user_timeout: Custom websocket timeout
        :type user_timeout: int
        """
        threading.Thread.__init__(self)
        self._conns = {}  # connection key -> twisted connector
        self._client = client
        self._user_timeout = user_timeout  # keepalive interval, in seconds
        # Per account-socket type ('user'/'margin'): keepalive timer,
        # current listen key, and user callback
        self._timers = {'user': None, 'margin': None}
        self._listen_keys = {'user': None, 'margin': None}
        self._account_callbacks = {'user': None, 'margin': None}
    def _start_socket(self, path, callback, prefix='ws/'):
        """Opens a websocket connection for `path` unless one already exists.

        :param path: stream path appended to the stream URL; also used as
            the connection key
        :param callback: function invoked with each decoded message
        :param prefix: URL prefix — 'ws/' for single streams, 'stream?' for
            multiplexed streams
        :returns: the connection key (`path`) if started, False if a
            connection for `path` is already open
        """
        if path in self._conns:
            return False
        factory_url = self.STREAM_URL + prefix + path
        factory = BinanceClientFactory(factory_url)
        factory.protocol = BinanceClientProtocol
        factory.callback = callback
        factory.reconnect = True
        context_factory = ssl.ClientContextFactory()
        self._conns[path] = connectWS(factory, context_factory)
        return path
def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback)
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_miniticker_socket(self, callback, update_time=1000):
"""Start a miniticker websocket for all trades
This is not in the official Binance api docs, but this is what
feeds the right column on a ticker page on Binance.
:param callback: callback function to handle messages
:type callback: function
:param update_time: time between callbacks in milliseconds, must be 1000 or greater
:type update_time: int
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'e': '24hrMiniTicker', # Event type
'E': 1515906156273, # Event time
's': 'QTUMETH', # Symbol
'c': '0.03836900', # close
'o': '0.03953500', # open
'h': '0.04400000', # high
'l': '0.03756000', # low
'v': '147435.80000000', # volume
'q': '5903.84338533' # quote volume
}
]
"""
return self._start_socket('!miniTicker@arr@{}ms'.format(update_time), callback)
def start_trade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "trade", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"t": 12345, # Trade ID
"p": "0.001", # Price
"q": "100", # Quantity
"b": 88, # Buyer order Id
"a": 50, # Seller order Id
"T": 123456785, # Trade time
"m": true, # Is the buyer the market maker?
"M": true # Ignore.
}
"""
return self._start_socket(symbol.lower() + '@trade', callback)
def start_aggtrade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "aggTrade", # event type
"E": 1499405254326, # event time
"s": "ETHBTC", # symbol
"a": 70232, # aggregated tradeid
"p": "0.10281118", # price
"q": "8.15632997", # quantity
"f": 77489, # first breakdown trade id
"l": 77489, # last breakdown trade id
"T": 1499405254324, # trade time
"m": false, # whether buyer is a maker
"M": true # can be ignored
}
"""
return self._start_socket(symbol.lower() + '@aggTrade', callback)
def start_symbol_ticker_socket(self, symbol, callback):
"""Start a websocket for a symbol's ticker data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "24hrTicker", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"p": "0.0015", # Price change
"P": "250.00", # Price change percent
"w": "0.0018", # Weighted average price
"x": "0.0009", # Previous day's close price
"c": "0.0025", # Current day's close price
"Q": "10", # Close trade's quantity
"b": "0.0024", # Best bid price
"B": "10", # Bid bid quantity
"a": "0.0026", # Best ask price
"A": "100", # Best ask quantity
"o": "0.0010", # Open price
"h": "0.0025", # High price
"l": "0.0010", # Low price
"v": "10000", # Total traded base asset volume
"q": "18", # Total traded quote asset volume
"O": 0, # Statistics open time
"C": 86400000, # Statistics close time
"F": 0, # First trade ID
"L": 18150, # Last trade Id
"n": 18151 # Total number of trades
}
"""
return self._start_socket(symbol.lower() + '@ticker', callback)
def start_ticker_socket(self, callback):
"""Start a websocket for all ticker data
By default all markets are included in an array.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-market-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'F': 278610,
'o': '0.07393000',
's': 'BCCBTC',
'C': 1509622420916,
'b': '0.07800800',
'l': '0.07160300',
'h': '0.08199900',
'L': 287722,
'P': '6.694',
'Q': '0.10000000',
'q': '1202.67106335',
'p': '0.00494900',
'O': 1509536020916,
'a': '0.07887800',
'n': 9113,
'B': '1.00000000',
'c': '0.07887900',
'x': '0.07399600',
'w': '0.07639068',
'A': '2.41900000',
'v': '15743.68900000'
}
]
"""
return self._start_socket('!ticker@arr', callback)
def start_symbol_book_ticker_socket(self, symbol, callback):
"""Start a websocket for the best bid or ask's price or quantity for a specified symbol.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-book-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"u":400900217, // order book updateId
"s":"BNBUSDT", // symbol
"b":"25.35190000", // best bid price
"B":"31.21000000", // best bid qty
"a":"25.36520000", // best ask price
"A":"40.66000000" // best ask qty
}
"""
return self._start_socket(symbol.lower() + '@bookTicker', callback)
def start_book_ticker_socket(self, callback):
"""Start a websocket for the best bid or ask's price or quantity for all symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-book-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
// Same as <symbol>@bookTicker payload
}
"""
return self._start_socket('!bookTicker', callback)
def start_multiplex_socket(self, streams, callback):
"""Start a multiplexed socket using a list of socket names.
User stream sockets can not be included.
Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
:param streams: list of stream names in lower case
:type streams: list
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
stream_path = 'streams={}'.format('/'.join(streams))
return self._start_socket(stream_path, callback, 'stream?')
def start_user_socket(self, callback):
"""Start a websocket for user data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/user-data-stream.md
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user listen key
user_listen_key = self._client.stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket('user', user_listen_key, callback)
def start_margin_socket(self, callback):
"""Start a websocket for margin data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/user-data-stream.md
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user margin listen key
margin_listen_key = self._client.margin_stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket('margin', margin_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
"""Starts one of user or margin socket"""
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(self._user_timeout, callback, [socket_type])
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == 'user':
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['user']:
self._stop_account_socket('user')
# or a margin stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['margin']:
self._stop_account_socket('margin')
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error about reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
class BinanceFuturesSocketManager(threading.Thread):
STREAM_URL = "wss://fstream.binance.com/"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
"""Initialise the BinanceSocketManager
:param client: Binance API client
:type client: binance.Client
:param user_timeout: Custom websocket timeout
:type user_timeout: int
"""
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {"user": None, "margin": None, "futures": None}
self._listen_keys = {"user": None, "margin": None, "futures": None}
self._account_callbacks = {"user": None, "margin": None, "futures": None}
def _start_socket(self, path, callback, prefix="ws/"):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_futures_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@aggTrade", callback)
def start_futures_symbol_markprice_socket(self, symbol, callback, update_time=None):
socket_name = symbol.lower() + "@markPrice"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_markprice_socket(self, callback, update_time=None):
socket_name = "!markPrice@arr"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_kline_socket(
self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE
):
socket_name = "{}@kline_{}".format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_futures_symbol_miniticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@miniTicker", callback)
def start_futures_miniticker_socket(self, callback):
return self._start_socket("!miniTicker@arr", callback)
def start_futures_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@ticker", callback)
def start_futures_ticker_socket(self, callback):
return self._start_socket("!ticker@arr", callback)
def start_futures_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@bookTicker", callback)
def startt_futures_book_ticker_socket(self, callback):
return self._start_socket("!bookTicker", callback)
def start_futures_symbol_force_order_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@forceOrder", callback)
def start_futures_force_order_socket(self, callback):
return self._start_socket("!forceOrder@arr", callback)
def start_futures_depth_socket(
self, symbol, callback, depth=None, update_time=None
):
socket_name = symbol.lower() + "@depth"
if depth:
socket_name = "{}{}".format(socket_name, depth)
if update_time:
socket_name = "{}@{}ms".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_multiplex_socket(self, streams, callback):
"""Start a multiplexed socket using a list of socket names.
User stream sockets can not be included.
Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
:param streams: list of stream names in lower case
:type streams: list
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
stream_path = "streams={}".format("/".join(streams))
return self._start_socket(stream_path, callback, "stream?")
def start_futures_socket(self, callback):
# Get the user listen key
futures_listen_key = self._client.futures_stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket("futures", futures_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
"""Starts one of user or margin socket"""
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(
self._user_timeout, callback, [socket_type]
)
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == "user":
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
elif socket_type == "margin":
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.futures_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(
self.STREAM_URL + "tmp_path"
)
self._conns[conn_key].disconnect()
del self._conns[conn_key]
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["user"]:
self._stop_account_socket("user")
# or a margin stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["margin"]:
self._stop_account_socket("margin")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["futures"]:
self._stop_account_socket("futures")
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error about reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | 36.514634 | 141 | 0.583962 |
import json
import threading
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from binance.client import Client
class BinanceClientProtocol(WebSocketClientProtocol):
def __init__(self):
super(WebSocketClientProtocol, self).__init__()
def onConnect(self, response):
self.factory.resetDelay()
def onMessage(self, payload, isBinary):
if not isBinary:
try:
payload_obj = json.loads(payload.decode('utf8'))
except ValueError:
pass
else:
self.factory.callback(payload_obj)
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
initialDelay = 0.1
maxDelay = 10
maxRetries = 5
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
protocol = BinanceClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': 'Max reconnect retries reached'
}
def clientConnectionFailed(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
class BinanceSocketManager(threading.Thread):
STREAM_URL = 'wss://stream.binance.com:9443/'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {'user': None, 'margin': None}
self._listen_keys = {'user': None, 'margin': None}
self._account_callbacks = {'user': None, 'margin': None}
def _start_socket(self, path, callback, prefix='ws/'):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_depth_socket(self, symbol, callback, depth=None):
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback)
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_miniticker_socket(self, callback, update_time=1000):
return self._start_socket('!miniTicker@arr@{}ms'.format(update_time), callback)
def start_trade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@trade', callback)
def start_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@aggTrade', callback)
def start_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@ticker', callback)
def start_ticker_socket(self, callback):
return self._start_socket('!ticker@arr', callback)
def start_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@bookTicker', callback)
def start_book_ticker_socket(self, callback):
return self._start_socket('!bookTicker', callback)
def start_multiplex_socket(self, streams, callback):
stream_path = 'streams={}'.format('/'.join(streams))
return self._start_socket(stream_path, callback, 'stream?')
def start_user_socket(self, callback):
user_listen_key = self._client.stream_get_listen_key()
return self._start_account_socket('user', user_listen_key, callback)
def start_margin_socket(self, callback):
margin_listen_key = self._client.margin_stream_get_listen_key()
return self._start_account_socket('margin', margin_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(self._user_timeout, callback, [socket_type])
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == 'user':
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
if conn_key not in self._conns:
return
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['user']:
self._stop_account_socket('user')
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['margin']:
self._stop_account_socket('margin')
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
pass
def close(self):
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
class BinanceFuturesSocketManager(threading.Thread):
STREAM_URL = "wss://fstream.binance.com/"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {"user": None, "margin": None, "futures": None}
self._listen_keys = {"user": None, "margin": None, "futures": None}
self._account_callbacks = {"user": None, "margin": None, "futures": None}
def _start_socket(self, path, callback, prefix="ws/"):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_futures_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@aggTrade", callback)
def start_futures_symbol_markprice_socket(self, symbol, callback, update_time=None):
socket_name = symbol.lower() + "@markPrice"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_markprice_socket(self, callback, update_time=None):
socket_name = "!markPrice@arr"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_kline_socket(
self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE
):
socket_name = "{}@kline_{}".format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_futures_symbol_miniticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@miniTicker", callback)
def start_futures_miniticker_socket(self, callback):
return self._start_socket("!miniTicker@arr", callback)
def start_futures_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@ticker", callback)
def start_futures_ticker_socket(self, callback):
return self._start_socket("!ticker@arr", callback)
def start_futures_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@bookTicker", callback)
def startt_futures_book_ticker_socket(self, callback):
return self._start_socket("!bookTicker", callback)
def start_futures_symbol_force_order_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@forceOrder", callback)
def start_futures_force_order_socket(self, callback):
return self._start_socket("!forceOrder@arr", callback)
def start_futures_depth_socket(
self, symbol, callback, depth=None, update_time=None
):
socket_name = symbol.lower() + "@depth"
if depth:
socket_name = "{}{}".format(socket_name, depth)
if update_time:
socket_name = "{}@{}ms".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_multiplex_socket(self, streams, callback):
stream_path = "streams={}".format("/".join(streams))
return self._start_socket(stream_path, callback, "stream?")
def start_futures_socket(self, callback):
futures_listen_key = self._client.futures_stream_get_listen_key()
return self._start_account_socket("futures", futures_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(
self._user_timeout, callback, [socket_type]
)
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == "user":
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
elif socket_type == "margin":
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.futures_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
if conn_key not in self._conns:
return
self._conns[conn_key].factory = WebSocketClientFactory(
self.STREAM_URL + "tmp_path"
)
self._conns[conn_key].disconnect()
del self._conns[conn_key]
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["user"]:
self._stop_account_socket("user")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["margin"]:
self._stop_account_socket("margin")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["futures"]:
self._stop_account_socket("futures")
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
pass
def close(self):
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | true | true |
f7308663c6efc7fb20c889452d367dd74dc0ad2d | 5,218 | py | Python | mantrid/cli.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | 30 | 2015-01-01T00:32:47.000Z | 2021-09-07T20:25:01.000Z | mantrid/cli.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | null | null | null | mantrid/cli.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | [
"BSD-3-Clause"
] | 9 | 2015-05-12T05:09:12.000Z | 2021-12-29T19:07:01.000Z | import sys
from .client import MantridClient
class MantridCli(object):
"""Command line interface to Mantrid"""
def __init__(self, base_url):
self.client = MantridClient(base_url)
@classmethod
def main(cls):
cli = cls("http://localhost:8042")
cli.run(sys.argv)
@property
def action_names(self):
for method_name in dir(self):
if method_name.startswith("action_") \
and method_name != "action_names":
yield method_name[7:]
def run(self, argv):
# Work out what action we're doing
try:
action = argv[1]
except IndexError:
sys.stderr.write(
"Please provide an action (%s).\n" % (
", ".join(self.action_names),
)
)
sys.exit(1)
if action not in list(self.action_names):
sys.stderr.write(
"Action %s does not exist.\n" % (
action,
)
)
sys.exit(1)
# Run it
getattr(self, "action_%s" % action)(*argv[2:])
def action_list(self):
"Lists all hosts on the LB"
format = "%-35s %-25s %-8s"
print format % ("HOST", "ACTION", "SUBDOMS")
for host, details in sorted(self.client.get_all().items()):
if details[0] in ("proxy", "mirror"):
action = "%s<%s>" % (
details[0],
",".join(
"%s:%s" % (host, port)
for host, port in details[1]['backends']
)
)
elif details[0] == "static":
action = "%s<%s>" % (
details[0],
details[1]['type'],
)
elif details[0] == "redirect":
action = "%s<%s>" % (
details[0],
details[1]['redirect_to'],
)
elif details[0] == "empty":
action = "%s<%s>" % (
details[0],
details[1]['code'],
)
else:
action = details[0]
print format % (host, action, details[2])
def action_set(self, hostname=None, action=None, subdoms=None, *args):
"Adds a hostname to the LB, or alters an existing one"
usage = "set <hostname> <action> <subdoms> [option=value, ...]"
if hostname is None:
sys.stderr.write("You must supply a hostname.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if action is None:
sys.stderr.write("You must supply an action.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if subdoms is None or subdoms.lower() not in ("true", "false"):
sys.stderr.write("You must supply True or False for the subdomains flag.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
# Grab options
options = {}
for arg in args:
if "=" not in arg:
sys.stderr.write("%s is not a valid option (no =)\n" % (
arg
))
sys.exit(1)
key, value = arg.split("=", 1)
options[key] = value
# Sanity-check options
if action in ("proxy, mirror") and "backends" not in options:
sys.stderr.write("The %s action requires a backends option.\n" % action)
sys.exit(1)
if action == "static" and "type" not in options:
sys.stderr.write("The %s action requires a type option.\n" % action)
sys.exit(1)
if action == "redirect" and "redirect_to" not in options:
sys.stderr.write("The %s action requires a redirect_to option.\n" % action)
sys.exit(1)
if action == "empty" and "code" not in options:
sys.stderr.write("The %s action requires a code option.\n" % action)
sys.exit(1)
# Expand some options from text to datastructure
if "backends" in options:
options['backends'] = [
(lambda x: (x[0], int(x[1])))(bit.split(":", 1))
for bit in options['backends'].split(",")
]
# Set!
self.client.set(
hostname,
[action, options, subdoms.lower() == "true"]
)
def action_delete(self, hostname):
"Deletes the hostname from the LB."
self.client.delete(
hostname,
)
def action_stats(self, hostname=None):
"Shows stats (possibly limited by hostname)"
format = "%-35s %-11s %-11s %-11s %-11s"
print format % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT")
for host, details in sorted(self.client.stats(hostname).items()):
print format % (
host,
details.get("open_requests", 0),
details.get("completed_requests", 0),
details.get("bytes_received", 0),
details.get("bytes_sent", 0),
)
| 36.236111 | 88 | 0.478153 | import sys
from .client import MantridClient
class MantridCli(object):
"""Command line interface to Mantrid"""
def __init__(self, base_url):
self.client = MantridClient(base_url)
@classmethod
def main(cls):
cli = cls("http://localhost:8042")
cli.run(sys.argv)
@property
def action_names(self):
for method_name in dir(self):
if method_name.startswith("action_") \
and method_name != "action_names":
yield method_name[7:]
def run(self, argv):
try:
action = argv[1]
except IndexError:
sys.stderr.write(
"Please provide an action (%s).\n" % (
", ".join(self.action_names),
)
)
sys.exit(1)
if action not in list(self.action_names):
sys.stderr.write(
"Action %s does not exist.\n" % (
action,
)
)
sys.exit(1)
# Run it
getattr(self, "action_%s" % action)(*argv[2:])
def action_list(self):
"Lists all hosts on the LB"
format = "%-35s %-25s %-8s"
print format % ("HOST", "ACTION", "SUBDOMS")
for host, details in sorted(self.client.get_all().items()):
if details[0] in ("proxy", "mirror"):
action = "%s<%s>" % (
details[0],
",".join(
"%s:%s" % (host, port)
for host, port in details[1]['backends']
)
)
elif details[0] == "static":
action = "%s<%s>" % (
details[0],
details[1]['type'],
)
elif details[0] == "redirect":
action = "%s<%s>" % (
details[0],
details[1]['redirect_to'],
)
elif details[0] == "empty":
action = "%s<%s>" % (
details[0],
details[1]['code'],
)
else:
action = details[0]
print format % (host, action, details[2])
def action_set(self, hostname=None, action=None, subdoms=None, *args):
"Adds a hostname to the LB, or alters an existing one"
usage = "set <hostname> <action> <subdoms> [option=value, ...]"
if hostname is None:
sys.stderr.write("You must supply a hostname.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if action is None:
sys.stderr.write("You must supply an action.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
if subdoms is None or subdoms.lower() not in ("true", "false"):
sys.stderr.write("You must supply True or False for the subdomains flag.\n")
sys.stderr.write("Usage: %s\n" % usage)
sys.exit(1)
# Grab options
options = {}
for arg in args:
if "=" not in arg:
sys.stderr.write("%s is not a valid option (no =)\n" % (
arg
))
sys.exit(1)
key, value = arg.split("=", 1)
options[key] = value
# Sanity-check options
if action in ("proxy, mirror") and "backends" not in options:
sys.stderr.write("The %s action requires a backends option.\n" % action)
sys.exit(1)
if action == "static" and "type" not in options:
sys.stderr.write("The %s action requires a type option.\n" % action)
sys.exit(1)
if action == "redirect" and "redirect_to" not in options:
sys.stderr.write("The %s action requires a redirect_to option.\n" % action)
sys.exit(1)
if action == "empty" and "code" not in options:
sys.stderr.write("The %s action requires a code option.\n" % action)
sys.exit(1)
# Expand some options from text to datastructure
if "backends" in options:
options['backends'] = [
(lambda x: (x[0], int(x[1])))(bit.split(":", 1))
for bit in options['backends'].split(",")
]
# Set!
self.client.set(
hostname,
[action, options, subdoms.lower() == "true"]
)
def action_delete(self, hostname):
"Deletes the hostname from the LB."
self.client.delete(
hostname,
)
def action_stats(self, hostname=None):
"Shows stats (possibly limited by hostname)"
format = "%-35s %-11s %-11s %-11s %-11s"
print format % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT")
for host, details in sorted(self.client.stats(hostname).items()):
print format % (
host,
details.get("open_requests", 0),
details.get("completed_requests", 0),
details.get("bytes_received", 0),
details.get("bytes_sent", 0),
)
| false | true |
f73087aa81776159702022d95866d5b4fbe8cb4f | 3,942 | py | Python | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | null | null | null | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | null | null | null | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | 1 | 2020-04-03T06:19:11.000Z | 2020-04-03T06:19:11.000Z | #!/usr/bin/env python3
import binascii
import hashlib
import os.path
import struct
import sys
sys.path.insert(0, os.path.dirname(__file__))
import rc4
# Helper functions
def rol32(val, shift):
val = val & 0xffffffff
shift = shift & 0x1f
if not shift:
return val
return ((val << shift) & 0xffffffff) | (val >> (32 - shift))
def ror32(val, shift):
return rol32(val, 32 - shift)
# Load encrypted data
with open('extracted_encrypted_data.bin', 'rb') as f:
all_data = f.read()
enc_data = all_data[0x40:]
def rc6_decrypt(ks, enc_block):
    """Decrypt one 128-bit RC6 block (20 rounds, 32-bit words).

    https://en.wikipedia.org/wiki/RC6

    Args:
        ks: round-key schedule as a flat list of 32-bit words.  Indices
            ks[0xa8 // 4] == ks[42] and ks[0xac // 4] == ks[43] are the
            final whitening keys, so the schedule holds 44 words
            (2 * 20 rounds + 4 whitening words).
        enc_block: ciphertext block as a 4-tuple (a, b, c, d) of 32-bit ints.

    Returns:
        The plaintext block as a 4-tuple of 32-bit ints.
    """
    a, b, c, d = enc_block
    # Undo the final key whitening on words a and c.
    a -= ks[0xa8 // 4]
    c -= ks[0xac // 4]
    for iround in range(19, -1, -1):
        # Invert the end-of-round word rotation: (a,b,c,d) <- (d,a,b,c),
        # re-masking to 32 bits after the subtractions above.
        a, b, c, d = [x & 0xffffffff for x in (d, a, b, c)]
        # u and t are the data-dependent rotation amounts, recomputed
        # from d and b via x*(2x+1) rotated by 5.
        u = ror32(d * (2 * d + 1), 5)
        t = ror32(b * (2 * b + 1), 5)
        # Undo the round-key addition and the t/u-dependent mixing.
        c = rol32(c - ks[2 * iround + 3], t) ^ u
        a = rol32(a - ks[2 * iround + 2], u) ^ t
    # Undo the initial key whitening on words b and d.
    d = (d - ks[1]) & 0xffffffff
    b = (b - ks[0]) & 0xffffffff
    return a, b, c, d
# TODO: key derivation with key "551C2016B00B5F00"
# Precomputed 44-word RC6 round-key schedule (2 * 20 rounds + 4 whitening
# words), matching the ks[42]/ks[43] indices used by rc6_decrypt above.
# Presumably dumped from the target binary — deriving it from the key in
# the TODO is still unimplemented.
key_state = [
    0x2129ab75, 0x975374c8, 0x5eead5ac, 0x2c8b312f,
    0xfd0a1322, 0x80d0133c, 0x16a849c2, 0x42064c4a,
    0x75fe77f5, 0x4ddaf4d7, 0xe9221458, 0x46a97a25,
    0xfea74495, 0xe119d517, 0x055f2605, 0xc6706c81,
    0x4d966822, 0xadc3e831, 0x68c68bdf, 0xfcb57dac,
    0x7df33f01, 0xefb6081f, 0x98eb29eb, 0x668352b7,
    0x98a1545b, 0x0a3e64cd, 0x9b16a929, 0x2233c1c4,
    0x7879ec25, 0x17c4466a, 0x6e0b37ea, 0xde30ebb2,
    0x01ef095c, 0x35fbdb33, 0xa97b35b7, 0xdfbf652c,
    0xaf668798, 0xb7846548, 0xafd8706a, 0x2d346ced,
    0xbb33dfe3, 0xae79adfc, 0xc3115146, 0x05a51471,
]
# Decrypt with RC6-CBC with 128-bit blocks (4 32-bit numbers).
# CBC with an all-zero IV: each plaintext block is the RC6 decryption of
# the ciphertext block XORed with the previous ciphertext block.
iv = [0, 0, 0, 0]
dec_data = bytearray(len(enc_data))
for blkoffset in range(0, len(enc_data), 16):
    # Little-endian 4x32-bit words per block.
    enc_block = struct.unpack('<IIII', enc_data[blkoffset:blkoffset + 16])
    dec_block = rc6_decrypt(key_state, enc_block)
    # CBC unchaining: XOR with the previous ciphertext block (IV first time).
    dec_block = [i ^ d for i, d in zip(iv, dec_block)]
    dec_data[blkoffset:blkoffset + 16] = struct.pack('<IIII', *dec_block)
    iv = enc_block
# dec_data contains chunks.
# Chunk layout (little-endian), as read below:
#   +0x00   4 bytes     payload length
#   +0x04   0x10 bytes  per-chunk RC4 key
#   +0x14   0x10 bytes  MD5 of the decrypted payload
#   +0x24   <length>    RC4-encrypted payload
# A zero-length chunk terminates the stream.
offset = 0
chunk_index = 0
while offset < len(dec_data):
    chunck_length = struct.unpack('<I', dec_data[offset:offset + 4])[0]
    rc4_key = dec_data[offset + 4:offset + 0x14]
    payload_md5 = dec_data[offset + 0x14:offset + 0x24]
    enc_payload = dec_data[offset + 0x24:offset + 0x24 + chunck_length]
    print("Chunk {} at {:#x}: {:#x} bytes".format(chunk_index, offset, chunck_length))
    if chunck_length == 0:
        break
    # rc4.RC4 yields a keystream; XOR byte-wise to decrypt the payload.
    keystream = rc4.RC4(rc4_key)
    dec_payload = bytearray(e ^ k for e, k in zip(enc_payload, keystream))
    with open('decrypted_chunk_{}.bin'.format(chunk_index), 'wb') as f:
        f.write(dec_payload)
    # Print the stored and computed MD5s, then verify they match.
    print("  {}".format(binascii.hexlify(payload_md5).decode('ascii')))
    print("  {}".format(hashlib.md5(dec_payload).hexdigest()))
    assert payload_md5 == hashlib.md5(dec_payload).digest()
    offset += 0x24 + chunck_length
    chunk_index += 1
# Recorded output of a successful run (stored vs. computed MD5 per chunk):
"""
Chunk 0 at 0x0: 0x39 bytes
  a83bd78eaf49903dfd64447fcd35831a
  a83bd78eaf49903dfd64447fcd35831a
Chunk 1 at 0x5d: 0xc15 bytes
  ad2713a0668ac3f421a00b7b21430b4f
  ad2713a0668ac3f421a00b7b21430b4f
Chunk 2 at 0xc96: 0x34631 bytes
  671d51af77f541605ea91e81e8dc70f0
  671d51af77f541605ea91e81e8dc70f0
Chunk 3 at 0x352eb: 0x1b234 bytes
  8ff9f891acf83a5ee95f69084b4d48d2
  8ff9f891acf83a5ee95f69084b4d48d2
Chunk 4 at 0x50543: 0xfbe0 bytes
  c4e5abbc8c4ddff3853db0fcb9eb55ff
  c4e5abbc8c4ddff3853db0fcb9eb55ff
Chunk 5 at 0x60147: 0xb9f7 bytes
  0cb3389fedc86b4ff4a86db0b492b273
  0cb3389fedc86b4ff4a86db0b492b273
Chunk 6 at 0x6bb62: 0x83d5 bytes
  03d5e4c549945d4ac5b1e3b973606d61
  03d5e4c549945d4ac5b1e3b973606d61
Chunk 7 at 0x73f5b: 0x12500a bytes
  581ae98e6119f7672ba38c74b1c427ce
  581ae98e6119f7672ba38c74b1c427ce
Chunk 8 at 0x198f89: 0x0 bytes
"""
| 32.04878 | 86 | 0.691781 |
import binascii
import hashlib
import os.path
import struct
import sys
sys.path.insert(0, os.path.dirname(__file__))
import rc4
def rol32(val, shift):
    """Rotate the 32-bit value *val* left by *shift* bits (shift taken mod 32)."""
    val = val & 0xffffffff
    shift = shift & 0x1f
    if not shift:
        # Fast path: a zero rotation leaves the (masked) value unchanged.
        return val
    return ((val << shift) & 0xffffffff) | (val >> (32 - shift))
def ror32(val, shift):
    """Rotate the 32-bit value *val* right by *shift* bits (via a left rotation)."""
    return rol32(val, 32 - shift)
# Load the dump; skip the first 0x40 bytes (presumably a header — TODO
# confirm) so enc_data holds only the RC6-CBC ciphertext.
with open('extracted_encrypted_data.bin', 'rb') as f:
    all_data = f.read()
enc_data = all_data[0x40:]
def rc6_decrypt(ks, enc_block):
    """Decrypt one 128-bit RC6 block (20 rounds; see https://en.wikipedia.org/wiki/RC6).

    ks is a 44-word round-key schedule (ks[42]/ks[43] are the final
    whitening keys); enc_block is a 4-tuple of 32-bit words.
    """
    a, b, c, d = enc_block
    # Undo the final key whitening on a and c.
    a -= ks[0xa8 // 4]
    c -= ks[0xac // 4]
    for iround in range(19, -1, -1):
        # Invert the end-of-round word rotation, re-masking to 32 bits.
        a, b, c, d = [x & 0xffffffff for x in (d, a, b, c)]
        # Data-dependent rotation amounts recomputed from d and b.
        u = ror32(d * (2 * d + 1), 5)
        t = ror32(b * (2 * b + 1), 5)
        c = rol32(c - ks[2 * iround + 3], t) ^ u
        a = rol32(a - ks[2 * iround + 2], u) ^ t
    # Undo the initial key whitening on b and d.
    d = (d - ks[1]) & 0xffffffff
    b = (b - ks[0]) & 0xffffffff
    return a, b, c, d
# Precomputed 44-word RC6 round-key schedule (2 * 20 rounds + 4 whitening
# words) consumed by rc6_decrypt; presumably dumped from the target.
key_state = [
    0x2129ab75, 0x975374c8, 0x5eead5ac, 0x2c8b312f,
    0xfd0a1322, 0x80d0133c, 0x16a849c2, 0x42064c4a,
    0x75fe77f5, 0x4ddaf4d7, 0xe9221458, 0x46a97a25,
    0xfea74495, 0xe119d517, 0x055f2605, 0xc6706c81,
    0x4d966822, 0xadc3e831, 0x68c68bdf, 0xfcb57dac,
    0x7df33f01, 0xefb6081f, 0x98eb29eb, 0x668352b7,
    0x98a1545b, 0x0a3e64cd, 0x9b16a929, 0x2233c1c4,
    0x7879ec25, 0x17c4466a, 0x6e0b37ea, 0xde30ebb2,
    0x01ef095c, 0x35fbdb33, 0xa97b35b7, 0xdfbf652c,
    0xaf668798, 0xb7846548, 0xafd8706a, 0x2d346ced,
    0xbb33dfe3, 0xae79adfc, 0xc3115146, 0x05a51471,
]
# RC6 in CBC mode, 128-bit blocks, all-zero IV: decrypt each block and
# XOR with the previous ciphertext block.
iv = [0, 0, 0, 0]
dec_data = bytearray(len(enc_data))
for blkoffset in range(0, len(enc_data), 16):
    # Little-endian 4x32-bit words per block.
    enc_block = struct.unpack('<IIII', enc_data[blkoffset:blkoffset + 16])
    dec_block = rc6_decrypt(key_state, enc_block)
    # CBC unchaining (IV for the first block).
    dec_block = [i ^ d for i, d in zip(iv, dec_block)]
    dec_data[blkoffset:blkoffset + 16] = struct.pack('<IIII', *dec_block)
    iv = enc_block
# Walk the decrypted stream chunk by chunk.  Layout (little-endian):
# 4-byte length, 0x10-byte RC4 key, 0x10-byte payload MD5, then the
# RC4-encrypted payload.  A zero-length chunk terminates the stream.
offset = 0
chunk_index = 0
while offset < len(dec_data):
    chunck_length = struct.unpack('<I', dec_data[offset:offset + 4])[0]
    rc4_key = dec_data[offset + 4:offset + 0x14]
    payload_md5 = dec_data[offset + 0x14:offset + 0x24]
    enc_payload = dec_data[offset + 0x24:offset + 0x24 + chunck_length]
    print("Chunk {} at {:#x}: {:#x} bytes".format(chunk_index, offset, chunck_length))
    if chunck_length == 0:
        break
    # XOR the payload with the per-chunk RC4 keystream, write it out,
    # then verify the stored MD5 against the decrypted bytes.
    keystream = rc4.RC4(rc4_key)
    dec_payload = bytearray(e ^ k for e, k in zip(enc_payload, keystream))
    with open('decrypted_chunk_{}.bin'.format(chunk_index), 'wb') as f:
        f.write(dec_payload)
    print("  {}".format(binascii.hexlify(payload_md5).decode('ascii')))
    print("  {}".format(hashlib.md5(dec_payload).hexdigest()))
    assert payload_md5 == hashlib.md5(dec_payload).digest()
    offset += 0x24 + chunck_length
    chunk_index += 1
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.