hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7305a7160e771c29930fa09a63b5a4213102df1 | 2,604 | py | Python | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | null | null | null | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | null | null | null | PyDictAPI/__init__.py | imshawan/PyDictAPI | 937001ce896b634132e6dce3e0f1a59986c4551c | [
"MIT"
] | 1 | 2021-08-29T11:08:07.000Z | 2021-08-29T11:08:07.000Z |
"""
----------------------
Python Dictionary API
----------------------
PyDictAPI is library written in Python, that can be used to fetch meanings and translation.
Both the Finder and Translator class takes an arguement "jsonify" that is set to False by default.
If jsonify is set to True, than the processed queries are returned in JSON. While by default the queries are returned in the form of a Python List (Array)
Currently supports only English-English dictionary searches
Basic usage:
>>> from PyDictAPI import Finder
>>> Meanings = Finder(jsonify=True)
>>> print(Meanings.findMeanings('apple'))
Output:
`{
"word": "Apple",
"meanings": [
{
"partOfSpeech": "Noun",
"definition": "The usually round, red or yellow, edible fruit of a small tree, Malus sylvestris, of the rose family."
},
{
"partOfSpeech": "Noun",
"definition": "A rosaceous tree, Malus sieversii, native to Central Asia but widely cultivated in temperate regions in many varieties, having pink or white fragrant flowers and firm rounded edible fruits. See also crab apple"
}
]
}`
---------------------------------------
Finding Examples, Synonyms and Antonyms
---------------------------------------
>>> print(Meanings.findUsage('help', 2)) #Finding Examples
# Here 2 defines the maximum number of examples to be included in the response,
# by default it is set to 5
>>> print(Meanings.findSynonyms('help', 4)) #Finding Synonyms
>>> print(Meanings.findAntonyms('help', 4)) #Finding Antonyms
----------------
Translating text
----------------
Example:
>>> # Import the module first
>>> from PyDictAPI import Translate
>>> t = Translate(jsonify=True) # Creates an instance of Translate class
>>>
>>> # You can get all supported language list through languages_help()
>>> languages = t.languages_help(pretty=True)
>>> # Pretty=true returns the list of supported languages in a well structured manner. By default Pretty is set to False
>>>
>>> # Tranlate English into Hindi
>>> print(t.translateItems("Hello, How are you?", "hi"))
`{'query': 'Hello, How are you?', 'language_detected': 'Hindi', 'translation': 'नमस्कार किसे हो आप?'}`
Full documentation is at <https://github.com/imshawan/PyDictAPI>.
copyright: (c) 2021 by Shawan Mandal.
license: MIT License, see LICENSE for more details.
"""
__author__ = "Shawan Mandal"
__email__ = "imshawan.dev049@gmail.com"
__version__ = "1.6.0"
try:
from .scrape import *
from .translator import *
except:
from scrape import *
from translator import *
| 30.635294 | 231 | 0.656682 |
__author__ = "Shawan Mandal"
__email__ = "imshawan.dev049@gmail.com"
__version__ = "1.6.0"
try:
from .scrape import *
from .translator import *
except:
from scrape import *
from translator import *
| true | true |
f7305a74ccf623557222c020c2382bc476227606 | 3,177 | py | Python | tests/e2e_scenarios.py | rneatherway/CCF | e04c6bbbe0b5ba044abaab9f972287194b6fc6cc | [
"Apache-2.0"
] | 2 | 2020-08-06T04:12:36.000Z | 2021-09-09T04:15:25.000Z | tests/e2e_scenarios.py | rajdhandus/CCF | 96edbc9db6bd14c559a8c59bcda1c2a4835768d2 | [
"Apache-2.0"
] | 2 | 2022-02-03T06:32:47.000Z | 2022-02-09T23:00:07.000Z | tests/e2e_scenarios.py | securelogicgroup/CCF | 2bad8ca6caa146e6b7cd4167fea551d61fecabfa | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import json
import http
import random
import infra.network
import infra.proc
import infra.e2e_args
import infra.checker
from loguru import logger as LOG
def run(args):
# SNIPPET_START: parsing
with open(args.scenario) as f:
scenario = json.load(f)
hosts = scenario.get("hosts", infra.e2e_args.max_nodes(args, f=0))
args.package = scenario["package"]
# SNIPPET_END: parsing
scenario_dir = os.path.dirname(args.scenario)
# SNIPPET_START: create_network
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes
) as network:
network.start_and_join(args)
# SNIPPET_END: create_network
primary, backups = network.find_nodes()
with primary.client() as mc:
check = infra.checker.Checker()
check_commit = infra.checker.Checker(mc)
for connection in scenario["connections"]:
with (
primary.client("user0")
if not connection.get("on_backup")
else random.choice(backups).client("user0")
) as client:
txs = connection.get("transactions", [])
for include_file in connection.get("include", []):
with open(os.path.join(scenario_dir, include_file)) as f:
txs += json.load(f)
for tx in txs:
r = client.call(
tx["method"],
body=tx["body"],
http_verb=tx.get("verb", "POST"),
)
if tx.get("expected_error") is not None:
check(
r,
error=lambda status, msg, transaction=tx: status
# pylint: disable=no-member
== http.HTTPStatus(
transaction.get("expected_error")
).value,
)
elif tx.get("expected_result") is not None:
check_commit(r, result=tx.get("expected_result"))
else:
check_commit(r, result=lambda res: res is not None)
network.wait_for_node_commit_sync()
if args.network_only:
LOG.info("Keeping network alive with the following nodes:")
LOG.info(" Primary = {}:{}".format(primary.pubhost, primary.pubport))
for i, f in enumerate(backups):
LOG.info(" Backup[{}] = {}:{}".format(i, f.pubhost, f.pubport))
input("Press Enter to shutdown...")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--scenario",
help="Path to JSON file listing transactions to execute",
type=str,
required=True,
)
args = infra.e2e_args.cli_args(add=add)
run(args)
| 32.418367 | 81 | 0.514951 |
import os
import json
import http
import random
import infra.network
import infra.proc
import infra.e2e_args
import infra.checker
from loguru import logger as LOG
def run(args):
with open(args.scenario) as f:
scenario = json.load(f)
hosts = scenario.get("hosts", infra.e2e_args.max_nodes(args, f=0))
args.package = scenario["package"]
scenario_dir = os.path.dirname(args.scenario)
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes
) as network:
network.start_and_join(args)
primary, backups = network.find_nodes()
with primary.client() as mc:
check = infra.checker.Checker()
check_commit = infra.checker.Checker(mc)
for connection in scenario["connections"]:
with (
primary.client("user0")
if not connection.get("on_backup")
else random.choice(backups).client("user0")
) as client:
txs = connection.get("transactions", [])
for include_file in connection.get("include", []):
with open(os.path.join(scenario_dir, include_file)) as f:
txs += json.load(f)
for tx in txs:
r = client.call(
tx["method"],
body=tx["body"],
http_verb=tx.get("verb", "POST"),
)
if tx.get("expected_error") is not None:
check(
r,
error=lambda status, msg, transaction=tx: status
== http.HTTPStatus(
transaction.get("expected_error")
).value,
)
elif tx.get("expected_result") is not None:
check_commit(r, result=tx.get("expected_result"))
else:
check_commit(r, result=lambda res: res is not None)
network.wait_for_node_commit_sync()
if args.network_only:
LOG.info("Keeping network alive with the following nodes:")
LOG.info(" Primary = {}:{}".format(primary.pubhost, primary.pubport))
for i, f in enumerate(backups):
LOG.info(" Backup[{}] = {}:{}".format(i, f.pubhost, f.pubport))
input("Press Enter to shutdown...")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"--scenario",
help="Path to JSON file listing transactions to execute",
type=str,
required=True,
)
args = infra.e2e_args.cli_args(add=add)
run(args)
| true | true |
f7305b4091d94994c4a20cc3634347522a8f0bce | 35,217 | py | Python | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/pulse/matplotlib.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | 1 | 2020-07-13T17:56:46.000Z | 2020-07-13T17:56:46.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Matplotlib classes for pulse visualization."""
import collections
import warnings
from typing import Dict, List, Tuple, Callable, Union, Any
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse.interpolation import step_wise
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel, Channel)
from qiskit.pulse.commands import FrameChangeInstruction
from qiskit.pulse import (Waveform, SamplePulse, FrameChange, PersistentValue, Snapshot, Play,
Acquire, PulseError, ParametricPulse, SetFrequency, ShiftPhase,
Instruction, ScheduleComponent, ShiftFrequency, SetPhase)
class EventsOutputChannels:
"""Pulse dataset for channel."""
def __init__(self, t0: int, tf: int):
"""Create new channel dataset.
TODO: remove PV
Args:
t0: starting time of plot
tf: ending time of plot
"""
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._setphase = None
self._frequencychanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time: int, instruction: Instruction):
"""Add new pulse instruction to channel.
Args:
start_time: Starting time of instruction
instruction: Instruction object to be added
"""
if instruction.command is not None:
pulse = instruction.command
elif isinstance(instruction, Play):
pulse = instruction.pulse
else:
pulse = instruction
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse)
else:
self.pulses[start_time] = [pulse]
@property
def waveform(self) -> np.ndarray:
"""Get waveform."""
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self) -> Dict[int, FrameChangeInstruction]:
"""Get frame changes."""
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def setphase(self) -> Dict[int, SetPhase]:
"""Get the SetPhase phase values."""
if self._setphase is None:
self._build_waveform()
return self._trim(self._setphase)
@property
def frequencychanges(self) -> Dict[int, SetFrequency]:
"""Get the frequency changes."""
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def frequencyshift(self) -> Dict[int, ShiftFrequency]:
"""Set the frequency changes."""
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def conditionals(self) -> Dict[int, str]:
"""Get conditionals."""
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self) -> Dict[int, Snapshot]:
"""Get snapshots."""
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self) -> Dict[int, Union[Waveform, Acquire]]:
"""Get labels."""
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self) -> bool:
"""Return if pulse is empty.
Returns:
bool: if the channel has nothing to plot
"""
if (any(self.waveform) or self.framechanges or self.setphase or
self.conditionals or self.snapshots):
return False
return True
def to_table(self, name: str) -> List[Tuple[int, str, str]]:
"""Get table contains.
Args:
name (str): name of channel
Returns:
A list of events in the channel
"""
time_event = []
framechanges = self.framechanges
setphase = self.setphase
conditionals = self.conditionals
snapshots = self.snapshots
frequencychanges = self.frequencychanges
for key, val in framechanges.items():
data_str = 'shift phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in setphase.items():
data_str = 'set phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
for key, val in frequencychanges.items():
data_str = 'frequency: %.4e' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
"""Create waveform from stored pulses.
"""
self._framechanges = {}
self._setphase = {}
self._frequencychanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
tmp_set_phase = 0
tmp_sf = None
for command in commands:
if isinstance(command, (FrameChange, ShiftPhase)):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, SetPhase):
tmp_set_phase = command.phase
pv[time:] = 0
elif isinstance(command, SetFrequency):
tmp_sf = command.frequency
elif isinstance(command, ShiftFrequency):
tmp_sf = command.frequency
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
if tmp_set_phase != 0:
self._setphase[time] = tmp_set_phase
fc = tmp_set_phase
if tmp_sf is not None:
self._frequencychanges[time] = tmp_sf
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, ParametricPulse):
command = command.get_sample_pulse()
if isinstance(command, (Waveform, SamplePulse)):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(tf - time)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events: Dict[int, Any]) -> Dict[int, Any]:
"""Return events during given `time_range`.
Args:
events: time and operation of events.
Returns:
Events within the specified time range.
"""
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
"""A class to create figure for sample pulse."""
def __init__(self, style: PulseStyle):
"""Create new figure.
Args:
style: Style sheet for pulse visualization.
"""
self.style = style or PulseStyle()
def draw(self, pulse: Waveform,
dt: float = 1.0,
interp_method: Callable = None,
scale: float = 1, scaling: float = None):
"""Draw figure.
Args:
pulse: Waveform to draw.
dt: time interval.
interp_method: interpolation function.
scale: Relative visual scaling of waveform amplitudes.
scaling: Deprecated, see `scale`.
Returns:
matplotlib.figure.Figure: A matplotlib figure object of the pulse envelope.
"""
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
# If these self.style.dpi or self.style.figsize are None, they will
# revert back to their default rcParam keys.
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
interp_method = interp_method or step_wise
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
# plot
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scale:
ax.set_ylim(-1/scale, 1/scale)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
bbox = ax.get_position()
# This check is here for backwards compatibility. Before, the check was around
# the suptitle line, however since the font style can take on a type of None
# we need to unfortunately check both the type and the value of the object.
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(pulse.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
class ScheduleDrawer:
"""A class to create figure for schedule and channel."""
def __init__(self, style: SchedStyle):
"""Create new figure.
Args:
style: Style sheet for pulse schedule visualization.
"""
self.style = style or SchedStyle()
def _build_channels(self, schedule: ScheduleComponent,
channels: List[Channel],
t0: int, tf: int,
show_framechange_channels: bool = True
) -> Tuple[Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels]]:
"""Create event table of each pulse channels in the given schedule.
Args:
schedule: Schedule object to plot.
channels: Channels to plot.
t0: Start time of plot.
tf: End time of plot.
show_framechange_channels: Plot channels only with FrameChanges.
Returns:
channels: All channels.
output_channels: All (D, M, U, A) channels.
snapshot_channels: Snapshots.
"""
# prepare waveform channels
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
_channels = set()
if show_framechange_channels:
_channels.update(schedule.channels)
# take channels that do not only contain framechanges
else:
for start_time, instruction in schedule.instructions:
if not isinstance(instruction, (FrameChangeInstruction, ShiftPhase, SetPhase)):
_channels.update(instruction.channels)
_channels.update(channels)
for chan in _channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **snapshot_channels}
# sort by index then name to group qubits together.
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
@staticmethod
def _scale_channels(output_channels: Dict[Channel, EventsOutputChannels],
scale: float,
channel_scales: Dict[Channel, float] = None,
channels: List[Channel] = None,
plot_all: bool = False) -> Dict[Channel, float]:
"""Count number of channels that contains any instruction to show
and find scale factor of that channel.
Args:
output_channels: Event table of channels to show.
scale: Global scale factor.
channel_scales: Channel specific scale factors.
channels: Specified channels to plot.
plot_all: Plot empty channel.
Returns:
scale_dict: Scale factor of each channel.
"""
# count numbers of valid waveform
scale_dict = {chan: 0 for chan in output_channels.keys()}
for channel, events in output_channels.items():
v_max = 0
if channels:
if channel in channels:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
scale_val = channel_scales.get(channel, scale)
if not scale_val:
# when input schedule is empty or comprises only frame changes,
# we need to overwrite maximum amplitude by a value greater than zero,
# otherwise auto axis scaling will fail with zero division.
v_max = v_max or 1
scale_dict[channel] = 1 / v_max
else:
scale_dict[channel] = scale_val
return scale_dict
def _draw_table(self, figure,
channels: Dict[Channel, EventsOutputChannels],
dt: float):
"""Draw event table if events exist.
Args:
figure (matpotlib.figure.Figure): Figure object
channels: Dictionary of channel and event table
dt: Time interval
Returns:
Tuple[matplotlib.axes.Axes]: Axis objects for table and canvas of pulses.
"""
# create table
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
# plot table
if table_data:
# table area size
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
max_size = self.style.max_table_ratio * figure.get_size_inches()[1]
max_rows = np.floor(max_size/self.style.fig_unit_h_table/ncols)
nrows = int(min(nrows, max_rows))
# don't overflow plot with table data
table_data = table_data[:int(nrows*ncols)]
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (figure.get_size_inches()[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
cell_value[r][3 * c + 0] = 't = %s' % time * dt
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
table.set_fontsize = self.style.table_font_size
else:
tb = None
ax = figure.add_subplot(111)
return tb, ax
@staticmethod
def _draw_snapshots(ax,
snapshot_channels: Dict[Channel, EventsOutputChannels],
y0: float) -> None:
"""Draw snapshots to given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw snapshots.
snapshot_channels: Event table of snapshots.
y0: vertical position to draw the snapshots.
"""
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time, y0), xytext=(time, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax,
fcs: Dict[int, FrameChangeInstruction],
y0: float) -> bool:
"""Draw frame change of given channel to given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw frame changes.
fcs: Event table of frame changes.
y0: vertical position to draw the frame changes.
"""
for time in fcs.keys():
ax.text(x=time, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
def _draw_frequency_changes(self, ax,
sf: Dict[int, SetFrequency],
y0: float) -> bool:
"""Draw set frequency of given channel to given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw frame changes.
sf: Event table of set frequency.
y0: vertical position to draw the frame changes.
"""
for time in sf.keys():
ax.text(x=time, y=y0, s=r'$\leftrightsquigarrow$',
fontsize=self.style.icon_font_size,
ha='center', va='center', rotation=90)
def _get_channel_color(self, channel: Channel) -> str:
"""Lookup table for waveform color.
Args:
channel: Type of channel.
Return:
Color code or name of color.
"""
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
@staticmethod
def _prev_label_at_time(prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
time: int) -> bool:
"""Check overlap of pulses with pervious channels.
Args:
prev_labels: List of labels in previous channels.
time: Start time of current pulse instruction.
Returns:
`True` if current instruction overlaps with others.
"""
for labels in prev_labels:
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax,
labels: Dict[int, Union[Waveform, Acquire]],
prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
y0: float) -> None:
"""Draw label of pulse instructions on given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw labels.
labels: Pulse labels of channel.
prev_labels: Pulse labels of previous channels.
y0: vertical position to draw the labels.
"""
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2, y0),
xytext=((t0+tf)//2, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax,
output_channels: Dict[Channel, EventsOutputChannels],
interp_method: Callable,
t0: int, tf: int,
scale_dict: Dict[Channel, float],
label: bool = False,
framechange: bool = True,
frequencychange: bool = True) -> float:
"""Draw pulse instructions on given mpl axis.
Args:
ax (matplotlib.axes.Axes): axis object to draw pulses.
output_channels: Event table of channels.
interp_method: Callback function for waveform interpolation.
t0: Start time of schedule.
tf: End time of schedule.
scale_dict: Scale factor for each channel.
label: When set `True` draw labels.
framechange: When set `True` draw frame change symbols.
frequencychange: When set `True` draw frequency change symbols.
Return:
Value of final vertical axis of canvas.
"""
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# scaling value of this channel
scale = 0.5 * scale_dict.get(channel, 0.5)
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float)
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
# when input schedule is empty or comprises only frame changes,
# we should avoid interpolation due to lack of data points.
# instead, it just returns vector of zero.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# Minimum amplitude scaled
amp_min = scale * abs(min(0, np.nanmin(re), np.nanmin(im)))
# scaling and offset
re = scale * re + y0
im = scale * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0)
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, y0)
# plot frequency changes
sf = events.frequencychanges
if sf and frequencychange:
self._draw_frequency_changes(ax, sf, y0 + scale)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=t0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
# show scaling factor
ax.text(x=t0, y=y0 - 0.1, s='x%.1f' % (2 * scale),
fontsize=0.7*self.style.axis_font_size,
ha='right', va='top')
# change the y0 offset for removing spacing when a channel has negative values
if self.style.remove_spacing:
y0 -= 0.5 + amp_min
else:
y0 -= 1
return y0
def draw(self, schedule: ScheduleComponent,
dt: float, interp_method: Callable,
plot_range: Tuple[Union[int, float], Union[int, float]],
scale: float = None,
channel_scales: Dict[Channel, float] = None,
plot_all: bool = True, table: bool = False,
label: bool = False, framechange: bool = True,
scaling: float = None, channels: List[Channel] = None,
show_framechange_channels: bool = True):
"""Draw figure.
Args:
schedule: schedule object to plot.
dt: Time interval of samples. Pulses are visualized in the unit of
cycle time if not provided.
interp_method: Interpolation function. See example.
Interpolation is disabled in default.
See `qiskit.visualization.pulse.interpolation` for more information.
plot_range: A tuple of time range to plot.
scale: Scaling of waveform amplitude. Pulses are automatically
scaled channel by channel if not provided.
channel_scales: Dictionary of scale factor for specific channels.
Scale of channels not specified here is overwritten by `scale`.
plot_all: When set `True` plot empty channels.
table: When set `True` draw event table for supported commands.
label: When set `True` draw label for individual instructions.
framechange: When set `True` draw framechange indicators.
scaling: Deprecated, see `scale`.
channels: A list of channel names to plot.
All non-empty channels are shown if not provided.
show_framechange_channels: When set `True` plot channels
with only framechange instructions.
Returns:
matplotlib.figure.Figure: A matplotlib figure object for the pulse envelope.
Raises:
VisualizationError: When schedule cannot be drawn
"""
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
if channels is None:
channels = []
interp_method = interp_method or step_wise
if channel_scales is None:
channel_scales = {}
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]))
tf = int(np.floor(plot_range[1]))
else:
t0 = 0
# when input schedule is empty or comprises only frame changes,
# we need to overwrite pulse duration by an integer greater than zero,
# otherwise waveform returns empty array and matplotlib will be crashed.
if channels:
tf = schedule.ch_duration(*channels)
else:
tf = schedule.stop_time
tf = tf or 1
# prepare waveform channels
(schedule_channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels, t0, tf,
show_framechange_channels)
# count numbers of valid waveform
scale_dict = self._scale_channels(output_channels,
scale=scale,
channel_scales=channel_scales,
channels=channels,
plot_all=plot_all)
if table:
tb, ax = self._draw_table(figure, schedule_channels, dt)
else:
tb = None
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, scale_dict, label=label,
framechange=framechange)
y_ub = 0.5 + self.style.vertical_span
y_lb = y0 + 0.5 - self.style.vertical_span
self._draw_snapshots(ax, snapshot_channels, y_lb)
ax.set_xlim(t0, tf)
tick_labels = np.linspace(t0, tf, 5)
ax.set_xticks(tick_labels)
ax.set_xticklabels([self.style.axis_formatter % label for label in tick_labels * dt],
fontsize=self.style.axis_font_size)
ax.set_ylim(y_lb, y_ub)
ax.set_yticklabels([])
if tb is not None:
bbox = tb.get_position()
else:
bbox = ax.get_position()
# This check is here for backwards compatibility. Before, the check was around
# the suptitle line, however since the font style can take on a type of None
# we need to unfortunately check both the type and the value of the object.
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(schedule.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
| 39.13 | 96 | 0.548911 |
import collections
import warnings
from typing import Dict, List, Tuple, Callable, Union, Any
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse.interpolation import step_wise
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel, Channel)
from qiskit.pulse.commands import FrameChangeInstruction
from qiskit.pulse import (Waveform, SamplePulse, FrameChange, PersistentValue, Snapshot, Play,
Acquire, PulseError, ParametricPulse, SetFrequency, ShiftPhase,
Instruction, ScheduleComponent, ShiftFrequency, SetPhase)
class EventsOutputChannels:
def __init__(self, t0: int, tf: int):
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._setphase = None
self._frequencychanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time: int, instruction: Instruction):
if instruction.command is not None:
pulse = instruction.command
elif isinstance(instruction, Play):
pulse = instruction.pulse
else:
pulse = instruction
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse)
else:
self.pulses[start_time] = [pulse]
@property
def waveform(self) -> np.ndarray:
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self) -> Dict[int, FrameChangeInstruction]:
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def setphase(self) -> Dict[int, SetPhase]:
if self._setphase is None:
self._build_waveform()
return self._trim(self._setphase)
@property
def frequencychanges(self) -> Dict[int, SetFrequency]:
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def frequencyshift(self) -> Dict[int, ShiftFrequency]:
if self._frequencychanges is None:
self._build_waveform()
return self._trim(self._frequencychanges)
@property
def conditionals(self) -> Dict[int, str]:
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self) -> Dict[int, Snapshot]:
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self) -> Dict[int, Union[Waveform, Acquire]]:
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self) -> bool:
if (any(self.waveform) or self.framechanges or self.setphase or
self.conditionals or self.snapshots):
return False
return True
def to_table(self, name: str) -> List[Tuple[int, str, str]]:
time_event = []
framechanges = self.framechanges
setphase = self.setphase
conditionals = self.conditionals
snapshots = self.snapshots
frequencychanges = self.frequencychanges
for key, val in framechanges.items():
data_str = 'shift phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in setphase.items():
data_str = 'set phase: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
for key, val in frequencychanges.items():
data_str = 'frequency: %.4e' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
self._framechanges = {}
self._setphase = {}
self._frequencychanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
tmp_set_phase = 0
tmp_sf = None
for command in commands:
if isinstance(command, (FrameChange, ShiftPhase)):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, SetPhase):
tmp_set_phase = command.phase
pv[time:] = 0
elif isinstance(command, SetFrequency):
tmp_sf = command.frequency
elif isinstance(command, ShiftFrequency):
tmp_sf = command.frequency
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
if tmp_set_phase != 0:
self._setphase[time] = tmp_set_phase
fc = tmp_set_phase
if tmp_sf is not None:
self._frequencychanges[time] = tmp_sf
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, ParametricPulse):
command = command.get_sample_pulse()
if isinstance(command, (Waveform, SamplePulse)):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(tf - time)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events: Dict[int, Any]) -> Dict[int, Any]:
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
def __init__(self, style: PulseStyle):
self.style = style or PulseStyle()
def draw(self, pulse: Waveform,
dt: float = 1.0,
interp_method: Callable = None,
scale: float = 1, scaling: float = None):
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
interp_method = interp_method or step_wise
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scale:
ax.set_ylim(-1/scale, 1/scale)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
bbox = ax.get_position()
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(pulse.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
class ScheduleDrawer:
def __init__(self, style: SchedStyle):
self.style = style or SchedStyle()
def _build_channels(self, schedule: ScheduleComponent,
channels: List[Channel],
t0: int, tf: int,
show_framechange_channels: bool = True
) -> Tuple[Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels],
Dict[Channel, EventsOutputChannels]]:
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
_channels = set()
if show_framechange_channels:
_channels.update(schedule.channels)
else:
for start_time, instruction in schedule.instructions:
if not isinstance(instruction, (FrameChangeInstruction, ShiftPhase, SetPhase)):
_channels.update(instruction.channels)
_channels.update(channels)
for chan in _channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **snapshot_channels}
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
@staticmethod
def _scale_channels(output_channels: Dict[Channel, EventsOutputChannels],
scale: float,
channel_scales: Dict[Channel, float] = None,
channels: List[Channel] = None,
plot_all: bool = False) -> Dict[Channel, float]:
scale_dict = {chan: 0 for chan in output_channels.keys()}
for channel, events in output_channels.items():
v_max = 0
if channels:
if channel in channels:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
events.enable = True
scale_val = channel_scales.get(channel, scale)
if not scale_val:
v_max = v_max or 1
scale_dict[channel] = 1 / v_max
else:
scale_dict[channel] = scale_val
return scale_dict
def _draw_table(self, figure,
channels: Dict[Channel, EventsOutputChannels],
dt: float):
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
if table_data:
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
max_size = self.style.max_table_ratio * figure.get_size_inches()[1]
max_rows = np.floor(max_size/self.style.fig_unit_h_table/ncols)
nrows = int(min(nrows, max_rows))
table_data = table_data[:int(nrows*ncols)]
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (figure.get_size_inches()[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
cell_value[r][3 * c + 0] = 't = %s' % time * dt
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
table.set_fontsize = self.style.table_font_size
else:
tb = None
ax = figure.add_subplot(111)
return tb, ax
@staticmethod
def _draw_snapshots(ax,
snapshot_channels: Dict[Channel, EventsOutputChannels],
y0: float) -> None:
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time, y0), xytext=(time, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax,
fcs: Dict[int, FrameChangeInstruction],
y0: float) -> bool:
for time in fcs.keys():
ax.text(x=time, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
def _draw_frequency_changes(self, ax,
sf: Dict[int, SetFrequency],
y0: float) -> bool:
for time in sf.keys():
ax.text(x=time, y=y0, s=r'$\leftrightsquigarrow$',
fontsize=self.style.icon_font_size,
ha='center', va='center', rotation=90)
def _get_channel_color(self, channel: Channel) -> str:
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
@staticmethod
def _prev_label_at_time(prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
time: int) -> bool:
for labels in prev_labels:
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax,
labels: Dict[int, Union[Waveform, Acquire]],
prev_labels: List[Dict[int, Union[Waveform, Acquire]]],
y0: float) -> None:
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2, y0),
xytext=((t0+tf)//2, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax,
output_channels: Dict[Channel, EventsOutputChannels],
interp_method: Callable,
t0: int, tf: int,
scale_dict: Dict[Channel, float],
label: bool = False,
framechange: bool = True,
frequencychange: bool = True) -> float:
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# scaling value of this channel
scale = 0.5 * scale_dict.get(channel, 0.5)
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float)
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
# when input schedule is empty or comprises only frame changes,
# we should avoid interpolation due to lack of data points.
# instead, it just returns vector of zero.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# Minimum amplitude scaled
amp_min = scale * abs(min(0, np.nanmin(re), np.nanmin(im)))
# scaling and offset
re = scale * re + y0
im = scale * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, y0)
# plot frequency changes
sf = events.frequencychanges
if sf and frequencychange:
self._draw_frequency_changes(ax, sf, y0 + scale)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=t0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
# show scaling factor
ax.text(x=t0, y=y0 - 0.1, s='x%.1f' % (2 * scale),
fontsize=0.7*self.style.axis_font_size,
ha='right', va='top')
# change the y0 offset for removing spacing when a channel has negative values
if self.style.remove_spacing:
y0 -= 0.5 + amp_min
else:
y0 -= 1
return y0
def draw(self, schedule: ScheduleComponent,
dt: float, interp_method: Callable,
plot_range: Tuple[Union[int, float], Union[int, float]],
scale: float = None,
channel_scales: Dict[Channel, float] = None,
plot_all: bool = True, table: bool = False,
label: bool = False, framechange: bool = True,
scaling: float = None, channels: List[Channel] = None,
show_framechange_channels: bool = True):
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
figure = plt.figure(dpi=self.style.dpi, figsize=self.style.figsize)
if channels is None:
channels = []
interp_method = interp_method or step_wise
if channel_scales is None:
channel_scales = {}
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]))
tf = int(np.floor(plot_range[1]))
else:
t0 = 0
# when input schedule is empty or comprises only frame changes,
# we need to overwrite pulse duration by an integer greater than zero,
# otherwise waveform returns empty array and matplotlib will be crashed.
if channels:
tf = schedule.ch_duration(*channels)
else:
tf = schedule.stop_time
tf = tf or 1
# prepare waveform channels
(schedule_channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels, t0, tf,
show_framechange_channels)
# count numbers of valid waveform
scale_dict = self._scale_channels(output_channels,
scale=scale,
channel_scales=channel_scales,
channels=channels,
plot_all=plot_all)
if table:
tb, ax = self._draw_table(figure, schedule_channels, dt)
else:
tb = None
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, scale_dict, label=label,
framechange=framechange)
y_ub = 0.5 + self.style.vertical_span
y_lb = y0 + 0.5 - self.style.vertical_span
self._draw_snapshots(ax, snapshot_channels, y_lb)
ax.set_xlim(t0, tf)
tick_labels = np.linspace(t0, tf, 5)
ax.set_xticks(tick_labels)
ax.set_xticklabels([self.style.axis_formatter % label for label in tick_labels * dt],
fontsize=self.style.axis_font_size)
ax.set_ylim(y_lb, y_ub)
ax.set_yticklabels([])
if tb is not None:
bbox = tb.get_position()
else:
bbox = ax.get_position()
# This check is here for backwards compatibility. Before, the check was around
# the suptitle line, however since the font style can take on a type of None
# we need to unfortunately check both the type and the value of the object.
if isinstance(self.style.title_font_size, int) and self.style.title_font_size > 0:
figure.suptitle(str(schedule.name),
fontsize=self.style.title_font_size,
y=bbox.y1 + 0.02,
va='bottom')
return figure
| true | true |
f7305d3f7c5dd7b9094f0a20b4e9f5a957e94535 | 1,443 | py | Python | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | numpylint/lintbits.py | perimosocordiae/numpylint | 67e6c077b393760bffe59524ede1d4904476a1ce | [
"MIT"
] | null | null | null | import numpy as np
# Dict of all the patterns with their replacements.
# Structure:
# name of replacement -> list of (pattern, replacement, kwargs) tuples
LINTBITS = {
'diagonal matrix dot product': [
# diag(x).dot(y)
('${diag}(${x}).dot(${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag')),
# dot(diag(x), y)
('${dot}(${diag}(${x}), ${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
# x.dot(diag(y))
('${x}.dot(${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag')),
# dot(x, diag(y))
('${dot}(${x}, ${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
],
'inverting result of in1d': [
# ~np.in1d(x, y)
('~${in1d}(${x}, ${y})', '${in1d}(${x}, ${y}, invert=True)',
dict(in1d='name=numpy.in1d')),
# ~np.in1d(x, y, assume_unique=z)
('~${in1d}(${x}, ${y}, assume_unique=${z})',
'${in1d}(${x}, ${y}, assume_unique=${z}, invert=True)',
dict(in1d='name=numpy.in1d')),
],
}
if np.lib.NumpyVersion(np.__version__) < '1.3.0':
# this bug was fixed in numpy 1.3.0
LINTBITS['in-place transpose'] = [
# x += x.T
('${x} += ${x}.T', '${x} = ${x} + ${x}.T', dict()),
# x += x.transpose()
('${x} += ${x}.transpose()', '${x} = ${x} + ${x}.T', dict()),
]
| 35.195122 | 72 | 0.444213 | import numpy as np
LINTBITS = {
'diagonal matrix dot product': [
('${diag}(${x}).dot(${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag')),
('${dot}(${diag}(${x}), ${y})', '((${x}) * (${y}).T).T',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
('${x}.dot(${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag')),
('${dot}(${x}, ${diag}(${y}))', '((${x}) * (${y}))',
dict(diag='name=numpy.diag', dot='name=numpy.dot')),
],
'inverting result of in1d': [
('~${in1d}(${x}, ${y})', '${in1d}(${x}, ${y}, invert=True)',
dict(in1d='name=numpy.in1d')),
('~${in1d}(${x}, ${y}, assume_unique=${z})',
'${in1d}(${x}, ${y}, assume_unique=${z}, invert=True)',
dict(in1d='name=numpy.in1d')),
],
}
if np.lib.NumpyVersion(np.__version__) < '1.3.0':
LINTBITS['in-place transpose'] = [
('${x} += ${x}.T', '${x} = ${x} + ${x}.T', dict()),
('${x} += ${x}.transpose()', '${x} = ${x} + ${x}.T', dict()),
]
| true | true |
f7305eb7c580daac5db72b2c4ac0c1258330b442 | 26,460 | py | Python | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | 1 | 2021-06-03T12:39:23.000Z | 2021-06-03T12:39:23.000Z | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | null | null | null | bert4keras/snippets.py | vecent-don/bert4keras | 3c31cbbf87d6574ddad038e4ea17a941ddd027dc | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
# 代码合集
import os, sys, six, re, json
import logging
import numpy as np
from collections import defaultdict
from bert4keras.backend import K, keras, tf
_open_ = open
is_py2 = six.PY2
if not is_py2:
basestring = str
def to_array(*args):
"""批量转numpy的array
"""
results = [np.array(a) for a in args]
if len(args) == 1:
return results[0]
else:
return results
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, basestring)
def strQ2B(ustring):
"""全角符号转对应的半角符号
"""
rstring = ''
for uchar in ustring:
inside_code = ord(uchar)
# 全角空格直接转换
if inside_code == 12288:
inside_code = 32
# 全角字符(除空格)根据关系转化
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += unichr(inside_code)
return rstring
def string_matching(s, keywords):
"""判断s是否至少包含keywords中的至少一个字符串
"""
for k in keywords:
if re.search(k, s):
return True
return False
def convert_to_unicode(text, encoding='utf-8', errors='ignore'):
"""字符串转换为unicode格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, str):
text = text.decode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
def convert_to_str(text, encoding='utf-8', errors='ignore'):
"""字符串转换为str格式(假设输入为utf-8格式)
"""
if is_py2:
if isinstance(text, unicode):
text = text.encode(encoding, errors=errors)
else:
if isinstance(text, bytes):
text = text.decode(encoding, errors=errors)
return text
class open:
"""模仿python自带的open函数
作用:1.主要是为了同时兼容py2和py3;2.增加了索引功能,方便读取大文件。
"""
def __init__(
self, name, mode='r', encoding=None, errors='strict', indexable=False
):
self.name = name
if is_py2:
self.file = _open_(name, mode)
else:
self.file = _open_(name, mode, encoding=encoding, errors=errors)
self.encoding = encoding
self.errors = errors
self.iterator = None
if indexable:
if is_string(indexable) and os.path.exists(indexable):
self.offsets = json.load(_open_(indexable))
else:
self.create_indexes()
if is_string(indexable):
json.dump(self.offsets, _open_(indexable, 'w'))
def create_indexes(self):
print('creating indexes ...')
self.offsets, offset = [], 0
pbar = keras.utils.Progbar(os.path.getsize(self.name))
while self.readline():
self.offsets.append(offset)
offset = self.tell()
pbar.update(offset)
self.seek(0)
print('indexes created.')
def __getitem__(self, key):
self.seek(self.offsets[key])
l = self.readline()
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
return l
def __len__(self):
return len(self.offsets)
def __iter__(self):
if hasattr(self, 'offsets'):
for i in range(len(self)):
yield self[i]
else:
for l in self.file:
if self.encoding:
l = convert_to_unicode(l, self.encoding, self.errors)
yield l
def next(self):
if self.iterator is None:
self.iterator = self.__iter__()
return next(self.iterator)
def __next__(self):
return self.next()
def read(self):
text = self.file.read()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readline(self):
text = self.file.readline()
if self.encoding:
text = convert_to_unicode(text, self.encoding, self.errors)
return text
def readlines(self):
if self.encoding:
return [
convert_to_unicode(text, self.encoding, self.errors)
for text in self.file.readlines()
]
else:
return self.file.readlines()
def write(self, text):
if self.encoding:
text = convert_to_str(text, self.encoding, self.errors)
self.file.write(text)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
def tell(self):
return self.file.tell()
def seek(self, offset=0):
return self.file.seek(offset)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def parallel_apply(
func,
iterable,
workers,
max_queue_size,
callback=None,
dummy=False,
random_seeds=True
):
"""多进程或多线程地将func应用到iterable的每个元素中。
注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是
输出可能是func(c), func(a), func(b)。
参数:
callback: 处理单个输出的回调函数;
dummy: False是多进程/线性,True则是多线程/线性;
random_seeds: 每个进程的随机种子。
"""
if dummy:
from multiprocessing.dummy import Pool, Queue
else:
from multiprocessing import Pool, Queue
in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
if random_seeds is True:
random_seeds = [None] * workers
elif random_seeds is None or random_seeds is False:
random_seeds = []
for seed in random_seeds:
seed_queue.put(seed)
def worker_step(in_queue, out_queue):
"""单步函数包装成循环执行
"""
if not seed_queue.empty():
np.random.seed(seed_queue.get())
while True:
i, d = in_queue.get()
r = func(d)
out_queue.put((i, r))
# 启动多进程/线程
pool = Pool(workers, worker_step, (in_queue, out_queue))
if callback is None:
results = []
# 后处理函数
def process_out_queue():
out_count = 0
for _ in range(out_queue.qsize()):
i, d = out_queue.get()
out_count += 1
if callback is None:
results.append((i, d))
else:
callback(d)
return out_count
# 存入数据,取出结果
in_count, out_count = 0, 0
for i, d in enumerate(iterable):
in_count += 1
while True:
try:
in_queue.put((i, d), block=False)
break
except six.moves.queue.Full:
out_count += process_out_queue()
if in_count % max_queue_size == 0:
out_count += process_out_queue()
while out_count != in_count:
out_count += process_out_queue()
pool.terminate()
if callback is None:
results = sorted(results, key=lambda r: r[0])
return [r[1] for r in results]
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
"""Numpy函数,将序列padding到同一长度
"""
if length is None:
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
elif not hasattr(length, '__getitem__'):
length = [length]
slices = [np.s_[:length[i]] for i in range(seq_dims)]
slices = tuple(slices) if len(slices) > 1 else slices[0]
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[slices]
for i in range(seq_dims):
if mode == 'post':
pad_width[i] = (0, length[i] - np.shape(x)[i])
elif mode == 'pre':
pad_width[i] = (length[i] - np.shape(x)[i], 0)
else:
raise ValueError('"mode" argument must be "post" or "pre".')
x = np.pad(x, pad_width, 'constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
def truncate_sequences(maxlen, indices, *sequences):
"""截断总长度至不超过maxlen
"""
sequences = [s for s in sequences if s]
if not isinstance(indices, (list, tuple)):
indices = [indices] * len(sequences)
while True:
lengths = [len(s) for s in sequences]
if sum(lengths) > maxlen:
i = np.argmax(lengths)
sequences[i].pop(indices[i])
else:
return sequences
def text_segmentate(text, maxlen, seps='\n', strips=None):
    """Recursively split ``text`` into pieces no longer than ``maxlen``.

    Separators in ``seps`` are tried in order: the text is split on
    ``seps[0]``, and any accumulated chunk still longer than ``maxlen``
    is split again with the remaining separators.  ``strips`` lists
    characters stripped from both ends of each piece.

    Returns:
        list of text pieces.
    """
    text = text.strip().strip(strips)
    if seps and len(text) > maxlen:
        pieces = text.split(seps[0])
        text, texts = '', []
        for i, p in enumerate(pieces):
            # Flush the accumulated chunk before it would exceed maxlen.
            if text and p and len(text) + len(p) > maxlen - 1:
                texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
                text = ''
            if i + 1 == len(pieces):
                text = text + p
            else:
                # Re-attach the separator that split() consumed.
                text = text + p + seps[0]
        if text:
            texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
        return texts
    else:
        return [text]
def is_one_of(x, ys):
    """Return True if ``x`` is (identity-wise) one of the items in ``ys``.

    Equivalent to ``x in ys`` except that only ``is`` comparisons are
    used, which avoids the errors ``==`` can raise on some objects
    (e.g. NumPy arrays).
    """
    return any(x is y for y in ys)
class DataGenerator(object):
    """Template base class for data generators.

    Subclasses implement ``__iter__`` to yield batches; ``sample`` and
    ``forfit`` provide shuffled / endless iteration on top of it.
    """
    def __init__(self, data, batch_size=32, buffer_size=None):
        # data: any iterable; if it has a length, the number of steps per
        # epoch is precomputed, otherwise it stays unknown (None).
        self.data = data
        self.batch_size = batch_size
        if hasattr(self.data, '__len__'):
            self.steps = len(self.data) // self.batch_size
            if len(self.data) % self.batch_size != 0:
                self.steps += 1
        else:
            self.steps = None
        # Shuffle-buffer size used when the data length is unknown.
        self.buffer_size = buffer_size or batch_size * 1000
    def __len__(self):
        return self.steps
    def sample(self, random=False):
        """Yield ``(is_end, sample)`` pairs over the data.

        ``is_end`` is True only for the last sample.  With ``random=True``
        the data is shuffled: via a full permutation when the length is
        known, otherwise via a bounded shuffle buffer.
        """
        if random:
            if self.steps is None:
                def generator():
                    # Unknown length: fill a buffer, then pop random items.
                    caches, isfull = [], False
                    for d in self.data:
                        caches.append(d)
                        if isfull:
                            i = np.random.randint(len(caches))
                            yield caches.pop(i)
                        elif len(caches) == self.buffer_size:
                            isfull = True
                    while caches:
                        i = np.random.randint(len(caches))
                        yield caches.pop(i)
            else:
                def generator():
                    # Known length: yield a random permutation.
                    for i in np.random.permutation(len(self.data)):
                        yield self.data[i]
            data = generator()
        else:
            data = iter(self.data)
        # Look one item ahead so the final sample can be flagged.
        d_current = next(data)
        for d_next in data:
            yield False, d_current
            d_current = d_next
        yield True, d_current
    def __iter__(self, random=False):
        # Subclasses must implement batching here.
        raise NotImplementedError
    def forfit(self, random=True):
        # Endless iterator (for keras fit-style training loops).
        while True:
            for d in self.__iter__(random):
                yield d
    def to_dataset(self, types, shapes, names=None, padded_batch=False):
        """Convert this generator into a ``tf.data.Dataset``.

        If ``names`` is given, the yielded data is wrapped into dict(s)
        keyed by those names (a single name, a list of names, or a
        nested list matching a tuple of outputs).
        """
        if names is None:
            generator = self.forfit
        else:
            if is_string(names):
                warps = lambda k, v: {k: v}
            elif is_string(names[0]):
                warps = lambda k, v: dict(zip(k, v))
            else:
                warps = lambda k, v: tuple(
                    dict(zip(i, j)) for i, j in zip(k, v)
                )
            def generator():
                for d in self.forfit():
                    yield warps(names, d)
            types = warps(names, types)
            shapes = warps(names, shapes)
        if padded_batch:
            dataset = tf.data.Dataset.from_generator(
                generator, output_types=types
            )
            dataset = dataset.padded_batch(self.batch_size, shapes)
        else:
            dataset = tf.data.Dataset.from_generator(
                generator, output_types=types, output_shapes=shapes
            )
            dataset = dataset.batch(self.batch_size)
        return dataset
class ViterbiDecoder(object):
    """Base class implementing Viterbi decoding over label sequences."""
    def __init__(self, trans, starts=None, ends=None):
        # trans: [num_labels, num_labels] transition score matrix.
        # starts/ends: optional whitelists of labels allowed at the
        # first/last position; all other labels get masked with -inf.
        self.trans = trans
        self.num_labels = len(trans)
        self.non_starts = []
        self.non_ends = []
        if starts is not None:
            for i in range(self.num_labels):
                if i not in starts:
                    self.non_starts.append(i)
        if ends is not None:
            for i in range(self.num_labels):
                if i not in ends:
                    self.non_ends.append(i)
    def decode(self, nodes):
        """Return the best label path for ``nodes`` of shape
        ``[seq_len, num_labels]`` (per-position emission scores).

        Note: ``nodes`` is modified in place by the start/end masking.
        """
        # Mask disallowed start/end labels.
        nodes[0, self.non_starts] -= np.inf
        nodes[-1, self.non_ends] -= np.inf
        # Dynamic programming over sequence positions.
        labels = np.arange(self.num_labels).reshape((1, -1))
        scores = nodes[0].reshape((-1, 1))
        paths = labels
        for l in range(1, len(nodes)):
            M = scores + self.trans + nodes[l].reshape((1, -1))
            idxs = M.argmax(0)
            scores = M.max(0).reshape((-1, 1))
            paths = np.concatenate([paths[:, idxs], labels], 0)
        # Best-scoring complete path.
        return paths[:, scores[:, 0].argmax()]
def softmax(x, axis=-1):
    """Numerically stable softmax along ``axis`` (NumPy version)."""
    shifted = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return shifted / np.sum(shifted, axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
    """Base class for decoding autoregressive generative models.

    Provides two decoding strategies: beam search and random sampling.
    """
    def __init__(self, start_id, end_id, maxlen, minlen=1):
        self.start_id = start_id
        self.end_id = end_id
        self.maxlen = maxlen
        self.minlen = minlen
        self.models = {}
        if start_id is None:
            self.first_output_ids = np.empty((1, 0), dtype=int)
        else:
            self.first_output_ids = np.array([[self.start_id]])
    @staticmethod
    def wraps(default_rtype='probas', use_states=False):
        """Decorator that completes a user-defined ``predict`` function.

        It takes care of: 1. the ``rtype`` argument ('probas'/'logits')
        and the corresponding conversion; 2. normalizing the handling of
        recurrent ``states``; 3. applying the temperature parameter.
        """
        def actual_decorator(predict):
            def new_predict(
                self,
                inputs,
                output_ids,
                states,
                temperature=1,
                rtype=default_rtype
            ):
                assert rtype in ['probas', 'logits']
                prediction = predict(self, inputs, output_ids, states)
                if not use_states:
                    prediction = (prediction, None)
                if default_rtype == 'logits':
                    prediction = (
                        softmax(prediction[0] / temperature), prediction[1]
                    )
                elif temperature != 1:
                    probas = np.power(prediction[0], 1.0 / temperature)
                    probas = probas / probas.sum(axis=-1, keepdims=True)
                    prediction = (probas, prediction[1])
                if rtype == 'probas':
                    return prediction
                else:
                    return np.log(prediction[0] + 1e-12), prediction[1]
            return new_predict
        return actual_decorator
    def last_token(self, model):
        """Build (and cache) a Model returning only the last token's
        output of ``model``."""
        if model not in self.models:
            outputs = [
                keras.layers.Lambda(lambda x: x[:, -1])(output)
                for output in model.outputs
            ]
            self.models[model] = keras.models.Model(model.inputs, outputs)
        return self.models[model]
    def predict(self, inputs, output_ids, states=None):
        """User-defined step prediction function.

        It must be decorated with ``wraps``, passing ``default_rtype``
        ('logits' or 'probas') and ``use_states``.  With rtype='probas'
        it returns normalized probabilities; with rtype='logits' it
        returns pre-softmax scores (log-probabilities).
        Returns: a tuple ``(scores_or_probas, states)``.
        """
        raise NotImplementedError
    def beam_search(self, inputs, topk, states=None, temperature=1, min_ends=1):
        """Beam-search decoding; ``topk`` is the beam size.

        Returns the best decoded sequence.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores, states = self.predict(
                inputs, output_ids, states, temperature, 'logits'
            )  # current step scores
            if step == 0:  # after the first step, tile the inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulated scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep only topk
            indices_1 = indices // scores.shape[1]  # row (beam) index
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # column (token) index
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # update outputs
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # update scores
            end_counts = (output_ids == self.end_id).sum(1)  # count end markers
            if output_ids.shape[1] >= self.minlen:  # minimum length reached
                best_one = output_scores.argmax()  # best-scoring beam
                if end_counts[best_one] == min_ends:  # already finished
                    return output_ids[best_one]  # emit it directly
                else:  # otherwise keep only unfinished beams
                    flag = (end_counts < min_ends)  # mark unfinished sequences
                    if not flag.all():  # some beams finished
                        inputs = [i[flag] for i in inputs]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished scores
                        end_counts = end_counts[flag]  # drop finished end counts
                        topk = flag.sum()  # shrink beam size accordingly
        # Max length reached: emit the best remaining beam.
        return output_ids[output_scores.argmax()]
    def random_sample(
        self,
        inputs,
        n,
        topk=None,
        topp=None,
        states=None,
        temperature=1,
        min_ends=1
    ):
        """Draw ``n`` sequences by random sampling.

        A non-None ``topk`` samples only from the ``topk`` most probable
        tokens each step; a non-None ``topp`` samples from the smallest
        set of top tokens whose probabilities sum to at least ``topp``
        (nucleus sampling).
        Returns: a list of ``n`` decoded sequences.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids = self.first_output_ids
        results = []
        for step in range(self.maxlen):
            probas, states = self.predict(
                inputs, output_ids, states, temperature, 'probas'
            )  # current step probabilities
            probas /= probas.sum(axis=1, keepdims=True)  # ensure normalization
            if step == 0:  # after the first step, tile everything n times
                probas = np.repeat(probas, n, axis=0)
                inputs = [np.repeat(i, n, axis=0) for i in inputs]
                output_ids = np.repeat(output_ids, n, axis=0)
            if topk is not None:
                k_indices = probas.argpartition(-topk,
                                                axis=1)[:, -topk:]  # keep only topk
                probas = np.take_along_axis(probas, k_indices, axis=1)  # topk probabilities
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            if topp is not None:
                p_indices = probas.argsort(axis=1)[:, ::-1]  # sort descending
                probas = np.take_along_axis(probas, p_indices, axis=1)  # sorted probabilities
                cumsum_probas = np.cumsum(probas, axis=1)  # cumulative probabilities
                flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # mark the tail beyond topp
                flag[:, 0] = False  # combined with np.roll: shifts the mask by one
                probas[flag] = 0  # zero out the tail
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            sample_func = lambda p: np.random.choice(len(p), p=p)  # sample by probability
            sample_ids = np.apply_along_axis(sample_func, 1, probas)  # perform sampling
            sample_ids = sample_ids.reshape((-1, 1))  # align shapes
            if topp is not None:
                sample_ids = np.take_along_axis(
                    p_indices, sample_ids, axis=1
                )  # map back to original ids
            if topk is not None:
                sample_ids = np.take_along_axis(
                    k_indices, sample_ids, axis=1
                )  # map back to original ids
            output_ids = np.concatenate([output_ids, sample_ids], 1)  # update outputs
            end_counts = (output_ids == self.end_id).sum(1)  # count end markers
            if output_ids.shape[1] >= self.minlen:  # minimum length reached
                flag = (end_counts == min_ends)  # mark finished sequences
                if flag.any():  # some sequences finished
                    for ids in output_ids[flag]:  # store the finished sequences
                        results.append(ids)
                    flag = (flag == False)  # mark unfinished sequences
                    inputs = [i[flag] for i in inputs]  # keep only unfinished inputs
                    output_ids = output_ids[flag]  # keep only unfinished candidates
                    end_counts = end_counts[flag]  # keep only unfinished end counts
                    if len(output_ids) == 0:
                        break
        # Any still-unfinished sequences go straight into the results.
        for ids in output_ids:
            results.append(ids)
        # Return all collected sequences.
        return results
def insert_arguments(**arguments):
    """Decorator factory that injects default attributes via ``self``
    (mainly intended for a class's ``__init__``).

    Each keyword given here becomes ``setattr(self, name, value)`` before
    the wrapped method runs; a matching keyword passed by the caller
    overrides the default and is removed from ``kwargs`` so the wrapped
    method never sees it.
    """
    def actual_decorator(func):
        # Local import: the module header does not import functools.
        from functools import wraps

        @wraps(func)  # fix: preserve the wrapped method's name/docstring
        def new_func(self, *args, **kwargs):
            for name, default in arguments.items():
                # Caller-supplied value wins over the declared default.
                setattr(self, name, kwargs.pop(name, default))
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def delete_arguments(*arguments):
    """Decorator factory that forbids the listed keyword arguments
    (mainly intended for a class's ``__init__``).

    Calling the wrapped method with any of these keywords raises
    TypeError, mimicking an unknown-keyword error.
    """
    def actual_decorator(func):
        # Local import: the module header does not import functools.
        from functools import wraps

        @wraps(func)  # fix: preserve the wrapped method's name/docstring
        def new_func(self, *args, **kwargs):
            for k in arguments:
                if k in kwargs:
                    raise TypeError(
                        '%s got an unexpected keyword argument \'%s\'' %
                        (self.__class__.__name__, k)
                    )
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def longest_common_substring(source, target):
    """Longest common contiguous substring of ``source`` and ``target``.

    Returns ``(length, (s_start, s_end, t_start, t_end))`` where the two
    half-open slices ``source[s_start:s_end]`` and
    ``target[t_start:t_end]`` are equal.  If several substrings tie, one
    of them is returned.
    """
    table = defaultdict(int)
    best_len, best_span = 0, (0, 0, 0, 0)
    for i, s_ch in enumerate(source, 1):
        for j, t_ch in enumerate(target, 1):
            if s_ch != t_ch:
                continue
            # Extend the diagonal run of matching characters.
            table[i, j] = table[i - 1, j - 1] + 1
            if table[i, j] > best_len:
                best_len = table[i, j]
                best_span = (i - best_len, i, j - best_len, j)
    return best_len, best_span
def longest_common_subsequence(source, target):
    """Longest (not necessarily contiguous) common subsequence.

    Returns ``(length, mapping)`` where ``mapping`` is a list of
    ``(source_index, target_index)`` pairs, in increasing order, of one
    optimal alignment (ties broken arbitrarily).
    """
    # Forward pass: classic LCS length table (1-based indices).
    table = defaultdict(int)
    for i, s_ch in enumerate(source, 1):
        for j, t_ch in enumerate(target, 1):
            if s_ch == t_ch:
                table[i, j] = table[i - 1, j - 1] + 1
            elif table[i, j - 1] > table[i - 1, j]:
                table[i, j] = table[i, j - 1]
            else:
                table[i, j] = table[i - 1, j]
    total = table[len(source), len(target)]
    # Backward pass: recover one alignment by walking the table.
    mapping = []
    i, j = len(source) - 1, len(target) - 1
    while len(mapping) < total:
        if source[i] == target[j]:
            mapping.append((i, j))
            i -= 1
            j -= 1
        elif table[i + 1, j] > table[i, j + 1]:
            j -= 1
        else:
            i -= 1
    mapping.reverse()
    return total, mapping
class WebServing(object):
    """A minimal web-serving helper.

    Usage:
        arguments = {'text': (None, True), 'n': (int, False)}
        web = WebServing(port=8864)
        web.route('/gen_synonyms', gen_synonyms, arguments)
        web.start()
        # then visit http://127.0.0.1:8864/gen_synonyms?text=hello
    Notes:
        A thin wrapper around bottlepy intended only for quick local
        testing; no performance guarantees.  Currently only guaranteed
        to support Tensorflow 1.x with Keras <= 2.3.1.
        Contributions from experienced developers are welcome.
    Dependencies:
        pip install bottle
        pip install paste
        (paste is only needed when server='paste' is used.)
    """
    def __init__(self, host='0.0.0.0', port=8000, server='paste'):
        import bottle
        self.host = host
        self.port = port
        self.server = server
        # Capture the TF graph/session so handlers can run inside them later.
        self.graph = tf.get_default_graph()
        self.sess = K.get_session()
        self.set_session = K.set_session
        self.bottle = bottle
    def wraps(self, func, arguments, method='GET'):
        """Wrap ``func`` as a request-handler function.

        Args:
            func: function to expose; its return value must be JSON
                serializable, i.e. json.dumps(func(inputs)) must succeed.
            arguments: declares func's parameters; each key is a
                parameter name, value[0] is a converter applied to the
                raw (string) request value (or None for no conversion),
                value[1] says whether the parameter is required.
            method: 'GET' or 'POST'.
        """
        def new_func():
            outputs = {'code': 0, 'desc': u'succeeded', 'data': {}}
            kwargs = {}
            for key, value in arguments.items():
                if method == 'GET':
                    result = self.bottle.request.GET.getunicode(key)
                else:
                    result = self.bottle.request.POST.getunicode(key)
                if result is None:
                    # Required argument missing -> error code 1.
                    if value[1]:
                        outputs['code'] = 1
                        outputs['desc'] = 'lack of "%s" argument' % key
                        return json.dumps(outputs, ensure_ascii=False)
                else:
                    if value[0] is not None:
                        result = value[0](result)
                    kwargs[key] = result
            try:
                # Run inside the captured TF graph/session.
                with self.graph.as_default():
                    self.set_session(self.sess)
                    outputs['data'] = func(**kwargs)
            except Exception as e:
                # Any handler failure -> error code 2 with the message.
                outputs['code'] = 2
                outputs['desc'] = str(e)
            return json.dumps(outputs, ensure_ascii=False)
        return new_func
    def route(self, path, func, arguments, method='GET'):
        """Register ``func`` as the handler for ``path``."""
        func = self.wraps(func, arguments, method)
        self.bottle.route(path, method=method)(func)
    def start(self):
        """Start the bottle server (blocking call)."""
        self.bottle.run(host=self.host, port=self.port, server=self.server)
class Hook:
    """Module proxy that defers importing ``uniout`` until first access."""
    def __init__(self, module):
        self.module = module
    def __getattr__(self, attr):
        """Make ``from bert4keras.backend import uniout`` behave like
        ``import uniout`` (Python-2 only; a no-op on Python 3).  Any
        other attribute is forwarded to the wrapped module.
        """
        if attr == 'uniout':
            if is_py2:
                import uniout
        else:
            return getattr(self.module, attr)
Hook.__name__ = __name__
# Replace this module object in sys.modules with the lazy-import proxy.
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| 30.875146 | 80 | 0.524339 |
import os, sys, six, re, json
import logging
import numpy as np
from collections import defaultdict
from bert4keras.backend import K, keras, tf
_open_ = open
is_py2 = six.PY2
if not is_py2:
basestring = str
def to_array(*args):
    """Convert each argument to an ``np.array``.

    A single argument yields a lone array; multiple arguments yield a
    list of arrays.
    """
    converted = list(map(np.array, args))
    return converted[0] if len(converted) == 1 else converted
def is_string(s):
    # True for any text type (str on Py3; str/unicode via the
    # module-level `basestring` alias on Py2).
    return isinstance(s, basestring)
def strQ2B(ustring):
    """Convert full-width (quanjiao) characters to half-width (banjiao).

    The ideographic space U+3000 maps to an ASCII space and the
    full-width forms U+FF01..U+FF5E map to their ASCII counterparts;
    every other character is kept unchanged.
    """
    # Fix: `unichr` only exists on Python 2; the original call raised
    # NameError on Python 3.  Fall back to `chr` when it is missing.
    try:
        _chr = unichr  # noqa: F821 -- Python 2 builtin
    except NameError:
        _chr = chr
    chars = []
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:  # IDEOGRAPHIC SPACE -> ASCII space
            inside_code = 32
        elif 65281 <= inside_code <= 65374:  # full-width ASCII block
            inside_code -= 65248
        chars.append(_chr(inside_code))
    return ''.join(chars)
def string_matching(s, keywords):
    """Return True if ``s`` matches any regex pattern in ``keywords``."""
    return any(re.search(pattern, s) for pattern in keywords)
def convert_to_unicode(text, encoding='utf-8', errors='ignore'):
    # Normalize to the text type: decode byte strings (Py2 `str`,
    # Py3 `bytes`) with the given encoding; pass text through unchanged.
    if is_py2:
        if isinstance(text, str):
            text = text.decode(encoding, errors=errors)
    else:
        if isinstance(text, bytes):
            text = text.decode(encoding, errors=errors)
    return text
def convert_to_str(text, encoding='utf-8', errors='ignore'):
    # Normalize to the native `str` type: on Py2 encode unicode to bytes,
    # on Py3 decode bytes to str.
    if is_py2:
        if isinstance(text, unicode):
            text = text.encode(encoding, errors=errors)
    else:
        if isinstance(text, bytes):
            text = text.decode(encoding, errors=errors)
    return text
class open:
    """Drop-in replacement for the builtin ``open`` with two extras:
    uniform Py2/Py3 encoding handling, and optional random line access.

    When ``indexable`` is truthy, the byte offset of every line is
    recorded so the file supports ``len()`` and ``f[i]``; if
    ``indexable`` is a path string, the offset index is loaded from /
    saved to that JSON file.
    """
    def __init__(
        self, name, mode='r', encoding=None, errors='strict', indexable=False
    ):
        self.name = name
        if is_py2:
            self.file = _open_(name, mode)
        else:
            self.file = _open_(name, mode, encoding=encoding, errors=errors)
        self.encoding = encoding
        self.errors = errors
        self.iterator = None
        if indexable:
            if is_string(indexable) and os.path.exists(indexable):
                # Reuse a previously saved offset index.
                self.offsets = json.load(_open_(indexable))
            else:
                self.create_indexes()
                if is_string(indexable):
                    json.dump(self.offsets, _open_(indexable, 'w'))
    def create_indexes(self):
        # Scan the whole file once, recording each line's byte offset.
        print('creating indexes ...')
        self.offsets, offset = [], 0
        pbar = keras.utils.Progbar(os.path.getsize(self.name))
        while self.readline():
            self.offsets.append(offset)
            offset = self.tell()
            pbar.update(offset)
        self.seek(0)
        print('indexes created.')
    def __getitem__(self, key):
        # Random access to line ``key`` via the precomputed offsets.
        self.seek(self.offsets[key])
        l = self.readline()
        if self.encoding:
            l = convert_to_unicode(l, self.encoding, self.errors)
        return l
    def __len__(self):
        return len(self.offsets)
    def __iter__(self):
        if hasattr(self, 'offsets'):
            # Indexed mode: iterate by line offset.
            for i in range(len(self)):
                yield self[i]
        else:
            for l in self.file:
                if self.encoding:
                    l = convert_to_unicode(l, self.encoding, self.errors)
                yield l
    def next(self):
        # Py2-style next(); all calls share one lazily created iterator.
        if self.iterator is None:
            self.iterator = self.__iter__()
        return next(self.iterator)
    def __next__(self):
        return self.next()
    def read(self):
        text = self.file.read()
        if self.encoding:
            text = convert_to_unicode(text, self.encoding, self.errors)
        return text
    def readline(self):
        text = self.file.readline()
        if self.encoding:
            text = convert_to_unicode(text, self.encoding, self.errors)
        return text
    def readlines(self):
        if self.encoding:
            return [
                convert_to_unicode(text, self.encoding, self.errors)
                for text in self.file.readlines()
            ]
        else:
            return self.file.readlines()
    def write(self, text):
        if self.encoding:
            text = convert_to_str(text, self.encoding, self.errors)
        self.file.write(text)
    def flush(self):
        self.file.flush()
    def close(self):
        self.file.close()
    def tell(self):
        return self.file.tell()
    def seek(self, offset=0):
        return self.file.seek(offset)
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        self.close()
def parallel_apply(
    func,
    iterable,
    workers,
    max_queue_size,
    callback=None,
    dummy=False,
    random_seeds=True
):
    """Apply ``func`` to every item of ``iterable`` using multiple
    workers (processes, or threads when ``dummy=True``).

    If ``callback`` is None, the results are collected and returned in
    input order; otherwise ``callback`` is invoked on each result as it
    arrives and nothing is returned.  ``random_seeds`` controls
    per-worker numpy seeding: True reseeds each worker from entropy,
    a list supplies explicit seeds, and None/False disables seeding.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue
    in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
    if random_seeds is True:
        random_seeds = [None] * workers
    elif random_seeds is None or random_seeds is False:
        random_seeds = []
    for seed in random_seeds:
        seed_queue.put(seed)
    def worker_step(in_queue, out_queue):
        # Runs in each worker: seed numpy once, then process items forever.
        if not seed_queue.empty():
            np.random.seed(seed_queue.get())
        while True:
            i, d = in_queue.get()
            r = func(d)
            out_queue.put((i, r))
    # Start the pool; each worker runs worker_step as its initializer.
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    if callback is None:
        results = []
    def process_out_queue():
        # Drain currently available results; returns how many were taken.
        out_count = 0
        for _ in range(out_queue.qsize()):
            i, d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append((i, d))
            else:
                callback(d)
        return out_count
    # Feed inputs, draining outputs whenever the input queue is full.
    in_count, out_count = 0, 0
    for i, d in enumerate(iterable):
        in_count += 1
        while True:
            try:
                in_queue.put((i, d), block=False)
                break
            except six.moves.queue.Full:
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()
    while out_count != in_count:
        out_count += process_out_queue()
    pool.terminate()
    if callback is None:
        # Restore input order before returning.
        results = sorted(results, key=lambda r: r[0])
        return [r[1] for r in results]
def sequence_padding(inputs, length=None, value=0, seq_dims=1, mode='post'):
if length is None:
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
elif not hasattr(length, '__getitem__'):
length = [length]
slices = [np.s_[:length[i]] for i in range(seq_dims)]
slices = tuple(slices) if len(slices) > 1 else slices[0]
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[slices]
for i in range(seq_dims):
if mode == 'post':
pad_width[i] = (0, length[i] - np.shape(x)[i])
elif mode == 'pre':
pad_width[i] = (length[i] - np.shape(x)[i], 0)
else:
raise ValueError('"mode" argument must be "post" or "pre".')
x = np.pad(x, pad_width, 'constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
def truncate_sequences(maxlen, indices, *sequences):
sequences = [s for s in sequences if s]
if not isinstance(indices, (list, tuple)):
indices = [indices] * len(sequences)
while True:
lengths = [len(s) for s in sequences]
if sum(lengths) > maxlen:
i = np.argmax(lengths)
sequences[i].pop(indices[i])
else:
return sequences
def text_segmentate(text, maxlen, seps='\n', strips=None):
text = text.strip().strip(strips)
if seps and len(text) > maxlen:
pieces = text.split(seps[0])
text, texts = '', []
for i, p in enumerate(pieces):
if text and p and len(text) + len(p) > maxlen - 1:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
text = ''
if i + 1 == len(pieces):
text = text + p
else:
text = text + p + seps[0]
if text:
texts.extend(text_segmentate(text, maxlen, seps[1:], strips))
return texts
else:
return [text]
def is_one_of(x, ys):
for y in ys:
if x is y:
return True
return False
class DataGenerator(object):
def __init__(self, data, batch_size=32, buffer_size=None):
self.data = data
self.batch_size = batch_size
if hasattr(self.data, '__len__'):
self.steps = len(self.data) // self.batch_size
if len(self.data) % self.batch_size != 0:
self.steps += 1
else:
self.steps = None
self.buffer_size = buffer_size or batch_size * 1000
def __len__(self):
return self.steps
def sample(self, random=False):
if random:
if self.steps is None:
def generator():
caches, isfull = [], False
for d in self.data:
caches.append(d)
if isfull:
i = np.random.randint(len(caches))
yield caches.pop(i)
elif len(caches) == self.buffer_size:
isfull = True
while caches:
i = np.random.randint(len(caches))
yield caches.pop(i)
else:
def generator():
for i in np.random.permutation(len(self.data)):
yield self.data[i]
data = generator()
else:
data = iter(self.data)
d_current = next(data)
for d_next in data:
yield False, d_current
d_current = d_next
yield True, d_current
def __iter__(self, random=False):
raise NotImplementedError
def forfit(self, random=True):
while True:
for d in self.__iter__(random):
yield d
def to_dataset(self, types, shapes, names=None, padded_batch=False):
if names is None:
generator = self.forfit
else:
if is_string(names):
warps = lambda k, v: {k: v}
elif is_string(names[0]):
warps = lambda k, v: dict(zip(k, v))
else:
warps = lambda k, v: tuple(
dict(zip(i, j)) for i, j in zip(k, v)
)
def generator():
for d in self.forfit():
yield warps(names, d)
types = warps(names, types)
shapes = warps(names, shapes)
if padded_batch:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types
)
dataset = dataset.padded_batch(self.batch_size, shapes)
else:
dataset = tf.data.Dataset.from_generator(
generator, output_types=types, output_shapes=shapes
)
dataset = dataset.batch(self.batch_size)
return dataset
class ViterbiDecoder(object):
def __init__(self, trans, starts=None, ends=None):
self.trans = trans
self.num_labels = len(trans)
self.non_starts = []
self.non_ends = []
if starts is not None:
for i in range(self.num_labels):
if i not in starts:
self.non_starts.append(i)
if ends is not None:
for i in range(self.num_labels):
if i not in ends:
self.non_ends.append(i)
def decode(self, nodes):
nodes[0, self.non_starts] -= np.inf
nodes[-1, self.non_ends] -= np.inf
labels = np.arange(self.num_labels).reshape((1, -1))
scores = nodes[0].reshape((-1, 1))
paths = labels
for l in range(1, len(nodes)):
M = scores + self.trans + nodes[l].reshape((1, -1))
idxs = M.argmax(0)
scores = M.max(0).reshape((-1, 1))
paths = np.concatenate([paths[:, idxs], labels], 0)
return paths[:, scores[:, 0].argmax()]
def softmax(x, axis=-1):
x = x - x.max(axis=axis, keepdims=True)
x = np.exp(x)
return x / x.sum(axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
def __init__(self, start_id, end_id, maxlen, minlen=1):
self.start_id = start_id
self.end_id = end_id
self.maxlen = maxlen
self.minlen = minlen
self.models = {}
if start_id is None:
self.first_output_ids = np.empty((1, 0), dtype=int)
else:
self.first_output_ids = np.array([[self.start_id]])
@staticmethod
def wraps(default_rtype='probas', use_states=False):
def actual_decorator(predict):
def new_predict(
self,
inputs,
output_ids,
states,
temperature=1,
rtype=default_rtype
):
assert rtype in ['probas', 'logits']
prediction = predict(self, inputs, output_ids, states)
if not use_states:
prediction = (prediction, None)
if default_rtype == 'logits':
prediction = (
softmax(prediction[0] / temperature), prediction[1]
)
elif temperature != 1:
probas = np.power(prediction[0], 1.0 / temperature)
probas = probas / probas.sum(axis=-1, keepdims=True)
prediction = (probas, prediction[1])
if rtype == 'probas':
return prediction
else:
return np.log(prediction[0] + 1e-12), prediction[1]
return new_predict
return actual_decorator
def last_token(self, model):
if model not in self.models:
outputs = [
keras.layers.Lambda(lambda x: x[:, -1])(output)
for output in model.outputs
]
self.models[model] = keras.models.Model(model.inputs, outputs)
return self.models[model]
def predict(self, inputs, output_ids, states=None):
raise NotImplementedError
def beam_search(self, inputs, topk, states=None, temperature=1, min_ends=1):
inputs = [np.array([i]) for i in inputs]
output_ids, output_scores = self.first_output_ids, np.zeros(1)
for step in range(self.maxlen):
scores, states = self.predict(
inputs, output_ids, states, temperature, 'logits'
)
if step == 0:
inputs = [np.repeat(i, topk, axis=0) for i in inputs]
scores = output_scores.reshape((-1, 1)) + scores
indices = scores.argpartition(-topk, axis=None)[-topk:]
indices_1 = indices // scores.shape[1]
indices_2 = (indices % scores.shape[1]).reshape((-1, 1))
output_ids = np.concatenate([output_ids[indices_1], indices_2],
1)
output_scores = np.take_along_axis(
scores, indices, axis=None
)
end_counts = (output_ids == self.end_id).sum(1)
if output_ids.shape[1] >= self.minlen:
best_one = output_scores.argmax()
if end_counts[best_one] == min_ends:
return output_ids[best_one]
else:
flag = (end_counts < min_ends)
if not flag.all():
inputs = [i[flag] for i in inputs]
output_ids = output_ids[flag]
output_scores = output_scores[flag]
end_counts = end_counts[flag]
topk = flag.sum()
return output_ids[output_scores.argmax()]
def random_sample(
self,
inputs,
n,
topk=None,
topp=None,
states=None,
temperature=1,
min_ends=1
):
inputs = [np.array([i]) for i in inputs]
output_ids = self.first_output_ids
results = []
for step in range(self.maxlen):
probas, states = self.predict(
inputs, output_ids, states, temperature, 'probas'
)
probas /= probas.sum(axis=1, keepdims=True)
if step == 0:
probas = np.repeat(probas, n, axis=0)
inputs = [np.repeat(i, n, axis=0) for i in inputs]
output_ids = np.repeat(output_ids, n, axis=0)
if topk is not None:
k_indices = probas.argpartition(-topk,
axis=1)[:, -topk:]
probas = np.take_along_axis(probas, k_indices, axis=1)
probas /= probas.sum(axis=1, keepdims=True)
if topp is not None:
p_indices = probas.argsort(axis=1)[:, ::-1]
probas = np.take_along_axis(probas, p_indices, axis=1)
cumsum_probas = np.cumsum(probas, axis=1)
flag = np.roll(cumsum_probas >= topp, 1, axis=1)
flag[:, 0] = False
probas[flag] = 0
probas /= probas.sum(axis=1, keepdims=True)
sample_func = lambda p: np.random.choice(len(p), p=p)
sample_ids = np.apply_along_axis(sample_func, 1, probas)
sample_ids = sample_ids.reshape((-1, 1))
if topp is not None:
sample_ids = np.take_along_axis(
p_indices, sample_ids, axis=1
)
if topk is not None:
sample_ids = np.take_along_axis(
k_indices, sample_ids, axis=1
)
output_ids = np.concatenate([output_ids, sample_ids], 1)
end_counts = (output_ids == self.end_id).sum(1)
if output_ids.shape[1] >= self.minlen:
flag = (end_counts == min_ends)
if flag.any():
for ids in output_ids[flag]:
results.append(ids)
flag = (flag == False)
inputs = [i[flag] for i in inputs]
output_ids = output_ids[flag]
end_counts = end_counts[flag]
if len(output_ids) == 0:
break
for ids in output_ids:
results.append(ids)
return results
def insert_arguments(**arguments):
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k, v in arguments.items():
if k in kwargs:
v = kwargs.pop(k)
setattr(self, k, v)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
def delete_arguments(*arguments):
def actual_decorator(func):
def new_func(self, *args, **kwargs):
for k in arguments:
if k in kwargs:
raise TypeError(
'%s got an unexpected keyword argument \'%s\'' %
(self.__class__.__name__, k)
)
return func(self, *args, **kwargs)
return new_func
return actual_decorator
def longest_common_substring(source, target):
c, l, span = defaultdict(int), 0, (0, 0, 0, 0)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
if c[i, j] > l:
l = c[i, j]
span = (i - l, i, j - l, j)
return l, span
def longest_common_subsequence(source, target):
c = defaultdict(int)
for i, si in enumerate(source, 1):
for j, tj in enumerate(target, 1):
if si == tj:
c[i, j] = c[i - 1, j - 1] + 1
elif c[i, j - 1] > c[i - 1, j]:
c[i, j] = c[i, j - 1]
else:
c[i, j] = c[i - 1, j]
l, mapping = c[len(source), len(target)], []
i, j = len(source) - 1, len(target) - 1
while len(mapping) < l:
if source[i] == target[j]:
mapping.append((i, j))
i, j = i - 1, j - 1
elif c[i + 1, j] > c[i, j + 1]:
j = j - 1
else:
i = i - 1
return l, mapping[::-1]
class WebServing(object):
def __init__(self, host='0.0.0.0', port=8000, server='paste'):
import bottle
self.host = host
self.port = port
self.server = server
self.graph = tf.get_default_graph()
self.sess = K.get_session()
self.set_session = K.set_session
self.bottle = bottle
def wraps(self, func, arguments, method='GET'):
def new_func():
outputs = {'code': 0, 'desc': u'succeeded', 'data': {}}
kwargs = {}
for key, value in arguments.items():
if method == 'GET':
result = self.bottle.request.GET.getunicode(key)
else:
result = self.bottle.request.POST.getunicode(key)
if result is None:
if value[1]:
outputs['code'] = 1
outputs['desc'] = 'lack of "%s" argument' % key
return json.dumps(outputs, ensure_ascii=False)
else:
if value[0] is not None:
result = value[0](result)
kwargs[key] = result
try:
with self.graph.as_default():
self.set_session(self.sess)
outputs['data'] = func(**kwargs)
except Exception as e:
outputs['code'] = 2
outputs['desc'] = str(e)
return json.dumps(outputs, ensure_ascii=False)
return new_func
def route(self, path, func, arguments, method='GET'):
func = self.wraps(func, arguments, method)
self.bottle.route(path, method=method)(func)
def start(self):
self.bottle.run(host=self.host, port=self.port, server=self.server)
class Hook:
def __init__(self, module):
self.module = module
def __getattr__(self, attr):
if attr == 'uniout':
if is_py2:
import uniout
else:
return getattr(self.module, attr)
Hook.__name__ = __name__
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| true | true |
f7305f172f99aa4ee10baf90adde0cdb5a91636b | 432 | py | Python | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | 2 | 2018-02-03T05:35:52.000Z | 2018-02-05T21:00:18.000Z | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | altair/examples/__init__.py | jakevdp/altair2 | 46d391034c5b72867c9e4d01f3a7c7c536533add | [
"BSD-3-Clause"
] | null | null | null | import os
import json
def iter_example_names():
    """Yield the filenames in this package's ``spec`` directory, sorted."""
    specdir = os.path.join(os.path.dirname(__file__), 'spec')
    for spec in sorted(os.listdir(specdir)):
        yield spec
def load_example(name):
    """Load and parse the JSON example named ``name`` from the ``spec`` directory."""
    filename = os.path.join(os.path.dirname(__file__), 'spec', name)
    with open(filename, 'r') as f:
        return json.load(f)
def iter_example_json():
    """Yield every example spec as a parsed JSON object."""
    for name in iter_example_names():
        yield load_example(name)
| 21.6 | 68 | 0.671296 | import os
import json
def iter_example_names():
specdir = os.path.join(os.path.dirname(__file__), 'spec')
for spec in sorted(os.listdir(specdir)):
yield spec
def load_example(name):
filename = os.path.join(os.path.dirname(__file__), 'spec', name)
with open(filename, 'r') as f:
return json.load(f)
def iter_example_json():
for name in iter_example_names():
yield load_example(name)
| true | true |
f7305ff6dcb783b887e7c18ac02bfe8c87cfeacf | 112 | py | Python | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | 3 | 2019-06-30T21:09:05.000Z | 2021-05-09T17:56:19.000Z | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | null | null | null | vl/wsgi.py | verifid/vl | 39fb3056658fbc2360eb3d8bfcd74bdcfd12cc67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from vl.app import app as application
if __name__ == "__main__":
    # When executed directly, run the application's built-in server.
    # (`application` is the conventional name WSGI servers look for
    # when importing this module.)
    application.run()
| 16 | 37 | 0.705357 |
from vl.app import app as application
if __name__ == "__main__":
application.run()
| true | true |
f73060075d4c066e5e8285761903993ce0cf2935 | 31,110 | py | Python | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | 1 | 2015-05-18T14:31:20.000Z | 2015-05-18T14:31:20.000Z | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | pymatgen/core/composition.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import collections
import numbers
import re
import string
import six
from six.moves import filter, map, zip
from fractions import Fraction
from functools import total_ordering
from monty.fractions import gcd
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import unitized
@total_ordering
class Composition(collections.Mapping, collections.Hashable, PMGSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Specie. Elements and Specie
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that many Composition related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
"""
Tolerance in distinguishing different composition amounts.
1e-8 is fairly tight, but should cut out most floating point arithmetic
errors.
"""
amount_tolerance = 1e-8
"""
Special formula handling for peroxides and certain elements. This is so
that formula output does not write LiO instead of Li2O2 for example.
"""
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs): #allow_negative=False
"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Specie: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
allow_negative: Whether to allow negative compositions. This
argument must be popped from the \*\*kwargs due to \*args
ambiguity.
"""
self.allow_negative = kwargs.pop('allow_negative', False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]._elmap
elif len(args) == 1 and isinstance(args[0], six.string_types):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
self._elmap = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
self._elmap[get_el_sp(k)] = v
self._natoms += abs(v)
def __getitem__(self, el):
"""
Get the amount for element.
"""
return self._elmap.get(get_el_sp(el), 0)
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
# compositions elements
if len(self) != len(other):
return False
for el, v in self._elmap.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
CompositionError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el in self._elmap.keys():
hashcode += el.Z
return hashcode
def __contains__(self, el):
return el in self._elmap
def __len__(self):
return len(self._elmap)
def __iter__(self):
return self._elmap.__iter__()
@property
def average_electroneg(self):
return sum((el.X * abs(amt) for el, amt in self._elmap.items())) / \
self.num_atoms
def almost_equals(self, other, rtol=0.1, atol=1e-8):
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self):
"""
True if composition is for an element.
"""
return len(self._elmap) == 1
def copy(self):
return Composition(self._elmap, allow_negative=self.allow_negative)
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self):
"""
Returns a formula string, with elements sorted by alphabetically
e.g., Fe4 Li4 O16 P4.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self):
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self):
"""
Returns the normalized composition which the number of species sum to
1.
Returns:
Normalized composition which the number of species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self):
"""
Returns the reduced composition,i.e. amounts normalized by greatest
common denominator. e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self):
"""
Calculates a reduced formula and factor.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(x == int(x) for x in self._elmap.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = self.get_el_amt_dict()
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000):
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
mul = gcd(*[Fraction(v).limit_denominator(max_denominator) for v
in self.values()])
d = {k: round(v / mul) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * mul
@property
def reduced_formula(self):
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def elements(self):
"""
Returns view of elements in Composition.
"""
return list(self._elmap.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
@unitized("amu")
def weight(self):
"""
Total molecular weight of Composition
"""
return sum([amount * el.atomic_mass
for el, amount in self._elmap.items()])
def get_atomic_fraction(self, el):
"""
Calculate atomic fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
"""
Calculate weight fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
    def _parse_formula(self, formula):
        """
        Args:
            formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3

        Returns:
            A {symbol: amount} dict parsed from the formula (the constructor
            converts the keys to Element/Specie objects).
        """
        def get_sym_dict(f, factor):
            # Accumulate symbol -> amount for a flat (parenthesis-free)
            # fragment, scaling each amount by ``factor``.
            sym_dict = collections.defaultdict(float)
            for m in re.finditer(r"([A-Z][a-z]*)([-*\.\d]*)", f):
                el = m.group(1)
                amt = 1
                if m.group(2).strip() != "":
                    amt = float(m.group(2))
                sym_dict[el] += amt * factor
                # Chomp the matched token so any leftover text signals an
                # invalid formula below.
                f = f.replace(m.group(), "", 1)
            if f.strip():
                raise CompositionError("{} is an invalid formula!".format(f))
            return sym_dict

        # Expand the innermost parenthesized group, e.g. (PO4)3 -> P3.0O12.0,
        # then recurse until no parentheses remain.
        m = re.search(r"\(([^\(\)]+)\)([\.\d]*)", formula)
        if m:
            factor = 1
            if m.group(2) != "":
                factor = float(m.group(2))
            unit_sym_dict = get_sym_dict(m.group(1), factor)
            expanded_sym = "".join(["{}{}".format(el, amt)
                                    for el, amt in unit_sym_dict.items()])
            expanded_formula = formula.replace(m.group(), expanded_sym)
            return self._parse_formula(expanded_formula)
        return get_sym_dict(formula, 1)
@property
def anonymized_formula(self):
"""
An anonymized formula. Unique species are arranged in ordering of
increasing amounts and assigned ascending alphabets. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self._elmap.values()):
reduced /= gcd(*self._elmap.values())
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self):
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": self.as_dict().keys(),
"nelements": len(self.as_dict().keys())}
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
"""
Takes in a formula where capitilization might not be correctly entered,
and suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
#if we have an exact match and the user specifies lock_if_strict, just
#return the exact match!
if lock_if_strict:
#the strict composition parsing might throw an error, we can ignore
#it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
#remove duplicates
all_matches = list(set(all_matches))
#sort matches by rank descending
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0,
factor=1):
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
A list of tuples, with the first element being a Composition and
the second element being the number of points awarded that
Composition intepretation.
"""
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found...
"""
points = 0
# Points awarded if the first element of the element is correctly
# specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
#get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise CompositionError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
#convert the element string to proper [uppercase,lowercase] format
#and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
#if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
#else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
#The entire formula has been parsed into m_dict. Return the
#corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
#if there is a parenthesis, remove it and match the remaining stuff
#with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
#Match the stuff inside the parenthesis with the appropriate
#factor
for match in \
Composition._comps_from_fuzzy_formula(mp.group(1),
mp_dict,
mp_points,
factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in \
Composition._comps_from_fuzzy_formula(mp_form,
mp_dict,
mp_points,
factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
#if the stuff inside the parenthesis is nothing, then just
#return the stuff inside the parentheses
if only_me:
yield match
return
#try to match the single-letter elements
m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = \
_parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form1,
m_dict1,
m_points1,
factor):
yield match
#try to match two-letter elements
m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = \
_parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
m_points2,
factor):
yield match
def reduce_formula(sym_amt):
    """
    Helper method to reduce a sym_amt dict to a reduced formula and factor.

    Args:
        sym_amt (dict): {symbol: amount}.

    Returns:
        (reduced_formula, factor).
    """
    ordered = sorted(sym_amt.keys(), key=lambda s: get_el_sp(s).X)
    syms = [s for s in ordered
            if abs(sym_amt[s]) > Composition.amount_tolerance]
    num_el = len(syms)
    # A polyanion is assumed when the two most electronegative elements are
    # close in electronegativity (e.g. P and O in PO4): they are then grouped
    # and reduced separately so the formula reads e.g. Li(PO3)2.
    contains_polyanion = (num_el >= 3 and
                          get_el_sp(syms[num_el - 1]).X
                          - get_el_sp(syms[num_el - 2]).X < 1.65)

    factor = abs(gcd(*sym_amt.values()))
    pieces = []
    n = num_el - 2 if contains_polyanion else num_el
    for s in syms[:n]:
        pieces.append(s)
        pieces.append(formula_double_format(sym_amt[s] * 1.0 / factor))

    if contains_polyanion:
        poly_sym_amt = {s: sym_amt[s] / factor for s in syms[n:]}
        poly_form, poly_factor = reduce_formula(poly_sym_amt)

        if poly_factor != 1:
            pieces.append("({}){}".format(poly_form, int(poly_factor)))
        else:
            pieces.append(poly_form)

    return "".join(pieces), factor
class CompositionError(Exception):
    """Raised when a composition or formula cannot be parsed or constructed."""
    pass
class ChemicalPotential(dict, PMGSONable):
    """
    Class to represent set of chemical potentials. Can be:
        multiplied/divided by a Number
        multiplied by a Composition (returns an energy)
        added/subtracted with other ChemicalPotentials.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            *args, **kwargs: any valid dict init arguments

        Raises:
            ValueError: if two input keys normalize to the same
                Element/Specie (duplicate potential).
        """
        d = dict(*args, **kwargs)
        # Keys are normalized to Element/Specie objects; a length mismatch
        # after normalization means two inputs collapsed onto one species.
        super(ChemicalPotential, self).__init__((get_el_sp(k), v)
                                                for k, v in d.items())
        if len(d) != len(self):
            raise ValueError("Duplicate potential specified")

    def __mul__(self, other):
        """Scale all potentials by a Number; NotImplemented otherwise."""
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v * other for k, v in self.items()})
        else:
            return NotImplemented

    __rmul__ = __mul__

    def __truediv__(self, other):
        """Divide all potentials by a Number; NotImplemented otherwise."""
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v / other for k, v in self.items()})
        else:
            return NotImplemented

    __div__ = __truediv__

    def __sub__(self, other):
        """Element-wise difference with another ChemicalPotential.

        Species missing from either side are treated as having potential 0.
        """
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented

    def __add__(self, other):
        """Element-wise sum with another ChemicalPotential.

        Species missing from either side are treated as having potential 0.
        """
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
                                      for e in els})
        else:
            return NotImplemented

    def get_energy(self, composition, strict=True):
        """
        Calculates the energy of a composition.

        Args:
            composition (Composition): input composition
            strict (bool): Whether all potentials must be specified
        """
        if strict and set(composition.keys()) > set(self.keys()):
            s = set(composition.keys()) - set(self.keys())
            raise ValueError("Potentials not specified for {}".format(s))
        # Dot product of amounts with potentials; missing species count as 0
        # (only reachable when strict=False).
        return sum(self.get(k, 0) * v for k, v in composition.items())

    def __repr__(self):
        return "ChemPots: " + super(ChemicalPotential, self).__repr__()
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings (e.g. the
    # Composition class docstring) when the file is executed directly.
    import doctest
    doctest.testmod()
| 36.006944 | 80 | 0.564899 |
from __future__ import division, unicode_literals
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import collections
import numbers
import re
import string
import six
from six.moves import filter, map, zip
from fractions import Fraction
from functools import total_ordering
from monty.fractions import gcd
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import unitized
@total_ordering
class Composition(collections.Mapping, collections.Hashable, PMGSONable):
amount_tolerance = 1e-8
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs):
self.allow_negative = kwargs.pop('allow_negative', False)
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]._elmap
elif len(args) == 1 and isinstance(args[0], six.string_types):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
self._elmap = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
self._elmap[get_el_sp(k)] = v
self._natoms += abs(v)
def __getitem__(self, el):
return self._elmap.get(get_el_sp(el), 0)
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
if len(self) != len(other):
return False
for el, v in self._elmap.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
hashcode = 0
for el in self._elmap.keys():
hashcode += el.Z
return hashcode
def __contains__(self, el):
return el in self._elmap
def __len__(self):
return len(self._elmap)
def __iter__(self):
return self._elmap.__iter__()
@property
def average_electroneg(self):
return sum((el.X * abs(amt) for el, amt in self._elmap.items())) / \
self.num_atoms
def almost_equals(self, other, rtol=0.1, atol=1e-8):
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self):
return len(self._elmap) == 1
def copy(self):
return Composition(self._elmap, allow_negative=self.allow_negative)
@property
def formula(self):
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self):
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self):
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self):
return self / self._natoms
@property
def reduced_composition(self):
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self):
all_int = all(x == int(x) for x in self._elmap.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = self.get_el_amt_dict()
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000):
mul = gcd(*[Fraction(v).limit_denominator(max_denominator) for v
in self.values()])
d = {k: round(v / mul) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * mul
@property
def reduced_formula(self):
return self.get_reduced_formula_and_factor()[0]
@property
def elements(self):
return list(self._elmap.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
return self._natoms
@property
@unitized("amu")
def weight(self):
return sum([amount * el.atomic_mass
for el, amount in self._elmap.items()])
def get_atomic_fraction(self, el):
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def _parse_formula(self, formula):
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)([-*\.\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise CompositionError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)([\.\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt)
for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
@property
def anonymized_formula(self):
reduced = self.element_composition
if all(x == int(x) for x in self._elmap.values()):
reduced /= gcd(*self._elmap.values())
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
return cls(d)
def get_el_amt_dict(self):
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": self.as_dict().keys(),
"nelements": len(self.as_dict().keys())}
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
if lock_if_strict:
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
all_matches = list(set(all_matches))
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
    @staticmethod
    def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0,
                                  factor=1):
        """Recursive generator yielding (Composition, score) guesses for a
        fuzzily-typed formula string.

        Args:
            fuzzy_formula: the remaining unparsed formula text.
            m_dict: symbol -> amount accumulated so far.
                NOTE(review): a mutable default argument, but every
                recursive call copies it via ``dict(m_dict)`` before any
                mutation, so the shared default stays empty in practice.
            m_points: capitalisation score accumulated so far.
            factor: multiplier applied to amounts (set by enclosing
                parenthesised groups).
        """
        def _parse_chomp_and_rank(m, f, m_dict, m_points):
            # Score one element match, fold its amount into m_dict, and chomp
            # the matched text off the front of f.  Returns (None, None, None)
            # when the (re-capitalised) symbol is not a real element.
            points = 0
            # Heuristic rewards: a correctly capitalised first letter and a
            # lowercase second letter each add 100 points.
            points_first_capital = 100
            points_second_lowercase = 100
            el = m.group(1)
            if len(el) > 2 or len(el) < 1:
                raise CompositionError("Invalid element symbol entered!")
            # A missing amount means an implicit 1 (e.g. "FeO").
            amt = float(m.group(2)) if m.group(2).strip() != "" else 1
            char1 = el[0]
            char2 = el[1] if len(el) > 1 else ""
            if char1 == char1.upper():
                points += points_first_capital
            if char2 and char2 == char2.lower():
                points += points_second_lowercase
            # Normalise to the conventional Symbol capitalisation.
            el = char1.upper() + char2.lower()
            if Element.is_valid_symbol(el):
                if el in m_dict:
                    m_dict[el] += amt * factor
                else:
                    m_dict[el] = amt * factor
                return f.replace(m.group(), "", 1), m_dict, m_points + points
            #else return None
            return None, None, None
        fuzzy_formula = fuzzy_formula.strip()
        if len(fuzzy_formula) == 0:
            #The entire formula has been parsed into m_dict. Return the
            #corresponding Composition and number of points
            if m_dict:
                yield (Composition.from_dict(m_dict), m_points)
        else:
            #if there is a parenthesis, remove it and match the remaining stuff
            #with the appropriate factor
            for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
                mp_points = m_points
                mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
                mp_dict = dict(m_dict)
                mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
                #Match the stuff inside the parenthesis with the appropriate
                #factor
                for match in \
                    Composition._comps_from_fuzzy_formula(mp.group(1),
                                                          mp_dict,
                                                          mp_points,
                                                          factor=mp_factor):
                    only_me = True
                    # Match the stuff outside the parentheses and return the
                    # sum.
                    for match2 in \
                        Composition._comps_from_fuzzy_formula(mp_form,
                                                              mp_dict,
                                                              mp_points,
                                                              factor=1):
                        only_me = False
                        yield (match[0] + match2[0], match[1] + match2[1])
                    #if the stuff inside the parenthesis is nothing, then just
                    #return the stuff inside the parentheses
                    if only_me:
                        yield match
                return
            # NOTE(review): the class [A-z] also matches the ASCII characters
            # [ \ ] ^ _ ` between 'Z' and 'a'; presumably [A-Za-z] was
            # intended.  Invalid symbols are filtered out later by
            # Element.is_valid_symbol, so this appears benign -- confirm.
            #try to match the single-letter elements
            m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
            if m1:
                m_points1 = m_points
                m_form1 = fuzzy_formula
                m_dict1 = dict(m_dict)
                (m_form1, m_dict1, m_points1) = \
                    _parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
                if m_dict1:
                    #there was a real match
                    for match in \
                        Composition._comps_from_fuzzy_formula(m_form1,
                                                              m_dict1,
                                                              m_points1,
                                                              factor):
                        yield match
            #try to match two-letter elements
            m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
            if m2:
                m_points2 = m_points
                m_form2 = fuzzy_formula
                m_dict2 = dict(m_dict)
                (m_form2, m_dict2, m_points2) = \
                    _parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
                if m_dict2:
                    #there was a real match
                    for match in \
                        Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
                                                              m_points2,
                                                              factor):
                        yield match
def reduce_formula(sym_amt):
    """Reduce a {symbol: amount} map to a formula string and a factor.

    Symbols are ordered by electronegativity; near-zero amounts are
    dropped.  When the last two elements are close in electronegativity
    (a likely polyanion, e.g. PO4), that pair is reduced recursively and
    rendered in parentheses.

    Returns:
        (reduced_formula, factor): the formula string and the common
        factor divided out of all amounts.
    """
    symbols = sorted(sym_amt.keys(), key=lambda s: get_el_sp(s).X)
    symbols = [s for s in symbols
               if abs(sym_amt[s]) > Composition.amount_tolerance]
    n_syms = len(symbols)
    has_polyanion = (n_syms >= 3 and
                     get_el_sp(symbols[-1]).X
                     - get_el_sp(symbols[-2]).X < 1.65)
    factor = abs(gcd(*sym_amt.values()))
    # The last two symbols are handled separately when a polyanion is present.
    n_main = n_syms - 2 if has_polyanion else n_syms
    parts = []
    for sym in symbols[:n_main]:
        parts.append(sym)
        parts.append(formula_double_format(sym_amt[sym] * 1.0 / factor))
    if has_polyanion:
        poly = {s: sym_amt[s] / factor for s in symbols[n_main:]}
        (poly_form, poly_factor) = reduce_formula(poly)
        if poly_factor != 1:
            parts.append("({}){}".format(poly_form, int(poly_factor)))
        else:
            parts.append(poly_form)
    return "".join(parts), factor
class CompositionError(Exception):
    """Raised for invalid composition input (e.g. a bad element symbol)."""
    pass
class ChemicalPotential(dict, PMGSONable):
    """A set of chemical potentials.

    Behaves as a dict mapping species (normalised through ``get_el_sp``)
    to potential values, supporting elementwise ``+``/``-`` between
    potential sets, scalar ``*`` and ``/``, and energy evaluation for a
    composition.
    """

    def __init__(self, *args, **kwargs):
        """Accept any valid dict initialisation; keys are normalised with
        ``get_el_sp``.

        Raises:
            ValueError: if two input keys normalise to the same species.
        """
        d = dict(*args, **kwargs)
        super(ChemicalPotential, self).__init__((get_el_sp(k), v)
                                                for k, v in d.items())
        if len(d) != len(self):
            raise ValueError("Duplicate potential specified")

    def __mul__(self, other):
        """Scale every potential by a scalar."""
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v * other for k, v in self.items()})
        return NotImplemented

    __rmul__ = __mul__

    def __truediv__(self, other):
        """Divide every potential by a scalar."""
        if isinstance(other, numbers.Number):
            return ChemicalPotential({k: v / other for k, v in self.items()})
        return NotImplemented

    __div__ = __truediv__  # Python 2 compatibility alias

    def __sub__(self, other):
        """Elementwise difference; species missing on either side count as 0."""
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
                                      for e in els})
        return NotImplemented

    def __add__(self, other):
        """Elementwise sum; species missing on either side count as 0."""
        if isinstance(other, ChemicalPotential):
            els = set(self.keys()).union(other.keys())
            return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
                                      for e in els})
        return NotImplemented

    def get_energy(self, composition, strict=True):
        """Compute the energy of a composition: sum(potential * amount).

        Args:
            composition: mapping of species -> amount.
            strict: if True, raise ValueError when the composition contains
                any species with no specified potential.

        Bug fix: the original strict check used a proper-superset test
        (``set(composition) > set(self)``), which is False -- and thus
        silently treated missing potentials as 0 -- whenever the
        potentials contained at least one species absent from the
        composition.  Any missing species now raises.
        """
        if strict:
            missing = set(composition.keys()) - set(self.keys())
            if missing:
                raise ValueError("Potentials not specified for {}".format(missing))
        return sum(self.get(k, 0) * v for k, v in composition.items())

    def __repr__(self):
        return "ChemPots: " + super(ChemicalPotential, self).__repr__()
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| true | true |
f730609d863d7ec81eadcaed88ab2c70b888fad3 | 22,602 | py | Python | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | GearGenerator_by_UI.py | manguel1980-dev/Gear-Generator | c4f2eea957340201e42a213b458cc1c28d04833b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Gear Generator
# Purpose: Just for fun
#
# Author: Manuel Astros
# Email: manuel.astros1980@gmail.com
# Web: https://sites.google.com/view/interpolation/home
#
# Created: 25/06/2021
# Copyright: (c) astros 2021
# Licence: MIT
# Based on: Gear Drawing with Bézier Curves (https://www.arc.id.au/GearDrawing.html)
# -------------------------------------------------------------------------------
#
# Releases:
# 0.1: First Release
# ______________________________________________________________________________________
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QCloseEvent, QFont
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QHeaderView, \
QCheckBox, QComboBox, QMessageBox, QWidget, QVBoxLayout
from Gear_Mpl_Draw import MplWidget
# --------------------------Mpl Import------------
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from math import radians, degrees, atan, hypot, sin, cos
# import numpy as np
import random
# ---------------Internal modules import--------------
from gear_calc import createGearOutline, createIntGearOutline, displace, rotate
# ----------------------------------------
class mainWindow(QMainWindow):
    """Main window: a gear-parameter table plus a matplotlib gear preview.

    Table columns (0-9): internal-gear checkbox, pitch diameter, teeth
    count, pressure angle, shaft/rim diameter, module, mesh-link combo,
    angle, X position, Y position.  Rows may be "linked" so a gear is
    placed automatically to mesh with another row's gear.
    """
    def __init__(self):
        # Validation/status flags shared by the slots below.
        self.ErrInt = True
        self.ErrFloat = True
        self.ErrPitchDiam = True
        self.ErrInternalGear = False
        self.ErrDiameter = False
        super(mainWindow, self).__init__()
        loadUi('Gear_Generator.ui', self)
        self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        # Populate the first row's widgets: internal checkbox, default
        # 20-degree pressure angle, mesh combo and a locked angle cell.
        check_box = internal(self)
        check_box.stateChanged.connect(self._clickCheckBox)
        self.tableWidget.setCellWidget(0, 0, check_box)
        angle = QtWidgets.QTableWidgetItem(str(20))
        angle.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 3, angle)
        lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
        mesh = Mesh(self, lista)
        # NOTE(review): unlike _addRow, the first row's combo is never
        # connected to _comboBoxRevision -- confirm intended.
        self.tableWidget.setCellWidget(0, 6, mesh)
        # m: module, m = pitch diameter / teeth number
        m = float(self.tableWidget.item(0, 1).text()) / float(self.tableWidget.item(0, 2).text())
        m = QtWidgets.QTableWidgetItem(str(m))
        m.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 5, m)
        Acell = QtWidgets.QTableWidgetItem('0')
        Acell.setFlags(QtCore.Qt.ItemIsEnabled)
        Acell.setTextAlignment(QtCore.Qt.AlignCenter)
        self.tableWidget.setItem(0, 7, Acell)
        # ------------------------------------Mpl Widget insertion---------------------------------------
        self._gearGraphic()
        # self.Graph = CanvasGraph(self.mplWidget)
        # self.Graph.setObjectName("Gear-View")
        # ---------------------------------------------------------------------------
        # ------------Signals-----------------------------------
        self.add_gear.clicked.connect(self._addRow)
        self.remove_gear.clicked.connect(self._removeRow)
        self.generate_gear.clicked.connect(self._gearGraphic)
        self.tableWidget.itemChanged.connect(self._cellChange)
        self._dataRevision()
        # self._cancel.clicked.connect(self._close)
        # self.add_gear.clicked.connect(self._addRow)
    def _gearGraphic(self):
        """Recompute all gear outlines and (re)create the matplotlib view."""
        gear_outline = self._gearCalculation()
        # self.mplW = MplWidget(self.mplWidget)
        # self.addToolBar(QtCore.Qt.BottomToolBarArea, NavigationToolbar(mplW.canvas, self))
        self.Graph = MplWidget(self.mplWidget, gear_outline)
        self.Graph.show()
    def _clickCheckBox(self):
        """Slot for the internal-gear checkbox: show the draw mode in the status bar."""
        check_row = self.tableWidget.currentRow()
        check = self.tableWidget.cellWidget(check_row, 0).getCheckValue()
        print(check)
        if check:
            self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Internal Gear')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
        else:
            self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Normal Gear')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
    def _comboBoxRevision(self):
        """Slot for a row's Mesh combo box.

        When a row is linked to another gear, compute this gear's X/Y
        placement so the two pitch circles touch (sum of radii for two
        external gears, difference of radii when one is internal), and
        flag impossible pairings (internal-with-internal, or an internal
        gear not larger than its partner).
        """
        combo_row = self.tableWidget.currentRow()
        # current_col = self.tableWidget.currentRow()
        mesh_row_value_pointed = self.tableWidget.cellWidget(combo_row, 6).currentText()
        print('actual cell: ', combo_row)
        print('valor apuntado: ', mesh_row_value_pointed)
        if mesh_row_value_pointed == 'Not Linked':
            # Unlinked: re-create the A/X/Y items (A locked, X/Y editable)
            # keeping their current values.
            Acell = self.tableWidget.item(combo_row, 7).text()
            print(Acell)
            Acell = QtWidgets.QTableWidgetItem(Acell)
            Acell.setFlags(QtCore.Qt.ItemIsEnabled)
            Acell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 7, Acell)
            Xcell = self.tableWidget.item(combo_row, 8).text()
            Xcell = QtWidgets.QTableWidgetItem(Xcell)
            # Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
            Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 8, Xcell)
            Ycell = self.tableWidget.item(combo_row, 9).text()
            Ycell = QtWidgets.QTableWidgetItem(Ycell)
            # Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
            Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 9, Ycell)
            self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - Gear is ' + mesh_row_value_pointed)
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
            print(mesh_row_value_pointed)
            self.ErrPitchDiam = False
        else:
            try:
                A_pitchDiam = float(self.tableWidget.item(combo_row, 1).text())
            except ValueError:
                # This row has no usable pitch diameter.
                Acell = '0'
                Xcell ='0'
                Ycell = '0'
                self.meshMessage = 'Pith diameter missing in current row (' + str(combo_row + 1) + ')'
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                print('Pith diameter missing in current row (' + str(combo_row + 1) + ')')
                self.ErrPitchDiam = True
            else:
                try:
                    # Read geometry of this row and of the pointed (linked) row.
                    A_pitchDiam_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 1).text())
                    Acell = float(self.tableWidget.item(combo_row, 7).text())
                    Xcell = float(self.tableWidget.item(combo_row, 8).text())
                    Ycell = float(self.tableWidget.item(combo_row, 9).text())
                    CCell = self.tableWidget.cellWidget(combo_row, 0).getCheckValue()
                    print('Este es elcheck value: ', CCell)
                    Acell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 7).text())
                    Xcell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 8).text())
                    Ycell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 9).text())
                    Cell_pointed = self.tableWidget.cellWidget(int(mesh_row_value_pointed) - 1, 0).getCheckValue()
                    print('Este es elcheck value apuntado: ', Cell_pointed)
                    if CCell and Cell_pointed:
                        # Two internal gears can never mesh.
                        self.ErrInternalGear = True
                        Acell = '0'
                        Xcell ='0'
                        Ycell = '0'
                        self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' and ' + str(combo_row + 1) + ' can not be meshed'
                    elif Cell_pointed:
                        # Pointed gear is internal: this gear must fit inside it.
                        if A_pitchDiam_pointed <= A_pitchDiam:
                            self.ErrDiameter = True
                            Acell = '0'
                            Xcell ='0'
                            Ycell = '0'
                            self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' must be higher than' + str(combo_row + 1) + ' | Imposible meshed'
                        else:
                            # Internal mesh: centre distance = difference of radii.
                            pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
                            Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
                            Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
                            Acell = str(Acell)
                            self.ErrDiameter = False
                    elif CCell:
                        # This gear is internal: the pointed gear must fit inside it.
                        if A_pitchDiam <= A_pitchDiam_pointed:
                            Acell = '0'
                            Xcell ='0'
                            Ycell = '0'
                            self.ErrDiameter = True
                            self.meshMessage = 'Gears ' + str(combo_row + 1) + ' must be higher than' + str(mesh_row_value_pointed) + ' | Imposible meshed'
                        else:
                            # NOTE(review): identical placement formula to the
                            # branch above (pitchDiam_dist is negative here) --
                            # confirm this is intended for the
                            # internal-current-gear case.
                            pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
                            Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
                            Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
                            Acell = str(Acell)
                            self.ErrDiameter = False
                    else:
                        # External mesh: centre distance = sum of radii.
                        pitchDiam_dist = (A_pitchDiam_pointed / 2) + (A_pitchDiam / 2)
                        Xcell = str(Xcell_pointed + pitchDiam_dist * cos(radians(Acell)))
                        Ycell = str(Ycell_pointed + pitchDiam_dist * sin(radians(Acell)))
                        Acell = str(Acell)
                        self.ErrPitchDiam = False
                except:
                    # NOTE(review): bare except -- any unrelated failure in the
                    # block above is reported as a missing pitch diameter.
                    Acell = '0'
                    Xcell ='0'
                    Ycell = '0'
                    self.meshMessage = 'Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')'
                    print('Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')')
                    self.ErrPitchDiam = True
            # Write the computed A/X/Y back into the table
            # (X/Y are locked while the row is linked).
            # Acell = self.tableWidget.item(combo_row, 7).text()
            Acell = QtWidgets.QTableWidgetItem(Acell)
            # Acell.setFlags(QtCore.Qt.ItemIsEnabled)
            Acell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 7, Acell)
            # Xcell = self.tableWidget.item(combo_row, 8).text()
            Xcell = QtWidgets.QTableWidgetItem(Xcell)
            Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
            Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 8, Xcell)
            # Ycell = self.tableWidget.item(combo_row, 9).text()
            Ycell = QtWidgets.QTableWidgetItem(Ycell)
            Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
            Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
            self.tableWidget.setItem(combo_row, 9, Ycell)
            # TODO: fix the behaviour of the error messages
            if self.ErrPitchDiam:
                self.statusLabel.setText(self.meshMessage + ' | Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                print('meshing with ', mesh_row_value_pointed)
            elif self.ErrInternalGear:
                self.statusLabel.setText(self.meshMessage)
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                print('meshing with ', mesh_row_value_pointed)
            elif self.ErrDiameter:
                self.statusLabel.setText(self.meshMessage)
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                print('meshing with ', mesh_row_value_pointed)
            else:
                self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
                print('meshing with ', mesh_row_value_pointed)
    def _dataRevision(self):
        """Validate every table row; return a list of bools, one per row.

        A row is valid when every cell parses with the expected type.
        NOTE(review): the pitch diameter is parsed with int() here but with
        float() elsewhere -- a value like '100.5' marks the row invalid.
        """
        self.ErrInt = True
        self.ErrFloat = True
        verification = []
        row_rev = self.tableWidget.rowCount()
        print(row_rev)
        for r in range(row_rev):
            try:
                # Any parse failure (or missing widget) marks the row invalid.
                check_val_rev = self.tableWidget.cellWidget(r, 0).getCheckValue()
                teeth_pitch_diam_rev = int(self.tableWidget.item(r, 1).text())
                teeth_n_rev = int(self.tableWidget.item(r, 2).text())
                pressure_ang_rev = float(self.tableWidget.item(r, 3).text())
                s_or_r_radius_rev = float(self.tableWidget.item(r, 4).text()) / 2
                module_g_rev = float(self.tableWidget.item(r, 5).text())
                mesh_rev = self.tableWidget.cellWidget(r, 6).currentText()
                angle_rev = float(self.tableWidget.item(r, 7).text())
                x_rev = float(self.tableWidget.item(r, 8).text())
                y_rev = float(self.tableWidget.item(r, 9).text())
                if mesh_rev != 'Not Linked':
                    pass
                verification.append(True)
            except:
                verification.append(False)
        return verification
    def _gearCalculation(self):
        """Build outlines for every valid row.

        Returns:
            [location, gears]: per-row placement info ([angle, [x, y]] or
            [False]) and per-row outline data ([row_number, outline] or
            [row_number, [False]]).
        """
        # verif = [True, False, True]
        verif = self._dataRevision()
        gears=[]
        location = []
        for row_g in range(len(verif)):
            gears.append([row_g + 1])
            print('intento: ', verif[row_g])
            if (verif[row_g]):
                teeth_n = int(self.tableWidget.item(row_g, 2).text())
                pressure_ang = float(self.tableWidget.item(row_g, 3).text())
                s_or_r_radius = float(self.tableWidget.item(row_g, 4).text()) / 2
                module_g = float(self.tableWidget.item(row_g, 5).text())
                check_val = self.tableWidget.cellWidget(row_g, 0).getCheckValue()
                Acell = float(self.tableWidget.item(row_g, 7).text())
                Xcell = float(self.tableWidget.item(row_g, 8).text())
                Ycell = float(self.tableWidget.item(row_g, 9).text())
                # Internal gears use the ring-gear outline generator.
                if check_val:
                    outline = createIntGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                else:
                    outline = createGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
                print('outline', outline)
                if Xcell != 0 or Ycell != 0:
                    outline_diplaced = displace(outline, Xcell, Ycell)
                    outline = outline_diplaced
                    print('outline displaces:', outline)
                location.append([Acell, [Xcell, Ycell]])
                gears[row_g].append(outline)
                print('True: ', row_g + 1)
            else:
                gears[row_g].append([False])
                location.append([False])
                print('False: ', row_g + 1)
        # print(gears)
        return [location, gears]
    def _cellChange(self):
        """Slot for itemChanged: type-check the edited cell and report via
        the status bar and, on failure, a modal dialog."""
        items = self.tableWidget.selectedItems()
        col = self.tableWidget.currentColumn()
        row = self.tableWidget.currentRow()
        print('_cellChange: ', row, col)
        # Columns that require integers vs. decimals.
        enteros = [2]
        decimales = [1, 3, 4, 5, 7, 8, 9]
        if col in enteros:
            try:
                cellType = int(items[0].text())
                self.ErrInt = True
                self.statusLabel.setText('OK: Current cell data is an integer')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
            except ValueError:
                self.ErrInt = False
                self.statusLabel.setText('Error: Value cell most be an integer')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                return self.alertDialog('integer')
        elif col in decimales:
            try:
                cellType = float(items[0].text())
                self.ErrFloat = True
                self.statusLabel.setText('OK: Current cell data is a float')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
            except ValueError:
                self.ErrFloat = False
                self.statusLabel.setText('Error: Value cell most be an Float')
                self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
                return self.alertDialog('Float')
        # print(str(items[0].text()))
    def alertDialog(self, val):
        """Show a modal information dialog reporting the required input type."""
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Information)
        message = val + " input is required"
        msgBox.setText(message)
        msgBox.setWindowTitle("Input Error")
        msgBox.setStandardButtons(QMessageBox.Ok)
        returnValue = msgBox.exec()
        if returnValue == QMessageBox.Ok:
            print('OK clicked')
    def _addRow(self):
        """Append a new table row with default widgets (checkbox, 20-degree
        pressure angle, mesh combo, locked angle cell, X/Y cells)."""
        if self.ErrInt or self.ErrFloat:
            rowCount = self.tableWidget.rowCount()
            self.tableWidget.insertRow(rowCount)
            columnCount = self.tableWidget.columnCount()
            for col in range(columnCount):
                print(col)
                if col == 0:
                    check_box = internal(self)
                    check_box.stateChanged.connect(self._clickCheckBox)
                    self.tableWidget.setCellWidget(rowCount, col, check_box)
                elif col == 3:
                    angle = QtWidgets.QTableWidgetItem('20')
                    angle.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, angle)
                elif col == 6:
                    lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
                    mesh = Mesh(self, lista)
                    self.tableWidget.setCellWidget(rowCount, col, mesh)
                    mesh.currentIndexChanged.connect(self._comboBoxRevision)
                elif col == 7:
                    Acell = QtWidgets.QTableWidgetItem('0')
                    Acell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Acell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Acell)
                elif col == 8:
                    Xcell = QtWidgets.QTableWidgetItem('0')
                    # Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Xcell)
                elif col == 9:
                    Ycell = QtWidgets.QTableWidgetItem('0')
                    # Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
                    Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, Ycell)
                else:
                    cellCenter = QtWidgets.QTableWidgetItem()
                    cellCenter.setTextAlignment(QtCore.Qt.AlignCenter)
                    self.tableWidget.setItem(rowCount, col, cellCenter)
            self.statusLabel.setText('OK: Row just added')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
    def _removeRow(self):
        """Remove the last table row, if any."""
        if self.tableWidget.rowCount() > 0:
            self.tableWidget.removeRow(self.tableWidget.rowCount()-1)
            self.statusLabel.setText('OK: Row just deleted')
            self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
    # ----------------Events----------------------------------------------
    # Properly defined in the future
    # def closeEvent(self, event):
    #     reply = QMessageBox.question(self, 'Window Close', 'Are you sure you want to close the window?',
    #                                  QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    #
    #     if reply == QMessageBox.Yes:
    #         event.accept()
    #         # self.action_close_window.triggered.emit(True)
    #         print('Window closed')
    #     else:
    #         event.ignore()
    #
    # def resizeEvent(self, event):
    #     print("resize")
    #     QMainWindow.resizeEvent(self, event)
# ----------------------------------------------------------------------------
class internal(QCheckBox):
    """Checkbox marking a table row as an internal (ring) gear."""
    def __init__(self, parent):
        super().__init__(parent)
        # Log every state change through the same accessor used by callers.
        self.stateChanged.connect(self.getCheckValue)
    def getCheckValue(self):
        """Log and return the current checked state as a bool."""
        if self.isChecked():
            print('Check Value Active')
            return True
        print('Check Value Deactivated')
        return False
class Mesh(QComboBox):
    """Combo box listing the rows a gear can be meshed with."""
    def __init__(self, parent, aa):
        super().__init__(parent)
        self.addItems(aa)
        self.currentIndexChanged.connect(self.getComboValue)
    def getComboValue(self):
        """Log and return the currently selected entry's text."""
        selection = self.currentText()
        print(selection)
        return selection
# ---- Application bootstrap (runs at import time; no __main__ guard) ----
app = QApplication(sys.argv)
main_window = mainWindow()
widget = QtWidgets.QStackedWidget()
widget.addWidget(main_window)
# widget.setFixedHeight(300)
# widget.setFixedWidth(1060)
widget.resize(658, 650)
widget.show()
try:
    # exec_() blocks until the Qt event loop exits; sys.exit raises
    # SystemExit with its return code.
    sys.exit(app.exec_())
# NOTE(review): the bare except also catches the SystemExit raised by
# sys.exit, so 'Exiting' prints on every shutdown -- consider narrowing.
except:
    print('Exiting')
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QCloseEvent, QFont
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QHeaderView, \
QCheckBox, QComboBox, QMessageBox, QWidget, QVBoxLayout
from Gear_Mpl_Draw import MplWidget
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from math import radians, degrees, atan, hypot, sin, cos
import random
from gear_calc import createGearOutline, createIntGearOutline, displace, rotate
class mainWindow(QMainWindow):
def __init__(self):
self.ErrInt = True
self.ErrFloat = True
self.ErrPitchDiam = True
self.ErrInternalGear = False
self.ErrDiameter = False
super(mainWindow, self).__init__()
loadUi('Gear_Generator.ui', self)
self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
check_box = internal(self)
check_box.stateChanged.connect(self._clickCheckBox)
self.tableWidget.setCellWidget(0, 0, check_box)
angle = QtWidgets.QTableWidgetItem(str(20))
angle.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 3, angle)
lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
mesh = Mesh(self, lista)
self.tableWidget.setCellWidget(0, 6, mesh)
m = float(self.tableWidget.item(0, 1).text()) / float(self.tableWidget.item(0, 2).text())
m = QtWidgets.QTableWidgetItem(str(m))
m.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 5, m)
Acell = QtWidgets.QTableWidgetItem('0')
Acell.setFlags(QtCore.Qt.ItemIsEnabled)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 7, Acell)
self._gearGraphic()
self.add_gear.clicked.connect(self._addRow)
self.remove_gear.clicked.connect(self._removeRow)
self.generate_gear.clicked.connect(self._gearGraphic)
self.tableWidget.itemChanged.connect(self._cellChange)
self._dataRevision()
def _gearGraphic(self):
gear_outline = self._gearCalculation()
self.Graph = MplWidget(self.mplWidget, gear_outline)
self.Graph.show()
def _clickCheckBox(self):
check_row = self.tableWidget.currentRow()
check = self.tableWidget.cellWidget(check_row, 0).getCheckValue()
print(check)
if check:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Internal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
else:
self.statusLabel.setText('Row: ' + str(check_row + 1) + ' - ' + 'Draw Normal Gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
def _comboBoxRevision(self):
combo_row = self.tableWidget.currentRow()
mesh_row_value_pointed = self.tableWidget.cellWidget(combo_row, 6).currentText()
print('actual cell: ', combo_row)
print('valor apuntado: ', mesh_row_value_pointed)
if mesh_row_value_pointed == 'Not Linked':
Acell = self.tableWidget.item(combo_row, 7).text()
print(Acell)
Acell = QtWidgets.QTableWidgetItem(Acell)
Acell.setFlags(QtCore.Qt.ItemIsEnabled)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 7, Acell)
Xcell = self.tableWidget.item(combo_row, 8).text()
Xcell = QtWidgets.QTableWidgetItem(Xcell)
Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 8, Xcell)
Ycell = self.tableWidget.item(combo_row, 9).text()
Ycell = QtWidgets.QTableWidgetItem(Ycell)
Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 9, Ycell)
self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - Gear is ' + mesh_row_value_pointed)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
print(mesh_row_value_pointed)
self.ErrPitchDiam = False
else:
try:
A_pitchDiam = float(self.tableWidget.item(combo_row, 1).text())
except ValueError:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Pith diameter missing in current row (' + str(combo_row + 1) + ')'
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('Pith diameter missing in current row (' + str(combo_row + 1) + ')')
self.ErrPitchDiam = True
else:
try:
A_pitchDiam_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 1).text())
Acell = float(self.tableWidget.item(combo_row, 7).text())
Xcell = float(self.tableWidget.item(combo_row, 8).text())
Ycell = float(self.tableWidget.item(combo_row, 9).text())
CCell = self.tableWidget.cellWidget(combo_row, 0).getCheckValue()
print('Este es elcheck value: ', CCell)
Acell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 7).text())
Xcell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 8).text())
Ycell_pointed = float(self.tableWidget.item(int(mesh_row_value_pointed) - 1, 9).text())
Cell_pointed = self.tableWidget.cellWidget(int(mesh_row_value_pointed) - 1, 0).getCheckValue()
print('Este es elcheck value apuntado: ', Cell_pointed)
if CCell and Cell_pointed:
self.ErrInternalGear = True
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' and ' + str(combo_row + 1) + ' can not be meshed'
elif Cell_pointed:
if A_pitchDiam_pointed <= A_pitchDiam:
self.ErrDiameter = True
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Gears ' + str(mesh_row_value_pointed) + ' must be higher than' + str(combo_row + 1) + ' | Imposible meshed'
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrDiameter = False
elif CCell:
if A_pitchDiam <= A_pitchDiam_pointed:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.ErrDiameter = True
self.meshMessage = 'Gears ' + str(combo_row + 1) + ' must be higher than' + str(mesh_row_value_pointed) + ' | Imposible meshed'
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) - (A_pitchDiam / 2)
Xcell = str(Xcell_pointed - pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed - pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrDiameter = False
else:
pitchDiam_dist = (A_pitchDiam_pointed / 2) + (A_pitchDiam / 2)
Xcell = str(Xcell_pointed + pitchDiam_dist * cos(radians(Acell)))
Ycell = str(Ycell_pointed + pitchDiam_dist * sin(radians(Acell)))
Acell = str(Acell)
self.ErrPitchDiam = False
except:
Acell = '0'
Xcell ='0'
Ycell = '0'
self.meshMessage = 'Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')'
print('Pith diameter missing in row (' + str(mesh_row_value_pointed) + ')')
self.ErrPitchDiam = True
Acell = QtWidgets.QTableWidgetItem(Acell)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 7, Acell)
Xcell = QtWidgets.QTableWidgetItem(Xcell)
Xcell.setFlags(QtCore.Qt.ItemIsEnabled)
Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 8, Xcell)
Ycell = QtWidgets.QTableWidgetItem(Ycell)
Ycell.setFlags(QtCore.Qt.ItemIsEnabled)
Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(combo_row, 9, Ycell)
if self.ErrPitchDiam:
self.statusLabel.setText(self.meshMessage + ' | Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
elif self.ErrInternalGear:
self.statusLabel.setText(self.meshMessage)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
elif self.ErrDiameter:
self.statusLabel.setText(self.meshMessage)
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
print('meshing with ', mesh_row_value_pointed)
else:
self.statusLabel.setText('Row: ' + str(combo_row + 1) + ' - ' + 'meshing with row ' + mesh_row_value_pointed + ' gear')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
print('meshing with ', mesh_row_value_pointed)
def _dataRevision(self):
self.ErrInt = True
self.ErrFloat = True
verification = []
row_rev = self.tableWidget.rowCount()
print(row_rev)
for r in range(row_rev):
try:
check_val_rev = self.tableWidget.cellWidget(r, 0).getCheckValue()
teeth_pitch_diam_rev = int(self.tableWidget.item(r, 1).text())
teeth_n_rev = int(self.tableWidget.item(r, 2).text())
pressure_ang_rev = float(self.tableWidget.item(r, 3).text())
s_or_r_radius_rev = float(self.tableWidget.item(r, 4).text()) / 2
module_g_rev = float(self.tableWidget.item(r, 5).text())
mesh_rev = self.tableWidget.cellWidget(r, 6).currentText()
angle_rev = float(self.tableWidget.item(r, 7).text())
x_rev = float(self.tableWidget.item(r, 8).text())
y_rev = float(self.tableWidget.item(r, 9).text())
if mesh_rev != 'Not Linked':
pass
verification.append(True)
except:
verification.append(False)
return verification
def _gearCalculation(self):
verif = self._dataRevision()
gears=[]
location = []
for row_g in range(len(verif)):
gears.append([row_g + 1])
print('intento: ', verif[row_g])
if (verif[row_g]):
teeth_n = int(self.tableWidget.item(row_g, 2).text())
pressure_ang = float(self.tableWidget.item(row_g, 3).text())
s_or_r_radius = float(self.tableWidget.item(row_g, 4).text()) / 2
module_g = float(self.tableWidget.item(row_g, 5).text())
check_val = self.tableWidget.cellWidget(row_g, 0).getCheckValue()
Acell = float(self.tableWidget.item(row_g, 7).text())
Xcell = float(self.tableWidget.item(row_g, 8).text())
Ycell = float(self.tableWidget.item(row_g, 9).text())
if check_val:
outline = createIntGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
else:
outline = createGearOutline(module_g, teeth_n, pressure_ang, s_or_r_radius)
print('outline', outline)
if Xcell != 0 or Ycell != 0:
outline_diplaced = displace(outline, Xcell, Ycell)
outline = outline_diplaced
print('outline displaces:', outline)
location.append([Acell, [Xcell, Ycell]])
gears[row_g].append(outline)
print('True: ', row_g + 1)
else:
gears[row_g].append([False])
location.append([False])
print('False: ', row_g + 1)
return [location, gears]
def _cellChange(self):
items = self.tableWidget.selectedItems()
col = self.tableWidget.currentColumn()
row = self.tableWidget.currentRow()
print('_cellChange: ', row, col)
enteros = [2]
decimales = [1, 3, 4, 5, 7, 8, 9]
if col in enteros:
try:
cellType = int(items[0].text())
self.ErrInt = True
self.statusLabel.setText('OK: Current cell data is an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrInt = False
self.statusLabel.setText('Error: Value cell most be an integer')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('integer')
elif col in decimales:
try:
cellType = float(items[0].text())
self.ErrFloat = True
self.statusLabel.setText('OK: Current cell data is a float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(0, 0, 0)")
except ValueError:
self.ErrFloat = False
self.statusLabel.setText('Error: Value cell most be an Float')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color: rgb(122, 55, 55)")
return self.alertDialog('Float')
def alertDialog(self, val):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
message = val + " input is required"
msgBox.setText(message)
msgBox.setWindowTitle("Input Error")
msgBox.setStandardButtons(QMessageBox.Ok)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
print('OK clicked')
def _addRow(self):
if self.ErrInt or self.ErrFloat:
rowCount = self.tableWidget.rowCount()
self.tableWidget.insertRow(rowCount)
columnCount = self.tableWidget.columnCount()
for col in range(columnCount):
print(col)
if col == 0:
check_box = internal(self)
check_box.stateChanged.connect(self._clickCheckBox)
self.tableWidget.setCellWidget(rowCount, col, check_box)
elif col == 3:
angle = QtWidgets.QTableWidgetItem('20')
angle.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(rowCount, col, angle)
elif col == 6:
lista = ['Not Linked'] + [str(i) for i in range(1, self.tableWidget.rowCount())]
mesh = Mesh(self, lista)
self.tableWidget.setCellWidget(rowCount, col, mesh)
mesh.currentIndexChanged.connect(self._comboBoxRevision)
elif col == 7:
Acell = QtWidgets.QTableWidgetItem('0')
Acell.setFlags(QtCore.Qt.ItemIsEnabled)
Acell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(rowCount, col, Acell)
elif col == 8:
Xcell = QtWidgets.QTableWidgetItem('0')
Xcell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(rowCount, col, Xcell)
elif col == 9:
Ycell = QtWidgets.QTableWidgetItem('0')
Ycell.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(rowCount, col, Ycell)
else:
cellCenter = QtWidgets.QTableWidgetItem()
cellCenter.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setItem(rowCount, col, cellCenter)
self.statusLabel.setText('OK: Row just added')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
def _removeRow(self):
if self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(self.tableWidget.rowCount()-1)
self.statusLabel.setText('OK: Row just deleted')
self.statusLabel.setStyleSheet("background-color:rgba(122, 167, 146, 150); color:rgb(0, 0, 0)")
ef __init__(self, parent):
super().__init__(parent)
self.stateChanged.connect(self.getCheckValue)
def getCheckValue(self):
if self.isChecked() == True:
print('Check Value Active')
return True
elif self.isChecked() == False:
print('Check Value Deactivated')
return False
class Mesh(QComboBox):
def __init__(self, parent, aa):
super().__init__(parent)
self.addItems(aa)
self.currentIndexChanged.connect(self.getComboValue)
def getComboValue(self):
print(self.currentText())
return self.currentText()
app = QApplication(sys.argv)
main_window = mainWindow()
widget = QtWidgets.QStackedWidget()
widget.addWidget(main_window)
widget.resize(658, 650)
widget.show()
try:
sys.exit(app.exec_())
except:
print('Exiting') | true | true |
f730617f1ec79e7ee02ce125d82a1b33d9acb7f8 | 4,003 | py | Python | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 114 | 2018-07-17T17:47:11.000Z | 2022-03-08T09:33:39.000Z | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 28 | 2018-07-18T10:43:37.000Z | 2020-11-24T06:08:18.000Z | workflow/powerbiCatToM.py | sixtysecondrevit/dynamoPython | dfb4b001800ebf9ab308510db40cfc5a5a953fee | [
"MIT"
] | 56 | 2018-07-17T17:57:28.000Z | 2022-03-26T12:30:39.000Z | '''
CATEGORIES TO M SCRIPT - CREATE CONDITIONAL STATEMENT CODE FOR POWER BI
-
a dynamoPython script, visit the website for more details
https://github.com/Amoursol/dynamoPython
'''
__author__ = 'Adam Bear - adam@ukbear.com'
__twitter__ = '@adambear82'
__github__ = '@adambear82'
__version__ = '1.0.0'
'''
for large projects with lots of clashes it is useful to analyse in
a business inteligence or data visualisation tool such as ms power bi.
creating the conditonal statement in power bi can take a long time if
there are a lot of categories to include
'''
# ------------------------
# import modules
# ------------------------
# refer to the clipboard
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Clipboard
# refer to the document manager
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
# refer to the revit API
clr.AddReference('RevitAPI')
import Autodesk
from Autodesk.Revit.DB import *
# ------------------------
# inputs & variables
# ------------------------
# some categoreies exported from navisworks are not included as
# categories in visibility graphics, for examplevv
# Handrails, Landings, Pads, Runs, Slab Edges, Top Rails, Wall Sweeps
# remove single and double spaces after commas and split into list
catsInput = IN[0]
catsReplace1 = catsInput.replace(', ', ',')
catsReplace2 = catsReplace1.replace(', ', ',')
catsManual = catsReplace2.split(',')
catsManual.sort()
# provide reference strings
hashtag = 'Renamed Columns1'
pathlink = 'pathlink'
filterIn = 'filter_in'
filterOut = 'filter_out'
# ------------------------
# get categories
# ------------------------
# get categories that can add sub categories
# ie the categories which appear in vis graphics
# annotated from forum post with kudos to René Picazo
# https://forum.dynamobim.com/t/get-all-elements-in-model-categories/9447/7
modelCats = []
for cat in doc.Settings.Categories :
if cat.CategoryType == CategoryType.Model and cat.CanAddSubcategory:
modelCats.append(cat.Name)
# only append extra categories if they have been defined in input
if catsInput :
for cat in catsManual :
modelCats.append(cat)
# sort alphabetically so its easier to read
cats = sorted(modelCats)
# ------------------------
# strings
# ------------------------
# the 1st line adds a column to the table based on a filter on the hash
table = ''.join(('= Table.AddColumn(#"', hashtag, '", "filter",'))
# define strings to be used in M code
each = 'each if ['
elif0 = 'else if ['
elif1 = '] = "'
elif2 = '" then "'
elif3 = '"'
# the 2nd line is a special case
# where cats[0] requires 'each' instead of 'else if'
catJoin = each, pathlink, elif1, cats[0], elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines = []
listLines.append(temp)
# the 3rd line and onwards starts with else if
# each row is checked if it is equall to one of the remaining cats
# cats is sliced by [1:] to return items from index 1 to the last index
for c in cats[1:] :
catJoin = elif0, pathlink, elif1, c, elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines.append(temp)
lines = '\r\n'.join(listLines)
# the final line starts with else
# rows not in cats are given the filterOut value
strElse = ''.join(('else "', filterOut, '")'))
# the code is brought together with new lines between each line
code = '\r\n'.join((table, lines, strElse))
# ------------------------
# send to clipboard
# ------------------------
# annotated with kudos to bakery 'by send to clipboard from revit' (sic)
# https://github.com/LukeyJohnson/BakeryForDynamo/blob/97e5622db7ba14cd42caac9b8bd4fdba6b66871e/nodes/bv%20Send%20to%20Clipboard%20from%20Revit.dyf#L5-L12
# try to copy the code, provide a message if it fails
try:
Clipboard.SetText(code)
copyMsg = code
except:
copyMsg = 'Data could not be copied to clipboard'
# ------------------------
# output
# ------------------------
OUT = copyMsg
| 29.433824 | 154 | 0.683987 | __author__ = 'Adam Bear - adam@ukbear.com'
__twitter__ = '@adambear82'
__github__ = '@adambear82'
__version__ = '1.0.0'
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Clipboard
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference('RevitAPI')
import Autodesk
from Autodesk.Revit.DB import *
catsInput = IN[0]
catsReplace1 = catsInput.replace(', ', ',')
catsReplace2 = catsReplace1.replace(', ', ',')
catsManual = catsReplace2.split(',')
catsManual.sort()
hashtag = 'Renamed Columns1'
pathlink = 'pathlink'
filterIn = 'filter_in'
filterOut = 'filter_out'
modelCats = []
for cat in doc.Settings.Categories :
if cat.CategoryType == CategoryType.Model and cat.CanAddSubcategory:
modelCats.append(cat.Name)
if catsInput :
for cat in catsManual :
modelCats.append(cat)
cats = sorted(modelCats)
table = ''.join(('= Table.AddColumn(#"', hashtag, '", "filter",'))
each = 'each if ['
elif0 = 'else if ['
elif1 = '] = "'
elif2 = '" then "'
elif3 = '"'
catJoin = each, pathlink, elif1, cats[0], elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines = []
listLines.append(temp)
for c in cats[1:] :
catJoin = elif0, pathlink, elif1, c, elif2, filterIn, elif3
temp = ''.join(catJoin)
listLines.append(temp)
lines = '\r\n'.join(listLines)
strElse = ''.join(('else "', filterOut, '")'))
code = '\r\n'.join((table, lines, strElse))
Clipboard.SetText(code)
copyMsg = code
except:
copyMsg = 'Data could not be copied to clipboard'
OUT = copyMsg
| true | true |
f730625ed1d40a9135df52e51742c9f032d3ce20 | 1,930 | py | Python | results/rabi_and_lmg_optimizations_20190227/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | 1 | 2020-07-21T02:31:41.000Z | 2020-07-21T02:31:41.000Z | results/rabi_and_lmg_optimizations_20190228/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | results/rabi_and_lmg_optimizations_20190228/script_rabi_bangramp_neldermead.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
# ------ build and check name for output file
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method
),
other_options=dict(
scan_times=np.linspace(0.1, 4, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| 29.242424 | 70 | 0.707254 | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method
),
other_options=dict(
scan_times=np.linspace(0.1, 4, 100)
)
)
results.to_csv(output_file_name)
| true | true |
f730640adaf31ffe5b7c8de73e4fd29b5bc4983e | 9,252 | py | Python | doc/source/conf.py | ifxit/nidho | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | 11 | 2016-06-09T12:07:14.000Z | 2018-01-18T08:01:08.000Z | doc/source/conf.py | ifxit/nidho | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | 4 | 2016-07-06T11:06:34.000Z | 2020-01-02T10:11:48.000Z | doc/source/conf.py | ifxit/nidhogg | 7d49bb7d879d0f3d444df50f2c18c2cdf883216c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Nidhogg documentation build configuration file, created by
# sphinx-quickstart on Thu May 28 09:48:45 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nidhogg'
copyright = u'2018, Roland Wohlfahrt, Christian Assing'
author = u'Roland Wohlfahrt, Christian Assing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9.0'
# The full version, including alpha/beta/rc tags.
release = '3.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nidhoggdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Nidhogg.tex', u'Nidhogg Documentation',
u'Roland Wohlfahrt, Christian Assing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nidhogg', u'Nidhogg Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Nidhogg', u'Nidhogg Documentation',
author, 'Nidhogg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.236934 | 79 | 0.718223 |
import os
import shlex
import sys
sys.path.insert(0, os.path.abspath('../../'))
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Nidhogg'
copyright = u'2018, Roland Wohlfahrt, Christian Assing'
author = u'Roland Wohlfahrt, Christian Assing'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9.0'
# The full version, including alpha/beta/rc tags.
release = '3.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nidhoggdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Nidhogg.tex', u'Nidhogg Documentation',
u'Roland Wohlfahrt, Christian Assing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nidhogg', u'Nidhogg Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Nidhogg', u'Nidhogg Documentation',
author, 'Nidhogg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f730647bf68842c5a3eff14c987502a2fca1c432 | 141,522 | py | Python | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/stl/trex_stl_client.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | null | null | null | import time
import sys
import os
from collections import OrderedDict
from functools import wraps
from ..utils.common import get_current_user, list_intersect, is_sub_list, user_input, list_difference, parse_ports_from_profiles
from ..utils import parsing_opts, text_tables
from ..utils.text_opts import format_text, format_num
from ..common.trex_exceptions import *
from ..common.trex_events import Event
from ..common.trex_logger import Logger
from ..common.trex_client import TRexClient, PacketBuffer
from ..common.trex_types import *
from ..common.trex_types import PortProfileID, ALL_PROFILE_ID
from ..common.trex_psv import *
from ..common.trex_api_annotators import client_api, console_api
from .trex_stl_port import STLPort
from .trex_stl_streams import STLStream, STLProfile, STLTaggedPktGroupTagConf
from .trex_stl_stats import CPgIdStats
def validate_port_input(port_arg):
    """Decorator to support PortProfileID type input.
    Convert int,str argument to PortProfileID type

    :parameters:
        port_arg : str
            name of the decorated function's parameter that carries the
            port (or list of ports) to be normalized

    The wrapped method may receive the ports as ``int``, ``str`` or
    ``PortProfileID`` (scalar or list); by the time the real method body
    runs, the value is always ``PortProfileID`` (or a list of them).
    """
    def wrap (func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # introspect the wrapped function to locate 'port_arg' positionally
            code = func.__code__
            fname = func.__name__
            names = code.co_varnames[:code.co_argcount]
            argname = port_arg
            try:
                # '-1' skips the implicit 'self' slot (decorator is used on methods only)
                port_index = names.index(argname) - 1
                argval = args[port_index]
                args = list(args)
                args[port_index] = convert_port_to_profile(argval)
                args = tuple(args)
            except (ValueError, IndexError):
                # argument was passed by keyword (or omitted, yielding None)
                argval = kwargs.get(argname)
                kwargs[argname] = convert_port_to_profile(argval)
            return func(self, *args, **kwargs)
        def convert_port_to_profile(port):
            # None passes through untouched (callers treat it as "use default ports")
            if port is None:
                return port
            if isinstance(port, list):
                # convert element-wise, preserving list order
                result = list(port)
                for idx, val in enumerate(result):
                    validate_type('port', val, (int, str, PortProfileID))
                    result[idx] = PortProfileID(str(val))
            else:
                validate_type('port', port, (int, str, PortProfileID))
                result = PortProfileID(str(port))
            return result
        return wrapper
    return wrap
class TPGState:
    """
    A simple class representing the states of Tagged Packet Group State machine.
    This class should be always kept in Sync with the state machine in the server.
    """
    DISABLED        = 0   # Tagged Packet Group is disabled.
    ENABLED_CP      = 1   # Tagged Packet Group Control Plane is enabled, message sent to Rx.
    ENABLED_CP_RX   = 2   # Tagged Packet Group Control Plane and Rx are enabled. Awaiting Data Plane.
    ENABLED         = 3   # Tagged Packet Group is enabled.
    DISABLED_DP     = 4   # Tagged Packet Group Data Plane disabled, message sent to Rx.
    DISABLED_DP_RX  = 5   # Tagged Packet Group Data Plane and Rx disabled. Object can be destroyed.
    RX_ALLOC_FAILED = 6   # Rx Allocation Failed
    DP_ALLOC_FAILED = 7   # Dp Allocation Failed

    ALL_STATES = [DISABLED, ENABLED_CP, ENABLED_CP_RX, ENABLED, DISABLED_DP, DISABLED_DP_RX, RX_ALLOC_FAILED, DP_ALLOC_FAILED]
    ERROR_STATES = [RX_ALLOC_FAILED, DP_ALLOC_FAILED]

    def __init__(self, initial_state):
        """Create a TPGState wrapper; rejects values unknown to the server state machine."""
        if initial_state not in TPGState.ALL_STATES:
            raise TRexError("Invalid TPG State {}".format(initial_state))
        self._state = initial_state
        # human readable explanation for each failure state
        self.fail_messages = {
            TPGState.RX_ALLOC_FAILED: "Rx counter allocation failed!",
            TPGState.DP_ALLOC_FAILED: "Tx counter allocation failed!"
        }

    def is_error_state(self):
        """
        Indicate if this TPGState is an error state.
        """
        return self._state in TPGState.ERROR_STATES

    def get_fail_message(self):
        """
        Get the fail message to print to the user for this state.
        """
        return self.fail_messages[self._state] if self.is_error_state() else "TPG State is valid!"

    def __eq__(self, other):
        # only comparisons between two TPGState instances are meaningful
        if isinstance(other, TPGState):
            return self._state == other._state
        raise TRexError("Invalid comparision for TPGState")
class STLClient(TRexClient):
    # different modes for attaching traffic to ports
    CORE_MASK_SPLIT  = 1   # spread the traffic evenly across all cores of the port
    CORE_MASK_PIN    = 2   # per dual-port group, pin half the cores to each port
    CORE_MASK_SINGLE = 3   # transmit from a single core only
    def __init__(self,
                 username = get_current_user(),
                 server = "localhost",
                 sync_port = 4501,
                 async_port = 4500,
                 verbose_level = "error",
                 logger = None,
                 sync_timeout = None,
                 async_timeout = None
                 ):
        """
        TRex stateless client

        :parameters:
             username : string
                the user name, for example imarom

             server : string
                the server name or ip

             sync_port : int
                the RPC port

             async_port : int
                the ASYNC port (subscriber port)

             verbose_level: str
                one of "none", "critical", "error", "info", "debug"

             logger: instance of AbstractLogger
                if None, will use ScreenLogger

             sync_timeout: int
                time in sec for timeout for RPC commands. for local lab keep it as default (3 sec)
                higher number would be more resilient for Firewalls but slower to identify real server crash

             async_timeout: int
                time in sec for timeout for async notification. for local lab keep it as default (3 sec)
                higher number would be more resilient for Firewalls but slower to identify real server crash
        """

        # API version advertised to the server during connect
        api_ver = {'name': 'STL', 'major': 5, 'minor': 1}

        TRexClient.__init__(self,
                            api_ver,
                            username,
                            server,
                            sync_port,
                            async_port,
                            verbose_level,
                            logger,
                            sync_timeout,
                            async_timeout)
        # per-PG-ID (flow stats / latency) statistics aggregator over the RPC link
        self.pgid_stats = CPgIdStats(self.conn.rpc)
        self.tpg_status = None # TPG Status cached in Python Side
    def get_mode (self):
        """Return the client mode identifier string ('STL' for stateless)."""
        return "STL"
############################ called #############################
############################ by base #############################
############################ TRex Client #############################
    def _on_connect(self):
        # STL needs no extra per-connect work beyond the common flow
        return RC_OK()
def _on_connect_create_ports(self, system_info):
"""
called when connecting to the server
triggered by the common client object
"""
# create ports
port_map = {}
for info in system_info['ports']:
port_id = info['index']
port_map[port_id] = STLPort(self.ctx, port_id, self.conn.rpc, info, self.is_dynamic)
return self._assign_ports(port_map)
    def _on_connect_clear_stats(self):
        """Baseline all port statistics right after connecting."""
        # clear stats to baseline; warnings only, to keep connect output quiet
        with self.ctx.logger.suppress(verbose = "warning"):
            self.clear_stats(ports = self.get_all_ports(), clear_xstats = False)
        return RC_OK()
############################ events #############################
############################ #############################
############################ #############################
# register all common events
def _register_events (self):
super(STLClient, self)._register_events()
self.ctx.event_handler.register_event_handler("profile started", self._on_profile_started)
self.ctx.event_handler.register_event_handler("profile stopped", self._on_profile_stopped)
self.ctx.event_handler.register_event_handler("profile paused", self._on_profile_paused)
self.ctx.event_handler.register_event_handler("profile resumed", self._on_profile_resumed)
self.ctx.event_handler.register_event_handler("profile finished tx", self._on_profile_finished_tx)
self.ctx.event_handler.register_event_handler("profile error", self._on_profile_error)
def _on_profile_started (self, port_id, profile_id):
msg = "Profile {0}.{1} has started".format(port_id, profile_id)
if port_id in self.ports:
self.ports[port_id].async_event_profile_started(profile_id)
return Event('server', 'info', msg)
def _on_profile_stopped (self, port_id, profile_id):
msg = "Profile {0}.{1} has stopped".format(port_id, profile_id)
if port_id in self.ports:
self.ports[port_id].async_event_profile_stopped(profile_id)
return Event('server', 'info', msg)
def _on_profile_paused (self, port_id, profile_id):
msg = "Profile {0}.{1} has paused".format(port_id, profile_id)
if port_id in self.ports:
self.ports[port_id].async_event_profile_paused(profile_id)
return Event('server', 'info', msg)
def _on_profile_resumed (self, port_id, profile_id):
msg = "Profile {0}.{1} has resumed".format(port_id, profile_id)
if port_id in self.ports:
self.ports[port_id].async_event_profile_resumed(profile_id)
return Event('server', 'info', msg)
def _on_profile_finished_tx (self, port_id, profile_id):
msg = "Profile {0}.{1} job done".format(port_id, profile_id)
if port_id in self.ports:
self.ports[port_id].async_event_profile_job_done(profile_id)
ev = Event('server', 'info', msg)
if port_id in self.get_acquired_ports():
self.ctx.logger.info(ev)
return ev
def _on_profile_error (self, port_id, profile_id):
msg = "Profile {0}.{1} job failed".format(port_id, profile_id)
return Event('server', 'warning', msg)
######################### private/helper #########################
############################ functions #############################
############################ #############################
# remove all RX filters in a safe manner
@validate_port_input("ports")
def _remove_rx_filters (self, ports, rx_delay_ms):
# get the enabled RX profiles
rx_ports = [p for p in ports if self.ports[p.port_id].has_profile_rx_enabled(p.profile_id)]
if not rx_ports:
return RC_OK()
# block while any RX configured profile has not yet have it's delay expired
while any([not self.ports[p.port_id].has_rx_delay_expired(p.profile_id, rx_delay_ms) for p in rx_ports]):
time.sleep(0.01)
# remove RX filters
return self._for_each_port('remove_rx_filters', rx_ports)
# Check console API ports argument
def validate_profile_input(self, input_profiles):
ports = []
result_profiles = []
for profile in input_profiles:
if profile.profile_id == ALL_PROFILE_ID:
if int(profile) not in ports:
ports.append(int(profile))
else:
raise TRexError("Cannot have more than on %d.* in the params" %int(profile))
for pid in ports:
for profile in input_profiles:
if int(profile) == pid and profile.profile_id != ALL_PROFILE_ID:
raise TRexError("Cannot have %d.* and %s passed together as --ports" %(int(profile), str(profile)))
port_profiles = self.ports[pid].get_port_profiles("all")
result_profiles.extend(port_profiles)
for profile in input_profiles:
if profile.profile_id != ALL_PROFILE_ID:
if profile not in result_profiles:
result_profiles.append(profile)
return result_profiles
# Get all profiles with the certain state from ports
# state = {"active", "transmitting", "paused", "streams"}
def get_profiles_with_state(self, state):
active_ports = self.get_acquired_ports()
active_profiles = []
for port in active_ports:
port_profiles = self.ports[port].get_port_profiles(state)
active_profiles.extend(port_profiles)
return active_profiles
############################ Stateless #############################
############################ API #############################
############################ #############################
    @client_api('command', True)
    def reset(self, ports = None, restart = False):
        """
        Force acquire ports, stop the traffic, remove all streams and clear stats

        :parameters:
            ports : list
               Ports on which to execute the command

            restart: bool
               Restart the NICs (link down / up)

        :raises:
            + :exc:`TRexError`
        """
        ports = ports if ports is not None else self.get_all_ports()
        ports = self.psv.validate('reset', ports)

        # build a '<port>.*' wildcard per port - reset applies to every profile
        all_profiles = []
        for port in ports:
            profile = PortProfileID(str(port) + ".*")
            all_profiles.append(profile)

        if restart:
            # link toggle needs hardware support on every port
            if not all([p.is_link_change_supported() for p in self.ports.values()]):
                raise TRexError("NICs of this type do not support link down, can't use restart flag.")
            self.ctx.logger.pre_cmd("Hard resetting ports {0}:".format(ports))
        else:
            self.ctx.logger.pre_cmd("Resetting ports {0}:".format(ports))

        try:
            with self.ctx.logger.suppress():
                # force take the port and ignore any streams on it
                self.acquire(ports, force = True, sync_streams = False)
                self.stop(all_profiles)
                self.remove_all_streams(all_profiles)
                self.clear_stats(ports)
                self.set_port_attr(ports,
                                   promiscuous = False if self.any_port.is_prom_supported() else None,
                                   link_up = True if restart else None)
                self.remove_rx_queue(ports)
                self._for_each_port('stop_capture_port', ports)
                self.remove_all_captures()
                self.set_service_mode(ports, False)

            self.ctx.logger.post_cmd(RC_OK())

        except TRexError as e:
            self.ctx.logger.post_cmd(False)
            raise
    @client_api('command', True)
    def acquire (self, ports = None, force = False, sync_streams = True):
        """
        Acquires ports for executing commands

        :parameters:
            ports : list
                Ports on which to execute the command

            force : bool
                Force acquire the ports.

            sync_streams: bool
                sync with the server about the configured streams

        :raises:
            + :exc:`TRexError`
        """

        # by default use all ports
        ports = ports if ports is not None else self.get_all_ports()

        # validate ports
        ports = self.psv.validate('acquire', ports)

        if force:
            self.ctx.logger.pre_cmd("Force acquiring ports {0}:".format(ports))
            # a forced acquire tears down any TPG context left behind by another user
            for port in ports:
                tpg_status = self.get_tpg_status(port=port)
                enabled = tpg_status.get("enabled", False)
                if enabled:
                    username = tpg_status["data"]["username"]
                    tpg_ports = tpg_status["data"]["acquired_ports"]
                    self.ctx.logger.pre_cmd(format_text("Found TPG Context of user {} in ports {}".format(username, tpg_ports), "yellow"))
                    self.disable_tpg(username)
        else:
            self.ctx.logger.pre_cmd("Acquiring ports {0}:".format(ports))

        rc = self._for_each_port('acquire', ports, force)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            # cleanup: release whatever was partially acquired
            self._for_each_port('release', ports)
            raise TRexError(rc)

        self._post_acquire_common(ports)

        # sync streams
        if sync_streams:
            rc = self._for_each_port('sync_streams', ports)
            if not rc:
                raise TRexError(rc)
    @client_api('command', True)
    def release(self, ports = None):
        """
        Release ports

        :parameters:
            ports : list
                Ports on which to execute the command

        :raises:
            + :exc:`TRexError`
        """

        ports = ports if ports is not None else self.get_acquired_ports()

        # validate ports
        ports = self.psv.validate('release', ports, PSV_ACQUIRED)

        if self.tpg_status is None:
            # Nothing in cache - fetch (and cache) the TPG status from the server
            self.get_tpg_status()

        # releasing ports must also tear down this user's TPG context
        if self.tpg_status["enabled"]:
            self.disable_tpg()

        self.ctx.logger.pre_cmd("Releasing ports {0}:".format(ports))
        rc = self._for_each_port('release', ports)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
    @client_api('command', True)
    def set_service_mode (self, ports = None, enabled = True, filtered = False, mask = None):
        ''' based on :meth:`trex.stl.trex_stl_client.STLClient.set_service_mode_base` '''
        # call the base method (validates arguments and prints the pre-cmd line)
        self.set_service_mode_base(ports, enabled, filtered, mask)
        # propagate the mode change to each port on the server
        rc = self._for_each_port('set_service_mode', ports, enabled, filtered, mask)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def remove_all_streams (self, ports = None):
"""
remove all streams from port(s)
:parameters:
ports : list
Ports on which to execute the command
:raises:
+ :exc:`TRexError`
"""
ports = ports if ports is not None else self.get_acquired_ports()
# validate ports
ports = self.psv.validate('remove_all_streams', ports, (PSV_ACQUIRED, PSV_IDLE))
self.ctx.logger.pre_cmd("Removing all streams from port(s) {0}:".format(ports))
rc = self._for_each_port('remove_all_streams', ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
    @client_api('command', True)
    @validate_port_input("ports")
    def add_streams (self, streams, ports = None):
        """
        Add a list of streams to port(s)

        :parameters:
            ports : list
                Ports on which to execute the command
            streams: list
                Streams to attach (or profile)

        :returns:
            List of stream IDs in order of the stream list

        :raises:
            + :exc:`TRexError`
        """
        ports = ports if ports is not None else self.get_acquired_ports()

        # validate ports
        ports = self.psv.validate('add_streams', ports, (PSV_ACQUIRED, PSV_IDLE))

        # a whole profile may be passed instead of a stream list
        if isinstance(streams, STLProfile):
            streams = streams.get_streams()

        # transform single stream
        if not isinstance(streams, list):
            streams = [streams]

        # check streams
        if not all([isinstance(stream, STLStream) for stream in streams]):
            raise TRexArgumentError('streams', streams)

        self.ctx.logger.pre_cmd("Attaching {0} streams to port(s) {1}:".format(len(streams), ports))
        rc = self._for_each_port('add_streams', ports, streams)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)

        # return the stream IDs
        return rc.data()
    @client_api('command', True)
    def add_profile(self, filename, ports = None, **kwargs):
        """ |  Add streams from profile by its type. Supported types are:
            |  .py
            |  .yaml
            |  .pcap file that converted to profile automatically

            :parameters:
                filename : string
                    filename (with path) of the profile
                ports : list
                    list of ports to add the profile (default: all acquired)
                kwargs : dict
                    forward those key-value pairs to the profile (tunables)

            :returns:
                List of stream IDs in order of the stream list

            :raises:
                + :exc:`TRexError`
        """

        validate_type('filename', filename, basestring)
        # 'kwargs' are forwarded to the profile as tunables
        profile = STLProfile.load(filename, **kwargs)
        return self.add_streams(profile.get_streams(), ports)
@client_api('command', True)
@validate_port_input("ports")
def remove_streams (self, stream_id_list, ports = None):
"""
Remove a list of streams from ports
:parameters:
stream_id_list: int or list of ints
Stream id list to remove
ports : list
Ports on which to execute the command
:raises:
+ :exc:`TRexError`
"""
validate_type('streams_id_list', stream_id_list, (int, list))
# transform single stream
stream_id_list = listify(stream_id_list)
# check at least one exists
if not stream_id_list:
raise TRexError("remove_streams - 'stream_id_list' cannot be empty")
# check stream IDs
for i, stream_id in enumerate(stream_id_list):
validate_type('stream ID:{0}'.format(i), stream_id, int)
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.psv.validate('remove_streams', ports, (PSV_ACQUIRED, PSV_IDLE))
# transform single stream
if not isinstance(stream_id_list, list):
stream_id_list = [stream_id_list]
# check streams
for stream_id in stream_id_list:
validate_type('stream_id', stream_id, int)
# remove streams
self.ctx.logger.pre_cmd("Removing {0} streams from port(s) {1}:".format(len(stream_id_list), ports))
rc = self._for_each_port("remove_streams", ports, stream_id_list)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
# check that either port is resolved or all streams have explicit dest MAC
def __check_streams_explicit_dest(self, streams_per_port):
for port_id, streams in streams_per_port.items():
if self.ports[port_id].is_resolved():
continue
for stream in streams:
if not stream.is_explicit_dst_mac():
err = 'Port %s dest MAC is invalid and there are streams without explicit dest MAC.' % port_id
raise TRexError(err)
    # common checks for start API
    def __pre_start_check (self, cmd_name, ports, force, streams_per_port = None):
        """
        Shared validation for start()/push APIs.

        Rejects '<port>.*' wildcard profiles (a specific profile is required to
        start traffic), then validates the port states - strictly unless 'force'.
        Returns the validated port list.
        """
        ports = listify(ports)
        for port in ports:
            if isinstance(port, PortProfileID):
                if port.profile_id == ALL_PROFILE_ID:
                    err = 'Profile id * is invalid for starting the traffic. Please assign a specific profile id'
                    raise TRexError(err)

        if force:
            # 'force' skips the idle / service-mode / resolved requirements
            return self.psv.validate(cmd_name, ports)

        states = {PSV_UP:          "check the connection or specify 'force'",
                  PSV_IDLE:        "please stop them or specify 'force'",
                  PSV_NON_SERVICE: "please disable service mode or specify 'force'"}

        if streams_per_port:
            # with explicit dest MACs the ports need not be ARP-resolved
            self.__check_streams_explicit_dest(streams_per_port)
        else:
            states[PSV_RESOLVED] = "please resolve them or specify 'force'";

        return self.psv.validate(cmd_name, ports, states)
def __decode_core_mask (self, ports, core_mask):
available_modes = [self.CORE_MASK_PIN, self.CORE_MASK_SPLIT, self.CORE_MASK_SINGLE]
# predefined modes
if isinstance(core_mask, int):
if core_mask not in available_modes:
raise TRexError("'core_mask' can be either %s or a list of masks" % ', '.join(available_modes))
decoded_mask = {}
for port in ports:
# a pin mode was requested and we have
# the second port from the group in the start list
if (core_mask == self.CORE_MASK_PIN) and ( (port ^ 0x1) in ports ):
decoded_mask[port] = 0x55555555 if( port % 2) == 0 else 0xAAAAAAAA
elif core_mask == self.CORE_MASK_SINGLE:
decoded_mask[port] = 0x1
else:
decoded_mask[port] = None
return decoded_mask
# list of masks
elif isinstance(core_mask, list):
if len(ports) != len(core_mask):
raise TRexError("'core_mask' list must be the same length as 'ports' list")
decoded_mask = {}
for i, port in enumerate(ports):
decoded_mask[port] = core_mask[i]
return decoded_mask
@client_api('command', True)
@validate_port_input("ports")
def start (self,
ports = None,
mult = "1",
force = False,
duration = -1,
total = False,
core_mask = None,
synchronized = False):
"""
Start traffic on port(s)
:parameters:
ports : list
Ports on which to execute the command
mult : str
Multiplier in a form of pps, bps, or line util in %
Examples: "5kpps", "10gbps", "85%", "32mbps"
force : bool
If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
True: Force start
False: Do not force start
duration : int
Limit the run time (seconds)
-1 = unlimited
total : bool
Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
True: Divide bandwidth among the ports
False: Duplicate
core_mask: CORE_MASK_SPLIT, CORE_MASK_PIN, CORE_MASK_SINGLE or a list of masks (one per port)
Determine the allocation of cores per port
In CORE_MASK_SPLIT all the traffic will be divided equally between all the cores
associated with each port
In CORE_MASK_PIN, for each dual ports (a group that shares the same cores)
the cores will be divided half pinned for each port
synchronized: bool
In case of several ports, ensure their transmitting time is synchronized.
Must use adjacent ports (belong to same set of cores).
Will set default core_mask to 0x1.
Recommended ipg 1ms and more.
:raises:
+ :exc:`TRexError`
"""
if ports is None:
ports = []
for pid in self.get_acquired_ports():
port = PortProfileID(pid)
ports.append(port)
else:
ports = listify(ports)
port_id_list = parse_ports_from_profiles(ports)
streams_per_port = {}
for port in port_id_list:
streams_per_port[port] = self.ports[port].streams.values()
ports = self.__pre_start_check('START', ports, force, streams_per_port)
validate_type('mult', mult, basestring)
validate_type('force', force, bool)
validate_type('duration', duration, (int, float))
validate_type('total', total, bool)
validate_type('core_mask', core_mask, (type(None), int, list))
#########################
# decode core mask argument
if core_mask is None:
core_mask = self.CORE_MASK_SINGLE if synchronized else self.CORE_MASK_SPLIT
decoded_mask = self.__decode_core_mask(port_id_list, core_mask)
#######################
# verify multiplier
mult_obj = parsing_opts.decode_multiplier(mult,
allow_update = False,
divide_count = len(ports) if total else 1)
if not mult_obj:
raise TRexArgumentError('mult', mult)
# stop active ports if needed
active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
if active_profiles and force:
self.stop(active_profiles)
if synchronized:
# start synchronized (per pair of ports) traffic
if len(ports) % 2:
raise TRexError('Must use even number of ports in synchronized mode')
for port in ports:
pair_port = int(port) ^ 0x1
if isinstance(port, PortProfileID):
pair_port = str(pair_port) + "." + str(port.profile_id)
pair_port = PortProfileID(pair_port)
if pair_port not in ports:
raise TRexError('Must use adjacent ports in synchronized mode. Port "%s" has not pair.' % port)
start_time = time.time()
with self.ctx.logger.supress():
ping_data = self.ping_rpc_server()
start_at_ts = ping_data['ts'] + max((time.time() - start_time), 0.5) * len(ports)
synchronized_str = 'synchronized '
else:
start_at_ts = 0
synchronized_str = ''
# clear flow stats and latency stats when starting traffic. (Python cache only)
self.pgid_stats.clear_stats(clear_flow_stats=True, clear_latency_stats=True)
# start traffic
self.ctx.logger.pre_cmd("Starting {}traffic on port(s) {}:".format(synchronized_str, ports))
# mask is port specific information
pargs = {k:{'mask': v} for k, v in decoded_mask.items()}
rc = self._for_each_port("start", ports, mult_obj, duration, force, start_at_ts = start_at_ts, pargs = pargs)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
return rc
    @client_api('command', True)
    @validate_port_input("ports")
    def stop (self, ports = None, rx_delay_ms = None):
        """
        Stop port(s)

        :parameters:
            ports : list
                Ports on which to execute the command

            rx_delay_ms : int
                time to wait until RX filters are removed
                this value should reflect the time it takes
                packets which were transmitted to arrive
                to the destination.
                after this time the RX filters will be removed

        :raises:
            + :exc:`TRexError`
        """

        if ports is None:
            ports = self.get_profiles_with_state("active")
            # nothing is transmitting - nothing to do
            if not ports:
                return

        ports = self.psv.validate('STOP', ports, PSV_ACQUIRED)
        if not ports:
            return

        port_id_list = parse_ports_from_profiles(ports)

        self.ctx.logger.pre_cmd("Stopping traffic on port(s) {0}:".format(ports))
        rc = self._for_each_port('stop', ports)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)

        if rx_delay_ms is None:
            # virtual NICs buffer more, so give in-flight packets extra time
            if self.ports[port_id_list[0]].is_virtual(): # assume all ports have same type
                rx_delay_ms = 100
            else:
                rx_delay_ms = 10

        # remove any RX filters
        rc = self._remove_rx_filters(ports, rx_delay_ms)
        if not rc:
            raise TRexError(rc)
    @client_api('command', True)
    def wait_on_traffic (self, ports = None, timeout = None, rx_delay_ms = None):
        """
        .. _wait_on_traffic:

        Block until traffic on specified port(s) has ended

        :parameters:
            ports : list
                Ports on which to execute the command

            timeout : int
                timeout in seconds
                default will be blocking

            rx_delay_ms : int
                Time to wait (in milliseconds) after last packet was sent, until RX filters used for
                measuring flow statistics and latency are removed.
                This value should reflect the time it takes packets which were transmitted to arrive
                to the destination.
                After this time, RX filters will be removed, and packets arriving for per flow statistics feature and latency flows will be counted as errors.

        :raises:
            + :exc:`TRexTimeoutError` - in case timeout has expired
            + :exe:'TRexError'
        """

        # call the base implementation
        ports = ports if ports is not None else self.get_acquired_ports()
        ports = self.psv.validate('wait_on_traffic', ports, PSV_ACQUIRED)

        TRexClient.wait_on_traffic(self, ports, timeout)

        if rx_delay_ms is None:
            # virtual NICs buffer more, so give in-flight packets extra time
            if self.ports[ports[0]].is_virtual(): # assume all ports have same type
                rx_delay_ms = 100
            else:
                rx_delay_ms = 10

        # remove any RX filters
        rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
        if not rc:
            raise TRexError(rc)
    @client_api('command', True)
    @validate_port_input("ports")
    def update (self, ports = None, mult = "1", total = False, force = False):
        """
        Update traffic on port(s)

        :parameters:
            ports : list
                Ports on which to execute the command

            mult : str
                Multiplier in a form of pps, bps, or line util in %
                Can also specify +/-
                Examples: "5kpps+", "10gbps-", "85%", "32mbps", "20%+"

            force : bool
                If the ports are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
                True: Force start
                False: Do not force start

            total : bool
                Determines whether to divide the configured bandwidth among the ports, or to duplicate the bandwidth for each port.
                True: Divide bandwidth among the ports
                False: Duplicate

        :raises:
            + :exc:`TRexError`
        """

        # default: every profile currently active
        ports = ports if ports is not None else self.get_profiles_with_state("active")
        ports = self.psv.validate('update', ports, (PSV_ACQUIRED, PSV_TX))

        validate_type('mult', mult, basestring)
        validate_type('force', force, bool)
        validate_type('total', total, bool)

        # verify multiplier
        mult_obj = parsing_opts.decode_multiplier(mult,
                                                  allow_update = True,
                                                  divide_count = len(ports) if total else 1)
        if not mult_obj:
            raise TRexArgumentError('mult', mult)

        # call low level functions
        self.ctx.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
        rc = self._for_each_port("update", ports, mult_obj, force)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
    @client_api('command', True)
    @validate_port_input("port")
    def update_streams(self, port, mult = "1", force = False, stream_ids = None):
        """
        | Temporary hack to update specific streams.
        | Do not rely on this function, might be removed in future!
        | Warning: Changing rates of specific streams causes out of sync between CP and DP regarding streams rate.
        | In order to update rate of whole port, need to revert changes made to rates of those streams.

        :parameters:
            port : int
                Port on which to execute the command

            mult : str
                Multiplier in a form of pps, bps, or line util in %
                Examples: "5kpps", "10gbps", "85%", "32mbps"

            force : bool
                If the port are not in stopped mode or do not have sufficient bandwidth for the traffic, determines whether to stop the current traffic and force start.
                True: Force start
                False: Do not force start

        :raises:
            + :exc:`TRexError`
        """
        validate_type('mult', mult, basestring)
        validate_type('force', force, bool)
        validate_type('stream_ids', stream_ids, list)

        ports = self.psv.validate('update_streams', port, (PSV_ACQUIRED, PSV_TX))

        if not stream_ids:
            raise TRexError('Please specify stream IDs to update')

        # verify multiplier (no '+/-' relative form allowed here)
        mult_obj = parsing_opts.decode_multiplier(mult, allow_update = False)
        if not mult_obj:
            raise TRexArgumentError('mult', mult)

        # call low level functions
        self.ctx.logger.pre_cmd('Updating streams %s on port %s:' % (stream_ids, port))
        rc = self._for_each_port("update_streams", port, mult_obj, force, stream_ids)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def pause (self, ports = None):
"""
Pause traffic on port(s). Works only for ports that are active, and only if all streams are in Continuous mode.
:parameters:
ports : list
Ports on which to execute the command
:raises:
+ :exc:`TRexError`
"""
ports = ports if ports is not None else self.get_profiles_with_state("transmitting")
ports = self.psv.validate('pause', ports, (PSV_ACQUIRED, PSV_TX))
self.ctx.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
rc = self._for_each_port("pause", ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
    @client_api('command', True)
    @validate_port_input("port")
    def pause_streams(self, port, stream_ids):
        """
        Temporary hack to pause specific streams.
        Does not change state of port.
        Do not rely on this function, might be removed in future!

        :parameters:
            port : int
                Port on which to execute the command

            stream_ids : list
                Stream IDs to pause

        :raises:
            + :exc:`TRexError`
        """
        validate_type('stream_ids', stream_ids, list)
        ports = self.psv.validate('pause_streams', port, (PSV_ACQUIRED, PSV_TX))

        if not stream_ids:
            raise TRexError('Please specify stream IDs to pause')

        self.ctx.logger.pre_cmd('Pause streams %s on port %s:' % (stream_ids, port))
        rc = self._for_each_port("pause_streams", port, stream_ids)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def resume (self, ports = None):
"""
Resume traffic on port(s)
:parameters:
ports : list
Ports on which to execute the command
:raises:
+ :exc:`TRexError`
"""
ports = ports if ports is not None else self.get_profiles_with_state("paused")
ports = self.psv.validate('resume', ports, (PSV_ACQUIRED, PSV_PAUSED))
self.ctx.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
rc = self._for_each_port('resume', ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
    @client_api('command', True)
    @validate_port_input("port")
    def resume_streams(self, port, stream_ids):
        """
        Temporary hack to resume specific streams.
        Does not change state of port.
        Do not rely on this function, might be removed in future!

        :parameters:
            port : int
                Port on which to execute the command

            stream_ids : list
                Stream IDs to resume

        :raises:
            + :exc:`TRexError`
        """
        validate_type('stream_ids', stream_ids, list)
        ports = self.psv.validate('resume_streams', port, (PSV_ACQUIRED))

        if not stream_ids:
            raise TRexError('Please specify stream IDs to resume')

        self.ctx.logger.pre_cmd('Resume streams %s on port %s:' % (stream_ids, port))
        rc = self._for_each_port("resume_streams", port, stream_ids)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
    def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec):
        """Low level per-port remote PCAP push; aggregates each port's RC into one."""
        rc = RC()

        for port_id in port_id_list:

            # for dual, provide the slave handler as well
            slave_handler = self.ports[port_id ^ 0x1].handler if is_dual else ""

            rc.add(self.ports[port_id].push_remote(pcap_filename,
                                                   ipg_usec,
                                                   speedup,
                                                   count,
                                                   duration,
                                                   is_dual,
                                                   slave_handler,
                                                   min_ipg_usec))

        return rc
    @client_api('command', True)
    def push_remote (self,
                     pcap_filename,
                     ports = None,
                     ipg_usec = None,
                     speedup = 1.0,
                     count = 1,
                     duration = -1,
                     is_dual = False,
                     min_ipg_usec = None,
                     force = False,
                     src_mac_pcap = False,
                     dst_mac_pcap = False):
        """
        Push a remote server-reachable PCAP file
        the path must be fullpath accessible to the server

        :parameters:
            pcap_filename : str
                PCAP file name in full path and accessible to the server
            ports : list
                Ports on which to execute the command
            ipg_usec : float
                Inter-packet gap in microseconds.
                Exclusive with min_ipg_usec
            speedup : float
                A factor to adjust IPG. effectively IPG = IPG / speedup
            count: int
                How many times to transmit the cap
            duration: float
                Limit runtime by duration in seconds
            is_dual: bool
                Inject from both directions.
                requires ERF file with meta data for direction.
                also requires that all the ports will be in master mode
                with their adjacent ports as slaves
            min_ipg_usec : float
                Minimum inter-packet gap in microseconds to guard from too small ipg.
                Exclusive with ipg_usec
            force : bool
                Ignore if port is active
            src_mac_pcap : bool
                Source MAC address will be taken from pcap file if True.
            dst_mac_pcap : bool
                Destination MAC address will be taken from pcap file if True.

        :raises:
            + :exc:`TRexError`
        """
        ports = ports if ports is not None else self.get_acquired_ports()
        ports = self.__pre_start_check('PUSH', ports, force)

        validate_type('pcap_filename', pcap_filename, basestring)
        validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
        validate_type('speedup', speedup, (float, int))
        validate_type('count', count, int)
        validate_type('duration', duration, (float, int))
        validate_type('is_dual', is_dual, bool)
        validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
        validate_type('src_mac_pcap', src_mac_pcap, bool)
        validate_type('dst_mac_pcap', dst_mac_pcap, bool)

        # if force - stop all profiles on any requested port that is active
        if force:
            active_ports = list(set(self.get_active_ports()).intersection(ports))
            all_profiles = []
            for port in active_ports:
                # "<port>.*" is a wildcard profile covering every profile on the port
                profile = PortProfileID(str(port) + ".*")
                all_profiles.append(profile)
            if all_profiles:
                self.stop(all_profiles)

        # for dual mode check that all are masters: each provided port (master)
        # must NOT appear together with its adjacent slave, and the slave must be owned
        if is_dual:
            if not pcap_filename.endswith('erf'):
                raise TRexError("dual mode: only ERF format is supported for dual mode")
            for port in ports:
                master = port
                slave = port ^ 0x1
                if slave in ports:
                    raise TRexError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
                if slave not in self.get_acquired_ports():
                    raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))

        # overload the count in new version, workaround instead of passing new variable:
        # the two most-significant bits of 'count' carry the src/dst MAC override
        # flags, so the repeat count itself is limited to 30 bits
        if count & 0xC0000000:
            raise TRexError("count is limited to 0x3fff,ffff")
        count = count & 0x3FFFFFFF
        if src_mac_pcap:
            count |= 0x80000000
        if dst_mac_pcap:
            count |= 0x40000000

        self.ctx.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
        rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
@client_api('command', True)
def push_pcap (self,
pcap_filename,
ports = None,
ipg_usec = None,
speedup = 1.0,
count = 1,
duration = -1,
force = False,
vm = None,
packet_hook = None,
is_dual = False,
min_ipg_usec = None,
src_mac_pcap = False,
dst_mac_pcap = False):
"""
Push a local PCAP to the server
This is equivalent to loading a PCAP file to a profile
and attaching the profile to port(s)
file size is limited to 1MB
:parameters:
pcap_filename : str
PCAP filename (accessible locally)
ports : list
Ports on which to execute the command
ipg_usec : float
Inter-packet gap in microseconds.
Exclusive with min_ipg_usec
speedup : float
A factor to adjust IPG. effectively IPG = IPG / speedup
count: int
How many times to transmit the cap
duration: float
Limit runtime by duration in seconds
force: bool
Ignore file size limit - push any file size to the server
also ignore if port is active
vm: list of VM instructions
VM instructions to apply for every packet
packet_hook : Callable or function
Will be applied to every packet
is_dual: bool
Inject from both directions.
Requires that all the ports will be in master mode
with their adjacent ports as slaves
min_ipg_usec : float
Minimum inter-packet gap in microseconds to guard from too small ipg.
Exclusive with ipg_usec
src_mac_pcap : bool
Source MAC address will be taken from pcap file if True.
dst_mac_pcap : bool
Destination MAC address will be taken from pcap file if True.
:raises:
+ :exc:`TRexError`
"""
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.__pre_start_check('PUSH', ports, force)
validate_type('pcap_filename', pcap_filename, basestring)
validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
validate_type('speedup', speedup, (float, int))
validate_type('count', count, int)
validate_type('duration', duration, (float, int))
validate_type('vm', vm, (list, type(None)))
validate_type('is_dual', is_dual, bool)
validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
validate_type('src_mac_pcap', src_mac_pcap, bool)
validate_type('dst_mac_pcap', dst_mac_pcap, bool)
if all([ipg_usec, min_ipg_usec]):
raise TRexError('Please specify either ipg or minimal ipg, not both.')
# if force - stop any active ports
if force:
active_ports = list(set(self.get_active_ports()).intersection(ports))
if active_ports:
self.stop(active_ports)
# no support for > 1MB PCAP - use push remote
file_size = os.path.getsize(pcap_filename)
if not force and file_size > (1024 * 1024):
file_size_str = format_num(file_size, suffix = 'B')
url = 'https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_pcap_based_traffic'
raise TRexError("PCAP size of {:} is too big for local push - consider using remote (-r):\n{}".format(file_size_str, url))
if is_dual:
for port in ports:
master = port
slave = port ^ 0x1
if slave in ports:
raise TRexError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))
if slave not in self.get_acquired_ports():
raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
# regular push
if not is_dual:
# create the profile from the PCAP
try:
self.ctx.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
profile = STLProfile.load_pcap(pcap_filename,
ipg_usec,
speedup,
count,
vm = vm,
packet_hook = packet_hook,
min_ipg_usec = min_ipg_usec,
src_mac_pcap = src_mac_pcap,
dst_mac_pcap = dst_mac_pcap)
self.ctx.logger.post_cmd(RC_OK())
except TRexError as e:
self.ctx.logger.post_cmd(RC_ERR(e))
raise
self.remove_all_streams(ports = ports)
id_list = self.add_streams(profile.get_streams(), ports)
return self.start(ports = ports, duration = duration, force = force)
else:
# create a dual profile
split_mode = 'MAC'
if (ipg_usec and ipg_usec < 1000 * speedup) or (min_ipg_usec and min_ipg_usec < 1000):
self.ctx.logger.warning('In order to get synchronized traffic, ensure that effective ipg is at least 1000 usec')
try:
self.ctx.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
ipg_usec,
speedup,
count,
vm = vm,
packet_hook = packet_hook,
split_mode = split_mode,
min_ipg_usec = min_ipg_usec,
src_mac_pcap = src_mac_pcap,
dst_mac_pcap = dst_mac_pcap)
self.ctx.logger.post_cmd(RC_OK())
except TRexError as e:
self.ctx.logger.post_cmd(RC_ERR(e))
raise
all_ports = ports + [p ^ 0x1 for p in ports if profile_b]
self.remove_all_streams(ports = all_ports)
for port in ports:
master = port
slave = port ^ 0x1
self.add_streams(profile_a.get_streams(), master)
if profile_b:
self.add_streams(profile_b.get_streams(), slave)
return self.start(ports = all_ports, duration = duration, force = force, synchronized = True)
# get stats
@client_api('getter', True)
def get_stats (self, ports = None, sync_now = True):
"""
Gets all statistics on given ports, flow stats and latency.
:parameters:
ports: list
sync_now: boolean
"""
output = self._get_stats_common(ports, sync_now)
# TODO: move this to a generic protocol (AbstractStats)
pgid_stats = self.get_pgid_stats()
if not pgid_stats:
raise TRexError(pgid_stats)
output['flow_stats'] = pgid_stats.get('flow_stats', {})
output['latency'] = pgid_stats.get('latency', {})
return output
# clear stats
@client_api('command', True)
def clear_stats (self,
ports = None,
clear_global = True,
clear_flow_stats = True,
clear_latency_stats = True,
clear_xstats = True):
"""
Clears statistics in given ports.
:parameters:
ports: list
clear_global: boolean
clear_flow_stats: boolean
clear_latency_stats: boolean
clear_xstats: boolean
"""
self._clear_stats_common(ports, clear_global, clear_xstats)
# TODO: move this to a generic protocol
if clear_flow_stats or clear_latency_stats:
self.pgid_stats.clear_stats(clear_flow_stats=clear_flow_stats, clear_latency_stats=clear_latency_stats)
@client_api('getter', True)
def get_active_pgids(self):
"""
Get active packet group IDs
:Parameters:
None
:returns:
Dict with entries 'latency' and 'flow_stats'. Each entry contains list of used packet group IDs
of the given type.
:Raises:
+ :exc:`TRexError`
"""
return self.pgid_stats.get_active_pgids()
    @client_api('getter', True)
    def get_pgid_stats (self, pgid_list = []):
        """
        .. _get_pgid_stats:

        Get flow statistics for give list of pgids

        :parameters:
            pgid_list: list
                pgids to get statistics on. If empty list, get statistics for all pgids.
                Allows to get statistics for 1024 flows in one call (will return error if asking for more).
        :return:
            Return dictionary containing packet group id statistics information gathered from the server.

            ===============================  ===============
            key                              Meaning
            ===============================  ===============
            :ref:`flow_stats <flow_stats>`   Per flow statistics
            :ref:`latency <latency>`         Per flow statistics regarding flow latency
            ===============================  ===============

            Below is description of each of the inner dictionaries.

            .. _flow_stats:

            **flow_stats** contains :ref:`global dictionary <flow_stats_global>`, and dictionaries per packet group id (pg id). See structures below.

            **per pg_id flow stat** dictionaries have following structure:

            =================   ===============
            key                 Meaning
            =================   ===============
            rx_bps              Received bits per second rate
            rx_bps_l1           Received bits per second rate, including layer one
            rx_bytes            Total number of received bytes
            rx_pkts             Total number of received packets
            rx_pps              Received packets per second
            tx_bps              Transmit bits per second rate
            tx_bps_l1           Transmit bits per second rate, including layer one
            tx_bytes            Total number of sent bits
            tx_pkts             Total number of sent packets
            tx_pps              Transmit packets per second rate
            =================   ===============

            .. _flow_stats_global:

            **global flow stats** dictionary has the following structure:

            =================   ===============
            key                 Meaning
            =================   ===============
            rx_err              Number of flow statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
            tx_err              Number of flow statistics packets transmitted that we could not associate to any pg_id. This is never expected. If you see this different than 0, please report.
            =================   ===============

            .. _latency:

            **latency** contains :ref:`global dictionary <lat_stats_global>`, and dictionaries per packet group id (pg id). Each one with the following structure.

            **per pg_id latency stat** dictionaries have following structure:

            ===========================   ===============
            key                           Meaning
            ===========================   ===============
            :ref:`err_cntrs<err-cntrs>`   Counters describing errors that occurred with this pg id
            :ref:`latency<lat_inner>`     Information regarding packet latency
            ===========================   ===============

            Following are the inner dictionaries of latency

            .. _err-cntrs:

            **err-cntrs**

            =================   ===============
            key                 Meaning (see better explanation below the table)
            =================   ===============
            dropped             How many packets were dropped (estimation)
            dup                 How many packets were duplicated.
            out_of_order        How many packets we received out of order.
            seq_too_high        How many events of packet with sequence number too high we saw.
            seq_too_low         How many events of packet with sequence number too low we saw.
            =================   ===============

            For calculating packet error events, we add sequence number to each packet's payload. We decide what went wrong only according to sequence number
            of last packet received and that of the previous packet. 'seq_too_low' and 'seq_too_high' count events we see. 'dup', 'out_of_order' and 'dropped'
            are heuristics we apply to try and understand what happened. They will be accurate in common error scenarios.
            We describe few scenarios below to help understand this.

            Scenario 1: Received packet with seq num 10, and another one with seq num 10. We increment 'dup' and 'seq_too_low' by 1.

            Scenario 2: Received packet with seq num 10 and then packet with seq num 15. We assume 4 packets were dropped, and increment 'dropped' by 4, and 'seq_too_high' by 1.
            We expect next packet to arrive with sequence number 16.

            Scenario 2 continue: Received packet with seq num 11. We increment 'seq_too_low' by 1. We increment 'out_of_order' by 1. We *decrement* 'dropped' by 1.
            (We assume here that one of the packets we considered as dropped before, actually arrived out of order).

            .. _lat_inner:

            **latency**

            =================   ===============
            key                 Meaning
            =================   ===============
            average             Average latency over the stream lifetime (usec).Low pass filter is applied to the last window average.It is computed each sampling period by following formula: <average> = <prev average>/2 + <last sampling period average>/2
            histogram           Dictionary describing logarithmic distribution histogram of packet latencies. Keys in the dictionary represent range of latencies (in usec). Values are the total number of packets received in this latency range. For example, an entry {100:13} would mean that we saw 13 packets with latency in the range between 100 and 200 usec.
            jitter              Jitter of latency samples, computed as described in :rfc:`3550#appendix-A.8`
            last_max            Maximum latency measured between last two data reads from server (0.5 sec window).
            total_max           Maximum latency measured over the stream lifetime (in usec).
            total_min           Minimum latency measured over the stream lifetime (in usec).
            =================   ===============

            .. _lat_stats_global:

            **global latency stats** dictionary has the following structure:

            =================   ===============
            key                 Meaning
            =================   ===============
            old_flow            Number of latency statistics packets received that we could not associate to any pg_id. This can happen if latency on the used setup is large. See :ref:`wait_on_traffic <wait_on_traffic>` rx_delay_ms parameter for details.
            bad_hdr             Number of latency packets received with bad latency data. This can happen because of garbage packets in the network, or if the DUT causes packet corruption.
            =================   ===============

        :raises:
            + :exc:`TRexError`

        """
        # accept a scalar pgid as well as a list - normalize to a list
        pgid_list = listify(pgid_list)
        return self.pgid_stats.get_stats(pgid_list)
##########################
# Tagged Packet Grouping #
##########################
@staticmethod
def _validate_tpg_tag(tag, update, num_tags):
"""
Validate Tagged Packet Group tags.
:parameters:
tag: dict
Tag to validate
update: bool
Are we verifying the tags for update?
num_tags: int
Number of tags in total
"""
def _verify_vlan(vlan):
"""
Verify vlan is a valid Vlan value.
:parameters:
vlan: int
Vlan to verify
:raises:
TRexError: In case the Vlan is not a valid vlan
"""
validate_type("vlan", vlan, int)
MIN_VLAN, MAX_VLAN = 1, 4094
if not MIN_VLAN <= vlan <= MAX_VLAN:
raise TRexError("Invalid vlan value {}, vlan must be in [{}, {}]".format(vlan, MIN_VLAN, MAX_VLAN))
SUPPORTED_TAG_TYPES = ["Dot1Q", "QinQ"]
if update:
SUPPORTED_TAG_TYPES.append(None)
validate_type("tag", tag, dict)
tag_type = tag.get("type", "-")
if tag_type == "-":
raise TRexError("Please provide a type field for each TPG tag!")
if tag_type not in SUPPORTED_TAG_TYPES:
raise TRexError("Tag type {} not supported. Supported tag types are = {}".format(tag_type, SUPPORTED_TAG_TYPES))
tag_value = tag.get("value", None)
if tag_value is None and not update:
raise TRexError("You must provide a value field for each TPG tag!")
if not update:
validate_type("tag_value", tag_value, (dict, type(None)))
if tag_type == "Dot1Q":
validate_type("tag_value", tag_value, dict)
vlan = tag_value.get("vlan", None)
if vlan is None: # Check explicitly if it is none, since it can be 0.
raise TRexError("You must provide a vlan key for each Dot1Q tag!")
_verify_vlan(vlan)
elif tag_type == "QinQ":
validate_type("tag_value", tag_value, dict)
vlans = tag_value.get("vlans", None)
if not vlans:
raise TRexError("You must provide vlans key for each QinQ tag!")
validate_type("vlans", vlans, list)
if len(vlans) != 2:
raise TRexError("You must provide 2 vlans for QinQ tag.")
for vlan in vlans:
_verify_vlan(vlan)
if update:
tag_id = tag.get("tag_id", None)
if tag_id is None:
raise TRexError("You must provide a tag id when updating TPG tags.")
validate_type("tag_id", tag_id, int)
if not 0 <= tag_id < num_tags:
raise TRexError("Invalid Tag Id {}. Must be in [0-{}).".format(tag_id, num_tags))
    @client_api('command', True)
    def enable_tpg(self, num_tpgids, tags, rx_ports = None):
        """
        Enable Tagged Packet Grouping.
        This method has 3 phases:

        1. Enable TPG in Control Plane and send message to Rx to allocate memory.
        2. Wait until Rx finishes allocating.
        3. Enable the feature in Data Plane.

        :parameters:
            num_tpgids: uint32
                Number of Tagged Packet Groups that we are expecting to send. The number is an upper bound, and tpgids
                should be in *[0, num_tpgids)*.

                .. note:: This number is important in allocating server memory, hence be careful with it.

            tags: list
                List of dictionaries that represents the mapping of actual tags to tag ids.

                .. highlight:: python
                .. code-block:: python

                    [
                        {
                            "type": "Dot1Q",
                            "value": {
                                "vlan": 5,
                            }
                        },
                        {
                            "type": "QinQ",
                            "value": {
                                "vlans": [20, 30]
                            }
                        }
                    ]

                Currently supports only **Dot1Q**, **QinQ** tags. In our example, Dot1Q (5) is mapped to tag id 0,
                and QinQ (20, 30) is mapped to tag id 1 and so on.

                Each dictionary should be of the following format:

                ===============================  ===============
                key                              Meaning
                ===============================  ===============
                type                             String that represents type of tag, only **Dot1Q** and **QinQ** supported at the moment.
                value                            Dictionary that contains the value for the tag. Differs on each tag type.
                ===============================  ===============

            rx_ports: list
                List of rx ports on which we gather Tagged Packet Group Statistics. Optional. If not provided,
                data will be gathered on all acquired ports.
        """
        acquired_ports = self.get_acquired_ports()
        rx_ports = rx_ports if rx_ports is not None else acquired_ports
        self.psv.validate('enable_tpg', rx_ports)
        validate_type("num_tpgids", num_tpgids, int)
        validate_type("tags", tags, list)
        for tag in tags:
            STLClient._validate_tpg_tag(tag, update=False, num_tags=len(tags))

        # Validate that Rx ports are included in Acquired Ports
        if not set(rx_ports).issubset(set(acquired_ports)):
            raise TRexError("TPG Rx Ports {} must be acquired".format(rx_ports))

        self.ctx.logger.pre_cmd("Enabling Tagged Packet Group")

        # Invalidate cache so get_tpg_status re-queries the server
        self.tpg_status = None

        # Phase 1: enable TPG in CP and kick off Rx allocation (async on the server side)
        params = {
            "num_tpgids": num_tpgids,
            "ports": acquired_ports,
            "rx_ports": rx_ports,
            "username": self.ctx.username,
            "session_id": self.ctx.session_id,
            "tags": tags
        }
        rc = self._transmit("enable_tpg", params=params)
        if not rc:
            self.ctx.logger.post_cmd(rc)
            raise TRexError(rc)

        # Phase 2: poll the server state until CP+Rx report enabled,
        # rolling back (disable) if the server entered an error state
        tpg_state = TPGState(TPGState.DISABLED)
        while tpg_state != TPGState(TPGState.ENABLED_CP_RX):
            rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
            if not rc:
                self.ctx.logger.post_cmd(rc)
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
            if tpg_state.is_error_state():
                self.disable_tpg(surpress_log=True)
                self.ctx.logger.post_cmd(False)
                raise TRexError(tpg_state.get_fail_message())

        # Phase 3: enable TPG in DP (synchronous); a second 'enable_tpg' call
        # while in ENABLED_CP_RX state performs the DP enablement
        rc = self._transmit("enable_tpg", params={"username": self.ctx.username})
        if not rc:
            # DP enablement failed - query the state once more to distinguish a
            # reported error state from an unexplained failure
            rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
            if not rc:
                self.ctx.logger.post_cmd(rc)
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
            if tpg_state.is_error_state():
                self.disable_tpg(surpress_log=True)
                self.ctx.logger.post_cmd(False)
                raise TRexError(tpg_state.get_fail_message())
            else:
                raise TRexError("TPG enablement failed but server doesn't indicate of errors.")

        self.ctx.logger.post_cmd(rc)
    @client_api('command', True)
    def disable_tpg(self, username=None, surpress_log=False):
        """
        Disable Tagged Packet Grouping.
        This method has 2 phases.

        1. Disable TPG in DPs and Cp. Send a message to Rx to start deallocating.
        2. Wait until Rx finishes deallocating.

        :parameters:
            username: string
                Username whose TPG context we want to disable. Optional. If not provided, we disable for the calling user.

            surpress_log: bool
                Surpress logs, in case disable TPG is run as a subroutine. Defaults to False.
        """
        # Invalidate cache so get_tpg_status re-queries the server
        self.tpg_status = None

        if not surpress_log:
            self.ctx.logger.pre_cmd("Disabling Tagged Packet Group")

        # Disable TPG RPC simply indicates to the server to start deallocating the memory, it doesn't mean
        # it has finished deallocating
        username = self.ctx.username if username is None else username
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)

        # Poll until DP and Rx report they finished deallocating
        tpg_state = TPGState(TPGState.ENABLED)
        while tpg_state != TPGState(TPGState.DISABLED_DP_RX):
            rc = self._transmit("get_tpg_state", params={"username": username})
            if not rc:
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())

        # State is set to TPGState.DISABLED_DP_RX, we can proceed to destroying the context.
        # A second 'disable_tpg' call in this state destroys the server-side context.
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)

        if not surpress_log:
            self.ctx.logger.post_cmd(rc)
@client_api('getter', True)
def get_tpg_status(self, username=None, port=None):
"""
Get Tagged Packet Group Status from the server. We can collect the TPG status for a user or for a port.
If no parameters are provided we will collect for the calling user.
.. note:: Only one between username and port should be provided.
:parameters:
username: str
Username whose TPG status we want to check. Optional. In case it isn't provided,
the username that runs the command will be used.
port: uint8
Port whose TPG status we want to check. Optional.
:returns:
dict: Tagged Packet Group Status from the server. The dictionary contains the following keys:
.. highlight:: python
.. code-block:: python
{
"enabled": true,
"data": {
"rx_ports": [1],
"acquired_ports": [0, 1],
"num_tpgids": 3,
"num_tags": 10,
"username": "bdollma"
}
}
=============================== ===============
key Meaning
=============================== ===============
enabled Boolean indicated if TPG is enabled/disabled.
rx_ports Ports on which TPG is collecting stats. Relevant only if TPG is enabled.
acquired_ports Ports on which TPG can transmit. Relevant only if TPG is enabled.
num_tpgids Number of Tagged Packet Groups. Relevant only if TPG is enabled.
num_tags Number of Tagged Packet Group Tags. Relevant only if TPG is enabled.
username User that owns this instance of TPG. Relevant only if TPG is enabled.
=============================== ===============
"""
default_params = (username is None and port is None)
if default_params and self.tpg_status is not None:
# Default Params and value is cached, no need to query the server.
return self.tpg_status
params = {}
if port is None:
params = {"username": self.ctx.username if username is None else username}
else:
self.psv.validate('get_tpg_status', [port])
if username is not None:
raise TRexError("Should provide only one between port and username for TPG status.")
params = {"port_id": port}
rc = self._transmit("get_tpg_status", params=params)
if not rc:
raise TRexError(rc)
if default_params:
# Cache status only if default parameters
self.tpg_status = rc.data()
return rc.data()
@client_api('command', True)
def update_tpg_tags(self, new_tags, clear=False):
"""
Update Tagged Packet Grouping Tags.
:parameters:
new_tags: list
List of dictionaries that represents the tags to replace.
.. highlight:: python
.. code-block:: python
[
{
"type": "Dot1Q",
"value": {
"vlan": 5,
}
"tag_id": 20
},
{
"type": "QinQ",
"value": {
"vlans": [20, 30]
}
"tag_id": 7
},
{
"type": None,
"tag_id": 0
}
]
Currently supports only **Dot1Q**, **QinQ**, **None** types. In our example, tag_id 20 is now replaced with Dot1Q(5).
Note that Dot1Q(5) must not be present, or at least invalidated in one of the previous entries.
When the type is None, it invalidates the tag.
Each dictionary should be of the following format:
=============================== ===============
key Meaning
=============================== ===============
type String that represents type of tag, only **Dot1Q**, **QinQ** or None supported at the moment.
value Dictionary that contains the value for the tag. Differs on each tag type. Not needed in case of None.
tag_id The tag id that this new tag is going to have.
=============================== ===============
clear: bool
Clear stats for the tags we updated. Defaults to False.
.. note:: This can take some time, since we need to clear the stats in all the receiveing ports for all tpgids.
"""
def clear_update(self, port, min_tpgid, max_tpgid, tag_list):
params = {
"username": self.ctx.username,
"port_id": port,
"min_tpgid": min_tpgid,
"max_tpgid": max_tpgid,
"tag_list": tag_list
}
self._transmit("clear_updated", params=params)
self.ctx.logger.pre_cmd("Updating Tagged Packet Group Tags")
validate_type("new_tags", new_tags, list)
tpg_status = self.get_tpg_status()
if not tpg_status["enabled"]:
raise TRexError("Tagged Packet Group is not enabled.")
num_tags = tpg_status["data"]["num_tags"]
for tag in new_tags:
STLClient._validate_tpg_tag(tag, update=True, num_tags=num_tags)
rc = self._transmit("update_tpg_tags", params={"username": self.ctx.username, "tags": new_tags})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
if clear:
tag_list = [tag["tag_id"] for tag in new_tags]
rx_ports = tpg_status["data"]["rx_ports"]
num_tpgids = tpg_status["data"]["num_tpgids"]
NUM_STATS_CHUNK = 2048
TPGID_CHUNK_SIZE = NUM_STATS_CHUNK // len(tag_list)
min_tpgid = 0
for port in rx_ports:
while min_tpgid != num_tpgids:
max_tpgid = min(min_tpgid + TPGID_CHUNK_SIZE, num_tpgids)
clear_update(self, port, min_tpgid, max_tpgid, tag_list)
min_tpgid = max_tpgid
    @client_api('getter', True)
    def get_tpg_tags(self, min_tag = 0, max_tag = None, username = None, port = None):
        """
        Get Tagged Packet Group Tags from the server. It will return as a list starting from
        *min_tag* until *max_tag*. If not provided, we will collect for all tags.
        We can collect the TPG status for a user or for a port.
        If no parameters are provided we will collect for the calling user.

        :parameters:
            min_tag: int
                Minimal tag to collect the tag for. Optional. If not provided, we will start from 0.

            max_tag: int
                Maximal tag to collect the tag for. Defaults to None. If not provided, we will collect
                for the max possible tag.

            username: str
                Username whose TPG status we want to check. Optional. In case it isn't provided,
                the username that runs the command will be used.

            port: uint8
                Port whose TPG status we want to check. Optional.

        :returns:
            list: Tagged Packet Group Tags from the server. At index *i* in the list we can find the descripton
            for tag number *i*. If the value is None, it means that this tag index was invalidated.

            .. highlight:: python
            .. code-block:: python

                [
                    {
                        "type": "Dot1Q",
                        "value": {
                            "vlan": 7
                        }
                    },
                    None,
                    {
                        "type": "QinQ",
                        "value": {
                            "vlans": [1, 11]
                        }
                    }
                ]
        """
        # tags are fetched in chunks of at most CHUNK_SIZE per RPC call
        CHUNK_SIZE = 500

        def get_tpg_tags_chunk(self, params):
            """
            Assumes that the amount of tags requested is at most CHUNKS_SIZE.
            """
            rc = self._transmit("get_tpg_tags", params=params)
            if not rc:
                raise TRexError(rc)
            return rc.data()

        validate_type("min_tag", min_tag, int)
        validate_type("max_tag", max_tag, (int, type(None)))
        validate_type("username", username, (str, type(None)))
        tpg_status = self.get_tpg_status(username=username, port=port)
        if not tpg_status["enabled"]:
            raise TRexError("Tagged Packet Group is not enabled.")
        num_tags = tpg_status["data"]["num_tags"]
        if max_tag is None:
            max_tag = num_tags
        if max_tag > num_tags:
            raise TRexError("Max Tag {} must be less than number of tags defined: {}".format(max_tag, num_tags))
        if min_tag > max_tag:
            raise TRexError("Min Tag {} must be less than Max Tag {}".format(min_tag, max_tag))
        params = {}
        if port is None:
            params = {"username": self.ctx.username if username is None else username}
        else:
            self.psv.validate('get_tpg_tags', [port])
            if username is not None:
                raise TRexError("Should provide only one between port and username for get_tpg_tags.")
            params = {"port_id": port}
        tpg_tags = [] # List that will contain all tags
        # walk the [min_tag, max_tag) range in CHUNK_SIZE steps, appending
        # each server response until the whole range is covered
        current_max_tag = 0
        while current_max_tag != max_tag:
            current_max_tag = min(max_tag, min_tag + CHUNK_SIZE)
            params["min_tag"], params["max_tag"] = min_tag, current_max_tag
            tpg_tags += get_tpg_tags_chunk(self, params)
            min_tag = current_max_tag
        return tpg_tags
@client_api('getter', True)
def get_tpg_stats(self, port, tpgid, min_tag, max_tag, max_sections = 50, unknown_tag = False, untagged = False):
"""
Get Tagged Packet Group statistics that are received in this port,
for this Tagged Packet Group Identifier in [min, max) tag_range.
:parameters:
port: uint8
Port on which we collect the stats.
tpgid: uint32
Tagged Packet Group Identifier whose stats we are interested to collect.
min_tag: uint16
Minimal Tag to collect stats for.
max_tag: uint16
Maximal Tag to collect stats for. Non inclusive.
max_sections: int
Maximal sections to collect in the stats. Defaults to 50.
.. note:: If we have the same stats for two consequent tags, their values will assembled into one section
in order to compress the stats. The common use case is that stats are the same on each tag, hence the compression is effective.
If all the tags from *[min-max)* can be compressed in less than *max_sections*, we will get all tags
from [min-max), otherwise we will get *max_sections* entries in the stats dictionary.
unknown_tag: bool
Get the stats of packets received with this tpgid but with a tag that isn't provided in the mapping,
i.e an unknown tag.
untagged: bool
Get the stats of packets received with this tpgid but without any tag.
:returns:
(dict, uint16): Stats collected the next tag to start collecting from (relevant if not all the data was collected)
Dictionary contains Tagged Packet Group statistics gathered from the server. For example:
.. highlight:: python
.. code-block:: python
print(get_tpg_stats(port=3, tpgid=1, min_tag=0, max_tag=4000, unknown_tag=True)[0])
{'3': {'1': {
'0-200': {'bytes': 0,
'dup': 0,
'ooo': 0,
'pkts': 0,
'seq_err': 0,
'seq_err_too_big': 0,
'seq_err_too_small': 0
},
'201': {'bytes': 204,
'dup': 0,
'ooo': 0,
'pkts': 3,
'seq_err': 2,
'seq_err_too_big': 1,
'seq_err_too_small': 0},
'202-3999': {'bytes': 0,
'dup': 0,
'ooo': 0,
'pkts': 0,
'seq_err': 0,
'seq_err_too_big': 0,
'seq_err_too_small': 0},
'untagged': {'bytes': 0,
'dup': 0,
'ooo': 0,
'pkts': 0,
'seq_err': 0,
'seq_err_too_big': 0,
'seq_err_too_small': 0},
'unknown_tag': {'bytes': 0,
'pkts': 0}}}}
The returned data is separated per port and per tpgid, so it can be easily merged with data from other ports/tpgids.
In this example we can see that all the data is compressed in 3 sections (excluding the *unknown_tag* and *untagged*.).
uint16: Indicates the next tag to start collecting from. In case all the tags were collected this will equal *max_tag*.
In case the user provided min_tag = max tag, the user collected only unknown or untagged, hence this will be None.
"""
self.psv.validate('get_tpg_stats', [port])
validate_type("tpgid", tpgid, int)
validate_type("min_tag", min_tag, int)
validate_type("max_tag", max_tag, int)
validate_type("max_sections", max_sections, int)
validate_type("unknown_tag", unknown_tag, bool)
validate_type("untagged", untagged, bool)
if min_tag > max_tag:
raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
if min_tag == max_tag and not untagged and not unknown_tag:
raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
def get_tpg_stats_section(self, port, tpgid, min_tag, max_tag, unknown_tag, untagged):
"""
Get TPGID stats from the server for one section only.
:parameters:
port: uint8
Port on which we collected the stats.
tpgid: uint32
Tagged Packet Group Identifier for the group we collect stats.
min_tag: uint16
Min Tag to collect stats for.
max_tag: uint16
Max Tag to collect stats for.
unknown_tag: bool
Collect stats of unknown tags.
untagged: bool
Collect stats of untagged packets.
:returns:
dict: Stats of one section collected from the server.
"""
params = {
"port_id": port,
"tpgid": tpgid,
"min_tag": min_tag,
"max_tag": max_tag,
"unknown_tag": unknown_tag,
"untagged": untagged
}
rc = self._transmit("get_tpg_stats", params=params)
if not rc:
raise TRexError(rc)
return rc.data()
def _get_next_min_tag(section_stats, port, tpgid):
"""
Calculate the next min value based on the stats we received until now.
:parameters:
section_stats: dict
The latest stats as received by the server.
port: uint8
Port on which we collected the stats.
tpgid: uint32
Tagged Packet Group Identifier for the group we collect stats.
:returns:
uint32: The next value to use as a minimal tag.
"""
tpgid_stats = section_stats[str(port)][str(tpgid)]
# Keys of tpgid_stats can be:
# 1. "unknown", "untagged"
# 2. "min_tag-new_min_tag"
# 3. "min_tag"
for key in tpgid_stats.keys():
if "unknown" in key or "untagged" in key:
continue
elif "-" in key:
return int(key.split("-")[1]) + 1 # return the second value, add one for the new minimum
else:
return (int(key)) + 1
return None
# Initialize some variables
stats = {}
sections = 0
done = False
_min_tag = min_tag
# Loop until finished or reached max sections
while not done and sections < max_sections:
# Collect one section of stats from the server
section_stats = get_tpg_stats_section(self, port, tpgid, _min_tag, max_tag, unknown_tag, untagged)
# Calculate the next min tag.
_min_tag = _get_next_min_tag(section_stats, port, tpgid)
if _min_tag is None or _min_tag == max_tag:
done = True
if not stats:
# First section, set the stats dictionary
stats = section_stats
else:
# Update the stats dictionary with new sections
tpgid_stats = stats[str(port)][str(tpgid)]
new_tpgid_stats = section_stats[str(port)][str(tpgid)]
tpgid_stats.update(new_tpgid_stats)
unknown_tag = False # after the first iteration set unknown_tag to False
untagged = False # after the first iteration set untagged to False
sections += 1
return (stats, _min_tag)
@client_api('command', True)
def clear_tpg_stats(self, port, tpgid, min_tag = 0, max_tag = None, tag_list = None, unknown_tag = False, untagged = False):
"""
Clear Packet Group Identifier statistics that are received in this port,
for this Tagged Packet Group Identifier in [min, max) tag_range.
:parameters:
port: uint8
Port on which we want to clear the stats.
tpgid: uint32
Tagged Packet Group Identifier whose stats we are interested to clear.
min_tag: uint16
Minimal Tag to clear stats for. Defaults to 0.
max_tag: uint16
Maximal Tag to clear stats for. Non inclusive. Defaults to None. Exclusive to *tag_list*.
tag_list: list or None
List of tags to clear, if provided takes precedence over the range [min-max). Exclusive to *max_tag*.
unknown_tag: bool
Clear the stats of packets received with this tpgid but with a tag that isn't provided in the mapping,
i.e an unknown tag.
untagged: bool
Clear the stats of packets received with this tpgid but without any tag.
"""
self.ctx.logger.pre_cmd("Clearing TPG stats")
self.psv.validate('clear_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
validate_type("min_tag", min_tag, int)
validate_type("max_tag", max_tag, (int, type(None)))
validate_type("tag_list", tag_list, (list, type(None)))
validate_type("unknown_tag", unknown_tag, bool)
validate_type("untagged", untagged, bool)
if (max_tag is None and not tag_list) or (max_tag is not None and tag_list):
raise TRexError("One between max_tag and tag_list must be provided.")
if max_tag is not None:
if min_tag > max_tag:
raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
if min_tag == max_tag and not untagged and not unknown_tag:
raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
if tag_list:
for tag in tag_list:
validate_type("tag", tag, int)
if tag < 0:
raise TRexError("Invalid tag {}. Tag must be positive.".format(tag))
params = {
"port_id": port,
"tpgid": tpgid,
"min_tag": min_tag,
"max_tag": max_tag,
"tag_list": tag_list if tag_list else None, # Send None in case of empty list too
"unknown_tag": unknown_tag,
"untagged": untagged,
}
rc = self._transmit("clear_tpg_stats", params=params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('getter', True)
def get_tpg_tx_stats(self, port, tpgid):
"""
Get Tagged Packet Group Identifier statistics that are *transmitted* in this port,
for this Tagged Packet Group Identifier.
:parameters:
port: uint8
Port on which we transmit TPG packets.
tpgid: uint32
Tagged Packet Group Identifier
:returns:
dict: Dictionary contains Tagged Packet Group statistics gathered from the server. For example:
.. highlight:: python
.. code-block:: python
print(get_tpg_tx_stats(port=0, tpgid=1))
{'0':
{'1':
{ 'bytes': 0,
'pkts': 0}}}
"""
self.psv.validate('get_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("get_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_tx_stats(self, port, tpgid):
"""
Clear Tagged Packet Group Identifier statistics that are transmitted in this port,
for this Tagged Packet Group Identifier.
:parameters:
port: uint8
Port on which we transmit TPG packets.
tpgid: uint32
Tagged Packet Group Identifier
"""
self.ctx.logger.pre_cmd("Clearing TPG Tx stats")
self.psv.validate('clear_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("clear_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('getter', True)
def get_tpg_unknown_tags(self, port):
"""
Get Tagged Packet Group Unknown tags found in this port.
:parameters:
port: uint8
Port on which we collect TPG stats.
:returns:
dict: Dictionary contains Tagged Packet Group unknown tags gathered on each port. For example:
.. highlight:: python
.. code-block:: python
print(get_tpg_unknown_tags(port=1)
{'1': [
{'tag': {'type': 'Dot1Q', 'value': {'vlan': 12}}, 'tpgid': 12},
{'tag': {'type': 'QinQ', 'value': {'vlans': [1, 100]}}, 'tpgid': 0},
{'tag': {'type': 'Dot1Q', 'value': {'vlan': 11}}, 'tpgid': 11}
]}
"""
self.psv.validate('get_tpg_unknown_tags', [port])
rc = self._transmit("get_tpg_unknown_tags", params={"port_id": port})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_unknown_tags(self, port):
"""
Clear Tagged Packet Group Unknown tags found in this port.
:parameters:
port: uint8
Port on which we collect TPG packets.
"""
self.ctx.logger.pre_cmd("Clearing TPG unknown tags")
self.psv.validate('clear_tpg_unknown_tags', [port])
rc = self._transmit("clear_tpg_unknown_tags", params={"port_id": port})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
############################ console #############################
############################ commands #############################
############################ #############################
def _show_streams_stats(self, buffer = sys.stdout):
all_pg_ids = self.get_active_pgids()
# Display data for at most 4 pgids. If there are latency PG IDs, use them first
pg_ids = all_pg_ids['latency'][:4]
pg_ids += all_pg_ids['flow_stats'][:4 - len(pg_ids)]
table = self.pgid_stats.streams_stats_to_table(pg_ids)
# show
text_tables.print_table_with_header(table, table.title, buffer = buffer)
def _show_latency_stats(self, buffer = sys.stdout):
all_pg_ids = self.get_active_pgids()
# Display data for at most 5 pgids.
pg_ids = all_pg_ids['latency'][:5]
table = self.pgid_stats.latency_stats_to_table(pg_ids)
# show
text_tables.print_table_with_header(table, table.title, buffer = buffer)
def _show_latency_histogram(self, buffer = sys.stdout):
all_pg_ids = self.get_active_pgids()
# Display data for at most 5 pgids.
pg_ids = all_pg_ids['latency'][:5]
table = self.pgid_stats.latency_histogram_to_table(pg_ids)
# show
text_tables.print_table_with_header(table, table.title, buffer = buffer)
@console_api('reset', 'common', True)
def reset_line (self, line):
'''Reset ports'''
parser = parsing_opts.gen_parser(self,
"reset",
self.reset_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.PORT_RESTART)
opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
self.reset(ports = opts.ports, restart = opts.restart)
return True
@console_api('acquire', 'common', True)
def acquire_line (self, line):
'''Acquire ports\n'''
# define a parser
parser = parsing_opts.gen_parser(self,
"acquire",
self.acquire_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.FORCE)
opts = parser.parse_args(line.split(), default_ports = self.get_all_ports())
# filter out all the already owned ports
ports = list_difference(opts.ports, self.get_acquired_ports())
if not ports:
raise TRexError("acquire - all of port(s) {0} are already acquired".format(opts.ports))
self.acquire(ports = ports, force = opts.force)
# show time if success
return True
@console_api('release', 'common', True)
def release_line (self, line):
'''Release ports\n'''
parser = parsing_opts.gen_parser(self,
"release",
self.release_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL)
opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports())
ports = list_intersect(opts.ports, self.get_acquired_ports())
if not ports:
if not opts.ports:
raise TRexError("no acquired ports")
else:
raise TRexError("none of port(s) {0} are acquired".format(opts.ports))
self.release(ports = ports)
# show time if success
return True
@console_api('stats', 'common', True)
def show_stats_line (self, line):
'''Show various statistics\n'''
# define a parser
parser = parsing_opts.gen_parser(self,
"stats",
self.show_stats_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.STL_STATS)
opts = parser.parse_args(line.split())
# without parameters show only global and ports
if not opts.stats:
self._show_global_stats()
self._show_port_stats(opts.ports)
return
# decode which stats to show
if opts.stats == 'global':
self._show_global_stats()
elif opts.stats == 'ports':
self._show_port_stats(opts.ports)
elif opts.stats == 'xstats':
self._show_port_xstats(opts.ports, False)
elif opts.stats == 'xstats_inc_zero':
self._show_port_xstats(opts.ports, True)
elif opts.stats == 'status':
self._show_port_status(opts.ports)
elif opts.stats == 'cpu':
self._show_cpu_util()
elif opts.stats == 'mbuf':
self._show_mbuf_util()
elif opts.stats == 'streams':
self._show_streams_stats()
elif opts.stats == 'latency':
self._show_latency_stats()
elif opts.stats == 'latency_histogram':
self._show_latency_histogram()
else:
raise TRexError('Unhandled stats: %s' % opts.stats)
def _get_profiles(self, port_id_list):
profiles_per_port = OrderedDict()
for port_id in port_id_list:
data = self.ports[port_id].generate_loaded_profiles()
if data:
profiles_per_port[port_id] = data
return profiles_per_port
def _get_streams(self, port_id_list, streams_mask, table_format):
streams_per_port = OrderedDict()
for port_id in port_id_list:
data = self.ports[port_id].generate_loaded_streams_sum(streams_mask, table_format)
if data:
streams_per_port[port_id] = data
return streams_per_port
@console_api('profiles', 'STL', True, True)
def profiles_line(self, line):
'''Get loaded to server profiles information'''
parser = parsing_opts.gen_parser(self,
"profiles",
self.profiles_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL)
opts = parser.parse_args(line.split())
if not opts:
return opts
profiles_per_port = self._get_profiles(opts.ports)
if not profiles_per_port:
self.logger.info(format_text("No profiles found with desired filter.\n", "bold", "magenta"))
for port_id, port_profiles_table in profiles_per_port.items():
if port_profiles_table:
text_tables.print_table_with_header(port_profiles_table,
header = 'Port %s:' % port_id)
    @console_api('streams', 'STL', True, True)
    def streams_line(self, line):
        '''Get loaded to server streams information'''
        # Parse: port list, an optional stream-id mask and an optional
        # --code target filename.
        parser = parsing_opts.gen_parser(self,
                                         "streams",
                                         self.streams_line.__doc__,
                                         parsing_opts.PORT_LIST_WITH_ALL,
                                         parsing_opts.STREAMS_MASK,
                                         parsing_opts.STREAMS_CODE)
        opts = parser.parse_args(line.split())
        if not opts:
            return opts
        # table_format is True only when no code output was requested.
        streams_per_port = self._get_streams(opts.ports, set(opts.ids), table_format = opts.code is None)
        if not streams_per_port:
            self.logger.info(format_text("No streams found with desired filter.\n", "bold", "magenta"))
        elif opts.code is None: # Just print the summary table of streams
            for port_id, port_streams_table in streams_per_port.items():
                if port_streams_table:
                    text_tables.print_table_with_header(port_streams_table,
                                                        header = 'Port %s:' % port_id)
        elif opts.code: # Save the code that generates streams to file
            if not opts.code.endswith('.py'):
                raise TRexError('Saved filename should end with .py')
            # With several ports we write one file per port so they don't clobber
            # each other; the port id is appended to the requested filename.
            is_several_ports = len(streams_per_port) > 1
            if is_several_ports:
                print(format_text('\nWarning: several ports specified, will save in separate file per port.', 'bold'))
            for port_id, port_streams_data in streams_per_port.items():
                if not port_streams_data:
                    print('No streams to save at port %s, skipping.' % port_id)
                    continue
                filename = ('%s_port%s.py' % (opts.code[:-3], port_id)) if is_several_ports else opts.code
                # Interactive confirmation before overwriting an existing file.
                if os.path.exists(filename):
                    sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
                    ans = user_input().strip()
                    if ans.lower() not in ('y', 'yes'):
                        print('Not saving.')
                        continue
                self.logger.pre_cmd('Saving file as: %s' % filename)
                try:
                    # Re-pack the streams into a profile and emit generating code.
                    profile = STLProfile(list(port_streams_data.values()))
                    with open(filename, 'w') as f:
                        f.write(profile.dump_to_code())
                except Exception as e:
                    self.logger.post_cmd(False)
                    print(e)
                    print('')
                else:
                    self.logger.post_cmd(True)
        else: # Print the code that generates streams
            for port_id, port_streams_data in streams_per_port.items():
                if not port_streams_data:
                    continue
                print(format_text('Port: %s' % port_id, 'cyan', 'underline') + '\n')
                for stream_id, stream in port_streams_data.items():
                    print(format_text('Stream ID: %s' % stream_id, 'cyan', 'underline'))
                    print('  ' + '\n  '.join(stream.to_code().splitlines()) + '\n')
@console_api('push', 'STL', True)
def push_line(self, line):
'''Push a pcap file '''
args = [self,
"push",
self.push_line.__doc__,
parsing_opts.REMOTE_FILE,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.COUNT,
parsing_opts.DURATION,
parsing_opts.IPG,
parsing_opts.MIN_IPG,
parsing_opts.SPEEDUP,
parsing_opts.FORCE,
parsing_opts.DUAL,
parsing_opts.SRC_MAC_PCAP,
parsing_opts.DST_MAC_PCAP]
parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH_NO_CHECK]))
opts = parser.parse_args(line.split(), verify_acquired = True)
if not opts:
return opts
if not opts.remote:
parser = parsing_opts.gen_parser(*(args + [parsing_opts.FILE_PATH]))
opts = parser.parse_args(line.split(), verify_acquired = True)
if not opts:
return opts
if opts.remote:
self.push_remote(opts.file[0],
ports = opts.ports,
ipg_usec = opts.ipg_usec,
min_ipg_usec = opts.min_ipg_usec,
speedup = opts.speedup,
count = opts.count,
duration = opts.duration,
force = opts.force,
is_dual = opts.dual,
src_mac_pcap = opts.src_mac_pcap,
dst_mac_pcap = opts.dst_mac_pcap)
else:
self.push_pcap(opts.file[0],
ports = opts.ports,
ipg_usec = opts.ipg_usec,
min_ipg_usec = opts.min_ipg_usec,
speedup = opts.speedup,
count = opts.count,
duration = opts.duration,
force = opts.force,
is_dual = opts.dual,
src_mac_pcap = opts.src_mac_pcap,
dst_mac_pcap = opts.dst_mac_pcap)
return RC_OK()
@console_api('service', 'STL', True)
def service_line (self, line):
'''Configures port for service mode.
In service mode ports will reply to ARP, PING
and etc.
'''
parser = parsing_opts.gen_parser(self,
"service",
self.service_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.SERVICE_GROUP)
opts = parser.parse_args(line.split())
enabled, filtered, mask = self._get_service_params(opts)
self.set_service_mode(ports = opts.ports, enabled = enabled, filtered = filtered, mask = mask)
return True
    @console_api('start', 'STL', True)
    def start_line (self, line):
        '''Start selected traffic on specified ports on TRex\n'''
        # parser for parsing the start command arguments
        parser = parsing_opts.gen_parser(self,
                                         "start",
                                         self.start_line.__doc__,
                                         parsing_opts.PROFILE_LIST,
                                         parsing_opts.TOTAL,
                                         parsing_opts.FORCE,
                                         parsing_opts.FILE_PATH,
                                         parsing_opts.DURATION,
                                         parsing_opts.ARGPARSE_TUNABLES,
                                         parsing_opts.MULTIPLIER_STRICT,
                                         parsing_opts.DRY_RUN,
                                         parsing_opts.CORE_MASK_GROUP,
                                         parsing_opts.SYNCHRONIZED)
        opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
        help_flags = ('-h', '--help')
        # if the user chose to pass the tunables arguments in previous version (-t var1=x1,var2=x2..)
        # we decode the tunables and then convert the output from dictionary to list in order to have the same format with the
        # newer version.
        tunable_dict = {}
        if "-t" in line and '=' in line:
            tun_list = opts.tunables
            tunable_dict = parsing_opts.decode_tunables(tun_list[0])
            opts.tunables = parsing_opts.convert_old_tunables_to_new_tunables(tun_list[0])
            opts.tunables.extend(tun_list[1:])
        tunable_dict["tunables"] = opts.tunables
        # Normalize every port argument into a PortProfileID wrapper.
        ports = []
        for port in opts.ports:
            if not isinstance(port, PortProfileID):
                port = PortProfileID(port)
            ports.append(port)
        # Bare port ids extracted from the profile identifiers.
        port_id_list = parse_ports_from_profiles(ports)
        # core mask
        if opts.core_mask is not None:
            core_mask = opts.core_mask
        else:
            core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
        # just for sanity - will be checked on the API as well
        self.__decode_core_mask(port_id_list, core_mask)
        # Streams keyed by profile name and (merged) by bare port id.
        streams_per_profile = {}
        streams_per_port = {}
        # pack the profile
        try:
            for profile in ports:
                profile_name = str(profile)
                port_id = int(profile)
                # Load the profile file per profile: direction alternates by
                # port parity, and the decoded tunables are forwarded.
                profile = STLProfile.load(opts.file[0],
                                          direction = port_id % 2,
                                          port_id = port_id,
                                          **tunable_dict)
                # A tunables help request ends the command after the profile
                # itself printed its help.
                if any(h in opts.tunables for h in help_flags):
                    return True
                if profile is None:
                    print('Failed to convert STL profile')
                    return False
                stream_list = profile.get_streams()
                streams_per_profile[profile_name] = stream_list
                if port_id not in streams_per_port:
                    streams_per_port[port_id] = list(stream_list)
                else:
                    streams_per_port[port_id].extend(list(stream_list))
        except TRexError as e:
            s = format_text("\nError loading profile '{0}'\n".format(opts.file[0]), 'bold')
            s += "\n" + e.brief()
            raise TRexError(s)
        # for better use experience - check this before any other action on port
        self.__pre_start_check('START', ports, opts.force, streams_per_port)
        ports = self.validate_profile_input(ports)
        # stop ports if needed
        active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
        if active_profiles and opts.force:
            self.stop(active_profiles)
        # remove all streams
        self.remove_all_streams(ports)
        # Attach each profile's streams to its own port/profile.
        for profile in ports:
            profile_name = str(profile)
            self.add_streams(streams_per_profile[profile_name], ports = profile)
        # --dry only validates the configuration; otherwise start transmitting.
        if opts.dry:
            self.validate(ports, opts.mult, opts.duration, opts.total)
        else:
            self.start(ports,
                       opts.mult,
                       opts.force,
                       opts.duration,
                       opts.total,
                       core_mask,
                       opts.sync)
        return True
@console_api('stop', 'STL', True)
def stop_line (self, line):
'''Stop active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"stop",
self.stop_line.__doc__,
parsing_opts.PROFILE_LIST_WITH_ALL,
parsing_opts.REMOVE)
opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("active"), verify_acquired = True, allow_empty = True)
ports = self.validate_profile_input(opts.ports)
# find the relevant ports
port_id_list = parse_ports_from_profiles(ports)
active_ports = list_intersect(ports, self.get_profiles_with_state("active"))
if not active_ports:
if not ports:
msg = 'no active ports'
else:
msg = 'no active traffic on ports {0}'.format(ports)
print(msg)
else:
# call API
self.stop(active_ports)
if opts.remove:
streams_ports = list_intersect(ports, self.get_profiles_with_state("streams"))
if not streams_ports:
if not ports:
msg = 'no ports with streams'
else:
msg = 'no streams on ports {0}'.format(ports)
print(msg)
else:
# call API
self.remove_all_streams(ports)
return True
@console_api('update', 'STL', True)
def update_line (self, line):
'''Update port(s) speed currently active\n'''
parser = parsing_opts.gen_parser(self,
"update",
self.update_line.__doc__,
parsing_opts.PROFILE_LIST,
parsing_opts.MULTIPLIER,
parsing_opts.TOTAL,
parsing_opts.FORCE,
parsing_opts.STREAMS_MASK)
opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("active"), verify_acquired = True)
ports = self.validate_profile_input(opts.ports)
if opts.ids:
if len(ports) != 1:
raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
self.update_streams(ports[0], opts.mult, opts.force, opts.ids)
return True
# find the relevant ports
profiles = list_intersect(ports, self.get_profiles_with_state("active"))
if not profiles:
if not ports:
msg = 'no active ports'
else:
msg = 'no active traffic on ports {0}'.format(ports)
raise TRexError(msg)
self.update(profiles, opts.mult, opts.total, opts.force)
return True
@console_api('pause', 'STL', True)
def pause_line (self, line):
'''Pause active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"pause",
self.pause_line.__doc__,
parsing_opts.PROFILE_LIST,
parsing_opts.STREAMS_MASK)
opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("transmitting"), verify_acquired = True)
ports = self.validate_profile_input(opts.ports)
if opts.ids:
if len(ports) != 1:
raise TRexError('pause - must provide exactly one port when specifying stream_ids, got: %s' % ports)
self.pause_streams(ports[0], opts.ids)
return True
# check for already paused case
if ports and is_sub_list(ports, self.get_profiles_with_state("paused")):
raise TRexError('all of ports(s) {0} are already paused'.format(ports))
# find the relevant ports
profiles = list_intersect(ports, self.get_profiles_with_state("transmitting"))
if not profiles:
if not ports:
msg = 'no transmitting ports'
else:
msg = 'none of ports {0} are transmitting'.format(ports)
raise TRexError(msg)
self.pause(profiles)
return True
@console_api('resume', 'STL', True)
def resume_line (self, line):
'''Resume active traffic on specified ports on TRex\n'''
parser = parsing_opts.gen_parser(self,
"resume",
self.resume_line.__doc__,
parsing_opts.PROFILE_LIST,
parsing_opts.STREAMS_MASK)
opts = parser.parse_args(line.split(), default_ports = self.get_profiles_with_state("paused"), verify_acquired = True)
ports = self.validate_profile_input(opts.ports)
if opts.ids:
if len(ports) != 1:
raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
self.resume_streams(ports[0], opts.ids)
return True
# find the relevant ports
profiles = list_intersect(ports, self.get_profiles_with_state("paused"))
if not profiles:
if not ports:
msg = 'no paused ports'
else:
msg = 'none of ports {0} are paused'.format(ports)
raise TRexError(msg)
self.resume(profiles)
# true means print time
return True
##########################
# Tagged Packet Grouping #
##########################
@staticmethod
def _tpg_tag_value_2str(tag_type, value):
"""
Convert the structured tag type and value to a printable string.
:parameters:
tag_type: str
String represeting the tag type. Supported tag types are Dot1Q and QinQ.
value: dict
Value of the tag.
:return:
String representing the tag type and value.
"""
known_types = ["Dot1Q", "QinQ"]
if tag_type not in known_types:
return "Unknown Type"
if tag_type == "Dot1Q":
return "Dot1Q({})".format(value["vlan"])
if tag_type == "QinQ":
return "QinQ({}, {})".format(value["vlans"][0], value["vlans"][1])
@console_api('tpg_enable', 'STL', True)
def tpg_enable(self, line):
"""Enable Tagged Packet Group"""
parser = parsing_opts.gen_parser(self,
"tpg_enable",
self.tpg_enable.__doc__,
parsing_opts.TPG_ENABLE
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
tpg_conf = STLTaggedPktGroupTagConf.load(opts.tags_conf, **{"tunables": opts.tunables})
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
if tpg_conf is None:
# Can be a --help call.
return None
try:
self.enable_tpg(opts.num_tpgids, tpg_conf, opts.ports)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_disable', 'STL', True)
def tpg_disable(self, line):
"""Disable Tagged Packet Group"""
parser = parsing_opts.gen_parser(self,
"tpg_disable",
self.tpg_disable.__doc__,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.disable_tpg()
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_status', 'STL', True)
def show_tpg_status(self, line):
'''Show Tagged Packet Group Status\n'''
parser = parsing_opts.gen_parser(self,
"tpg_status",
self.show_tpg_status.__doc__,
parsing_opts.TPG_USERNAME,
parsing_opts.SINGLE_PORT_NOT_REQ
)
opts = parser.parse_args(line.split())
if not opts:
return opts
status = None
try:
status = self.get_tpg_status(opts.username, opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
if status is None:
self.logger.info(format_text("Couldn't get status from STL Server.\n", "bold", "magenta"))
enabled = status.get("enabled", None)
if enabled is None:
self.logger.info(format_text("Enabled not found in server status response.\n", "bold", "magenta"))
msg = "\nTagged Packet Group is enabled\n" if enabled else "\nTagged Packet Group is disabled\n"
self.logger.info(format_text(msg, "bold", "yellow"))
# If Tagged Packet Group is enabled, print its details in a table.
if enabled:
data = status.get("data", None)
if data is None:
self.logger.info(format_text("Data not found in server status response.\n", "bold", "magenta"))
keys_to_headers = [ {'key': 'username', 'header': 'Username'},
{'key': 'acquired_ports', 'header': 'Acquired Ports'},
{'key': 'rx_ports', 'header': 'Rx Ports'},
{'key': 'num_tpgids', 'header': 'Num TPGId'},
{'key': 'num_tags', 'header': 'Num Tags'},
]
kwargs = {'title': 'Tagged Packet Group Data',
'empty_msg': 'No status found',
'keys_to_headers': keys_to_headers}
text_tables.print_table_by_keys(data, **kwargs)
@console_api('tpg_update', 'STL', True)
def tpg_update(self, line):
'''Update Tagged Packet Group Tag\n'''
parser = parsing_opts.gen_parser(self,
"tpg_tags",
self.show_tpg_tags.__doc__,
parsing_opts.TPG_UPDATE
)
opts = parser.parse_args(line.split())
if not opts:
return opts
tag_type = opts.tag_type if opts.tag_type != "Invalidate" else None
new_tag = {
"type": tag_type,
"tag_id": opts.tag_id
}
if tag_type is not None:
# Not invalidating tag, value is needed
if opts.value is None:
raise TRexError(format_text("Value must be present for type {}.".format(tag_type), "red", "bold"))
if tag_type == "Dot1Q":
if len(opts.value) != 1:
raise TRexError(format_text("Only one value must be presented for Dot1Q tags. Invalid value {}.".format(opts.value), "red", "bold"))
new_tag["value"] = {
"vlan": opts.value[0]
}
if tag_type == "QinQ":
if len(opts.value) != 2:
raise TRexError(format_text("Exactly two values must be presented for QinQ tags. Invalid value {}.".format(opts.value), "red", "bold"))
new_tag["value"] = {
"vlans": opts.value
}
try:
self.update_tpg_tags([new_tag], opts.clear)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_tags', 'STL', True)
def show_tpg_tags(self, line):
'''Show Tagged Packet Group Tags\n'''
parser = parsing_opts.gen_parser(self,
"tpg_tags",
self.show_tpg_tags.__doc__,
parsing_opts.TPG_USERNAME,
parsing_opts.SINGLE_PORT_NOT_REQ,
parsing_opts.TPG_MIN_TAG,
parsing_opts.TPG_MAX_TAG_NOT_REQ,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
MAX_TAGS_TO_SHOW = 20
table_keys_to_headers = [ {'key': 'tag_id', 'header': 'Tag Id'},
{'key': 'tag', 'header': 'Tag Type'}
]
table_kwargs = {'empty_msg': '\nNo tags found',
'keys_to_headers': table_keys_to_headers}
tpg_status = self.get_tpg_status(username=opts.username, port=opts.port)
if not tpg_status["enabled"]:
raise TRexError(format_text("Tagged Packet Group is not enabled.", "bold", "red"))
num_tags_total = tpg_status["data"]["num_tags"]
last_tag = num_tags_total if opts.max_tag is None else min(num_tags_total, opts.max_tag)
current_tag = opts.min_tag
while current_tag != last_tag:
next_current_tag = min(current_tag + MAX_TAGS_TO_SHOW, last_tag)
try:
tags = self.get_tpg_tags(current_tag, next_current_tag, opts.username, opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
tags_to_print = []
for i in range(len(tags)):
tags_to_print.append(
{
"tag_id": current_tag + i,
"tag": '-' if tags[i] is None else STLClient._tpg_tag_value_2str(tags[i]['type'], tags[i]['value'])
}
)
table_kwargs['title'] = "Tags [{}-{})".format(current_tag, next_current_tag)
text_tables.print_table_by_keys(tags_to_print, **table_kwargs)
current_tag = next_current_tag
if current_tag != last_tag:
# The message should be printed iff there will be another iteration.
input("Press Enter to see the rest of the tags")
    @console_api('tpg_stats', 'STL', True)
    def show_tpg_stats(self, line):
        '''Show Tagged Packet Group Statistics\n'''
        parser = parsing_opts.gen_parser(self,
                                         "tpg_stats",
                                         self.show_tpg_stats.__doc__,
                                         parsing_opts.TPG_STL_STATS
                                         )
        opts = parser.parse_args(line.split())
        if not opts:
            return opts
        if opts.max_tag < opts.min_tag:
            # The client Api checks this as well but our loop logic requires this condition.
            raise TRexError(format_text("Max Tag {} must be greater/equal than Min Tag {}".format(opts.max_tag, opts.min_tag), "bold", "red"))
        if opts.min_tag == opts.max_tag and not opts.untagged and not opts.unknown_tag:
            raise TRexError(format_text("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.", "bold", "red"))
        # Page size: at most this many tag sections are requested per screen.
        MAX_TAGS_TO_SHOW = 20
        current_tag = opts.min_tag
        new_current_tag = current_tag
        first_iteration = True
        table_keys_to_headers = [ {'key': 'tags', 'header': 'Tag Id'},
                                  {'key': 'pkts', 'header': 'Packets'},
                                  {'key': 'bytes', 'header': 'Bytes'},
                                  {'key': 'seq_err', 'header': 'Seq Error'},
                                  {'key': 'seq_err_too_big', 'header': 'Seq Too Big'},
                                  {'key': 'seq_err_too_small', 'header': 'Seq Too Small'},
                                  {'key': 'dup', 'header': 'Duplicates'},
                                  {'key': 'ooo', 'header': 'Out of Order'},
                                ]
        table_kwargs = {'empty_msg': 'No stats found',
                        'keys_to_headers': table_keys_to_headers}
        # Loop until we get all the tags
        while current_tag != opts.max_tag or first_iteration:
            stats = None
            try:
                # untagged/unknown stats are requested only on the first page.
                unknown_tag = first_iteration and opts.unknown_tag
                untagged = first_iteration and opts.untagged
                # new_current_tag is where the server stopped collecting; it is
                # the start of the next page.
                stats, new_current_tag = self.get_tpg_stats(opts.port, opts.tpgid, current_tag, opts.max_tag, max_sections=MAX_TAGS_TO_SHOW, unknown_tag=unknown_tag, untagged=untagged)
            except TRexError as e:
                s = format_text("{}".format(e.brief()), "bold", "red")
                raise TRexError(s)
            if stats is None:
                self.logger.info(format_text("\nNo stats found for the provided params.\n", "bold", "yellow"))
                return
            port_stats = stats.get(str(opts.port), None)
            if port_stats is None:
                self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
                return
            tpgid_stats = port_stats.get(str(opts.tpgid), None)
            if tpgid_stats is None:
                self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
                return
            # Flatten the per-tag dict into a list of rows for the table printer.
            stats_list = []
            for tag_id, tag_stats in tpgid_stats.items():
                tag_stats['tags'] = tag_id.replace("_tag", "") # remove _tag keyword when printing
                stats_list.append(tag_stats)
            table_kwargs['title'] = "Port {}, tpgid {}, Tags = [{}, {})".format(opts.port, opts.tpgid, current_tag, new_current_tag)
            text_tables.print_table_by_keys(stats_list, **table_kwargs)
            if new_current_tag is not None and new_current_tag != opts.max_tag:
                # The message should be printed iff there will be another iteration.
                input("Press Enter to see the rest of the stats")
            first_iteration = False # Set this false after the first iteration
            current_tag = new_current_tag if new_current_tag is not None else current_tag # Update the current tag in case it is a new one.
@console_api('tpg_clear_stats', 'STL', True)
def tpg_clear_stats(self, line):
'''Clear Tagged Packet Group Stats\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_stats",
self.tpg_clear_stats.__doc__,
parsing_opts.TPG_STL_CLEAR_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_stats(opts.port, opts.tpgid, opts.min_tag, opts.max_tag, opts.tag_list, opts.unknown_tag, opts.untagged)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_tx_stats', 'STL', True)
def show_tpg_tx_stats(self, line):
'''Show Tagged Packet Group Tx Statistics\n'''
parser = parsing_opts.gen_parser(self,
"tpg_tx_stats",
self.show_tpg_tx_stats.__doc__,
parsing_opts.TPG_STL_TX_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
table_keys_to_headers = [ {'key': 'pkts', 'header': 'Packets'},
{'key': 'bytes', 'header': 'Bytes'},
]
table_kwargs = {'empty_msg': 'No stats found',
'keys_to_headers': table_keys_to_headers}
tx_stats = {}
try:
tx_stats = self.get_tpg_tx_stats(opts.port, opts.tpgid)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
port_stats = tx_stats.get(str(opts.port), None)
if port_stats is None:
self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
return
tpgid_stats = port_stats.get(str(opts.tpgid), None)
if tpgid_stats is None:
self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
return
table_kwargs['title'] = "Port {}, tpgid {}".format(opts.port, opts.tpgid)
text_tables.print_table_by_keys(tpgid_stats, **table_kwargs)
@console_api('tpg_clear_tx_stats', 'STL', True)
def tpg_clear_tx_stats(self, line):
'''Clear Tagged Packet Group Tx Stats\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_tx_stats",
self.tpg_clear_tx_stats.__doc__,
parsing_opts.TPG_STL_TX_STATS
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_tx_stats(opts.port, opts.tpgid)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_show_unknown_tags', 'STL', True)
def show_tpg_unknown_stats(self, line):
'''Show Tagged Packet Group Unknown Tags\n'''
parser = parsing_opts.gen_parser(self,
"tpg_show_unknown_stats",
self.show_tpg_unknown_stats.__doc__,
parsing_opts.TPG_PORT,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
table_keys_to_headers = [{'key': 'tpgid', 'header': 'tpgid'},
{'key': 'type', 'header': 'Type'}]
table_kwargs = {'empty_msg': '\nNo unknown tags found in port {}.'.format(opts.port),
'keys_to_headers': table_keys_to_headers}
unknown_tags = {}
try:
unknown_tags = self.get_tpg_unknown_tags(opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
port_unknown_tags = unknown_tags.get(str(opts.port), None)
if port_unknown_tags is None:
self.logger.info(format_text("\nNo unknown tags found in the provided port.\n", "bold", "yellow"))
return
unknown_tags_to_print = []
for val in port_unknown_tags:
unknown_tag = {
'tpgid': val['tpgid'],
'type': STLClient._tpg_tag_value_2str(val['tag']['type'], val['tag']['value'])
}
if unknown_tag not in unknown_tags_to_print:
# This list is at max 10 elements. Dict is not hashable.
unknown_tags_to_print.append(unknown_tag)
table_kwargs['title'] = "Port {} unknown tags".format(opts.port)
text_tables.print_table_by_keys(unknown_tags_to_print, **table_kwargs)
@console_api('tpg_clear_unknown_tags', 'STL', True)
def tpg_clear_unknown_stats(self, line):
'''Clear Tagged Packet Group Unknown Tags\n'''
parser = parsing_opts.gen_parser(self,
"tpg_clear_unknown_stats",
self.tpg_clear_unknown_stats.__doc__,
parsing_opts.TPG_PORT,
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
self.clear_tpg_unknown_tags(opts.port)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
| 39.268036 | 360 | 0.531084 | import time
import sys
import os
from collections import OrderedDict
from functools import wraps
from ..utils.common import get_current_user, list_intersect, is_sub_list, user_input, list_difference, parse_ports_from_profiles
from ..utils import parsing_opts, text_tables
from ..utils.text_opts import format_text, format_num
from ..common.trex_exceptions import *
from ..common.trex_events import Event
from ..common.trex_logger import Logger
from ..common.trex_client import TRexClient, PacketBuffer
from ..common.trex_types import *
from ..common.trex_types import PortProfileID, ALL_PROFILE_ID
from ..common.trex_psv import *
from ..common.trex_api_annotators import client_api, console_api
from .trex_stl_port import STLPort
from .trex_stl_streams import STLStream, STLProfile, STLTaggedPktGroupTagConf
from .trex_stl_stats import CPgIdStats
def validate_port_input(port_arg):
    """
    Decorator factory: coerce the named port argument of the wrapped method
    (an int/str/PortProfileID, or a list of those) into PortProfileID form
    before the call is forwarded.

    :parameters:
        port_arg: str
            Name of the wrapped function's parameter to convert.
    """
    def wrap (func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            code = func.__code__
            fname = func.__name__  # NOTE(review): unused; kept as-is
            # Declared positional parameter names, including 'self'.
            names = code.co_varnames[:code.co_argcount]
            argname = port_arg
            try:
                # '-1' because 'self' is not part of *args.
                port_index = names.index(argname) - 1
                argval = args[port_index]
                args = list(args)
                args[port_index] = convert_port_to_profile(argval)
                args = tuple(args)
            except (ValueError, IndexError):
                # The argument was passed by keyword (or not passed at all,
                # in which case None is converted and injected into kwargs).
                argval = kwargs.get(argname)
                kwargs[argname] = convert_port_to_profile(argval)
            return func(self, *args, **kwargs)
        def convert_port_to_profile(port):
            # None passes through untouched (callers treat it as a default).
            if port is None:
                return port
            if isinstance(port, list):
                # Convert element-wise without mutating the caller's list.
                result = list(port)
                for idx, val in enumerate(result):
                    validate_type('port', val, (int, str, PortProfileID))
                    result[idx] = PortProfileID(str(val))
            else:
                validate_type('port', port, (int, str, PortProfileID))
                result = PortProfileID(str(port))
            return result
        return wrapper
    return wrap
class TPGState:
    """
    Client-side wrapper for the server's Tagged Packet Group (TPG) state value.

    Holds the raw integer state and exposes helpers to detect and describe
    allocation failures.
    """

    # Raw state values reported by the server.
    DISABLED        = 0
    ENABLED_CP      = 1
    ENABLED_CP_RX   = 2
    ENABLED         = 3
    DISABLED_DP     = 4
    DISABLED_DP_RX  = 5
    RX_ALLOC_FAILED = 6
    DP_ALLOC_FAILED = 7

    ALL_STATES = [DISABLED, ENABLED_CP, ENABLED_CP_RX, ENABLED, DISABLED_DP, DISABLED_DP_RX, RX_ALLOC_FAILED, DP_ALLOC_FAILED]
    ERROR_STATES = [RX_ALLOC_FAILED, DP_ALLOC_FAILED]

    def __init__(self, initial_state):
        # Reject values the server could never report.
        if initial_state not in TPGState.ALL_STATES:
            raise TRexError("Invalid TPG State {}".format(initial_state))
        self._state = initial_state
        # Human-readable description per failure state.
        self.fail_messages = {
            TPGState.RX_ALLOC_FAILED: "Rx counter allocation failed!",
            TPGState.DP_ALLOC_FAILED: "Tx counter allocation failed!"
        }

    def is_error_state(self):
        """True iff this state represents an allocation failure."""
        return self._state in TPGState.ERROR_STATES

    def get_fail_message(self):
        """Describe the failure, or report that the state is valid."""
        if self.is_error_state():
            return self.fail_messages[self._state]
        return "TPG State is valid!"

    def __eq__(self, other):
        # Comparing against anything but a TPGState is a programming error.
        if not isinstance(other, TPGState):
            raise TRexError("Invalid comparision for TPGState")
        return self._state == other._state
class STLClient(TRexClient):
CORE_MASK_SPLIT = 1
CORE_MASK_PIN = 2
CORE_MASK_SINGLE = 3
    def __init__(self,
                 username = get_current_user(),
                 server = "localhost",
                 sync_port = 4501,
                 async_port = 4500,
                 verbose_level = "error",
                 logger = None,
                 sync_timeout = None,
                 async_timeout = None
                 ):
        """
        Build a stateless (STL) TRex client.

        :parameters:
            username: str
                Name for identification. NOTE(review): the default is evaluated
                once at import time via get_current_user(), not per call.
            server: str
                TRex server address.
            sync_port: int
                Synchronous RPC port (default 4501).
            async_port: int
                Async subscriber port (default 4500).
            verbose_level: str
                Logger verbosity level ("error" by default).
            logger: Logger
                Custom logger; None selects the base client's default.
            sync_timeout: int
                Passed through to TRexClient; semantics defined there.
            async_timeout: int
                Passed through to TRexClient; semantics defined there.
        """
        # This client speaks the 'STL' flavor of the API, version 5.1.
        api_ver = {'name': 'STL', 'major': 5, 'minor': 1}
        TRexClient.__init__(self,
                            api_ver,
                            username,
                            server,
                            sync_port,
                            async_port,
                            verbose_level,
                            logger,
                            sync_timeout,
                            async_timeout)
        # Per-packet-group-ID (flow stats / latency) stats reader over RPC.
        self.pgid_stats = CPgIdStats(self.conn.rpc)
        # Cached result of get_tpg_status() for the default query; None = stale.
        self.tpg_status = None
def get_mode (self):
return "STL"
e, rx_delay_ms = None):
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.psv.validate('wait_on_traffic', ports, PSV_ACQUIRED)
TRexClient.wait_on_traffic(self, ports, timeout)
if rx_delay_ms is None:
if self.ports[ports[0]].is_virtual():
rx_delay_ms = 100
else:
rx_delay_ms = 10
rc = self._remove_rx_filters(ports, rx_delay_ms = rx_delay_ms)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def update (self, ports = None, mult = "1", total = False, force = False):
ports = ports if ports is not None else self.get_profiles_with_state("active")
ports = self.psv.validate('update', ports, (PSV_ACQUIRED, PSV_TX))
validate_type('mult', mult, basestring)
validate_type('force', force, bool)
validate_type('total', total, bool)
mult_obj = parsing_opts.decode_multiplier(mult,
allow_update = True,
divide_count = len(ports) if total else 1)
if not mult_obj:
raise TRexArgumentError('mult', mult)
self.ctx.logger.pre_cmd("Updating traffic on port(s) {0}:".format(ports))
rc = self._for_each_port("update", ports, mult_obj, force)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def update_streams(self, port, mult = "1", force = False, stream_ids = None):
validate_type('mult', mult, basestring)
validate_type('force', force, bool)
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('update_streams', port, (PSV_ACQUIRED, PSV_TX))
if not stream_ids:
raise TRexError('Please specify stream IDs to update')
mult_obj = parsing_opts.decode_multiplier(mult, allow_update = False)
if not mult_obj:
raise TRexArgumentError('mult', mult)
self.ctx.logger.pre_cmd('Updating streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("update_streams", port, mult_obj, force, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def pause (self, ports = None):
ports = ports if ports is not None else self.get_profiles_with_state("transmitting")
ports = self.psv.validate('pause', ports, (PSV_ACQUIRED, PSV_TX))
self.ctx.logger.pre_cmd("Pausing traffic on port(s) {0}:".format(ports))
rc = self._for_each_port("pause", ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def pause_streams(self, port, stream_ids):
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('pause_streams', port, (PSV_ACQUIRED, PSV_TX))
if not stream_ids:
raise TRexError('Please specify stream IDs to pause')
self.ctx.logger.pre_cmd('Pause streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("pause_streams", port, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("ports")
def resume (self, ports = None):
ports = ports if ports is not None else self.get_profiles_with_state("paused")
ports = self.psv.validate('resume', ports, (PSV_ACQUIRED, PSV_PAUSED))
self.ctx.logger.pre_cmd("Resume traffic on port(s) {0}:".format(ports))
rc = self._for_each_port('resume', ports)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
@validate_port_input("port")
def resume_streams(self, port, stream_ids):
validate_type('stream_ids', stream_ids, list)
ports = self.psv.validate('resume_streams', port, (PSV_ACQUIRED))
if not stream_ids:
raise TRexError('Please specify stream IDs to resume')
self.ctx.logger.pre_cmd('Resume streams %s on port %s:' % (stream_ids, port))
rc = self._for_each_port("resume_streams", port, stream_ids)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
def __push_remote (self, pcap_filename, port_id_list, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec):
rc = RC()
for port_id in port_id_list:
slave_handler = self.ports[port_id ^ 0x1].handler if is_dual else ""
rc.add(self.ports[port_id].push_remote(pcap_filename,
ipg_usec,
speedup,
count,
duration,
is_dual,
slave_handler,
min_ipg_usec))
return rc
@client_api('command', True)
def push_remote (self,
pcap_filename,
ports = None,
ipg_usec = None,
speedup = 1.0,
count = 1,
duration = -1,
is_dual = False,
min_ipg_usec = None,
force = False,
src_mac_pcap = False,
dst_mac_pcap = False):
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.__pre_start_check('PUSH', ports, force)
validate_type('pcap_filename', pcap_filename, basestring)
validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
validate_type('speedup', speedup, (float, int))
validate_type('count', count, int)
validate_type('duration', duration, (float, int))
validate_type('is_dual', is_dual, bool)
validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
validate_type('src_mac_pcap', src_mac_pcap, bool)
validate_type('dst_mac_pcap', dst_mac_pcap, bool)
if force:
active_ports = list(set(self.get_active_ports()).intersection(ports))
all_profiles = []
for port in active_ports:
profile = PortProfileID(str(port) + ".*")
all_profiles.append(profile)
if all_profiles:
self.stop(all_profiles)
if is_dual:
if not pcap_filename.endswith('erf'):
raise TRexError("dual mode: only ERF format is supported for dual mode")
for port in ports:
master = port
slave = port ^ 0x1
if slave in ports:
raise TRexError("dual mode: cannot provide adjacent ports ({0}, {1}) in a batch".format(master, slave))
if slave not in self.get_acquired_ports():
raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
if count & 0xC0000000:
raise TRexError("count is limited to 0x3fff,ffff")
count = count & 0x3FFFFFFF
if src_mac_pcap:
count |= 0x80000000
if dst_mac_pcap:
count |= 0x40000000
self.ctx.logger.pre_cmd("Pushing remote PCAP on port(s) {0}:".format(ports))
rc = self.__push_remote(pcap_filename, ports, ipg_usec, speedup, count, duration, is_dual, min_ipg_usec)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('command', True)
def push_pcap (self,
pcap_filename,
ports = None,
ipg_usec = None,
speedup = 1.0,
count = 1,
duration = -1,
force = False,
vm = None,
packet_hook = None,
is_dual = False,
min_ipg_usec = None,
src_mac_pcap = False,
dst_mac_pcap = False):
ports = ports if ports is not None else self.get_acquired_ports()
ports = self.__pre_start_check('PUSH', ports, force)
validate_type('pcap_filename', pcap_filename, basestring)
validate_type('ipg_usec', ipg_usec, (float, int, type(None)))
validate_type('speedup', speedup, (float, int))
validate_type('count', count, int)
validate_type('duration', duration, (float, int))
validate_type('vm', vm, (list, type(None)))
validate_type('is_dual', is_dual, bool)
validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
validate_type('src_mac_pcap', src_mac_pcap, bool)
validate_type('dst_mac_pcap', dst_mac_pcap, bool)
if all([ipg_usec, min_ipg_usec]):
raise TRexError('Please specify either ipg or minimal ipg, not both.')
if force:
active_ports = list(set(self.get_active_ports()).intersection(ports))
if active_ports:
self.stop(active_ports)
file_size = os.path.getsize(pcap_filename)
if not force and file_size > (1024 * 1024):
file_size_str = format_num(file_size, suffix = 'B')
url = 'https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_pcap_based_traffic'
raise TRexError("PCAP size of {:} is too big for local push - consider using remote (-r):\n{}".format(file_size_str, url))
if is_dual:
for port in ports:
master = port
slave = port ^ 0x1
if slave in ports:
raise TRexError("dual mode: please specify only one of adjacent ports ({0}, {1}) in a batch".format(master, slave))
if slave not in self.get_acquired_ports():
raise TRexError("dual mode: adjacent port {0} must be owned during dual mode".format(slave))
if not is_dual:
try:
self.ctx.logger.pre_cmd("Converting '{0}' to streams:".format(pcap_filename))
profile = STLProfile.load_pcap(pcap_filename,
ipg_usec,
speedup,
count,
vm = vm,
packet_hook = packet_hook,
min_ipg_usec = min_ipg_usec,
src_mac_pcap = src_mac_pcap,
dst_mac_pcap = dst_mac_pcap)
self.ctx.logger.post_cmd(RC_OK())
except TRexError as e:
self.ctx.logger.post_cmd(RC_ERR(e))
raise
self.remove_all_streams(ports = ports)
id_list = self.add_streams(profile.get_streams(), ports)
return self.start(ports = ports, duration = duration, force = force)
else:
split_mode = 'MAC'
if (ipg_usec and ipg_usec < 1000 * speedup) or (min_ipg_usec and min_ipg_usec < 1000):
self.ctx.logger.warning('In order to get synchronized traffic, ensure that effective ipg is at least 1000 usec')
try:
self.ctx.logger.pre_cmd("Analyzing '{0}' for dual ports based on {1}:".format(pcap_filename, split_mode))
profile_a, profile_b = STLProfile.load_pcap(pcap_filename,
ipg_usec,
speedup,
count,
vm = vm,
packet_hook = packet_hook,
split_mode = split_mode,
min_ipg_usec = min_ipg_usec,
src_mac_pcap = src_mac_pcap,
dst_mac_pcap = dst_mac_pcap)
self.ctx.logger.post_cmd(RC_OK())
except TRexError as e:
self.ctx.logger.post_cmd(RC_ERR(e))
raise
all_ports = ports + [p ^ 0x1 for p in ports if profile_b]
self.remove_all_streams(ports = all_ports)
for port in ports:
master = port
slave = port ^ 0x1
self.add_streams(profile_a.get_streams(), master)
if profile_b:
self.add_streams(profile_b.get_streams(), slave)
return self.start(ports = all_ports, duration = duration, force = force, synchronized = True)
@client_api('getter', True)
def get_stats (self, ports = None, sync_now = True):
output = self._get_stats_common(ports, sync_now)
pgid_stats = self.get_pgid_stats()
if not pgid_stats:
raise TRexError(pgid_stats)
output['flow_stats'] = pgid_stats.get('flow_stats', {})
output['latency'] = pgid_stats.get('latency', {})
return output
@client_api('command', True)
def clear_stats (self,
ports = None,
clear_global = True,
clear_flow_stats = True,
clear_latency_stats = True,
clear_xstats = True):
self._clear_stats_common(ports, clear_global, clear_xstats)
if clear_flow_stats or clear_latency_stats:
self.pgid_stats.clear_stats(clear_flow_stats=clear_flow_stats, clear_latency_stats=clear_latency_stats)
    @client_api('getter', True)
    def get_active_pgids(self):
        """Return the currently active packet group IDs (delegates to CPgIdStats)."""
        return self.pgid_stats.get_active_pgids()
@client_api('getter', True)
def get_pgid_stats (self, pgid_list = []):
pgid_list = listify(pgid_list)
return self.pgid_stats.get_stats(pgid_list)
PG tag!")
if tag_type not in SUPPORTED_TAG_TYPES:
raise TRexError("Tag type {} not supported. Supported tag types are = {}".format(tag_type, SUPPORTED_TAG_TYPES))
tag_value = tag.get("value", None)
if tag_value is None and not update:
raise TRexError("You must provide a value field for each TPG tag!")
if not update:
validate_type("tag_value", tag_value, (dict, type(None)))
if tag_type == "Dot1Q":
validate_type("tag_value", tag_value, dict)
vlan = tag_value.get("vlan", None)
if vlan is None:
raise TRexError("You must provide a vlan key for each Dot1Q tag!")
_verify_vlan(vlan)
elif tag_type == "QinQ":
validate_type("tag_value", tag_value, dict)
vlans = tag_value.get("vlans", None)
if not vlans:
raise TRexError("You must provide vlans key for each QinQ tag!")
validate_type("vlans", vlans, list)
if len(vlans) != 2:
raise TRexError("You must provide 2 vlans for QinQ tag.")
for vlan in vlans:
_verify_vlan(vlan)
if update:
tag_id = tag.get("tag_id", None)
if tag_id is None:
raise TRexError("You must provide a tag id when updating TPG tags.")
validate_type("tag_id", tag_id, int)
if not 0 <= tag_id < num_tags:
raise TRexError("Invalid Tag Id {}. Must be in [0-{}).".format(tag_id, num_tags))
@client_api('command', True)
def enable_tpg(self, num_tpgids, tags, rx_ports = None):
acquired_ports = self.get_acquired_ports()
rx_ports = rx_ports if rx_ports is not None else acquired_ports
self.psv.validate('enable_tpg', rx_ports)
validate_type("num_tpgids", num_tpgids, int)
validate_type("tags", tags, list)
for tag in tags:
STLClient._validate_tpg_tag(tag, update=False, num_tags=len(tags))
if not set(rx_ports).issubset(set(acquired_ports)):
raise TRexError("TPG Rx Ports {} must be acquired".format(rx_ports))
self.ctx.logger.pre_cmd("Enabling Tagged Packet Group")
self.tpg_status = None
params = {
"num_tpgids": num_tpgids,
"ports": acquired_ports,
"rx_ports": rx_ports,
"username": self.ctx.username,
"session_id": self.ctx.session_id,
"tags": tags
}
rc = self._transmit("enable_tpg", params=params)
if not rc:
self.ctx.logger.post_cmd(rc)
raise TRexError(rc)
tpg_state = TPGState(TPGState.DISABLED)
while tpg_state != TPGState(TPGState.ENABLED_CP_RX):
rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
if not rc:
self.ctx.logger.post_cmd(rc)
raise TRexError(rc)
tpg_state = TPGState(rc.data())
if tpg_state.is_error_state():
self.disable_tpg(surpress_log=True)
self.ctx.logger.post_cmd(False)
raise TRexError(tpg_state.get_fail_message())
rc = self._transmit("enable_tpg", params={"username": self.ctx.username})
if not rc:
rc = self._transmit("get_tpg_state", params={"username": self.ctx.username})
if not rc:
self.ctx.logger.post_cmd(rc)
raise TRexError(rc)
tpg_state = TPGState(rc.data())
if tpg_state.is_error_state():
self.disable_tpg(surpress_log=True)
self.ctx.logger.post_cmd(False)
raise TRexError(tpg_state.get_fail_message())
else:
raise TRexError("TPG enablement failed but server doesn't indicate of errors.")
self.ctx.logger.post_cmd(rc)
    @client_api('command', True)
    def disable_tpg(self, username=None, surpress_log=False):
        """
        Disable Tagged Packet Group for a user.

        :parameters:
            username: str
                User whose TPG context is disabled; defaults to this client's.
            surpress_log: bool
                If True, skip the pre/post command log lines.
                NOTE(review): misspelling kept — it is part of the signature.

        :raises:
            + :exc:`TRexError` - if any RPC fails.
        """
        # Invalidate cache
        self.tpg_status = None
        if not surpress_log:
            self.ctx.logger.pre_cmd("Disabling Tagged Packet Group")
        # The first "disable_tpg" RPC only tells the server to start deallocating
        # the memory; it doesn't mean the teardown is complete. We then poll
        # "get_tpg_state" until DISABLED_DP_RX and send "disable_tpg" once more
        # (presumably to finalize — confirm against the server RPC protocol).
        username = self.ctx.username if username is None else username
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)
        tpg_state = TPGState(TPGState.ENABLED)
        while tpg_state != TPGState(TPGState.DISABLED_DP_RX):
            rc = self._transmit("get_tpg_state", params={"username": username})
            if not rc:
                raise TRexError(rc)
            tpg_state = TPGState(rc.data())
        rc = self._transmit("disable_tpg", params={"username": username})
        if not rc:
            raise TRexError(rc)
        if not surpress_log:
            self.ctx.logger.post_cmd(rc)
@client_api('getter', True)
def get_tpg_status(self, username=None, port=None):
default_params = (username is None and port is None)
if default_params and self.tpg_status is not None:
return self.tpg_status
params = {}
if port is None:
params = {"username": self.ctx.username if username is None else username}
else:
self.psv.validate('get_tpg_status', [port])
if username is not None:
raise TRexError("Should provide only one between port and username for TPG status.")
params = {"port_id": port}
rc = self._transmit("get_tpg_status", params=params)
if not rc:
raise TRexError(rc)
if default_params:
self.tpg_status = rc.data()
return rc.data()
@client_api('command', True)
def update_tpg_tags(self, new_tags, clear=False):
def clear_update(self, port, min_tpgid, max_tpgid, tag_list):
params = {
"username": self.ctx.username,
"port_id": port,
"min_tpgid": min_tpgid,
"max_tpgid": max_tpgid,
"tag_list": tag_list
}
self._transmit("clear_updated", params=params)
self.ctx.logger.pre_cmd("Updating Tagged Packet Group Tags")
validate_type("new_tags", new_tags, list)
tpg_status = self.get_tpg_status()
if not tpg_status["enabled"]:
raise TRexError("Tagged Packet Group is not enabled.")
num_tags = tpg_status["data"]["num_tags"]
for tag in new_tags:
STLClient._validate_tpg_tag(tag, update=True, num_tags=num_tags)
rc = self._transmit("update_tpg_tags", params={"username": self.ctx.username, "tags": new_tags})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
if clear:
tag_list = [tag["tag_id"] for tag in new_tags]
rx_ports = tpg_status["data"]["rx_ports"]
num_tpgids = tpg_status["data"]["num_tpgids"]
NUM_STATS_CHUNK = 2048
TPGID_CHUNK_SIZE = NUM_STATS_CHUNK // len(tag_list)
min_tpgid = 0
for port in rx_ports:
while min_tpgid != num_tpgids:
max_tpgid = min(min_tpgid + TPGID_CHUNK_SIZE, num_tpgids)
clear_update(self, port, min_tpgid, max_tpgid, tag_list)
min_tpgid = max_tpgid
@client_api('getter', True)
def get_tpg_tags(self, min_tag = 0, max_tag = None, username = None, port = None):
CHUNK_SIZE = 500
def get_tpg_tags_chunk(self, params):
rc = self._transmit("get_tpg_tags", params=params)
if not rc:
raise TRexError(rc)
return rc.data()
validate_type("min_tag", min_tag, int)
validate_type("max_tag", max_tag, (int, type(None)))
validate_type("username", username, (str, type(None)))
tpg_status = self.get_tpg_status(username=username, port=port)
if not tpg_status["enabled"]:
raise TRexError("Tagged Packet Group is not enabled.")
num_tags = tpg_status["data"]["num_tags"]
if max_tag is None:
max_tag = num_tags
if max_tag > num_tags:
raise TRexError("Max Tag {} must be less than number of tags defined: {}".format(max_tag, num_tags))
if min_tag > max_tag:
raise TRexError("Min Tag {} must be less than Max Tag {}".format(min_tag, max_tag))
params = {}
if port is None:
params = {"username": self.ctx.username if username is None else username}
else:
self.psv.validate('get_tpg_tags', [port])
if username is not None:
raise TRexError("Should provide only one between port and username for get_tpg_tags.")
params = {"port_id": port}
tpg_tags = []
current_max_tag = 0
while current_max_tag != max_tag:
current_max_tag = min(max_tag, min_tag + CHUNK_SIZE)
params["min_tag"], params["max_tag"] = min_tag, current_max_tag
tpg_tags += get_tpg_tags_chunk(self, params)
min_tag = current_max_tag
return tpg_tags
@client_api('getter', True)
def get_tpg_stats(self, port, tpgid, min_tag, max_tag, max_sections = 50, unknown_tag = False, untagged = False):
self.psv.validate('get_tpg_stats', [port])
validate_type("tpgid", tpgid, int)
validate_type("min_tag", min_tag, int)
validate_type("max_tag", max_tag, int)
validate_type("max_sections", max_sections, int)
validate_type("unknown_tag", unknown_tag, bool)
validate_type("untagged", untagged, bool)
if min_tag > max_tag:
raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
if min_tag == max_tag and not untagged and not unknown_tag:
raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
def get_tpg_stats_section(self, port, tpgid, min_tag, max_tag, unknown_tag, untagged):
params = {
"port_id": port,
"tpgid": tpgid,
"min_tag": min_tag,
"max_tag": max_tag,
"unknown_tag": unknown_tag,
"untagged": untagged
}
rc = self._transmit("get_tpg_stats", params=params)
if not rc:
raise TRexError(rc)
return rc.data()
def _get_next_min_tag(section_stats, port, tpgid):
tpgid_stats = section_stats[str(port)][str(tpgid)]
for key in tpgid_stats.keys():
if "unknown" in key or "untagged" in key:
continue
elif "-" in key:
return int(key.split("-")[1]) + 1
else:
return (int(key)) + 1
return None
stats = {}
sections = 0
done = False
_min_tag = min_tag
while not done and sections < max_sections:
section_stats = get_tpg_stats_section(self, port, tpgid, _min_tag, max_tag, unknown_tag, untagged)
_min_tag = _get_next_min_tag(section_stats, port, tpgid)
if _min_tag is None or _min_tag == max_tag:
done = True
if not stats:
stats = section_stats
else:
tpgid_stats = stats[str(port)][str(tpgid)]
new_tpgid_stats = section_stats[str(port)][str(tpgid)]
tpgid_stats.update(new_tpgid_stats)
unknown_tag = False
untagged = False
sections += 1
return (stats, _min_tag)
    @client_api('command', True)
    def clear_tpg_stats(self, port, tpgid, min_tag = 0, max_tag = None, tag_list = None, unknown_tag = False, untagged = False):
        """
        Clear TPG Rx stats of one tpgid on one port.

        Exactly one of ``max_tag`` (a [min_tag, max_tag) range) or ``tag_list``
        (explicit tag ids) must be provided. ``unknown_tag`` / ``untagged``
        additionally clear those special counters.

        :raises:
            + :exc:`TRexError` - on invalid parameters or RPC failure.
        """
        self.ctx.logger.pre_cmd("Clearing TPG stats")
        # NOTE(review): validation label says 'clear_tpg_tx_stats' although this
        # is the Rx-stats clear — looks like a copy-paste of the label; the
        # label is only used for validation reporting. Confirm intent.
        self.psv.validate('clear_tpg_tx_stats', [port])
        validate_type("tpgid", tpgid, int)
        validate_type("min_tag", min_tag, int)
        validate_type("max_tag", max_tag, (int, type(None)))
        validate_type("tag_list", tag_list, (list, type(None)))
        validate_type("unknown_tag", unknown_tag, bool)
        validate_type("untagged", untagged, bool)
        # Range mode and list mode are mutually exclusive.
        if (max_tag is None and not tag_list) or (max_tag is not None and tag_list):
            raise TRexError("One between max_tag and tag_list must be provided.")
        if max_tag is not None:
            if min_tag > max_tag:
                raise TRexError("Min Tag {} must be smaller/equal than Max Tag {}".format(min_tag, max_tag))
            # An empty range is allowed only when a special counter is targeted.
            if min_tag == max_tag and not untagged and not unknown_tag:
                raise TRexError("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.")
        if tag_list:
            for tag in tag_list:
                validate_type("tag", tag, int)
                if tag < 0:
                    raise TRexError("Invalid tag {}. Tag must be positive.".format(tag))
        params = {
            "port_id": port,
            "tpgid": tpgid,
            "min_tag": min_tag,
            "max_tag": max_tag,
            "tag_list": tag_list if tag_list else None,
            "unknown_tag": unknown_tag,
            "untagged": untagged,
        }
        rc = self._transmit("clear_tpg_stats", params=params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError(rc)
@client_api('getter', True)
def get_tpg_tx_stats(self, port, tpgid):
self.psv.validate('get_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("get_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_tx_stats(self, port, tpgid):
self.ctx.logger.pre_cmd("Clearing TPG Tx stats")
self.psv.validate('clear_tpg_tx_stats', [port])
validate_type("tpgid", tpgid, int)
rc = self._transmit("clear_tpg_tx_stats", params={"port_id": port, "tpgid": tpgid})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
@client_api('getter', True)
def get_tpg_unknown_tags(self, port):
self.psv.validate('get_tpg_unknown_tags', [port])
rc = self._transmit("get_tpg_unknown_tags", params={"port_id": port})
if not rc:
raise TRexError(rc)
return rc.data()
@client_api('command', True)
def clear_tpg_unknown_tags(self, port):
self.ctx.logger.pre_cmd("Clearing TPG unknown tags")
self.psv.validate('clear_tpg_unknown_tags', [port])
rc = self._transmit("clear_tpg_unknown_tags", params={"port_id": port})
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc)
bold", "magenta"))
for port_id, port_profiles_table in profiles_per_port.items():
if port_profiles_table:
text_tables.print_table_with_header(port_profiles_table,
header = 'Port %s:' % port_id)
@console_api('streams', 'STL', True, True)
def streams_line(self, line):
parser = parsing_opts.gen_parser(self,
"streams",
self.streams_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL,
parsing_opts.STREAMS_MASK,
parsing_opts.STREAMS_CODE)
opts = parser.parse_args(line.split())
if not opts:
return opts
streams_per_port = self._get_streams(opts.ports, set(opts.ids), table_format = opts.code is None)
if not streams_per_port:
self.logger.info(format_text("No streams found with desired filter.\n", "bold", "magenta"))
elif opts.code is None:
for port_id, port_streams_table in streams_per_port.items():
if port_streams_table:
text_tables.print_table_with_header(port_streams_table,
header = 'Port %s:' % port_id)
elif opts.code:
if not opts.code.endswith('.py'):
raise TRexError('Saved filename should end with .py')
is_several_ports = len(streams_per_port) > 1
if is_several_ports:
print(format_text('\nWarning: several ports specified, will save in separate file per port.', 'bold'))
for port_id, port_streams_data in streams_per_port.items():
if not port_streams_data:
print('No streams to save at port %s, skipping.' % port_id)
continue
filename = ('%s_port%s.py' % (opts.code[:-3], port_id)) if is_several_ports else opts.code
if os.path.exists(filename):
sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
ans = user_input().strip()
if ans.lower() not in ('y', 'yes'):
print('Not saving.')
continue
self.logger.pre_cmd('Saving file as: %s' % filename)
try:
profile = STLProfile(list(port_streams_data.values()))
with open(filename, 'w') as f:
f.write(profile.dump_to_code())
except Exception as e:
self.logger.post_cmd(False)
print(e)
print('')
else:
self.logger.post_cmd(True)
else:
for port_id, port_streams_data in streams_per_port.items():
if not port_streams_data:
continue
print(format_text('Port: %s' % port_id, 'cyan', 'underline') + '\n')
for stream_id, stream in port_streams_data.items():
print(format_text('Stream ID: %s' % stream_id, 'cyan', 'underline'))
print(' ' + '\n '.join(stream.to_code().splitlines()) + '\n')
@console_api('push', 'STL', True)
def push_line(self, line):
    # Push a PCAP file (local or remote) to the given ports.
    # Parser options shared by both parsing passes; the file-path option
    # (checked vs. unchecked) is appended per pass below.
    common_args = [self,
                   "push",
                   self.push_line.__doc__,
                   parsing_opts.REMOTE_FILE,
                   parsing_opts.PORT_LIST_WITH_ALL,
                   parsing_opts.COUNT,
                   parsing_opts.DURATION,
                   parsing_opts.IPG,
                   parsing_opts.MIN_IPG,
                   parsing_opts.SPEEDUP,
                   parsing_opts.FORCE,
                   parsing_opts.DUAL,
                   parsing_opts.SRC_MAC_PCAP,
                   parsing_opts.DST_MAC_PCAP]
    # First pass: skip local-path validation, as the file may live on the server.
    parser = parsing_opts.gen_parser(*(common_args + [parsing_opts.FILE_PATH_NO_CHECK]))
    opts = parser.parse_args(line.split(), verify_acquired = True)
    if not opts:
        return opts
    if not opts.remote:
        # Local file: re-parse with the existence check enabled.
        parser = parsing_opts.gen_parser(*(common_args + [parsing_opts.FILE_PATH]))
        opts = parser.parse_args(line.split(), verify_acquired = True)
        if not opts:
            return opts
    # Both push flavors take the same keyword arguments.
    push_kwargs = dict(ports = opts.ports,
                       ipg_usec = opts.ipg_usec,
                       min_ipg_usec = opts.min_ipg_usec,
                       speedup = opts.speedup,
                       count = opts.count,
                       duration = opts.duration,
                       force = opts.force,
                       is_dual = opts.dual,
                       src_mac_pcap = opts.src_mac_pcap,
                       dst_mac_pcap = opts.dst_mac_pcap)
    push = self.push_remote if opts.remote else self.push_pcap
    push(opts.file[0], **push_kwargs)
    return RC_OK()
@console_api('service', 'STL', True)
def service_line(self, line):
    # Enable/disable service mode on the requested ports.
    parser = parsing_opts.gen_parser(
        self,
        "service",
        self.service_line.__doc__,
        parsing_opts.PORT_LIST_WITH_ALL,
        parsing_opts.SERVICE_GROUP)
    opts = parser.parse_args(line.split())
    enabled, filtered, mask = self._get_service_params(opts)
    self.set_service_mode(ports = opts.ports,
                          enabled = enabled,
                          filtered = filtered,
                          mask = mask)
    return True
@console_api('start', 'STL', True)
def start_line (self, line):
    # Load an STL profile file and start transmitting on the given port
    # profiles (or validate only, with --dry).
    parser = parsing_opts.gen_parser(self,
                                     "start",
                                     self.start_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.FILE_PATH,
                                     parsing_opts.DURATION,
                                     parsing_opts.ARGPARSE_TUNABLES,
                                     parsing_opts.MULTIPLIER_STRICT,
                                     parsing_opts.DRY_RUN,
                                     parsing_opts.CORE_MASK_GROUP,
                                     parsing_opts.SYNCHRONIZED)
    opts = parser.parse_args(line.split(), default_ports = self.get_acquired_ports(), verify_acquired = True)
    help_flags = ('-h', '--help')
    tunable_dict = {}
    if "-t" in line and '=' in line:
        # Old-style "-t key=value,..." tunables: decode them and convert to
        # the new argparse-style tunable list.
        tun_list = opts.tunables
        tunable_dict = parsing_opts.decode_tunables(tun_list[0])
        opts.tunables = parsing_opts.convert_old_tunables_to_new_tunables(tun_list[0])
        opts.tunables.extend(tun_list[1:])
    tunable_dict["tunables"] = opts.tunables
    # Normalize every entry to a PortProfileID.
    ports = []
    for port in opts.ports:
        if not isinstance(port, PortProfileID):
            port = PortProfileID(port)
        ports.append(port)
    port_id_list = parse_ports_from_profiles(ports)
    # Core mask: explicit --core-mask wins, otherwise pin/split policy.
    if opts.core_mask is not None:
        core_mask = opts.core_mask
    else:
        core_mask = self.CORE_MASK_PIN if opts.pin_cores else self.CORE_MASK_SPLIT
    self.__decode_core_mask(port_id_list, core_mask)
    streams_per_profile = {}   # profile name -> stream list
    streams_per_port = {}      # numeric port id -> combined stream list
    try:
        for profile in ports:
            profile_name = str(profile)
            port_id = int(profile)
            # Direction is derived from port parity (even = 0, odd = 1).
            profile = STLProfile.load(opts.file[0],
                                      direction = port_id % 2,
                                      port_id = port_id,
                                      **tunable_dict)
            if any(h in opts.tunables for h in help_flags):
                # Tunables help was requested; nothing to start.
                return True
            if profile is None:
                print('Failed to convert STL profile')
                return False
            stream_list = profile.get_streams()
            streams_per_profile[profile_name] = stream_list
            if port_id not in streams_per_port:
                streams_per_port[port_id] = list(stream_list)
            else:
                streams_per_port[port_id].extend(list(stream_list))
    except TRexError as e:
        s = format_text("\nError loading profile '{0}'\n".format(opts.file[0]), 'bold')
        s += "\n" + e.brief()
        raise TRexError(s)
    self.__pre_start_check('START', ports, opts.force, streams_per_port)
    ports = self.validate_profile_input(ports)
    # With --force, stop already-active profiles and drop their streams first.
    active_profiles = list_intersect(self.get_profiles_with_state("active"), ports)
    if active_profiles and opts.force:
        self.stop(active_profiles)
        self.remove_all_streams(ports)
    for profile in ports:
        profile_name = str(profile)
        self.add_streams(streams_per_profile[profile_name], ports = profile)
    if opts.dry:
        # Dry run: validate the configuration without transmitting.
        self.validate(ports, opts.mult, opts.duration, opts.total)
    else:
        self.start(ports,
                   opts.mult,
                   opts.force,
                   opts.duration,
                   opts.total,
                   core_mask,
                   opts.sync)
    return True
@console_api('stop', 'STL', True)
def stop_line(self, line):
    # Stop active traffic on the given profiles; --remove also drops streams.
    parser = parsing_opts.gen_parser(self,
                                     "stop",
                                     self.stop_line.__doc__,
                                     parsing_opts.PROFILE_LIST_WITH_ALL,
                                     parsing_opts.REMOVE)
    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("active"),
                             verify_acquired = True,
                             allow_empty = True)
    ports = self.validate_profile_input(opts.ports)
    port_id_list = parse_ports_from_profiles(ports)
    active_ports = list_intersect(ports, self.get_profiles_with_state("active"))
    if active_ports:
        self.stop(active_ports)
    else:
        print('no active ports' if not ports
              else 'no active traffic on ports {0}'.format(ports))
    if opts.remove:
        streams_ports = list_intersect(ports, self.get_profiles_with_state("streams"))
        if streams_ports:
            self.remove_all_streams(ports)
        else:
            print('no ports with streams' if not ports
                  else 'no streams on ports {0}'.format(ports))
    return True
@console_api('update', 'STL', True)
def update_line(self, line):
    # Update the rate of active traffic, whole profiles or specific streams.
    parser = parsing_opts.gen_parser(self,
                                     "update",
                                     self.update_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.MULTIPLIER,
                                     parsing_opts.TOTAL,
                                     parsing_opts.FORCE,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("active"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    if opts.ids:
        # Stream-level update is only defined for a single port.
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.update_streams(ports[0], opts.mult, opts.force, opts.ids)
        return True
    profiles = list_intersect(ports, self.get_profiles_with_state("active"))
    if not profiles:
        raise TRexError('no active ports' if not ports
                        else 'no active traffic on ports {0}'.format(ports))
    self.update(profiles, opts.mult, opts.total, opts.force)
    return True
@console_api('pause', 'STL', True)
def pause_line(self, line):
    # Pause transmitting traffic, whole profiles or specific streams.
    parser = parsing_opts.gen_parser(self,
                                     "pause",
                                     self.pause_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("transmitting"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    if opts.ids:
        # Stream-level pause is only defined for a single port.
        if len(ports) != 1:
            raise TRexError('pause - must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.pause_streams(ports[0], opts.ids)
        return True
    if ports and is_sub_list(ports, self.get_profiles_with_state("paused")):
        raise TRexError('all of ports(s) {0} are already paused'.format(ports))
    profiles = list_intersect(ports, self.get_profiles_with_state("transmitting"))
    if not profiles:
        raise TRexError('no transmitting ports' if not ports
                        else 'none of ports {0} are transmitting'.format(ports))
    self.pause(profiles)
    return True
@console_api('resume', 'STL', True)
def resume_line(self, line):
    # Resume paused traffic, whole profiles or specific streams.
    parser = parsing_opts.gen_parser(self,
                                     "resume",
                                     self.resume_line.__doc__,
                                     parsing_opts.PROFILE_LIST,
                                     parsing_opts.STREAMS_MASK)
    opts = parser.parse_args(line.split(),
                             default_ports = self.get_profiles_with_state("paused"),
                             verify_acquired = True)
    ports = self.validate_profile_input(opts.ports)
    if opts.ids:
        # Stream-level resume is only defined for a single port.
        if len(ports) != 1:
            raise TRexError('must provide exactly one port when specifying stream_ids, got: %s' % ports)
        self.resume_streams(ports[0], opts.ids)
        return True
    profiles = list_intersect(ports, self.get_profiles_with_state("paused"))
    if not profiles:
        raise TRexError('no paused ports' if not ports
                        else 'none of ports {0} are paused'.format(ports))
    self.resume(profiles)
    return True
parsing_opts.TPG_ENABLE
)
opts = parser.parse_args(line.split())
if not opts:
return opts
try:
tpg_conf = STLTaggedPktGroupTagConf.load(opts.tags_conf, **{"tunables": opts.tunables})
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
if tpg_conf is None:
return None
try:
self.enable_tpg(opts.num_tpgids, tpg_conf, opts.ports)
except TRexError as e:
s = format_text("{}".format(e.brief()), "bold", "red")
raise TRexError(s)
@console_api('tpg_disable', 'STL', True)
def tpg_disable(self, line):
    # Disable the Tagged Packet Group feature on the server.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_disable",
                                     self.tpg_disable.__doc__,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.disable_tpg()
    except TRexError as e:
        # Re-raise with a colorized brief for the console.
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_status', 'STL', True)
def show_tpg_status(self, line):
    # Show whether Tagged Packet Group is enabled and, if so, its details.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_status",
                                     self.show_tpg_status.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    status = None
    try:
        status = self.get_tpg_status(opts.username, opts.port)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
    if status is None:
        self.logger.info(format_text("Couldn't get status from STL Server.\n", "bold", "magenta"))
        return  # bug fix: previously fell through and crashed on status.get()
    enabled = status.get("enabled", None)
    if enabled is None:
        self.logger.info(format_text("Enabled not found in server status response.\n", "bold", "magenta"))
        return  # bug fix: previously fell through and reported "disabled"
    msg = "\nTagged Packet Group is enabled\n" if enabled else "\nTagged Packet Group is disabled\n"
    self.logger.info(format_text(msg, "bold", "yellow"))
    # If Tagged Packet Group is enabled, print its details in a table.
    if enabled:
        data = status.get("data", None)
        if data is None:
            self.logger.info(format_text("Data not found in server status response.\n", "bold", "magenta"))
            return  # bug fix: previously fell through and printed an empty/invalid table
        keys_to_headers = [ {'key': 'username',       'header': 'Username'},
                            {'key': 'acquired_ports', 'header': 'Acquired Ports'},
                            {'key': 'rx_ports',       'header': 'Rx Ports'},
                            {'key': 'num_tpgids',     'header': 'Num TPGId'},
                            {'key': 'num_tags',       'header': 'Num Tags'},
        ]
        kwargs = {'title': 'Tagged Packet Group Data',
                  'empty_msg': 'No status found',
                  'keys_to_headers': keys_to_headers}
        text_tables.print_table_by_keys(data, **kwargs)
@console_api('tpg_update', 'STL', True)
def tpg_update(self, line):
    # Update (or invalidate) a single Tagged Packet Group tag definition.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_update",            # bug fix: was "tpg_tags" (copy-paste)
                                     self.tpg_update.__doc__,  # bug fix: was self.show_tpg_tags.__doc__
                                     parsing_opts.TPG_UPDATE
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    # "Invalidate" maps to a null type, which invalidates/removes the tag.
    tag_type = opts.tag_type if opts.tag_type != "Invalidate" else None
    new_tag = {
        "type": tag_type,
        "tag_id": opts.tag_id
    }
    if tag_type is not None:
        # Not invalidating tag, value is needed
        if opts.value is None:
            raise TRexError(format_text("Value must be present for type {}.".format(tag_type), "red", "bold"))
        if tag_type == "Dot1Q":
            # Dot1Q carries a single VLAN id.
            if len(opts.value) != 1:
                raise TRexError(format_text("Only one value must be presented for Dot1Q tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlan": opts.value[0]
            }
        if tag_type == "QinQ":
            # QinQ carries an (outer, inner) VLAN pair.
            if len(opts.value) != 2:
                raise TRexError(format_text("Exactly two values must be presented for QinQ tags. Invalid value {}.".format(opts.value), "red", "bold"))
            new_tag["value"] = {
                "vlans": opts.value
            }
    try:
        self.update_tpg_tags([new_tag], opts.clear)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
@console_api('tpg_tags', 'STL', True)
def show_tpg_tags(self, line):
    # Show the TPG tag mapping (tag id -> tag type/value), paginated
    # MAX_TAGS_TO_SHOW entries at a time with an interactive prompt.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_tags",
                                     self.show_tpg_tags.__doc__,
                                     parsing_opts.TPG_USERNAME,
                                     parsing_opts.SINGLE_PORT_NOT_REQ,
                                     parsing_opts.TPG_MIN_TAG,
                                     parsing_opts.TPG_MAX_TAG_NOT_REQ,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    MAX_TAGS_TO_SHOW = 20  # page size for interactive display
    table_keys_to_headers = [ {'key': 'tag_id', 'header': 'Tag Id'},
                              {'key': 'tag', 'header': 'Tag Type'}
    ]
    table_kwargs = {'empty_msg': '\nNo tags found',
                    'keys_to_headers': table_keys_to_headers}
    tpg_status = self.get_tpg_status(username=opts.username, port=opts.port)
    if not tpg_status["enabled"]:
        raise TRexError(format_text("Tagged Packet Group is not enabled.", "bold", "red"))
    num_tags_total = tpg_status["data"]["num_tags"]
    # Clamp the requested upper bound to the number of tags the server has.
    last_tag = num_tags_total if opts.max_tag is None else min(num_tags_total, opts.max_tag)
    current_tag = opts.min_tag
    while current_tag != last_tag:
        # Fetch and print one page: tags in [current_tag, next_current_tag).
        next_current_tag = min(current_tag + MAX_TAGS_TO_SHOW, last_tag)
        try:
            tags = self.get_tpg_tags(current_tag, next_current_tag, opts.username, opts.port)
        except TRexError as e:
            s = format_text("{}".format(e.brief()), "bold", "red")
            raise TRexError(s)
        tags_to_print = []
        for i in range(len(tags)):
            # None entries render as '-' (no tag defined at that id).
            tags_to_print.append(
                {
                    "tag_id": current_tag + i,
                    "tag": '-' if tags[i] is None else STLClient._tpg_tag_value_2str(tags[i]['type'], tags[i]['value'])
                }
            )
        table_kwargs['title'] = "Tags [{}-{})".format(current_tag, next_current_tag)
        text_tables.print_table_by_keys(tags_to_print, **table_kwargs)
        current_tag = next_current_tag
        if current_tag != last_tag:
            # The message should be printed iff there will be another iteration.
            input("Press Enter to see the rest of the tags")
@console_api('tpg_stats', 'STL', True)
def show_tpg_stats(self, line):
    # Show TPG Rx stats for a (port, tpgid) pair over a tag range,
    # paginated MAX_TAGS_TO_SHOW tags at a time.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_stats",
                                     self.show_tpg_stats.__doc__,
                                     parsing_opts.TPG_STL_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    if opts.max_tag < opts.min_tag:
        # The client Api checks this as well but our loop logic requires this condition.
        raise TRexError(format_text("Max Tag {} must be greater/equal than Min Tag {}".format(opts.max_tag, opts.min_tag), "bold", "red"))
    if opts.min_tag == opts.max_tag and not opts.untagged and not opts.unknown_tag:
        # An empty tag range is only meaningful when a special counter is requested.
        raise TRexError(format_text("Min Tag can equal Max Tag iff untagged or unknown tag flags provided.", "bold", "red"))
    MAX_TAGS_TO_SHOW = 20  # page size for interactive display
    current_tag = opts.min_tag
    new_current_tag = current_tag
    first_iteration = True
    table_keys_to_headers = [ {'key': 'tags', 'header': 'Tag Id'},
                              {'key': 'pkts', 'header': 'Packets'},
                              {'key': 'bytes', 'header': 'Bytes'},
                              {'key': 'seq_err', 'header': 'Seq Error'},
                              {'key': 'seq_err_too_big', 'header': 'Seq Too Big'},
                              {'key': 'seq_err_too_small', 'header': 'Seq Too Small'},
                              {'key': 'dup', 'header': 'Duplicates'},
                              {'key': 'ooo', 'header': 'Out of Order'},
    ]
    table_kwargs = {'empty_msg': 'No stats found',
                    'keys_to_headers': table_keys_to_headers}
    # Loop until we get all the tags
    while current_tag != opts.max_tag or first_iteration:
        stats = None
        try:
            # Special counters (unknown/untagged) are requested only on the first page.
            unknown_tag = first_iteration and opts.unknown_tag
            untagged = first_iteration and opts.untagged
            stats, new_current_tag = self.get_tpg_stats(opts.port, opts.tpgid, current_tag, opts.max_tag, max_sections=MAX_TAGS_TO_SHOW, unknown_tag=unknown_tag, untagged=untagged)
        except TRexError as e:
            s = format_text("{}".format(e.brief()), "bold", "red")
            raise TRexError(s)
        if stats is None:
            self.logger.info(format_text("\nNo stats found for the provided params.\n", "bold", "yellow"))
            return
        # Stats are keyed by stringified port id, then stringified tpgid.
        port_stats = stats.get(str(opts.port), None)
        if port_stats is None:
            self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
            return
        tpgid_stats = port_stats.get(str(opts.tpgid), None)
        if tpgid_stats is None:
            self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
            return
        stats_list = []
        for tag_id, tag_stats in tpgid_stats.items():
            tag_stats['tags'] = tag_id.replace("_tag", "") # remove _tag keyword when printing
            stats_list.append(tag_stats)
        table_kwargs['title'] = "Port {}, tpgid {}, Tags = [{}, {})".format(opts.port, opts.tpgid, current_tag, new_current_tag)
        text_tables.print_table_by_keys(stats_list, **table_kwargs)
        if new_current_tag is not None and new_current_tag != opts.max_tag:
            # The message should be printed iff there will be another iteration.
            input("Press Enter to see the rest of the stats")
        first_iteration = False # Set this false after the first iteration
        current_tag = new_current_tag if new_current_tag is not None else current_tag # Update the current tag in case it is a new one.
@console_api('tpg_clear_stats', 'STL', True)
def tpg_clear_stats(self, line):
    # Clear TPG Rx stats for the selected tags / special counters.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_stats",
                                     self.tpg_clear_stats.__doc__,
                                     parsing_opts.TPG_STL_CLEAR_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_stats(opts.port, opts.tpgid, opts.min_tag, opts.max_tag,
                             opts.tag_list, opts.unknown_tag, opts.untagged)
    except TRexError as e:
        # Re-raise with a colorized brief for the console.
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_tx_stats', 'STL', True)
def show_tpg_tx_stats(self, line):
    # Show TPG Tx stats (packets/bytes) for a (port, tpgid) pair.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_tx_stats",
                                     self.show_tpg_tx_stats.__doc__,
                                     parsing_opts.TPG_STL_TX_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    headers = [{'key': 'pkts', 'header': 'Packets'},
               {'key': 'bytes', 'header': 'Bytes'}]
    try:
        tx_stats = self.get_tpg_tx_stats(opts.port, opts.tpgid)
    except TRexError as e:
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
    # Stats are keyed by stringified port id, then stringified tpgid.
    port_stats = tx_stats.get(str(opts.port), None)
    if port_stats is None:
        self.logger.info(format_text("\nNo stats found for the provided port.\n", "bold", "yellow"))
        return
    tpgid_stats = port_stats.get(str(opts.tpgid), None)
    if tpgid_stats is None:
        self.logger.info(format_text("\nNo stats found for the provided tpgid.\n", "bold", "yellow"))
        return
    text_tables.print_table_by_keys(tpgid_stats,
                                    title = "Port {}, tpgid {}".format(opts.port, opts.tpgid),
                                    empty_msg = 'No stats found',
                                    keys_to_headers = headers)
@console_api('tpg_clear_tx_stats', 'STL', True)
def tpg_clear_tx_stats(self, line):
    # Clear TPG Tx stats for a (port, tpgid) pair.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_tx_stats",
                                     self.tpg_clear_tx_stats.__doc__,
                                     parsing_opts.TPG_STL_TX_STATS
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_tx_stats(opts.port, opts.tpgid)
    except TRexError as e:
        # Re-raise with a colorized brief for the console.
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
@console_api('tpg_show_unknown_tags', 'STL', True)
def show_tpg_unknown_stats(self, line):
    # Show the (deduplicated) unknown tags seen on a port.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_show_unknown_stats",
                                     self.show_tpg_unknown_stats.__doc__,
                                     parsing_opts.TPG_PORT,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    table_keys_to_headers = [{'key': 'tpgid', 'header': 'tpgid'},
                             {'key': 'type', 'header': 'Type'}]
    table_kwargs = {'empty_msg': '\nNo unknown tags found in port {}.'.format(opts.port),
                    'keys_to_headers': table_keys_to_headers}
    unknown_tags = {}
    try:
        unknown_tags = self.get_tpg_unknown_tags(opts.port)
    except TRexError as e:
        s = format_text("{}".format(e.brief()), "bold", "red")
        raise TRexError(s)
    # Results are keyed by stringified port id.
    port_unknown_tags = unknown_tags.get(str(opts.port), None)
    if port_unknown_tags is None:
        self.logger.info(format_text("\nNo unknown tags found in the provided port.\n", "bold", "yellow"))
        return
    # Deduplicate entries before printing.
    unknown_tags_to_print = []
    for val in port_unknown_tags:
        unknown_tag = {
            'tpgid': val['tpgid'],
            'type': STLClient._tpg_tag_value_2str(val['tag']['type'], val['tag']['value'])
        }
        if unknown_tag not in unknown_tags_to_print:
            # This list is at max 10 elements. Dict is not hashable.
            unknown_tags_to_print.append(unknown_tag)
    table_kwargs['title'] = "Port {} unknown tags".format(opts.port)
    text_tables.print_table_by_keys(unknown_tags_to_print, **table_kwargs)
@console_api('tpg_clear_unknown_tags', 'STL', True)
def tpg_clear_unknown_stats(self, line):
    # Clear the unknown-tags list collected for a port.
    parser = parsing_opts.gen_parser(self,
                                     "tpg_clear_unknown_stats",
                                     self.tpg_clear_unknown_stats.__doc__,
                                     parsing_opts.TPG_PORT,
                                     )
    opts = parser.parse_args(line.split())
    if not opts:
        return opts
    try:
        self.clear_tpg_unknown_tags(opts.port)
    except TRexError as e:
        # Re-raise with a colorized brief for the console.
        raise TRexError(format_text("{}".format(e.brief()), "bold", "red"))
| true | true |
f73064bf0edcae1e29ce194797909ff6107a9759 | 22,176 | py | Python | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 2 | 2015-06-18T19:07:20.000Z | 2017-09-27T18:54:29.000Z | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-04-15T22:17:42.000Z | 2016-03-22T08:46:27.000Z | salt/runners/saltutil.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 4 | 2015-04-16T03:24:08.000Z | 2015-04-22T15:33:28.000Z | # -*- coding: utf-8 -*-
'''
The Saltutil runner is used to sync custom types to the Master. See the
:mod:`saltutil module <salt.modules.saltutil>` for documentation on
managing updates to minions.
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
import salt.utils.extmods
log = logging.getLogger(__name__)
def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync all custom types

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        dictionary of modules to sync based on type

    extmod_blacklist : None
        dictionary of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_all
        salt-run saltutil.sync_all extmod_whitelist={'runners': ['custom_runner'], 'grains': []}
    '''
    log.debug('Syncing all')
    # One (result-key, sync function) pair per custom-module type, in the
    # same order the results were previously assembled.
    syncers = (
        ('clouds', sync_clouds),
        ('modules', sync_modules),
        ('states', sync_states),
        ('grains', sync_grains),
        ('renderers', sync_renderers),
        ('returners', sync_returners),
        ('output', sync_output),
        ('proxymodules', sync_proxymodules),
        ('runners', sync_runners),
        ('wheel', sync_wheel),
        ('engines', sync_engines),
        ('thorium', sync_thorium),
        ('queues', sync_queues),
        ('pillar', sync_pillar),
        ('utils', sync_utils),
        ('sdb', sync_sdb),
        ('cache', sync_cache),
        ('fileserver', sync_fileserver),
        ('tops', sync_tops),
        ('tokens', sync_eauth_tokens),
        ('serializers', sync_serializers),
        ('auth', sync_auth),
        ('executors', sync_executors),
    )
    ret = {}
    for key, syncer in syncers:
        ret[key] = syncer(saltenv=saltenv,
                          extmod_whitelist=extmod_whitelist,
                          extmod_blacklist=extmod_blacklist)
    return ret
def sync_auth(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync auth modules from ``salt://_auth`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_auth
    '''
    # doc fix: docstring previously said "execution modules" (copy-paste
    # from sync_modules); this syncs auth modules from salt://_auth.
    return salt.utils.extmods.sync(__opts__, 'auth', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_modules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync execution modules from ``salt://_modules`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_modules
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'modules',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_states(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync state modules from ``salt://_states`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_states
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'states',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_grains(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync grains modules from ``salt://_grains`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_grains
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'grains',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_renderers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync renderer modules from ``salt://_renderers`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_renderers
    '''
    # doc fix: removed duplicated word ("from from") in the summary line.
    return salt.utils.extmods.sync(__opts__, 'renderers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_returners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync returner modules from ``salt://_returners`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_returners
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'returners',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_output(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync output modules from ``salt://_output`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_output
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'output',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_proxymodules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync proxy modules from ``salt://_proxy`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_proxymodules
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'proxy',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_runners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync runners from ``salt://_runners`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_runners
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'runners',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_wheel(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync wheel modules from ``salt://_wheel`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_wheel
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'wheel',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_engines(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync engines from ``salt://_engines`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_engines
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'engines',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_thorium(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync Thorium from ``salt://_thorium`` to the master

    saltenv: ``base``
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist
        comma-separated list of modules to sync

    extmod_blacklist
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_thorium
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'thorium',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_queues(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync queue modules from ``salt://_queues`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_queues
    '''
    # Delegate to the shared sync helper; only the first element of the
    # returned tuple is reported.
    result = salt.utils.extmods.sync(
        __opts__,
        'queues',
        saltenv=saltenv,
        extmod_whitelist=extmod_whitelist,
        extmod_blacklist=extmod_blacklist)
    return result[0]
def sync_pillar(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync pillar modules from ``salt://_pillar`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_pillar
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'pillar', **sync_kwargs)[0]
def sync_utils(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2016.11.0

    Sync utils modules from ``salt://_utils`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_utils
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'utils', **sync_kwargs)[0]
def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync sdb modules from ``salt://_sdb`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_sdb
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'sdb', **sync_kwargs)[0]
def sync_tops(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2016.3.7,2016.11.4,2017.7.0

    Sync master_tops modules from ``salt://_tops`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_tops
    '''
    return salt.utils.extmods.sync(__opts__, 'tops', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync cache modules from ``salt://_cache`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_cache
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'cache', **sync_kwargs)[0]
def sync_fileserver(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync fileserver modules from ``salt://_fileserver`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_fileserver
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'fileserver', **sync_kwargs)[0]
def sync_clouds(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync cloud modules from ``salt://_clouds`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_clouds
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'clouds', **sync_kwargs)[0]
def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2017.7.0

    Sync roster modules from ``salt://_roster`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_roster
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    return salt.utils.extmods.sync(__opts__, 'roster', **sync_kwargs)[0]
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2018.3.0

    Sync eauth token modules from ``salt://_tokens`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_eauth_tokens
    '''
    sync_kwargs = {
        'saltenv': saltenv,
        'extmod_whitelist': extmod_whitelist,
        'extmod_blacklist': extmod_blacklist,
    }
    # Note: the extension-module type is 'tokens', not 'eauth_tokens'.
    return salt.utils.extmods.sync(__opts__, 'tokens', **sync_kwargs)[0]
def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2019.2.0

    Sync serializer modules from ``salt://_serializers`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_serializers
    '''
    return salt.utils.extmods.sync(__opts__, 'serializers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
def sync_executors(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2019.2.1

    Sync executor modules from ``salt://_executors`` to the master

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt-run saltutil.sync_executors
    '''
    return salt.utils.extmods.sync(__opts__, 'executors', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                                   extmod_blacklist=extmod_blacklist)[0]
| 33.498489 | 128 | 0.689123 |
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.extmods
# Module-level logger named after this runner module.
log = logging.getLogger(__name__)
def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    '''
    Sync all custom extension-module types to the master in one call.

    saltenv : base
        The fileserver environment from which to sync. To sync from more than
        one environment, pass a comma-separated list.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    Returns a dict mapping each module type (``clouds``, ``modules``, ...) to
    the result of its individual ``sync_*`` runner.
    '''
    log.debug('Syncing all')
    # Every sync_* helper takes the same keyword arguments; forward them once.
    sync_kwargs = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                       extmod_blacklist=extmod_blacklist)
    ret = {}
    ret['clouds'] = sync_clouds(**sync_kwargs)
    ret['modules'] = sync_modules(**sync_kwargs)
    ret['states'] = sync_states(**sync_kwargs)
    ret['grains'] = sync_grains(**sync_kwargs)
    ret['renderers'] = sync_renderers(**sync_kwargs)
    ret['returners'] = sync_returners(**sync_kwargs)
    ret['output'] = sync_output(**sync_kwargs)
    ret['proxymodules'] = sync_proxymodules(**sync_kwargs)
    ret['runners'] = sync_runners(**sync_kwargs)
    ret['wheel'] = sync_wheel(**sync_kwargs)
    ret['engines'] = sync_engines(**sync_kwargs)
    ret['thorium'] = sync_thorium(**sync_kwargs)
    ret['queues'] = sync_queues(**sync_kwargs)
    ret['pillar'] = sync_pillar(**sync_kwargs)
    ret['utils'] = sync_utils(**sync_kwargs)
    ret['sdb'] = sync_sdb(**sync_kwargs)
    ret['cache'] = sync_cache(**sync_kwargs)
    ret['fileserver'] = sync_fileserver(**sync_kwargs)
    ret['tops'] = sync_tops(**sync_kwargs)
    ret['tokens'] = sync_eauth_tokens(**sync_kwargs)
    ret['serializers'] = sync_serializers(**sync_kwargs)
    ret['auth'] = sync_auth(**sync_kwargs)
    ret['executors'] = sync_executors(**sync_kwargs)
    # Fix: sync_roster is defined in this module but was omitted from
    # sync_all, so roster modules were silently never synced by "sync_all".
    ret['roster'] = sync_roster(**sync_kwargs)
    return ret
def sync_auth(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'auth' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'auth', **sync_opts)[0]
def sync_modules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'modules' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'modules', **sync_opts)[0]
def sync_states(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'states' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'states', **sync_opts)[0]
def sync_grains(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'grains' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'grains', **sync_opts)[0]
def sync_renderers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'renderers' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'renderers', **sync_opts)[0]
def sync_returners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'returners' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'returners', **sync_opts)[0]
def sync_output(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'output' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'output', **sync_opts)[0]
def sync_proxymodules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync; note the type string is 'proxy',
    # not 'proxymodules'.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'proxy', **sync_opts)[0]
def sync_runners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'runners' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'runners', **sync_opts)[0]
def sync_wheel(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'wheel' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'wheel', **sync_opts)[0]
def sync_engines(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'engines' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'engines', **sync_opts)[0]
def sync_thorium(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'thorium' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'thorium', **sync_opts)[0]
def sync_queues(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'queues' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'queues', **sync_opts)[0]
def sync_pillar(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'pillar' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'pillar', **sync_opts)[0]
def sync_utils(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'utils' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'utils', **sync_opts)[0]
def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'sdb' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'sdb', **sync_opts)[0]
def sync_tops(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'tops' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'tops', **sync_opts)[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'cache' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'cache', **sync_opts)[0]
def sync_fileserver(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'fileserver' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'fileserver', **sync_opts)[0]
def sync_clouds(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'clouds' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'clouds', **sync_opts)[0]
def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'roster' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'roster', **sync_opts)[0]
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync; the type string is 'tokens'.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'tokens', **sync_opts)[0]
def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'serializers' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'serializers', **sync_opts)[0]
def sync_executors(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
    # Thin wrapper around extmods.sync for the 'executors' module type.
    sync_opts = dict(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
                     extmod_blacklist=extmod_blacklist)
    return salt.utils.extmods.sync(__opts__, 'executors', **sync_opts)[0]
| true | true |
f73064f7b7a9daf8dccd16ffa7b8b4d1a6869ec6 | 21,364 | py | Python | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 54 | 2017-06-17T14:07:48.000Z | 2022-03-29T02:11:20.000Z | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 19 | 2021-12-28T12:44:55.000Z | 2022-01-13T08:11:28.000Z | tensorflow/python/tpu/device_assignment.py | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 11 | 2018-04-19T22:36:01.000Z | 2021-08-02T08:44:43.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
import enum
import math
from typing import List, Optional, Text, Tuple
import numpy as np
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
# Trivial assignment: one replica with one logical core placed at topology
# coordinates (0, 0, 0, 0) — shape [num_replicas, cores_per_replica, rank].
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in range(core_assignment.shape[0]):
for logical_core in range(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer to use the `DeviceAssignment.build()` helper to construct a
  `DeviceAssignment`; it is easier if less flexible than constructing a
  `DeviceAssignment` directly.
  """

  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical to physical core mapping, represented as a
        rank 3 numpy array. See the description of the `core_assignment`
        property for more details.

    Raises:
      ValueError: If `topology` is not `Topology` object.
      ValueError: If `core_assignment` is not a rank 3 numpy array.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))
    # Normalize the assignment to an int32 ndarray before validating.
    core_assignment = np.asarray(core_assignment, dtype=np.int32)
    self._topology = topology
    if core_assignment.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       f"got shape {core_assignment.shape}")
    self._num_replicas = core_assignment.shape[0]
    self._num_cores_per_replica = core_assignment.shape[1]
    # The last axis holds topology coordinates, so it must match the mesh rank.
    if core_assignment.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "core_assignment.shape[-1] must have size equal to topology "
          f"rank ({topology.mesh_rank}), got "
          f"core_assignment.shape={core_assignment.shape}")
    self._core_assignment = core_assignment
    # Precompute task -> logical core -> sorted replica ids for fast
    # lookup_replicas() queries.
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        self._core_assignment, topology)

  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology

  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica

  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas

  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`. Maps
      (replica, logical core) pairs to physical topology coordinates.
    """
    return self._core_assignment

  def coordinates(self, replica: int, logical_core: int) -> Tuple:  # pylint:disable=g-bare-generic
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])

  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.
    Returns:
      A sorted list of the replicas that are attached to that task and
      logical_core.
    Raises:
      ValueError: If no replica exists in the task which contains the logical
        core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      # Re-raise the missing-key case as a ValueError with both coordinates.
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))

  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_ordinal_at_coordinates(coordinates)

  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[Text] = None) -> Text:
    """Returns the CPU device attached to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)

  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[Text] = None) -> Text:
    """Returns the name of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)

  @staticmethod
  def build(topology: Topology,
            computation_shape: Optional[np.ndarray] = None,
            computation_stride: Optional[np.ndarray] = None,
            num_replicas: int = 1) -> "DeviceAssignment":
    """Builds a `DeviceAssignment`; see `device_assignment()` for details."""
    return device_assignment(topology, computation_shape, computation_stride,
                             num_replicas)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y mesh, with a fixed Z coordinate.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Note that chip 0 is not included in the output.
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_coord: An integer represents the z-coordinate to use for the chips in the
ring.
Returns:
A list of (x,y,z) triples in ring order.
"""
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
z_size: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y by Z mesh.
Constructs the 3d ring from 2d rings that are stacked in the Z dimension and
joined in one corner.
z == 0:
0 -- 1 -- 2 -- 3
| | | |
15 - 6 -- 5 -- 4
| | | |
14 - 7 -- 8 -- 9
| | | |
13 - 12 - 11 - 10
z == 1:
63 - 30 - 29 - 28
| | | |
16 - 25 - 26 - 27
| | | |
17 - 24 - 23 - 22
| | | |
18 - 19 - 20 - 21
z == 2:
62 - 31 - 32 - 33
| | | |
45 - 36 - 35 - 34
| | | |
44 - 37 - 38 - 39
| | | |
43 - 42 - 41 - 40
z == 3:
61 - 60 - 59 - 58
| | | |
46 - 55 - 56 - 57
| | | |
47 - 54 - 53 - 52
| | | |
48 - 49 - 50 - 51
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_size: An integer represents the mesh size in the z-dimension. Must be
larger than 1. For example, in a 4x4x4 mesh, this returns the following
order.
Returns:
A list of (x,y,z) triples in ring order.
"""
# Handle the case where 2 dimensions are size 1.
if x_size == 1 and y_size == 1:
return [(0, 0, i) for i in range(z_size)]
if x_size == 1 and z_size == 1:
return [(0, i, 0) for i in range(y_size)]
if y_size == 1 and z_size == 1:
return [(i, 0, 0) for i in range(x_size)]
# Handle odd mesh dimensions. This never happens in practice, so we don't
# bother to try building something optimal.
if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
y_size % 2 != 0) or (z_size > 1 and
z_size % 2 != 0):
logging.warning("Odd dimension")
ret = []
for z in range(z_size):
for y in range(y_size):
ret.extend((x, y, z) for x in range(x_size))
return ret
# Always start with chip 0.
ret = [(0, 0, 0)]
# Handle the case where one dimension is size 1. We just build a flat, 2d
# ring.
if z_size == 1:
ret.extend(_open_ring_2d(x_size, y_size, 0))
return ret
if y_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
return ret
if x_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
return ret
# Handle the case where all dimensions have size > 1 and even.
ret = [(0, 0, 0)]
for i in range(0, z_size):
r = _open_ring_2d(x_size, y_size, i)
if i % 2 == 0:
ret.extend(r)
else:
ret.extend(reversed(r))
for i in range(z_size - 1, 0, -1):
ret.append((0, 0, i))
return ret
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # AUTO (the default): the library forms rings whenever the topology allows
  # it, otherwise a mesh-style ordering is used (see `device_assignment`).
  AUTO = 0
  # Form rings for replicas and model-parallel cores.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
def device_assignment(
topology: Topology,
computation_shape: Optional[np.ndarray] = None,
computation_stride: Optional[np.ndarray] = None,
num_replicas: int = 1,
device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO
) -> DeviceAssignment:
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology. To
obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor`
here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
device_order_mode: An enum of `DeviceOrderMode` class which indicates
whether to assign devices to form rings or meshes, or let the library to
choose.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError(
f"`topology` is not a Topology object; got {type(topology)}")
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError(
f"computation_shape must have shape [{topology_rank}]; "
f"got {computation_shape.shape}"
)
if computation_stride.shape != (topology_rank,):
raise ValueError(
f"computation_stride must have shape [{topology_rank}]; "
f"got {computation_stride.shape}"
)
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
if topology.missing_devices.size == 0:
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible,
# in order of increasing dimension size. By visiting dimensions
# in increasing size, we assign the most constrained dimension
# first, so we won't make infeasible choices.
#
# As a secondary sort order, visit the last dimension (core index) first,
# then the other dimensions in increasing order. This means we try to use
# both cores on the same chip in preference to two cores on different
# chips. We visit the x dimension first, and the z dimension last, so
# that we prefer to arrange adjacent replicas on the same machine when
# possible.
#
# For example, if num_replicas == 4, we prefer to use a replica_shape of
# (2,1,1,2) over (1,1,2,2).
for x, ni in sorted(((x, ((i + 1) % topology_rank))
for (i, x) in enumerate(replica_counts))):
i = (ni + topology_rank - 1) % topology_rank
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
enable_3d_tiling = (
topology_rank == 4 and
computation_shape[-1] == mesh_shape[-1] # Only handle 3D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
if device_order_mode != DeviceOrderMode.AUTO:
if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
raise ValueError(
"device_order_mode=DeviceOrderMode.RING is not compatible with the "
"3D tiling current topology. Try setting "
"device_order_mode=DeviceOrderMode.AUTO"
)
enable_3d_tiling = device_order_mode == DeviceOrderMode.RING
if enable_3d_tiling:
assignment = []
inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
computation_shape[2])
outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
replica_shape[2])
for replica in range(num_replicas):
outer_x, outer_y, outer_z = outer_ring[replica]
per_replica_assignment = []
for index in range(np.prod(computation_shape)):
inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = outer_z * computation_shape[2] + inner_z
pi = index % mesh_shape[-1]
per_replica_assignment.append([px, py, pz, pi])
assignment.append(per_replica_assignment)
else:
for replica in range(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
# Visit the core number first.
for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
pos.append(t % dim)
t //= dim
replica_pos = np.concatenate([pos[1:], [pos[0]]])
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in range(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
else:
# We have a slice with missing chips. We define a simple assignment by
# ignoring computation stride. This assignment should enable a consistent
# and correct device assignment on degraded slices. It is optimal when
# weights are not sharded. But this device assignment may be sub-optimal for
# other model parallelism scenarios.
assert np.prod(computation_stride) == 1
# Next, we check if we have sufficient devices.
assert num_replicas * np.prod(
computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
# Map replicas to physical devices in task order.
device_coordinates = topology.device_coordinates
assignment = []
devices_per_replica = np.prod(computation_shape)
for rindex in range(num_replicas):
replica_assignment = []
for index in range(devices_per_replica):
logical_id = rindex * devices_per_replica + index
# Pick logical cores in task order
task = logical_id // topology.num_tpus_per_task
device = logical_id % topology.num_tpus_per_task
# Append physical cores to the replica assignment
replica_assignment.append(device_coordinates[task, device, :])
assignment.append(replica_assignment)
return DeviceAssignment(topology, core_assignment=assignment)
| 38.15 | 99 | 0.662236 |
import enum
import math
from typing import List, Optional, Text, Tuple
import numpy as np
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
task_and_cores_to_replicas = {}
for replica in range(core_assignment.shape[0]):
for logical_core in range(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer constructing a `DeviceAssignment` via `device_assignment()`, which
  validates that the requested computation fits the topology, rather than
  calling this constructor directly.
  """

  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical-to-physical core mapping, represented as a
        rank-3 int32 array of shape
        `[num_replicas, num_cores_per_replica, topology.mesh_rank]`.

    Raises:
      ValueError: If `topology` is not a `Topology` object, if
        `core_assignment` is not rank 3, or if its last dimension does not
        equal the topology's mesh rank.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))
    core_assignment = np.asarray(core_assignment, dtype=np.int32)
    self._topology = topology
    if core_assignment.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       f"got shape {core_assignment.shape}")
    self._num_replicas = core_assignment.shape[0]
    self._num_cores_per_replica = core_assignment.shape[1]
    if core_assignment.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "core_assignment.shape[-1] must have size equal to topology "
          f"rank ({topology.mesh_rank}), got "
          f"core_assignment.shape={core_assignment.shape}")
    self._core_assignment = core_assignment
    # Precompute (task, logical core) -> replica ids for lookup_replicas().
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        self._core_assignment, topology)

  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology

  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica

  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas

  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`.
    """
    return self._core_assignment

  def coordinates(self, replica: int, logical_core: int) -> Tuple:
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])

  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.
    Returns:
      A sorted list of the replicas attached to that task and logical core.
    Raises:
      ValueError: If no replica in the task contains the logical core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))

  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_ordinal_at_coordinates(coordinates)

  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[Text] = None) -> Text:
    """Returns the CPU device attached to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)

  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[Text] = None) -> Text:
    """Returns the name of the TPU device assigned to a logical core."""
    coordinates = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)

  @staticmethod
  def build(topology: Topology,
            computation_shape: Optional[np.ndarray] = None,
            computation_stride: Optional[np.ndarray] = None,
            num_replicas: int = 1) -> "DeviceAssignment":
    """Convenience wrapper around the module-level `device_assignment()`."""
    return device_assignment(topology, computation_shape, computation_stride,
                             num_replicas)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
             z_size: int) -> List[Tuple[int, int, int]]:
  """Returns a ring-ordered traversal of an x_size * y_size * z_size mesh.

  Degenerate meshes, where two of the three dimensions are 1, reduce to a
  straight line along the remaining dimension.  Meshes with an odd non-unit
  dimension fall back to a plain raster scan (not a ring).
  """
  # Lines: traverse straight along the single non-unit dimension.
  if x_size == 1 and y_size == 1:
    return [(0, 0, i) for i in range(z_size)]
  if x_size == 1 and z_size == 1:
    return [(0, i, 0) for i in range(y_size)]
  if y_size == 1 and z_size == 1:
    return [(i, 0, 0) for i in range(x_size)]
  # If any non-unit dimension is odd, fall back to a simple raster scan
  # rather than bother to try building something optimal.
  if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
                                          y_size % 2 != 0) or (z_size > 1 and
                                                               z_size % 2 != 0):
    logging.warning("Odd dimension")
    ret = []
    for z in range(z_size):
      for y in range(y_size):
        ret.extend((x, y, z) for x in range(x_size))
    return ret
  # Always start with chip 0.
  ret = [(0, 0, 0)]
  # Handle the case where one dimension is size 1. We just build a flat, 2d
  # ring.
  if z_size == 1:
    ret.extend(_open_ring_2d(x_size, y_size, 0))
    return ret
  if y_size == 1:
    # Build the ring in the x-z plane and swap coordinates back into place.
    ret = [(0, 0, 0)]
    ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
    return ret
  if x_size == 1:
    # Build the ring in the y-z plane and swap coordinates back into place.
    ret = [(0, 0, 0)]
    ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
    return ret
  # Handle the case where all dimensions have size > 1 and even.
  ret = [(0, 0, 0)]
  # Stack open 2d rings layer by layer, alternating the traversal direction
  # on successive z layers.
  for i in range(0, z_size):
    r = _open_ring_2d(x_size, y_size, i)
    if i % 2 == 0:
      ret.extend(r)
    else:
      ret.extend(reversed(r))
  # Walk back down the z column toward the starting chip.
  for i in range(z_size - 1, 0, -1):
    ret.append((0, 0, i))
  return ret
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # By default the mode is set to AUTO, the library will choose to form rings
  # when that is possible.
  AUTO = 0
  # Form rings for replicas and model-parallel cores.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
def device_assignment(
    topology: Topology,
    computation_shape: Optional[np.ndarray] = None,
    computation_stride: Optional[np.ndarray] = None,
    num_replicas: int = 1,
    device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO
) -> DeviceAssignment:
  """Computes a device_assignment of a computation across a TPU topology.

  Args:
    topology: A `Topology` object that describes the TPU cluster topology, or
      a serialized topology proto (`bytes`).
    computation_shape: A rank 1 int32 numpy array with size equal to the
      topology rank, describing the shape of the computation's block of cores.
      If None, the `computation_shape` is `[1] * topology_rank`.
    computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
      describing the inter-core spacing of the `computation_shape` cores in
      the TPU topology. If None, the `computation_stride` is
      `[1] * topology_rank`.
    num_replicas: The number of computation replicas to run. The replicas will
      be packed into the free spaces of the topology.
    device_order_mode: A `DeviceOrderMode` indicating whether to assign
      devices in a ring or mesh order (or pick automatically).

  Returns:
    A `DeviceAssignment` mapping the logical cores of each replica to
    physical cores in the topology.

  Raises:
    ValueError: If `topology` is not a valid `Topology` object, if
      `computation_shape` / `computation_stride` have the wrong shape or
      non-positive entries, or if the replicas cannot fit into the topology.
  """
  # Deserialize the Topology proto, if it is a string.
  if isinstance(topology, bytes):
    topology = Topology(serialized=topology)
  if not isinstance(topology, Topology):
    raise ValueError(
        f"`topology` is not a Topology object; got {type(topology)}")
  topology_rank = len(topology.mesh_shape)
  mesh_shape = topology.mesh_shape
  if computation_shape is None:
    computation_shape = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_shape = np.asarray(computation_shape, dtype=np.int32)
  if computation_stride is None:
    computation_stride = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_stride = np.asarray(computation_stride, dtype=np.int32)
  if computation_shape.shape != (topology_rank,):
    raise ValueError(
        f"computation_shape must have shape [{topology_rank}]; "
        f"got {computation_shape.shape}"
    )
  if computation_stride.shape != (topology_rank,):
    raise ValueError(
        f"computation_stride must have shape [{topology_rank}]; "
        f"got {computation_stride.shape}"
    )
  if any(computation_shape < 1):
    raise ValueError(
        "computation_shape must be positive; got computation_shape={}".format(
            computation_shape))
  if any(computation_stride < 1):
    raise ValueError(
        "computation_stride must be positive; got computation_stride={}".format(
            computation_stride))
  # Computes the physical size of one computation instance.
  computation_footprint = computation_shape * computation_stride
  if any(computation_footprint > mesh_shape):
    raise ValueError(
        "computation footprint {} does not fit in TPU topology shape {}".format(
            computation_footprint, mesh_shape))
  # Computes how many copies of the computation footprint fit in the mesh.
  block_counts = mesh_shape // computation_footprint
  replica_counts = block_counts * computation_stride
  max_replicas = np.prod(replica_counts)
  if num_replicas > max_replicas:
    raise ValueError(
        "requested {} replicas but only {} replicas with shape {} and "
        "computation_stride {} fit in a TPU mesh of shape {}".format(
            num_replicas, max_replicas, computation_shape, computation_stride,
            mesh_shape))
  def ceil_of_ratio(n, m):
    # Ceiling division for positive m.
    return (n + m - 1) // m
  if topology.missing_devices.size == 0:
    replica_shape = [0] * topology_rank
    if num_replicas > 0:
      remaining_replicas = num_replicas
      remaining_dims = topology_rank
      # Choose dimensions as close to an equal cube as possible,
      # in order of increasing dimension size. By visiting dimensions
      # in increasing size, we assign the most constrained dimension
      # first, so we won't make infeasible choices.
      #
      # As a secondary sort order, visit the last dimension (core index) first,
      # then the other dimensions in increasing order. This means we try to use
      # both cores on the same chip in preference to two cores on different
      # chips. We visit the x dimension first, and the z dimension last, so
      # that we prefer to arrange adjacent replicas on the same machine when
      # possible.
      #
      # For example, if num_replicas == 4, we prefer to use a replica_shape of
      # (2,1,1,2) over (1,1,2,2).
      for x, ni in sorted(((x, ((i + 1) % topology_rank))
                           for (i, x) in enumerate(replica_counts))):
        i = (ni + topology_rank - 1) % topology_rank
        target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
        replica_shape[i] = min(target_size, x)
        remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
        remaining_dims -= 1
      assert remaining_replicas == 1 and remaining_dims == 0
    # Assigns an offset to each replica such that no two replicas overlap.
    replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
    enable_3d_tiling = (
        topology_rank == 4 and
        computation_shape[-1] == mesh_shape[-1]  # Only handle 3D case.
        and np.prod(computation_stride) == 1  # Ensure no stride.
        and num_replicas == max_replicas)  # Full replication.
    if device_order_mode != DeviceOrderMode.AUTO:
      if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
        raise ValueError(
            "device_order_mode=DeviceOrderMode.RING is not compatible with the "
            "3D tiling current topology. Try setting "
            "device_order_mode=DeviceOrderMode.AUTO"
        )
      enable_3d_tiling = device_order_mode == DeviceOrderMode.RING
    if enable_3d_tiling:
      # Form nested rings: an outer ring over replica blocks and an inner
      # ring over the cores within each replica.
      assignment = []
      inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
                            computation_shape[2])
      outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
                            replica_shape[2])
      for replica in range(num_replicas):
        outer_x, outer_y, outer_z = outer_ring[replica]
        per_replica_assignment = []
        for index in range(np.prod(computation_shape)):
          inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
          px = outer_x * computation_shape[0] + inner_x
          py = outer_y * computation_shape[1] + inner_y
          pz = outer_z * computation_shape[2] + inner_z
          pi = index % mesh_shape[-1]
          per_replica_assignment.append([px, py, pz, pi])
        assignment.append(per_replica_assignment)
    else:
      for replica in range(num_replicas):
        # Chooses a replica number in each axis.
        t = replica
        pos = []
        # Visit the core number first.
        for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
          pos.append(t % dim)
          t //= dim
        replica_pos = np.concatenate([pos[1:], [pos[0]]])
        # Determines where that replica starts in each axis.
        outer = replica_pos // computation_stride
        inner = replica_pos % computation_stride
        replica_offsets[replica, :] = outer * computation_footprint + inner
      # Computes a logical core -> physical core mapping for each replica.
      indices = [
          np.arange(0, computation_shape[i] * computation_stride[i],
                    computation_stride[i]) for i in range(topology_rank)
      ]
      indices = np.concatenate(
          [i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
          axis=-1)
      indices = indices.reshape((-1, topology_rank))
      assignment = indices + replica_offsets[:, np.newaxis, :]
  else:
    # We have a slice with missing chips. We define a simple assignment by
    # ignoring computation stride. This assignment should enable a consistent
    # and correct device assignment on degraded slices. It is optimal when
    # weights are not sharded. But this device assignment may be sub-optimal
    # for other model parallelism scenarios.
    assert np.prod(computation_stride) == 1
    # Next, we check if we have sufficient devices.
    assert num_replicas * np.prod(
        computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
    # Map replicas to physical devices in task order.
    device_coordinates = topology.device_coordinates
    assignment = []
    devices_per_replica = np.prod(computation_shape)
    for rindex in range(num_replicas):
      replica_assignment = []
      for index in range(devices_per_replica):
        logical_id = rindex * devices_per_replica + index
        # Pick logical cores in task order.
        task = logical_id // topology.num_tpus_per_task
        device = logical_id % topology.num_tpus_per_task
        # Append physical cores to the replica assignment.
        replica_assignment.append(device_coordinates[task, device, :])
      assignment.append(replica_assignment)
  return DeviceAssignment(topology, core_assignment=assignment)
| true | true |
f730660ea29c7c979e17a2993b7cf5dd2dd52d57 | 7,981 | py | Python | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | tests/unit/streamalert/alert_processor/outputs/test_output_base.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=abstract-class-instantiated,protected-access,attribute-defined-outside-init
from mock import Mock, patch, MagicMock
from moto import mock_kms, mock_ssm
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_not_none,
assert_is_none,
assert_count_equal
)
from requests.exceptions import Timeout as ReqTimeout
from streamalert.alert_processor.outputs.output_base import (
OutputDispatcher,
OutputProperty,
OutputRequestFailure,
StreamAlertOutput
)
from streamalert.alert_processor.outputs.aws import S3Output
from tests.unit.streamalert.alert_processor import (
CONFIG,
KMS_ALIAS,
MOCK_ENV,
REGION,
PREFIX
)
from tests.unit.streamalert.alert_processor.helpers import (
put_mock_ssm_parameters
)
def test_output_property_default():
    """OutputProperty defaults"""
    prop = OutputProperty()
    # Each attribute of a default-constructed OutputProperty has a fixed value
    expected_defaults = [
        ('description', ''),
        ('value', ''),
        ('input_restrictions', {' ', ':'}),
        ('mask_input', False),
        ('cred_requirement', False),
    ]
    for attribute, expected in expected_defaults:
        assert_equal(getattr(prop, attribute), expected)
def test_get_dispatcher_good():
    """StreamAlertOutput - Get Valid Dispatcher"""
    # 'aws-s3' is a registered output service, so a dispatcher class exists
    assert_is_not_none(StreamAlertOutput.get_dispatcher('aws-s3'))
@patch('logging.Logger.error')
def test_get_dispatcher_bad(log_mock):
    """StreamAlertOutput - Get Invalid Dispatcher"""
    # An unknown service name yields None and logs an error message
    assert_is_none(StreamAlertOutput.get_dispatcher('aws-s4'))
    log_mock.assert_called_with('Designated output service [%s] does not exist', 'aws-s4')
@patch.dict('os.environ', MOCK_ENV)
def test_create_dispatcher():
    """StreamAlertOutput - Create Dispatcher"""
    created = StreamAlertOutput.create_dispatcher('aws-s3', CONFIG)
    assert_is_instance(created, S3Output)
def test_user_defined_properties():
    """OutputDispatcher - User Defined Properties"""
    for output in StreamAlertOutput.get_all_outputs().values():
        # The user defined properties should at a minimum contain a descriptor
        assert_is_not_none(output.get_user_defined_properties().get('descriptor'))
def test_output_loading():
    """OutputDispatcher - Loading Output Classes"""
    # Add new outputs to this list to make sure they're loaded properly
    expected_outputs = {
        'aws-cloudwatch-log',
        'aws-firehose',
        'aws-lambda',
        'aws-s3',
        'aws-ses',
        'aws-sns',
        'aws-sqs',
        'carbonblack',
        'demisto',
        'github',
        'jira',
        'komand',
        'pagerduty',
        'pagerduty-incident',
        'pagerduty-v2',
        'phantom',
        'slack',
        'teams',
    }
    assert_count_equal(set(StreamAlertOutput.get_all_outputs()), expected_outputs)
@patch.object(OutputDispatcher, '__service__', 'test_service')
class TestOutputDispatcher:
    """Test class for OutputDispatcher"""

    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch.dict('os.environ', MOCK_ENV)
    def setup(self):
        """Setup before each method"""
        # Clearing __abstractmethods__ allows the abstract OutputDispatcher
        # base to be instantiated directly for these tests.
        self._dispatcher = OutputDispatcher(CONFIG)
        self._descriptor = 'desc_test'

    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch('streamalert.alert_processor.outputs.output_base.OutputCredentialsProvider')
    def test_credentials_provider(self, provider_constructor):
        """OutputDispatcher - Constructor"""
        provider = MagicMock()
        provider_constructor.return_value = provider
        _ = OutputDispatcher(CONFIG)
        # The constructor should build a credentials provider for the service
        provider_constructor.assert_called_with('test_service',
                                                config=CONFIG, defaults=None, region=REGION)
        assert_equal(self._dispatcher._credentials_provider._service_name, 'test_service')

    @patch('logging.Logger.info')
    def test_log_status_success(self, log_mock):
        """OutputDispatcher - Log status success"""
        self._dispatcher._log_status(True, self._descriptor)
        log_mock.assert_called_with('Successfully sent alert to %s:%s',
                                    'test_service', self._descriptor)

    @patch('logging.Logger.error')
    def test_log_status_failed(self, log_mock):
        """OutputDispatcher - Log status failed"""
        self._dispatcher._log_status(False, self._descriptor)
        log_mock.assert_called_with('Failed to send alert to %s:%s',
                                    'test_service', self._descriptor)

    @patch('requests.Response')
    def test_check_http_response(self, mock_response):
        """OutputDispatcher - Check HTTP Response"""
        # Test with a good response code
        mock_response.status_code = 200
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, True)
        # Test with a bad response code
        mock_response.status_code = 440
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, False)

    @mock_ssm
    @mock_kms
    def test_load_creds(self):
        """OutputDispatcher - Load Credentials"""
        param_name = '/{}/streamalert/outputs/test_service/desc_test'.format(PREFIX)
        creds = {
            'url': 'http://www.foo.bar/test',
            'token': 'token_to_encrypt'
        }
        put_mock_ssm_parameters(param_name, creds, KMS_ALIAS, region=REGION)
        loaded_creds = self._dispatcher._load_creds(self._descriptor)
        assert_is_not_none(loaded_creds)
        assert_equal(len(loaded_creds), 2)
        assert_equal(loaded_creds['url'], creds['url'])
        assert_equal(loaded_creds['token'], creds['token'])

    def test_format_output_config(self):
        """OutputDispatcher - Format Output Config"""
        with patch.object(OutputDispatcher, '__service__', 'slack'):
            props = {'descriptor': OutputProperty('test_desc', 'test_channel')}
            formatted = self._dispatcher.format_output_config(CONFIG, props)
            assert_equal(len(formatted), 2)
            assert_equal(formatted[0], 'unit_test_channel')
            assert_equal(formatted[1], 'test_channel')

    # NOTE(review): (ValueError) below is a bare class, not a 1-tuple; this
    # relies on _catch_exceptions() accepting a non-tuple return value.
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=(ValueError)))
    def test_catch_exceptions_non_default(self):
        """OutputDispatcher - Catch Non Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError))

    @patch.object(OutputDispatcher,
                  '_get_exceptions_to_catch', Mock(return_value=(ValueError, TypeError)))
    def test_catch_exceptions_non_default_tuple(self):
        """OutputDispatcher - Catch Non Default Exceptions Tuple"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError, TypeError))

    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=()))
    def test_catch_exceptions_default(self):
        """OutputDispatcher - Catch Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout))
from mock import Mock, patch, MagicMock
from moto import mock_kms, mock_ssm
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_not_none,
assert_is_none,
assert_count_equal
)
from requests.exceptions import Timeout as ReqTimeout
from streamalert.alert_processor.outputs.output_base import (
OutputDispatcher,
OutputProperty,
OutputRequestFailure,
StreamAlertOutput
)
from streamalert.alert_processor.outputs.aws import S3Output
from tests.unit.streamalert.alert_processor import (
CONFIG,
KMS_ALIAS,
MOCK_ENV,
REGION,
PREFIX
)
from tests.unit.streamalert.alert_processor.helpers import (
put_mock_ssm_parameters
)
def test_output_property_default():
    """OutputProperty defaults"""
    prop = OutputProperty()
    assert_equal(prop.description, '')
    assert_equal(prop.value, '')
    assert_equal(prop.input_restrictions, {' ', ':'})
    assert_equal(prop.mask_input, False)
    assert_equal(prop.cred_requirement, False)
def test_get_dispatcher_good():
    """StreamAlertOutput - Get Valid Dispatcher"""
    dispatcher = StreamAlertOutput.get_dispatcher('aws-s3')
    assert_is_not_none(dispatcher)
@patch('logging.Logger.error')
def test_get_dispatcher_bad(log_mock):
    """StreamAlertOutput - Get Invalid Dispatcher"""
    dispatcher = StreamAlertOutput.get_dispatcher('aws-s4')
    assert_is_none(dispatcher)
    log_mock.assert_called_with('Designated output service [%s] does not exist', 'aws-s4')
@patch.dict('os.environ', MOCK_ENV)
def test_create_dispatcher():
    """StreamAlertOutput - Create Dispatcher"""
    dispatcher = StreamAlertOutput.create_dispatcher('aws-s3', CONFIG)
    assert_is_instance(dispatcher, S3Output)
def test_user_defined_properties():
    """OutputDispatcher - User Defined Properties"""
    for output in list(StreamAlertOutput.get_all_outputs().values()):
        props = output.get_user_defined_properties()
        # The user defined properties should at a minimum contain a descriptor
        assert_is_not_none(props.get('descriptor'))
def test_output_loading():
    """OutputDispatcher - Loading Output Classes"""
    loaded_outputs = set(StreamAlertOutput.get_all_outputs())
    # Add new outputs to this list to make sure they're loaded properly
    expected_outputs = {
        'aws-firehose',
        'aws-lambda',
        'aws-s3',
        'aws-ses',
        'aws-sns',
        'aws-sqs',
        'aws-cloudwatch-log',
        'carbonblack',
        'demisto',
        'github',
        'jira',
        'komand',
        'pagerduty',
        'pagerduty-v2',
        'pagerduty-incident',
        'phantom',
        'slack',
        'teams'
    }
    assert_count_equal(loaded_outputs, expected_outputs)
@patch.object(OutputDispatcher, '__service__', 'test_service')
class TestOutputDispatcher:
    """Test class for OutputDispatcher"""

    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch.dict('os.environ', MOCK_ENV)
    def setup(self):
        """Setup before each method"""
        # Clearing __abstractmethods__ allows the abstract OutputDispatcher
        # base to be instantiated directly for these tests.
        self._dispatcher = OutputDispatcher(CONFIG)
        self._descriptor = 'desc_test'

    @patch.object(OutputDispatcher, '__service__', 'test_service')
    @patch.object(OutputDispatcher, '__abstractmethods__', frozenset())
    @patch('streamalert.alert_processor.outputs.output_base.OutputCredentialsProvider')
    def test_credentials_provider(self, provider_constructor):
        """OutputDispatcher - Constructor"""
        provider = MagicMock()
        provider_constructor.return_value = provider
        _ = OutputDispatcher(CONFIG)
        provider_constructor.assert_called_with('test_service',
                                                config=CONFIG, defaults=None, region=REGION)
        assert_equal(self._dispatcher._credentials_provider._service_name, 'test_service')

    @patch('logging.Logger.info')
    def test_log_status_success(self, log_mock):
        """OutputDispatcher - Log status success"""
        self._dispatcher._log_status(True, self._descriptor)
        log_mock.assert_called_with('Successfully sent alert to %s:%s',
                                    'test_service', self._descriptor)

    @patch('logging.Logger.error')
    def test_log_status_failed(self, log_mock):
        """OutputDispatcher - Log status failed"""
        self._dispatcher._log_status(False, self._descriptor)
        log_mock.assert_called_with('Failed to send alert to %s:%s',
                                    'test_service', self._descriptor)

    @patch('requests.Response')
    def test_check_http_response(self, mock_response):
        """OutputDispatcher - Check HTTP Response"""
        # Test with a good response code
        mock_response.status_code = 200
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, True)
        # Test with a bad response code
        mock_response.status_code = 440
        result = self._dispatcher._check_http_response(mock_response)
        assert_equal(result, False)

    @mock_ssm
    @mock_kms
    def test_load_creds(self):
        """OutputDispatcher - Load Credentials"""
        param_name = '/{}/streamalert/outputs/test_service/desc_test'.format(PREFIX)
        creds = {
            'url': 'http://www.foo.bar/test',
            'token': 'token_to_encrypt'
        }
        put_mock_ssm_parameters(param_name, creds, KMS_ALIAS, region=REGION)
        loaded_creds = self._dispatcher._load_creds(self._descriptor)
        assert_is_not_none(loaded_creds)
        assert_equal(len(loaded_creds), 2)
        assert_equal(loaded_creds['url'], creds['url'])
        assert_equal(loaded_creds['token'], creds['token'])

    def test_format_output_config(self):
        """OutputDispatcher - Format Output Config"""
        with patch.object(OutputDispatcher, '__service__', 'slack'):
            props = {'descriptor': OutputProperty('test_desc', 'test_channel')}
            formatted = self._dispatcher.format_output_config(CONFIG, props)
            assert_equal(len(formatted), 2)
            assert_equal(formatted[0], 'unit_test_channel')
            assert_equal(formatted[1], 'test_channel')

    # NOTE(review): (ValueError) below is a bare class, not a 1-tuple; this
    # relies on _catch_exceptions() accepting a non-tuple return value.
    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=(ValueError)))
    def test_catch_exceptions_non_default(self):
        """OutputDispatcher - Catch Non Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError))

    @patch.object(OutputDispatcher,
                  '_get_exceptions_to_catch', Mock(return_value=(ValueError, TypeError)))
    def test_catch_exceptions_non_default_tuple(self):
        """OutputDispatcher - Catch Non Default Exceptions Tuple"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout, ValueError, TypeError))

    @patch.object(OutputDispatcher, '_get_exceptions_to_catch', Mock(return_value=()))
    def test_catch_exceptions_default(self):
        """OutputDispatcher - Catch Default Exceptions"""
        exceptions = self._dispatcher._catch_exceptions()
        assert_equal(exceptions, (OutputRequestFailure, ReqTimeout))
| true | true |
f7306785f709b39146aac848874a28763d800c0c | 20,872 | py | Python | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/test_framework/script.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Money developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-bitcoinlib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
# Python 2/3 compatibility shims: byte-wise chr/ord helpers and the long type.
bchr = chr
bord = ord
# Use sys.version_info rather than a lexicographic comparison of sys.version,
# which is fragile (string comparison of version text).
if sys.version_info[0] >= 3:
    # On Python 3, indexing bytes yields ints and single bytes must be built
    # explicitly, so redefine the helpers accordingly.
    long = int
    bchr = lambda x: bytes([x])
    bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
    """Return RIPEMD160(sha256(s)) of the byte string s."""
    ripemd = hashlib.new('ripemd160')
    ripemd.update(sha256(s))
    return ripemd.digest()
_opcode_instances = []
class CScriptOp(int):
    """A single script opcode"""
    # Instances are interned (see __new__): CScriptOp(n) always returns the
    # same shared object for a given n.
    __slots__ = []

    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op, returning bytes"""
        # Pick the shortest encoding for the payload length: a direct push
        # (< 0x4c), or OP_PUSHDATA1/2/4 with a 1/2/4-byte little-endian length.
        if len(d) < 0x4c:
            return b'' + bchr(len(d)) + d # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")

    @staticmethod
    def encode_op_n(n):
        """Encode a small integer op, returning an opcode"""
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n-1)

    def decode_op_n(self):
        """Decode a small integer opcode, returning an integer"""
        if self == OP_0:
            return 0
        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)
        return int(self - OP_1+1)

    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        # OP_0 (0x00) and OP_1..OP_16 (0x51..0x60) push small integers.
        if 0x51 <= self <= 0x60 or self == 0:
            return True
        else:
            return False

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Prefer the canonical opcode name when one is registered.
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self

    def __new__(cls, n):
        # Intern one instance per opcode value. Relies on opcodes being
        # created in ascending order by the population loop after the class.
        try:
            return _opcode_instances[n]
        except IndexError:
            assert len(_opcode_instances) == n
            _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
            return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
    CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
# Human-readable names used by CScriptOp.__repr__.
OPCODE_NAMES.update({
    OP_0 : 'OP_0',
    OP_PUSHDATA1 : 'OP_PUSHDATA1',
    OP_PUSHDATA2 : 'OP_PUSHDATA2',
    OP_PUSHDATA4 : 'OP_PUSHDATA4',
    OP_1NEGATE : 'OP_1NEGATE',
    OP_RESERVED : 'OP_RESERVED',
    OP_1 : 'OP_1',
    OP_2 : 'OP_2',
    OP_3 : 'OP_3',
    OP_4 : 'OP_4',
    OP_5 : 'OP_5',
    OP_6 : 'OP_6',
    OP_7 : 'OP_7',
    OP_8 : 'OP_8',
    OP_9 : 'OP_9',
    OP_10 : 'OP_10',
    OP_11 : 'OP_11',
    OP_12 : 'OP_12',
    OP_13 : 'OP_13',
    OP_14 : 'OP_14',
    OP_15 : 'OP_15',
    OP_16 : 'OP_16',
    OP_NOP : 'OP_NOP',
    OP_VER : 'OP_VER',
    OP_IF : 'OP_IF',
    OP_NOTIF : 'OP_NOTIF',
    OP_VERIF : 'OP_VERIF',
    OP_VERNOTIF : 'OP_VERNOTIF',
    OP_ELSE : 'OP_ELSE',
    OP_ENDIF : 'OP_ENDIF',
    OP_VERIFY : 'OP_VERIFY',
    OP_RETURN : 'OP_RETURN',
    OP_TOALTSTACK : 'OP_TOALTSTACK',
    OP_FROMALTSTACK : 'OP_FROMALTSTACK',
    OP_2DROP : 'OP_2DROP',
    OP_2DUP : 'OP_2DUP',
    OP_3DUP : 'OP_3DUP',
    OP_2OVER : 'OP_2OVER',
    OP_2ROT : 'OP_2ROT',
    OP_2SWAP : 'OP_2SWAP',
    OP_IFDUP : 'OP_IFDUP',
    OP_DEPTH : 'OP_DEPTH',
    OP_DROP : 'OP_DROP',
    OP_DUP : 'OP_DUP',
    OP_NIP : 'OP_NIP',
    OP_OVER : 'OP_OVER',
    OP_PICK : 'OP_PICK',
    OP_ROLL : 'OP_ROLL',
    OP_ROT : 'OP_ROT',
    OP_SWAP : 'OP_SWAP',
    OP_TUCK : 'OP_TUCK',
    OP_CAT : 'OP_CAT',
    OP_SUBSTR : 'OP_SUBSTR',
    OP_LEFT : 'OP_LEFT',
    OP_RIGHT : 'OP_RIGHT',
    OP_SIZE : 'OP_SIZE',
    OP_INVERT : 'OP_INVERT',
    OP_AND : 'OP_AND',
    OP_OR : 'OP_OR',
    OP_XOR : 'OP_XOR',
    OP_EQUAL : 'OP_EQUAL',
    OP_EQUALVERIFY : 'OP_EQUALVERIFY',
    OP_RESERVED1 : 'OP_RESERVED1',
    OP_RESERVED2 : 'OP_RESERVED2',
    OP_1ADD : 'OP_1ADD',
    OP_1SUB : 'OP_1SUB',
    OP_2MUL : 'OP_2MUL',
    OP_2DIV : 'OP_2DIV',
    OP_NEGATE : 'OP_NEGATE',
    OP_ABS : 'OP_ABS',
    OP_NOT : 'OP_NOT',
    OP_0NOTEQUAL : 'OP_0NOTEQUAL',
    OP_ADD : 'OP_ADD',
    OP_SUB : 'OP_SUB',
    OP_MUL : 'OP_MUL',
    OP_DIV : 'OP_DIV',
    OP_MOD : 'OP_MOD',
    OP_LSHIFT : 'OP_LSHIFT',
    OP_RSHIFT : 'OP_RSHIFT',
    OP_BOOLAND : 'OP_BOOLAND',
    OP_BOOLOR : 'OP_BOOLOR',
    OP_NUMEQUAL : 'OP_NUMEQUAL',
    OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
    OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
    OP_LESSTHAN : 'OP_LESSTHAN',
    OP_GREATERTHAN : 'OP_GREATERTHAN',
    OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
    OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
    OP_MIN : 'OP_MIN',
    OP_MAX : 'OP_MAX',
    OP_WITHIN : 'OP_WITHIN',
    OP_RIPEMD160 : 'OP_RIPEMD160',
    OP_SHA1 : 'OP_SHA1',
    OP_SHA256 : 'OP_SHA256',
    OP_HASH160 : 'OP_HASH160',
    OP_HASH256 : 'OP_HASH256',
    OP_CODESEPARATOR : 'OP_CODESEPARATOR',
    OP_CHECKSIG : 'OP_CHECKSIG',
    OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
    OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
    OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
    OP_NOP1 : 'OP_NOP1',
    OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
    OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
    OP_NOP4 : 'OP_NOP4',
    OP_NOP5 : 'OP_NOP5',
    OP_NOP6 : 'OP_NOP6',
    OP_NOP7 : 'OP_NOP7',
    OP_NOP8 : 'OP_NOP8',
    OP_NOP9 : 'OP_NOP9',
    OP_NOP10 : 'OP_NOP10',
    OP_SMALLINTEGER : 'OP_SMALLINTEGER',
    OP_PUBKEYS : 'OP_PUBKEYS',
    OP_PUBKEYHASH : 'OP_PUBKEYHASH',
    OP_PUBKEY : 'OP_PUBKEY',
    OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
    """Base class for CScript exceptions"""
class CScriptTruncatedPushDataError(CScriptInvalidError):
    """Invalid pushdata due to truncation"""
    def __init__(self, msg, data):
        # Keep the bytes that were read before truncation so callers can
        # inspect the partial push.
        super(CScriptTruncatedPushDataError, self).__init__(msg)
        self.data = data
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum():
    """Wrapper holding an integer to be serialized in script number format."""

    def __init__(self, d=0):
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize obj.value as a length-prefixed little-endian
        sign-and-magnitude byte string (zero encodes as empty bytes)."""
        value = obj.value
        if value == 0:
            return b''
        negative = value < 0
        magnitude = -value if negative else value
        payload = bytearray()
        while magnitude:
            payload.append(magnitude & 0xff)
            magnitude >>= 8
        if payload[-1] & 0x80:
            # Top bit of the last byte is the sign flag, so an extra byte
            # is needed to keep the magnitude intact.
            payload.append(0x80 if negative else 0)
        elif negative:
            payload[-1] |= 0x80
        return bytes(bchr(len(payload)) + payload)
class CScript(bytes):
    """Serialized script
    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.
    iter(script) however does iterate by opcode.
    """
    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes
        if isinstance(other, CScriptOp):
            # A bare opcode becomes its single opcode byte.
            other = bchr(other)
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                other = bchr(CScriptOp(OP_0))
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # Small ints use the dedicated OP_0..OP_16 opcodes.
                other = bytes(bchr(CScriptOp.encode_op_n(other)))
            elif other == -1:
                other = bytes(bchr(OP_1NEGATE))
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            # Raw bytes become a PUSHDATA of the appropriate size.
            other = CScriptOp.encode_op_pushdata(other)
        return other
    def __add__(self, other):
        # Do the coercion outside of the try block so that errors in it are
        # noticed.
        other = self.__coerce_instance(other)
        try:
            # bytes.__add__ always returns bytes instances unfortunately
            return CScript(super(CScript, self).__add__(other))
        except TypeError:
            raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError
    def __new__(cls, value=b''):
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super(CScript, cls).__new__(cls, value)
        else:
            # Any other iterable: coerce each element to bytes first.
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
    def raw_iter(self):
        """Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)
        Raises CScriptInvalidError / CScriptTruncatedPushDataError on
        malformed pushes.
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = bord(self[i])
            i += 1
            if opcode > OP_PUSHDATA4:
                # Non-push opcode: no associated data.
                yield (opcode, None, sop_idx)
            else:
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Opcodes below OP_PUSHDATA1 encode the length directly.
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = bord(self[i])
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    # 2-byte little-endian length
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    # 4-byte little-endian length
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
                    i += 4
                else:
                    assert False # shouldn't happen
                data = bytes(self[i:i+datasize])
                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize
                yield (opcode, data, sop_idx)
    def __iter__(self):
        """'Cooked' iteration
        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.
        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)
                if opcode.is_small_int():
                    # OP_0..OP_16 are yielded as plain ints.
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)
    def __repr__(self):
        def _repr(o):
            if isinstance(o, bytes):
                return "x('%s')" % hexlify(o).decode('ascii')
            else:
                return repr(o)
        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                # Show the partial data along with the error, then stop.
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                if op is not None:
                    ops.append(op)
        return "CScript([%s])" % ', '.join(ops)
    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.
        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    # Accurate mode: the preceding OP_N gives the key count.
                    n += opcode.decode_op_n()
                else:
                    n += 20
            lastOpcode = opcode
        return n
# Signature hash types; the low 5 bits select the mode and
# SIGHASH_ANYONECANPAY is OR'd on top as a flag.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase

    Return a new CScript with opcode spans matching the byte sequence *sig*
    (checked at each opcode boundary) removed from *script*.
    """
    r = b''
    last_sop_idx = sop_idx = 0
    # Nothing precedes the first opcode, so start in the "skip" state.
    skip = True
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            # Copy the previous opcode span only if it did not match sig.
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    if not skip:
        # Copy the trailing span after the last opcode boundary.
        r += script[last_sop_idx:]
    return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
    """Consensus-correct SignatureHash
    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
    """
    # Value returned (with an error string) for out-of-range indices,
    # matching the historical "hash of one" behavior.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    txtmp = CTransaction(txTo)
    # Blank all scriptSigs, then set the signed input's to the script with
    # any OP_CODESEPARATORs removed.
    for txin in txtmp.vin:
        txin.scriptSig = b''
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # Sign no outputs; other inputs' sequence numbers are blanked.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        # Sign only the output with the same index as this input.
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        # Earlier outputs are replaced by blanked placeholders.
        for i in range(outIdx):
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # Commit to only this input.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    s = txtmp.serialize_without_witness()
    s += struct.pack(b"<I", hashtype)
    hash = hash256(s)
    return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
    """Signature hash for version 0 witness programs (sigversion == 1).

    Commits to hashes of the prevouts, sequences, and outputs (subject to
    the hashtype flags) plus the spent output's *amount*.
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    if not (hashtype & SIGHASH_ANYONECANPAY):
        # Commit to all input prevouts.
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to all input sequence numbers (SIGHASH_ALL only).
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to all outputs.
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        # SIGHASH_SINGLE: commit only to the output matching this input.
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    # Assemble the preimage and double-SHA256 it.
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
| 30.118326 | 146 | 0.613549 |
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_ELEMENT_SIZE = 520
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
for n in range(0xff+1):
CScriptOp(n)
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
class CScriptNum():
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
class CScript(bytes):
@classmethod
def __coerce_instance(cls, other):
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
other = self.__coerce_instance(other)
try:
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
| true | true |
f730697874d74284e3f36d2b3355caa3e355cf54 | 2,770 | py | Python | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | mwparserfromhell/nodes/wikilink.py | valhallasw/mwparserfromhell | 1607687c37c1b1e7c0c83a39d7803707665151ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
"""Represents an internal wikilink, like ``[[Foo|Bar]]``."""
def __init__(self, title, text=None):
super(Wikilink, self).__init__()
self._title = title
self._text = text
def __unicode__(self):
if self.text is not None:
return "[[" + str(self.title) + "|" + str(self.text) + "]]"
return "[[" + str(self.title) + "]]"
def __children__(self):
yield self.title
if self.text is not None:
yield self.text
def __strip__(self, normalize, collapse):
if self.text is not None:
return self.text.strip_code(normalize, collapse)
return self.title.strip_code(normalize, collapse)
def __showtree__(self, write, get, mark):
write("[[")
get(self.title)
if self.text is not None:
write(" | ")
mark()
get(self.text)
write("]]")
@property
def title(self):
"""The title of the linked page, as a :py:class:`~.Wikicode` object."""
return self._title
@property
def text(self):
"""The text to display (if any), as a :py:class:`~.Wikicode` object."""
return self._text
@title.setter
def title(self, value):
self._title = parse_anything(value)
@text.setter
def text(self, value):
if value is None:
self._text = None
else:
self._text = parse_anything(value)
| 33.373494 | 79 | 0.654152 |
from __future__ import unicode_literals
from . import Node
from ..compat import str
from ..utils import parse_anything
__all__ = ["Wikilink"]
class Wikilink(Node):
def __init__(self, title, text=None):
super(Wikilink, self).__init__()
self._title = title
self._text = text
def __unicode__(self):
if self.text is not None:
return "[[" + str(self.title) + "|" + str(self.text) + "]]"
return "[[" + str(self.title) + "]]"
def __children__(self):
yield self.title
if self.text is not None:
yield self.text
def __strip__(self, normalize, collapse):
if self.text is not None:
return self.text.strip_code(normalize, collapse)
return self.title.strip_code(normalize, collapse)
def __showtree__(self, write, get, mark):
write("[[")
get(self.title)
if self.text is not None:
write(" | ")
mark()
get(self.text)
write("]]")
@property
def title(self):
return self._title
@property
def text(self):
return self._text
@title.setter
def title(self, value):
self._title = parse_anything(value)
@text.setter
def text(self, value):
if value is None:
self._text = None
else:
self._text = parse_anything(value)
| true | true |
f7306a936639c0c25548020fae55cc2780443200 | 935 | py | Python | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | 2 | 2021-05-21T10:57:44.000Z | 2021-05-29T17:02:54.000Z | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | null | null | null | wus_feats_predict.py | FloodCamML/FloodCam-WUSFeats | b58c3f3770bebebf078be4e3804f12f3512b5569 | [
"MIT"
] | 1 | 2021-05-28T12:45:38.000Z | 2021-05-28T12:45:38.000Z |
#i/o
import requests, os, random
from glob import glob
from collections import Counter
from collections import defaultdict
from PIL import Image
from skimage.io import imread
import pickle
#numerica
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras import layers
from skimage.transform import resize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA #for data dimensionality reduction / viz.
# plots
# from sklearn.metrics import ConfusionMatrixDisplay
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns #extended functionality / style to matplotlib plots
from matplotlib.offsetbox import OffsetImage, AnnotationBbox #for visualizing image thumbnails plotted as markers | 29.21875 | 113 | 0.840642 |
import requests, os, random
from glob import glob
from collections import Counter
from collections import defaultdict
from PIL import Image
from skimage.io import imread
import pickle
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras import layers
from skimage.transform import resize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.offsetbox import OffsetImage, AnnotationBbox | true | true |
f7306be7655fa4665aa16310d89615d9df796e9f | 207 | py | Python | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 61 | 2020-09-26T19:57:44.000Z | 2022-03-09T18:51:44.000Z | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 88 | 2020-09-19T20:00:27.000Z | 2021-10-31T09:41:57.000Z | leetcode/python/others/hamming_distance.py | ajeet1308/code_problems | 5d99839b6319295c6d81dd86775c46a536e7a1ca | [
"MIT"
] | 218 | 2020-09-20T08:18:03.000Z | 2022-01-30T23:13:16.000Z | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
count = 0
diff = x ^ y
while diff != 0:
count += 1
diff &= (diff-1)
return count
| 23 | 53 | 0.454106 | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
count = 0
diff = x ^ y
while diff != 0:
count += 1
diff &= (diff-1)
return count
| true | true |
f7306cae98bbc792ae9e49ea47e2232347c28149 | 3,218 | py | Python | homeassistant/util/logging.py | GotoCode/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 11 | 2017-09-25T13:11:33.000Z | 2020-05-16T21:54:28.000Z | homeassistant/util/logging.py | GotoCode/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 125 | 2018-12-11T07:31:20.000Z | 2021-07-27T08:20:03.000Z | homeassistant/util/logging.py | y1ngyang/home-assistant | 7e39a5c4d50cf5754f5f32a84870ca57a5778b02 | [
"Apache-2.0"
] | 3 | 2018-05-22T18:52:01.000Z | 2019-07-18T21:30:45.000Z | """Logging utilities."""
import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text):
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record):
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
# pylint: disable=invalid-name
class AsyncHandler(object):
"""Logging handler wrapper to add an async layer."""
def __init__(self, loop, handler):
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
"""Wrap close to handler."""
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
"""Close the handler.
When blocking=True, will wait till closed.
"""
yield from self._queue.put(None)
if blocking:
while self._thread.is_alive():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
"""Return the string names."""
return str(self.handler)
def _process(self):
"""Process log in a thread."""
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self):
"""Ignore lock stuff."""
pass
def acquire(self):
"""Ignore lock stuff."""
pass
def release(self):
"""Ignore lock stuff."""
pass
@property
def level(self):
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self):
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self):
"""Wrap property set_name to handler."""
return self.handler.get_name()
@name.setter
def name(self, name):
"""Wrap property get_name to handler."""
self.handler.name = name
| 26.377049 | 74 | 0.598198 | import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
def __init__(self, text):
super().__init__()
self.text = text
def filter(self, record):
record.msg = record.msg.replace(self.text, '*******')
return True
class AsyncHandler(object):
def __init__(self, loop, handler):
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
yield from self._queue.put(None)
if blocking:
while self._thread.is_alive():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
ident = self.loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
return str(self.handler)
def _process(self):
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self):
pass
def acquire(self):
pass
def release(self):
pass
@property
def level(self):
return self.handler.level
@property
def formatter(self):
return self.handler.formatter
@property
def name(self):
return self.handler.get_name()
@name.setter
def name(self, name):
self.handler.name = name
| true | true |
f7306e7cc53d34293df4261830687777126b1853 | 107,459 | py | Python | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HUAWEI-CLOCK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HUAWEI-CLOCK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-CLOCK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:43:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
PhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "PhysicalIndex")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier, Unsigned32, Gauge32, IpAddress, ObjectIdentity, NotificationType, ModuleIdentity, Counter32, Counter64, Integer32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Counter32", "Counter64", "Integer32", "Bits", "iso")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
hwClockMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186))
hwClockMIB.setRevisions(('2014-11-29 00:00', '2014-11-03 00:00', '2014-08-13 00:00', '2014-04-21 00:00', '2014-01-07 00:00', '2013-11-12 00:00', '2013-10-31 00:00', '2013-05-23 00:00', '2013-05-14 00:00', '2013-03-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hwClockMIB.setRevisionsDescriptions(('Modify alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockCesDcrMasterPwChange, hwClockCesDcrLockFail,hwClockCesDcrLockFailResume,hwClockSsmPktLos,hwClockSsmPktLosResume and add mib hwClockCesDcrSlot,hwClockCesDcrCard,hwClockCesDcrDomain,hwClockCesDcrOldMasterPwName,hwClockCesDcrNewMasterPwName,hwClockCesDcrLockState,hwClockCesMode', 'Add alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockClusterTopoFail, hwClockClusterTopoFailResume and table hwClockClusterTopoTable.', 'Edit the range of hwClockCesAcrDomianInfoDomain.', 'Add mib hwClockBitsCfgFrameFormat, hwClockAttributeLtiSquelch and hwClockAttributeInputThreshold.', 'Edit the range of hwClockCesAcrRecoveryDomain.', 'Re-edit the range of some nodes.', 'Re-edit the default values of hwClockAttributeTodProtocol node.', 'Some errors have been modified in current version and some nodes have been added into the current version.',))
if mibBuilder.loadTexts: hwClockMIB.setLastUpdated('201411290000Z')
if mibBuilder.loadTexts: hwClockMIB.setOrganization('Huawei Technologies Co.,Ltd. ')
if mibBuilder.loadTexts: hwClockMIB.setContactInfo("Huawei Industrial Base Bantian, Longgang Shenzhen 518129 People's Republic of China Website: http://www.huawei.com Email: support@huawei.com ")
if mibBuilder.loadTexts: hwClockMIB.setDescription('The MIB contains objects of module clock management and 1588 interface.')
hwClockManageObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1))
hwClockGlobalObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1))
hwClockSourceEthClkEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setDescription('The flag indicates that the ethernet clock is globally enabled.')
hwClockSourceSsmUnknown = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone('dnu')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setDescription('The quality level of unknown SSM.')
hwClockSourceSysClkWorkMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trace", 1), ("hold", 2), ("freeoscillate", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setDescription('The work mode of system clock.')
hwClockSourceForceCloseEnableStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setDescription('The enable status of export forced close.')
hwClockSourceSsmControl = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("extend", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockSourceHoldMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hold24Hours", 1), ("holdForever", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceHoldMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceHoldMode.setDescription('The hold mode of clock source.')
hwClockSourceFreqCheckEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 7), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setDescription('The enable flag of frequency check.')
hwClockSourceFreqCheckLeftRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setDescription('The left range of frequency check, unit in 0.01ppm.')
hwClockSourceFreqCheckRightRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setDescription('The right range of frequency check, unit in 0.01ppm.')
hwClockSourceRetrieveMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("retrieve", 1), ("noRetrieve", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setDescription('The retrieve mode of clock source.')
hwClockTimeUsedSource = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("srcDclsTimeBit0", 1), ("srcDclsTimeBit1", 2), ("src1ppsTodBit0", 3), ("src1ppsTodBit1", 4), ("srcPtp", 5), ("srcFreeRun", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockTimeUsedSource.setStatus('current')
if mibBuilder.loadTexts: hwClockTimeUsedSource.setDescription('The clock time used source.')
hwClockExtTimeInputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeInputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeInputType.setDescription('The input time type of clock extern time.')
hwClockExtTimeOutputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setDescription('The output time type of clock extern time.')
hwClockAlarmThresholdFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 92)).clone(92)).setUnits('100ppb').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setDescription('The Threshold of clock alarm.')
hwClockFrequencyOffsetMax = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 15), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setDescription('The max offset of clock frequency.')
hwClockFrequencyOffsetMin = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 16), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setDescription('The min offset of clock frequency.')
hwClockFrequencyOffsetMean = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 17), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setDescription('The mean offset of clock frequency.')
hwClockFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 18), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffset.setDescription('The current offset of clock frequency.')
hwClockSourceSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2), )
if mibBuilder.loadTexts: hwClockSourceSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelTable.setDescription('The system clock source selection table.')
hwClockSourceSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelType"))
if mibBuilder.loadTexts: hwClockSourceSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelEntry.setDescription('The entry of system clock source selection table.')
hwClockSourceSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setDescription('The chassis index.')
hwClockSourceSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)))
if mibBuilder.loadTexts: hwClockSourceSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelType.setDescription('The select type.')
hwClockSourceSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auto", 1), ("manual", 2), ("force", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelMode.setDescription('The mode of clock source selection.')
hwClockSourceSelSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setDescription('The source ID of the clock traced.')
hwClockSourceCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3), )
if mibBuilder.loadTexts: hwClockSourceCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgTable.setDescription('The clock source config table.')
hwClockSourceCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setDescription('The entry of clock source config table.')
hwClockCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)))
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setDescription('The clock source index.')
hwClockCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceId.setDescription('The clock source ID.')
hwClockCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setDescription('The clock source description.')
hwClockCfgWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgWtrTime.setDescription('The waiting for restore time of clock source.')
hwClockCfgBadDetect = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBadDetect.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBadDetect.setDescription('The enable status of clock source bad detecting.')
hwClockCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockCfgBits0Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setDescription('The priority of BITS0 clock source.')
hwClockCfgBits1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setDescription('The priority of BITS1 clock source.')
hwClockCfgSystemLockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setDescription('The lock out of system clock source.')
hwClockCfgBits0LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setDescription('The lock out of BITS0 clock source.')
hwClockCfgBits1LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setDescription('The lock out of BITS1 clock source.')
hwClockCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ssmPrc", 1), ("ssmSsut", 2), ("ssmSsul", 3), ("ssmSec", 4), ("ssmDnu", 5), ("ssmUnknown", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockCfgSourceSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setDescription('The set mode of SSM.')
hwClockCfgExportEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setDescription('The enable status of clock source export.')
hwClockCfgSwiEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 16), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setDescription('he enable status of clock source switch.')
# --- hwClockCfg source table, columns 17-24 (OID 1.3.6.1.4.1.2011.5.25.186.1.3.1.x) ---
# Tail of the per-clock-source configuration columns. The enclosing table/row
# objects are defined earlier in the file (above this section). Each column is
# followed by its pysnmp loadTexts status/description registration.
# NOTE(review): generated pysnmp code — object definition order and the
# immediately-following loadTexts guards must be preserved as-is.
hwClockCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("abnormal", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceState.setDescription('The state of clock source.')
# SSM (Synchronization Status Message) quality-level threshold, qlDnu..qlPrc.
hwClockCfgSsmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("qlDnu", 1), ("qlSec", 2), ("qlSsub", 3), ("qlSsua", 4), ("qlPrc", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setDescription('The SSM quality level threshold of clock source.')
# Read-only S1 byte (0-255) reported for the source.
hwClockCfgSourceS1Id = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setDescription('The S1 byte of the clock.')
hwClockCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setDescription('The result of frequency check, unit in 0.01ppm.')
# Hold-off time in units of 100 ms, constrained to 3..18 (i.e. 300-1800 ms).
hwClockCfgHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 18))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setDescription('The hold off time of clock, unit in 100ms.')
hwClockCfgPriRvtEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 22), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setDescription('The enable status of switch according priority.')
hwClockCfgSwitchCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noSwitch", 1), ("switch", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setDescription('The condition of clock switch.')
hwClockCfgClkSourceType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("bits", 1), ("line", 2), ("inner", 3), ("system", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setDescription('The type of clock source.')
# --- hwClockBitsCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.4) ---
# BITS (external) clock configuration table, indexed by chassis index and
# BITS index (1-10). Columns cover port/signal type, direction, SA bits,
# forced S1 output, input/output modes, invalidation condition and framing.
hwClockBitsCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4), )
if mibBuilder.loadTexts: hwClockBitsCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTable.setDescription('The clock bits congfig table.')
hwClockBitsCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsIndex"))
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setDescription('The entry of clock bits congfig table.')
# Index columns (no MAX-ACCESS set: not readable as ordinary columns).
hwClockBitsCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockBitsCfgBitsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setDescription('The index of BITS clock.')
hwClockBitsCfgName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgName.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgName.setDescription('The name of clock.')
hwClockBitsCfgBitsPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("portRj45", 1), ("portSMB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setDescription('The BITS port type.')
hwClockBitsCfgBitsType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("type2Mbps", 0), ("type2Mhz", 1), ("typeDclsTime", 2), ("type1ppsTod", 3), ("none", 4), ("type1544Mbps", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setDescription('The BITS type.')
hwClockBitsCfgDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inAndOut", 3), ("none", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setDescription('The direction of BITS.')
# SA bit selection columns share the same sa4..sa8 enumeration (values 4-8).
hwClockBitsCfgRecvSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setDescription('The received SA bit.')
hwClockBitsCfgSendSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setDescription('The sent SA bit.')
# Forced-out S1 value uses the SSM quality-level codepoints (unk..dnu).
hwClockBitsCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setDescription('The S1 byte of forcing out.')
hwClockBitsCfgSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setDescription('The SA bit of SSM information.')
hwClockBitsCfgInputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setDescription('The input mode of clock source.')
hwClockBitsCfgOutputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setDescription('The output mode of clock source.')
hwClockBitsCfgInvalidCond = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("no", 1), ("ais", 2), ("lof", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setDescription('The invalid condition of clock source.')
hwClockBitsCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setDescription('The clock source ID.')
hwClockBitsCfgTodSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setDescription('The tod signal of clock source.')
# Frame format defaults to pcm31crc(4) via the trailing .clone(4).
hwClockBitsCfgFrameFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("pcm30nocrc", 1), ("pcm30crc", 2), ("pcm31nocrc", 3), ("pcm31crc", 4))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setDescription('Encoding type and frame check format of the extern clock port.')
# --- hwClockPortCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.5) ---
# Per-interface clock configuration, indexed by ifIndex: left/right frame
# priorities and a forced-out S1 value.
hwClockPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5), )
if mibBuilder.loadTexts: hwClockPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgTable.setDescription('The clock port config table.')
hwClockPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockPortCfgIfIndex"))
if mibBuilder.loadTexts: hwClockPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgEntry.setDescription('The entry of clock port config table.')
hwClockPortCfgIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setDescription('The interface index.')
hwClockPortCfgLeftFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setDescription('The clock priority of left frame.')
hwClockPortCfgRightFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setDescription('The clock priority of right frame.')
# Range starts at -1, unlike the 0-255 S1 columns elsewhere — presumably -1
# means "not forced"; TODO confirm against the vendor MIB text.
hwClockPortCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setDescription('The S1 byte of forcing out.')
# --- hwClockLineClkCfgTable (OID 1.3.6.1.4.1.2011.5.25.186.1.6) ---
# Line-clock configuration, indexed by chassis and slot (1-200). Selects the
# card/port that provides the line clock and exposes the received/sent S1.
# Several columns are read-create, i.e. rows can be created via SNMP.
hwClockLineClkCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6), )
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setDescription('The line clock config table.')
hwClockLineClkCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSlotIndex"))
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setDescription('The entry of line clock config table.')
hwClockLineClkCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setDescription('The chassis index.')
hwClockLineClkCfgSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200)))
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setDescription('The slot index of the line clock.')
hwClockLineClkCfgCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setDescription('The card index witch is seleced to provide line clock.')
hwClockLineClkCfgPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setDescription('The port index witch is seleced to provide line clock.')
hwClockLineClkCfgRecvS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setDescription('The S1 byte value received.')
hwClockLineClkCfgSendS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setDescription('The S1 byte value sent.')
# NOTE(review): "Soure" spelling and the placeholder description come from
# the generated MIB source; left unchanged to match the exported symbol name.
hwClockLineCfgSoureId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setDescription('Description.')
# --- hwClockTrapOid subtree (OID 1.3.6.1.4.1.2011.5.25.186.1.7) ---
# Accessible-for-notify scalars: these objects are never polled directly;
# they exist only to be carried as variable bindings inside the
# notifications defined in the hwClockNotifications subtree below.
hwClockTrapOid = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7))
hwClockLastSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockLastSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockLastSourceName.setDescription('The last clock source name.')
hwClockCurSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCurSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockCurSourceName.setDescription('The current clock source name.')
hwClockSourceOldLockMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setDescription('The old lock mode of clock source.')
hwClockChassisId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockChassisId.setStatus('current')
if mibBuilder.loadTexts: hwClockChassisId.setDescription('The chassis ID.')
hwClockOldSourceState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("wtr", 3), ("holdoff", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockOldSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockOldSourceState.setDescription('The old state of clock source.')
hwClockPllId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockPllId.setStatus('current')
if mibBuilder.loadTexts: hwClockPllId.setDescription('The id of pll.')
hwClockAttributeOutValue = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockAttributeOutValue.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutValue.setDescription('The current output value.')
# CES ACR (adaptive clock recovery) trap payload objects.
hwClockCesAcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 8), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 9), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 10), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 11), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setDescription('The master pw SerialPort name of CES ACR old clock source.')
hwClockCesAcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 12), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setDescription('The master pw SerialPort name of CES ACR new clock source.')
hwClockCesAcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 13), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockState.setDescription('The lock state of the CES ACR.')
# CES DCR (differential clock recovery) trap payload objects; mirror the
# ACR set above at OIDs .14-.19.
hwClockCesDcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 14), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrSlot.setDescription('The slot ID of CES DCR clock source.')
hwClockCesDcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 15), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrCard.setDescription('The card ID of CES DCR clock source.')
hwClockCesDcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 16), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrDomain.setDescription('The recovery domain value of CES DCR clock source.')
hwClockCesDcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 17), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setDescription('The master pw SerialPort name of CES DCR old clock source.')
hwClockCesDcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 18), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setDescription('The master pw SerialPort name of CES DCR new clock source.')
hwClockCesDcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 19), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockState.setDescription('The lock state of the CES DCR.')
# --- hwClockNotifications subtree (OID 1.3.6.1.4.1.2011.5.25.186.1.8) ---
# Trap/notification definitions. Each NotificationType lists the varbind
# objects (by module/name) carried in the trap PDU. Note the OID numbering
# is not contiguous (9, 10, 18-21 are absent from this chunk).
hwClockNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8))
hwClockSourceSwitch = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcSelMode"))
if mibBuilder.loadTexts: hwClockSourceSwitch.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSwitch.setDescription('Clock source switch notification.')
hwClockSourceSysClkLockModeChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 2)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setDescription('The lock mode of system clock source change notification.')
# Fault / resume notification pairs follow (stateChange/stateResume,
# freqCheck/freqCheckResume, belowThreshold/belowThresholdResume, ...).
hwClockSourceStateChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 3)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateChange.setDescription('The state of clock source change notification.')
hwClockSourceStateResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 4)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateResume.setDescription('The state of clock source resume notification.')
hwClockSourceFreqCheck = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 5)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setDescription('The result of clock source frequnce check abnormal notification.')
hwClockSourceOutputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 6)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setDescription('The SSM of output below threshold notification.')
hwClockNotInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 7)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockNotInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockNotInLockedMode.setDescription('The work mode of system clock is not in locked mode.')
hwClockInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockInLockedMode.setDescription('The work mode of system clock is in locked mode.')
hwClockSourceFailed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 11)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceFailed.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFailed.setDescription('The state of clock source is failed.')
hwClockSourceValid = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 12)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceValid.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceValid.setDescription('The state of clock source is valid.')
hwClockSourceFreqCheckResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setDescription('The result of clock source frequnce check normal notification.')
hwClockSourceOutputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setDescription('The SSM of output above threshold notification.')
hwClockCesAcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setDescription('CES ACR master PW status change.')
hwClockCesAcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setDescription('CES ACR clock source lock fail.')
hwClockCesAcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setDescription('CES ACR clock source lock fail resume.')
hwClockClusterTopoFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 22)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFail.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFail.setDescription('Clock cluster inter-chassis synchronization topo compute failed.')
hwClockClusterTopoFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 23)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setDescription('Clock inter-chassis synchronization topo compute successfully.')
hwClockSourceInputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 24)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setDescription('The SSM of input below threshold notification.')
hwClockSourceInputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 25)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setDescription('The SSM of input above or equal threshold notification.')
hwClockSsmPktLos = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 26)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLos.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLos.setDescription('The ssm packet of clock source is lost.')
hwClockSsmPktLosResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 27)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setDescription('The ssm packet of clock source is normal.')
hwClockCesDcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 28)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setDescription('CES DCR master PW status change.')
hwClockCesDcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 29)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setDescription('CES DCR clock source lock fail.')
hwClockCesDcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 30)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setDescription('CES DCR clock source lock fail resume.')
# --- hwClockAttributeTable (OID ...186.1.9) ---------------------------------
# Per-chassis clock attribute table, indexed by hwClockAttributeChassisIndex.
# Columns expose run mode, SSM control, squelch thresholds, etc.  Generated
# code: identifier names and OIDs are referenced elsewhere by string and must
# not be renamed.
hwClockAttributeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9), )
if mibBuilder.loadTexts: hwClockAttributeTable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTable.setDescription('The clock Attribute table.')
hwClockAttributeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockAttributeChassisIndex"))
if mibBuilder.loadTexts: hwClockAttributeEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeEntry.setDescription('The entry of clock Attribute table.')
# Index column (no setMaxAccess call, so it takes the class default access).
hwClockAttributeChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setDescription('The chassis index.')
# Read-write behaviour/configuration columns (2..10, 13..17) follow; the
# enumerations mirror the MIB's named values (e.g. SSM quality levels
# prc/ssua/ssub/sec/dnu encoded as 2/4/8/11/15).
hwClockAttributeSysClkRunMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("freeRun", 1), ("hold", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setDescription('The run mode of system clock.')
hwClockAttributeSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockAttributeFreqCheckEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setDescription('The enable flag of frequency check.')
# .clone('retrieve') / .clone(5) / .clone(1000) etc. set the column DEFVAL.
hwClockAttributeRetrieveMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("retrieve", 0), ("noRetrieve", 1))).clone('retrieve')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setDescription('The retrieve mode of system clock.')
hwClockAttributeWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setDescription('The time waiting for retrieve.')
hwClockAttributeHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(300, 1800)).clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setDescription('The holdoff-time when the system source is lost.')
hwClockAttributeOutThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setDescription('The Threshold of out put.')
hwClockAttributeSysMaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setDescription('The max ssm of system out put.')
hwClockAttribute2M1MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setDescription('The max ssm of 2msync-1 out put.')
hwClockAttribute2M2MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setDescription('The max ssm of 2msync-2 out put.')
# Column 12 is read-only status (lock mode of the system clock PLL).
hwClockAttributeSysClkLockMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setDescription('The Lock mode of system clock.')
hwClockAttributeExtendSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setDescription('The flag whether Extend SSM is concerned with the clock source selection.')
hwClockAttributeInternalClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setDescription('The internal clockid of the device.')
hwClockAttributeTodProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3), ("ccsa", 4))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setDescription('1pps bits tod protocol.')
hwClockAttributeLtiSquelch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 16), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setDescription('The frequency signal output squelch flag upon the frequency loss.')
hwClockAttributeInputThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setDescription('The squelch threshold of the external input source.')
# --- hwClockSrcSelTable (OID ...186.1.10) -----------------------------------
# System clock source selection table, indexed by (chassis, PLL type).
# Lets the manager choose auto/manual/force selection and a named source, and
# read back which source is currently being traced.
hwClockSrcSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10), )
if mibBuilder.loadTexts: hwClockSrcSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelTable.setDescription('The system clock source selection table.')
hwClockSrcSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelType"))
if mibBuilder.loadTexts: hwClockSrcSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelEntry.setDescription('The entry of system clock source selection table.')
# Index columns (1, 2).
hwClockSrcSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setDescription('The chassis index.')
hwClockSrcSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3))))
if mibBuilder.loadTexts: hwClockSrcSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelType.setDescription('The PLL Id.')
# Writable selection mode / source name; read-only traced-source name.
hwClockSrcSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("auto", 0), ("manual", 1), ("force", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelMode.setDescription('The mode of clock source selection.')
hwClockSrcSelSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setDescription('The name of clock source for selection.')
hwClockSrcTraceSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setDescription('The name of trace source.')
# --- hwClockSrcCfgTable (OID ...186.1.11) -----------------------------------
# Per clock-source configuration table, indexed by (chassis, source type,
# source index).  Rows are created/destroyed via hwClockSrcCfgRowStatus
# (RowStatus), hence the "readcreate" access on configurable columns.
hwClockSrcCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11), )
if mibBuilder.loadTexts: hwClockSrcCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgTable.setDescription('The clock source config table.')
hwClockSrcCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceTypeIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setDescription('The entry of clock source config table.')
# Index columns (1..3); note column 2 is accessible-for-notify.
hwClockSrcCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setDescription('The chassis index.')
hwClockSrcCfgSourceTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("bits", 1), ("ptp", 2), ("interface", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setDescription('The type of clock source.')
hwClockSrcCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 3), Integer32())
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setDescription('The index of clock source.')
hwClockSrcCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setDescription('The description of clock source.')
# Configurable attributes: enable flag, per-PLL priorities, SSM quality, etc.
hwClockSrcCfgClkEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setDescription('The enable flag of clock source.')
hwClockSrcCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockSrcCfg2M1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setDescription('The priority of 2msync-1 clock source.')
hwClockSrcCfg2M2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setDescription('The priority of 2msync-2 clock source.')
hwClockSrcCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockSrcCfgSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setDescription('The set mode of SSM.')
# Read-only operational state columns.
hwClockSrcCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setDescription('The state of clock source.')
hwClockSrcCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setDescription('The result of frequency check.')
# NOTE(review): descriptions below are the literal 'Description.' placeholders
# from the vendor MIB; they are runtime MIB text and are left untouched.
hwClockSrcCfgSsmInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(512, 8000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setDescription('Description.')
hwClockSrcCfgSsmTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 32000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setDescription('Description.')
hwClockSrcCfgSabit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 99))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8), ("invalid", 99))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setDescription('The SA bit of E1 Port SSM information.')
hwClockSrcCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setDescription('The clockid of clock source.')
hwClockSrcCfgClockIdSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setDescription('The set mode of clockid.')
hwClockSrcCfgOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16, 99))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16), ("invalid", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setDescription('Current output ssm.')
hwClockSrcCfgOutClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 99))).clone(namedValues=NamedValues(("clockid0", 0), ("clockid1", 1), ("clockid2", 2), ("clockid3", 3), ("clockid4", 4), ("clockid5", 5), ("clockid6", 6), ("clockid7", 7), ("clockid8", 8), ("clockid9", 9), ("clockid10", 10), ("clockid11", 11), ("clockid12", 12), ("clockid13", 13), ("clockid14", 14), ("clockid15", 15), ("notsupport", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setDescription('Current output clockid.')
# Row-control column (create/destroy rows of this table).
hwClockSrcCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 20), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setDescription('The row status.')
hwClockSrcCfgFreqDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 21), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setDescription('Freqdeviation value of clock source.')
hwClockSrcCfgPhyState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("cardTypeNotSupport", 0), ("slave", 1), ("master", 2), ("speedNotSupport", 3), ("portDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setDescription('The PHY clock state of ports.')
hwClockSrcCfgNegotiationSlave = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notSupport", 0), ("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setDescription('Set PHY clock state to slave.')
# --- hwClockCesAcrPortCfgTable (OID ...186.1.12) ----------------------------
# CES ACR (Adaptive Clock Recovery) per-port configuration, indexed by
# (parent ifIndex, channel id, ifIndex).  Row creation is controlled by
# hwClockCesAcrPortCfgRowStatus.
hwClockCesAcrPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12), )
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setDescription('The CES ACR clock port config table.')
hwClockCesAcrPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrParentIfIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrChannelId"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrIfIndex"))
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setDescription('The entry of CES ACR clock port config table.')
# Index columns (1..3).
hwClockCesAcrParentIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setDescription('Indicates the index of the parent interface.')
hwClockCesAcrChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setDescription('Indicates the interface index.')
hwClockCesAcrPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrPortName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortName.setDescription('Port name.')
hwClockCesAcrChannelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setDescription('Indicates the interface type. The type can be E1/CE1 or T1/CT1.')
hwClockCesAcrSourceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("slave", 2), ("recoveryDomain", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setDescription('Indicates the clock mode of the interface. master: indicates that the clock works in master mode and uses the internal clock signal. slave: indicates that the clock works in slave mode and uses the line clock signal. recovery-domain: indicates that the clock works in slave mode and uses the recovery domain clock signal. ')
hwClockCesAcrRecoveryDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setDescription('Indicates the clock recovery domain of the interface. DEFVAL is 0.')
hwClockCesAcrPwDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setDescription('Indicates the clock PW domain of the interface. DEFVAL is 0.')
# Row-control column.
hwClockCesAcrPortCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setDescription('The row status.')
hwClockCesAcrMasterDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setDescription('Indicates the clock master domain of the interface. DEFVAL is 0.')
hwClockCesMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acr", 1), ("dcr", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesMode.setDescription('Indicates the clock CES recovery mode of the interface. DEFVAL is 0.')
# --- hwClockCesAcrCfgTable (OID ...186.1.13) --------------------------------
# CES ACR clock-source configuration per (slot, card, recovery domain).
# Row creation is controlled by hwClockCesAcrCfgRowStatus.
hwClockCesAcrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13), )
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setDescription('The CES ACR clock source config table.')
hwClockCesAcrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgDomain"))
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setDescription('The entry of CES ACR clock source config table.')
# Index columns (1..3); domain here is constrained to 1..8.
hwClockCesAcrCfgSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCfgCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrCfgDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrCfgDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setDescription('The description of clock source.')
# Configurable attributes.
hwClockCesAcrCfgSyncEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setDescription('The enable flag of CES ACR clock source.')
hwClockCesAcrCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setDescription('The priority of system CES ACR clock source. DEFVAL is 0.')
hwClockCesAcrCfgSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setDescription('The SSM quality of CES ACR clock source.')
hwClockCesAcrCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setDescription('The clockid of clock source. DEFVAL is 0.')
# Read-only operational state columns.
hwClockCesAcrCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setDescription('The state of CES ACR clock source.')
hwClockCesAcrCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setDescription('The result of CES ACR clock source frequency check.')
# Row-control column.
hwClockCesAcrCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setDescription('The row status.')
# --- hwClockCesAcrDomainInfoTable (OID ...186.1.14) -------------------------
# Read-only CES ACR recovery-domain status per (slot, card, domain 1..16).
# NOTE(review): the "Domian" misspelling in the column identifiers comes from
# the vendor MIB and is also used as a string in setIndexNames below, so it
# must NOT be corrected here.
hwClockCesAcrDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14), )
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setDescription('The CES ACR domain infomation table.')
hwClockCesAcrDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoDomain"))
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setDescription('The entry of CES ACR domain infomation table.')
# Index columns (1..3).
hwClockCesAcrDomianInfoSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrDomianInfoCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomianInfoDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)))
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setDescription('The recovery domain value of CES ACR clock source.')
# Read-only status columns.
hwClockCesAcrDomianInfoMasterPwName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setDescription('Port name.')
hwClockCesAcrDomianInfoChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrDomianInfoState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wait", 2), ("lock", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setDescription('The state of CES ACR clock source.')
# --- hwClockClusterTopoTable (OID ...186.1.15) ------------------------------
# Read-only inter-chassis (cluster) clock topology status, indexed by
# (sync type, topo type, link type).
hwClockClusterTopoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15), )
if mibBuilder.loadTexts: hwClockClusterTopoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoTable.setDescription('The CES ACR domain infomation table.')
hwClockClusterTopoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"))
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setDescription('Description.')
# Index columns (1..3), each a small named-value enumeration.
hwClockClusterSyncType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("frequency", 1), ("time", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterSyncType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterSyncType.setDescription('The type of clock inter-chassis sync.')
hwClockClusterTopoType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("interlink", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoType.setDescription('The type of clock inter-chassis topo..')
hwClockClusterTopoLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("bits", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setDescription('The type of clock inter-chassis link.')
# Status column (4): whether the inter-chassis topology is up.
hwClockClusterTopoStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fail", 1), ("success", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setDescription('The status of clock inter-chassis topo.')
# --- Conformance section (OID ...186.10) ---
# Root identifiers for the compliance/group statements that follow.
hwClockConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10))
hwClockSourceCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1))
# MODULE-COMPLIANCE: lists the object groups an agent must implement.
hwClockSourceCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockManageSysGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSourceCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockNotificationsGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSysSelGroup"), ("HUAWEI-CLOCK-MIB", "hwClockTrapOidGroup"), ("HUAWEI-CLOCK-MIB", "hwClockLineCfgGroup"))
# setStatus on ModuleCompliance is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSourceCompliance = hwClockSourceCompliance.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCompliance.setDescription('The compliance of clock MIB.')
# OBJECT-GROUP: system-wide clock management scalars (work mode, SSM control,
# frequency-check ranges, external time types, frequency-offset readings).
hwClockSourceGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2))
hwClockManageSysGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkWorkMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckEnable"), ("HUAWEI-CLOCK-MIB", "hwClockSourceHoldMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmControl"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckRightRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckLeftRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceRetrieveMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceForceCloseEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmUnknown"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeOutputType"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeInputType"), ("HUAWEI-CLOCK-MIB", "hwClockTimeUsedSource"), ("HUAWEI-CLOCK-MIB", "hwClockSourceEthClkEnable"), ("HUAWEI-CLOCK-MIB", "hwClockAlarmThresholdFrequencyOffset"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMax"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMin"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMean"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffset"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockManageSysGroup = hwClockManageSysGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockManageSysGroup.setDescription('The manage group.')
# OBJECT-GROUP: system clock-source selection objects plus CES ACR/DCR
# master-PW/lock-state objects referenced by the selection traps.
hwClockSysSelGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 9)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSelMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSelSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSysSelGroup = hwClockSysSelGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSysSelGroup.setDescription('The system selection group.')
# OBJECT-GROUP: per-clock-source configuration columns (priority, SSM,
# switching conditions, lock-out controls, etc.).
hwClockSourceCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 10)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCfgPriRvtEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwitchCondition"), ("HUAWEI-CLOCK-MIB", "hwClockCfgWtrTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBadDetect"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsm"), ("HUAWEI-CLOCK-MIB", "hwClockCfgExportEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwiEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockCfgFreqCheckResult"), ("HUAWEI-CLOCK-MIB", "hwClockCfgHoldOffTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemPriority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsmSetMode"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceS1Id"), ("HUAWEI-CLOCK-MIB", "hwClockCfgClkSourceType"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSsmThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemLockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgTodSignal"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockSourceCfgGroup = hwClockSourceCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgGroup.setDescription('The clock source group.')
# OBJECT-GROUP: per-port clock configuration (frame priorities, forced S1 output).
hwClockPortCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockPortCfgLeftFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgRightFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgForceOutS1"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockPortCfgGroup = hwClockPortCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgGroup.setDescription('The port config of clock source group.')
# OBJECT-GROUP: BITS (external timing interface) configuration columns.
hwClockBitsCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockBitsCfgRecvSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSendSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgForceOutS1"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgName"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsType"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgDirection"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgOutputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInvalidCond"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsPortType"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockBitsCfgGroup = hwClockBitsCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgGroup.setDescription('The BITS clock source group.')
# OBJECT-GROUP: varbind objects carried in clock notifications (traps).
hwClockTrapOidGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockTrapOidGroup = hwClockTrapOidGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockTrapOidGroup.setDescription('The clock trap group.')
# NOTIFICATION-GROUP: every trap defined by this MIB (source switch/state,
# frequency check, threshold crossings, CES ACR/DCR lock events, SSM loss, ...).
hwClockNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSwitch"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheck"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceValid"), ("HUAWEI-CLOCK-MIB", "hwClockInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFail"), ("HUAWEI-CLOCK-MIB", "hwClockNotInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkLockModeChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFailed"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLos"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLosResume"))
# setStatus on NotificationGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockNotificationsGroup = hwClockNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockNotificationsGroup.setDescription('This is the group of clock notification.')
# OBJECT-GROUP: line-clock (SDH/SONET S1 byte) configuration columns.
hwClockLineCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgRecvS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSendS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgCardId"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgPortId"))
# setStatus on ObjectGroup is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwClockLineCfgGroup = hwClockLineCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgGroup.setDescription('The line clock group..')
# Publish every MIB object defined above under the "HUAWEI-CLOCK-MIB" module
# name so other compiled MIBs (and pysnmp resolvers) can import them by symbol.
# PYSNMP_MODULE_ID identifies the module's MODULE-IDENTITY node (hwClockMIB).
# This single call spans the following lines; the keyword names (including the
# misspelled hwClockCesAcrDomianInfo* symbols) are the module's public API and
# must not be renamed.
mibBuilder.exportSymbols("HUAWEI-CLOCK-MIB", PYSNMP_MODULE_ID=hwClockMIB, hwClockCfgSourceS1Id=hwClockCfgSourceS1Id, hwClockBitsCfgFrameFormat=hwClockBitsCfgFrameFormat, hwClockCfgSourceId=hwClockCfgSourceId, hwClockAttributeSsmControl=hwClockAttributeSsmControl, hwClockCesAcrDomianInfoDomain=hwClockCesAcrDomianInfoDomain, hwClockSrcCfgNegotiationSlave=hwClockSrcCfgNegotiationSlave, hwClockCurSourceName=hwClockCurSourceName, hwClockSourceInputBelowThresholdResume=hwClockSourceInputBelowThresholdResume, hwClockSrcCfgFreqCheckResult=hwClockSrcCfgFreqCheckResult, hwClockBitsCfgTodSignal=hwClockBitsCfgTodSignal, hwClockSrcCfgSabit=hwClockSrcCfgSabit, hwClockSrcSelSrcName=hwClockSrcSelSrcName, hwClockCesAcrCfgCard=hwClockCesAcrCfgCard, hwClockCesAcrCfgSystemPriority=hwClockCesAcrCfgSystemPriority, hwClockAttributeSysClkRunMode=hwClockAttributeSysClkRunMode, hwClockCesAcrParentIfIndex=hwClockCesAcrParentIfIndex, hwClockCesAcrPortCfgTable=hwClockCesAcrPortCfgTable, hwClockSourceEthClkEnable=hwClockSourceEthClkEnable, hwClockLineClkCfgSlotIndex=hwClockLineClkCfgSlotIndex, hwClockInLockedMode=hwClockInLockedMode, hwClockCesAcrMasterDomain=hwClockCesAcrMasterDomain, hwClockCesAcrCfgSyncEnable=hwClockCesAcrCfgSyncEnable, hwClockPortCfgLeftFramePri=hwClockPortCfgLeftFramePri, hwClockCfgBadDetect=hwClockCfgBadDetect, hwClockSrcCfgSourceTypeIndex=hwClockSrcCfgSourceTypeIndex, hwClockSrcCfgSystemPriority=hwClockSrcCfgSystemPriority, hwClockCesAcrCfgRowStatus=hwClockCesAcrCfgRowStatus, hwClockCfgSourceIndex=hwClockCfgSourceIndex, hwClockSrcCfgClockId=hwClockSrcCfgClockId, hwClockSourceSwitch=hwClockSourceSwitch, hwClockLineClkCfgTable=hwClockLineClkCfgTable, hwClockSrcCfg2M2Priority=hwClockSrcCfg2M2Priority, hwClockSourceValid=hwClockSourceValid, hwClockCesMode=hwClockCesMode, hwClockCfgClkSourceType=hwClockCfgClkSourceType, hwClockBitsCfgDirection=hwClockBitsCfgDirection, hwClockBitsCfgInvalidCond=hwClockBitsCfgInvalidCond, hwClockCfgSwitchCondition=hwClockCfgSwitchCondition, 
hwClockCesAcrCfgDescr=hwClockCesAcrCfgDescr, hwClockAttribute2M1MaxOutSsm=hwClockAttribute2M1MaxOutSsm, hwClockCesAcrDomianInfoMasterPwName=hwClockCesAcrDomianInfoMasterPwName, hwClockAlarmThresholdFrequencyOffset=hwClockAlarmThresholdFrequencyOffset, hwClockCesAcrCfgSlot=hwClockCesAcrCfgSlot, hwClockChassisId=hwClockChassisId, hwClockGlobalObjects=hwClockGlobalObjects, hwClockBitsCfgSendSaBit=hwClockBitsCfgSendSaBit, hwClockSourceFreqCheckLeftRange=hwClockSourceFreqCheckLeftRange, hwClockSrcCfgFreqDeviation=hwClockSrcCfgFreqDeviation, hwClockSourceCompliances=hwClockSourceCompliances, hwClockClusterTopoType=hwClockClusterTopoType, hwClockSrcCfgSourceSsm=hwClockSrcCfgSourceSsm, hwClockCesAcrDomianInfoSlot=hwClockCesAcrDomianInfoSlot, hwClockSourceCfgGroup=hwClockSourceCfgGroup, hwClockCesDcrOldMasterPwName=hwClockCesDcrOldMasterPwName, hwClockOldSourceState=hwClockOldSourceState, hwClockSourceCompliance=hwClockSourceCompliance, hwClockMIB=hwClockMIB, hwClockLineClkCfgRecvS1=hwClockLineClkCfgRecvS1, hwClockPortCfgIfIndex=hwClockPortCfgIfIndex, hwClockCfgSourceDescr=hwClockCfgSourceDescr, hwClockExtTimeInputType=hwClockExtTimeInputType, hwClockCfgSwiEnableStatus=hwClockCfgSwiEnableStatus, hwClockLineCfgGroup=hwClockLineCfgGroup, hwClockManageObjects=hwClockManageObjects, hwClockBitsCfgSaBit=hwClockBitsCfgSaBit, hwClockSourceFreqCheckRightRange=hwClockSourceFreqCheckRightRange, hwClockSrcSelMode=hwClockSrcSelMode, hwClockClusterTopoTable=hwClockClusterTopoTable, hwClockFrequencyOffset=hwClockFrequencyOffset, hwClockManageSysGroup=hwClockManageSysGroup, hwClockSourceFreqCheckEnable=hwClockSourceFreqCheckEnable, hwClockAttribute2M2MaxOutSsm=hwClockAttribute2M2MaxOutSsm, hwClockCesAcrCfgFreqCheckResult=hwClockCesAcrCfgFreqCheckResult, hwClockCesAcrDomainInfoTable=hwClockCesAcrDomainInfoTable, hwClockCesAcrDomianInfoChannelId=hwClockCesAcrDomianInfoChannelId, hwClockSrcCfgClockIdSetMode=hwClockSrcCfgClockIdSetMode, hwClockSourceSelType=hwClockSourceSelType, 
hwClockCfgBits0Priority=hwClockCfgBits0Priority, hwClockSrcCfgSsmSetMode=hwClockSrcCfgSsmSetMode, hwClockClusterTopoFail=hwClockClusterTopoFail, hwClockPllId=hwClockPllId, hwClockSrcCfg2M1Priority=hwClockSrcCfg2M1Priority, hwClockSourceHoldMode=hwClockSourceHoldMode, hwClockSrcSelTable=hwClockSrcSelTable, hwClockLineClkCfgCardId=hwClockLineClkCfgCardId, hwClockSsmPktLosResume=hwClockSsmPktLosResume, hwClockSourceSelChassisIndex=hwClockSourceSelChassisIndex, hwClockAttributeExtendSsmControl=hwClockAttributeExtendSsmControl, hwClockSourceOldLockMode=hwClockSourceOldLockMode, hwClockPortCfgRightFramePri=hwClockPortCfgRightFramePri, hwClockCesAcrChannelId=hwClockCesAcrChannelId, hwClockCesAcrCfgSsm=hwClockCesAcrCfgSsm, hwClockSourceSelMode=hwClockSourceSelMode, hwClockSrcCfgSourceDescr=hwClockSrcCfgSourceDescr, hwClockTrapOid=hwClockTrapOid, hwClockAttributeEntry=hwClockAttributeEntry, hwClockCesAcrRecoveryDomain=hwClockCesAcrRecoveryDomain, hwClockCesAcrSlot=hwClockCesAcrSlot, hwClockFrequencyOffsetMax=hwClockFrequencyOffsetMax, hwClockSrcCfgRowStatus=hwClockSrcCfgRowStatus, hwClockCfgSourceState=hwClockCfgSourceState, hwClockBitsCfgOutputMode=hwClockBitsCfgOutputMode, hwClockBitsCfgBitsIndex=hwClockBitsCfgBitsIndex, hwClockFrequencyOffsetMin=hwClockFrequencyOffsetMin, hwClockCfgChassisIndex=hwClockCfgChassisIndex, hwClockLastSourceName=hwClockLastSourceName, hwClockCesAcrNewMasterPwName=hwClockCesAcrNewMasterPwName, hwClockAttributeHoldOffTime=hwClockAttributeHoldOffTime, hwClockClusterTopoLinkType=hwClockClusterTopoLinkType, hwClockCesAcrPortName=hwClockCesAcrPortName, hwClockPortCfgForceOutS1=hwClockPortCfgForceOutS1, hwClockSourceInputBelowThreshold=hwClockSourceInputBelowThreshold, hwClockSrcCfgTable=hwClockSrcCfgTable, hwClockCesAcrChannelType=hwClockCesAcrChannelType, hwClockBitsCfgSourceId=hwClockBitsCfgSourceId, hwClockSourceSelSourceId=hwClockSourceSelSourceId, hwClockAttributeLtiSquelch=hwClockAttributeLtiSquelch, 
hwClockSourceSysClkWorkMode=hwClockSourceSysClkWorkMode, hwClockCesDcrSlot=hwClockCesDcrSlot, hwClockCfgBits1LockOut=hwClockCfgBits1LockOut, hwClockSrcCfgClkEnable=hwClockSrcCfgClkEnable, hwClockConformance=hwClockConformance, hwClockSysSelGroup=hwClockSysSelGroup, hwClockNotifications=hwClockNotifications, hwClockSourceSelEntry=hwClockSourceSelEntry, hwClockCesAcrDomain=hwClockCesAcrDomain, hwClockCesDcrMasterPwChange=hwClockCesDcrMasterPwChange, hwClockCesAcrCard=hwClockCesAcrCard, hwClockSrcCfgPhyState=hwClockSrcCfgPhyState, hwClockSourceCfgTable=hwClockSourceCfgTable, hwClockNotInLockedMode=hwClockNotInLockedMode, hwClockSourceSsmUnknown=hwClockSourceSsmUnknown, hwClockBitsCfgChassisIndex=hwClockBitsCfgChassisIndex, hwClockCesDcrLockFail=hwClockCesDcrLockFail, hwClockCesAcrPortCfgEntry=hwClockCesAcrPortCfgEntry, hwClockPortCfgTable=hwClockPortCfgTable, hwClockSourceSsmControl=hwClockSourceSsmControl, hwClockCesDcrCard=hwClockCesDcrCard, hwClockSrcTraceSrcName=hwClockSrcTraceSrcName, hwClockSrcCfgSourceState=hwClockSrcCfgSourceState, hwClockBitsCfgForceOutS1=hwClockBitsCfgForceOutS1, hwClockCfgSourceSsm=hwClockCfgSourceSsm, hwClockBitsCfgBitsPortType=hwClockBitsCfgBitsPortType, hwClockLineClkCfgPortId=hwClockLineClkCfgPortId, hwClockCesAcrLockFail=hwClockCesAcrLockFail, hwClockSrcSelChassisIndex=hwClockSrcSelChassisIndex, hwClockAttributeWtrTime=hwClockAttributeWtrTime, hwClockAttributeFreqCheckEnable=hwClockAttributeFreqCheckEnable, hwClockCfgPriRvtEnableStatus=hwClockCfgPriRvtEnableStatus, hwClockLineClkCfgSendS1=hwClockLineClkCfgSendS1, hwClockSourceStateResume=hwClockSourceStateResume, hwClockSrcCfgChassisIndex=hwClockSrcCfgChassisIndex, hwClockCesAcrLockFailResume=hwClockCesAcrLockFailResume, hwClockCesAcrDomianInfoState=hwClockCesAcrDomianInfoState, hwClockExtTimeOutputType=hwClockExtTimeOutputType, hwClockSourceOutputBelowThreshold=hwClockSourceOutputBelowThreshold, hwClockCesAcrMasterPwChange=hwClockCesAcrMasterPwChange, 
hwClockAttributeInputThreshold=hwClockAttributeInputThreshold, hwClockCesAcrCfgSourceState=hwClockCesAcrCfgSourceState, hwClockSrcCfgEntry=hwClockSrcCfgEntry, hwClockCfgHoldOffTime=hwClockCfgHoldOffTime, hwClockSourceCfgEntry=hwClockSourceCfgEntry, hwClockPortCfgEntry=hwClockPortCfgEntry, hwClockAttributeRetrieveMode=hwClockAttributeRetrieveMode, hwClockCfgSsmThreshold=hwClockCfgSsmThreshold, hwClockSourceFreqCheck=hwClockSourceFreqCheck, hwClockSourceFailed=hwClockSourceFailed, hwClockClusterSyncType=hwClockClusterSyncType, hwClockCesAcrDomianInfoCard=hwClockCesAcrDomianInfoCard, hwClockCfgSystemLockOut=hwClockCfgSystemLockOut, hwClockCesAcrLockState=hwClockCesAcrLockState, hwClockCesAcrCfgClockId=hwClockCesAcrCfgClockId, hwClockLineClkCfgEntry=hwClockLineClkCfgEntry, hwClockSrcSelEntry=hwClockSrcSelEntry, hwClockAttributeSysMaxOutSsm=hwClockAttributeSysMaxOutSsm, hwClockCesAcrPortCfgRowStatus=hwClockCesAcrPortCfgRowStatus, hwClockSourceSysClkLockModeChange=hwClockSourceSysClkLockModeChange, hwClockTrapOidGroup=hwClockTrapOidGroup, hwClockSsmPktLos=hwClockSsmPktLos, hwClockAttributeTable=hwClockAttributeTable, hwClockSourceOutputBelowThresholdResume=hwClockSourceOutputBelowThresholdResume, hwClockSrcCfgOutClockId=hwClockSrcCfgOutClockId, hwClockLineClkCfgChassisIndex=hwClockLineClkCfgChassisIndex, hwClockSrcCfgSsmTimeout=hwClockSrcCfgSsmTimeout, hwClockCesAcrCfgDomain=hwClockCesAcrCfgDomain, hwClockBitsCfgGroup=hwClockBitsCfgGroup, hwClockCfgSourceSsmSetMode=hwClockCfgSourceSsmSetMode, hwClockCfgBits1Priority=hwClockCfgBits1Priority, hwClockBitsCfgRecvSaBit=hwClockBitsCfgRecvSaBit, hwClockSourceStateChange=hwClockSourceStateChange, hwClockAttributeOutThreshold=hwClockAttributeOutThreshold, hwClockClusterTopoStatus=hwClockClusterTopoStatus, hwClockLineCfgSoureId=hwClockLineCfgSoureId, hwClockAttributeOutValue=hwClockAttributeOutValue, hwClockAttributeSysClkLockMode=hwClockAttributeSysClkLockMode, hwClockCesAcrOldMasterPwName=hwClockCesAcrOldMasterPwName, 
hwClockCesDcrLockState=hwClockCesDcrLockState, hwClockCfgSystemPriority=hwClockCfgSystemPriority, hwClockClusterTopoEntry=hwClockClusterTopoEntry, hwClockCesAcrCfgTable=hwClockCesAcrCfgTable, hwClockClusterTopoFailResume=hwClockClusterTopoFailResume, hwClockCfgFreqCheckResult=hwClockCfgFreqCheckResult, hwClockSrcSelType=hwClockSrcSelType, hwClockBitsCfgInputMode=hwClockBitsCfgInputMode, hwClockAttributeInternalClockId=hwClockAttributeInternalClockId, hwClockSrcCfgOutSsm=hwClockSrcCfgOutSsm, hwClockAttributeChassisIndex=hwClockAttributeChassisIndex, hwClockNotificationsGroup=hwClockNotificationsGroup, hwClockSrcCfgSsmInterval=hwClockSrcCfgSsmInterval, hwClockCesAcrIfIndex=hwClockCesAcrIfIndex, hwClockSourceForceCloseEnableStatus=hwClockSourceForceCloseEnableStatus, hwClockSourceFreqCheckResume=hwClockSourceFreqCheckResume, hwClockSourceGroups=hwClockSourceGroups, hwClockCfgBits0LockOut=hwClockCfgBits0LockOut, hwClockCesDcrDomain=hwClockCesDcrDomain, hwClockTimeUsedSource=hwClockTimeUsedSource, hwClockCfgWtrTime=hwClockCfgWtrTime, hwClockCfgExportEnableStatus=hwClockCfgExportEnableStatus, hwClockBitsCfgEntry=hwClockBitsCfgEntry, hwClockCesAcrDomainInfoEntry=hwClockCesAcrDomainInfoEntry, hwClockFrequencyOffsetMean=hwClockFrequencyOffsetMean, hwClockBitsCfgName=hwClockBitsCfgName, hwClockBitsCfgBitsType=hwClockBitsCfgBitsType, hwClockSrcCfgSourceIndex=hwClockSrcCfgSourceIndex, hwClockCesDcrLockFailResume=hwClockCesDcrLockFailResume, hwClockBitsCfgTable=hwClockBitsCfgTable, hwClockAttributeTodProtocol=hwClockAttributeTodProtocol, hwClockCesAcrSourceMode=hwClockCesAcrSourceMode, hwClockSourceRetrieveMode=hwClockSourceRetrieveMode, hwClockCesDcrNewMasterPwName=hwClockCesDcrNewMasterPwName, hwClockCesAcrCfgEntry=hwClockCesAcrCfgEntry, hwClockSourceSelTable=hwClockSourceSelTable, hwClockPortCfgGroup=hwClockPortCfgGroup, hwClockCesAcrPwDomain=hwClockCesAcrPwDomain)
| 148.629322 | 11,779 | 0.782587 |
# --- Dependency section: pull base types and macros from other compiled MIBs ---
# These calls resolve names at MIB-load time via the pysnmp MIB builder, so they
# play the role of imports for this generated module.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
PhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "PhysicalIndex")
# hwDatacomm anchors the Huawei enterprise OID subtree this MIB hangs under.
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, MibIdentifier, Unsigned32, Gauge32, IpAddress, ObjectIdentity, NotificationType, ModuleIdentity, Counter32, Counter64, Integer32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "MibIdentifier", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Counter32", "Counter64", "Integer32", "Bits", "iso")
DisplayString, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus")
# --- MODULE-IDENTITY for HUAWEI-CLOCK-MIB (OID 1.3.6.1.4.1.2011.5.25.186) ---
# Revision history and contact metadata; loadTexts-guarded calls are skipped
# when the builder is configured without descriptive texts.
hwClockMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186))
hwClockMIB.setRevisions(('2014-11-29 00:00', '2014-11-03 00:00', '2014-08-13 00:00', '2014-04-21 00:00', '2014-01-07 00:00', '2013-11-12 00:00', '2013-10-31 00:00', '2013-05-23 00:00', '2013-05-14 00:00', '2013-03-20 00:00',))
# setRevisionsDescriptions is only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hwClockMIB.setRevisionsDescriptions(('Modify alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockCesDcrMasterPwChange, hwClockCesDcrLockFail,hwClockCesDcrLockFailResume,hwClockSsmPktLos,hwClockSsmPktLosResume and add mib hwClockCesDcrSlot,hwClockCesDcrCard,hwClockCesDcrDomain,hwClockCesDcrOldMasterPwName,hwClockCesDcrNewMasterPwName,hwClockCesDcrLockState,hwClockCesMode', 'Add alarm hwClockSourceInputBelowThreshold, hwClockSourceInputBelowThresholdResume.', 'Add alarm hwClockClusterTopoFail, hwClockClusterTopoFailResume and table hwClockClusterTopoTable.', 'Edit the range of hwClockCesAcrDomianInfoDomain.', 'Add mib hwClockBitsCfgFrameFormat, hwClockAttributeLtiSquelch and hwClockAttributeInputThreshold.', 'Edit the range of hwClockCesAcrRecoveryDomain.', 'Re-edit the range of some nodes.', 'Re-edit the default values of hwClockAttributeTodProtocol node.', 'Some errors have been modified in current version and some nodes have been added into the current version.',))
if mibBuilder.loadTexts: hwClockMIB.setLastUpdated('201411290000Z')
if mibBuilder.loadTexts: hwClockMIB.setOrganization('Huawei Technologies Co.,Ltd. ')
if mibBuilder.loadTexts: hwClockMIB.setContactInfo("Huawei Industrial Base Bantian, Longgang Shenzhen 518129 People's Republic of China Website: http://www.huawei.com Email: support@huawei.com ")
if mibBuilder.loadTexts: hwClockMIB.setDescription('The MIB contains objects of module clock management and 1588 interface.')
# --- Global (scalar) clock-management objects (OID ...186.1.1) ---
hwClockManageObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1))
hwClockGlobalObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1))
# Global enable switch for synchronous-Ethernet clocking (read-write).
hwClockSourceEthClkEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceEthClkEnable.setDescription('The flag indicates that the ethernet clock is globally enabled.')
# SSM quality level assumed for sources with unknown SSM; default dnu(15).
hwClockSourceSsmUnknown = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone('dnu')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmUnknown.setDescription('The quality level of unknown SSM.')
# System clock working state: trace(1), hold(2), freeoscillate(3); read-only.
hwClockSourceSysClkWorkMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trace", 1), ("hold", 2), ("freeoscillate", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkWorkMode.setDescription('The work mode of system clock.')
# Enable flag for forcing clock export closed (read-write).
hwClockSourceForceCloseEnableStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceForceCloseEnableStatus.setDescription('The enable status of export forced close.')
# Whether SSM participates in source selection: on(1), off(2), extend(3).
hwClockSourceSsmControl = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("extend", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
# Holdover policy: hold24Hours(1) or holdForever(2); read-only.
hwClockSourceHoldMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hold24Hours", 1), ("holdForever", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSourceHoldMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceHoldMode.setDescription('The hold mode of clock source.')
# Frequency-deviation checking: global enable plus left/right tolerance
# bounds, both in units of 0.01 ppm and constrained to 50..1000.
hwClockSourceFreqCheckEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 7), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckEnable.setDescription('The enable flag of frequency check.')
hwClockSourceFreqCheckLeftRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckLeftRange.setDescription('The left range of frequency check, unit in 0.01ppm.')
hwClockSourceFreqCheckRightRange = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(50, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckRightRange.setDescription('The right range of frequency check, unit in 0.01ppm.')
# Whether a recovered (higher-priority) source is re-selected: retrieve(1)/noRetrieve(2).
hwClockSourceRetrieveMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("retrieve", 1), ("noRetrieve", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceRetrieveMode.setDescription('The retrieve mode of clock source.')
# Currently used time source: DCLS/1PPS+TOD per BITS port, PTP, or free-run.
hwClockTimeUsedSource = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("srcDclsTimeBit0", 1), ("srcDclsTimeBit1", 2), ("src1ppsTodBit0", 3), ("src1ppsTodBit1", 4), ("srcPtp", 5), ("srcFreeRun", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockTimeUsedSource.setStatus('current')
if mibBuilder.loadTexts: hwClockTimeUsedSource.setDescription('The clock time used source.')
# External time signal formats for input and output: DCLS, 1PPS+TOD (RS232 or
# GPS framing), or none. Both scalars share the same enumeration.
hwClockExtTimeInputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeInputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeInputType.setDescription('The input time type of clock extern time.')
hwClockExtTimeOutputType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("typeDclsTime", 1), ("type1ppsTodRs232", 2), ("type1ppsTodGps", 3), ("typeNone", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setStatus('current')
if mibBuilder.loadTexts: hwClockExtTimeOutputType.setDescription('The output time type of clock extern time.')
# Frequency-offset alarm threshold, 10..92 in units of 100 ppb, default 92.
hwClockAlarmThresholdFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 92)).clone(92)).setUnits('100ppb').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockAlarmThresholdFrequencyOffset.setDescription('The Threshold of clock alarm.')
# Read-only measured frequency offsets (ppb): max, min, mean, and current.
hwClockFrequencyOffsetMax = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 15), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMax.setDescription('The max offset of clock frequency.')
hwClockFrequencyOffsetMin = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 16), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMin.setDescription('The min offset of clock frequency.')
hwClockFrequencyOffsetMean = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 17), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffsetMean.setDescription('The mean offset of clock frequency.')
hwClockFrequencyOffset = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 1, 18), Integer32()).setUnits('ppb').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockFrequencyOffset.setStatus('current')
if mibBuilder.loadTexts: hwClockFrequencyOffset.setDescription('The current offset of clock frequency.')
hwClockSourceSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2), )
if mibBuilder.loadTexts: hwClockSourceSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelTable.setDescription('The system clock source selection table.')
hwClockSourceSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSourceSelType"))
if mibBuilder.loadTexts: hwClockSourceSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelEntry.setDescription('The entry of system clock source selection table.')
hwClockSourceSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelChassisIndex.setDescription('The chassis index.')
hwClockSourceSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)))
if mibBuilder.loadTexts: hwClockSourceSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelType.setDescription('The select type.')
hwClockSourceSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auto", 1), ("manual", 2), ("force", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelMode.setDescription('The mode of clock source selection.')
hwClockSourceSelSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSelSourceId.setDescription('The source ID of the clock traced.')
hwClockSourceCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3), )
if mibBuilder.loadTexts: hwClockSourceCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgTable.setDescription('The clock source config table.')
hwClockSourceCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgEntry.setDescription('The entry of clock source config table.')
hwClockCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)))
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceIndex.setDescription('The clock source index.')
hwClockCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceId.setDescription('The clock source ID.')
hwClockCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceDescr.setDescription('The clock source description.')
hwClockCfgWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgWtrTime.setDescription('The waiting for restore time of clock source.')
hwClockCfgBadDetect = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBadDetect.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBadDetect.setDescription('The enable status of clock source bad detecting.')
hwClockCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockCfgBits0Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0Priority.setDescription('The priority of BITS0 clock source.')
hwClockCfgBits1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1Priority.setDescription('The priority of BITS1 clock source.')
hwClockCfgSystemLockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSystemLockOut.setDescription('The lock out of system clock source.')
hwClockCfgBits0LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits0LockOut.setDescription('The lock out of BITS0 clock source.')
hwClockCfgBits1LockOut = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgBits1LockOut.setDescription('The lock out of BITS1 clock source.')
hwClockCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ssmPrc", 1), ("ssmSsut", 2), ("ssmSsul", 3), ("ssmSec", 4), ("ssmDnu", 5), ("ssmUnknown", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockCfgSourceSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceSsmSetMode.setDescription('The set mode of SSM.')
hwClockCfgExportEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgExportEnableStatus.setDescription('The enable status of clock source export.')
hwClockCfgSwiEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 16), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwiEnableStatus.setDescription('he enable status of clock source switch.')
hwClockCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("abnormal", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceState.setDescription('The state of clock source.')
hwClockCfgSsmThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("qlDnu", 1), ("qlSec", 2), ("qlSsub", 3), ("qlSsua", 4), ("qlPrc", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSsmThreshold.setDescription('The SSM quality level threshold of clock source.')
hwClockCfgSourceS1Id = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSourceS1Id.setDescription('The S1 byte of the clock.')
hwClockCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 20), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgFreqCheckResult.setDescription('The result of frequency check, unit in 0.01ppm.')
hwClockCfgHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 18))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgHoldOffTime.setDescription('The hold off time of clock, unit in 100ms.')
hwClockCfgPriRvtEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 22), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgPriRvtEnableStatus.setDescription('The enable status of switch according priority.')
hwClockCfgSwitchCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noSwitch", 1), ("switch", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgSwitchCondition.setDescription('The condition of clock switch.')
hwClockCfgClkSourceType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 3, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("bits", 1), ("line", 2), ("inner", 3), ("system", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setStatus('current')
if mibBuilder.loadTexts: hwClockCfgClkSourceType.setDescription('The type of clock source.')
hwClockBitsCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4), )
if mibBuilder.loadTexts: hwClockBitsCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTable.setDescription('The clock bits congfig table.')
hwClockBitsCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsIndex"))
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgEntry.setDescription('The entry of clock bits congfig table.')
hwClockBitsCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgChassisIndex.setDescription('The index of the chassis whitch the clock source belongs to.')
hwClockBitsCfgBitsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsIndex.setDescription('The index of BITS clock.')
hwClockBitsCfgName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgName.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgName.setDescription('The name of clock.')
hwClockBitsCfgBitsPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("portRj45", 1), ("portSMB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsPortType.setDescription('The BITS port type.')
hwClockBitsCfgBitsType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("type2Mbps", 0), ("type2Mhz", 1), ("typeDclsTime", 2), ("type1ppsTod", 3), ("none", 4), ("type1544Mbps", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgBitsType.setDescription('The BITS type.')
hwClockBitsCfgDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inAndOut", 3), ("none", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgDirection.setDescription('The direction of BITS.')
hwClockBitsCfgRecvSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgRecvSaBit.setDescription('The received SA bit.')
hwClockBitsCfgSendSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSendSaBit.setDescription('The sent SA bit.')
hwClockBitsCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgForceOutS1.setDescription('The S1 byte of forcing out.')
hwClockBitsCfgSaBit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSaBit.setDescription('The SA bit of SSM information.')
hwClockBitsCfgInputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInputMode.setDescription('The input mode of clock source.')
hwClockBitsCfgOutputMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("clk2MBits", 0), ("clk2MHz", 1), ("dclsTime", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgOutputMode.setDescription('The output mode of clock source.')
hwClockBitsCfgInvalidCond = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("no", 1), ("ais", 2), ("lof", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgInvalidCond.setDescription('The invalid condition of clock source.')
hwClockBitsCfgSourceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgSourceId.setDescription('The clock source ID.')
hwClockBitsCfgTodSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgTodSignal.setDescription('The tod signal of clock source.')
hwClockBitsCfgFrameFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 4, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("pcm30nocrc", 1), ("pcm30crc", 2), ("pcm31nocrc", 3), ("pcm31crc", 4))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgFrameFormat.setDescription('Encoding type and frame check format of the extern clock port.')
hwClockPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5), )
if mibBuilder.loadTexts: hwClockPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgTable.setDescription('The clock port config table.')
hwClockPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockPortCfgIfIndex"))
if mibBuilder.loadTexts: hwClockPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgEntry.setDescription('The entry of clock port config table.')
hwClockPortCfgIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgIfIndex.setDescription('The interface index.')
hwClockPortCfgLeftFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgLeftFramePri.setDescription('The clock priority of left frame.')
hwClockPortCfgRightFramePri = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgRightFramePri.setDescription('The clock priority of right frame.')
hwClockPortCfgForceOutS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgForceOutS1.setDescription('The S1 byte of forcing out.')
hwClockLineClkCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6), )
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgTable.setDescription('The line clock config table.')
hwClockLineClkCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSlotIndex"))
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgEntry.setDescription('The entry of line clock config table.')
hwClockLineClkCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgChassisIndex.setDescription('The chassis index.')
hwClockLineClkCfgSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200)))
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSlotIndex.setDescription('The slot index of the line clock.')
hwClockLineClkCfgCardId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgCardId.setDescription('The card index witch is seleced to provide line clock.')
hwClockLineClkCfgPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgPortId.setDescription('The port index witch is seleced to provide line clock.')
hwClockLineClkCfgRecvS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgRecvS1.setDescription('The S1 byte value received.')
hwClockLineClkCfgSendS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setStatus('current')
if mibBuilder.loadTexts: hwClockLineClkCfgSendS1.setDescription('The S1 byte value sent.')
hwClockLineCfgSoureId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 6, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgSoureId.setDescription('Description.')
hwClockTrapOid = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7))
hwClockLastSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockLastSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockLastSourceName.setDescription('The last clock source name.')
hwClockCurSourceName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCurSourceName.setStatus('current')
if mibBuilder.loadTexts: hwClockCurSourceName.setDescription('The current clock source name.')
hwClockSourceOldLockMode = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOldLockMode.setDescription('The old lock mode of clock source.')
hwClockChassisId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 4), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockChassisId.setStatus('current')
if mibBuilder.loadTexts: hwClockChassisId.setDescription('The chassis ID.')
hwClockOldSourceState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("wtr", 3), ("holdoff", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockOldSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockOldSourceState.setDescription('The old state of clock source.')
hwClockPllId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockPllId.setStatus('current')
if mibBuilder.loadTexts: hwClockPllId.setDescription('The id of pll.')
hwClockAttributeOutValue = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockAttributeOutValue.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutValue.setDescription('The current output value.')
hwClockCesAcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 8), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 9), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 10), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 11), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrOldMasterPwName.setDescription('The master pw SerialPort name of CES ACR old clock source.')
hwClockCesAcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 12), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrNewMasterPwName.setDescription('The master pw SerialPort name of CES ACR new clock source.')
hwClockCesAcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 13), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesAcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockState.setDescription('The lock state of the CES ACR.')
hwClockCesDcrSlot = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 14), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrSlot.setDescription('The slot ID of CES DCR clock source.')
hwClockCesDcrCard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 15), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrCard.setDescription('The card ID of CES DCR clock source.')
hwClockCesDcrDomain = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 16), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrDomain.setDescription('The recovery domain value of CES DCR clock source.')
hwClockCesDcrOldMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 17), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrOldMasterPwName.setDescription('The master pw SerialPort name of CES DCR old clock source.')
hwClockCesDcrNewMasterPwName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 18), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrNewMasterPwName.setDescription('The master pw SerialPort name of CES DCR new clock source.')
hwClockCesDcrLockState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 7, 19), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockCesDcrLockState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockState.setDescription('The lock state of the CES DCR.')
# --- Notification (trap) definitions (…186.1.8) ------------------------------
# Each NotificationType lists, via setObjects, the varbind objects included in
# the emitted trap. Generated by pysmi from HUAWEI-CLOCK-MIB; the `loadTexts`
# guards attach MIB DESCRIPTION/STATUS text only when texts are loaded.
hwClockNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8))
# Clock-source selection / lock-mode / state-change notifications (.1 - .8).
hwClockSourceSwitch = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcSelMode"))
if mibBuilder.loadTexts: hwClockSourceSwitch.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSwitch.setDescription('Clock source switch notification.')
hwClockSourceSysClkLockModeChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 2)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceSysClkLockModeChange.setDescription('The lock mode of system clock source change notification.')
hwClockSourceStateChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 3)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateChange.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateChange.setDescription('The state of clock source change notification.')
hwClockSourceStateResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 4)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceStateResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceStateResume.setDescription('The state of clock source resume notification.')
hwClockSourceFreqCheck = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 5)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheck.setDescription('The result of clock source frequnce check abnormal notification.')
hwClockSourceOutputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 6)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThreshold.setDescription('The SSM of output below threshold notification.')
hwClockNotInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 7)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockNotInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockNotInLockedMode.setDescription('The work mode of system clock is not in locked mode.')
hwClockInLockedMode = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeSysClkLockMode"))
if mibBuilder.loadTexts: hwClockInLockedMode.setStatus('current')
if mibBuilder.loadTexts: hwClockInLockedMode.setDescription('The work mode of system clock is in locked mode.')
# Failure / resume pairs (.11 - .14). NOTE(review): arc IDs .9 and .10 are
# absent here — presumably unassigned or deprecated in the source MIB.
hwClockSourceFailed = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 11)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceFailed.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFailed.setDescription('The state of clock source is failed.')
hwClockSourceValid = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 12)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceState"))
if mibBuilder.loadTexts: hwClockSourceValid.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceValid.setDescription('The state of clock source is valid.')
hwClockSourceFreqCheckResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgFreqCheckResult"))
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceFreqCheckResume.setDescription('The result of clock source frequnce check normal notification.')
hwClockSourceOutputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceOutputBelowThresholdResume.setDescription('The SSM of output above threshold notification.')
# CES ACR (Adaptive Clock Recovery) notifications (.15 - .17).
hwClockCesAcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterPwChange.setDescription('CES ACR master PW status change.')
hwClockCesAcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFail.setDescription('CES ACR clock source lock fail.')
hwClockCesAcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"))
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrLockFailResume.setDescription('CES ACR clock source lock fail resume.')
# Cluster topology and SSM input/packet notifications (.22 - .27).
hwClockClusterTopoFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 22)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFail.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFail.setDescription('Clock cluster inter-chassis synchronization topo compute failed.')
hwClockClusterTopoFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 23)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoStatus"))
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoFailResume.setDescription('Clock inter-chassis synchronization topo compute successfully.')
hwClockSourceInputBelowThreshold = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 24)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThreshold.setDescription('The SSM of input below threshold notification.')
hwClockSourceInputBelowThresholdResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 25)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeInputThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceSsm"))
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceInputBelowThresholdResume.setDescription('The SSM of input above or equal threshold notification.')
hwClockSsmPktLos = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 26)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLos.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLos.setDescription('The ssm packet of clock source is lost.')
hwClockSsmPktLosResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 27)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"))
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setStatus('current')
if mibBuilder.loadTexts: hwClockSsmPktLosResume.setDescription('The ssm packet of clock source is normal.')
# CES DCR notifications (.28 - .30), mirroring the ACR set above.
hwClockCesDcrMasterPwChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 28)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"))
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrMasterPwChange.setDescription('CES DCR master PW status change.')
hwClockCesDcrLockFail = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 29)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFail.setDescription('CES DCR clock source lock fail.')
hwClockCesDcrLockFailResume = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 8, 30)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setStatus('current')
if mibBuilder.loadTexts: hwClockCesDcrLockFailResume.setDescription('CES DCR clock source lock fail resume.')
# --- hwClockAttributeTable (…186.1.9) ----------------------------------------
# Per-chassis clock attribute table, indexed by hwClockAttributeChassisIndex.
# Columns expose run mode, SSM handling, WTR/hold-off timers and squelch
# thresholds. Generated by pysmi from HUAWEI-CLOCK-MIB; keep tokens verbatim.
hwClockAttributeTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9), )
if mibBuilder.loadTexts: hwClockAttributeTable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTable.setDescription('The clock Attribute table.')
hwClockAttributeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockAttributeChassisIndex"))
if mibBuilder.loadTexts: hwClockAttributeEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeEntry.setDescription('The entry of clock Attribute table.')
# Index column (.1) — not-accessible by SMI convention for row indices.
hwClockAttributeChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeChassisIndex.setDescription('The chassis index.')
hwClockAttributeSysClkRunMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("freeRun", 1), ("hold", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkRunMode.setDescription('The run mode of system clock.')
hwClockAttributeSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSsmControl.setDescription('The flag whether SSM is concerned with the clock source selection.')
hwClockAttributeFreqCheckEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 4), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeFreqCheckEnable.setDescription('The enable flag of frequency check.')
# .clone('retrieve') below sets the DEFVAL for the column.
hwClockAttributeRetrieveMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("retrieve", 0), ("noRetrieve", 1))).clone('retrieve')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeRetrieveMode.setDescription('The retrieve mode of system clock.')
hwClockAttributeWtrTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeWtrTime.setDescription('The time waiting for retrieve.')
hwClockAttributeHoldOffTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(300, 1800)).clone(1000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeHoldOffTime.setDescription('The holdoff-time when the system source is lost.')
# SSM quality levels use ITU-T codepoints: prc=2, ssua=4, ssub=8, sec=11, dnu=15.
hwClockAttributeOutThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeOutThreshold.setDescription('The Threshold of out put.')
hwClockAttributeSysMaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysMaxOutSsm.setDescription('The max ssm of system out put.')
hwClockAttribute2M1MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M1MaxOutSsm.setDescription('The max ssm of 2msync-1 out put.')
hwClockAttribute2M2MaxOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockAttribute2M2MaxOutSsm.setDescription('The max ssm of 2msync-2 out put.')
hwClockAttributeSysClkLockMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 16, 19))).clone(namedValues=NamedValues(("freeRun", 0), ("fastLock", 1), ("lock", 2), ("hold", 3), ("freeRunJudge", 16), ("holdJudge", 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeSysClkLockMode.setDescription('The Lock mode of system clock.')
hwClockAttributeExtendSsmControl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("on", 0), ("off", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeExtendSsmControl.setDescription('The flag whether Extend SSM is concerned with the clock source selection.')
hwClockAttributeInternalClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInternalClockId.setDescription('The internal clockid of the device.')
hwClockAttributeTodProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("nmea", 1), ("ubx", 2), ("none", 3), ("ccsa", 4))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeTodProtocol.setDescription('1pps bits tod protocol.')
hwClockAttributeLtiSquelch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 16), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeLtiSquelch.setDescription('The frequency signal output squelch flag upon the frequency loss.')
hwClockAttributeInputThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 9, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 4, 8, 11, 15))).clone(namedValues=NamedValues(("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15))).clone(15)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setStatus('current')
if mibBuilder.loadTexts: hwClockAttributeInputThreshold.setDescription('The squelch threshold of the external input source.')
# --- hwClockSrcSelTable (…186.1.10) ------------------------------------------
# Clock-source selection table, indexed by (chassis, PLL type). Exposes the
# selection mode (auto/manual/force), the selected source and the traced
# source. Generated by pysmi from HUAWEI-CLOCK-MIB; keep tokens verbatim.
hwClockSrcSelTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10), )
if mibBuilder.loadTexts: hwClockSrcSelTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelTable.setDescription('The system clock source selection table.')
hwClockSrcSelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcSelType"))
if mibBuilder.loadTexts: hwClockSrcSelEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelEntry.setDescription('The entry of system clock source selection table.')
# Index columns (.1, .2).
hwClockSrcSelChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelChassisIndex.setDescription('The chassis index.')
hwClockSrcSelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("system", 1), ("sync2M1", 2), ("sync2M2", 3))))
if mibBuilder.loadTexts: hwClockSrcSelType.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelType.setDescription('The PLL Id.')
hwClockSrcSelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("auto", 0), ("manual", 1), ("force", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelMode.setDescription('The mode of clock source selection.')
hwClockSrcSelSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcSelSrcName.setDescription('The name of clock source for selection.')
hwClockSrcTraceSrcName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 10, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcTraceSrcName.setDescription('The name of trace source.')
# --- hwClockSrcCfgTable (…186.1.11) ------------------------------------------
# Clock source configuration table, indexed by (chassis, source type, source
# index). Read-create columns (with RowStatus at .20) allow row creation;
# read-only columns report derived state (SSM, lock, frequency deviation).
# Generated by pysmi from HUAWEI-CLOCK-MIB; keep tokens verbatim.
hwClockSrcCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11), )
if mibBuilder.loadTexts: hwClockSrcCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgTable.setDescription('The clock source config table.')
hwClockSrcCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgChassisIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceTypeIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockSrcCfgSourceIndex"))
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgEntry.setDescription('The entry of clock source config table.')
# Index columns (.1 - .3).
hwClockSrcCfgChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 1), PhysicalIndex())
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgChassisIndex.setDescription('The chassis index.')
hwClockSrcCfgSourceTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("bits", 1), ("ptp", 2), ("interface", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceTypeIndex.setDescription('The type of clock source.')
hwClockSrcCfgSourceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 3), Integer32())
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceIndex.setDescription('The index of clock source.')
hwClockSrcCfgSourceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceDescr.setDescription('The description of clock source.')
hwClockSrcCfgClkEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClkEnable.setDescription('The enable flag of clock source.')
# Per-PLL priorities (.6 - .8).
hwClockSrcCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSystemPriority.setDescription('The priority of system clock source.')
hwClockSrcCfg2M1Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M1Priority.setDescription('The priority of 2msync-1 clock source.')
hwClockSrcCfg2M2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfg2M2Priority.setDescription('The priority of 2msync-2 clock source.')
# SSM quality / state columns (.9 - .12).
hwClockSrcCfgSourceSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceSsm.setDescription('The SSM quality of clock source.')
hwClockSrcCfgSsmSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmSetMode.setDescription('The set mode of SSM.')
hwClockSrcCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSourceState.setDescription('The state of clock source.')
hwClockSrcCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqCheckResult.setDescription('The result of frequency check.')
hwClockSrcCfgSsmInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(512, 8000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmInterval.setDescription('Description.')
hwClockSrcCfgSsmTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2000, 32000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSsmTimeout.setDescription('Description.')
# E1 SA-bit carrying SSM; DEFVAL sa4 (the trailing .clone(4)).
hwClockSrcCfgSabit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 99))).clone(namedValues=NamedValues(("sa4", 4), ("sa5", 5), ("sa6", 6), ("sa7", 7), ("sa8", 8), ("invalid", 99))).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgSabit.setDescription('The SA bit of E1 Port SSM information.')
hwClockSrcCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockId.setDescription('The clockid of clock source.')
hwClockSrcCfgClockIdSetMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("manual", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgClockIdSetMode.setDescription('The set mode of clockid.')
hwClockSrcCfgOutSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16, 99))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16), ("invalid", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutSsm.setDescription('Current output ssm.')
hwClockSrcCfgOutClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 99))).clone(namedValues=NamedValues(("clockid0", 0), ("clockid1", 1), ("clockid2", 2), ("clockid3", 3), ("clockid4", 4), ("clockid5", 5), ("clockid6", 6), ("clockid7", 7), ("clockid8", 8), ("clockid9", 9), ("clockid10", 10), ("clockid11", 11), ("clockid12", 12), ("clockid13", 13), ("clockid14", 14), ("clockid15", 15), ("notsupport", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgOutClockId.setDescription('Current output clockid.')
# Conceptual-row lifecycle control (RFC 2579 RowStatus).
hwClockSrcCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 20), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgRowStatus.setDescription('The row status.')
hwClockSrcCfgFreqDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 21), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgFreqDeviation.setDescription('Freqdeviation value of clock source.')
hwClockSrcCfgPhyState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("cardTypeNotSupport", 0), ("slave", 1), ("master", 2), ("speedNotSupport", 3), ("portDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgPhyState.setDescription('The PHY clock state of ports.')
hwClockSrcCfgNegotiationSlave = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 11, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notSupport", 0), ("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setStatus('current')
if mibBuilder.loadTexts: hwClockSrcCfgNegotiationSlave.setDescription('Set PHY clock state to slave.')
# --- hwClockCesAcrPortCfgTable (…186.1.12) -----------------------------------
# CES ACR clock port configuration, indexed by (parent ifIndex, channel id,
# ifIndex). Read-create columns with RowStatus at .9 allow row creation.
# Generated by pysmi from HUAWEI-CLOCK-MIB; keep tokens verbatim.
hwClockCesAcrPortCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12), )
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgTable.setDescription('The CES ACR clock port config table.')
hwClockCesAcrPortCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrParentIfIndex"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrChannelId"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrIfIndex"))
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgEntry.setDescription('The entry of CES ACR clock port config table.')
# Index columns (.1 - .3).
hwClockCesAcrParentIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrParentIfIndex.setDescription('Indicates the index of the parent interface.')
hwClockCesAcrChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrIfIndex.setDescription('Indicates the interface index.')
hwClockCesAcrPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrPortName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortName.setDescription('Port name.')
hwClockCesAcrChannelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrChannelType.setDescription('Indicates the interface type. The type can be E1/CE1 or T1/CT1.')
hwClockCesAcrSourceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("slave", 2), ("recoveryDomain", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrSourceMode.setDescription('Indicates the clock mode of the interface. master: indicates that the clock works in master mode and uses the internal clock signal. slave: indicates that the clock works in slave mode and uses the line clock signal. recovery-domain: indicates that the clock works in slave mode and uses the recovery domain clock signal. ')
hwClockCesAcrRecoveryDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrRecoveryDomain.setDescription('Indicates the clock recovery domain of the interface. DEFVAL is 0.')
hwClockCesAcrPwDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPwDomain.setDescription('Indicates the clock PW domain of the interface. DEFVAL is 0.')
# Conceptual-row lifecycle control (RFC 2579 RowStatus).
hwClockCesAcrPortCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrPortCfgRowStatus.setDescription('The row status.')
hwClockCesAcrMasterDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrMasterDomain.setDescription('Indicates the clock master domain of the interface. DEFVAL is 0.')
hwClockCesMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 12, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acr", 1), ("dcr", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesMode.setStatus('current')
if mibBuilder.loadTexts: hwClockCesMode.setDescription('Indicates the clock CES recovery mode of the interface. DEFVAL is 0.')
hwClockCesAcrCfgTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13), )
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgTable.setDescription('The CES ACR clock source config table.')
hwClockCesAcrCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrCfgDomain"))
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgEntry.setDescription('The entry of CES ACR clock source config table.')
hwClockCesAcrCfgSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrCfgCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrCfgDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrCfgDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgDescr.setDescription('The description of clock source.')
hwClockCesAcrCfgSyncEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 5), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSyncEnable.setDescription('The enable flag of CES ACR clock source.')
hwClockCesAcrCfgSystemPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSystemPriority.setDescription('The priority of system CES ACR clock source. DEFVAL is 0.')
hwClockCesAcrCfgSsm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 4, 8, 11, 15, 16))).clone(namedValues=NamedValues(("unk", 0), ("prc", 2), ("ssua", 4), ("ssub", 8), ("sec", 11), ("dnu", 15), ("unknown", 16)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSsm.setDescription('The SSM quality of CES ACR clock source.')
hwClockCesAcrCfgClockId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgClockId.setDescription('The clockid of clock source. DEFVAL is 0.')
hwClockCesAcrCfgSourceState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("initial", 0), ("normal", 1), ("abnormal", 2), ("waitwtr", 3), ("holdoff", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgSourceState.setDescription('The state of CES ACR clock source.')
hwClockCesAcrCfgFreqCheckResult = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("abnormal", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgFreqCheckResult.setDescription('The result of CES ACR clock source frequency check.')
hwClockCesAcrCfgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 13, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrCfgRowStatus.setDescription('The row status.')
hwClockCesAcrDomainInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14), )
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoTable.setDescription('The CES ACR domain infomation table.')
hwClockCesAcrDomainInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoSlot"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoCard"), (0, "HUAWEI-CLOCK-MIB", "hwClockCesAcrDomianInfoDomain"))
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomainInfoEntry.setDescription('The entry of CES ACR domain infomation table.')
hwClockCesAcrDomianInfoSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 1), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoSlot.setDescription('The slot ID of CES ACR clock source.')
hwClockCesAcrDomianInfoCard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 2), Integer32())
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoCard.setDescription('The card ID of CES ACR clock source.')
hwClockCesAcrDomianInfoDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)))
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoDomain.setDescription('The recovery domain value of CES ACR clock source.')
hwClockCesAcrDomianInfoMasterPwName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoMasterPwName.setDescription('Port name.')
hwClockCesAcrDomianInfoChannelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoChannelId.setDescription('Indicates the channel ID.')
hwClockCesAcrDomianInfoState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 14, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wait", 2), ("lock", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setStatus('current')
if mibBuilder.loadTexts: hwClockCesAcrDomianInfoState.setDescription('The state of CES ACR clock source.')
hwClockClusterTopoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15), )
if mibBuilder.loadTexts: hwClockClusterTopoTable.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoTable.setDescription('The CES ACR domain infomation table.')
hwClockClusterTopoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1), ).setIndexNames((0, "HUAWEI-CLOCK-MIB", "hwClockClusterSyncType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoType"), (0, "HUAWEI-CLOCK-MIB", "hwClockClusterTopoLinkType"))
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoEntry.setDescription('Description.')
hwClockClusterSyncType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("frequency", 1), ("time", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterSyncType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterSyncType.setDescription('The type of clock inter-chassis sync.')
hwClockClusterTopoType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("interlink", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoType.setDescription('The type of clock inter-chassis topo..')
hwClockClusterTopoLinkType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("bits", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoLinkType.setDescription('The type of clock inter-chassis link.')
hwClockClusterTopoStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 1, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fail", 1), ("success", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setStatus('current')
if mibBuilder.loadTexts: hwClockClusterTopoStatus.setDescription('The status of clock inter-chassis topo.')
hwClockConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10))
hwClockSourceCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1))
hwClockSourceCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 1, 1)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockManageSysGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSourceCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgGroup"), ("HUAWEI-CLOCK-MIB", "hwClockNotificationsGroup"), ("HUAWEI-CLOCK-MIB", "hwClockSysSelGroup"), ("HUAWEI-CLOCK-MIB", "hwClockTrapOidGroup"), ("HUAWEI-CLOCK-MIB", "hwClockLineCfgGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSourceCompliance = hwClockSourceCompliance.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCompliance.setDescription('The compliance of clock MIB.')
hwClockSourceGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2))
hwClockManageSysGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 8)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkWorkMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckEnable"), ("HUAWEI-CLOCK-MIB", "hwClockSourceHoldMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmControl"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckRightRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckLeftRange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceRetrieveMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceForceCloseEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSsmUnknown"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeOutputType"), ("HUAWEI-CLOCK-MIB", "hwClockExtTimeInputType"), ("HUAWEI-CLOCK-MIB", "hwClockTimeUsedSource"), ("HUAWEI-CLOCK-MIB", "hwClockSourceEthClkEnable"), ("HUAWEI-CLOCK-MIB", "hwClockAlarmThresholdFrequencyOffset"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMax"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMin"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffsetMean"), ("HUAWEI-CLOCK-MIB", "hwClockFrequencyOffset"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockManageSysGroup = hwClockManageSysGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockManageSysGroup.setDescription('The manage group.')
hwClockSysSelGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 9)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSelMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSelSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockPllId"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockAttributeOutValue"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockState"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrSlot"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrCard"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrDomain"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrOldMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrNewMasterPwName"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSysSelGroup = hwClockSysSelGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSysSelGroup.setDescription('The system selection group.')
hwClockSourceCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 10)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockCfgPriRvtEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwitchCondition"), ("HUAWEI-CLOCK-MIB", "hwClockCfgWtrTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBadDetect"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsm"), ("HUAWEI-CLOCK-MIB", "hwClockCfgExportEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSwiEnableStatus"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceState"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceDescr"), ("HUAWEI-CLOCK-MIB", "hwClockCfgFreqCheckResult"), ("HUAWEI-CLOCK-MIB", "hwClockCfgHoldOffTime"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1Priority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemPriority"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceSsmSetMode"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSourceS1Id"), ("HUAWEI-CLOCK-MIB", "hwClockCfgClkSourceType"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSsmThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockCfgSystemLockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits0LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockCfgBits1LockOut"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgTodSignal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockSourceCfgGroup = hwClockSourceCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockSourceCfgGroup.setDescription('The clock source group.')
hwClockPortCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 13)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockPortCfgLeftFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgRightFramePri"), ("HUAWEI-CLOCK-MIB", "hwClockPortCfgForceOutS1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockPortCfgGroup = hwClockPortCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockPortCfgGroup.setDescription('The port config of clock source group.')
hwClockBitsCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 14)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockBitsCfgRecvSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSendSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgForceOutS1"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgName"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsType"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgDirection"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSaBit"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgOutputMode"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgSourceId"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgInvalidCond"), ("HUAWEI-CLOCK-MIB", "hwClockBitsCfgBitsPortType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockBitsCfgGroup = hwClockBitsCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockBitsCfgGroup.setDescription('The BITS clock source group.')
hwClockTrapOidGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 15)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLastSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockCurSourceName"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOldLockMode"), ("HUAWEI-CLOCK-MIB", "hwClockChassisId"), ("HUAWEI-CLOCK-MIB", "hwClockOldSourceState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockTrapOidGroup = hwClockTrapOidGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockTrapOidGroup.setDescription('The clock trap group.')
hwClockNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 16)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockSourceSwitch"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceStateResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheck"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFreqCheckResume"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceOutputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesAcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceValid"), ("HUAWEI-CLOCK-MIB", "hwClockInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockClusterTopoFail"), ("HUAWEI-CLOCK-MIB", "hwClockNotInLockedMode"), ("HUAWEI-CLOCK-MIB", "hwClockSourceSysClkLockModeChange"), ("HUAWEI-CLOCK-MIB", "hwClockSourceFailed"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThreshold"), ("HUAWEI-CLOCK-MIB", "hwClockSourceInputBelowThresholdResume"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrMasterPwChange"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFail"), ("HUAWEI-CLOCK-MIB", "hwClockCesDcrLockFailResume"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLos"), ("HUAWEI-CLOCK-MIB", "hwClockSsmPktLosResume"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockNotificationsGroup = hwClockNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockNotificationsGroup.setDescription('This is the group of clock notification.')
hwClockLineCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 186, 10, 2, 17)).setObjects(("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgRecvS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgSendS1"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgCardId"), ("HUAWEI-CLOCK-MIB", "hwClockLineClkCfgPortId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwClockLineCfgGroup = hwClockLineCfgGroup.setStatus('current')
if mibBuilder.loadTexts: hwClockLineCfgGroup.setDescription('The line clock group..')
mibBuilder.exportSymbols("HUAWEI-CLOCK-MIB", PYSNMP_MODULE_ID=hwClockMIB, hwClockCfgSourceS1Id=hwClockCfgSourceS1Id, hwClockBitsCfgFrameFormat=hwClockBitsCfgFrameFormat, hwClockCfgSourceId=hwClockCfgSourceId, hwClockAttributeSsmControl=hwClockAttributeSsmControl, hwClockCesAcrDomianInfoDomain=hwClockCesAcrDomianInfoDomain, hwClockSrcCfgNegotiationSlave=hwClockSrcCfgNegotiationSlave, hwClockCurSourceName=hwClockCurSourceName, hwClockSourceInputBelowThresholdResume=hwClockSourceInputBelowThresholdResume, hwClockSrcCfgFreqCheckResult=hwClockSrcCfgFreqCheckResult, hwClockBitsCfgTodSignal=hwClockBitsCfgTodSignal, hwClockSrcCfgSabit=hwClockSrcCfgSabit, hwClockSrcSelSrcName=hwClockSrcSelSrcName, hwClockCesAcrCfgCard=hwClockCesAcrCfgCard, hwClockCesAcrCfgSystemPriority=hwClockCesAcrCfgSystemPriority, hwClockAttributeSysClkRunMode=hwClockAttributeSysClkRunMode, hwClockCesAcrParentIfIndex=hwClockCesAcrParentIfIndex, hwClockCesAcrPortCfgTable=hwClockCesAcrPortCfgTable, hwClockSourceEthClkEnable=hwClockSourceEthClkEnable, hwClockLineClkCfgSlotIndex=hwClockLineClkCfgSlotIndex, hwClockInLockedMode=hwClockInLockedMode, hwClockCesAcrMasterDomain=hwClockCesAcrMasterDomain, hwClockCesAcrCfgSyncEnable=hwClockCesAcrCfgSyncEnable, hwClockPortCfgLeftFramePri=hwClockPortCfgLeftFramePri, hwClockCfgBadDetect=hwClockCfgBadDetect, hwClockSrcCfgSourceTypeIndex=hwClockSrcCfgSourceTypeIndex, hwClockSrcCfgSystemPriority=hwClockSrcCfgSystemPriority, hwClockCesAcrCfgRowStatus=hwClockCesAcrCfgRowStatus, hwClockCfgSourceIndex=hwClockCfgSourceIndex, hwClockSrcCfgClockId=hwClockSrcCfgClockId, hwClockSourceSwitch=hwClockSourceSwitch, hwClockLineClkCfgTable=hwClockLineClkCfgTable, hwClockSrcCfg2M2Priority=hwClockSrcCfg2M2Priority, hwClockSourceValid=hwClockSourceValid, hwClockCesMode=hwClockCesMode, hwClockCfgClkSourceType=hwClockCfgClkSourceType, hwClockBitsCfgDirection=hwClockBitsCfgDirection, hwClockBitsCfgInvalidCond=hwClockBitsCfgInvalidCond, hwClockCfgSwitchCondition=hwClockCfgSwitchCondition, 
hwClockCesAcrCfgDescr=hwClockCesAcrCfgDescr, hwClockAttribute2M1MaxOutSsm=hwClockAttribute2M1MaxOutSsm, hwClockCesAcrDomianInfoMasterPwName=hwClockCesAcrDomianInfoMasterPwName, hwClockAlarmThresholdFrequencyOffset=hwClockAlarmThresholdFrequencyOffset, hwClockCesAcrCfgSlot=hwClockCesAcrCfgSlot, hwClockChassisId=hwClockChassisId, hwClockGlobalObjects=hwClockGlobalObjects, hwClockBitsCfgSendSaBit=hwClockBitsCfgSendSaBit, hwClockSourceFreqCheckLeftRange=hwClockSourceFreqCheckLeftRange, hwClockSrcCfgFreqDeviation=hwClockSrcCfgFreqDeviation, hwClockSourceCompliances=hwClockSourceCompliances, hwClockClusterTopoType=hwClockClusterTopoType, hwClockSrcCfgSourceSsm=hwClockSrcCfgSourceSsm, hwClockCesAcrDomianInfoSlot=hwClockCesAcrDomianInfoSlot, hwClockSourceCfgGroup=hwClockSourceCfgGroup, hwClockCesDcrOldMasterPwName=hwClockCesDcrOldMasterPwName, hwClockOldSourceState=hwClockOldSourceState, hwClockSourceCompliance=hwClockSourceCompliance, hwClockMIB=hwClockMIB, hwClockLineClkCfgRecvS1=hwClockLineClkCfgRecvS1, hwClockPortCfgIfIndex=hwClockPortCfgIfIndex, hwClockCfgSourceDescr=hwClockCfgSourceDescr, hwClockExtTimeInputType=hwClockExtTimeInputType, hwClockCfgSwiEnableStatus=hwClockCfgSwiEnableStatus, hwClockLineCfgGroup=hwClockLineCfgGroup, hwClockManageObjects=hwClockManageObjects, hwClockBitsCfgSaBit=hwClockBitsCfgSaBit, hwClockSourceFreqCheckRightRange=hwClockSourceFreqCheckRightRange, hwClockSrcSelMode=hwClockSrcSelMode, hwClockClusterTopoTable=hwClockClusterTopoTable, hwClockFrequencyOffset=hwClockFrequencyOffset, hwClockManageSysGroup=hwClockManageSysGroup, hwClockSourceFreqCheckEnable=hwClockSourceFreqCheckEnable, hwClockAttribute2M2MaxOutSsm=hwClockAttribute2M2MaxOutSsm, hwClockCesAcrCfgFreqCheckResult=hwClockCesAcrCfgFreqCheckResult, hwClockCesAcrDomainInfoTable=hwClockCesAcrDomainInfoTable, hwClockCesAcrDomianInfoChannelId=hwClockCesAcrDomianInfoChannelId, hwClockSrcCfgClockIdSetMode=hwClockSrcCfgClockIdSetMode, hwClockSourceSelType=hwClockSourceSelType, 
hwClockCfgBits0Priority=hwClockCfgBits0Priority, hwClockSrcCfgSsmSetMode=hwClockSrcCfgSsmSetMode, hwClockClusterTopoFail=hwClockClusterTopoFail, hwClockPllId=hwClockPllId, hwClockSrcCfg2M1Priority=hwClockSrcCfg2M1Priority, hwClockSourceHoldMode=hwClockSourceHoldMode, hwClockSrcSelTable=hwClockSrcSelTable, hwClockLineClkCfgCardId=hwClockLineClkCfgCardId, hwClockSsmPktLosResume=hwClockSsmPktLosResume, hwClockSourceSelChassisIndex=hwClockSourceSelChassisIndex, hwClockAttributeExtendSsmControl=hwClockAttributeExtendSsmControl, hwClockSourceOldLockMode=hwClockSourceOldLockMode, hwClockPortCfgRightFramePri=hwClockPortCfgRightFramePri, hwClockCesAcrChannelId=hwClockCesAcrChannelId, hwClockCesAcrCfgSsm=hwClockCesAcrCfgSsm, hwClockSourceSelMode=hwClockSourceSelMode, hwClockSrcCfgSourceDescr=hwClockSrcCfgSourceDescr, hwClockTrapOid=hwClockTrapOid, hwClockAttributeEntry=hwClockAttributeEntry, hwClockCesAcrRecoveryDomain=hwClockCesAcrRecoveryDomain, hwClockCesAcrSlot=hwClockCesAcrSlot, hwClockFrequencyOffsetMax=hwClockFrequencyOffsetMax, hwClockSrcCfgRowStatus=hwClockSrcCfgRowStatus, hwClockCfgSourceState=hwClockCfgSourceState, hwClockBitsCfgOutputMode=hwClockBitsCfgOutputMode, hwClockBitsCfgBitsIndex=hwClockBitsCfgBitsIndex, hwClockFrequencyOffsetMin=hwClockFrequencyOffsetMin, hwClockCfgChassisIndex=hwClockCfgChassisIndex, hwClockLastSourceName=hwClockLastSourceName, hwClockCesAcrNewMasterPwName=hwClockCesAcrNewMasterPwName, hwClockAttributeHoldOffTime=hwClockAttributeHoldOffTime, hwClockClusterTopoLinkType=hwClockClusterTopoLinkType, hwClockCesAcrPortName=hwClockCesAcrPortName, hwClockPortCfgForceOutS1=hwClockPortCfgForceOutS1, hwClockSourceInputBelowThreshold=hwClockSourceInputBelowThreshold, hwClockSrcCfgTable=hwClockSrcCfgTable, hwClockCesAcrChannelType=hwClockCesAcrChannelType, hwClockBitsCfgSourceId=hwClockBitsCfgSourceId, hwClockSourceSelSourceId=hwClockSourceSelSourceId, hwClockAttributeLtiSquelch=hwClockAttributeLtiSquelch, 
hwClockSourceSysClkWorkMode=hwClockSourceSysClkWorkMode, hwClockCesDcrSlot=hwClockCesDcrSlot, hwClockCfgBits1LockOut=hwClockCfgBits1LockOut, hwClockSrcCfgClkEnable=hwClockSrcCfgClkEnable, hwClockConformance=hwClockConformance, hwClockSysSelGroup=hwClockSysSelGroup, hwClockNotifications=hwClockNotifications, hwClockSourceSelEntry=hwClockSourceSelEntry, hwClockCesAcrDomain=hwClockCesAcrDomain, hwClockCesDcrMasterPwChange=hwClockCesDcrMasterPwChange, hwClockCesAcrCard=hwClockCesAcrCard, hwClockSrcCfgPhyState=hwClockSrcCfgPhyState, hwClockSourceCfgTable=hwClockSourceCfgTable, hwClockNotInLockedMode=hwClockNotInLockedMode, hwClockSourceSsmUnknown=hwClockSourceSsmUnknown, hwClockBitsCfgChassisIndex=hwClockBitsCfgChassisIndex, hwClockCesDcrLockFail=hwClockCesDcrLockFail, hwClockCesAcrPortCfgEntry=hwClockCesAcrPortCfgEntry, hwClockPortCfgTable=hwClockPortCfgTable, hwClockSourceSsmControl=hwClockSourceSsmControl, hwClockCesDcrCard=hwClockCesDcrCard, hwClockSrcTraceSrcName=hwClockSrcTraceSrcName, hwClockSrcCfgSourceState=hwClockSrcCfgSourceState, hwClockBitsCfgForceOutS1=hwClockBitsCfgForceOutS1, hwClockCfgSourceSsm=hwClockCfgSourceSsm, hwClockBitsCfgBitsPortType=hwClockBitsCfgBitsPortType, hwClockLineClkCfgPortId=hwClockLineClkCfgPortId, hwClockCesAcrLockFail=hwClockCesAcrLockFail, hwClockSrcSelChassisIndex=hwClockSrcSelChassisIndex, hwClockAttributeWtrTime=hwClockAttributeWtrTime, hwClockAttributeFreqCheckEnable=hwClockAttributeFreqCheckEnable, hwClockCfgPriRvtEnableStatus=hwClockCfgPriRvtEnableStatus, hwClockLineClkCfgSendS1=hwClockLineClkCfgSendS1, hwClockSourceStateResume=hwClockSourceStateResume, hwClockSrcCfgChassisIndex=hwClockSrcCfgChassisIndex, hwClockCesAcrLockFailResume=hwClockCesAcrLockFailResume, hwClockCesAcrDomianInfoState=hwClockCesAcrDomianInfoState, hwClockExtTimeOutputType=hwClockExtTimeOutputType, hwClockSourceOutputBelowThreshold=hwClockSourceOutputBelowThreshold, hwClockCesAcrMasterPwChange=hwClockCesAcrMasterPwChange, 
hwClockAttributeInputThreshold=hwClockAttributeInputThreshold, hwClockCesAcrCfgSourceState=hwClockCesAcrCfgSourceState, hwClockSrcCfgEntry=hwClockSrcCfgEntry, hwClockCfgHoldOffTime=hwClockCfgHoldOffTime, hwClockSourceCfgEntry=hwClockSourceCfgEntry, hwClockPortCfgEntry=hwClockPortCfgEntry, hwClockAttributeRetrieveMode=hwClockAttributeRetrieveMode, hwClockCfgSsmThreshold=hwClockCfgSsmThreshold, hwClockSourceFreqCheck=hwClockSourceFreqCheck, hwClockSourceFailed=hwClockSourceFailed, hwClockClusterSyncType=hwClockClusterSyncType, hwClockCesAcrDomianInfoCard=hwClockCesAcrDomianInfoCard, hwClockCfgSystemLockOut=hwClockCfgSystemLockOut, hwClockCesAcrLockState=hwClockCesAcrLockState, hwClockCesAcrCfgClockId=hwClockCesAcrCfgClockId, hwClockLineClkCfgEntry=hwClockLineClkCfgEntry, hwClockSrcSelEntry=hwClockSrcSelEntry, hwClockAttributeSysMaxOutSsm=hwClockAttributeSysMaxOutSsm, hwClockCesAcrPortCfgRowStatus=hwClockCesAcrPortCfgRowStatus, hwClockSourceSysClkLockModeChange=hwClockSourceSysClkLockModeChange, hwClockTrapOidGroup=hwClockTrapOidGroup, hwClockSsmPktLos=hwClockSsmPktLos, hwClockAttributeTable=hwClockAttributeTable, hwClockSourceOutputBelowThresholdResume=hwClockSourceOutputBelowThresholdResume, hwClockSrcCfgOutClockId=hwClockSrcCfgOutClockId, hwClockLineClkCfgChassisIndex=hwClockLineClkCfgChassisIndex, hwClockSrcCfgSsmTimeout=hwClockSrcCfgSsmTimeout, hwClockCesAcrCfgDomain=hwClockCesAcrCfgDomain, hwClockBitsCfgGroup=hwClockBitsCfgGroup, hwClockCfgSourceSsmSetMode=hwClockCfgSourceSsmSetMode, hwClockCfgBits1Priority=hwClockCfgBits1Priority, hwClockBitsCfgRecvSaBit=hwClockBitsCfgRecvSaBit, hwClockSourceStateChange=hwClockSourceStateChange, hwClockAttributeOutThreshold=hwClockAttributeOutThreshold, hwClockClusterTopoStatus=hwClockClusterTopoStatus, hwClockLineCfgSoureId=hwClockLineCfgSoureId, hwClockAttributeOutValue=hwClockAttributeOutValue, hwClockAttributeSysClkLockMode=hwClockAttributeSysClkLockMode, hwClockCesAcrOldMasterPwName=hwClockCesAcrOldMasterPwName, 
hwClockCesDcrLockState=hwClockCesDcrLockState, hwClockCfgSystemPriority=hwClockCfgSystemPriority, hwClockClusterTopoEntry=hwClockClusterTopoEntry, hwClockCesAcrCfgTable=hwClockCesAcrCfgTable, hwClockClusterTopoFailResume=hwClockClusterTopoFailResume, hwClockCfgFreqCheckResult=hwClockCfgFreqCheckResult, hwClockSrcSelType=hwClockSrcSelType, hwClockBitsCfgInputMode=hwClockBitsCfgInputMode, hwClockAttributeInternalClockId=hwClockAttributeInternalClockId, hwClockSrcCfgOutSsm=hwClockSrcCfgOutSsm, hwClockAttributeChassisIndex=hwClockAttributeChassisIndex, hwClockNotificationsGroup=hwClockNotificationsGroup, hwClockSrcCfgSsmInterval=hwClockSrcCfgSsmInterval, hwClockCesAcrIfIndex=hwClockCesAcrIfIndex, hwClockSourceForceCloseEnableStatus=hwClockSourceForceCloseEnableStatus, hwClockSourceFreqCheckResume=hwClockSourceFreqCheckResume, hwClockSourceGroups=hwClockSourceGroups, hwClockCfgBits0LockOut=hwClockCfgBits0LockOut, hwClockCesDcrDomain=hwClockCesDcrDomain, hwClockTimeUsedSource=hwClockTimeUsedSource, hwClockCfgWtrTime=hwClockCfgWtrTime, hwClockCfgExportEnableStatus=hwClockCfgExportEnableStatus, hwClockBitsCfgEntry=hwClockBitsCfgEntry, hwClockCesAcrDomainInfoEntry=hwClockCesAcrDomainInfoEntry, hwClockFrequencyOffsetMean=hwClockFrequencyOffsetMean, hwClockBitsCfgName=hwClockBitsCfgName, hwClockBitsCfgBitsType=hwClockBitsCfgBitsType, hwClockSrcCfgSourceIndex=hwClockSrcCfgSourceIndex, hwClockCesDcrLockFailResume=hwClockCesDcrLockFailResume, hwClockBitsCfgTable=hwClockBitsCfgTable, hwClockAttributeTodProtocol=hwClockAttributeTodProtocol, hwClockCesAcrSourceMode=hwClockCesAcrSourceMode, hwClockSourceRetrieveMode=hwClockSourceRetrieveMode, hwClockCesDcrNewMasterPwName=hwClockCesDcrNewMasterPwName, hwClockCesAcrCfgEntry=hwClockCesAcrCfgEntry, hwClockSourceSelTable=hwClockSourceSelTable, hwClockPortCfgGroup=hwClockPortCfgGroup, hwClockCesAcrPwDomain=hwClockCesAcrPwDomain)
| true | true |
f7306fd1c3e0405e0d613485262300727fcf5a80 | 340 | py | Python | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | setup.py | ScottHMcKean/gfracture | 63eec9fd4cf4234149b6a6656ec538d5a6ddcb41 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='gfracture',
version='0.1',
description='Fracture segmentation and trace analysis',
url='https://github.com/ScottHMcKean/gfracture',
author='Scott McKean',
author_email='scott.mckean@ucalgary.ca',
license='MIT',
packages=['gfracture'],
zip_safe=False) | 30.909091 | 61 | 0.667647 | from setuptools import setup
setup(name='gfracture',
version='0.1',
description='Fracture segmentation and trace analysis',
url='https://github.com/ScottHMcKean/gfracture',
author='Scott McKean',
author_email='scott.mckean@ucalgary.ca',
license='MIT',
packages=['gfracture'],
zip_safe=False) | true | true |
f7307105658f4dd57c433ca83570b1d9ad3a1448 | 79 | py | Python | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 1,929 | 2016-02-14T08:30:38.000Z | 2022-03-31T03:00:35.000Z | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 304 | 2016-02-18T15:52:22.000Z | 2022-03-31T18:06:54.000Z | src/textacy/tokenizers/__init__.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 285 | 2016-03-20T04:25:08.000Z | 2022-03-24T11:31:17.000Z | from .char_ngrams import CharNgramsTokenizer
from .terms import TermsTokenizer
| 26.333333 | 44 | 0.873418 | from .char_ngrams import CharNgramsTokenizer
from .terms import TermsTokenizer
| true | true |
f73071c7b77b19c3949f7425141f76374ee2f03f | 1,667 | py | Python | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | null | null | null | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 23 | 2020-07-16T15:40:35.000Z | 2021-12-13T13:59:30.000Z | selfswab/migrations/0012_auto_20201202_1235.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
] | 1 | 2021-02-24T04:58:40.000Z | 2021-02-24T04:58:40.000Z | # Generated by Django 3.1 on 2020-12-02 10:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add demographic and sync-tracking fields to SelfswabRegistration.

    Every new column is nullable or carries a default, so existing rows
    need no data backfill.
    """

    dependencies = [
        ("selfswab", "0011_selfswabtest_should_sync"),
    ]
    operations = [
        # Age bracket stored as the bracket label itself (max 5 chars).
        migrations.AddField(
            model_name="selfswabregistration",
            name="age",
            field=models.CharField(
                choices=[
                    ("<18", "<18"),
                    ("18-39", "18-39"),
                    ("40-65", "40-65"),
                    (">65", ">65"),
                ],
                max_length=5,
                null=True,
            ),
        ),
        # Self-reported gender; "not_say" presumably means "prefer not
        # to say" -- TODO confirm against the registration flow.
        migrations.AddField(
            model_name="selfswabregistration",
            name="gender",
            field=models.CharField(
                choices=[
                    ("Male", "Male"),
                    ("Female", "Female"),
                    ("Other", "Other"),
                    ("not_say", "not_say"),
                ],
                max_length=10,
                null=True,
            ),
        ),
        # New rows sync by default; parallels the SelfSwabTest flag added
        # by migration 0011 (see dependency above).
        migrations.AddField(
            model_name="selfswabregistration",
            name="should_sync",
            field=models.BooleanField(default=True),
        ),
        # Set once at insert time.
        migrations.AddField(
            model_name="selfswabregistration",
            name="timestamp",
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        # Refreshed on every save; indexed -- likely for incremental
        # sync queries (TODO confirm).
        migrations.AddField(
            model_name="selfswabregistration",
            name="updated_at",
            field=models.DateTimeField(auto_now=True, db_index=True),
        ),
    ]
| 28.741379 | 74 | 0.467307 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("selfswab", "0011_selfswabtest_should_sync"),
]
operations = [
migrations.AddField(
model_name="selfswabregistration",
name="age",
field=models.CharField(
choices=[
("<18", "<18"),
("18-39", "18-39"),
("40-65", "40-65"),
(">65", ">65"),
],
max_length=5,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="gender",
field=models.CharField(
choices=[
("Male", "Male"),
("Female", "Female"),
("Other", "Other"),
("not_say", "not_say"),
],
max_length=10,
null=True,
),
),
migrations.AddField(
model_name="selfswabregistration",
name="should_sync",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="selfswabregistration",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="selfswabregistration",
name="updated_at",
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| true | true |
f7307275427014a14072d16f4f9a637d0116fbc9 | 19,509 | py | Python | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_api.py | onecloud/gbp-rally | 7589b1788c4de26bb66c531ef340ba080754f8c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for db.api layer."""
from six import moves
from rally.common import db
from rally import consts
from rally import exceptions
from tests.unit import test
class TasksTestCase(test.DBTestCase):
    """CRUD tests for the task DB-API layer.

    A single deployment fixture is created in setUp; tasks created by the
    ``_create_task`` helper default to that deployment's uuid.
    """

    def setUp(self):
        super(TasksTestCase, self).setUp()
        self.deploy = db.deployment_create({})

    def _get_task(self, uuid):
        # Thin wrapper so tests read symmetrically with _create_task.
        return db.task_get(uuid)

    def _create_task(self, values=None):
        # Fill in the fixture deployment unless the caller supplied one.
        values = values or {}
        if "deployment_uuid" not in values:
            values["deployment_uuid"] = self.deploy["uuid"]
        return db.task_create(values)

    def test_task_get_not_found(self):
        self.assertRaises(exceptions.TaskNotFound,
                          db.task_get, "f885f435-f6ca-4f3e-9b3e-aeb6837080f2")

    def test_task_create(self):
        task = self._create_task()
        db_task = self._get_task(task["uuid"])
        self.assertIsNotNone(db_task["uuid"])
        self.assertIsNotNone(db_task["id"])
        # Freshly created tasks must start in INIT state.
        self.assertEqual(db_task["status"], consts.TaskStatus.INIT)

    def test_task_create_without_uuid(self):
        # A caller-supplied uuid must be honoured, not regenerated.
        _uuid = "19be8589-48b0-4af1-a369-9bebaaa563ab"
        task = self._create_task({"uuid": _uuid})
        db_task = self._get_task(task["uuid"])
        self.assertEqual(db_task["uuid"], _uuid)

    def test_task_update(self):
        task = self._create_task({})
        db.task_update(task["uuid"], {"status": consts.TaskStatus.FAILED})
        db_task = self._get_task(task["uuid"])
        self.assertEqual(db_task["status"], consts.TaskStatus.FAILED)

    def test_task_update_not_found(self):
        self.assertRaises(exceptions.TaskNotFound,
                          db.task_update,
                          "7ae1da26-feaa-4213-8208-76af2857a5ab", {})

    def test_task_update_all_stats(self):
        # Every status in the enum must round-trip through task_update.
        _uuid = self._create_task({})["uuid"]
        for status in consts.TaskStatus:
            db.task_update(_uuid, {"status": status})
            db_task = self._get_task(_uuid)
            self.assertEqual(db_task["status"], status)

    def test_task_list_empty(self):
        self.assertEqual([], db.task_list())

    def test_task_list(self):
        # Three INIT tasks and three FINISHED tasks; verify filtering by
        # status, by deployment, and behaviour after a delete.
        INIT = consts.TaskStatus.INIT
        task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
        FINISHED = consts.TaskStatus.FINISHED
        task_finished = sorted(self._create_task(
            {"status": FINISHED,
             "deployment_uuid": self.deploy["uuid"]}
        )["uuid"] for i in moves.range(3))
        task_all = sorted(task_init + task_finished)

        def get_uuids(status=None, deployment=None):
            # Return a sorted list of uuids matching the given filters.
            tasks = db.task_list(status=status, deployment=deployment)
            return sorted(task["uuid"] for task in tasks)

        self.assertEqual(task_all, get_uuids(None))
        self.assertEqual(task_init, get_uuids(status=INIT))
        self.assertEqual(task_finished, get_uuids(status=FINISHED))
        # Filtering by an unknown deployment raises rather than returning [].
        self.assertRaises(exceptions.DeploymentNotFound,
                          get_uuids, deployment="non-existing-deployment")
        # A deleted task must disappear from subsequent listings.
        deleted_task_uuid = task_finished.pop()
        db.task_delete(deleted_task_uuid)
        self.assertEqual(task_init, get_uuids(INIT))
        self.assertEqual(sorted(task_finished), get_uuids(FINISHED))

    def test_task_delete(self):
        task1, task2 = self._create_task()["uuid"], self._create_task()["uuid"]
        db.task_delete(task1)
        # Only the deleted task is gone; its sibling survives.
        self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
        self.assertEqual(task2, self._get_task(task2)["uuid"])

    def test_task_delete_not_found(self):
        self.assertRaises(exceptions.TaskNotFound,
                          db.task_delete,
                          "da6f820c-b133-4b9f-8534-4c3bcc40724b")

    def test_task_delete_with_results(self):
        # Deleting a task must cascade to its stored results.
        task_id = self._create_task()["uuid"]
        db.task_result_create(task_id,
                              {task_id: task_id},
                              {task_id: task_id})
        res = db.task_result_get_all_by_uuid(task_id)
        self.assertEqual(len(res), 1)
        db.task_delete(task_id)
        res = db.task_result_get_all_by_uuid(task_id)
        self.assertEqual(len(res), 0)

    def test_task_delete_by_uuid_and_status(self):
        # Delete with a status guard succeeds when the status matches.
        values = {
            "status": consts.TaskStatus.FINISHED,
        }
        task1 = self._create_task(values=values)["uuid"]
        task2 = self._create_task(values=values)["uuid"]
        db.task_delete(task1, status=consts.TaskStatus.FINISHED)
        self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
        self.assertEqual(task2, self._get_task(task2)["uuid"])

    def test_task_delete_by_uuid_and_status_invalid(self):
        # Status guard mismatch (task is INIT, guard says FINISHED) fails.
        task = self._create_task(
            values={"status": consts.TaskStatus.INIT})["uuid"]
        self.assertRaises(exceptions.TaskInvalidStatus, db.task_delete, task,
                          status=consts.TaskStatus.FINISHED)

    def test_task_delete_by_uuid_and_status_not_found(self):
        self.assertRaises(exceptions.TaskNotFound,
                          db.task_delete,
                          "fcd0483f-a405-44c4-b712-99c9e52254eb",
                          status=consts.TaskStatus.FINISHED)

    def test_task_result_get_all_by_uuid(self):
        # Results are keyed per task; each task sees only its own result.
        task1 = self._create_task()["uuid"]
        task2 = self._create_task()["uuid"]
        for task_id in (task1, task2):
            db.task_result_create(task_id,
                                  {task_id: task_id},
                                  {task_id: task_id})
        for task_id in (task1, task2):
            res = db.task_result_get_all_by_uuid(task_id)
            data = {task_id: task_id}
            self.assertEqual(len(res), 1)
            self.assertEqual(res[0]["key"], data)
            self.assertEqual(res[0]["data"], data)

    def test_task_get_detailed(self):
        task1 = self._create_task()
        key = {"name": "atata"}
        data = {"a": "b", "c": "d"}
        db.task_result_create(task1["uuid"], key, data)
        task1_full = db.task_get_detailed(task1["uuid"])
        results = task1_full["results"]
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]["key"], key)
        self.assertEqual(results[0]["data"], data)

    def test_task_get_detailed_last(self):
        # task_get_detailed_last takes no uuid: it returns the most
        # recently created task with its results attached.
        task1 = self._create_task()
        key = {"name": "atata"}
        data = {"a": "b", "c": "d"}
        db.task_result_create(task1["uuid"], key, data)
        task1_full = db.task_get_detailed_last()
        results = task1_full["results"]
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]["key"], key)
        self.assertEqual(results[0]["data"], data)
class DeploymentTestCase(test.DBTestCase):
    """CRUD and filtering tests for the deployment DB-API layer."""

    def test_deployment_create(self):
        deploy = db.deployment_create({"config": {"opt": "val"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploy["uuid"], deploys[0]["uuid"])
        # New deployments start in DEPLOY_INIT state.
        self.assertEqual(deploy["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy["config"], {"opt": "val"})

    def test_deployment_create_several(self):
        # Create a deployment
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 0)
        deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploy_one["uuid"], deploys[0]["uuid"])
        self.assertEqual(deploy_one["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy_one["config"], {"opt1": "val1"})
        # Create another deployment and make sure the two are distinct
        deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 2)
        self.assertEqual(set([deploy_one["uuid"], deploy_two["uuid"]]),
                         set([deploy["uuid"] for deploy in deploys]))
        self.assertNotEqual(deploy_one["uuid"], deploy_two["uuid"])
        self.assertEqual(deploy_two["status"], consts.DeployStatus.DEPLOY_INIT)
        self.assertEqual(deploy_two["config"], {"opt2": "val2"})

    def test_deployment_update(self):
        deploy = db.deployment_create({})
        self.assertEqual(deploy["config"], {})
        update_deploy = db.deployment_update(deploy["uuid"],
                                             {"config": {"opt": "val"}})
        # The update is reflected both in the returned row and on re-fetch.
        self.assertEqual(update_deploy["uuid"], deploy["uuid"])
        self.assertEqual(update_deploy["config"], {"opt": "val"})
        get_deploy = db.deployment_get(deploy["uuid"])
        self.assertEqual(get_deploy["uuid"], deploy["uuid"])
        self.assertEqual(get_deploy["config"], {"opt": "val"})

    def test_deployment_update_several(self):
        # Create a deployment and update it
        deploy_one = db.deployment_create({})
        self.assertEqual(deploy_one["config"], {})
        update_deploy_one = db.deployment_update(
            deploy_one["uuid"], {"config": {"opt1": "val1"}})
        self.assertEqual(update_deploy_one["uuid"], deploy_one["uuid"])
        self.assertEqual(update_deploy_one["config"], {"opt1": "val1"})
        get_deploy_one = db.deployment_get(deploy_one["uuid"])
        self.assertEqual(get_deploy_one["uuid"], deploy_one["uuid"])
        self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
        # Create and update another deployment; the first must be untouched
        deploy_two = db.deployment_create({})
        update_deploy_two = db.deployment_update(
            deploy_two["uuid"], {"config": {"opt2": "val2"}})
        self.assertEqual(update_deploy_two["uuid"], deploy_two["uuid"])
        self.assertEqual(update_deploy_two["config"], {"opt2": "val2"})
        get_deploy_one_again = db.deployment_get(deploy_one["uuid"])
        self.assertEqual(get_deploy_one_again["uuid"], deploy_one["uuid"])
        self.assertEqual(get_deploy_one_again["config"], {"opt1": "val1"})

    def test_deployment_get(self):
        deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
        deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
        get_deploy_one = db.deployment_get(deploy_one["uuid"])
        get_deploy_two = db.deployment_get(deploy_two["uuid"])
        self.assertNotEqual(get_deploy_one["uuid"], get_deploy_two["uuid"])
        self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
        self.assertEqual(get_deploy_two["config"], {"opt2": "val2"})

    def test_deployment_get_not_found(self):
        self.assertRaises(exceptions.DeploymentNotFound,
                          db.deployment_get,
                          "852e932b-9552-4b2d-89e3-a5915780a5e3")

    def test_deployment_list(self):
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({})
        deploys = db.deployment_list()
        self.assertEqual(sorted([deploy_one["uuid"], deploy_two["uuid"]]),
                         sorted([deploy["uuid"] for deploy in deploys]))

    def test_deployment_list_with_status_and_name(self):
        # Three deployments with differing status/name; exercise each filter.
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({
            "config": {},
            "status": consts.DeployStatus.DEPLOY_FAILED,
        })
        deploy_three = db.deployment_create({"name": "deployment_name"})
        deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_INIT)
        deploys.sort(key=lambda x: x["id"])
        self.assertEqual(len(deploys), 2)
        self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
        deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_FAILED)
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploys[0]["uuid"], deploy_two["uuid"])
        deploys = db.deployment_list(
            status=consts.DeployStatus.DEPLOY_FINISHED)
        self.assertEqual(len(deploys), 0)
        deploys = db.deployment_list(name="deployment_name")
        self.assertEqual(deploys[0]["uuid"], deploy_three["uuid"])
        self.assertEqual(len(deploys), 1)

    def test_deployment_list_parent(self):
        # Subdeployments are hidden from the default listing and only
        # returned when filtering by their parent's uuid.
        deploy = db.deployment_create({})
        subdeploy1 = db.deployment_create({"parent_uuid": deploy.uuid})
        subdeploy2 = db.deployment_create({"parent_uuid": deploy.uuid})
        self.assertEqual([deploy.uuid], [d.uuid for d in db.deployment_list()])
        subdeploys = db.deployment_list(parent_uuid=deploy.uuid)
        self.assertEqual(set([subdeploy1.uuid, subdeploy2.uuid]),
                         set([d.uuid for d in subdeploys]))

    def test_deployment_delete(self):
        deploy_one = db.deployment_create({})
        deploy_two = db.deployment_create({})
        db.deployment_delete(deploy_two["uuid"])
        deploys = db.deployment_list()
        self.assertEqual(len(deploys), 1)
        self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])

    def test_deployment_delete_not_found(self):
        self.assertRaises(exceptions.DeploymentNotFound,
                          db.deployment_delete,
                          "5f2883be-46c8-4c4b-a4fe-988ad0c6b20a")

    def test_deployment_delete_is_busy(self):
        # A deployment that still owns resources must refuse deletion.
        deployment = db.deployment_create({})
        db.resource_create({"deployment_uuid": deployment["uuid"]})
        db.resource_create({"deployment_uuid": deployment["uuid"]})
        self.assertRaises(exceptions.DeploymentIsBusy, db.deployment_delete,
                          deployment["uuid"])
class ResourceTestCase(test.DBTestCase):
    """CRUD tests for the resource DB-API layer.

    Each test creates its own deployment fixture because resources are
    always scoped to a deployment via ``deployment_uuid``.
    """

    def test_create(self):
        deployment = db.deployment_create({})
        resource = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "fakeprovider",
            "type": "faketype",
        })
        resources = db.resource_get_all(deployment["uuid"])
        self.assertTrue(resource["id"])
        self.assertEqual(len(resources), 1)
        # Bug fix: this used assertTrue(a, b), which treats the second
        # argument as a failure *message* and never compares the ids.
        self.assertEqual(resource["id"], resources[0]["id"])
        self.assertEqual(resource["deployment_uuid"], deployment["uuid"])
        self.assertEqual(resource["provider_name"], "fakeprovider")
        self.assertEqual(resource["type"], "faketype")

    def test_delete(self):
        deployment = db.deployment_create({})
        res = db.resource_create({"deployment_uuid": deployment["uuid"]})
        db.resource_delete(res["id"])
        resources = db.resource_get_all(deployment["uuid"])
        self.assertEqual(len(resources), 0)

    def test_delete_not_found(self):
        self.assertRaises(exceptions.ResourceNotFound,
                          db.resource_delete, 123456789)

    def test_get_all(self):
        # Resources must be filtered by the deployment they belong to.
        deployment0 = db.deployment_create({})
        deployment1 = db.deployment_create({})
        res0 = db.resource_create({"deployment_uuid": deployment0["uuid"]})
        res1 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        res2 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        resources = db.resource_get_all(deployment1["uuid"])
        self.assertEqual(sorted([res1["id"], res2["id"]]),
                         sorted([r["id"] for r in resources]))
        resources = db.resource_get_all(deployment0["uuid"])
        self.assertEqual(len(resources), 1)
        self.assertEqual(res0["id"], resources[0]["id"])

    def test_get_all_by_provider_name(self):
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "two",
        })
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])

    def test_get_all_by_provider_type(self):
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "two",
        })
        resources = db.resource_get_all(deployment["uuid"], type="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"], type="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])
class VerificationTestCase(test.DBTestCase):
    """Round-trip test for the verification DB-API layer."""

    def setUp(self):
        super(VerificationTestCase, self).setUp()
        self.deploy = db.deployment_create({})

    def _create_verification(self):
        # New verifications are always bound to the fixture deployment.
        return db.verification_create(self.deploy["uuid"])

    def test_creation_of_verification(self):
        created = self._create_verification()
        fetched = db.verification_get(created["uuid"])
        # Every counter must survive the create/get round trip unchanged.
        for field in ("tests", "time", "errors", "failures"):
            self.assertEqual(created[field], fetched[field])
class WorkerTestCase(test.DBTestCase):
    """Tests for worker registration bookkeeping in the DB-API layer."""

    def setUp(self):
        super(WorkerTestCase, self).setUp()
        self.worker = db.register_worker({"hostname": "test"})

    def test_register_worker_duplicate(self):
        # Registering the same hostname twice must be rejected.
        self.assertRaises(exceptions.WorkerAlreadyRegistered,
                          db.register_worker, {"hostname": "test"})

    def test_get_worker(self):
        fetched = db.get_worker("test")
        for field in ("id", "hostname"):
            self.assertEqual(self.worker[field], fetched[field])

    def test_get_worker_not_found(self):
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "notfound")

    def test_unregister_worker(self):
        db.unregister_worker("test")
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "test")

    def test_unregister_worker_not_found(self):
        self.assertRaises(exceptions.WorkerNotFound,
                          db.unregister_worker, "fake")

    def test_update_worker(self):
        db.update_worker("test")
        refreshed = db.get_worker("test")
        # update_worker must bump the heartbeat timestamp.
        self.assertNotEqual(self.worker["updated_at"],
                            refreshed["updated_at"])

    def test_update_worker_not_found(self):
        self.assertRaises(exceptions.WorkerNotFound, db.update_worker, "fake")
| 43.066225 | 79 | 0.634579 |
from six import moves
from rally.common import db
from rally import consts
from rally import exceptions
from tests.unit import test
class TasksTestCase(test.DBTestCase):
def setUp(self):
super(TasksTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _get_task(self, uuid):
return db.task_get(uuid)
def _create_task(self, values=None):
values = values or {}
if "deployment_uuid" not in values:
values["deployment_uuid"] = self.deploy["uuid"]
return db.task_create(values)
def test_task_get_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_get, "f885f435-f6ca-4f3e-9b3e-aeb6837080f2")
def test_task_create(self):
task = self._create_task()
db_task = self._get_task(task["uuid"])
self.assertIsNotNone(db_task["uuid"])
self.assertIsNotNone(db_task["id"])
self.assertEqual(db_task["status"], consts.TaskStatus.INIT)
def test_task_create_without_uuid(self):
_uuid = "19be8589-48b0-4af1-a369-9bebaaa563ab"
task = self._create_task({"uuid": _uuid})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["uuid"], _uuid)
def test_task_update(self):
task = self._create_task({})
db.task_update(task["uuid"], {"status": consts.TaskStatus.FAILED})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["status"], consts.TaskStatus.FAILED)
def test_task_update_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_update,
"7ae1da26-feaa-4213-8208-76af2857a5ab", {})
def test_task_update_all_stats(self):
_uuid = self._create_task({})["uuid"]
for status in consts.TaskStatus:
db.task_update(_uuid, {"status": status})
db_task = self._get_task(_uuid)
self.assertEqual(db_task["status"], status)
def test_task_list_empty(self):
self.assertEqual([], db.task_list())
def test_task_list(self):
INIT = consts.TaskStatus.INIT
task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
FINISHED = consts.TaskStatus.FINISHED
task_finished = sorted(self._create_task(
{"status": FINISHED,
"deployment_uuid": self.deploy["uuid"]}
)["uuid"] for i in moves.range(3))
task_all = sorted(task_init + task_finished)
def get_uuids(status=None, deployment=None):
tasks = db.task_list(status=status, deployment=deployment)
return sorted(task["uuid"] for task in tasks)
self.assertEqual(task_all, get_uuids(None))
self.assertEqual(task_init, get_uuids(status=INIT))
self.assertEqual(task_finished, get_uuids(status=FINISHED))
self.assertRaises(exceptions.DeploymentNotFound,
get_uuids, deployment="non-existing-deployment")
deleted_task_uuid = task_finished.pop()
db.task_delete(deleted_task_uuid)
self.assertEqual(task_init, get_uuids(INIT))
self.assertEqual(sorted(task_finished), get_uuids(FINISHED))
def test_task_delete(self):
task1, task2 = self._create_task()["uuid"], self._create_task()["uuid"]
db.task_delete(task1)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"da6f820c-b133-4b9f-8534-4c3bcc40724b")
def test_task_delete_with_results(self):
task_id = self._create_task()["uuid"]
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 1)
db.task_delete(task_id)
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 0)
def test_task_delete_by_uuid_and_status(self):
values = {
"status": consts.TaskStatus.FINISHED,
}
task1 = self._create_task(values=values)["uuid"]
task2 = self._create_task(values=values)["uuid"]
db.task_delete(task1, status=consts.TaskStatus.FINISHED)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_by_uuid_and_status_invalid(self):
task = self._create_task(
values={"status": consts.TaskStatus.INIT})["uuid"]
self.assertRaises(exceptions.TaskInvalidStatus, db.task_delete, task,
status=consts.TaskStatus.FINISHED)
def test_task_delete_by_uuid_and_status_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"fcd0483f-a405-44c4-b712-99c9e52254eb",
status=consts.TaskStatus.FINISHED)
def test_task_result_get_all_by_uuid(self):
task1 = self._create_task()["uuid"]
task2 = self._create_task()["uuid"]
for task_id in (task1, task2):
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
for task_id in (task1, task2):
res = db.task_result_get_all_by_uuid(task_id)
data = {task_id: task_id}
self.assertEqual(len(res), 1)
self.assertEqual(res[0]["key"], data)
self.assertEqual(res[0]["data"], data)
def test_task_get_detailed(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed(task1["uuid"])
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
def test_task_get_detailed_last(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed_last()
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
class DeploymentTestCase(test.DBTestCase):
def test_deployment_create(self):
deploy = db.deployment_create({"config": {"opt": "val"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy["config"], {"opt": "val"})
def test_deployment_create_several(self):
deploys = db.deployment_list()
self.assertEqual(len(deploys), 0)
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy_one["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy_one["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_one["config"], {"opt1": "val1"})
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 2)
self.assertEqual(set([deploy_one["uuid"], deploy_two["uuid"]]),
set([deploy["uuid"] for deploy in deploys]))
self.assertNotEqual(deploy_one["uuid"], deploy_two["uuid"])
self.assertEqual(deploy_two["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_two["config"], {"opt2": "val2"})
def test_deployment_update(self):
deploy = db.deployment_create({})
self.assertEqual(deploy["config"], {})
update_deploy = db.deployment_update(deploy["uuid"],
{"config": {"opt": "val"}})
self.assertEqual(update_deploy["uuid"], deploy["uuid"])
self.assertEqual(update_deploy["config"], {"opt": "val"})
get_deploy = db.deployment_get(deploy["uuid"])
self.assertEqual(get_deploy["uuid"], deploy["uuid"])
self.assertEqual(get_deploy["config"], {"opt": "val"})
def test_deployment_update_several(self):
deploy_one = db.deployment_create({})
self.assertEqual(deploy_one["config"], {})
update_deploy_one = db.deployment_update(
deploy_one["uuid"], {"config": {"opt1": "val1"}})
self.assertEqual(update_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(update_deploy_one["config"], {"opt1": "val1"})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
deploy_two = db.deployment_create({})
update_deploy_two = db.deployment_update(
deploy_two["uuid"], {"config": {"opt2": "val2"}})
self.assertEqual(update_deploy_two["uuid"], deploy_two["uuid"])
self.assertEqual(update_deploy_two["config"], {"opt2": "val2"})
get_deploy_one_again = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["config"], {"opt1": "val1"})
def test_deployment_get(self):
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
get_deploy_two = db.deployment_get(deploy_two["uuid"])
self.assertNotEqual(get_deploy_one["uuid"], get_deploy_two["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
self.assertEqual(get_deploy_two["config"], {"opt2": "val2"})
def test_deployment_get_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_get,
"852e932b-9552-4b2d-89e3-a5915780a5e3")
def test_deployment_list(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
deploys = db.deployment_list()
self.assertEqual(sorted([deploy_one["uuid"], deploy_two["uuid"]]),
sorted([deploy["uuid"] for deploy in deploys]))
def test_deployment_list_with_status_and_name(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({
"config": {},
"status": consts.DeployStatus.DEPLOY_FAILED,
})
deploy_three = db.deployment_create({"name": "deployment_name"})
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_INIT)
deploys.sort(key=lambda x: x["id"])
self.assertEqual(len(deploys), 2)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_FAILED)
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_two["uuid"])
deploys = db.deployment_list(
status=consts.DeployStatus.DEPLOY_FINISHED)
self.assertEqual(len(deploys), 0)
deploys = db.deployment_list(name="deployment_name")
self.assertEqual(deploys[0]["uuid"], deploy_three["uuid"])
self.assertEqual(len(deploys), 1)
def test_deployment_list_parent(self):
deploy = db.deployment_create({})
subdeploy1 = db.deployment_create({"parent_uuid": deploy.uuid})
subdeploy2 = db.deployment_create({"parent_uuid": deploy.uuid})
self.assertEqual([deploy.uuid], [d.uuid for d in db.deployment_list()])
subdeploys = db.deployment_list(parent_uuid=deploy.uuid)
self.assertEqual(set([subdeploy1.uuid, subdeploy2.uuid]),
set([d.uuid for d in subdeploys]))
def test_deployment_delete(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
db.deployment_delete(deploy_two["uuid"])
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
def test_deployment_delete_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_delete,
"5f2883be-46c8-4c4b-a4fe-988ad0c6b20a")
def test_deployment_delete_is_busy(self):
deployment = db.deployment_create({})
db.resource_create({"deployment_uuid": deployment["uuid"]})
db.resource_create({"deployment_uuid": deployment["uuid"]})
self.assertRaises(exceptions.DeploymentIsBusy, db.deployment_delete,
deployment["uuid"])
class ResourceTestCase(test.DBTestCase):
    """CRUD tests for the resource DB-API layer.

    Each test creates its own deployment fixture because resources are
    always scoped to a deployment via ``deployment_uuid``.
    """

    def test_create(self):
        deployment = db.deployment_create({})
        resource = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "fakeprovider",
            "type": "faketype",
        })
        resources = db.resource_get_all(deployment["uuid"])
        self.assertTrue(resource["id"])
        self.assertEqual(len(resources), 1)
        # Bug fix: this used assertTrue(a, b), which treats the second
        # argument as a failure *message* and never compares the ids.
        self.assertEqual(resource["id"], resources[0]["id"])
        self.assertEqual(resource["deployment_uuid"], deployment["uuid"])
        self.assertEqual(resource["provider_name"], "fakeprovider")
        self.assertEqual(resource["type"], "faketype")

    def test_delete(self):
        deployment = db.deployment_create({})
        res = db.resource_create({"deployment_uuid": deployment["uuid"]})
        db.resource_delete(res["id"])
        resources = db.resource_get_all(deployment["uuid"])
        self.assertEqual(len(resources), 0)

    def test_delete_not_found(self):
        self.assertRaises(exceptions.ResourceNotFound,
                          db.resource_delete, 123456789)

    def test_get_all(self):
        # Resources must be filtered by the deployment they belong to.
        deployment0 = db.deployment_create({})
        deployment1 = db.deployment_create({})
        res0 = db.resource_create({"deployment_uuid": deployment0["uuid"]})
        res1 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        res2 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
        resources = db.resource_get_all(deployment1["uuid"])
        self.assertEqual(sorted([res1["id"], res2["id"]]),
                         sorted([r["id"] for r in resources]))
        resources = db.resource_get_all(deployment0["uuid"])
        self.assertEqual(len(resources), 1)
        self.assertEqual(res0["id"], resources[0]["id"])

    def test_get_all_by_provider_name(self):
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "provider_name": "two",
        })
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"],
                                        provider_name="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])

    def test_get_all_by_provider_type(self):
        deployment = db.deployment_create({})
        res_one = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "one",
        })
        res_two = db.resource_create({
            "deployment_uuid": deployment["uuid"],
            "type": "two",
        })
        resources = db.resource_get_all(deployment["uuid"], type="one")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_one["id"], resources[0]["id"])
        resources = db.resource_get_all(deployment["uuid"], type="two")
        self.assertEqual(len(resources), 1)
        self.assertEqual(res_two["id"], resources[0]["id"])
class VerificationTestCase(test.DBTestCase):
    """Tests for the verification DB API."""

    def setUp(self):
        super(VerificationTestCase, self).setUp()
        # Every verification needs an owning deployment.
        self.deploy = db.deployment_create({})

    def _create_verification(self):
        """Create a verification attached to the fixture deployment."""
        deployment_uuid = self.deploy["uuid"]
        return db.verification_create(deployment_uuid)

    def test_creation_of_verification(self):
        """A stored verification round-trips its result counters."""
        verification = self._create_verification()
        db_verification = db.verification_get(verification["uuid"])
        self.assertEqual(verification["tests"], db_verification["tests"])
        self.assertEqual(verification["time"], db_verification["time"])
        self.assertEqual(verification["errors"], db_verification["errors"])
        self.assertEqual(verification["failures"], db_verification["failures"])
class WorkerTestCase(test.DBTestCase):
    """Tests for the worker registry DB API."""

    def setUp(self):
        super(WorkerTestCase, self).setUp()
        # Pre-register one worker that the individual tests operate on.
        self.worker = db.register_worker({"hostname": "test"})

    def test_register_worker_duplicate(self):
        """Registering the same hostname twice raises WorkerAlreadyRegistered."""
        self.assertRaises(exceptions.WorkerAlreadyRegistered,
                          db.register_worker, {"hostname": "test"})

    def test_get_worker(self):
        """Lookup by hostname returns the registered worker record."""
        worker = db.get_worker("test")
        self.assertEqual(self.worker["id"], worker["id"])
        self.assertEqual(self.worker["hostname"], worker["hostname"])

    def test_get_worker_not_found(self):
        """Lookup of an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "notfound")

    def test_unregister_worker(self):
        """An unregistered worker is no longer retrievable."""
        db.unregister_worker("test")
        self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "test")

    def test_unregister_worker_not_found(self):
        """Unregistering an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound,
                          db.unregister_worker, "fake")

    def test_update_worker(self):
        """update_worker bumps the record's updated_at timestamp."""
        db.update_worker("test")
        worker = db.get_worker("test")
        self.assertNotEqual(self.worker["updated_at"], worker["updated_at"])

    def test_update_worker_not_found(self):
        """Updating an unknown hostname raises WorkerNotFound."""
        self.assertRaises(exceptions.WorkerNotFound, db.update_worker, "fake")
| true | true |
f73072bb0fa62e6519596da6e47d9a8d23c2a32a | 1,146 | py | Python | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | src/wsgi.py | mine-archived/dinner | 0b7e556994a6f8e91450377631f88694a97fdcf7 | [
"MIT"
] | null | null | null | """
WSGI config for src project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at the project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                      "settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 38.2 | 79 | 0.790576 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"settings")
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true | true |
f73073b29e40f88ca0b69b779adcab0c43322bfc | 10,040 | py | Python | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | modules/feature_extraction.py | OverFitted/ai-academy-2022 | e58a68a13d81f203027cc367f5f335c2b22f0962 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class VGG_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(VGG_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel] # [64, 128, 256, 512]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
def forward(self, input):
return self.ConvNet(input)
class RCNN_FeatureExtractor(nn.Module):
    """RCNN backbone built from gated recurrent convolution layers (GRCL).

    The inline comments give the feature-map sizes for a 32x100 input;
    the final map is output_channel x 1 x 26.
    """

    def __init__(self, input_channel, output_channel=512):
        super(RCNN_FeatureExtractor, self).__init__()
        # Per-stage channel widths, e.g. [64, 128, 256, 512] for the default.
        self.output_channel = [int(output_channel / 8), int(output_channel / 4),
                               int(output_channel / 2), output_channel]  # [64, 128, 256, 512]
        self.ConvNet = nn.Sequential(
            nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 64 x 16 x 50
            GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
            nn.MaxPool2d(2, 2),  # 64 x 8 x 25
            GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
            # Asymmetric pooling: halve height only, pad width by 1.
            nn.MaxPool2d(2, (2, 1), (0, 1)),  # 128 x 4 x 26
            GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
            nn.MaxPool2d(2, (2, 1), (0, 1)),  # 256 x 2 x 27
            nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True))  # 512 x 1 x 26

    def forward(self, input):
        """Extract convolutional features from the input image batch."""
        return self.ConvNet(input)
class ResNet_FeatureExtractor(nn.Module):
    """Thin wrapper building the custom ResNet trunk (BasicBlock x [1, 2, 5, 3])."""

    def __init__(self, input_channel, output_channel=512):
        super(ResNet_FeatureExtractor, self).__init__()
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        """Extract convolutional features from the input image batch."""
        return self.ConvNet(input)
class GRCL(nn.Module):
    """Gated Recurrent Convolution Layer.

    Applies `num_iteration` recurrent refinement steps (GRCL_unit) on top of
    a feed-forward convolution of the input.
    """

    def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
        super(GRCL, self).__init__()
        # 1x1 gate convolutions (g*) and k x k state convolutions, all bias-free
        # because each is followed by a BatchNorm inside GRCL_unit.
        self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
        self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
        self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
        self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
        self.BN_x_init = nn.BatchNorm2d(output_channel)
        self.num_iteration = num_iteration
        # One independent recurrent unit (with its own BatchNorms) per step.
        self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
        self.GRCL = nn.Sequential(*self.GRCL)

    def forward(self, input):
        """The input of GRCL is consistent over time t, which is denoted by u(0),
        thus wgf_u / wf_u are also consistent over time t and computed once.
        """
        wgf_u = self.wgf_u(input)
        wf_u = self.wf_u(input)
        # Initial state x(0) = relu(BN(wf * u)).
        x = F.relu(self.BN_x_init(wf_u))
        for i in range(self.num_iteration):
            x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
        return x
class GRCL_unit(nn.Module):
    """One recurrent iteration of a Gated Recurrent Convolution Layer.

    Given pre-computed convolutions of the input (wgf_u, wf_u) and of the
    current state (wgr_x, wr_x), computes
        G = sigmoid(BN(wgf_u) + BN(wgr_x))        # gate
        x = relu(BN(wf_u) + BN(BN(wr_x) * G))     # new state
    """

    def __init__(self, output_channel):
        super(GRCL_unit, self).__init__()
        # A separate BatchNorm per branch, as in the GRCNN paper formulation.
        self.BN_gfu = nn.BatchNorm2d(output_channel)
        self.BN_grx = nn.BatchNorm2d(output_channel)
        self.BN_fu = nn.BatchNorm2d(output_channel)
        self.BN_rx = nn.BatchNorm2d(output_channel)
        self.BN_Gx = nn.BatchNorm2d(output_channel)

    def forward(self, wgf_u, wgr_x, wf_u, wr_x):
        """Apply one gated recurrent update; all tensors share one shape."""
        G_first_term = self.BN_gfu(wgf_u)
        G_second_term = self.BN_grx(wgr_x)
        # FIX: F.sigmoid is deprecated (and removed in recent PyTorch);
        # torch.sigmoid is the supported, numerically identical replacement.
        G = torch.sigmoid(G_first_term + G_second_term)
        x_first_term = self.BN_fu(wf_u)
        x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
        x = F.relu(x_first_term + x_second_term)
        return x
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet 'basic' variant)."""

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = self._conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = self._conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def _conv3x3(self, in_planes, out_planes, stride=1):
        # 3x3 convolution, padding 1, no bias (a BatchNorm always follows).
        return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                         stride=stride, padding=1, bias=False)

    def forward(self, x):
        """conv-BN-ReLU, conv-BN, add the (optionally downsampled) input, ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """Custom ResNet trunk for text recognition.

    Height is reduced by three pools and two height-strided convs while width
    is largely preserved (the last two pools/convs use stride (2, 1)),
    producing a wide, short feature map for sequence decoding.
    `block` is the residual block class (e.g. BasicBlock) and `layers` the
    number of blocks per stage.
    """

    def __init__(self, input_channel, output_channel, block, layers):
        super(ResNet, self).__init__()
        # Channel widths of the four residual stages, e.g. [128, 256, 512, 512].
        self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
        self.inplanes = int(output_channel / 8)
        # Stem: two 3x3 convs growing channels from input_channel to inplanes.
        self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
        self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_2 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
        self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
            0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
        self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
            1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
        # From here on, pooling/conv strides are (2, 1): shrink height only.
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
        self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
        self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
            2], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
        self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
        self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
        self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
        self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=1, padding=0, bias=False)
        self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; the first block may
        downsample the shortcut via a 1x1 conv when shape/stride changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> four (pool/conv + residual stage + conv-BN-ReLU) sections."""
        x = self.conv0_1(x)
        x = self.bn0_1(x)
        x = self.relu(x)
        x = self.conv0_2(x)
        x = self.bn0_2(x)
        x = self.relu(x)
        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool2(x)
        x = self.layer2(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.maxpool3(x)
        x = self.layer3(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.layer4(x)
        x = self.conv4_1(x)
        x = self.bn4_1(x)
        x = self.relu(x)
        x = self.conv4_2(x)
        x = self.bn4_2(x)
        x = self.relu(x)
        return x
| 41.659751 | 118 | 0.612251 | import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class VGG_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(VGG_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)),
nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)),
nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True))
def forward(self, input):
return self.ConvNet(input)
class RCNN_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(RCNN_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2),
GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, 2),
GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)),
GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)),
nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True))
def forward(self, input):
return self.ConvNet(input)
class ResNet_FeatureExtractor(nn.Module):
def __init__(self, input_channel, output_channel=512):
super(ResNet_FeatureExtractor, self).__init__()
self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])
def forward(self, input):
return self.ConvNet(input)
class GRCL(nn.Module):
def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
super(GRCL, self).__init__()
self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
self.BN_x_init = nn.BatchNorm2d(output_channel)
self.num_iteration = num_iteration
self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
self.GRCL = nn.Sequential(*self.GRCL)
def forward(self, input):
wgf_u = self.wgf_u(input)
wf_u = self.wf_u(input)
x = F.relu(self.BN_x_init(wf_u))
for i in range(self.num_iteration):
x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
return x
class GRCL_unit(nn.Module):
def __init__(self, output_channel):
super(GRCL_unit, self).__init__()
self.BN_gfu = nn.BatchNorm2d(output_channel)
self.BN_grx = nn.BatchNorm2d(output_channel)
self.BN_fu = nn.BatchNorm2d(output_channel)
self.BN_rx = nn.BatchNorm2d(output_channel)
self.BN_Gx = nn.BatchNorm2d(output_channel)
def forward(self, wgf_u, wgr_x, wf_u, wr_x):
G_first_term = self.BN_gfu(wgf_u)
G_second_term = self.BN_grx(wgr_x)
G = F.sigmoid(G_first_term + G_second_term)
x_first_term = self.BN_fu(wf_u)
x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
x = F.relu(x_first_term + x_second_term)
return x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = self._conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = self._conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def _conv3x3(self, in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, input_channel, output_channel, block, layers):
super(ResNet, self).__init__()
self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
self.inplanes = int(output_channel / 8)
self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
1], kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
2], kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=1, padding=0, bias=False)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
| true | true |
f73075b082a65a62398c6c4d6ab5bdf795ac00ee | 8,704 | py | Python | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 2 | 2020-04-12T06:12:17.000Z | 2020-04-12T15:34:01.000Z | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | null | null | null | examples/python-guide/Gaussian_process_mixed_effects_models_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 1 | 2020-04-12T15:34:12.000Z | 2020-04-12T15:34:12.000Z | # coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# --------------------Grouped random effects model: single-level random effect----------------
# Simulate data
n = 100  # number of samples
m = 25  # number of categories / levels for grouping variable
group = np.arange(n)  # grouping variable: n/m consecutive samples per level
for i in range(m):
    group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence (one-hot) matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
    Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2  # random effect variance
sigma2 = 0.5 ** 2  # error variance
np.random.seed(1)
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m)  # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi  # observed data
# Define and fit model
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
# Make predictions (one prediction per group level)
group_test = np.arange(m)
pred = gp_model.predict(group_data_pred=group_test)
# Compare true and predicted random effects
plt.scatter(b1, pred['mu'])
plt.title("Comparison of true and predicted random effects")
plt.xlabel("truth")
plt.ylabel("predicted")
plt.show()
# Other optimization specifications (gradient descent with Nesterov acceleration)
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent", "lr_cov": 0.1,
                                        "use_nesterov_acc": True})
gp_model.summary()
# --------------------Two crossed random effects and a random slope----------------
# NOTE: run the above example first to create the first random effect
# Simulate data
np.random.seed(1)
x = np.random.uniform(size=n)  # covariate data for random slope
n_obs_gr = int(n / m)  # number of samples per group
group2 = np.arange(n)  # grouping variable for second (crossed) random effect
for i in range(m):
    group2[(n_obs_gr * i):(n_obs_gr * (i + 1))] = np.arange(n_obs_gr)
# incidence matrix relating grouped random effects to samples
Z2 = np.zeros((n, n_obs_gr))
for i in range(n_obs_gr):
    Z2[np.where(group2 == i), i] = 1
Z3 = np.diag(x).dot(Z1)  # random-slope design: Z1 scaled row-wise by x
sigma2_2 = 0.5 ** 2  # variance of second random effect
sigma2_3 = 0.75 ** 2  # variance of random slope for first random effect
b2 = np.sqrt(sigma2_2) * np.random.normal(size=n_obs_gr)  # simulate random effects
b3 = np.sqrt(sigma2_3) * np.random.normal(size=m)
eps2 = Z1.dot(b1) + Z2.dot(b2) + Z3.dot(b3)
y = eps2 + xi  # observed data
# Define and fit model; ind_effect_group_rand_coef ties the random slope to group 1
group_data = np.column_stack((group, group2))
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
# --------------------Mixed effects model: random effects and linear fixed effects----------------
# NOTE: run the above example first to create the random effects part
# Simulate data
np.random.seed(1)
X = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # design matrix / covariate data for fixed effect
beta = np.array([3, 3])  # regression coefficients
y = eps2 + xi + X.dot(beta)  # add fixed effect to observed data
# Define and fit model (fixed effects passed via X)
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, X=X, std_dev=True)
gp_model.summary()
# --------------------Gaussian process model----------------
# Simulate data
n = 200  # number of samples
np.random.seed(2)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # marginal variance of GP
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
# Pairwise Euclidean distance matrix, computed with NumPy broadcasting instead
# of a Python double loop: identical values, but the O(n^2) work runs in C.
D = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=2)
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)  # exponential covariance + jitter for stability
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=n)  # simulate random effects
eps = C.dot(b1)  # correlated GP sample via the Cholesky factor
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Define and fit model
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential")
## Other covariance functions:
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="gaussian")
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="matern", cov_fct_shape=1.5)
# gp_model = gpb.GPModel(gp_coords=coords, cov_function="powered_exponential", cov_fct_shape=1.1)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.1})
gp_model.summary()
# Make predictions
np.random.seed(1)
ntest = 5
# prediction locations (=features) for Gaussian process
coords_test = np.column_stack(
    (np.random.uniform(size=ntest), np.random.uniform(size=ntest))) / 10.
pred = gp_model.predict(gp_coords_pred=coords_test, predict_cov_mat=True)
print("Predicted (posterior/conditional) mean of GP")
# NOTE(review): the bare expressions below display nothing when run as a
# script (unlike in a notebook) — consider print(pred['mu']) / print(pred['cov']).
pred['mu']
print("Predicted (posterior/conditional) covariance matrix of GP")
pred['cov']
# --------------------Gaussian process model with Vecchia approximation----------------
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential",
                       vecchia_approx=True, num_neighbors=30)
gp_model.fit(y=y, params={"optimizer_cov": "gradient_descent",
                          "lr_cov": 0.1})
gp_model.summary()
# --------------------Gaussian process model with random coefficents----------------
# Simulate data
n = 500  # number of samples
np.random.seed(1)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # marginal variance of GP (for simplicity, all GPs have the same parameters)
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
# Pairwise Euclidean distance matrix via broadcasting (replaces the O(n^2)
# Python double loop with one vectorized NumPy expression; same values).
D = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=2)
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)  # exponential covariance + jitter
C = np.linalg.cholesky(Sigma)
X_SVC = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # covariate data for random coefficients
b1 = np.random.normal(size=n)  # simulate random effect
b2 = np.random.normal(size=n)
b3 = np.random.normal(size=n)
# Spatially varying coefficients: intercept GP plus one GP per covariate.
eps = C.dot(b1) + X_SVC[:, 0] * C.dot(b2) + X_SVC[:, 1] * C.dot(b3)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Define and fit model (takes a few seconds)
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential", gp_rand_coef_data=X_SVC)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.05,
                                        "use_nesterov_acc": True,
                                        "acc_rate_cov": 0.5})
gp_model.summary()
# --------------------Combine Gaussian process with grouped random effects----------------
n = 200  # number of samples
m = 25  # number of categories / levels for grouping variable
# Grouping variable: n/m consecutive samples per level
# (vectorized equivalent of the original assignment loop).
group = np.repeat(np.arange(m), int(n / m))
# Incidence (one-hot) matrix relating grouped random effects to samples,
# built in one vectorized assignment instead of a per-level loop.
Z1 = np.zeros((n, m))
Z1[np.arange(n), group] = 1
np.random.seed(1)
coords = np.column_stack(
    (np.random.uniform(size=n), np.random.uniform(size=n)))  # locations (=features) for Gaussian process
sigma2_1 = 1 ** 2  # random effect variance
sigma2_2 = 1 ** 2  # marginal variance of GP
rho = 0.1  # range parameter
sigma2 = 0.5 ** 2  # error variance
# Pairwise Euclidean distance matrix via broadcasting (replaces the O(n^2) Python loop).
D = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=2)
Sigma = sigma2_2 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)  # exponential covariance + jitter
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=m)  # simulate grouped random effect
b2 = np.random.normal(size=n)  # simulate GP
eps = Z1.dot(b1) + C.dot(b2)
xi = np.sqrt(sigma2) * np.random.normal(size=n)  # simulate error term
y = eps + xi
# Create combined grouped-random-effects + Gaussian process model
gp_model = gpb.GPModel(group_data=group, gp_coords=coords, cov_function="exponential")
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
                                        "lr_cov": 0.05,
                                        "use_nesterov_acc": True,
                                        "acc_rate_cov": 0.5})
gp_model.summary()
| 42.048309 | 110 | 0.654756 |
import gpboost as gpb
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
n = 100
m = 25
group = np.arange(n)
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2
sigma2 = 0.5 ** 2
np.random.seed(1)
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m)
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
group_test = np.arange(m)
pred = gp_model.predict(group_data_pred=group_test)
plt.scatter(b1, pred['mu'])
plt.title("Comparison of true and predicted random effects")
plt.xlabel("truth")
plt.ylabel("predicted")
plt.show()
gp_model = gpb.GPModel(group_data=group)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent", "lr_cov": 0.1,
"use_nesterov_acc": True})
gp_model.summary()
np.random.seed(1)
x = np.random.uniform(size=n)
n_obs_gr = int(n / m)
group2 = np.arange(n)
for i in range(m):
group2[(n_obs_gr * i):(n_obs_gr * (i + 1))] = np.arange(n_obs_gr)
Z2 = np.zeros((n, n_obs_gr))
for i in range(n_obs_gr):
Z2[np.where(group2 == i), i] = 1
Z3 = np.diag(x).dot(Z1)
sigma2_2 = 0.5 ** 2
sigma2_3 = 0.75 ** 2
b2 = np.sqrt(sigma2_2) * np.random.normal(size=n_obs_gr)
b3 = np.sqrt(sigma2_3) * np.random.normal(size=m)
eps2 = Z1.dot(b1) + Z2.dot(b2) + Z3.dot(b3)
y = eps2 + xi
group_data = np.column_stack((group, group2))
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, std_dev=True)
gp_model.summary()
np.random.seed(1)
X = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
beta = np.array([3, 3])
y = eps2 + xi + X.dot(beta)
gp_model = gpb.GPModel(group_data=group_data, group_rand_coef_data=x, ind_effect_group_rand_coef=[1])
gp_model.fit(y=y, X=X, std_dev=True)
gp_model.summary()
n = 200
np.random.seed(2)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=n)
eps = C.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential")
=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.1})
gp_model.summary()
np.random.seed(1)
ntest = 5
coords_test = np.column_stack(
(np.random.uniform(size=ntest), np.random.uniform(size=ntest))) / 10.
pred = gp_model.predict(gp_coords_pred=coords_test, predict_cov_mat=True)
print("Predicted (posterior/conditional) mean of GP")
pred['mu']
print("Predicted (posterior/conditional) covariance matrix of GP")
pred['cov']
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential",
vecchia_approx=True, num_neighbors=30)
gp_model.fit(y=y, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.1})
gp_model.summary()
n = 500
np.random.seed(1)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_1 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
X_SVC = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
b1 = np.random.normal(size=n)
b2 = np.random.normal(size=n)
b3 = np.random.normal(size=n)
eps = C.dot(b1) + X_SVC[:, 0] * C.dot(b2) + X_SVC[:, 1] * C.dot(b3)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(gp_coords=coords, cov_function="exponential", gp_rand_coef_data=X_SVC)
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.05,
"use_nesterov_acc": True,
"acc_rate_cov": 0.5})
gp_model.summary()
n = 200
m = 25
group = np.arange(n)
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
np.random.seed(1)
coords = np.column_stack(
(np.random.uniform(size=n), np.random.uniform(size=n)))
sigma2_1 = 1 ** 2
sigma2_2 = 1 ** 2
rho = 0.1
sigma2 = 0.5 ** 2
D = np.zeros((n, n))
for i in range(0, n):
for j in range(i + 1, n):
D[i, j] = np.linalg.norm(coords[i, :] - coords[j, :])
D[j, i] = D[i, j]
Sigma = sigma2_2 * np.exp(-D / rho) + np.diag(np.zeros(n) + 1e-20)
C = np.linalg.cholesky(Sigma)
b1 = np.random.normal(size=m)
b2 = np.random.normal(size=n)
eps = Z1.dot(b1) + C.dot(b2)
xi = np.sqrt(sigma2) * np.random.normal(size=n)
y = eps + xi
gp_model = gpb.GPModel(group_data=group, gp_coords=coords, cov_function="exponential")
gp_model.fit(y=y, std_dev=True, params={"optimizer_cov": "gradient_descent",
"lr_cov": 0.05,
"use_nesterov_acc": True,
"acc_rate_cov": 0.5})
gp_model.summary()
| true | true |
f73075bef3ed405f4ed01eba617bcacbd18b5ea5 | 10,394 | py | Python | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | tests/test_renault_vehicle.py | Nebukadneza/renault-api | 30fdcc405575ca394c98b556878260da787c9ffc | [
"MIT"
] | null | null | null | """Test cases for the Renault client API keys."""
from datetime import datetime
from typing import List
import aiohttp
import pytest
from aioresponses import aioresponses
from tests import get_file_content
from tests.const import TEST_ACCOUNT_ID
from tests.const import TEST_COUNTRY
from tests.const import TEST_KAMEREON_URL
from tests.const import TEST_LOCALE_DETAILS
from tests.const import TEST_VIN
from tests.test_credential_store import get_logged_in_credential_store
from tests.test_renault_session import get_logged_in_session
from renault_api.kamereon.enums import ChargeMode
from renault_api.kamereon.models import ChargeSchedule
from renault_api.renault_vehicle import RenaultVehicle
TEST_KAMEREON_BASE_URL = f"{TEST_KAMEREON_URL}/commerce/v1"
TEST_KAMEREON_ACCOUNT_URL = f"{TEST_KAMEREON_BASE_URL}/accounts/{TEST_ACCOUNT_ID}"
TEST_KAMEREON_VEHICLE_URL1 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v1/cars/{TEST_VIN}"
)
TEST_KAMEREON_VEHICLE_URL2 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v2/cars/{TEST_VIN}"
)
FIXTURE_PATH = "tests/fixtures/kamereon/"
QUERY_STRING = f"country={TEST_COUNTRY}"
@pytest.fixture
def vehicle(websession: aiohttp.ClientSession) -> RenaultVehicle:
"""Fixture for testing RenaultVehicle."""
return RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
def tests_init(websession: aiohttp.ClientSession) -> None:
"""Test RenaultVehicle initialisation."""
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
websession=websession,
country=TEST_COUNTRY,
locale_details=TEST_LOCALE_DETAILS,
credential_store=get_logged_in_credential_store(),
)
@pytest.mark.asyncio
async def test_get_battery_status(vehicle: RenaultVehicle) -> None:
"""Test get_battery_status."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/battery-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/battery-status.1.json"),
)
assert await vehicle.get_battery_status()
@pytest.mark.asyncio
async def test_get_location(vehicle: RenaultVehicle) -> None:
"""Test get_location."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/location?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/location.json"),
)
assert await vehicle.get_location()
@pytest.mark.asyncio
async def test_get_hvac_status(vehicle: RenaultVehicle) -> None:
"""Test get_hvac_status."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-status.json"),
)
assert await vehicle.get_hvac_status()
@pytest.mark.asyncio
async def test_get_charge_mode(vehicle: RenaultVehicle) -> None:
"""Test get_charge_mode."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-mode.json"),
)
assert await vehicle.get_charge_mode()
@pytest.mark.asyncio
async def test_get_cockpit(vehicle: RenaultVehicle) -> None:
"""Test get_cockpit."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/cockpit?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/cockpit.zoe.json"),
)
assert await vehicle.get_cockpit()
@pytest.mark.asyncio
async def test_get_lock_status(vehicle: RenaultVehicle) -> None:
"""Test get_lock_status."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/lock-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/lock-status.json"),
)
assert await vehicle.get_lock_status()
@pytest.mark.asyncio
async def test_get_charging_settings(vehicle: RenaultVehicle) -> None:
"""Test get_charging_settings."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charging-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/charging-settings.json"
),
)
assert await vehicle.get_charging_settings()
@pytest.mark.asyncio
async def test_get_notification_settings(vehicle: RenaultVehicle) -> None:
"""Test get_notification_settings."""
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/notification-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/notification-settings.json"
),
)
assert await vehicle.get_notification_settings()
@pytest.mark.asyncio
async def test_get_charge_history(vehicle: RenaultVehicle) -> None:
"""Test get_charge_history."""
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-history.json"),
)
assert await vehicle.get_charge_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_charges(vehicle: RenaultVehicle) -> None:
"""Test get_charges."""
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charges?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charges.json"),
)
assert await vehicle.get_charges(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_history(vehicle: RenaultVehicle) -> None:
"""Test get_hvac_history."""
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-history.json"),
)
assert await vehicle.get_hvac_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_sessions(vehicle: RenaultVehicle) -> None:
"""Test get_hvac_sessions."""
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-sessions?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-sessions.json"),
)
assert await vehicle.get_hvac_sessions(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_set_ac_start(vehicle: RenaultVehicle) -> None:
"""Test set_ac_start."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.start.json"
),
)
assert await vehicle.set_ac_start(21, datetime(2020, 11, 24))
@pytest.mark.asyncio
async def test_set_ac_stop(vehicle: RenaultVehicle) -> None:
"""Test set_ac_stop."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.cancel.json"
),
)
assert await vehicle.set_ac_stop()
@pytest.mark.asyncio
async def test_set_charge_mode(vehicle: RenaultVehicle) -> None:
"""Test set_charge_mode."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-mode.schedule_mode.json"
),
)
assert await vehicle.set_charge_mode(ChargeMode.SCHEDULE_MODE)
@pytest.mark.asyncio
async def test_set_charge_schedules(vehicle: RenaultVehicle) -> None:
"""Test set_charge_schedules."""
schedules: List[ChargeSchedule] = []
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL2}/actions/charge-schedule?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-schedule.schedules.json"
),
)
assert await vehicle.set_charge_schedules(schedules)
@pytest.mark.asyncio
async def test_set_charge_start(vehicle: RenaultVehicle) -> None:
"""Test set_charge_start."""
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charging-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charging-start.start.json"
),
)
assert await vehicle.set_charge_start()
| 35.233898 | 88 | 0.681547 | from datetime import datetime
from typing import List
import aiohttp
import pytest
from aioresponses import aioresponses
from tests import get_file_content
from tests.const import TEST_ACCOUNT_ID
from tests.const import TEST_COUNTRY
from tests.const import TEST_KAMEREON_URL
from tests.const import TEST_LOCALE_DETAILS
from tests.const import TEST_VIN
from tests.test_credential_store import get_logged_in_credential_store
from tests.test_renault_session import get_logged_in_session
from renault_api.kamereon.enums import ChargeMode
from renault_api.kamereon.models import ChargeSchedule
from renault_api.renault_vehicle import RenaultVehicle
TEST_KAMEREON_BASE_URL = f"{TEST_KAMEREON_URL}/commerce/v1"
TEST_KAMEREON_ACCOUNT_URL = f"{TEST_KAMEREON_BASE_URL}/accounts/{TEST_ACCOUNT_ID}"
TEST_KAMEREON_VEHICLE_URL1 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v1/cars/{TEST_VIN}"
)
TEST_KAMEREON_VEHICLE_URL2 = (
f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v2/cars/{TEST_VIN}"
)
FIXTURE_PATH = "tests/fixtures/kamereon/"
QUERY_STRING = f"country={TEST_COUNTRY}"
@pytest.fixture
def vehicle(websession: aiohttp.ClientSession) -> RenaultVehicle:
return RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
def tests_init(websession: aiohttp.ClientSession) -> None:
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
session=get_logged_in_session(websession),
)
assert RenaultVehicle(
account_id=TEST_ACCOUNT_ID,
vin=TEST_VIN,
websession=websession,
country=TEST_COUNTRY,
locale_details=TEST_LOCALE_DETAILS,
credential_store=get_logged_in_credential_store(),
)
@pytest.mark.asyncio
async def test_get_battery_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/battery-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/battery-status.1.json"),
)
assert await vehicle.get_battery_status()
@pytest.mark.asyncio
async def test_get_location(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/location?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/location.json"),
)
assert await vehicle.get_location()
@pytest.mark.asyncio
async def test_get_hvac_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-status.json"),
)
assert await vehicle.get_hvac_status()
@pytest.mark.asyncio
async def test_get_charge_mode(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-mode.json"),
)
assert await vehicle.get_charge_mode()
@pytest.mark.asyncio
async def test_get_cockpit(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL2}/cockpit?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/cockpit.zoe.json"),
)
assert await vehicle.get_cockpit()
@pytest.mark.asyncio
async def test_get_lock_status(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/lock-status?{QUERY_STRING}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/lock-status.json"),
)
assert await vehicle.get_lock_status()
@pytest.mark.asyncio
async def test_get_charging_settings(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charging-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/charging-settings.json"
),
)
assert await vehicle.get_charging_settings()
@pytest.mark.asyncio
async def test_get_notification_settings(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/notification-settings?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_data/notification-settings.json"
),
)
assert await vehicle.get_notification_settings()
@pytest.mark.asyncio
async def test_get_charge_history(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charge-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-history.json"),
)
assert await vehicle.get_charge_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_charges(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/charges?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charges.json"),
)
assert await vehicle.get_charges(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_history(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-history?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-history.json"),
)
assert await vehicle.get_hvac_history(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_get_hvac_sessions(vehicle: RenaultVehicle) -> None:
query_string = f"{QUERY_STRING}&end=20201115&start=20201001"
with aioresponses() as mocked_responses:
mocked_responses.get(
f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-sessions?{query_string}",
status=200,
body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-sessions.json"),
)
assert await vehicle.get_hvac_sessions(
start=datetime(2020, 10, 1),
end=datetime(2020, 11, 15),
)
@pytest.mark.asyncio
async def test_set_ac_start(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.start.json"
),
)
assert await vehicle.set_ac_start(21, datetime(2020, 11, 24))
@pytest.mark.asyncio
async def test_set_ac_stop(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/hvac-start.cancel.json"
),
)
assert await vehicle.set_ac_stop()
@pytest.mark.asyncio
async def test_set_charge_mode(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charge-mode?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-mode.schedule_mode.json"
),
)
assert await vehicle.set_charge_mode(ChargeMode.SCHEDULE_MODE)
@pytest.mark.asyncio
async def test_set_charge_schedules(vehicle: RenaultVehicle) -> None:
schedules: List[ChargeSchedule] = []
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL2}/actions/charge-schedule?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charge-schedule.schedules.json"
),
)
assert await vehicle.set_charge_schedules(schedules)
@pytest.mark.asyncio
async def test_set_charge_start(vehicle: RenaultVehicle) -> None:
with aioresponses() as mocked_responses:
mocked_responses.post(
f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charging-start?{QUERY_STRING}",
status=200,
body=get_file_content(
f"{FIXTURE_PATH}/vehicle_action/charging-start.start.json"
),
)
assert await vehicle.set_charge_start()
| true | true |
f73076c02920003b04f8d7e5599366f2dce13c0e | 1,277 | py | Python | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | randomgenius.py | JosephCottingham/MusicPlaylist-LinkedList | bb8014158fed05b45194705424bc5830ed25527a | [
"MIT"
] | null | null | null | import requests, json, random, sys, configparser
#def getData(auth_string, id):
def getData():
global auth_token
s = random.randint(100000, 1000000)
id_str = str(s)
request_url = "http://api.genius.com/songs/" + id_str
headersMap = {
"User-Agent": "CompuServe Classic/1.22",
"Accept": "application/json",
"Authorization": "Bearer " + auth_token
}
response = requests.get(request_url, headers=headersMap)
### Output the HTTP status code and reason text...
#print response.status, response.reason
result = json.loads(response.content)
output = "[" + id_str + "] "
if response.status_code == 200:
title = result["response"]["song"]["full_title"]
song_uri = result["response"]["song"]["path"]
if not title:
return getData()
return title
else:
return getData()
#Copy auth.cfg.sample to auth.cfg and fill in your auth token
Config = configparser.ConfigParser()
Config.read('auth.cfg')
auth_token = Config.get('Auth', 'Token')
#if no argument (count) is given, set default to 1
c = 0
if (len(sys.argv) == 1):
c = 1
else:
c = int(sys.argv[1])
#Concerning the docs, the song id is a 6-digit number
for i in range(0, c):
getData()
| 29.697674 | 61 | 0.628034 | import requests, json, random, sys, configparser
def getData():
global auth_token
s = random.randint(100000, 1000000)
id_str = str(s)
request_url = "http://api.genius.com/songs/" + id_str
headersMap = {
"User-Agent": "CompuServe Classic/1.22",
"Accept": "application/json",
"Authorization": "Bearer " + auth_token
}
response = requests.get(request_url, headers=headersMap)
status_code == 200:
title = result["response"]["song"]["full_title"]
song_uri = result["response"]["song"]["path"]
if not title:
return getData()
return title
else:
return getData()
Config = configparser.ConfigParser()
Config.read('auth.cfg')
auth_token = Config.get('Auth', 'Token')
c = 0
if (len(sys.argv) == 1):
c = 1
else:
c = int(sys.argv[1])
for i in range(0, c):
getData()
| true | true |
f73077be389c6681ad96c3ab9228da1f85632f08 | 2,263 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetHttpHeaderConfigRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class SetHttpHeaderConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetHttpHeaderConfig')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_HeaderValue(self):
return self.get_query_params().get('HeaderValue')
def set_HeaderValue(self,HeaderValue):
self.add_query_param('HeaderValue',HeaderValue)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ConfigId(self):
return self.get_query_params().get('ConfigId')
def set_ConfigId(self,ConfigId):
self.add_query_param('ConfigId',ConfigId)
def get_HeaderKey(self):
return self.get_query_params().get('HeaderKey')
def set_HeaderKey(self,HeaderKey):
self.add_query_param('HeaderKey',HeaderKey) | 33.776119 | 74 | 0.764914 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class SetHttpHeaderConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetHttpHeaderConfig')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_HeaderValue(self):
return self.get_query_params().get('HeaderValue')
def set_HeaderValue(self,HeaderValue):
self.add_query_param('HeaderValue',HeaderValue)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ConfigId(self):
return self.get_query_params().get('ConfigId')
def set_ConfigId(self,ConfigId):
self.add_query_param('ConfigId',ConfigId)
def get_HeaderKey(self):
return self.get_query_params().get('HeaderKey')
def set_HeaderKey(self,HeaderKey):
self.add_query_param('HeaderKey',HeaderKey) | true | true |
f7307823c40ee64d03118ec461eb3103c01b9c29 | 5,544 | py | Python | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 4 | 2016-11-13T20:49:33.000Z | 2017-12-20T20:03:03.000Z | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 5 | 2016-12-26T19:14:46.000Z | 2022-02-11T03:44:39.000Z | Heats/scoresheetsHtml.py | yuxuibbs/MCC-Competition-Docs | 384726c41434c5a07becb6438c3d2409c6ca6eb4 | [
"MIT"
] | 2 | 2016-12-29T12:03:15.000Z | 2017-02-16T15:51:02.000Z | startHTML = '''
<html>
<head>
<style>
table {
border-collapse: collapse;
height: 100%;
width: 100%;
}
table, th, td {
border: 3px solid black;
}
@media print {
table {
page-break-after: always;
}
}
.cutoffs td {
border: 0;
font-weight: bold;
}
.compName {
font-size: 48pt;
font-weight: bold;
}
.labels {
font-size: 24pt;
font-weight: bold;
}
.attempt {
font-size: 36pt;
font-weight: bold;
text-align: center;
}
.event, .personID, .scrambler {
font-size: 24pt;
font-weight: bold;
width: 60px;
}
.round, .heat {
font-size: 24pt;
font-weight: bold;
}
.personName {
font-size: 40pt;
font-weight: bold;
}
.attemptNumber {
width: 60px;
}
.initial {
width: 100px;
}
</style>
</head>
<body>
'''
ao5Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">4</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">5</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
mo3Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
endHTML = '''
</body>
</html>
''' | 25.906542 | 64 | 0.418831 | startHTML = '''
<html>
<head>
<style>
table {
border-collapse: collapse;
height: 100%;
width: 100%;
}
table, th, td {
border: 3px solid black;
}
@media print {
table {
page-break-after: always;
}
}
.cutoffs td {
border: 0;
font-weight: bold;
}
.compName {
font-size: 48pt;
font-weight: bold;
}
.labels {
font-size: 24pt;
font-weight: bold;
}
.attempt {
font-size: 36pt;
font-weight: bold;
text-align: center;
}
.event, .personID, .scrambler {
font-size: 24pt;
font-weight: bold;
width: 60px;
}
.round, .heat {
font-size: 24pt;
font-weight: bold;
}
.personName {
font-size: 40pt;
font-weight: bold;
}
.attemptNumber {
width: 60px;
}
.initial {
width: 100px;
}
</style>
</head>
<body>
'''
ao5Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">4</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">5</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
mo3Table = '''
<table>
<tr>
<th colspan="6" class="compName">competitionName</th>
</tr>
<tr>
<th colspan="1" class="personID">competitorID</th>
<th colspan="3" class="event">eventName</th>
<th colspan="1" class="heat">G: heatNumber</th>
<th colspan="1" class="round">R: roundNumber</th>
</tr>
<tr>
<th colspan="6" class="personName">competitorName</th>
</tr>
<tr class="labels">
<th colspan="1" class="scrambler">Scr</th>
<th colspan="1" class="attemptNumber">#</th>
<th colspan="2">Results</th>
<th colspan="1" class="initial">Judge</th>
<th colspan="1" class="initial">Comp</th>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">1</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="cutoffs">
<td colspan="1"></td>
<td colspan="1"></td>
<td colspan="1">Cutoff: cutoffTime</td>
<td colspan="1">Time Limit: timeLimit</td>
<td colspan="1"></td>
<td colspan="1"></td>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">2</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">3</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
<tr class="empty">
<td colspan="6"></td>
</tr>
<tr class="attempt">
<td colspan="1"> </td>
<td colspan="1">E</td>
<td colspan="2"> </td>
<td colspan="1"> </td>
<td colspan="1"> </td>
</tr>
</table>
'''
endHTML = '''
</body>
</html>
''' | true | true |
f7307828ee10c47bd1ad7591cd1e3358963ef61f | 1,063 | py | Python | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 42 | 2018-10-24T08:21:07.000Z | 2021-06-10T20:54:39.000Z | authors/apps/authentication/tests/test_login.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 2 | 2018-11-05T08:56:42.000Z | 2019-05-03T12:40:43.000Z | from rest_framework import status
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from .basetest import BaseTestCase
User = get_user_model()
signup_url = reverse("authentication:signup")
login_url = reverse("authentication:login")
class UserApiTestCase(BaseTestCase):
def test_login_user(self):
# Test user login
self.login_response = self.client.post(
login_url, self.login_data, format="json")
self.assertEqual(self.login_response.status_code, status.HTTP_200_OK)
login_token = self.login_response.data['token']
self.assertEqual(
self.login_response.data, {"email": "daniel@test.com",
"username": "daniel",
"token": login_token}
)
def test_get_user_email(self):
""" Test model method to get user's email """
self.email = self.login_data["user"]["email"]
email = self.email.__str__()
self.assertIn(email, "daniel@test.com")
| 34.290323 | 77 | 0.643462 | from rest_framework import status
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from .basetest import BaseTestCase
User = get_user_model()
signup_url = reverse("authentication:signup")
login_url = reverse("authentication:login")
class UserApiTestCase(BaseTestCase):
def test_login_user(self):
self.login_response = self.client.post(
login_url, self.login_data, format="json")
self.assertEqual(self.login_response.status_code, status.HTTP_200_OK)
login_token = self.login_response.data['token']
self.assertEqual(
self.login_response.data, {"email": "daniel@test.com",
"username": "daniel",
"token": login_token}
)
def test_get_user_email(self):
self.email = self.login_data["user"]["email"]
email = self.email.__str__()
self.assertIn(email, "daniel@test.com")
| true | true |
f73078ac1b77d76adea8e2e058894be4fc7fa673 | 4,504 | py | Python | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | techno_files/eic25_zawieszki.py | lukaszmachura/ejc2019_name_badge | 13996bdfc25958129fda26a8692c1e08c016eae4 | [
"MIT"
] | null | null | null | import subprocess
import os
def read_list(fname="lista.csv"):
for row in open(fname):
yield row
def write_preamble(fname):
preamble = r"""%25 EIC technology, (C) LM 2018
\documentclass{article}
\usepackage{graphicx}
\usepackage[space]{grffile}
\usepackage{anyfontsize}
\usepackage{fontspec}
\usepackage{color}
\setromanfont[
BoldFont=NexaB.otf,
]{NexaL.otf}
\usepackage{multicol}
\usepackage[english]{babel}%
\usepackage[a6paper,left=1cm,right=1cm,top=1cm,bottom=1cm]{geometry}
\usepackage{lipsum}% http://ctan.org/pkg/lipsum
\begin{document}
\pagestyle{empty}
% top
\centering{\includegraphics[width=0.9\textwidth,keepaspectratio]{top}}\\
\vspace*{0.3cm}
"""
with open(fname, "w") as f:
f.write(preamble)
def write_player(p, f, photo):
country = p[0].lower()
name = p[1].lower()
surname = p[2].lower()
dan = p[3].lower()
role = p[4].lower()
spec = p[8].lower()
Name = name.capitalize()
Surname = surname.capitalize()
if photo == None:
photo = "nophoto"
if dan == "-1":
dan = "{\color{white}1}"
elif dan.lower() == "mudan":
dan = "mudan"
elif "renshi" in dan:
dan = "renshi " + dan[0] + " dan"
elif "kyoshi" in dan:
dan = "kyoshi " + dan[0] + " dan"
elif "hanshi" in dan:
dan = "hanshi " + dan[0] + " dan"
else:
dan += " dan"
name_size = "huge"
if len(Name) > 8:
name_size = "Large"
surname_size = "huge"
if len(Surname) > 8:
surname_size = "Large"
string = "%"
string += r""" pic name role
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=3cm]{foto/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{flushright}
{\%s \textbf{%s}}\\
{\%s \textbf{%s}}\\
\vspace*{0.3cm}
{\Large %s}\\{\Large %s}
\end{flushright}
\end{minipage}\\
\vspace*{0.3cm}
""" % (photo, name_size, Name.split(" ")[0],
surname_size, Surname, dan, spec)
with open(f, "a") as f:
f.write(string)
def write_country(p, f, C):
country = p[0].lower()
string = "%"
string += r""" country
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=1.5cm]{flags/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{center}
{\fontsize{30}{40}\selectfont %s}
\end{center}
\end{minipage}
""" % (country, C)
with open(f, "a") as f:
f.write(string)
def write_role(p, f):
role = p[4].lower()
size = "huge"
if len(role) > len("manager + competitor"):
size = "Large"
string = "%"
string += r""" role
\vspace*{0.25cm}
\begin{center}
{\%s %s}
\end{center}
\vspace*{0.25cm}
""" % (size, role)
with open(f, "a") as f:
f.write(string)
def write_footer(f):
foot = "%"
foot += r""" foot
\centering{\includegraphics[width=\textwidth,keepaspectratio]{footer}}
\end{document}
"""
with open(f, "a") as f:
f.write(foot)
def is_in_foto_dir(ekf, path="./foto"):
for element in os.listdir(path):
if ekf.lower() in element.lower():
return element
return None
if __name__ == "__main__":
DIR = "./zawieszki/"
COUNTRIES = "./countries/"
country = {'pol': 'Poland'}
rl = read_list()
for row in rl:
player = row.split(",")
country = player[0]
cou = country[:3].lower()
fname = player[1]
lname = player[2]
dan = player[3]
role = player[4]
ekf = player[5]
pic = player[6]
fn = fname.lower()
ln = lname.lower()
fname = "{}_{}_{}".format(cou, ln, fn)
fname_tex = fname + ".tex"
write_preamble(fname_tex)
photo = None if ekf == "NA" else is_in_foto_dir(ekf)
write_player(player, fname_tex, photo)
write_country(player, fname_tex, country) #[player[0]])
write_role(player, fname_tex)
write_footer(fname_tex)
subprocess.call(["ls", fname])
subprocess.call(["xelatex", fname_tex])
subprocess.call(["rm", fname_tex, fname + ".log", fname + ".aux"])
subprocess.call(["mv", fname + ".pdf", DIR])
| 23.458333 | 76 | 0.552398 | import subprocess
import os
def read_list(fname="lista.csv"):
for row in open(fname):
yield row
def write_preamble(fname):
preamble = r"""%25 EIC technology, (C) LM 2018
\documentclass{article}
\usepackage{graphicx}
\usepackage[space]{grffile}
\usepackage{anyfontsize}
\usepackage{fontspec}
\usepackage{color}
\setromanfont[
BoldFont=NexaB.otf,
]{NexaL.otf}
\usepackage{multicol}
\usepackage[english]{babel}%
\usepackage[a6paper,left=1cm,right=1cm,top=1cm,bottom=1cm]{geometry}
\usepackage{lipsum}% http://ctan.org/pkg/lipsum
\begin{document}
\pagestyle{empty}
% top
\centering{\includegraphics[width=0.9\textwidth,keepaspectratio]{top}}\\
\vspace*{0.3cm}
"""
with open(fname, "w") as f:
f.write(preamble)
def write_player(p, f, photo):
country = p[0].lower()
name = p[1].lower()
surname = p[2].lower()
dan = p[3].lower()
role = p[4].lower()
spec = p[8].lower()
Name = name.capitalize()
Surname = surname.capitalize()
if photo == None:
photo = "nophoto"
if dan == "-1":
dan = "{\color{white}1}"
elif dan.lower() == "mudan":
dan = "mudan"
elif "renshi" in dan:
dan = "renshi " + dan[0] + " dan"
elif "kyoshi" in dan:
dan = "kyoshi " + dan[0] + " dan"
elif "hanshi" in dan:
dan = "hanshi " + dan[0] + " dan"
else:
dan += " dan"
name_size = "huge"
if len(Name) > 8:
name_size = "Large"
surname_size = "huge"
if len(Surname) > 8:
surname_size = "Large"
string = "%"
string += r""" pic name role
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=3cm]{foto/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{flushright}
{\%s \textbf{%s}}\\
{\%s \textbf{%s}}\\
\vspace*{0.3cm}
{\Large %s}\\{\Large %s}
\end{flushright}
\end{minipage}\\
\vspace*{0.3cm}
""" % (photo, name_size, Name.split(" ")[0],
surname_size, Surname, dan, spec)
with open(f, "a") as f:
f.write(string)
def write_country(p, f, C):
country = p[0].lower()
string = "%"
string += r""" country
\begin{minipage}[c]{0.3\textwidth}
\centering{\includegraphics[height=1.5cm]{flags/%s}}
\end{minipage}
\begin{minipage}[c]{0.15\textwidth}
\phantom{eic}
\end{minipage}
\begin{minipage}[c]{0.5\textwidth}
\begin{center}
{\fontsize{30}{40}\selectfont %s}
\end{center}
\end{minipage}
""" % (country, C)
with open(f, "a") as f:
f.write(string)
def write_role(p, f):
role = p[4].lower()
size = "huge"
if len(role) > len("manager + competitor"):
size = "Large"
string = "%"
string += r""" role
\vspace*{0.25cm}
\begin{center}
{\%s %s}
\end{center}
\vspace*{0.25cm}
""" % (size, role)
with open(f, "a") as f:
f.write(string)
def write_footer(f):
foot = "%"
foot += r""" foot
\centering{\includegraphics[width=\textwidth,keepaspectratio]{footer}}
\end{document}
"""
with open(f, "a") as f:
f.write(foot)
def is_in_foto_dir(ekf, path="./foto"):
for element in os.listdir(path):
if ekf.lower() in element.lower():
return element
return None
if __name__ == "__main__":
DIR = "./zawieszki/"
COUNTRIES = "./countries/"
country = {'pol': 'Poland'}
rl = read_list()
for row in rl:
player = row.split(",")
country = player[0]
cou = country[:3].lower()
fname = player[1]
lname = player[2]
dan = player[3]
role = player[4]
ekf = player[5]
pic = player[6]
fn = fname.lower()
ln = lname.lower()
fname = "{}_{}_{}".format(cou, ln, fn)
fname_tex = fname + ".tex"
write_preamble(fname_tex)
photo = None if ekf == "NA" else is_in_foto_dir(ekf)
write_player(player, fname_tex, photo)
write_country(player, fname_tex, country)
write_role(player, fname_tex)
write_footer(fname_tex)
subprocess.call(["ls", fname])
subprocess.call(["xelatex", fname_tex])
subprocess.call(["rm", fname_tex, fname + ".log", fname + ".aux"])
subprocess.call(["mv", fname + ".pdf", DIR])
| true | true |
f73079477a3e239cd6a9759ffd6c3d612f6a5ff4 | 9,121 | py | Python | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/interceptors/interceptor.py | allandproust/google-ads-python | 004d283f5c9031748782884daad41d97c281cafa | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mixin class to store shared functionality for all the gRPC Interceptors.
This mixin class centralizes sets of functionality that are common across all
Interceptors, including retrieving data from gRPC metadata and initializing
instances of grpc.ClientCallDetails.
"""
from collections import namedtuple
from importlib import import_module
import json
from google.protobuf.message import DecodeError
from grpc import ClientCallDetails, StatusCode
from google.ads.google_ads.errors import GoogleAdsException
_REQUEST_ID_KEY = 'request-id'
# Codes that are retried upon by google.api_core.
_RETRY_STATUS_CODES = (StatusCode.INTERNAL, StatusCode.RESOURCE_EXHAUSTED)
_SENSITIVE_INFO_MASK = 'REDACTED'
class Interceptor:
@classmethod
def get_request_id_from_metadata(cls, trailing_metadata):
"""Gets the request ID for the Google Ads API request.
Args:
trailing_metadata: a tuple of metadatum from the service response.
Returns:
A str request ID associated with the Google Ads API request, or None
if it doesn't exist.
"""
for kv in trailing_metadata:
if kv[0] == _REQUEST_ID_KEY:
return kv[1] # Return the found request ID.
return None
@classmethod
def parse_metadata_to_json(cls, metadata):
"""Parses metadata from gRPC request and response messages to a JSON str.
Obscures the value for "developer-token".
Args:
metadata: a tuple of metadatum.
Returns:
A str of metadata formatted as JSON key/value pairs.
"""
metadata_dict = {}
if metadata is None:
return '{}'
for datum in metadata:
key = datum[0]
if key == 'developer-token':
metadata_dict[key] = _SENSITIVE_INFO_MASK
else:
value = datum[1]
metadata_dict[key] = value
return cls.format_json_object(metadata_dict)
@classmethod
def format_json_object(cls, obj):
"""Parses a serializable object into a consistently formatted JSON string.
Returns:
A str of formatted JSON serialized from the given object.
Args:
obj: an object or dict.
Returns:
A str of metadata formatted as JSON key/value pairs.
"""
def default_serializer(value):
if isinstance(value, bytes):
return value.decode(errors='ignore')
else:
return None
return str(json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False,
default=default_serializer,
separators=(',', ': ')))
@classmethod
def get_trailing_metadata_from_interceptor_exception(cls, exception):
"""Retrieves trailing metadata from an exception object.
Args:
exception: an instance of grpc.Call.
Returns:
A tuple of trailing metadata key value pairs.
"""
try:
# GoogleAdsFailure exceptions will contain trailing metadata on the
# error attribute.
return exception.error.trailing_metadata()
except AttributeError:
try:
# Transport failures, i.e. issues at the gRPC layer, will contain
# trailing metadata on the exception itself.
return exception.trailing_metadata()
except AttributeError:
# if trailing metadata is not found in either location then
# return an empty tuple
return tuple()
@classmethod
def get_client_call_details_instance(cls, method, timeout, metadata,
credentials=None):
"""Initializes an instance of the ClientCallDetails with the given data.
Args:
method: A str of the service method being invoked.
timeout: A float of the request timeout
metadata: A list of metadata tuples
credentials: An optional grpc.CallCredentials instance for the RPC
Returns:
An instance of _ClientCallDetails that wraps grpc.ClientCallDetails.
"""
class _ClientCallDetails(
namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
ClientCallDetails):
"""Wrapper class for initializing a new ClientCallDetails instance.
"""
pass
return _ClientCallDetails(method, timeout, metadata, credentials)
def __init__(self, api_version):
self._error_protos = import_module(
f'google.ads.google_ads.{api_version}.proto.errors.errors_pb2')
self._failure_key = (
f'google.ads.googleads.{api_version}.errors.googleadsfailure-bin')
self._exception = None
def _get_error_from_response(self, response):
"""Attempts to wrap failed responses as GoogleAdsException instances.
Handles failed gRPC responses of by attempting to convert them
to a more readable GoogleAdsException. Certain types of exceptions are
not converted; if the object's trailing metadata does not indicate that
it is a GoogleAdsException, or if it falls under a certain category of
status code, (INTERNAL or RESOURCE_EXHAUSTED). See documentation for
more information about gRPC status codes:
https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
Args:
response: a grpc.Call/grpc.Future instance.
Returns:
GoogleAdsException: If the exception's trailing metadata
indicates that it is a GoogleAdsException.
RpcError: If the exception's is a gRPC exception but the trailing
metadata is empty or is not indicative of a GoogleAdsException,
or if the exception has a status code of INTERNAL or
RESOURCE_EXHAUSTED.
Exception: If not a GoogleAdsException or RpcException the error
will be raised as-is.
"""
if self._exception:
return self._exception
status_code = response.code()
response_exception = response.exception()
if status_code not in _RETRY_STATUS_CODES:
trailing_metadata = response.trailing_metadata()
google_ads_failure = self._get_google_ads_failure(trailing_metadata)
if google_ads_failure:
request_id = self.get_request_id_from_metadata(
trailing_metadata)
# If exception is a GoogleAdsFailure then it gets wrapped in a
# library-specific Error type for easy handling. These errors
# originate from the Google Ads API and are often caused by
# invalid requests.
self._exception = GoogleAdsException(
response_exception, response, google_ads_failure,
request_id)
else:
# Raise the original exception if not a GoogleAdsFailure. This
# type of error is generally caused by problems at the request
# level, such as when an invalid endpoint is given.
self._exception = response_exception
else:
# Raise the original exception if error has status code
# INTERNAL or RESOURCE_EXHAUSTED, meaning that
self._exception = response_exception
return self._exception
def _get_google_ads_failure(self, trailing_metadata):
"""Gets the Google Ads failure details if they exist.
Args:
trailing_metadata: a tuple of metadatum from the service response.
Returns:
A GoogleAdsFailure that describes how a GoogleAds API call failed.
Returns None if either the trailing metadata of the request did not
return the failure details, or if the GoogleAdsFailure fails to
parse.
"""
if trailing_metadata is not None:
for kv in trailing_metadata:
if kv[0] == self._failure_key:
try:
ga_failure = self._error_protos.GoogleAdsFailure()
ga_failure.ParseFromString(kv[1])
return ga_failure
except DecodeError:
return None
return None
| 37.846473 | 82 | 0.63129 |
from collections import namedtuple
from importlib import import_module
import json
from google.protobuf.message import DecodeError
from grpc import ClientCallDetails, StatusCode
from google.ads.google_ads.errors import GoogleAdsException
_REQUEST_ID_KEY = 'request-id'
_RETRY_STATUS_CODES = (StatusCode.INTERNAL, StatusCode.RESOURCE_EXHAUSTED)
_SENSITIVE_INFO_MASK = 'REDACTED'
class Interceptor:
@classmethod
def get_request_id_from_metadata(cls, trailing_metadata):
for kv in trailing_metadata:
if kv[0] == _REQUEST_ID_KEY:
return kv[1]
return None
@classmethod
def parse_metadata_to_json(cls, metadata):
metadata_dict = {}
if metadata is None:
return '{}'
for datum in metadata:
key = datum[0]
if key == 'developer-token':
metadata_dict[key] = _SENSITIVE_INFO_MASK
else:
value = datum[1]
metadata_dict[key] = value
return cls.format_json_object(metadata_dict)
@classmethod
def format_json_object(cls, obj):
def default_serializer(value):
if isinstance(value, bytes):
return value.decode(errors='ignore')
else:
return None
return str(json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False,
default=default_serializer,
separators=(',', ': ')))
@classmethod
def get_trailing_metadata_from_interceptor_exception(cls, exception):
try:
return exception.error.trailing_metadata()
except AttributeError:
try:
return exception.trailing_metadata()
except AttributeError:
return tuple()
@classmethod
def get_client_call_details_instance(cls, method, timeout, metadata,
credentials=None):
class _ClientCallDetails(
namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
ClientCallDetails):
pass
return _ClientCallDetails(method, timeout, metadata, credentials)
def __init__(self, api_version):
self._error_protos = import_module(
f'google.ads.google_ads.{api_version}.proto.errors.errors_pb2')
self._failure_key = (
f'google.ads.googleads.{api_version}.errors.googleadsfailure-bin')
self._exception = None
def _get_error_from_response(self, response):
if self._exception:
return self._exception
status_code = response.code()
response_exception = response.exception()
if status_code not in _RETRY_STATUS_CODES:
trailing_metadata = response.trailing_metadata()
google_ads_failure = self._get_google_ads_failure(trailing_metadata)
if google_ads_failure:
request_id = self.get_request_id_from_metadata(
trailing_metadata)
self._exception = GoogleAdsException(
response_exception, response, google_ads_failure,
request_id)
else:
self._exception = response_exception
else:
self._exception = response_exception
return self._exception
def _get_google_ads_failure(self, trailing_metadata):
if trailing_metadata is not None:
for kv in trailing_metadata:
if kv[0] == self._failure_key:
try:
ga_failure = self._error_protos.GoogleAdsFailure()
ga_failure.ParseFromString(kv[1])
return ga_failure
except DecodeError:
return None
return None
| true | true |
f7307a29a25257c1fb187abf7a58e0c31eda5993 | 17,968 | py | Python | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | null | null | null | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 1 | 2020-04-23T02:34:53.000Z | 2020-04-23T02:34:53.000Z | toolchain/riscv/MSYS/python/Lib/test/test_platform.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 1 | 2020-04-27T15:07:54.000Z | 2020-04-27T15:07:54.000Z | from unittest import mock
import os
import platform
import subprocess
import sys
import sysconfig
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
    @support.skip_unless_symlink
    def test_architecture_via_symlink(self): # issue3762
        """Regression test for issue #3762: architecture() must report the
        same result when the interpreter is launched through a symlink as
        when it is launched directly.

        Spawns two child interpreters (real executable and symlink) and
        compares their printed platform.architecture() output.
        """
        if sys.platform == "win32" and not os.path.exists(sys.executable):
            # App symlink appears to not exist, but we want the
            # real executable here anyway
            import _winapi
            real = _winapi.GetModuleFileName(0)
        else:
            real = os.path.realpath(sys.executable)
        link = os.path.abspath(support.TESTFN)
        os.symlink(real, link)
        # On Windows, the EXE needs to know where pythonXY.dll and *.pyd is at
        # so we add the directory to the path, PYTHONHOME and PYTHONPATH.
        env = None
        if sys.platform == "win32":
            env = {k.upper(): os.environ[k] for k in os.environ}
            env["PATH"] = "{};{}".format(
                os.path.dirname(real), env.get("PATH", ""))
            env["PYTHONHOME"] = os.path.dirname(real)
            if sysconfig.is_python_build(True):
                env["PYTHONPATH"] = os.path.dirname(os.__file__)
        def get(python, env=None):
            # Run `python -c ...` in a subprocess and return its
            # (stdout, stderr) pair; fail the test on a non-zero exit.
            cmd = [python, '-c',
                   'import platform; print(platform.architecture())']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, env=env)
            r = p.communicate()
            if p.returncode:
                print(repr(r[0]))
                print(repr(r[1]), file=sys.stderr)
                self.fail('unexpected return code: {0} (0x{0:08X})'
                          .format(p.returncode))
            return r
        try:
            self.assertEqual(get(sys.executable), get(link, env=env))
        finally:
            # Always remove the symlink, even if the comparison failed.
            os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
    def test_sys_version(self):
        """Check platform._sys_version() and the python_*() accessors
        against a table of known sys.version strings.

        The second half overwrites sys.version, sys._git and sys.platform;
        setUp()/tearDown() save and restore the real values.
        """
        # Old test.
        # NOTE: 'input' shadows the builtin of the same name inside this
        # loop; kept as-is for historical reasons.
        for input, output in (
            ('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
             ('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
            ('IronPython 1.0.60816 on .NET 2.0.50727.42',
             ('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
            ('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
             ('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
            ('2.4.3 (truncation, date, t) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
            ('2.4.3 (truncation, date, ) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, date,) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, date) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
            ('2.4.3 (truncation, d) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
            ('2.4.3 (truncation, ) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
            ('2.4.3 (truncation,) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
            ('2.4.3 (truncation) \n[GCC]',
             ('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
            ):
            # branch and revision are not "parsed", but fetched
            # from sys._git. Ignore them
            (name, version, branch, revision, buildno, builddate, compiler) \
                = platform._sys_version(input)
            self.assertEqual(
                (name, version, '', '', buildno, builddate, compiler), output)
        # Tests for python_implementation(), python_version(), python_branch(),
        # python_revision(), python_build(), and python_compiler().
        # Each key is (sys.version string, sys._git value or None, sys.platform);
        # each value is the expected output of the accessor functions.
        sys_versions = {
            ("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
             ('CPython', 'tags/r261', '67515'), self.save_platform)
            :
                ("CPython", "2.6.1", "tags/r261", "67515",
                 ('r261:67515', 'Dec 6 2008 15:26:00'),
                 'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
            ("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
            :
                ("IronPython", "2.0.0", "", "", ("", ""),
                 ".NET 2.0.50727.3053"),
            ("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
            :
                ("IronPython", "2.6.1", "", "", ("", ""),
                 ".NET 2.0.50727.1433"),
            ("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
            :
                ("IronPython", "2.7.4", "", "", ("", ""),
                 "Mono 4.0.30319.1 (32-bit)"),
            ("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
             ('Jython', 'trunk', '6107'), "java1.5.0_16")
            :
                ("Jython", "2.5.0", "trunk", "6107",
                 ('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
            ("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
             ('PyPy', 'trunk', '63378'), self.save_platform)
            :
                ("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
                 "")
            }
        for (version_tag, scm, sys_platform), info in \
                sys_versions.items():
            # Fake the interpreter identity, then check every accessor.
            sys.version = version_tag
            if scm is None:
                if hasattr(sys, "_git"):
                    del sys._git
            else:
                sys._git = scm
            if sys_platform is not None:
                sys.platform = sys_platform
            self.assertEqual(platform.python_implementation(), info[0])
            self.assertEqual(platform.python_version(), info[1])
            self.assertEqual(platform.python_branch(), info[2])
            self.assertEqual(platform.python_revision(), info[3])
            self.assertEqual(platform.python_build(), info[4])
            self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
    @unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
    def test_uname_win32_ARCHITEW6432(self):
        """uname() must take the machine type from PROCESSOR_ARCHITEW6432
        when that variable is set, and fall back to PROCESSOR_ARCHITECTURE
        otherwise."""
        # Issue 7860: make sure we get architecture from the correct variable
        # on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
        # using it, per
        # http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
        try:
            with support.EnvironmentVarGuard() as environ:
                if 'PROCESSOR_ARCHITEW6432' in environ:
                    del environ['PROCESSOR_ARCHITEW6432']
                environ['PROCESSOR_ARCHITECTURE'] = 'foo'
                platform._uname_cache = None  # force uname() to recompute
                system, node, release, version, machine, processor = platform.uname()
                self.assertEqual(machine, 'foo')
                environ['PROCESSOR_ARCHITEW6432'] = 'bar'
                platform._uname_cache = None  # recompute with the override set
                system, node, release, version, machine, processor = platform.uname()
                self.assertEqual(machine, 'bar')
        finally:
            # Drop the poisoned cache so later tests see real values again.
            platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
elif sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
executable = _winapi.GetModuleFileName(0)
else:
executable = sys.executable
res = platform.libc_ver(executable)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as f:
f.write(b'x'*(16384-10))
f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
self.assertEqual(platform.libc_ver(support.TESTFN),
('glibc', '1.23.4'))
@support.cpython_only
def test__comparable_version(self):
from platform import _comparable_version as V
self.assertEqual(V('1.2.3'), V('1.2.3'))
self.assertLess(V('1.2.3'), V('1.2.10'))
self.assertEqual(V('1.2.3.4'), V('1_2-3+4'))
self.assertLess(V('1.2spam'), V('1.2dev'))
self.assertLess(V('1.2dev'), V('1.2alpha'))
self.assertLess(V('1.2dev'), V('1.2a'))
self.assertLess(V('1.2alpha'), V('1.2beta'))
self.assertLess(V('1.2a'), V('1.2b'))
self.assertLess(V('1.2beta'), V('1.2c'))
self.assertLess(V('1.2b'), V('1.2c'))
self.assertLess(V('1.2c'), V('1.2RC'))
self.assertLess(V('1.2c'), V('1.2rc'))
self.assertLess(V('1.2RC'), V('1.2.0'))
self.assertLess(V('1.2rc'), V('1.2.0'))
self.assertLess(V('1.2.0'), V('1.2pl'))
self.assertLess(V('1.2.0'), V('1.2p'))
self.assertLess(V('1.5.1'), V('1.5.2b2'))
self.assertLess(V('3.10a'), V('161'))
self.assertEqual(V('8.02'), V('8.02'))
self.assertLess(V('3.4j'), V('1996.07.12'))
self.assertLess(V('3.1.1.6'), V('3.2.pl0'))
self.assertLess(V('2g6'), V('11g'))
self.assertLess(V('0.9'), V('2.2'))
self.assertLess(V('1.2'), V('1.2.1'))
self.assertLess(V('1.1'), V('1.2.2'))
self.assertLess(V('1.1'), V('1.2'))
self.assertLess(V('1.2.1'), V('1.2.2'))
self.assertLess(V('1.2'), V('1.2.2'))
self.assertLess(V('0.4'), V('0.4.0'))
self.assertLess(V('1.13++'), V('5.5.kw'))
self.assertLess(V('0.960923'), V('2.2beta29'))
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
# Issue #17429
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
| 41.689095 | 114 | 0.512077 | from unittest import mock
import os
import platform
import subprocess
import sys
import sysconfig
import tempfile
import unittest
import warnings
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self):
if sys.platform == "win32" and not os.path.exists(sys.executable):
import _winapi
real = _winapi.GetModuleFileName(0)
else:
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
env = None
if sys.platform == "win32":
env = {k.upper(): os.environ[k] for k in os.environ}
env["PATH"] = "{};{}".format(
os.path.dirname(real), env.get("PATH", ""))
env["PYTHONHOME"] = os.path.dirname(real)
if sysconfig.is_python_build(True):
env["PYTHONPATH"] = os.path.dirname(os.__file__)
def get(python, env=None):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
r = p.communicate()
if p.returncode:
print(repr(r[0]))
print(repr(r[1]), file=sys.stderr)
self.fail('unexpected return code: {0} (0x{0:08X})'
.format(p.returncode))
return r
try:
self.assertEqual(get(sys.executable), get(link, env=env))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_git = sys._git
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys._git = self.save_git
sys.platform = self.save_platform
def test_sys_version(self):
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
('2.4.3 (truncation, date, t) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date t', 'GCC')),
('2.4.3 (truncation, date, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, date) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'date', 'GCC')),
('2.4.3 (truncation, d) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', 'd', 'GCC')),
('2.4.3 (truncation, ) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation,) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
('2.4.3 (truncation) \n[GCC]',
('CPython', '2.4.3', '', '', 'truncation', '', 'GCC')),
):
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
:
("IronPython", "2.6.1", "", "", ("", ""),
".NET 2.0.50727.1433"),
("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
:
("IronPython", "2.7.4", "", "", ("", ""),
"Mono 4.0.30319.1 (32-bit)"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, scm, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if scm is None:
if hasattr(sys, "_git"):
del sys._git
else:
sys._git = scm
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
self.assertEqual(res[0], res.system)
self.assertEqual(res[1], res.node)
self.assertEqual(res[2], res.release)
self.assertEqual(res[3], res.version)
self.assertEqual(res[4], res.machine)
self.assertEqual(res[5], res.processor)
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname().system == 'Darwin':
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
res = platform.dist()
def test_libc_ver(self):
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
elif sys.platform == "win32" and not os.path.exists(sys.executable):
# App symlink appears to not exist, but we want the
# real executable here anyway
import _winapi
executable = _winapi.GetModuleFileName(0)
else:
executable = sys.executable
res = platform.libc_ver(executable)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as f:
f.write(b'x'*(16384-10))
f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
self.assertEqual(platform.libc_ver(support.TESTFN),
('glibc', '1.23.4'))
@support.cpython_only
def test__comparable_version(self):
from platform import _comparable_version as V
self.assertEqual(V('1.2.3'), V('1.2.3'))
self.assertLess(V('1.2.3'), V('1.2.10'))
self.assertEqual(V('1.2.3.4'), V('1_2-3+4'))
self.assertLess(V('1.2spam'), V('1.2dev'))
self.assertLess(V('1.2dev'), V('1.2alpha'))
self.assertLess(V('1.2dev'), V('1.2a'))
self.assertLess(V('1.2alpha'), V('1.2beta'))
self.assertLess(V('1.2a'), V('1.2b'))
self.assertLess(V('1.2beta'), V('1.2c'))
self.assertLess(V('1.2b'), V('1.2c'))
self.assertLess(V('1.2c'), V('1.2RC'))
self.assertLess(V('1.2c'), V('1.2rc'))
self.assertLess(V('1.2RC'), V('1.2.0'))
self.assertLess(V('1.2rc'), V('1.2.0'))
self.assertLess(V('1.2.0'), V('1.2pl'))
self.assertLess(V('1.2.0'), V('1.2p'))
self.assertLess(V('1.5.1'), V('1.5.2b2'))
self.assertLess(V('3.10a'), V('161'))
self.assertEqual(V('8.02'), V('8.02'))
self.assertLess(V('3.4j'), V('1996.07.12'))
self.assertLess(V('3.1.1.6'), V('3.2.pl0'))
self.assertLess(V('2g6'), V('11g'))
self.assertLess(V('0.9'), V('2.2'))
self.assertLess(V('1.2'), V('1.2.1'))
self.assertLess(V('1.1'), V('1.2.2'))
self.assertLess(V('1.1'), V('1.2'))
self.assertLess(V('1.2.1'), V('1.2.2'))
self.assertLess(V('1.2'), V('1.2.2'))
self.assertLess(V('0.4'), V('0.4.0'))
self.assertLess(V('1.13++'), V('5.5.kw'))
self.assertLess(V('0.960923'), V('2.2beta29'))
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_popen(self):
mswindows = (sys.platform == "win32")
if mswindows:
command = '"{}" -c "print(\'Hello\')"'.format(sys.executable)
else:
command = "'{}' -c 'print(\"Hello\")'".format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command) as stdout:
hello = stdout.read().strip()
stdout.close()
self.assertEqual(hello, "Hello")
data = 'plop'
if mswindows:
command = '"{}" -c "import sys; data=sys.stdin.read(); exit(len(data))"'
else:
command = "'{}' -c 'import sys; data=sys.stdin.read(); exit(len(data))'"
command = command.format(sys.executable)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with platform.popen(command, 'w') as stdin:
stdout = stdin.write(data)
ret = stdin.close()
self.assertIsNotNone(ret)
if os.name == 'nt':
returncode = ret
else:
returncode = ret >> 8
self.assertEqual(returncode, len(data))
def test_linux_distribution_encoding(self):
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, 'fedora-release')
with open(filename, 'w', encoding='utf-8') as f:
f.write('Fedora release 19 (Schr\xf6dinger\u2019s Cat)\n')
with mock.patch('platform._UNIXCONFDIR', tempdir):
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
distname, version, distid = platform.linux_distribution()
self.assertEqual(distname, 'Fedora')
self.assertEqual(version, '19')
self.assertEqual(distid, 'Schr\xf6dinger\u2019s Cat')
class DeprecationTest(unittest.TestCase):
def test_dist_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.dist()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
def test_linux_distribution_deprecation(self):
with self.assertWarns(DeprecationWarning) as cm:
platform.linux_distribution()
self.assertEqual(str(cm.warning),
'dist() and linux_distribution() functions are '
'deprecated in Python 3.5')
if __name__ == '__main__':
unittest.main()
| true | true |
f7307a51d6578e0dff654d9626eca5b71b10855a | 2,435 | py | Python | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | 1 | 2016-11-17T07:15:00.000Z | 2016-11-17T07:15:00.000Z | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | 6 | 2016-11-17T10:27:38.000Z | 2016-11-18T13:20:05.000Z | src/core/magicae/core.py | ravenSanstete/hako | fe72c76e9f319add1921a63dee711f90f4960873 | [
"MIT"
] | null | null | null | """
Implementation of an auxiliary class that as an intermediate layer of Monad and Prototype
A Function Class
"""
import functools as F
from __init__ import utils
from __init__ import proto
# a magica determines (A) the way to distribute the difference to each prototype
# (B) the way to measure over the prototypes
# By generating functions when invoking the mix functional
# In such a way, magica more like a static class
# constaints should not be contained in this file
# never try to modify the prototype information even it seems necessary
class Magica(object):
"""docstring for Magica"""
def __init__(self):
super(Magica,self).__init__()
# return generated measure method, generated propagate method, individual variables
def mix(self, prototypes):
utils.check_types(prototypes,proto.Prototype);
return F.partial(measure, self, prototypes), F.partial(propagate, self, prototypes), self._allocate(prototypes);
# you may able to set different sample options for each prototype
# options should be a list the same length with prototypes, otherwise, the last several will be considered as None
def measure(self, prototypes, options):
assert(len(options)<=len(prototypes)); # the less part
# padding the options list
while(len(prototypes)!=len(options)): options.append(None);
self._measure([prototypes[i].measure(options[i]) for i in range(len(options))]);
def propagate(self, prototypes, diff, parameters):
sub_diff_list=self._distribute(prototypes, diff, parameters);
F.map(lambda i:prototypes[i].propagate(sub_diff_list[i]),range(len(sub_diff_list)));
#
# <override> is needed. As a measurements
def _measure(self, sub_measurements, parameters):
utils.require_override();
# <override> is needed. return a list of the distributed difference list
# prototypes passed for reference
# diff may be a batch, thus the sublist should be a matrix maybe, along axis-0 do the arrangement
# parameters may be a batch as well, which should correspond to each diff
def _distribute(self, prototypes, diff, parameters):
utils.require_override();
# <override> is needed. return parameters of the instance of this kind of magica for a certain monad
# a dictionary with name is needed
def _allocate(self,prototypes):
utils.require_override();
| 48.7 | 120 | 0.720329 |
import functools as F
from __init__ import utils
from __init__ import proto
class Magica(object):
def __init__(self):
super(Magica,self).__init__()
def mix(self, prototypes):
utils.check_types(prototypes,proto.Prototype);
return F.partial(measure, self, prototypes), F.partial(propagate, self, prototypes), self._allocate(prototypes);
def measure(self, prototypes, options):
assert(len(options)<=len(prototypes));
while(len(prototypes)!=len(options)): options.append(None);
self._measure([prototypes[i].measure(options[i]) for i in range(len(options))]);
def propagate(self, prototypes, diff, parameters):
sub_diff_list=self._distribute(prototypes, diff, parameters);
F.map(lambda i:prototypes[i].propagate(sub_diff_list[i]),range(len(sub_diff_list)));
def _measure(self, sub_measurements, parameters):
utils.require_override();
def _distribute(self, prototypes, diff, parameters):
utils.require_override();
def _allocate(self,prototypes):
utils.require_override();
| true | true |
f7307f0a3175323d41f8181d3a53d0ce5b8e591e | 3,465 | py | Python | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | src/programy/parser/template/nodes/system.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | """
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
import subprocess
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.attrib import TemplateAttribNode
class TemplateSystemNode(TemplateAttribNode):
def __init__(self):
TemplateAttribNode.__init__(self)
self._timeout = 0
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, timeout):
self._timeout = timeout
def resolve_to_string(self, client_context):
if client_context.brain.configuration.overrides.allow_system_aiml is True:
command = self.resolve_children_to_string(client_context)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = []
for line in process.stdout.readlines():
byte_string = line.decode("utf-8")
result.append(byte_string.strip())
process.wait()
resolved = " ".join(result)
else:
YLogger.warning(client_context, "System command node disabled in config")
resolved = ""
YLogger.debug(client_context, "[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
def to_string(self):
return "[SYSTEM timeout=%s]" % (self._timeout)
def set_attrib(self, attrib_name, attrib_value):
if attrib_name != 'timeout':
raise ParserException("Invalid attribute name %s for this node", attrib_name)
YLogger.warning(self, "System node timeout attrib currently ignored")
self._timeout = attrib_value
def to_xml(self, client_context):
xml = "<system"
if self._timeout != 0:
xml += ' timeout="%d"' % self._timeout
xml += ">"
xml += self.children_to_xml(client_context)
xml += "</system>"
return xml
#######################################################################################################
# SYSTEM_EXPRESSION ::==
# <system( TIMEOUT_ATTRIBUTE)>TEMPLATE_EXPRESSION</system> |
# <system><timeout>TEMPLATE_EXPRESSION</timeout></system>
# TIMEOUT_ATTRIBUTE :== timeout=”NUMBER”
def parse_expression(self, graph, expression):
self._parse_node_with_attrib(graph, expression, "timeout", "0")
| 42.777778 | 120 | 0.679076 |
from programy.utils.logging.ylogger import YLogger
import subprocess
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.attrib import TemplateAttribNode
class TemplateSystemNode(TemplateAttribNode):
def __init__(self):
TemplateAttribNode.__init__(self)
self._timeout = 0
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, timeout):
self._timeout = timeout
def resolve_to_string(self, client_context):
if client_context.brain.configuration.overrides.allow_system_aiml is True:
command = self.resolve_children_to_string(client_context)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = []
for line in process.stdout.readlines():
byte_string = line.decode("utf-8")
result.append(byte_string.strip())
process.wait()
resolved = " ".join(result)
else:
YLogger.warning(client_context, "System command node disabled in config")
resolved = ""
YLogger.debug(client_context, "[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
def to_string(self):
return "[SYSTEM timeout=%s]" % (self._timeout)
def set_attrib(self, attrib_name, attrib_value):
if attrib_name != 'timeout':
raise ParserException("Invalid attribute name %s for this node", attrib_name)
YLogger.warning(self, "System node timeout attrib currently ignored")
self._timeout = attrib_value
def to_xml(self, client_context):
xml = "<system"
if self._timeout != 0:
xml += ' timeout="%d"' % self._timeout
xml += ">"
xml += self.children_to_xml(client_context)
xml += "</system>"
return xml
| true | true |
f73080eed9178010cd0131e215e25b70104b6075 | 1,483 | py | Python | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 2 | 2018-11-14T17:02:02.000Z | 2019-07-19T07:13:41.000Z | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 13 | 2018-11-06T09:29:50.000Z | 2019-12-23T07:36:07.000Z | engine/test_cloud_init.py | cnrancher/os-tests | 57d46413954e602e81cad287410dfecf46bfef84 | [
"Apache-2.0"
] | 1 | 2018-11-05T04:03:20.000Z | 2018-11-05T04:03:20.000Z | # coding = utf-8
# Create date: 2018-11-20
# Author :Hailong
from utils.connect_to_os import executor, connection
def test_cloud_init(ros_kvm_init, cloud_config_url):
kwargs = dict(cloud_config='{url}test_cloud_init.yml'.format(url=cloud_config_url),
is_install_to_hard_drive=True)
tuple_return = ros_kvm_init(**kwargs)
client = tuple_return[0]
ip = tuple_return[1]
c_export_config = 'sudo ros c export'
output_export_config = executor(client, c_export_config)
assert ('debug' in output_export_config)
# Create a datasource file locally
# test_cloud_init.txt
# # cloud-config
# rancher:
# log: true
c_create_ds = 'sudo tee /var/lib/rancher/conf/cloud-config.d/datasources.yml << EOF \
rancher: \
cloud_init: \
datasources: \
- url:https://gist.githubusercontent.com/Aisuko/4914974de1cf2a3d5127fd482e2c001a/raw/\
ed1e30a8a096c6e10d485d02092eaaf8ca8871bd/test_cloud_init.txt \
EOF'
# Reboot
c_reboot = 'sudo reboot'
executor(client, c_create_ds + c_reboot)
second_client = connection(ip, None)
c_ros_log = 'sudo ros config get rancher.log'
output_ros_log = executor(second_client, c_ros_log)
if output_ros_log:
output_ros_log = output_ros_log.replace('\n', '')
second_client.close()
assert ('true' == output_ros_log)
| 35.309524 | 110 | 0.647336 |
from utils.connect_to_os import executor, connection
def test_cloud_init(ros_kvm_init, cloud_config_url):
kwargs = dict(cloud_config='{url}test_cloud_init.yml'.format(url=cloud_config_url),
is_install_to_hard_drive=True)
tuple_return = ros_kvm_init(**kwargs)
client = tuple_return[0]
ip = tuple_return[1]
c_export_config = 'sudo ros c export'
output_export_config = executor(client, c_export_config)
assert ('debug' in output_export_config)
c_create_ds = 'sudo tee /var/lib/rancher/conf/cloud-config.d/datasources.yml << EOF \
rancher: \
cloud_init: \
datasources: \
- url:https://gist.githubusercontent.com/Aisuko/4914974de1cf2a3d5127fd482e2c001a/raw/\
ed1e30a8a096c6e10d485d02092eaaf8ca8871bd/test_cloud_init.txt \
EOF'
c_reboot = 'sudo reboot'
executor(client, c_create_ds + c_reboot)
second_client = connection(ip, None)
c_ros_log = 'sudo ros config get rancher.log'
output_ros_log = executor(second_client, c_ros_log)
if output_ros_log:
output_ros_log = output_ros_log.replace('\n', '')
second_client.close()
assert ('true' == output_ros_log)
| true | true |
f730815c16e59fb5fe9c61f2d195fe9da671f9b0 | 1,023 | py | Python | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/as-far-from-land-as-possible.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(m * n)
# Space: O(m * n)
import collections
class Solution(object):
    def maxDistance(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Multi-source BFS: flood outward from every land cell at once; the
        number of waves needed to cover all water is the largest distance
        from any water cell to its nearest land.  Returns -1 when the grid
        is all land or all water.  Mutates ``grid`` (visited water -> 1).
        """
        rows, cols = len(grid), len(grid[0])
        frontier = [(r, c) for r in range(rows)
                    for c in range(cols) if grid[r][c] == 1]
        if len(frontier) == rows * cols:
            return -1          # no water at all
        distance = -1
        while frontier:
            reached = []
            for r, c in frontier:
                # Visit the four orthogonal neighbours.
                for nr, nc in ((r, c + 1), (r + 1, c), (r, c - 1), (r - 1, c)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 0:
                        grid[nr][nc] = 1   # mark visited in place
                        reached.append((nr, nc))
            frontier = reached
            distance += 1
        return distance
| 30.088235 | 87 | 0.379277 |
import collections
class Solution(object):
def maxDistance(self, grid):
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
q = collections.deque([(i, j) for i in range(len(grid))
for j in range(len(grid[0])) if grid[i][j] == 1])
if len(q) == len(grid)*len(grid[0]):
return -1
level = -1
while q:
next_q = collections.deque()
while q:
x, y = q.popleft()
for dx, dy in directions:
nx, ny = x+dx, y+dy
if not (0 <= nx < len(grid) and
0 <= ny < len(grid[0]) and
grid[nx][ny] == 0):
continue
next_q.append((nx, ny))
grid[nx][ny] = 1
q = next_q
level += 1
return level
| true | true |
f73081bc0d5f8d3350d07a2150f753fd344a22a7 | 623 | py | Python | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | guanabara/Exercicios/mundo 1 _ aulas 01 a 12/001-003.py | pbittencourt/datasciencestudies | 85f0b2a4366fe7c6daa5628ed4bd2994355963c0 | [
"MIT"
] | null | null | null | # PRIMEIROS PASSOS
"""
Imprime OLÁ, MUNDO na tela -- obviamente!
Pede nome do usuário e dois números.
Exibe nome do usuário e a soma dos números inseridos
"""
print('Hello, world!!!')
nome = input('Qual é o seu nome? ')
print('Olá, {}, é um prazer te conhecer!'.format(nome))
#
idade = int(input('Qual é a sua idade? '))
print('Você tem {} anos e ano que vem você terá {} anos' .format(idade, idade + 1))
n1 = int(input('Aproveitando a brincadeira, {}, digita um número aí: ' .format(nome)))
n2 = int(input('Digita outro número, por obséquio: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}' .format(n1, n2, s))
| 32.789474 | 86 | 0.659711 |
print('Hello, world!!!')
nome = input('Qual é o seu nome? ')
print('Olá, {}, é um prazer te conhecer!'.format(nome))
idade = int(input('Qual é a sua idade? '))
print('Você tem {} anos e ano que vem você terá {} anos' .format(idade, idade + 1))
n1 = int(input('Aproveitando a brincadeira, {}, digita um número aí: ' .format(nome)))
n2 = int(input('Digita outro número, por obséquio: '))
s = n1 + n2
print('A soma entre {} e {} é igual a {}' .format(n1, n2, s))
| true | true |
f73081bcbbb55a3e2f6093ea3985232180c04f1a | 1,812 | py | Python | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | null | null | null | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | null | null | null | homework/HW4/HW4-final/P1.py | TangJiahui/cs107_system_devlopment | c46d7769683d9be0c31973e3b0666e3fe2a4099b | [
"MIT"
] | 1 | 2021-09-21T16:28:51.000Z | 2021-09-21T16:28:51.000Z | import numpy as np
import matplotlib.pyplot as plt
# Part A: Numerical Differentiation Closure
def numerical_diff(f, h):
    """Return a callable that estimates f'(x) with the forward-difference
    quotient (f(x + h) - f(x)) / h at a fixed step size h."""
    return lambda x: (f(x + h) - f(x)) / h
# Part B: compare the forward-difference estimate of d/dx ln(x) against the
# exact derivative 1/x for three step sizes (too large / good / too small).
f = np.log
x = np.linspace(0.2, 0.4, 500)
h = [1e-1, 1e-7, 1e-15]
y_analytical = 1/x               # exact derivative of ln(x)
result = {}                      # step size -> estimated derivative array
for i in h:
    y = numerical_diff(f,i)(x)   # vectorized: x is an ndarray
    result[i] = y
# Plot the analytical curve and each finite-difference estimate.
plt.figure(figsize = (8,5))
plt.plot(x, y_analytical, 'x-', label='Analytical Derivative')
for i in h:
    plt.plot(x, result[i], label='Estimated derivative h = '+str(i))
plt.xlabel("X value")
plt.ylabel("Derivative Value at X")
plt.title("Differentiation Value at X on various h value")
plt.legend()
# Part C:
print("Answer to Q-a: When h value is 1e-7, it most closely approximates the true derivative. \n",
"When h value is too small: The approximation is jumping around stepwise and not displaying a smooth curve approximation, it amplifies floating point errors in numerical operation such as rounding and division\n",
"When h value is too large: The approximation is lower than the true value, it doesn't provide a good approximation to the derivative\n")
print("Answer to Q-b: Automatic differentiation avoids the problem of not choosing a good h value. \n"
"The finite difference approach is quick and easy but suffers from accuracy and stability problems.\n"
"Symbolic derivatives can be evaluated to machine precision, but can be costly to evaluate.\n"
"Automatic differentiation (AD) overcomes both of these deficiencies. It is less costly than symbolic differentiation while evaluating derivatives to machine precision.\n"
"AD uses forward or backward modes to differentiate, via Computational Graph, chain rule and evaluation trace.")
# Show plot
plt.show()
# plt.savefig('P1_fig.png')
| 38.553191 | 219 | 0.724614 | import numpy as np
import matplotlib.pyplot as plt
def numerical_diff(f,h):
def inner(x):
return (f(x+h) - f(x))/h
return inner
f = np.log
x = np.linspace(0.2, 0.4, 500)
h = [1e-1, 1e-7, 1e-15]
y_analytical = 1/x
result = {}
for i in h:
y = numerical_diff(f,i)(x)
result[i] = y
plt.figure(figsize = (8,5))
plt.plot(x, y_analytical, 'x-', label='Analytical Derivative')
for i in h:
plt.plot(x, result[i], label='Estimated derivative h = '+str(i))
plt.xlabel("X value")
plt.ylabel("Derivative Value at X")
plt.title("Differentiation Value at X on various h value")
plt.legend()
print("Answer to Q-a: When h value is 1e-7, it most closely approximates the true derivative. \n",
"When h value is too small: The approximation is jumping around stepwise and not displaying a smooth curve approximation, it amplifies floating point errors in numerical operation such as rounding and division\n",
"When h value is too large: The approximation is lower than the true value, it doesn't provide a good approximation to the derivative\n")
print("Answer to Q-b: Automatic differentiation avoids the problem of not choosing a good h value. \n"
"The finite difference approach is quick and easy but suffers from accuracy and stability problems.\n"
"Symbolic derivatives can be evaluated to machine precision, but can be costly to evaluate.\n"
"Automatic differentiation (AD) overcomes both of these deficiencies. It is less costly than symbolic differentiation while evaluating derivatives to machine precision.\n"
"AD uses forward or backward modes to differentiate, via Computational Graph, chain rule and evaluation trace.")
# Show plot
plt.show()
# plt.savefig('P1_fig.png')
| true | true |
f730824ede58439b2808be47b15c08761386514e | 10,409 | py | Python | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/db/mlnx_db_v2.py | petrutlucian94/neutron | 44976d12bbe72331e536d92bb46e35a8835a75ce | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def initialize():
    """Set up the plugin's database access (delegates to neutron.db.api)."""
    db.configure_db()
def _remove_non_allocatable_vlans(session, allocations,
                                  physical_network, vlan_ids):
    """Delete pool rows for VLANs that are no longer configured as allocatable.

    ``allocations`` maps physical network name -> set of existing DB rows;
    ``vlan_ids`` is the set of currently configured allocatable VLAN ids.
    Side effects: ids that already have a row are removed from ``vlan_ids``
    (so the caller only inserts the truly missing ones), and the processed
    physical network is dropped from ``allocations``.
    """
    if physical_network in allocations:
        for entry in allocations[physical_network]:
            try:
                # see if vlan is allocatable
                vlan_ids.remove(entry.segmentation_id)
            except KeyError:
                # it's not allocatable, so check if its allocated
                if not entry.allocated:
                    # it's not, so remove it from table
                    LOG.debug(_(
                        "Removing vlan %(seg_id)s on "
                        "physical network "
                        "%(net)s from pool"),
                        {'seg_id': entry.segmentation_id,
                         'net': physical_network})
                    session.delete(entry)
        del allocations[physical_network]
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
    """Insert a pool row for each configured VLAN id not yet in the table."""
    for vlan_id in sorted(vlan_ids):
        session.add(
            mlnx_models_v2.SegmentationIdAllocation(physical_network,
                                                    vlan_id))
def _remove_unconfigured_vlans(session, allocations):
    """Delete unallocated pool rows left for physical networks that are no
    longer configured (everything still present in ``allocations``)."""
    for entries in allocations.itervalues():
        for entry in entries:
            if not entry.allocated:
                LOG.debug(_("Removing vlan %(seg_id)s on physical "
                            "network %(net)s from pool"),
                          {'seg_id': entry.segmentation_id,
                           'net': entry.physical_network})
                session.delete(entry)
def sync_network_states(network_vlan_ranges):
    """Synchronize network_states table with current configured VLAN ranges.

    :param network_vlan_ranges: dict mapping physical network name to a list
        of (vlan_min, vlan_max) pairs.  (Python 2 code: iteritems/xrange.)
    """
    session = db.get_session()
    with session.begin():
        # get existing allocations for all physical networks
        allocations = dict()
        entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                   all())
        for entry in entries:
            allocations.setdefault(entry.physical_network, set()).add(entry)
        # process vlan ranges for each configured physical network
        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
            # determine current configured allocatable vlans for this
            # physical network
            vlan_ids = set()
            for vlan_range in vlan_ranges:
                vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
            # remove from table unallocated vlans not currently allocatable
            _remove_non_allocatable_vlans(session, allocations,
                                          physical_network, vlan_ids)
            # add missing allocatable vlans to table
            _add_missing_allocatable_vlans(session, physical_network, vlan_ids)
        # remove from table unallocated vlans for any unconfigured physical
        # networks
        _remove_unconfigured_vlans(session, allocations)
def get_network_state(physical_network, segmentation_id):
    """Return the allocation row for one (physical_network, vlan) pair.

    Returns the SegmentationIdAllocation row, or None when absent.
    """
    session = db.get_session()
    query = session.query(mlnx_models_v2.SegmentationIdAllocation)
    return query.filter_by(physical_network=physical_network,
                           segmentation_id=segmentation_id).first()
def reserve_network(session):
    """Allocate any free VLAN from the pool.

    Returns a (physical_network, segmentation_id) tuple; raises
    NoNetworkAvailable when every pool entry is already allocated.
    """
    with session.begin(subtransactions=True):
        entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                 filter_by(allocated=False).
                 with_lockmode('update').  # row lock: SELECT ... FOR UPDATE
                 first())
        if not entry:
            raise q_exc.NoNetworkAvailable()
        LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
                    "%(net)s from pool"),
                  {'seg_id': entry.segmentation_id,
                   'net': entry.physical_network})
        entry.allocated = True
        return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
    """Reserve a particular VLAN, inside or outside the configured pool.

    Raises VlanIdInUse when the id is already allocated; an id with no
    existing pool row is inserted as an allocated out-of-pool entry.
    """
    with session.begin(subtransactions=True):
        log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
        try:
            entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                     filter_by(physical_network=physical_network,
                               segmentation_id=segmentation_id).
                     with_lockmode('update').  # row lock while we flip the flag
                     one())
            if entry.allocated:
                raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
                                        physical_network=physical_network)
            LOG.debug(_("Reserving specific vlan %(seg_id)s "
                        "on physical network %(phy_net)s from pool"),
                      log_args)
            entry.allocated = True
        except exc.NoResultFound:
            # No pool row for this id: create it as an out-of-pool allocation.
            LOG.debug(_("Reserving specific vlan %(seg_id)s on "
                        "physical network %(phy_net)s outside pool"),
                      log_args)
            entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
                                                            segmentation_id)
            entry.allocated = True
            session.add(entry)
def release_network(session, physical_network,
                    segmentation_id, network_vlan_ranges):
    """Free a previously reserved VLAN.

    Ids inside a configured range for ``physical_network`` are returned to
    the allocation pool; ids outside every configured range have their row
    deleted entirely.  Logs a warning when no matching row exists.
    """
    with session.begin(subtransactions=True):
        log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
        try:
            state = (session.query(mlnx_models_v2.SegmentationIdAllocation)
                     .filter_by(physical_network=physical_network,
                                segmentation_id=segmentation_id)
                     .with_lockmode('update')
                     .one())
            state.allocated = False
            # Is this id covered by any configured (low, high) range?
            inside = any(r[0] <= segmentation_id <= r[1]
                         for r in network_vlan_ranges.get(physical_network,
                                                          []))
            if inside:
                LOG.debug(_("Releasing vlan %(seg_id)s "
                            "on physical network "
                            "%(phy_net)s to pool"),
                          log_args)
            else:
                LOG.debug(_("Releasing vlan %(seg_id)s "
                            "on physical network "
                            "%(phy_net)s outside pool"),
                          log_args)
                session.delete(state)
        except exc.NoResultFound:
            LOG.warning(_("vlan_id %(seg_id)s on physical network "
                          "%(phy_net)s not found"),
                        log_args)
def add_network_binding(session, network_id, network_type,
                        physical_network, vlan_id):
    """Persist the (type, physical network, vlan) binding for a network."""
    with session.begin(subtransactions=True):
        session.add(mlnx_models_v2.NetworkBinding(network_id, network_type,
                                                  physical_network, vlan_id))
def get_network_binding(session, network_id):
    """Return the NetworkBinding row for ``network_id``, or None."""
    return (session.query(mlnx_models_v2.NetworkBinding).
            filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
    """Persist the vnic-type profile binding for a port."""
    with session.begin(subtransactions=True):
        binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
        session.add(binding)
def get_port_profile_binding(session, port_id):
    """Return the PortProfileBinding row for ``port_id``, or None."""
    return (session.query(mlnx_models_v2.PortProfileBinding).
            filter_by(port_id=port_id).first())
def get_port_from_device(device):
    """Return the port whose id starts with ``device`` as a dict, or None.

    The result is the plugin's port dict extended with the port's
    security-group ids and with fixed_ips flattened to bare IP addresses.
    """
    LOG.debug(_("get_port_from_device() called"))
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
    # Outer join so ports with no security-group binding still match; a port
    # bound to N groups comes back as N (port, sg_id) rows.
    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    # Prefix match: ``device`` may be a truncated port UUID.
    query = query.filter(models_v2.Port.id.startswith(device))
    port_and_sgs = query.all()
    if not port_and_sgs:
        return
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    # Collect security-group ids (sg_id is None for unbound ports).
    port_dict['security_groups'] = [
        sg_id for port_in_db, sg_id in port_and_sgs if sg_id
    ]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    # Flatten fixed_ips down to the bare IP address strings.
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict
def get_port_from_device_mac(device_mac):
    """Return the first port with MAC address ``device_mac``, or None."""
    LOG.debug(_("Get_port_from_device_mac() called"))
    session = db.get_session()
    qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
    return qry.first()
def set_port_status(port_id, status):
    """Set the status of the port with id ``port_id``.

    Raises PortNotFound when no such port exists.
    """
    LOG.debug(_("Set_port_status as %s called"), status)
    session = db.get_session()
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
        port['status'] = status
        # NOTE(review): merge+flush with no explicit session.begin() -- this
        # relies on the session's autocommit behavior; confirm intended.
        session.merge(port)
        session.flush()
    except exc.NoResultFound:
        raise q_exc.PortNotFound(port_id=port_id)
| 39.729008 | 79 | 0.614372 |
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def initialize():
db.configure_db()
def _remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids):
if physical_network in allocations:
for entry in allocations[physical_network]:
try:
vlan_ids.remove(entry.segmentation_id)
except KeyError:
if not entry.allocated:
# it's not, so remove it from table
LOG.debug(_(
"Removing vlan %(seg_id)s on "
"physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': physical_network})
session.delete(entry)
del allocations[physical_network]
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
for vlan_id in sorted(vlan_ids):
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
vlan_id)
session.add(entry)
def _remove_unconfigured_vlans(session, allocations):
for entries in allocations.itervalues():
for entry in entries:
if not entry.allocated:
LOG.debug(_("Removing vlan %(seg_id)s on physical "
"network %(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
session.delete(entry)
def sync_network_states(network_vlan_ranges):
session = db.get_session()
with session.begin():
allocations = dict()
entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
all())
for entry in entries:
allocations.setdefault(entry.physical_network, set()).add(entry)
for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
vlan_ids = set()
for vlan_range in vlan_ranges:
vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
_remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids)
_add_missing_allocatable_vlans(session, physical_network, vlan_ids)
_remove_unconfigured_vlans(session, allocations)
def get_network_state(physical_network, segmentation_id):
session = db.get_session()
qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
qry = qry.filter_by(physical_network=physical_network,
segmentation_id=segmentation_id)
return qry.first()
def reserve_network(session):
with session.begin(subtransactions=True):
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if not entry:
raise q_exc.NoNetworkAvailable()
LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
entry.allocated = True
return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
if entry.allocated:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
LOG.debug(_("Reserving specific vlan %(seg_id)s "
"on physical network %(phy_net)s from pool"),
log_args)
entry.allocated = True
except exc.NoResultFound:
LOG.debug(_("Reserving specific vlan %(seg_id)s on "
"physical network %(phy_net)s outside pool"),
log_args)
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
segmentation_id)
entry.allocated = True
session.add(entry)
def release_network(session, physical_network,
segmentation_id, network_vlan_ranges):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
state.allocated = False
inside = False
for vlan_range in network_vlan_ranges.get(physical_network, []):
if (segmentation_id >= vlan_range[0] and
segmentation_id <= vlan_range[1]):
inside = True
break
if inside:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s to pool"),
log_args)
else:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s outside pool"),
log_args)
session.delete(state)
except exc.NoResultFound:
LOG.warning(_("vlan_id %(seg_id)s on physical network "
"%(phy_net)s not found"),
log_args)
def add_network_binding(session, network_id, network_type,
physical_network, vlan_id):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
physical_network, vlan_id)
session.add(binding)
def get_network_binding(session, network_id):
return (session.query(mlnx_models_v2.NetworkBinding).
filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
session.add(binding)
def get_port_profile_binding(session, port_id):
return (session.query(mlnx_models_v2.PortProfileBinding).
filter_by(port_id=port_id).first())
def get_port_from_device(device):
LOG.debug(_("get_port_from_device() called"))
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(device))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_in_db, sg_id in port_and_sgs if sg_id
]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_from_device_mac(device_mac):
LOG.debug(_("Get_port_from_device_mac() called"))
session = db.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def set_port_status(port_id, status):
LOG.debug(_("Set_port_status as %s called"), status)
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
| true | true |
f7308345fe160b2a1f7b7c825a95a3b7f2aa10bc | 6,872 | py | Python | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | scripts/07_tensor_linear_regresssion.py | UnacceptableBehaviour/pytorch_tut_00 | ca74b9bde8485f651bda9314b8f4a7ed277db787 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# PyTorch Tutorial 07 - Linear Regression
# https://www.youtube.com/watch?v=YAJ5XBwlN4o&list=PLqnslRFeH2UrcDBWF5mfPGpqQDSta6VK4&index=7
#from __future__ import print_function
import torch
print("\n" * 20)
print("-" * 80)
print("-" * 80)
print("\n" * 2)
#### Steps in Torch ML pipeline
# 1) Design Model (input, output size, forward pass)
# 2) Construct the loss & optimiser
# 3) Training Loop
# - forward pass: compute prediction
# - backward pass: gradients
# - update weights
# 0m - review Steps in Torch ML pipeline
# 1m - library imports
# 2m - coding starts - prepare data
# 4m30 - 1) Design Model (input, output size, forward pass)
# 5m40 - 2) Construct the loss & optimiser
# 7m - 3) Training Loop
# 10m - plot
import torch
import torch.nn as nn # PyTorch nn module has high-level APIs to build a neural network.
# Torch. nn module uses Tensors and Automatic differentiation modules for training and building layers such as input,
# hidden, and output layers - DOCS: https://pytorch.org/docs/stable/nn.html
import numpy as np # NumPy is a library for the Python programming language, adding support for large,
# multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate
# on these arrays - DOCS: https://numpy.org/doc/stable/user/whatisnumpy.html
from sklearn import datasets # to generate a regression dataset
# Scikit-learn is a library in Python that provides many unsupervised and supervised
# learning algorithms. It contains a lot of efficient tools for machine learning and statistical modeling including
# classification, regression, clustering and dimensionality reduction. Built upon some of the technology you might
# already be familiar with, like NumPy, pandas, and Matplotlib!
# DOCS: https://scikit-learn.org/stable/
import matplotlib.pyplot as plt # Matplotlib is a plotting library for the Python programming language. It provides an
# object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter,
# wxPython, Qt, or GTK - DOCS:
# cheatsheets: https://github.com/matplotlib/cheatsheets#cheatsheets
# How to plot & save graph hello world: https://github.com/UnacceptableBehaviour/latex_maths#python---matplotlib-numpy
# 0) prepare data -----------------------------------------------------------
# 100 noisy samples of a single-feature regression problem (seeded, so the
# dataset is reproducible across runs).
x_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=1)
# sklearn returns float64 arrays; cast to float32 before wrapping as tensors.
X = torch.from_numpy(x_numpy.astype(np.float32))  # torch tensor sharing numpy memory
Y = torch.from_numpy(y_numpy.astype(np.float32))
print(f"\n Y = torch.from_numpy(y_numpy.astype(np.float32)) \n{ Y }")

print(f"\n Y.shape[0] \n{ Y.shape[0] }")  # number of samples (100)
# Reshape the flat target vector into a (n_samples, 1) column for MSELoss.
y = Y.view(Y.shape[0], 1)
print(f"\n y = Y.view(y.shape[0], 1) \n{ y }")
print(f"\n y.shape \n{ y.shape }")  # torch.Size([100, 1])
print(f"\n X.shape \n{ X.shape }")
n_samples, n_features = X.shape

# 1) model ------------------------------------------------------------------
# Linear regression is a single fully-connected layer: y = w*x + b.
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size)  # built-in Linear layer

# 2) loss and optimizer -----------------------------------------------------
learning_rate = 0.01
criterion = nn.MSELoss()  # mean-squared-error loss for regression
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # stochastic gradient descent

# 3) training loop ----------------------------------------------------------
num_epochs = 100
for epoch in range(num_epochs):
    # forward pass: compute predictions and the loss
    y_predicted = model(X)
    loss = criterion(y_predicted, y)  # criterion(input, target)
    # backward pass: accumulate gradients
    loss.backward()
    # update weights, then clear gradients for the next epoch
    optimizer.step()
    optimizer.zero_grad()
    if (epoch+1) % 10 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')

# plot ----------------------------------------------------------------------
predicted = model(X).detach().numpy()  # detach: plotting needs no gradient tracking
label_data = plt.plot(x_numpy, y_numpy, 'ro')
label_model = plt.plot(x_numpy, predicted, 'b')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['data','model'])
plt.show()
print('plt.show')

print(f"\n x_numpy \n{ x_numpy }")
print(f"\n y_numpy \n{ y_numpy }")
print(f"\n predicted \n{ predicted }")
print('\n')
| 42.95 | 120 | 0.625437 |
import torch
print("\n" * 20)
print("-" * 80)
print("-" * 80)
print("\n" * 2)
from sklearn import datasets
import matplotlib.pyplot as plt
e_regression(n_samples=100, n_features=1, noise=20, random_state=1)
X = torch.from_numpy(x_numpy.astype(np.float32))
Y = torch.from_numpy(y_numpy.astype(np.float32))
print(f"\n Y = torch.from_numpy(y_numpy.astype(np.float32)) \n{ Y }")
n{ Y.shape[0] }")
y = Y.view(Y.shape[0], 1)
print(f"\n y = Y.view(y.shape[0], 1) \n{ y }")
print(f"\n y.shape \n{ y.shape }")
print(f"\n X.shape \n{ X.shape }")
n_samples, n_features = X.shape
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size)
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
= 100
for epoch in range(num_epochs):
y_predicted = model(X)
loss = criterion(y_predicted, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
predicted = model(X).detach().numpy()
label_data = plt.plot(x_numpy, y_numpy, 'ro')
label_model = plt.plot(x_numpy, predicted, 'b')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['data','model'])
plt.show()
print('plt.show')
print(f"\n x_numpy \n{ x_numpy }")
print(f"\n y_numpy \n{ y_numpy }")
print(f"\n predicted \n{ predicted }")
print('\n')
| true | true |
f73083901e3441f13ab867dc840908e3619b3a39 | 18,332 | py | Python | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | zerver/lib/markdown/fenced_code.py | narendrapsgim/zulip | e2df0d171f921d1e2b09d5de72088ffcc6a0f5f4 | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | """
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tilde's in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Removes trailing whitespace from code blocks that cause horizontal scrolling
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block \t\t\t\t\t\t\t
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
import re
from typing import Any, Callable, Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence
import lxml.html
from django.utils.html import escape
from markdown import Markdown
from markdown.extensions import Extension, codehilite
from markdown.extensions.codehilite import CodeHiliteExtension, parse_hl_lines
from markdown.preprocessors import Preprocessor
from pygments.lexers import find_lexer_class_by_name
from pygments.util import ClassNotFound
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
from zerver.lib.tex import render_tex
# Global vars
FENCE_RE = re.compile(
r"""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(?:
# language, like ".py" or "{javascript}"
\{?\.?
(?P<lang>
[a-zA-Z0-9_+-./#]+
) # "py" or "javascript"
[ ]* # spaces
# header for features that use fenced block header syntax (like spoilers)
(?P<header>
[^ ~`][^~`]*
)?
\}?
)?
$
""",
re.VERBOSE,
)
CODE_WRAP = "<pre><code{}>{}\n</code></pre>"
LANG_TAG = ' class="{}"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith("curl"):
if re.search(regex, line) is None:
raise MarkdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS: Dict[Optional[str], Callable[[List[str]], None]] = {
"curl": validate_curl_content,
}
class FencedCodeExtension(Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
"run_content_validators": [
config.get("run_content_validators", False),
"Boolean specifying whether to run content validation code in CodeHandler",
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: Markdown) -> None:
"""Add FencedBlockPreprocessor to the Markdown instance."""
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config["run_content_validators"][0]
)
md.preprocessors.register(
processor, "fenced_code_block", PREPROCESSOR_PRIORITES["fenced_code_block"]
)
class ZulipBaseHandler:
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: Optional[str] = None,
process_contents: bool = False,
) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.process_contents = process_contents
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
if self.lines:
text = "\n".join(self.lines)
text = self.format_text(text)
# For code blocks, the contents should not receive further
# processing. Whereas with quote and spoiler blocks, we
# explicitly want Markdown formatting of the content
# inside. This behavior is controlled by the
# process_contents configuration flag.
if not self.process_contents:
text = self.processor.placeholder(text)
processed_lines = text.split("\n")
self.output.append("")
self.output.extend(processed_lines)
self.output.append("")
self.processor.pop()
def format_text(self, text: str) -> str:
"""Returns a formatted text.
Subclasses should override this method.
"""
raise NotImplementedError()
def generic_handler(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
header: Optional[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> ZulipBaseHandler:
if lang is not None:
lang = lang.lower()
if lang in ("quote", "quoted"):
return QuoteHandler(processor, output, fence, default_language)
elif lang == "math":
return TexHandler(processor, output, fence)
elif lang == "spoiler":
return SpoilerHandler(processor, output, fence, header)
else:
return CodeHandler(processor, output, fence, lang, run_content_validators)
def check_for_new_fence(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
line: str,
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group("fence")
lang: Optional[str] = m.group("lang")
header: Optional[str] = m.group("header")
if not lang and default_language:
lang = default_language
handler = generic_handler(
processor, output, fence, lang, header, run_content_validators, default_language
)
processor.push(handler)
else:
output.append(line)
class OuterHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
self.run_content_validators = run_content_validators
self.default_language = default_language
super().__init__(processor, output)
def handle_line(self, line: str) -> None:
check_for_new_fence(
self.processor, self.output, line, self.run_content_validators, self.default_language
)
class CodeHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
run_content_validators: bool = False,
) -> None:
self.lang = lang
self.run_content_validators = run_content_validators
super().__init__(processor, output, fence)
def done(self) -> None:
# run content validators (if any)
if self.run_content_validators:
validator = CODE_VALIDATORS.get(self.lang, lambda text: None)
validator(self.lines)
super().done()
def format_text(self, text: str) -> str:
return self.processor.format_code(self.lang, text)
class QuoteHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
default_language: Optional[str] = None,
) -> None:
self.default_language = default_language
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(
self.processor, self.lines, line, default_language=self.default_language
)
def format_text(self, text: str) -> str:
return self.processor.format_quote(text)
class SpoilerHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
spoiler_header: Optional[str],
) -> None:
self.spoiler_header = spoiler_header
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def format_text(self, text: str) -> str:
return self.processor.format_spoiler(self.spoiler_header, text)
class TexHandler(ZulipBaseHandler):
def format_text(self, text: str) -> str:
return self.processor.format_tex(text)
class CodeHilite(codehilite.CodeHilite):
def _parseHeader(self) -> None:
# Python-Markdown has a feature to parse-and-hide shebang
# lines present in code blocks:
#
# https://python-markdown.github.io/extensions/code_hilite/#shebang-no-path
#
# While using shebang lines for language detection is
# reasonable, we don't want this feature because it can be
# really confusing when doing anything else in a one-line code
# block that starts with `!` (which would then render as an
# empty code block!). So we disable the feature, by
# overriding this function, which implements it in CodeHilite
# upstream.
# split text into lines
lines = self.src.split("\n")
# Python-Markdown pops out the first line which we are avoiding here.
# Examine first line
fl = lines[0]
c = re.compile(
r"""
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
""",
re.VERBOSE,
)
# Search first line for shebang
m = c.search(fl)
if m:
# We have a match
try:
self.lang = m.group("lang").lower()
except IndexError: # nocoverage
self.lang = None
if self.options["linenos"] is None and m.group("shebang"):
# Overridable and Shebang exists - use line numbers
self.options["linenos"] = True
self.options["hl_lines"] = parse_hl_lines(m.group("hl_lines"))
self.src = "\n".join(lines).strip("\n")
class FencedBlockPreprocessor(Preprocessor):
def __init__(self, md: Markdown, run_content_validators: bool = False) -> None:
super().__init__(md)
self.checked_for_codehilite = False
self.run_content_validators = run_content_validators
self.codehilite_conf: Mapping[str, Sequence[Any]] = {}
def push(self, handler: ZulipBaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
"""Match and store Fenced Code Blocks in the HtmlStash."""
output: List[str] = []
processor = self
self.handlers: List[ZulipBaseHandler] = []
default_language = None
try:
default_language = self.md.zulip_realm.default_code_block_language
except AttributeError:
pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Markdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != "":
output.append("")
return output
def format_code(self, lang: Optional[str], text: str) -> str:
if lang:
langclass = LANG_TAG.format(lang)
else:
langclass = ""
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
# If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlite the code
if self.codehilite_conf:
highliter = CodeHilite(
text,
linenums=self.codehilite_conf["linenums"][0],
guess_lang=self.codehilite_conf["guess_lang"][0],
css_class=self.codehilite_conf["css_class"][0],
style=self.codehilite_conf["pygments_style"][0],
use_pygments=self.codehilite_conf["use_pygments"][0],
lang=(lang or None),
noclasses=self.codehilite_conf["noclasses"][0],
)
code = highliter.hilite().rstrip("\n")
else:
code = CODE_WRAP.format(langclass, self._escape(text))
# To support our "view in playground" feature, the frontend
# needs to know what Pygments language was used for
# highlighting this code block. We record this in a data
# attribute attached to the outer `pre` element.
# Unfortunately, the pygments API doesn't offer a way to add
# this, so we need to do it in a post-processing step.
if lang:
div_tag = lxml.html.fromstring(code)
# For the value of our data element, we get the lexer
# subclass name instead of directly using the language,
# since that canonicalizes aliases (Eg: `js` and
# `javascript` will be mapped to `JavaScript`).
try:
code_language = find_lexer_class_by_name(lang).name
except ClassNotFound:
# If there isn't a Pygments lexer by this name, we
# still tag it with the user's data-code-language
# value, since this allows hooking up a "playground"
# for custom "languages" that aren't known to Pygments.
code_language = lang
div_tag.attrib["data-code-language"] = code_language
code = lxml.html.tostring(div_tag, encoding="unicode")
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines))
return "\n".join(quoted_paragraphs)
def format_spoiler(self, header: Optional[str], text: str) -> str:
output = []
header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
end_header_start_content_html = '</div><div class="spoiler-content" aria-hidden="true">'
footer_html = "</div></div>"
output.append(self.placeholder(header_div_open_html))
if header is not None:
output.append(header)
output.append(self.placeholder(end_header_start_content_html))
output.append(text)
output.append(self.placeholder(footer_html))
return "\n\n".join(output)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' + escape(paragraph) + "</span>")
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.md.htmlStash.store(code)
def _escape(self, txt: str) -> str:
"""basic html escaping"""
txt = txt.replace("&", "&")
txt = txt.replace("<", "<")
txt = txt.replace(">", ">")
txt = txt.replace('"', """)
return txt
def makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:
return FencedCodeExtension(kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32.971223 | 100 | 0.603971 | import re
from typing import Any, Callable, Dict, Iterable, List, Mapping, MutableSequence, Optional, Sequence
import lxml.html
from django.utils.html import escape
from markdown import Markdown
from markdown.extensions import Extension, codehilite
from markdown.extensions.codehilite import CodeHiliteExtension, parse_hl_lines
from markdown.preprocessors import Preprocessor
from pygments.lexers import find_lexer_class_by_name
from pygments.util import ClassNotFound
from zerver.lib.exceptions import MarkdownRenderingException
from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
from zerver.lib.tex import render_tex
FENCE_RE = re.compile(
r"""
# ~~~ or ```
(?P<fence>
^(?:~{3,}|`{3,})
)
[ ]* # spaces
(?:
# language, like ".py" or "{javascript}"
\{?\.?
(?P<lang>
[a-zA-Z0-9_+-./#]+
) # "py" or "javascript"
[ ]* # spaces
# header for features that use fenced block header syntax (like spoilers)
(?P<header>
[^ ~`][^~`]*
)?
\}?
)?
$
""",
re.VERBOSE,
)
CODE_WRAP = "<pre><code{}>{}\n</code></pre>"
LANG_TAG = ' class="{}"'
def validate_curl_content(lines: List[str]) -> None:
error_msg = """
Missing required -X argument in curl command:
{command}
""".strip()
for line in lines:
regex = r'curl [-](sS)?X "?(GET|DELETE|PATCH|POST)"?'
if line.startswith("curl"):
if re.search(regex, line) is None:
raise MarkdownRenderingException(error_msg.format(command=line.strip()))
CODE_VALIDATORS: Dict[Optional[str], Callable[[List[str]], None]] = {
"curl": validate_curl_content,
}
class FencedCodeExtension(Extension):
def __init__(self, config: Mapping[str, Any] = {}) -> None:
self.config = {
"run_content_validators": [
config.get("run_content_validators", False),
"Boolean specifying whether to run content validation code in CodeHandler",
],
}
for key, value in config.items():
self.setConfig(key, value)
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
processor = FencedBlockPreprocessor(
md, run_content_validators=self.config["run_content_validators"][0]
)
md.preprocessors.register(
processor, "fenced_code_block", PREPROCESSOR_PRIORITES["fenced_code_block"]
)
class ZulipBaseHandler:
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: Optional[str] = None,
process_contents: bool = False,
) -> None:
self.processor = processor
self.output = output
self.fence = fence
self.process_contents = process_contents
self.lines: List[str] = []
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
self.lines.append(line.rstrip())
def done(self) -> None:
if self.lines:
text = "\n".join(self.lines)
text = self.format_text(text)
if not self.process_contents:
text = self.processor.placeholder(text)
processed_lines = text.split("\n")
self.output.append("")
self.output.extend(processed_lines)
self.output.append("")
self.processor.pop()
def format_text(self, text: str) -> str:
raise NotImplementedError()
def generic_handler(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
header: Optional[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> ZulipBaseHandler:
if lang is not None:
lang = lang.lower()
if lang in ("quote", "quoted"):
return QuoteHandler(processor, output, fence, default_language)
elif lang == "math":
return TexHandler(processor, output, fence)
elif lang == "spoiler":
return SpoilerHandler(processor, output, fence, header)
else:
return CodeHandler(processor, output, fence, lang, run_content_validators)
def check_for_new_fence(
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
line: str,
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
m = FENCE_RE.match(line)
if m:
fence = m.group("fence")
lang: Optional[str] = m.group("lang")
header: Optional[str] = m.group("header")
if not lang and default_language:
lang = default_language
handler = generic_handler(
processor, output, fence, lang, header, run_content_validators, default_language
)
processor.push(handler)
else:
output.append(line)
class OuterHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
run_content_validators: bool = False,
default_language: Optional[str] = None,
) -> None:
self.run_content_validators = run_content_validators
self.default_language = default_language
super().__init__(processor, output)
def handle_line(self, line: str) -> None:
check_for_new_fence(
self.processor, self.output, line, self.run_content_validators, self.default_language
)
class CodeHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
lang: Optional[str],
run_content_validators: bool = False,
) -> None:
self.lang = lang
self.run_content_validators = run_content_validators
super().__init__(processor, output, fence)
def done(self) -> None:
if self.run_content_validators:
validator = CODE_VALIDATORS.get(self.lang, lambda text: None)
validator(self.lines)
super().done()
def format_text(self, text: str) -> str:
return self.processor.format_code(self.lang, text)
class QuoteHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
default_language: Optional[str] = None,
) -> None:
self.default_language = default_language
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(
self.processor, self.lines, line, default_language=self.default_language
)
def format_text(self, text: str) -> str:
return self.processor.format_quote(text)
class SpoilerHandler(ZulipBaseHandler):
def __init__(
self,
processor: "FencedBlockPreprocessor",
output: MutableSequence[str],
fence: str,
spoiler_header: Optional[str],
) -> None:
self.spoiler_header = spoiler_header
super().__init__(processor, output, fence, process_contents=True)
def handle_line(self, line: str) -> None:
if line.rstrip() == self.fence:
self.done()
else:
check_for_new_fence(self.processor, self.lines, line)
def format_text(self, text: str) -> str:
return self.processor.format_spoiler(self.spoiler_header, text)
class TexHandler(ZulipBaseHandler):
def format_text(self, text: str) -> str:
return self.processor.format_tex(text)
class CodeHilite(codehilite.CodeHilite):
def _parseHeader(self) -> None:
# really confusing when doing anything else in a one-line code
# block that starts with `!` (which would then render as an
# empty code block!). So we disable the feature, by
# overriding this function, which implements it in CodeHilite
# upstream.
# split text into lines
lines = self.src.split("\n")
# Python-Markdown pops out the first line which we are avoiding here.
# Examine first line
fl = lines[0]
c = re.compile(
r"""
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
""",
re.VERBOSE,
)
# Search first line for shebang
m = c.search(fl)
if m:
# We have a match
try:
self.lang = m.group("lang").lower()
except IndexError: # nocoverage
self.lang = None
if self.options["linenos"] is None and m.group("shebang"):
# Overridable and Shebang exists - use line numbers
self.options["linenos"] = True
self.options["hl_lines"] = parse_hl_lines(m.group("hl_lines"))
self.src = "\n".join(lines).strip("\n")
class FencedBlockPreprocessor(Preprocessor):
def __init__(self, md: Markdown, run_content_validators: bool = False) -> None:
super().__init__(md)
self.checked_for_codehilite = False
self.run_content_validators = run_content_validators
self.codehilite_conf: Mapping[str, Sequence[Any]] = {}
def push(self, handler: ZulipBaseHandler) -> None:
self.handlers.append(handler)
def pop(self) -> None:
self.handlers.pop()
def run(self, lines: Iterable[str]) -> List[str]:
output: List[str] = []
processor = self
self.handlers: List[ZulipBaseHandler] = []
default_language = None
try:
default_language = self.md.zulip_realm.default_code_block_language
except AttributeError:
pass
handler = OuterHandler(processor, output, self.run_content_validators, default_language)
self.push(handler)
for line in lines:
self.handlers[-1].handle_line(line)
while self.handlers:
self.handlers[-1].done()
# This fiddly handling of new lines at the end of our output was done to make
# existing tests pass. Markdown is just kind of funny when it comes to new lines,
# but we could probably remove this hack.
if len(output) > 2 and output[-2] != "":
output.append("")
return output
def format_code(self, lang: Optional[str], text: str) -> str:
if lang:
langclass = LANG_TAG.format(lang)
else:
langclass = ""
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
# If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlite the code
if self.codehilite_conf:
highliter = CodeHilite(
text,
linenums=self.codehilite_conf["linenums"][0],
guess_lang=self.codehilite_conf["guess_lang"][0],
css_class=self.codehilite_conf["css_class"][0],
style=self.codehilite_conf["pygments_style"][0],
use_pygments=self.codehilite_conf["use_pygments"][0],
lang=(lang or None),
noclasses=self.codehilite_conf["noclasses"][0],
)
code = highliter.hilite().rstrip("\n")
else:
code = CODE_WRAP.format(langclass, self._escape(text))
# To support our "view in playground" feature, the frontend
# needs to know what Pygments language was used for
# highlighting this code block. We record this in a data
# attribute attached to the outer `pre` element.
# Unfortunately, the pygments API doesn't offer a way to add
# this, so we need to do it in a post-processing step.
if lang:
div_tag = lxml.html.fromstring(code)
# For the value of our data element, we get the lexer
# subclass name instead of directly using the language,
# since that canonicalizes aliases (Eg: `js` and
# `javascript` will be mapped to `JavaScript`).
try:
code_language = find_lexer_class_by_name(lang).name
except ClassNotFound:
# If there isn't a Pygments lexer by this name, we
# still tag it with the user's data-code-language
# value, since this allows hooking up a "playground"
# for custom "languages" that aren't known to Pygments.
code_language = lang
div_tag.attrib["data-code-language"] = code_language
code = lxml.html.tostring(div_tag, encoding="unicode")
return code
def format_quote(self, text: str) -> str:
paragraphs = text.split("\n")
quoted_paragraphs = []
for paragraph in paragraphs:
lines = paragraph.split("\n")
quoted_paragraphs.append("\n".join("> " + line for line in lines))
return "\n".join(quoted_paragraphs)
def format_spoiler(self, header: Optional[str], text: str) -> str:
output = []
header_div_open_html = '<div class="spoiler-block"><div class="spoiler-header">'
end_header_start_content_html = '</div><div class="spoiler-content" aria-hidden="true">'
footer_html = "</div></div>"
output.append(self.placeholder(header_div_open_html))
if header is not None:
output.append(header)
output.append(self.placeholder(end_header_start_content_html))
output.append(text)
output.append(self.placeholder(footer_html))
return "\n\n".join(output)
def format_tex(self, text: str) -> str:
paragraphs = text.split("\n\n")
tex_paragraphs = []
for paragraph in paragraphs:
html = render_tex(paragraph, is_inline=False)
if html is not None:
tex_paragraphs.append(html)
else:
tex_paragraphs.append('<span class="tex-error">' + escape(paragraph) + "</span>")
return "\n\n".join(tex_paragraphs)
def placeholder(self, code: str) -> str:
return self.md.htmlStash.store(code)
def _escape(self, txt: str) -> str:
txt = txt.replace("&", "&")
txt = txt.replace("<", "<")
txt = txt.replace(">", ">")
txt = txt.replace('"', """)
return txt
def makeExtension(*args: Any, **kwargs: None) -> FencedCodeExtension:
return FencedCodeExtension(kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true | true |
f73083f83f4bca4deac6272da174e16b8fa14446 | 3,149 | py | Python | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | null | null | null | heat/tests/clients/test_glance_client.py | ISCAS-VDI/heat-base | ca8390434edfd8396c7e46651e1e31ff488b2307 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
import mock
from heat.engine.clients.os import glance
from heat.tests import common
from heat.tests import utils
class GlanceUtilsTest(common.HeatTestCase):
"""Basic tests for :module:'heat.engine.resources.clients.os.glance'."""
def setUp(self):
super(GlanceUtilsTest, self).setUp()
self.glance_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.glance_plugin = c.client_plugin('glance')
self.glance_plugin.client = lambda: self.glance_client
self.my_image = mock.MagicMock()
def test_find_image_by_name_or_id(self):
"""Tests the find_image_by_name_or_id function."""
img_id = str(uuid.uuid4())
img_name = 'myfakeimage'
self.my_image.id = img_id
self.my_image.name = img_name
self.glance_client.images.get.side_effect = [
self.my_image,
exc.HTTPNotFound(),
exc.HTTPNotFound(),
exc.HTTPNotFound()]
self.glance_client.images.list.side_effect = [
[self.my_image],
[],
[self.my_image, self.my_image]]
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_id))
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_name))
self.assertRaises(exceptions.NotFound,
self.glance_plugin.find_image_by_name_or_id,
'noimage')
self.assertRaises(exceptions.NoUniqueMatch,
self.glance_plugin.find_image_by_name_or_id,
'myfakeimage')
class ImageConstraintTest(common.HeatTestCase):
def setUp(self):
super(ImageConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_find_image = mock.Mock()
self.ctx.clients.client_plugin(
'glance').find_image_by_name_or_id = self.mock_find_image
self.constraint = glance.ImageConstraint()
def test_validation(self):
self.mock_find_image.side_effect = ["id1",
exceptions.NotFound(),
exceptions.NoUniqueMatch()]
self.assertTrue(self.constraint.validate("foo", self.ctx))
self.assertFalse(self.constraint.validate("bar", self.ctx))
self.assertFalse(self.constraint.validate("baz", self.ctx))
| 38.876543 | 79 | 0.643061 |
import uuid
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
import mock
from heat.engine.clients.os import glance
from heat.tests import common
from heat.tests import utils
class GlanceUtilsTest(common.HeatTestCase):
def setUp(self):
super(GlanceUtilsTest, self).setUp()
self.glance_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.glance_plugin = c.client_plugin('glance')
self.glance_plugin.client = lambda: self.glance_client
self.my_image = mock.MagicMock()
def test_find_image_by_name_or_id(self):
img_id = str(uuid.uuid4())
img_name = 'myfakeimage'
self.my_image.id = img_id
self.my_image.name = img_name
self.glance_client.images.get.side_effect = [
self.my_image,
exc.HTTPNotFound(),
exc.HTTPNotFound(),
exc.HTTPNotFound()]
self.glance_client.images.list.side_effect = [
[self.my_image],
[],
[self.my_image, self.my_image]]
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_id))
self.assertEqual(img_id,
self.glance_plugin.find_image_by_name_or_id(img_name))
self.assertRaises(exceptions.NotFound,
self.glance_plugin.find_image_by_name_or_id,
'noimage')
self.assertRaises(exceptions.NoUniqueMatch,
self.glance_plugin.find_image_by_name_or_id,
'myfakeimage')
class ImageConstraintTest(common.HeatTestCase):
def setUp(self):
super(ImageConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_find_image = mock.Mock()
self.ctx.clients.client_plugin(
'glance').find_image_by_name_or_id = self.mock_find_image
self.constraint = glance.ImageConstraint()
def test_validation(self):
self.mock_find_image.side_effect = ["id1",
exceptions.NotFound(),
exceptions.NoUniqueMatch()]
self.assertTrue(self.constraint.validate("foo", self.ctx))
self.assertFalse(self.constraint.validate("bar", self.ctx))
self.assertFalse(self.constraint.validate("baz", self.ctx))
| true | true |
f730842547db0fa4ac95c76cc51c2fc28fb8e9b8 | 7,385 | py | Python | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/export/ExportLdapCACertificate.py | vadimkuznetsov/imcsdk | ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8 | [
"Apache-2.0"
] | 1 | 2019-11-10T18:42:04.000Z | 2019-11-10T18:42:04.000Z | """This module contains the general information for ExportLdapCACertificate ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ExportLdapCACertificateConsts:
PROTOCOL_FTP = "ftp"
PROTOCOL_HTTP = "http"
PROTOCOL_NONE = "none"
PROTOCOL_SCP = "scp"
PROTOCOL_SFTP = "sftp"
PROTOCOL_TFTP = "tftp"
class ExportLdapCACertificate(ManagedObject):
    """This is ExportLdapCACertificate class.

    Auto-generated managed-object definition; the metadata tables below
    describe the object for both "classic" and "modular" IMC platforms.
    """

    consts = ExportLdapCACertificateConsts()
    naming_props = set([])

    # Managed-object metadata per platform: (class name, XML tag, rn,
    # min version, category, mask, naming props, access privileges,
    # parent classes, child classes, supported verbs)
    mo_meta = {
        "classic": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"]),
        "modular": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"])
    }

    # Per-platform property metadata: (name, XML name, type, min version,
    # access, mask, min length, max length, regex restriction,
    # allowed values, range restrictions)
    prop_meta = {
        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

    }

    # Maps XML attribute names to Python property names, per platform
    prop_map = {
        "classic": {
            "childAction": "child_action", 
            "dn": "dn", 
            "exportProgress": "export_progress", 
            "exportStatus": "export_status", 
            "protocol": "protocol", 
            "pwd": "pwd", 
            "remoteFile": "remote_file", 
            "remoteServer": "remote_server", 
            "rn": "rn", 
            "status": "status", 
            "user": "user", 
        },

        "modular": {
            "childAction": "child_action", 
            "dn": "dn", 
            "exportProgress": "export_progress", 
            "exportStatus": "export_status", 
            "protocol": "protocol", 
            "pwd": "pwd", 
            "remoteFile": "remote_file", 
            "remoteServer": "remote_server", 
            "rn": "rn", 
            "status": "status", 
            "user": "user", 
        },

    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initializes an ExportLdapCACertificate managed object.

        Args:
            parent_mo_or_dn: the parent ManagedObject or its DN string
            **kwargs: initial property values; see `prop_meta` for the
                valid property names and constraints
        """
        self._dirty_mask = 0
        # All properties start unset; values are assigned via **kwargs or
        # populated from the IMC server response
        self.child_action = None
        self.export_progress = None
        self.export_status = None
        self.protocol = None
        self.pwd = None
        self.remote_file = None
        self.remote_server = None
        self.status = None
        self.user = None

        ManagedObject.__init__(self, "ExportLdapCACertificate", parent_mo_or_dn, **kwargs)
| 69.018692 | 907 | 0.579282 |
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ExportLdapCACertificateConsts:
    """Allowed values for the `protocol` property of
    ExportLdapCACertificate."""

    PROTOCOL_FTP = "ftp"
    PROTOCOL_HTTP = "http"
    PROTOCOL_NONE = "none"
    PROTOCOL_SCP = "scp"
    PROTOCOL_SFTP = "sftp"
    PROTOCOL_TFTP = "tftp"
class ExportLdapCACertificate(ManagedObject):
    """This is ExportLdapCACertificate class.

    Auto-generated managed-object definition; the metadata tables below
    describe the object for both "classic" and "modular" IMC platforms.
    """

    consts = ExportLdapCACertificateConsts()
    naming_props = set([])

    # Managed-object metadata per platform: (class name, XML tag, rn,
    # min version, category, mask, naming props, access privileges,
    # parent classes, child classes, supported verbs)
    mo_meta = {
        "classic": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"]),
        "modular": MoMeta("ExportLdapCACertificate", "exportLdapCACertificate", "ldap-ca-cert-export", VersionMeta.Version2013e, "InputOutput", 0x1ff, [], ["admin", "read-only", "user"], [u'ldapCACertificateManagement'], [], ["Get"])
    }

    # Per-platform property metadata: (name, XML name, type, min version,
    # access, mask, min length, max length, regex restriction,
    # allowed values, range restrictions)
    prop_meta = {
        "classic": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

        "modular": {
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "export_progress": MoPropertyMeta("export_progress", "exportProgress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "export_status": MoPropertyMeta("export_status", "exportStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "protocol": MoPropertyMeta("protocol", "protocol", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
            "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
            "remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{0,255}""", [], []),
            "remote_server": MoPropertyMeta("remote_server", "remoteServer", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
        },

    }

    # Maps XML attribute names to Python property names, per platform
    prop_map = {
        "classic": {
            "childAction": "child_action", 
            "dn": "dn", 
            "exportProgress": "export_progress", 
            "exportStatus": "export_status", 
            "protocol": "protocol", 
            "pwd": "pwd", 
            "remoteFile": "remote_file", 
            "remoteServer": "remote_server", 
            "rn": "rn", 
            "status": "status", 
            "user": "user", 
        },

        "modular": {
            "childAction": "child_action", 
            "dn": "dn", 
            "exportProgress": "export_progress", 
            "exportStatus": "export_status", 
            "protocol": "protocol", 
            "pwd": "pwd", 
            "remoteFile": "remote_file", 
            "remoteServer": "remote_server", 
            "rn": "rn", 
            "status": "status", 
            "user": "user", 
        },

    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initializes an ExportLdapCACertificate managed object.

        Args:
            parent_mo_or_dn: the parent ManagedObject or its DN string
            **kwargs: initial property values; see `prop_meta` for the
                valid property names and constraints
        """
        self._dirty_mask = 0
        # All properties start unset; values are assigned via **kwargs or
        # populated from the IMC server response
        self.child_action = None
        self.export_progress = None
        self.export_status = None
        self.protocol = None
        self.pwd = None
        self.remote_file = None
        self.remote_server = None
        self.status = None
        self.user = None

        ManagedObject.__init__(self, "ExportLdapCACertificate", parent_mo_or_dn, **kwargs)
| true | true |
f730844764dcd79f2f76188b5b0b2bdde3759d30 | 30,151 | py | Python | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | eta/core/labels.py | MagicCodess/eta | 4599292a4de1f5f477e159787e2c2127d9fbde1b | [
"Apache-2.0"
] | null | null | null | """
Core data structures for working with labels.
Copyright 2017-2022, Voxel51, Inc.
voxel51.com
"""
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
from collections import defaultdict
import logging
import eta.core.serial as etas
import eta.core.utils as etau
logger = logging.getLogger(__name__)
class Labels(etas.Serializable):
    """Abstract base for `eta.core.serial.Serializable` classes that hold
    labels representing attributes, objects, frames, events, images, videos,
    etc.

    Every Labels class has a companion Schema class that describes the
    ontology over the labels.
    """

    def __bool__(self):
        """Returns True if this instance contains any labels."""
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this instance has no labels of any kind."""
        raise NotImplementedError("subclasses must implement is_empty")

    @classmethod
    def get_schema_cls(cls):
        """Gets the LabelsSchema class for the labels.

        By convention, labels class `<Labels>` has associated schema class
        `<Labels>Schema` defined in the same module; subclasses may override
        this method to change that behavior.

        Returns:
            the LabelsSchema class
        """
        return etau.get_class(etau.get_class_name(cls) + "Schema")

    def get_active_schema(self):
        """Returns a LabelsSchema that describes the active schema of these
        labels.

        Returns:
            a LabelsSchema
        """
        return self.get_schema_cls().build_active_schema(self)

    def filter_by_schema(self, schema):
        """Filters the labels by the given schema.

        Args:
            schema: a LabelsSchema
        """
        raise NotImplementedError(
            "subclasses must implement `filter_by_schema()`"
        )
class LabelsSchema(etas.Serializable):
    """Base class for schemas of Labels classes."""

    def __bool__(self):
        """Returns True if this schema contains any labels."""
        return not self.is_empty

    @property
    def is_empty(self):
        """Whether this schema has no labels of any kind."""
        raise NotImplementedError("subclasses must implement is_empty")

    def add(self, labels):
        """Incorporates the given Labels into the schema.

        Args:
            labels: a Labels instance
        """
        self.merge_schema(self.build_active_schema(labels))

    def add_iterable(self, iterable):
        """Incorporates the given iterable of Labels into the schema.

        Args:
            iterable: an iterable of Labels
        """
        for labels in iterable:
            self.add(labels)

    def validate(self, labels):
        """Validates that the Labels are compliant with the schema.

        Args:
            labels: a Labels instance

        Raises:
            LabelsSchemaError: if the labels violate the schema
        """
        raise NotImplementedError("subclasses must implement `validate()`")

    def validate_subset_of_schema(self, schema):
        """Validates that this schema is a subset of the given LabelsSchema.

        Args:
            schema: a LabelsSchema

        Raises:
            LabelsSchemaError: if this schema is not a subset of the given
                schema
        """
        raise NotImplementedError(
            "subclasses must implement `validate_subset_of_schema()`"
        )

    def validate_schema_type(self, schema):
        """Validates that this schema is an instance of the same type as the
        given schema.

        Args:
            schema: a LabelsSchema

        Raises:
            LabelsSchemaError: if this schema is not of the same type as the
                given schema
        """
        if isinstance(self, type(schema)):
            return

        raise LabelsSchemaError(
            "Expected `self` to match schema type %s; found %s"
            % (type(self), type(schema))
        )

    def is_valid(self, labels):
        """Whether the Labels are compliant with the schema.

        Args:
            labels: a Labels instance

        Returns:
            True/False
        """
        try:
            self.validate(labels)
        except LabelsSchemaError:
            return False

        return True

    def is_subset_of_schema(self, schema):
        """Whether this schema is a subset of the given schema.

        Args:
            schema: a LabelsSchema

        Returns:
            True/False
        """
        try:
            self.validate_subset_of_schema(schema)
        except LabelsSchemaError:
            return False

        return True

    @classmethod
    def build_active_schema(cls, labels):
        """Builds a LabelsSchema that describes the active schema of the
        given labels.

        Args:
            labels: a Labels instance

        Returns:
            a LabelsSchema
        """
        raise NotImplementedError(
            "subclasses must implement `build_active_schema()`"
        )

    def merge_schema(self, schema):
        """Merges the given LabelsSchema into this schema.

        Args:
            schema: a LabelsSchema
        """
        raise NotImplementedError("subclasses must implement `merge_schema()`")
class LabelsSchemaError(Exception):
    """Error raised when a LabelsSchema is violated."""

    # Fixed docstring typo ("raisesd" -> "raised"); behavior unchanged
    pass
class HasLabelsSchema(object):
    """Mixin for Label classes that can optionally store and enforce
    `LabelsSchema`s on their labels.

    For efficiency, schemas are not automatically enforced when new labels
    are added; call `validate_schema()` explicitly to check compliance.

    Attributes:
        schema: the enforced LabelsSchema, or None
    """

    def __init__(self, schema=None):
        """Initializes the HasLabelsSchema mixin.

        Args:
            schema: (optional) a LabelsSchema to enforce on the labels. By
                default, no schema is enforced
        """
        self.schema = schema

    @property
    def has_schema(self):
        """Whether the labels have an enforced schema."""
        return self.schema is not None

    def get_schema(self):
        """Returns the currently enforced schema, or None if no schema is
        enforced.

        Returns:
            a LabelsSchema, or None
        """
        return self.schema

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema to the given LabelsSchema.

        Args:
            schema: a LabelsSchema to assign
            filter_by_schema: whether to filter labels that are not compliant
                with the schema. By default, this is False
            validate: whether to validate that the labels (after filtering,
                if applicable) are compliant with the new schema. By default,
                this is False

        Raises:
            LabelsSchemaError: if `validate` was `True` and this object
                contains labels that are not compliant with the schema
        """
        self.schema = schema
        if self.schema is None:
            # Removing the schema; nothing to filter or validate
            return

        if filter_by_schema:
            # `filter_by_schema` is provided by the Labels class this mixin
            # is combined with
            self.filter_by_schema(self.schema)  # pylint: disable=no-member

        if validate:
            self.validate_schema()

    def validate_schema(self):
        """Validates that the labels are compliant with the current schema,
        if one is enforced.

        Raises:
            LabelsSchemaError: if this object contains labels that are not
                compliant with the schema
        """
        if self.has_schema:
            self.schema.validate(self)

    def freeze_schema(self):
        """Sets the schema for the labels to the current active schema."""
        self.set_schema(self.get_active_schema())  # pylint: disable=no-member

    def remove_schema(self):
        """Removes the enforced schema from the labels."""
        self.set_schema(None)
class HasLabelsSupport(object):
    """Mixin for Label classes that describe videos and keep track of their
    own support, i.e., the frames for which they contain labels.

    The support is represented via an `eta.core.frameutils.FrameRanges`
    instance. It is computed on-demand each time the `support` property is
    accessed, unless it has been frozen via `set_support()` or
    `freeze_support()`, in which case the frozen value is returned.
    """

    def __init__(self, support=None):
        """Initializes the HasLabelsSupport mixin.

        Args:
            support: (optional) a FrameRanges instance describing the frozen
                support of the labels. By default, the support is not frozen
        """
        self._support = support

    @property
    def support(self):
        """A FrameRanges instance describing the frames for which this
        instance contains labels.

        Returns the frozen support, if any; otherwise the support is
        computed dynamically via `_compute_support()`.
        """
        if self._support is not None:
            return self._support

        return self._compute_support()

    @property
    def is_support_frozen(self):
        """Whether the support is currently frozen."""
        return self._support is not None

    def set_support(self, support):
        """Sets the support to the given value, freezing it.

        Args:
            support: a FrameRanges
        """
        self._support = support

    def merge_support(self, support):
        """Merges the given support into the current support, freezing the
        result.

        Args:
            support: a FrameRanges
        """
        self.set_support(self.support.merge(support))

    def freeze_support(self):
        """Freezes the support at its current value.

        This optional optimization avoids recomputing the support of the
        labels each time `support` is accessed.
        """
        if not self.is_support_frozen:
            self._support = self._compute_support()

    def clear_support(self):
        """Clears the frozen support, if necessary."""
        self._support = None

    def _compute_support(self):
        """Computes the current support of the labels in this instance.

        Returns:
            a FrameRanges
        """
        raise NotImplementedError(
            "subclasses must implement _compute_support()"
        )
class HasFramewiseView(object):
    """Mixin for Label classes that describe videos and can be rendered in a
    framewise view by a LabelsFrameRenderer.
    """

    @property
    def framewise_renderer_cls(self):
        """The LabelsFrameRenderer used by this class."""
        raise NotImplementedError(
            "subclasses must implement framewise_renderer_cls()"
        )

    def render_framewise(self, in_place=False):
        """Renders a framewise version of the labels.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a framewise version of the labels
        """
        # Delegate to the renderer class declared by the subclass
        return self.framewise_renderer_cls(self).render(in_place=in_place)
class HasSpatiotemporalView(object):
    """Mixin for Label classes that describe videos and can be rendered in a
    spatiotemporal view by a LabelsSpatiotemporalRenderer.
    """

    @property
    def spatiotemporal_renderer_cls(self):
        """The LabelsSpatiotemporalRenderer used by this class."""
        raise NotImplementedError(
            "subclasses must implement spatiotemporal_renderer_cls()"
        )

    def render_spatiotemporal(self, in_place=False):
        """Renders a spatiotemporal version of the labels.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a spatiotemporal version of the labels
        """
        # Delegate to the renderer class declared by the subclass
        return self.spatiotemporal_renderer_cls(self).render(in_place=in_place)
class LabelsContainer(Labels, HasLabelsSchema, etas.Container):
    """Base class for `eta.core.serial.Container`s of Labels.

    `LabelsContainer`s can optionally store a LabelsContainerSchema instance
    that governs the schema of the labels in the container.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsContainer instance.

        Args:
            schema: an optional LabelsContainerSchema to enforce on the
                labels in this container. By default, no schema is enforced
            **kwargs: valid keyword arguments for `eta.core.serial.Container()`

        Raises:
            LabelsSchemaError: if a schema was provided but the labels added
                to the container violate it
        """
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Container.__init__(self, **kwargs)

    def __bool__(self):
        # Resolve the multiple-inheritance ambiguity in favor of Container
        return etas.Container.__bool__(self)

    @property
    def is_empty(self):
        """Whether this container has no labels."""
        return etas.Container.is_empty(self)

    def remove_empty_labels(self):
        """Removes all empty Labels from the container."""
        self.filter_elements([lambda e: not e.is_empty])

    def add_container(self, container):
        """Appends the labels in the given LabelsContainer to this container.

        Args:
            container: a LabelsContainer

        Raises:
            LabelsSchemaError: if this container has a schema enforced and
                any labels in the container violate it
        """
        self.add_iterable(container)

    def attributes(self):
        """Returns the list of class attributes that will be serialized.

        Returns:
            a list of attribute names
        """
        # Serialize the schema first, when one is enforced
        attrs = ["schema"] if self.has_schema else []
        return attrs + super(LabelsContainer, self).attributes()

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsContainer from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsContainer
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema = cls.get_schema_cls().from_dict(schema)

        return super(LabelsContainer, cls).from_dict(d, schema=schema)

    def validate_schema(self):
        """Validates that the labels are compliant with the current schema.

        Raises:
            LabelsSchemaError: if the container has labels that are not
                compliant with the schema
        """
        if not self.has_schema:
            return

        for labels in self:
            self._validate_labels(labels)

    def _validate_labels(self, labels):
        # No-op unless a schema is currently enforced
        if self.has_schema:
            self.schema.validate(labels)
class LabelsContainerSchema(LabelsSchema):
    """Base class for schemas of `LabelsContainer`s."""

    def add(self, labels):
        """Incorporates the given Labels into the schema.

        Args:
            labels: a Labels instance
        """
        self.merge_schema(labels.get_active_schema())

    def add_container(self, container):
        """Incorporates the given `LabelsContainer`s elements into the
        schema.

        Args:
            container: a LabelsContainer
        """
        self.add_iterable(container)

    def add_iterable(self, iterable):
        """Incorporates the given iterable of Labels into the schema.

        Args:
            iterable: an iterable of Labels
        """
        for labels in iterable:
            self.add(labels)

    @classmethod
    def build_active_schema(cls, container):
        """Builds a LabelsContainerSchema describing the active schema of
        the LabelsContainer.

        Args:
            container: a LabelsContainer

        Returns:
            a LabelsContainerSchema
        """
        schema = cls()
        for labels in container:
            # Bug fix: previously this passed `labels.get_active_schema()`,
            # but `add()` expects a Labels instance (it calls
            # `get_active_schema()` itself); passing a schema raised an
            # AttributeError since LabelsSchema has no `get_active_schema()`
            schema.add(labels)

        return schema
class LabelsContainerSchemaError(LabelsSchemaError):
    """Error raised when a LabelsContainerSchema is violated."""

    # Fixed docstring typo ("raisesd" -> "raised"); behavior unchanged
    pass
class LabelsSet(Labels, HasLabelsSchema, etas.Set):
    """Base class for `eta.core.serial.Set`s of Labels.

    `LabelsSet`s can optionally store a LabelsSchema instance that governs
    the schemas of the Labels in the set.
    """

    def __init__(self, schema=None, **kwargs):
        """Creates a LabelsSet instance.

        Args:
            schema: an optional LabelsSchema to enforce on each element of the
                set. By default, no schema is enforced
            **kwargs: valid keyword arguments for `eta.core.serial.Set()`

        Raises:
            LabelsSchemaError: if a schema was provided but the labels added to
                the container violate it
        """
        HasLabelsSchema.__init__(self, schema=schema)
        etas.Set.__init__(self, **kwargs)

    def __getitem__(self, key):
        """Gets the Labels for the given key.

        If the key is not found, an empty Labels is created and returned.

        Args:
            key: the key

        Returns:
            a Labels instance
        """
        if key not in self:
            # Auto-vivify: create and add an empty element keyed by `key`
            # rather than raising a KeyError.
            # `_ELE_CLS`/`_ELE_KEY_ATTR` are provided by `eta.core.serial.Set`
            logger.warning(
                "Key '%s' not found; creating new %s",
                key,
                self._ELE_CLS.__name__,
            )
            # pylint: disable=not-callable
            labels = self._ELE_CLS(**{self._ELE_KEY_ATTR: key})
            self.add(labels)

        return super(LabelsSet, self).__getitem__(key)

    def __bool__(self):
        # Resolve the multiple-inheritance ambiguity in favor of Set
        return etas.Set.__bool__(self)

    @property
    def is_empty(self):
        """Whether this set has no labels."""
        return etas.Set.is_empty(self)

    @classmethod
    def get_schema_cls(cls):
        """Gets the schema class for the Labels in the set.

        Returns:
            the LabelsSchema class
        """
        return cls._ELE_CLS.get_schema_cls()

    def empty(self):
        """Returns an empty copy of the LabelsSet.

        The schema of the set is preserved, if applicable.

        Returns:
            an empty LabelsSet
        """
        return self.__class__(schema=self.schema)

    def remove_empty_labels(self):
        """Removes all empty Labels from the set."""
        self.filter_elements([lambda labels: not labels.is_empty])

    def add_set(self, labels_set):
        """Adds the labels in the given LabelSet to the set.

        Args:
            labels_set: a LabelsSet

        Raises:
            LabelsSchemaError: if this set has a schema enforced and any labels
                in the set violate it
        """
        self.add_iterable(labels_set)

    def get_active_schema(self):
        """Gets the LabelsSchema describing the active schema of the set.

        Returns:
            a LabelsSchema
        """
        # Merge the active schemas of all elements into a single schema
        schema_cls = self.get_schema_cls()
        schema = schema_cls()
        for labels in self:
            schema.merge_schema(schema_cls.build_active_schema(labels))

        return schema

    def filter_by_schema(self, schema):
        """Removes labels from the set that are not compliant with the given
        schema.

        Args:
            schema: a LabelsSchema
        """
        for labels in self:
            labels.filter_by_schema(schema)

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema to the given LabelsSchema.

        The schema is propagated to every element of the set.

        Args:
            schema: a LabelsSchema to assign
            filter_by_schema: whether to filter labels that are not compliant
                with the schema. By default, this is False
            validate: whether to validate that the labels (after filtering, if
                applicable) are compliant with the new schema. By default, this
                is False

        Raises:
            LabelsSchemaError: if `validate` was `True` and this object
                contains labels that are not compliant with the schema
        """
        self.schema = schema
        for labels in self:
            labels.set_schema(
                schema, filter_by_schema=filter_by_schema, validate=validate
            )

    def attributes(self):
        """Returns the list of class attributes that will be serialized.

        Returns:
            a list of attribute names
        """
        # Serialize the schema first, when one is enforced
        _attrs = []
        if self.has_schema:
            _attrs.append("schema")

        _attrs += super(LabelsSet, self).attributes()
        return _attrs

    @classmethod
    def from_dict(cls, d):
        """Constructs a LabelsSet from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a LabelsSet
        """
        schema = d.get("schema", None)
        if schema is not None:
            schema_cls = cls.get_schema_cls()
            schema = schema_cls.from_dict(schema)

        return super(LabelsSet, cls).from_dict(d, schema=schema)

    @classmethod
    def from_labels_patt(cls, labels_patt):
        """Creates a LabelsSet from a pattern of Labels files on disk.

        Args:
            labels_patt: a pattern with one or more numeric sequences for
                Labels files on disk

        Returns:
            a LabelsSet
        """
        # Load one element per file matching the numeric pattern
        labels_set = cls()
        for labels_path in etau.get_pattern_matches(labels_patt):
            labels_set.add(cls._ELE_CLS.from_json(labels_path))

        return labels_set

    def validate_schema(self):
        """Validates that the labels in the set are compliant with the current
        schema.

        Raises:
            LabelsSchemaError: if the set has labels that are not compliant
                with the schema
        """
        if self.has_schema:
            for labels in self:
                self._validate_labels(labels)

    def _validate_labels(self, labels):
        # No-op unless a schema is currently enforced
        if self.has_schema:
            self.schema.validate(labels)
class LabelsRenderer(object):
    """Interface for classes that render Labels instances in a specified
    format.

    `LabelsRenderer`s must follow the strict convention that, when
    `in_place == False`, they do not modify or pass by reference any
    components of the source labels that they are rendering; any labels they
    produce are deep copies of the source labels.
    """

    # The Labels class that this renderer takes as input.
    #
    # Subclasses MUST set this field
    _LABELS_CLS = None

    @property
    def labels_cls(self):
        """The Labels subclass that this renderer takes as input."""
        return self._LABELS_CLS

    def render(self, in_place=False):
        """Renders the labels in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        raise NotImplementedError("subclasses must implement render()")
class LabelsContainerRenderer(LabelsRenderer):
    """Base class for rendering labels in `LabelsContainer`s in a specified
    format.

    Subclasses only need to define `_LABELS_CLS` and `_ELEMENT_RENDERER_CLS`
    to implement this interface.
    """

    # The LabelsRenderer class used to render elements of the container.
    # Subclasses MUST set this field
    _ELEMENT_RENDERER_CLS = None

    def __init__(self, container):
        """Creates a LabelsContainerRenderer instance.

        Args:
            container: a LabelsContainer
        """
        self._container = container

    @property
    def element_renderer_cls(self):
        """The LabelsRenderer class used to render container elements."""
        return self._ELEMENT_RENDERER_CLS

    def render(self, in_place=False):
        """Renders the container in the format specified by the class.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False

        Returns:
            a `labels_cls` instance
        """
        return self._render_in_place() if in_place else self._render_copy()

    def _render_in_place(self):
        # Render each element directly into the existing container
        for element in self._container:
            # pylint: disable=not-callable
            self.element_renderer_cls(element).render(in_place=True)

        return self._container

    def _render_copy(self):
        # Build a fresh container holding rendered copies of each element
        rendered = self._container.empty()
        for element in self._container:
            # pylint: disable=not-callable
            rendered.add(
                self.element_renderer_cls(element).render(in_place=False)
            )

        return rendered
class LabelsFrameRenderer(LabelsRenderer):
    """Interface for classes that render Labels at the frame-level."""
    #
    # The per-frame Labels class that this renderer outputs
    #
    # Subclasses MUST set this field
    #
    _FRAME_LABELS_CLS = None
    @property
    def frame_labels_cls(self):
        """The per-frame Labels class that this renderer outputs."""
        return self._FRAME_LABELS_CLS
    def render_frame(self, frame_number, in_place=False):
        """Renders the labels for the given frame.
        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place (i.e., without
                deep copying objects). By default, this is False
        Returns:
            a `frame_labels_cls` instance, or None if no labels exist for the
            given frame
        Raises:
            NotImplementedError: always, unless overridden by a subclass
        """
        raise NotImplementedError("subclasses must implement render_frame()")
    def render_all_frames(self, in_place=False):
        """Renders the labels for all possible frames.
        Args:
            in_place: whether to perform the rendering in-place (i.e., without
                deep copying objects). By default, this is False
        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls` instances
        Raises:
            NotImplementedError: always, unless overridden by a subclass
        """
        raise NotImplementedError(
            "subclasses must implement render_all_frames()"
        )
class LabelsContainerFrameRenderer(
    LabelsFrameRenderer, LabelsContainerRenderer
):
    """Base class for rendering labels in `LabelsContainer`s at the
    frame-level.

    Subclasses only need to define `_LABELS_CLS`, `_FRAME_LABELS_CLS`, and
    `_ELEMENT_RENDERER_CLS` to implement this interface.
    """

    # The LabelsFrameRenderer class used to render elements of the container.
    # Subclasses MUST set this field
    _ELEMENT_RENDERER_CLS = None

    @property
    def element_renderer_cls(self):
        """The LabelsFrameRenderer class used to render container elements."""
        return self._ELEMENT_RENDERER_CLS

    def render_frame(self, frame_number, in_place=False):
        """Renders the container for the given frame.

        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a `frame_labels_cls` instance, which may be empty if no labels
            exist for the specified frame
        """
        # pylint: disable=not-callable
        rendered = self.frame_labels_cls()
        for element in self._container:
            # pylint: disable=not-callable
            frame_labels = self.element_renderer_cls(element).render_frame(
                frame_number, in_place=in_place
            )
            if frame_labels is not None:
                rendered.add(frame_labels)

        return rendered

    def render_all_frames(self, in_place=False):
        """Renders the container for all possible frames.

        Args:
            in_place: whether to perform the rendering in-place (i.e.,
                without deep copying objects). By default, this is False

        Returns:
            a dictionary mapping frame numbers to `frame_labels_cls`
            instances
        """
        # pylint: disable=not-callable
        rendered_map = defaultdict(self.frame_labels_cls)
        for element in self._container:
            # pylint: disable=not-callable
            frames_map = self.element_renderer_cls(element).render_all_frames(
                in_place=in_place
            )
            for frame_number, frame_labels in iteritems(frames_map):
                rendered_map[frame_number].add(frame_labels)

        return dict(rendered_map)
class LabelsSpatiotemporalRenderer(LabelsRenderer):
    """Interface for classes that render Labels in spatiotemporal format.

    Concrete subclasses inherit the `render()` contract from LabelsRenderer.
    """
    pass
class LabelsContainerSpatiotemporalRenderer(
    LabelsSpatiotemporalRenderer, LabelsContainerRenderer
):
    """Base class for rendering labels for `LabelsContainer`s in spatiotemporal
    format.
    The only thing that subclasses need to do to implement this interface is
    to define their `_LABELS_CLS` and `_ELEMENT_RENDERER_CLS`.
    """
    pass
| 30.000995 | 79 | 0.629631 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
import logging
import eta.core.serial as etas
import eta.core.utils as etau
logger = logging.getLogger(__name__)
class Labels(etas.Serializable):
    """Base class for serializable collections of labels.

    NOTE(review): this block is a comment-stripped duplicate of the
    documented `Labels` class earlier in this file.
    """
    def __bool__(self):
        # An instance is truthy iff it contains labels
        return not self.is_empty
    @property
    def is_empty(self):
        """Whether this instance contains no labels."""
        raise NotImplementedError("subclasses must implement is_empty")
    @classmethod
    def get_schema_cls(cls):
        """Returns the schema class for this class: the class whose name is
        this class's name with "Schema" appended."""
        class_name = etau.get_class_name(cls)
        return etau.get_class(class_name + "Schema")
    def get_active_schema(self):
        """Builds and returns the active schema for this instance."""
        schema_cls = self.get_schema_cls()
        return schema_cls.build_active_schema(self)
    def filter_by_schema(self, schema):
        """Filters the labels by the given schema."""
        raise NotImplementedError(
            "subclasses must implement `filter_by_schema()`"
        )
class LabelsSchema(etas.Serializable):
def __bool__(self):
return not self.is_empty
@property
def is_empty(self):
raise NotImplementedError("subclasses must implement is_empty")
def add(self, labels):
labels_schema = self.build_active_schema(labels)
self.merge_schema(labels_schema)
def add_iterable(self, iterable):
for labels in iterable:
self.add(labels)
def validate(self, labels):
raise NotImplementedError("subclasses must implement `validate()`")
def validate_subset_of_schema(self, schema):
raise NotImplementedError(
"subclasses must implement `validate_subset_of_schema()`"
)
def validate_schema_type(self, schema):
if not isinstance(self, type(schema)):
raise LabelsSchemaError(
"Expected `self` to match schema type %s; found %s"
% (type(self), type(schema))
)
def is_valid(self, labels):
try:
self.validate(labels)
return True
except LabelsSchemaError:
return False
def is_subset_of_schema(self, schema):
try:
self.validate_subset_of_schema(schema)
return True
except LabelsSchemaError:
return False
@classmethod
def build_active_schema(cls, labels):
raise NotImplementedError(
"subclasses must implement `build_active_schema()`"
)
def merge_schema(self, schema):
raise NotImplementedError("subclasses must implement `merge_schema()`")
class LabelsSchemaError(Exception):
pass
class HasLabelsSchema(object):
def __init__(self, schema=None):
self.schema = schema
@property
def has_schema(self):
return self.schema is not None
def get_schema(self):
return self.schema
def set_schema(self, schema, filter_by_schema=False, validate=False):
self.schema = schema
if not self.has_schema:
return
if filter_by_schema:
self.filter_by_schema(self.schema)
if validate:
self.validate_schema()
def validate_schema(self):
if self.has_schema:
self.schema.validate(self)
def freeze_schema(self):
self.set_schema(self.get_active_schema())
def remove_schema(self):
self.set_schema(None)
class HasLabelsSupport(object):
def __init__(self, support=None):
self._support = support
@property
def support(self):
if self.is_support_frozen:
return self._support
return self._compute_support()
@property
def is_support_frozen(self):
return self._support is not None
def set_support(self, support):
self._support = support
def merge_support(self, support):
new_support = self.support.merge(support)
self.set_support(new_support)
def freeze_support(self):
if not self.is_support_frozen:
self._support = self._compute_support()
def clear_support(self):
self._support = None
def _compute_support(self):
raise NotImplementedError(
"subclasses must implement _compute_support()"
)
class HasFramewiseView(object):
    """Mixin for labels classes that can render a framewise view of
    themselves."""
    @property
    def framewise_renderer_cls(self):
        """The renderer class used to produce the framewise view."""
        raise NotImplementedError(
            "subclasses must implement framewise_renderer_cls()"
        )
    def render_framewise(self, in_place=False):
        """Renders this instance in framewise format.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        """
        renderer = self.framewise_renderer_cls(self)
        return renderer.render(in_place=in_place)
class HasSpatiotemporalView(object):
    """Mixin for labels classes that can render a spatiotemporal view of
    themselves."""
    @property
    def spatiotemporal_renderer_cls(self):
        """The renderer class used to produce the spatiotemporal view."""
        raise NotImplementedError(
            "subclasses must implement spatiotemporal_renderer_cls()"
        )
    def render_spatiotemporal(self, in_place=False):
        """Renders this instance in spatiotemporal format.

        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        """
        renderer = self.spatiotemporal_renderer_cls(self)
        return renderer.render(in_place=in_place)
class LabelsContainer(Labels, HasLabelsSchema, etas.Container):
def __init__(self, schema=None, **kwargs):
HasLabelsSchema.__init__(self, schema=schema)
etas.Container.__init__(self, **kwargs)
def __bool__(self):
return etas.Container.__bool__(self)
@property
def is_empty(self):
return etas.Container.is_empty(self)
def remove_empty_labels(self):
self.filter_elements([lambda labels: not labels.is_empty])
def add_container(self, container):
self.add_iterable(container)
def attributes(self):
_attrs = []
if self.has_schema:
_attrs.append("schema")
_attrs += super(LabelsContainer, self).attributes()
return _attrs
@classmethod
def from_dict(cls, d):
schema = d.get("schema", None)
if schema is not None:
schema_cls = cls.get_schema_cls()
schema = schema_cls.from_dict(schema)
return super(LabelsContainer, cls).from_dict(d, schema=schema)
def validate_schema(self):
if self.has_schema:
for labels in self:
self._validate_labels(labels)
def _validate_labels(self, labels):
if self.has_schema:
self.schema.validate(labels)
class LabelsContainerSchema(LabelsSchema):
def add(self, labels):
self.merge_schema(labels.get_active_schema())
def add_container(self, container):
self.add_iterable(container)
def add_iterable(self, iterable):
for labels in iterable:
self.add(labels)
@classmethod
def build_active_schema(cls, container):
schema = cls()
for labels in container:
schema.add(labels.get_active_schema())
return schema
class LabelsContainerSchemaError(LabelsSchemaError):
pass
class LabelsSet(Labels, HasLabelsSchema, etas.Set):
def __init__(self, schema=None, **kwargs):
HasLabelsSchema.__init__(self, schema=schema)
etas.Set.__init__(self, **kwargs)
def __getitem__(self, key):
if key not in self:
logger.warning(
"Key '%s' not found; creating new %s",
key,
self._ELE_CLS.__name__,
)
labels = self._ELE_CLS(**{self._ELE_KEY_ATTR: key})
self.add(labels)
return super(LabelsSet, self).__getitem__(key)
def __bool__(self):
return etas.Set.__bool__(self)
@property
def is_empty(self):
return etas.Set.is_empty(self)
@classmethod
def get_schema_cls(cls):
return cls._ELE_CLS.get_schema_cls()
def empty(self):
return self.__class__(schema=self.schema)
def remove_empty_labels(self):
self.filter_elements([lambda labels: not labels.is_empty])
def add_set(self, labels_set):
self.add_iterable(labels_set)
def get_active_schema(self):
schema_cls = self.get_schema_cls()
schema = schema_cls()
for labels in self:
schema.merge_schema(schema_cls.build_active_schema(labels))
return schema
def filter_by_schema(self, schema):
for labels in self:
labels.filter_by_schema(schema)
def set_schema(self, schema, filter_by_schema=False, validate=False):
self.schema = schema
for labels in self:
labels.set_schema(
schema, filter_by_schema=filter_by_schema, validate=validate
)
def attributes(self):
_attrs = []
if self.has_schema:
_attrs.append("schema")
_attrs += super(LabelsSet, self).attributes()
return _attrs
@classmethod
def from_dict(cls, d):
schema = d.get("schema", None)
if schema is not None:
schema_cls = cls.get_schema_cls()
schema = schema_cls.from_dict(schema)
return super(LabelsSet, cls).from_dict(d, schema=schema)
@classmethod
def from_labels_patt(cls, labels_patt):
labels_set = cls()
for labels_path in etau.get_pattern_matches(labels_patt):
labels_set.add(cls._ELE_CLS.from_json(labels_path))
return labels_set
def validate_schema(self):
if self.has_schema:
for labels in self:
self._validate_labels(labels)
def _validate_labels(self, labels):
if self.has_schema:
self.schema.validate(labels)
class LabelsRenderer(object):
_LABELS_CLS = None
@property
def labels_cls(self):
return self._LABELS_CLS
def render(self, in_place=False):
raise NotImplementedError("subclasses must implement render()")
class LabelsContainerRenderer(LabelsRenderer):
_ELEMENT_RENDERER_CLS = None
def __init__(self, container):
self._container = container
@property
def element_renderer_cls(self):
return self._ELEMENT_RENDERER_CLS
def render(self, in_place=False):
if in_place:
return self._render_in_place()
return self._render_copy()
def _render_in_place(self):
for labels in self._container:
renderer = self.element_renderer_cls(labels)
renderer.render(in_place=True)
return self._container
def _render_copy(self):
new_container = self._container.empty()
for labels in self._container:
renderer = self.element_renderer_cls(labels)
element = renderer.render(in_place=False)
new_container.add(element)
return new_container
class LabelsFrameRenderer(LabelsRenderer):
_FRAME_LABELS_CLS = None
@property
def frame_labels_cls(self):
return self._FRAME_LABELS_CLS
def render_frame(self, frame_number, in_place=False):
raise NotImplementedError("subclasses must implement render_frame()")
def render_all_frames(self, in_place=False):
raise NotImplementedError(
"subclasses must implement render_all_frames()"
)
class LabelsContainerFrameRenderer(
LabelsFrameRenderer, LabelsContainerRenderer
):
_ELEMENT_RENDERER_CLS = None
@property
def element_renderer_cls(self):
return self._ELEMENT_RENDERER_CLS
def render_frame(self, frame_number, in_place=False):
frame_elements = self.frame_labels_cls()
for labels in self._container:
renderer = self.element_renderer_cls(labels)
frame_element = renderer.render_frame(
frame_number, in_place=in_place
)
if frame_element is not None:
frame_elements.add(frame_element)
return frame_elements
def render_all_frames(self, in_place=False):
frame_elements_map = defaultdict(self.frame_labels_cls)
for labels in self._container:
renderer = self.element_renderer_cls(labels)
frames_map = renderer.render_all_frames(in_place=in_place)
for frame_number, frame_element in iteritems(frames_map):
frame_elements_map[frame_number].add(frame_element)
return dict(frame_elements_map)
class LabelsSpatiotemporalRenderer(LabelsRenderer):
pass
class LabelsContainerSpatiotemporalRenderer(
LabelsSpatiotemporalRenderer, LabelsContainerRenderer
):
pass
| true | true |
f73084dd6dc2a8e45a03307bb76941dc0e054034 | 2,458 | py | Python | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | null | null | null | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | 14 | 2021-03-19T04:37:26.000Z | 2022-03-12T00:23:16.000Z | examples/image_classification/classify.py | sergioalberto/rpi-deep-learning-lab | 94024c2b2c225dc607954874bdcd345b805b1561 | [
"MIT"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from tflite_runtime.interpreter import Interpreter
from tensorflow.lite.python.interpreter import Interpreter
import numpy as np
import argparse
from PIL import Image
parser = argparse.ArgumentParser(description='Image Classification')
parser.add_argument('--filename', type=str, help='Specify the filename', required=True)
parser.add_argument('--model_path', type=str, help='Specify the model path', required=True)
parser.add_argument('--label_path', type=str, help='Specify the label map', required=True)
parser.add_argument('--top_k', type=int, help='How many top results', default=3)
args = parser.parse_args()
filename = args.filename
model_path = args.model_path
label_path = args.label_path
top_k_results = args.top_k
with open(label_path, 'r') as f:
labels = list(map(str.strip, f.readlines()))
# Load TFLite model and allocate tensors
interpreter = Interpreter(model_path=model_path)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Read image
img = Image.open(filename).convert('RGB')
# Get input size
input_shape = input_details[0]['shape']
size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]
# Preprocess image
img = img.resize(size)
img = np.array(img)
# Add a batch dimension
input_data = np.expand_dims(img, axis=0)
# Point the data to be used for testing and run the interpreter
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# Obtain results and map them to the classes
predictions = interpreter.get_tensor(output_details[0]['index'])[0]
# Get indices of the top k results
top_k_indices = np.argsort(predictions)[::-1][:top_k_results]
for i in range(top_k_results):
print(labels[top_k_indices[i]], predictions[top_k_indices[i]] / 255.0)
| 34.619718 | 91 | 0.767697 |
from tensorflow.lite.python.interpreter import Interpreter
import numpy as np
import argparse
from PIL import Image
parser = argparse.ArgumentParser(description='Image Classification')
parser.add_argument('--filename', type=str, help='Specify the filename', required=True)
parser.add_argument('--model_path', type=str, help='Specify the model path', required=True)
parser.add_argument('--label_path', type=str, help='Specify the label map', required=True)
parser.add_argument('--top_k', type=int, help='How many top results', default=3)
args = parser.parse_args()
filename = args.filename
model_path = args.model_path
label_path = args.label_path
top_k_results = args.top_k
with open(label_path, 'r') as f:
labels = list(map(str.strip, f.readlines()))
interpreter = Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
img = Image.open(filename).convert('RGB')
input_shape = input_details[0]['shape']
size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]
img = img.resize(size)
img = np.array(img)
input_data = np.expand_dims(img, axis=0)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
predictions = interpreter.get_tensor(output_details[0]['index'])[0]
top_k_indices = np.argsort(predictions)[::-1][:top_k_results]
for i in range(top_k_results):
print(labels[top_k_indices[i]], predictions[top_k_indices[i]] / 255.0)
| true | true |
f73086577458af1fa28ed63eff9c394107997af2 | 29,942 | py | Python | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | 17 | 2020-04-22T11:24:11.000Z | 2022-03-14T10:03:02.000Z | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | null | null | null | binance/websockets.py | pnpn521521/python-binance-with-futures-websocket | 483737fb364f4de07427df4c2b76e63561b40bbb | [
"MIT"
] | 7 | 2020-06-30T20:30:28.000Z | 2021-07-28T02:25:27.000Z | # coding=utf-8
import json
import threading
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from binance.client import Client
class BinanceClientProtocol(WebSocketClientProtocol):
    """Websocket protocol that decodes JSON text frames and forwards them to
    the factory's registered callback."""

    def __init__(self):
        # Bug fix: the original called
        # super(WebSocketClientProtocol, self).__init__(), which starts the
        # cooperative chain *above* WebSocketClientProtocol and therefore
        # skips that class's own initializer. Start from this class instead
        # so the full MRO runs.
        super(BinanceClientProtocol, self).__init__()

    def onConnect(self, response):
        """Called when the websocket handshake completes.

        Resets the factory's reconnect delay so the next reconnect after a
        drop starts from the short initial delay again.
        """
        self.factory.resetDelay()

    def onMessage(self, payload, isBinary):
        """Decodes a text frame as JSON and hands it to the factory callback.

        Binary frames and frames that are not valid JSON are ignored.
        """
        if not isBinary:
            try:
                payload_obj = json.loads(payload.decode('utf8'))
            except ValueError:
                # Malformed payload; drop it rather than crash the reactor
                pass
            else:
                self.factory.callback(payload_obj)
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
    """Twisted reconnecting factory with retry parameters tuned for
    Binance streams."""
    # set initial delay to a short time so the first reconnect is near-instant
    initialDelay = 0.1
    # cap the exponential backoff between attempts (seconds)
    maxDelay = 10
    # give up after this many retries (see BinanceClientFactory's handlers)
    maxRetries = 5
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
    """Websocket client factory that reconnects on connection failure/loss
    and emits an error payload to the callback once the retry budget is
    exhausted."""

    protocol = BinanceClientProtocol

    _reconnect_error_payload = {
        'e': 'error',
        'm': 'Max reconnect retries reached'
    }

    def _retry_or_give_up(self, connector):
        # Schedule a reconnect attempt; once the retry counter exceeds the
        # configured maximum, surface an error event to the subscriber
        self.retry(connector)
        if self.retries > self.maxRetries:
            self.callback(self._reconnect_error_payload)

    def clientConnectionFailed(self, connector, reason):
        """Handles a connection attempt that never succeeded."""
        self._retry_or_give_up(connector)

    def clientConnectionLost(self, connector, reason):
        """Handles an established connection that was dropped."""
        self._retry_or_give_up(connector)
class BinanceSocketManager(threading.Thread):
STREAM_URL = 'wss://stream.binance.com:9443/'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
"""Initialise the BinanceSocketManager
:param client: Binance API client
:type client: binance.Client
:param user_timeout: Custom websocket timeout
:type user_timeout: int
"""
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {'user': None, 'margin': None}
self._listen_keys = {'user': None, 'margin': None}
self._account_callbacks = {'user': None, 'margin': None}
def _start_socket(self, path, callback, prefix='ws/'):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param depth: optional Number of depth entries to return, default None. If passed returns a partial book instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback)
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_miniticker_socket(self, callback, update_time=1000):
"""Start a miniticker websocket for all trades
This is not in the official Binance api docs, but this is what
feeds the right column on a ticker page on Binance.
:param callback: callback function to handle messages
:type callback: function
:param update_time: time between callbacks in milliseconds, must be 1000 or greater
:type update_time: int
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'e': '24hrMiniTicker', # Event type
'E': 1515906156273, # Event time
's': 'QTUMETH', # Symbol
'c': '0.03836900', # close
'o': '0.03953500', # open
'h': '0.04400000', # high
'l': '0.03756000', # low
'v': '147435.80000000', # volume
'q': '5903.84338533' # quote volume
}
]
"""
return self._start_socket('!miniTicker@arr@{}ms'.format(update_time), callback)
def start_trade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "trade", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"t": 12345, # Trade ID
"p": "0.001", # Price
"q": "100", # Quantity
"b": 88, # Buyer order Id
"a": 50, # Seller order Id
"T": 123456785, # Trade time
"m": true, # Is the buyer the market maker?
"M": true # Ignore.
}
"""
return self._start_socket(symbol.lower() + '@trade', callback)
def start_aggtrade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "aggTrade", # event type
"E": 1499405254326, # event time
"s": "ETHBTC", # symbol
"a": 70232, # aggregated tradeid
"p": "0.10281118", # price
"q": "8.15632997", # quantity
"f": 77489, # first breakdown trade id
"l": 77489, # last breakdown trade id
"T": 1499405254324, # trade time
"m": false, # whether buyer is a maker
"M": true # can be ignored
}
"""
return self._start_socket(symbol.lower() + '@aggTrade', callback)
def start_symbol_ticker_socket(self, symbol, callback):
"""Start a websocket for a symbol's ticker data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "24hrTicker", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"p": "0.0015", # Price change
"P": "250.00", # Price change percent
"w": "0.0018", # Weighted average price
"x": "0.0009", # Previous day's close price
"c": "0.0025", # Current day's close price
"Q": "10", # Close trade's quantity
"b": "0.0024", # Best bid price
"B": "10", # Bid bid quantity
"a": "0.0026", # Best ask price
"A": "100", # Best ask quantity
"o": "0.0010", # Open price
"h": "0.0025", # High price
"l": "0.0010", # Low price
"v": "10000", # Total traded base asset volume
"q": "18", # Total traded quote asset volume
"O": 0, # Statistics open time
"C": 86400000, # Statistics close time
"F": 0, # First trade ID
"L": 18150, # Last trade Id
"n": 18151 # Total number of trades
}
"""
return self._start_socket(symbol.lower() + '@ticker', callback)
def start_ticker_socket(self, callback):
"""Start a websocket for all ticker data
By default all markets are included in an array.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-market-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'F': 278610,
'o': '0.07393000',
's': 'BCCBTC',
'C': 1509622420916,
'b': '0.07800800',
'l': '0.07160300',
'h': '0.08199900',
'L': 287722,
'P': '6.694',
'Q': '0.10000000',
'q': '1202.67106335',
'p': '0.00494900',
'O': 1509536020916,
'a': '0.07887800',
'n': 9113,
'B': '1.00000000',
'c': '0.07887900',
'x': '0.07399600',
'w': '0.07639068',
'A': '2.41900000',
'v': '15743.68900000'
}
]
"""
return self._start_socket('!ticker@arr', callback)
def start_symbol_book_ticker_socket(self, symbol, callback):
"""Start a websocket for the best bid or ask's price or quantity for a specified symbol.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-book-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"u":400900217, // order book updateId
"s":"BNBUSDT", // symbol
"b":"25.35190000", // best bid price
"B":"31.21000000", // best bid qty
"a":"25.36520000", // best ask price
"A":"40.66000000" // best ask qty
}
"""
return self._start_socket(symbol.lower() + '@bookTicker', callback)
def start_book_ticker_socket(self, callback):
"""Start a websocket for the best bid or ask's price or quantity for all symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-book-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
// Same as <symbol>@bookTicker payload
}
"""
return self._start_socket('!bookTicker', callback)
def start_multiplex_socket(self, streams, callback):
"""Start a multiplexed socket using a list of socket names.
User stream sockets can not be included.
Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
:param streams: list of stream names in lower case
:type streams: list
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
stream_path = 'streams={}'.format('/'.join(streams))
return self._start_socket(stream_path, callback, 'stream?')
def start_user_socket(self, callback):
"""Start a websocket for user data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/user-data-stream.md
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user listen key
user_listen_key = self._client.stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket('user', user_listen_key, callback)
def start_margin_socket(self, callback):
"""Start a websocket for margin data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/user-data-stream.md
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user margin listen key
margin_listen_key = self._client.margin_stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket('margin', margin_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
"""Starts one of user or margin socket"""
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(self._user_timeout, callback, [socket_type])
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == 'user':
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['user']:
self._stop_account_socket('user')
# or a margin stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['margin']:
self._stop_account_socket('margin')
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error about reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
class BinanceFuturesSocketManager(threading.Thread):
STREAM_URL = "wss://fstream.binance.com/"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
"""Initialise the BinanceSocketManager
:param client: Binance API client
:type client: binance.Client
:param user_timeout: Custom websocket timeout
:type user_timeout: int
"""
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {"user": None, "margin": None, "futures": None}
self._listen_keys = {"user": None, "margin": None, "futures": None}
self._account_callbacks = {"user": None, "margin": None, "futures": None}
def _start_socket(self, path, callback, prefix="ws/"):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_futures_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@aggTrade", callback)
def start_futures_symbol_markprice_socket(self, symbol, callback, update_time=None):
socket_name = symbol.lower() + "@markPrice"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_markprice_socket(self, callback, update_time=None):
socket_name = "!markPrice@arr"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_kline_socket(
self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE
):
socket_name = "{}@kline_{}".format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_futures_symbol_miniticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@miniTicker", callback)
def start_futures_miniticker_socket(self, callback):
return self._start_socket("!miniTicker@arr", callback)
def start_futures_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@ticker", callback)
def start_futures_ticker_socket(self, callback):
return self._start_socket("!ticker@arr", callback)
def start_futures_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@bookTicker", callback)
def startt_futures_book_ticker_socket(self, callback):
return self._start_socket("!bookTicker", callback)
def start_futures_symbol_force_order_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@forceOrder", callback)
def start_futures_force_order_socket(self, callback):
return self._start_socket("!forceOrder@arr", callback)
def start_futures_depth_socket(
self, symbol, callback, depth=None, update_time=None
):
socket_name = symbol.lower() + "@depth"
if depth:
socket_name = "{}{}".format(socket_name, depth)
if update_time:
socket_name = "{}@{}ms".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_multiplex_socket(self, streams, callback):
"""Start a multiplexed socket using a list of socket names.
User stream sockets can not be included.
Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
:param streams: list of stream names in lower case
:type streams: list
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
stream_path = "streams={}".format("/".join(streams))
return self._start_socket(stream_path, callback, "stream?")
def start_futures_socket(self, callback):
# Get the user listen key
futures_listen_key = self._client.futures_stream_get_listen_key()
# and start the socket with this specific key
return self._start_account_socket("futures", futures_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
"""Starts one of user or margin socket"""
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(
self._user_timeout, callback, [socket_type]
)
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == "user":
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
elif socket_type == "margin":
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.futures_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(
self.STREAM_URL + "tmp_path"
)
self._conns[conn_key].disconnect()
del self._conns[conn_key]
# check if we have a user stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["user"]:
self._stop_account_socket("user")
# or a margin stream socket
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["margin"]:
self._stop_account_socket("margin")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["futures"]:
self._stop_account_socket("futures")
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error about reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | 36.514634 | 141 | 0.583962 |
import json
import threading
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from binance.client import Client
class BinanceClientProtocol(WebSocketClientProtocol):
def __init__(self):
super(WebSocketClientProtocol, self).__init__()
def onConnect(self, response):
self.factory.resetDelay()
def onMessage(self, payload, isBinary):
if not isBinary:
try:
payload_obj = json.loads(payload.decode('utf8'))
except ValueError:
pass
else:
self.factory.callback(payload_obj)
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
initialDelay = 0.1
maxDelay = 10
maxRetries = 5
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
protocol = BinanceClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': 'Max reconnect retries reached'
}
def clientConnectionFailed(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
class BinanceSocketManager(threading.Thread):
STREAM_URL = 'wss://stream.binance.com:9443/'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {'user': None, 'margin': None}
self._listen_keys = {'user': None, 'margin': None}
self._account_callbacks = {'user': None, 'margin': None}
def _start_socket(self, path, callback, prefix='ws/'):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_depth_socket(self, symbol, callback, depth=None):
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback)
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_miniticker_socket(self, callback, update_time=1000):
return self._start_socket('!miniTicker@arr@{}ms'.format(update_time), callback)
def start_trade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@trade', callback)
def start_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@aggTrade', callback)
def start_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@ticker', callback)
def start_ticker_socket(self, callback):
return self._start_socket('!ticker@arr', callback)
def start_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + '@bookTicker', callback)
def start_book_ticker_socket(self, callback):
return self._start_socket('!bookTicker', callback)
def start_multiplex_socket(self, streams, callback):
stream_path = 'streams={}'.format('/'.join(streams))
return self._start_socket(stream_path, callback, 'stream?')
def start_user_socket(self, callback):
user_listen_key = self._client.stream_get_listen_key()
return self._start_account_socket('user', user_listen_key, callback)
def start_margin_socket(self, callback):
margin_listen_key = self._client.margin_stream_get_listen_key()
return self._start_account_socket('margin', margin_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(self._user_timeout, callback, [socket_type])
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == 'user':
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
if conn_key not in self._conns:
return
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + 'tmp_path')
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['user']:
self._stop_account_socket('user')
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys['margin']:
self._stop_account_socket('margin')
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
pass
def close(self):
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
class BinanceFuturesSocketManager(threading.Thread):
STREAM_URL = "wss://fstream.binance.com/"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT):
threading.Thread.__init__(self)
self._conns = {}
self._client = client
self._user_timeout = user_timeout
self._timers = {"user": None, "margin": None, "futures": None}
self._listen_keys = {"user": None, "margin": None, "futures": None}
self._account_callbacks = {"user": None, "margin": None, "futures": None}
def _start_socket(self, path, callback, prefix="ws/"):
if path in self._conns:
return False
factory_url = self.STREAM_URL + prefix + path
factory = BinanceClientFactory(factory_url)
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[path] = connectWS(factory, context_factory)
return path
def start_futures_aggtrade_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@aggTrade", callback)
def start_futures_symbol_markprice_socket(self, symbol, callback, update_time=None):
socket_name = symbol.lower() + "@markPrice"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_markprice_socket(self, callback, update_time=None):
socket_name = "!markPrice@arr"
if update_time:
socket_name = "{}@{}s".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_kline_socket(
self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE
):
socket_name = "{}@kline_{}".format(symbol.lower(), interval)
return self._start_socket(socket_name, callback)
def start_futures_symbol_miniticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@miniTicker", callback)
def start_futures_miniticker_socket(self, callback):
return self._start_socket("!miniTicker@arr", callback)
def start_futures_symbol_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@ticker", callback)
def start_futures_ticker_socket(self, callback):
return self._start_socket("!ticker@arr", callback)
def start_futures_symbol_book_ticker_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@bookTicker", callback)
def startt_futures_book_ticker_socket(self, callback):
return self._start_socket("!bookTicker", callback)
def start_futures_symbol_force_order_socket(self, symbol, callback):
return self._start_socket(symbol.lower() + "@forceOrder", callback)
def start_futures_force_order_socket(self, callback):
return self._start_socket("!forceOrder@arr", callback)
def start_futures_depth_socket(
self, symbol, callback, depth=None, update_time=None
):
socket_name = symbol.lower() + "@depth"
if depth:
socket_name = "{}{}".format(socket_name, depth)
if update_time:
socket_name = "{}@{}ms".format(socket_name, update_time)
return self._start_socket(socket_name, callback)
def start_futures_multiplex_socket(self, streams, callback):
stream_path = "streams={}".format("/".join(streams))
return self._start_socket(stream_path, callback, "stream?")
def start_futures_socket(self, callback):
futures_listen_key = self._client.futures_stream_get_listen_key()
return self._start_account_socket("futures", futures_listen_key, callback)
def _start_account_socket(self, socket_type, listen_key, callback):
self._check_account_socket_open(listen_key)
self._listen_keys[socket_type] = listen_key
self._account_callbacks[socket_type] = callback
conn_key = self._start_socket(listen_key, callback)
if conn_key:
self._start_socket_timer(socket_type)
return conn_key
def _check_account_socket_open(self, listen_key):
if not listen_key:
return
for conn_key in self._conns:
if len(conn_key) >= 60 and conn_key[:60] == listen_key:
self.stop_socket(conn_key)
break
def _start_socket_timer(self, socket_type):
callback = self._keepalive_account_socket
self._timers[socket_type] = threading.Timer(
self._user_timeout, callback, [socket_type]
)
self._timers[socket_type].setDaemon(True)
self._timers[socket_type].start()
def _keepalive_account_socket(self, socket_type):
if socket_type == "user":
listen_key_func = self._client.stream_get_listen_key
callback = self._account_callbacks[socket_type]
elif socket_type == "margin":
listen_key_func = self._client.margin_stream_get_listen_key
callback = self._account_callbacks[socket_type]
else:
listen_key_func = self._client.futures_stream_get_listen_key
callback = self._account_callbacks[socket_type]
listen_key = listen_key_func()
if listen_key != self._listen_keys[socket_type]:
self._start_account_socket(socket_type, listen_key, callback)
def stop_socket(self, conn_key):
if conn_key not in self._conns:
return
self._conns[conn_key].factory = WebSocketClientFactory(
self.STREAM_URL + "tmp_path"
)
self._conns[conn_key].disconnect()
del self._conns[conn_key]
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["user"]:
self._stop_account_socket("user")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["margin"]:
self._stop_account_socket("margin")
if len(conn_key) >= 60 and conn_key[:60] == self._listen_keys["futures"]:
self._stop_account_socket("futures")
def _stop_account_socket(self, socket_type):
if not self._listen_keys[socket_type]:
return
if self._timers[socket_type]:
self._timers[socket_type].cancel()
self._timers[socket_type] = None
self._listen_keys[socket_type] = None
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
pass
def close(self):
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | true | true |
f73087aa81776159702022d95866d5b4fbe8cb4f | 3,942 | py | Python | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | null | null | null | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | null | null | null | level3/usb/decrypt.py | fishilico/sstic-2016 | 9a05bb18df4c8d2e76f1e30fda6b38b1bc930e8c | [
"Beerware"
] | 1 | 2020-04-03T06:19:11.000Z | 2020-04-03T06:19:11.000Z | #!/usr/bin/env python3
import binascii
import hashlib
import os.path
import struct
import sys
sys.path.insert(0, os.path.dirname(__file__))
import rc4
# Helper functions
def rol32(val, shift):
val = val & 0xffffffff
shift = shift & 0x1f
if not shift:
return val
return ((val << shift) & 0xffffffff) | (val >> (32 - shift))
def ror32(val, shift):
return rol32(val, 32 - shift)
# Load encrypted data
with open('extracted_encrypted_data.bin', 'rb') as f:
all_data = f.read()
enc_data = all_data[0x40:]
def rc6_decrypt(ks, enc_block):
"""https://en.wikipedia.org/wiki/RC6"""
a, b, c, d = enc_block
a -= ks[0xa8 // 4]
c -= ks[0xac // 4]
for iround in range(19, -1, -1):
a, b, c, d = [x & 0xffffffff for x in (d, a, b, c)]
u = ror32(d * (2 * d + 1), 5)
t = ror32(b * (2 * b + 1), 5)
c = rol32(c - ks[2 * iround + 3], t) ^ u
a = rol32(a - ks[2 * iround + 2], u) ^ t
d = (d - ks[1]) & 0xffffffff
b = (b - ks[0]) & 0xffffffff
return a, b, c, d
# TODO: key derivation with key "551C2016B00B5F00"
key_state = [
0x2129ab75, 0x975374c8, 0x5eead5ac, 0x2c8b312f,
0xfd0a1322, 0x80d0133c, 0x16a849c2, 0x42064c4a,
0x75fe77f5, 0x4ddaf4d7, 0xe9221458, 0x46a97a25,
0xfea74495, 0xe119d517, 0x055f2605, 0xc6706c81,
0x4d966822, 0xadc3e831, 0x68c68bdf, 0xfcb57dac,
0x7df33f01, 0xefb6081f, 0x98eb29eb, 0x668352b7,
0x98a1545b, 0x0a3e64cd, 0x9b16a929, 0x2233c1c4,
0x7879ec25, 0x17c4466a, 0x6e0b37ea, 0xde30ebb2,
0x01ef095c, 0x35fbdb33, 0xa97b35b7, 0xdfbf652c,
0xaf668798, 0xb7846548, 0xafd8706a, 0x2d346ced,
0xbb33dfe3, 0xae79adfc, 0xc3115146, 0x05a51471,
]
# Decrypt with RC6-CBC with 128-bit blocks (4 32-bit numbers)
iv = [0, 0, 0, 0]
dec_data = bytearray(len(enc_data))
for blkoffset in range(0, len(enc_data), 16):
enc_block = struct.unpack('<IIII', enc_data[blkoffset:blkoffset + 16])
dec_block = rc6_decrypt(key_state, enc_block)
dec_block = [i ^ d for i, d in zip(iv, dec_block)]
dec_data[blkoffset:blkoffset + 16] = struct.pack('<IIII', *dec_block)
iv = enc_block
# dec_data contains chunks
offset = 0
chunk_index = 0
while offset < len(dec_data):
chunck_length = struct.unpack('<I', dec_data[offset:offset + 4])[0]
rc4_key = dec_data[offset + 4:offset + 0x14]
payload_md5 = dec_data[offset + 0x14:offset + 0x24]
enc_payload = dec_data[offset + 0x24:offset + 0x24 + chunck_length]
print("Chunk {} at {:#x}: {:#x} bytes".format(chunk_index, offset, chunck_length))
if chunck_length == 0:
break
keystream = rc4.RC4(rc4_key)
dec_payload = bytearray(e ^ k for e, k in zip(enc_payload, keystream))
with open('decrypted_chunk_{}.bin'.format(chunk_index), 'wb') as f:
f.write(dec_payload)
print(" {}".format(binascii.hexlify(payload_md5).decode('ascii')))
print(" {}".format(hashlib.md5(dec_payload).hexdigest()))
assert payload_md5 == hashlib.md5(dec_payload).digest()
offset += 0x24 + chunck_length
chunk_index += 1
"""
Chunk 0 at 0x0: 0x39 bytes
a83bd78eaf49903dfd64447fcd35831a
a83bd78eaf49903dfd64447fcd35831a
Chunk 1 at 0x5d: 0xc15 bytes
ad2713a0668ac3f421a00b7b21430b4f
ad2713a0668ac3f421a00b7b21430b4f
Chunk 2 at 0xc96: 0x34631 bytes
671d51af77f541605ea91e81e8dc70f0
671d51af77f541605ea91e81e8dc70f0
Chunk 3 at 0x352eb: 0x1b234 bytes
8ff9f891acf83a5ee95f69084b4d48d2
8ff9f891acf83a5ee95f69084b4d48d2
Chunk 4 at 0x50543: 0xfbe0 bytes
c4e5abbc8c4ddff3853db0fcb9eb55ff
c4e5abbc8c4ddff3853db0fcb9eb55ff
Chunk 5 at 0x60147: 0xb9f7 bytes
0cb3389fedc86b4ff4a86db0b492b273
0cb3389fedc86b4ff4a86db0b492b273
Chunk 6 at 0x6bb62: 0x83d5 bytes
03d5e4c549945d4ac5b1e3b973606d61
03d5e4c549945d4ac5b1e3b973606d61
Chunk 7 at 0x73f5b: 0x12500a bytes
581ae98e6119f7672ba38c74b1c427ce
581ae98e6119f7672ba38c74b1c427ce
Chunk 8 at 0x198f89: 0x0 bytes
"""
| 32.04878 | 86 | 0.691781 |
import binascii
import hashlib
import os.path
import struct
import sys
sys.path.insert(0, os.path.dirname(__file__))
import rc4
def rol32(val, shift):
val = val & 0xffffffff
shift = shift & 0x1f
if not shift:
return val
return ((val << shift) & 0xffffffff) | (val >> (32 - shift))
def ror32(val, shift):
return rol32(val, 32 - shift)
with open('extracted_encrypted_data.bin', 'rb') as f:
all_data = f.read()
enc_data = all_data[0x40:]
def rc6_decrypt(ks, enc_block):
a, b, c, d = enc_block
a -= ks[0xa8 // 4]
c -= ks[0xac // 4]
for iround in range(19, -1, -1):
a, b, c, d = [x & 0xffffffff for x in (d, a, b, c)]
u = ror32(d * (2 * d + 1), 5)
t = ror32(b * (2 * b + 1), 5)
c = rol32(c - ks[2 * iround + 3], t) ^ u
a = rol32(a - ks[2 * iround + 2], u) ^ t
d = (d - ks[1]) & 0xffffffff
b = (b - ks[0]) & 0xffffffff
return a, b, c, d
key_state = [
0x2129ab75, 0x975374c8, 0x5eead5ac, 0x2c8b312f,
0xfd0a1322, 0x80d0133c, 0x16a849c2, 0x42064c4a,
0x75fe77f5, 0x4ddaf4d7, 0xe9221458, 0x46a97a25,
0xfea74495, 0xe119d517, 0x055f2605, 0xc6706c81,
0x4d966822, 0xadc3e831, 0x68c68bdf, 0xfcb57dac,
0x7df33f01, 0xefb6081f, 0x98eb29eb, 0x668352b7,
0x98a1545b, 0x0a3e64cd, 0x9b16a929, 0x2233c1c4,
0x7879ec25, 0x17c4466a, 0x6e0b37ea, 0xde30ebb2,
0x01ef095c, 0x35fbdb33, 0xa97b35b7, 0xdfbf652c,
0xaf668798, 0xb7846548, 0xafd8706a, 0x2d346ced,
0xbb33dfe3, 0xae79adfc, 0xc3115146, 0x05a51471,
]
iv = [0, 0, 0, 0]
dec_data = bytearray(len(enc_data))
for blkoffset in range(0, len(enc_data), 16):
enc_block = struct.unpack('<IIII', enc_data[blkoffset:blkoffset + 16])
dec_block = rc6_decrypt(key_state, enc_block)
dec_block = [i ^ d for i, d in zip(iv, dec_block)]
dec_data[blkoffset:blkoffset + 16] = struct.pack('<IIII', *dec_block)
iv = enc_block
offset = 0
chunk_index = 0
while offset < len(dec_data):
chunck_length = struct.unpack('<I', dec_data[offset:offset + 4])[0]
rc4_key = dec_data[offset + 4:offset + 0x14]
payload_md5 = dec_data[offset + 0x14:offset + 0x24]
enc_payload = dec_data[offset + 0x24:offset + 0x24 + chunck_length]
print("Chunk {} at {:#x}: {:#x} bytes".format(chunk_index, offset, chunck_length))
if chunck_length == 0:
break
keystream = rc4.RC4(rc4_key)
dec_payload = bytearray(e ^ k for e, k in zip(enc_payload, keystream))
with open('decrypted_chunk_{}.bin'.format(chunk_index), 'wb') as f:
f.write(dec_payload)
print(" {}".format(binascii.hexlify(payload_md5).decode('ascii')))
print(" {}".format(hashlib.md5(dec_payload).hexdigest()))
assert payload_md5 == hashlib.md5(dec_payload).digest()
offset += 0x24 + chunck_length
chunk_index += 1
| true | true |
f73087de99d28d62c47ee47f567d8a406a1bc2b8 | 1,502 | py | Python | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-08-13T15:24:33.000Z | 2021-10-18T18:38:19.000Z | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2020-07-24T17:09:12.000Z | 2022-01-17T22:39:06.000Z | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-01-22T14:01:28.000Z | 2020-07-23T21:35:12.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eztrace(AutotoolsPackage):
"""EZTrace is a tool to automatically generate execution traces
of HPC applications."""
homepage = "http://eztrace.gforge.inria.fr"
url = "https://gitlab.com/eztrace/eztrace/-/archive/eztrace-1.1-10/eztrace-eztrace-1.1-10.tar.gz"
maintainers = ['trahay']
version('1.1-10', sha256='97aba8f3b3b71e8e2f7ef47e00c262234e27b9cb4a870c85c525317a83a3f0d4')
depends_on('mpi')
# Does not work on Darwin due to MAP_POPULATE
conflicts('platform=darwin')
def patch(self):
filter_file(
'"DEFAULT_OUTFILE"',
'" DEFAULT_OUTFILE "',
'extlib/gtg/extlib/otf/tools/otfshrink/otfshrink.cpp',
string=True
)
def setup_build_environment(self, env):
if self.spec.satisfies('%fj'):
env.set('LDFLAGS', '--linkfortran')
def configure_args(self):
args = ["--with-mpi={0}".format(self.spec["mpi"].prefix)]
return args
@run_before('build')
def fix_libtool(self):
if self.spec.satisfies('%fj'):
libtools = ['extlib/gtg/libtool',
'extlib/opari2/build-frontend/libtool']
for f in libtools:
filter_file('wl=""', 'wl="-Wl,"', f, string=True)
| 31.957447 | 106 | 0.62783 |
from spack import *
class Eztrace(AutotoolsPackage):
homepage = "http://eztrace.gforge.inria.fr"
url = "https://gitlab.com/eztrace/eztrace/-/archive/eztrace-1.1-10/eztrace-eztrace-1.1-10.tar.gz"
maintainers = ['trahay']
version('1.1-10', sha256='97aba8f3b3b71e8e2f7ef47e00c262234e27b9cb4a870c85c525317a83a3f0d4')
depends_on('mpi')
conflicts('platform=darwin')
def patch(self):
filter_file(
'"DEFAULT_OUTFILE"',
'" DEFAULT_OUTFILE "',
'extlib/gtg/extlib/otf/tools/otfshrink/otfshrink.cpp',
string=True
)
def setup_build_environment(self, env):
if self.spec.satisfies('%fj'):
env.set('LDFLAGS', '--linkfortran')
def configure_args(self):
args = ["--with-mpi={0}".format(self.spec["mpi"].prefix)]
return args
@run_before('build')
def fix_libtool(self):
if self.spec.satisfies('%fj'):
libtools = ['extlib/gtg/libtool',
'extlib/opari2/build-frontend/libtool']
for f in libtools:
filter_file('wl=""', 'wl="-Wl,"', f, string=True)
| true | true |
f7308819cec616f6758ef2e8960686762613ad90 | 18,855 | py | Python | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | """
API Base class with util functions
"""
import json
import datetime
import uuid
from warnings import warn
from typing import Dict, List, Tuple
from urllib.parse import unquote
import re
import xmltodict
import requests
from ..exceptions import SageIntacctSDKError, ExpiredTokenError, InvalidTokenError, NoPrivilegeError, \
WrongParamsError, NotFoundItemError, InternalServerError, DataIntegrityWarning
from .constants import dimensions_fields_mapping
class ApiBase:
    """Base class for all Sage Intacct API classes.

    Handles sender/session authentication, XML (de)serialization against the
    Intacct gateway, error decoding, and the generic CRUD/query helpers that
    dimension-specific subclasses reuse.
    """

    def __init__(self, dimension: str = None, pagesize: int = 2000, post_legacy_method: str = None):
        """
        :param dimension: Intacct object name this client targets (e.g. 'VENDOR')
        :param pagesize: page size used by the paginated readers (get_all, get_by_query)
        :param post_legacy_method: legacy function name used by post() for some dimensions
        """
        self._sender_id = None
        self._sender_password = None
        self._session_id = None
        self._api_url = 'https://api.intacct.com/ia/xml/xmlgw.phtml'
        self._dimension = dimension
        self._pagesize = pagesize
        self._post_legacy_method = post_legacy_method

    @property
    def dimension(self):
        """The Intacct dimension (object name) this client operates on."""
        # Bug fix: this getter previously returned self._post_legacy_method,
        # so reading .dimension never reflected what the setter stored.
        return self._dimension

    @dimension.setter
    def dimension(self, dimension: str):
        """Set the dimension (object name) used by API calls."""
        self._dimension = dimension

    @property
    def post_legacy_method(self):
        """Legacy function name used by post() for non-standard dimensions."""
        return self._post_legacy_method

    @post_legacy_method.setter
    def post_legacy_method(self, post_legacy_method: str):
        """Set the legacy post method name."""
        self._post_legacy_method = post_legacy_method

    @property
    def sender_id(self):
        """Sender id placed in the request control block."""
        return self._sender_id

    @sender_id.setter
    def sender_id(self, sender_id: str):
        """Set the sender id for APIs."""
        self._sender_id = sender_id

    def set_sender_id(self, sender_id: str):
        """Set the sender id (kept for backward compatibility with the property)."""
        self._sender_id = sender_id

    def set_sender_password(self, sender_password: str):
        """Set the sender password for APIs."""
        self._sender_password = sender_password

    def get_session_id(self, user_id: str, company_id: str, user_password: str, entity_id: str = None):
        """Authenticate with user credentials and cache the session id.

        Also switches the client to the endpoint URL returned by the gateway.

        :param user_id: Intacct user id
        :param company_id: Intacct company id
        :param user_password: Intacct user password
        :param entity_id: optional entity (location) id
        :return: the session id (str)
        :raises SageIntacctSDKError: when authentication fails
        """
        timestamp = datetime.datetime.now()
        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'login': {
                            'userid': user_id,
                            'companyid': company_id,
                            'password': user_password,
                            'locationid': entity_id
                        }
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            'getAPISession': None
                        }
                    }
                }
            }
        }
        response = self._post_request(dict_body, self._api_url)
        if response['authentication']['status'] == 'success':
            session_details = response['result']['data']['api']
            # Subsequent calls must go to the per-company endpoint.
            self._api_url = session_details['endpoint']
            self._session_id = session_details['sessionid']
            return self._session_id
        else:
            raise SageIntacctSDKError('Error: {0}'.format(response['errormessage']))

    def set_session_id(self, session_id: str):
        """Set a previously obtained session id (kept for backward compatibility)."""
        self._session_id = session_id

    def _support_id_msg(self, errormessages):
        """Classify the 'error' entry of an Intacct error payload.

        Parameters:
            errormessages (dict): error message received from Sage Intacct.

        Returns:
            dict with keys 'error' (first error dict) and 'type'
            ('list' or 'dict'); empty dict for unexpected shapes.
        """
        error = {}
        if isinstance(errormessages['error'], list):
            error['error'] = errormessages['error'][0]
            error['type'] = 'list'
        elif isinstance(errormessages['error'], dict):
            error['error'] = errormessages['error']
            error['type'] = 'dict'
        return error

    def _decode_support_id(self, errormessages):
        """URL-decode the Support ID embedded in an Intacct error message.

        Parameters:
            errormessages (dict): error message received from Sage Intacct.

        Returns:
            The same structure with 'error' normalized to a list and the
            Support ID (if present) decoded inside 'description2'.
        """
        support_id_msg = self._support_id_msg(errormessages)
        data_type = support_id_msg.get('type')
        error = support_id_msg.get('error')
        if error and error.get('description2'):
            message = error['description2']
            support_id = re.search('Support ID: (.*)]', message)
            # Guard: the message may not contain a Support ID at all; the
            # previous code called .group(1) on None and crashed.
            if support_id and support_id.group(1):
                decoded_support_id = unquote(support_id.group(1))
                message = message.replace(support_id.group(1), decoded_support_id)
            # Converting dict to list even for single error response
            if data_type == 'dict':
                errormessages['error'] = [errormessages['error']]
            errormessages['error'][0]['description2'] = message if message else None
        return errormessages

    def _post_request(self, dict_body: dict, api_url: str):
        """POST a request body (serialized to XML) and parse the XML reply.

        Parameters:
            dict_body (dict): HTTP POST body for the wanted API.
            api_url (str): URL for the wanted API.

        Returns:
            The parsed 'operation' element (dict) on success.

        Raises:
            WrongParamsError, InvalidTokenError, NoPrivilegeError,
            NotFoundItemError, ExpiredTokenError, InternalServerError or
            SageIntacctSDKError depending on the gateway response.
        """
        api_headers = {
            'content-type': 'application/xml'
        }
        body = xmltodict.unparse(dict_body)
        response = requests.post(api_url, headers=api_headers, data=body)
        # force_list keeps a single record of the target dimension as a list,
        # so downstream code can always iterate.
        parsed_xml = xmltodict.parse(response.text, force_list={self._dimension})
        parsed_response = json.loads(json.dumps(parsed_xml))
        if response.status_code == 200:
            control_status = parsed_response['response']['control']['status']
            if control_status == 'failure':
                exception_msg = self._decode_support_id(parsed_response['response']['errormessage'])
                raise WrongParamsError('Some of the parameters are wrong', exception_msg)
            if control_status != 'success':
                # Unknown control status: raise instead of hitting an unbound
                # variable (previous behavior was a NameError here).
                raise SageIntacctSDKError('Error: {0}'.format(parsed_response))
            api_response = parsed_response['response']['operation']
            if api_response['authentication']['status'] == 'failure':
                raise InvalidTokenError('Invalid token / Incorrect credentials', api_response['errormessage'])
            if api_response['result']['status'] == 'success':
                return api_response
            if api_response['result']['status'] == 'failure':
                exception_msg = self._decode_support_id(api_response['result']['errormessage'])
                for error in exception_msg['error']:
                    if error.get('description2') and 'You do not have permission for API' in error['description2']:
                        raise InvalidTokenError('The user has insufficient privilege', exception_msg)
                raise WrongParamsError('Error during {0}'.format(api_response['result']['function']), exception_msg)
        if response.status_code == 400:
            raise WrongParamsError('Some of the parameters are wrong', parsed_response)
        if response.status_code == 401:
            raise InvalidTokenError('Invalid token / Incorrect credentials', parsed_response)
        if response.status_code == 403:
            raise NoPrivilegeError('Forbidden, the user has insufficient privilege', parsed_response)
        if response.status_code == 404:
            raise NotFoundItemError('Not found item with ID', parsed_response)
        if response.status_code == 498:
            raise ExpiredTokenError('Expired token, try to refresh it', parsed_response)
        if response.status_code == 500:
            raise InternalServerError('Internal server error', parsed_response)
        raise SageIntacctSDKError('Error: {0}'.format(parsed_response))

    def format_and_send_request(self, data: Dict):
        """Wrap a single function payload in the standard envelope and send it.

        Parameters:
            data (dict): single-key dict {function_name: arguments}.

        Returns:
            The 'result' element of the gateway response (dict).
        """
        key = next(iter(data))
        timestamp = datetime.datetime.now()
        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'sessionid': self._session_id
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            key: data[key]
                        }
                    }
                }
            }
        }
        response = self._post_request(dict_body, self._api_url)
        return response['result']

    def post(self, data: Dict):
        """Create/post *data*, dispatching on the configured dimension.

        Some dimensions must use legacy 2.1 functions or special verbs; the
        rest go through the generic 'create' wrapper.
        """
        if self._dimension in ('CCTRANSACTION', 'EPPAYMENT', 'create_invoice', 'create_aradjustment', 'update_invoice', 'update_customer'):
            return self._construct_post_legacy_payload(data)
        elif self._dimension == 'readReport':
            return self._construct_run_report(data)
        elif (self._dimension == 'ARINVOICE' and self._post_legacy_method == 'delete'):
            return self._construct_delete(data)
        elif (self._dimension == 'custom_report'):
            return self._construct_read_custom_report(data)
        else:
            return self._construct_post_payload(data)

    def _construct_post_payload(self, data: Dict):
        """Wrap *data* in a generic 'create' function for this dimension."""
        payload = {
            'create': {
                self._dimension: data
            }
        }
        return self.format_and_send_request(payload)

    def _construct_run_report(self, data: str):
        """Run a stored report by name/id via 'readReport'."""
        payload = {
            "readReport": {
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_read_custom_report(self, data: str):
        """Run a custom report interactively, returning its definition too."""
        payload = {
            "readReport": {
                '@type': "interactive",
                '@returnDef': "true",
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_delete(self, data: str) -> str:
        """Delete the record(s) described by *data*."""
        payload = {"delete": data}
        return self.format_and_send_request(payload)

    def _construct_post_legacy_payload(self, data: Dict):
        """Post *data* via the configured legacy (2.1) function name."""
        payload = {
            self._post_legacy_method: data
        }
        return self.format_and_send_request(payload)

    def _construct_post_legacy_aradjustment_payload(self, data: Dict):
        """Post an AR adjustment via the legacy 'create_aradjustment' function."""
        payload = {
            'create_aradjustment': data
        }
        return self.format_and_send_request(payload)

    def count(self):
        """Return the total record count for this dimension (int)."""
        get_count = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': 'RECORDNO'
                },
                'pagesize': '1'
            }
        }
        response = self.format_and_send_request(get_count)
        return int(response['data']['@totalcount'])

    def read_by_query(self, fields: list = None):
        """Read records via 'readByQuery' (single page of up to 1000).

        Parameters:
            fields (list): selective fields to return (optional, default all).

        Returns:
            Dict (full gateway result).
        """
        payload = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': None,
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(payload)

    def get(self, field: str, value: str, fields: list = None):
        """Get records matching ``field = value``.

        Parameters:
            field (str): field to filter on (required).
            value (str): value to match (required).
            fields (list): selective fields to return (optional).

        Returns:
            Dict ('data' element of the gateway result).
        """
        data = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': "{0} = '{1}'".format(field, value),
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(data)['data']

    def get_all(self, field: str = None, value: str = None, fields: list = None):
        """Get all records of this dimension, optionally filtered by one field.

        Returns:
            List of dicts (one per record), paginated transparently.
        """
        complete_data = []
        count = self.count()
        pagesize = self._pagesize
        for offset in range(0, count, pagesize):
            data = {
                'query': {
                    'object': self._dimension,
                    'select': {
                        'field': fields if fields else dimensions_fields_mapping[self._dimension]
                    },
                    'pagesize': pagesize,
                    'offset': offset
                }
            }
            if field and value:
                data['query']['filter'] = {
                    'equalto': {
                        'field': field,
                        'value': value
                    }
                }
            paginated_data = self.format_and_send_request(data)['data'][self._dimension]
            complete_data.extend(paginated_data)
        return complete_data

    def _build_query_filter(self, and_filter, or_filter):
        """Translate (operator, field, value) triples into Intacct's filter dict.

        Returns None when neither group is provided. Note: duplicate operators
        within one group overwrite each other (pre-existing behavior kept).
        """
        def _fold(triples):
            # {operator: {'field': ..., 'value': ...}} — last triple wins per operator.
            folded = {}
            for operator, field, value in triples:
                folded.setdefault(operator, {}).update({'field': field, 'value': value})
            return folded

        if and_filter and or_filter:
            combined = _fold(and_filter)
            combined['or'] = _fold(or_filter)
            return {'and': combined}
        if and_filter:
            folded = _fold(and_filter)
            return {'and': folded} if len(and_filter) > 1 else folded
        if or_filter:
            folded = _fold(or_filter)
            return {'or': folded} if len(or_filter) > 1 else folded
        return None

    __query_filter = List[Tuple[str, str, str]]

    def get_by_query(self, fields: List[str] = None,
                     and_filter: __query_filter = None,
                     or_filter: __query_filter = None,
                     filter_payload: dict = None):
        """Get data from Sage Intacct using the query method, with filters.

        See https://developer.intacct.com/web-services/queries/ for the
        filter structures.

        Parameters:
            fields (list): fields to select (optional).
            and_filter (list(tuple)): (operator, field, value) triples, ANDed.
            or_filter (list(tuple)): (operator, field, value) triples, ORed.
            filter_payload (dict): pre-formatted filter; used only when no
                and_filter/or_filter is given.

            For the 'between' operator the value must be [str, str]; for the
            'in' operator it may be [str, str, str, ...].

        Returns:
            List of dicts (one per record).
        """
        complete_data = []
        count = self.count()
        pagesize = self._pagesize
        data = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': fields if fields else dimensions_fields_mapping[self._dimension]
                },
                'pagesize': pagesize,
                'offset': 0
            }
        }
        formatted_filter = self._build_query_filter(and_filter, or_filter) or filter_payload
        if formatted_filter:
            data['query']['filter'] = formatted_filter
        # Initialized here so the completeness check below cannot hit a
        # NameError when count == 0 (previous behavior).
        filtered_total = 0
        for offset in range(0, count, pagesize):
            data['query']['offset'] = offset
            paginated_data = self.format_and_send_request(data)['data']
            try:
                complete_data.extend(paginated_data[self._dimension])
            except (KeyError, TypeError):
                # Page contained no rows for this dimension; keep paging.
                pass
            filtered_total = int(paginated_data['@totalcount'])
            if paginated_data['@numremaining'] == '0':
                break
        if filtered_total != len(complete_data):
            warn(message='Your data may not be complete. Records returned do not equal total query record count',
                 category=DataIntegrityWarning)
        return complete_data

    def get_lookup(self):
        """Return all fields with attributes for this dimension.

        Returns:
            Dict ('data' element of the gateway result).
        """
        data = {'lookup': {'object': self._dimension}}
        return self.format_and_send_request(data)['data']
| 34.723757 | 137 | 0.547759 | import json
import datetime
import uuid
from warnings import warn
from typing import Dict, List, Tuple
from urllib.parse import unquote
import re
import xmltodict
import requests
from ..exceptions import SageIntacctSDKError, ExpiredTokenError, InvalidTokenError, NoPrivilegeError, \
WrongParamsError, NotFoundItemError, InternalServerError, DataIntegrityWarning
from .constants import dimensions_fields_mapping
class ApiBase:
def __init__(self, dimension: str = None, pagesize: int = 2000, post_legacy_method: str = None):
self._sender_id = None
self._sender_password = None
self._session_id = None
self._api_url = 'https://api.intacct.com/ia/xml/xmlgw.phtml'
self._dimension = dimension
self._pagesize = pagesize
self._post_legacy_method = post_legacy_method
@property
def dimension(self):
return self._post_legacy_method
@dimension.setter
def dimension(self, dimension: str):
self._dimension = dimension
@property
def post_legacy_method(self):
return self._post_legacy_method
@post_legacy_method.setter
def post_legacy_method(self, post_legacy_method: str):
self._post_legacy_method = post_legacy_method
@property
def sender_id(self):
return self._sender_id
@sender_id.setter
def sender_id(self, sender_id: str):
self._sender_id = sender_id
def set_sender_id(self, sender_id: str):
self._sender_id = sender_id
def set_sender_password(self, sender_password: str):
self._sender_password = sender_password
def get_session_id(self, user_id: str, company_id: str, user_password: str, entity_id: str = None):
timestamp = datetime.datetime.now()
dict_body = {
'request': {
'control': {
'senderid': self._sender_id,
'password': self._sender_password,
'controlid': timestamp,
'uniqueid': False,
'dtdversion': 3.0,
'includewhitespace': False
},
'operation': {
'authentication': {
'login': {
'userid': user_id,
'companyid': company_id,
'password': user_password,
'locationid': entity_id
}
},
'content': {
'function': {
'@controlid': str(uuid.uuid4()),
'getAPISession': None
}
}
}
}
}
response = self._post_request(dict_body, self._api_url)
if response['authentication']['status'] == 'success':
session_details = response['result']['data']['api']
self._api_url = session_details['endpoint']
self._session_id = session_details['sessionid']
return self._session_id
else:
raise SageIntacctSDKError('Error: {0}'.format(response['errormessage']))
def set_session_id(self, session_id: str):
self._session_id = session_id
def _support_id_msg(self, errormessages):
error = {}
if isinstance(errormessages['error'], list):
error['error'] = errormessages['error'][0]
error['type'] = 'list'
elif isinstance(errormessages['error'], dict):
error['error'] = errormessages['error']
error['type'] = 'dict'
return error
def _decode_support_id(self, errormessages):
support_id_msg = self._support_id_msg(errormessages)
data_type = support_id_msg['type']
error = support_id_msg['error']
if (error and error['description2']):
message = error['description2']
support_id = re.search('Support ID: (.*)]', message)
if support_id.group(1):
decoded_support_id = unquote(support_id.group(1))
message = message.replace(support_id.group(1), decoded_support_id)
if data_type == 'dict':
errormessages['error'] = [errormessages['error']]
errormessages['error'][0]['description2'] = message if message else None
return errormessages
def _post_request(self, dict_body: dict, api_url: str):
api_headers = {
'content-type': 'application/xml'
}
body = xmltodict.unparse(dict_body)
response = requests.post(api_url, headers=api_headers, data=body)
parsed_xml = xmltodict.parse(response.text, force_list={self._dimension})
parsed_response = json.loads(json.dumps(parsed_xml))
if response.status_code == 200:
if parsed_response['response']['control']['status'] == 'success':
api_response = parsed_response['response']['operation']
if parsed_response['response']['control']['status'] == 'failure':
exception_msg = self._decode_support_id(parsed_response['response']['errormessage'])
raise WrongParamsError('Some of the parameters are wrong', exception_msg)
if api_response['authentication']['status'] == 'failure':
raise InvalidTokenError('Invalid token / Incorrect credentials', api_response['errormessage'])
if api_response['result']['status'] == 'success':
return api_response
if api_response['result']['status'] == 'failure':
exception_msg = self._decode_support_id(api_response['result']['errormessage'])
for error in exception_msg['error']:
if error['description2'] and 'You do not have permission for API' in error['description2']:
raise InvalidTokenError('The user has insufficient privilege', exception_msg)
raise WrongParamsError('Error during {0}'.format(api_response['result']['function']), exception_msg)
if response.status_code == 400:
raise WrongParamsError('Some of the parameters are wrong', parsed_response)
if response.status_code == 401:
raise InvalidTokenError('Invalid token / Incorrect credentials', parsed_response)
if response.status_code == 403:
raise NoPrivilegeError('Forbidden, the user has insufficient privilege', parsed_response)
if response.status_code == 404:
raise NotFoundItemError('Not found item with ID', parsed_response)
if response.status_code == 498:
raise ExpiredTokenError('Expired token, try to refresh it', parsed_response)
if response.status_code == 500:
raise InternalServerError('Internal server error', parsed_response)
raise SageIntacctSDKError('Error: {0}'.format(parsed_response))
def format_and_send_request(self, data: Dict):
key = next(iter(data))
timestamp = datetime.datetime.now()
dict_body = {
'request': {
'control': {
'senderid': self._sender_id,
'password': self._sender_password,
'controlid': timestamp,
'uniqueid': False,
'dtdversion': 3.0,
'includewhitespace': False
},
'operation': {
'authentication': {
'sessionid': self._session_id
},
'content': {
'function': {
'@controlid': str(uuid.uuid4()),
key: data[key]
}
}
}
}
}
response = self._post_request(dict_body, self._api_url)
return response['result']
def post(self, data: Dict):
if self._dimension in ('CCTRANSACTION', 'EPPAYMENT', 'create_invoice', 'create_aradjustment','update_invoice','update_customer'):
return self._construct_post_legacy_payload(data)
elif self._dimension == 'readReport':
return self._construct_run_report(data)
elif (self._dimension == 'ARINVOICE' and self._post_legacy_method=='delete'):
return self._construct_delete(data)
elif (self._dimension =='custom_report'):
return self._construct_read_custom_report(data)
else:
return self._construct_post_payload(data)
def _construct_post_payload(self, data: Dict):
payload = {
'create': {
self._dimension: data
}
}
return self.format_and_send_request(payload)
def _construct_run_report(self, data: str):
payload = {
"readReport": {
'report': data
}}
return self.format_and_send_request(payload)
def _construct_read_custom_report(self, data: str):
payload = {
"readReport": {
'@type': "interactive",
'@returnDef': "true",
'report': data
}}
return self.format_and_send_request(payload)
def _construct_delete(self, data: str) -> str:
payload = {"delete": data}
return self.format_and_send_request(payload)
def _construct_post_legacy_payload(self, data: Dict):
payload = {
self._post_legacy_method: data
}
return self.format_and_send_request(payload)
def _construct_post_legacy_aradjustment_payload(self, data: Dict):
payload = {
'create_aradjustment': data
}
return self.format_and_send_request(payload)
def count(self):
get_count = {
'query': {
'object': self._dimension,
'select': {
'field': 'RECORDNO'
},
'pagesize': '1'
}
}
response = self.format_and_send_request(get_count)
return int(response['data']['@totalcount'])
def read_by_query(self, fields: list = None):
payload = {
'readByQuery': {
'object': self._dimension,
'fields': ','.join(fields) if fields else '*',
'query': None,
'pagesize': '1000'
}
}
return self.format_and_send_request(payload)
def get(self, field: str, value: str, fields: list = None):
data = {
'readByQuery': {
'object': self._dimension,
'fields': ','.join(fields) if fields else '*',
'query': "{0} = '{1}'".format(field, value),
'pagesize': '1000'
}
}
return self.format_and_send_request(data)['data']
def get_all(self, field: str = None, value: str = None, fields: list = None):
complete_data = []
count = self.count()
pagesize = self._pagesize
for offset in range(0, count, pagesize):
data = {
'query': {
'object': self._dimension,
'select': {
'field': fields if fields else dimensions_fields_mapping[self._dimension]
},
'pagesize': pagesize,
'offset': offset
}
}
if field and value:
data['query']['filter'] = {
'equalto': {
'field': field,
'value': value
}
}
paginated_data = self.format_and_send_request(data)['data'][self._dimension]
complete_data.extend(paginated_data)
return complete_data
__query_filter = List[Tuple[str, str, str]]
def get_by_query(self, fields: List[str] = None,
and_filter: __query_filter = None,
or_filter: __query_filter = None,
filter_payload: dict = None):
complete_data = []
count = self.count()
pagesize = self._pagesize
offset = 0
formatted_filter = filter_payload
data = {
'query': {
'object': self._dimension,
'select': {
'field': fields if fields else dimensions_fields_mapping[self._dimension]
},
'pagesize': pagesize,
'offset': offset
}
}
if and_filter and or_filter:
formatted_filter = {'and': {}}
for operator, field, value in and_filter:
formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
formatted_filter['and']['or'] = {}
for operator, field, value in or_filter:
formatted_filter['and']['or'].setdefault(operator, {}).update({'field': field, 'value': value})
elif and_filter:
if len(and_filter) > 1:
formatted_filter = {'and': {}}
for operator, field, value in and_filter:
formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
else:
formatted_filter = {}
for operator, field, value in and_filter:
formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})
elif or_filter:
if len(or_filter) > 1:
formatted_filter = {'or': {}}
for operator, field, value in or_filter:
formatted_filter['or'].setdefault(operator, {}).update({'field': field, 'value': value})
else:
formatted_filter = {}
for operator, field, value in or_filter:
formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})
if formatted_filter:
data['query']['filter'] = formatted_filter
for offset in range(0, count, pagesize):
data['query']['offset'] = offset
paginated_data = self.format_and_send_request(data)['data']
try:
complete_data.extend(paginated_data[self._dimension])
except:
pass
filtered_total = int(paginated_data['@totalcount'])
if paginated_data['@numremaining'] == '0':
break
if filtered_total != len(complete_data):
warn(message='Your data may not be complete. Records returned do not equal total query record count',
category=DataIntegrityWarning)
return complete_data
def get_lookup(self):
data = {'lookup': {'object': self._dimension}}
return self.format_and_send_request(data)['data']
| true | true |
f730894e33a039bd60f085adc8ba9927f4c02448 | 17,661 | py | Python | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | 1 | 2021-02-19T17:23:04.000Z | 2021-02-19T17:23:04.000Z | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | null | null | null | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | 1 | 2020-11-16T05:59:49.000Z | 2020-11-16T05:59:49.000Z | import requests
import json
from datetime import datetime
import time
import pandas as pd
from pandas import DataFrame as df
import hmac
import hashlib
from interval_enum import Interval
from order_enum import Order
class BinanceClient:
def __init__(self, api_key, api_secret):
self.key = api_key
self.secret = api_secret
self.base = 'https://api.binance.com'
self.endpoint = {
'klines': '/api/v1/klines',
'price_ticker': '/api/v3/ticker/price',
'24hr_ticker': '/api/v3/ticker/24hr',
'historical_trade': '/api/v3/historicalTrades', # recent trades on the market
'order': '/api/v3/order',
'test_order': '/api/v3/order/test',
'open_order': '/api/v3/openOrders', # all open orders
'all_order': '/api/v3/allOrders', # all orders: active, cancelled, filler
'my_trade': '/api/v3/myTrades' # all trades for a specific symbol on the account
}
'''
***********************************************************
GET METHODS
***********************************************************
'''
'''
return klines for a specified symbol
@param
required - symbol: str, interval: Interval
'''
    def get_klines(self, symbol, interval):
        """Return candlestick (kline) data for *symbol* at *interval*.

        Epoch-millisecond open/close times are converted to datetime and the
        ignored 12th kline field is dropped before naming the columns.
        """
        # request body parameters; interval is an Interval enum member
        params = {
            'symbol': symbol,
            'interval': interval.value
        }
        # target URL for the klines endpoint
        url = self.base + self.endpoint['klines']
        # get api response
        response = requests.get(url, params=params)
        # convert json to dict
        data = json.loads(response.text)
        # convert dict to data frame (columns are positional: 0..11)
        klines_df = df(data)
        # get open time and close time from klines_df
        o_timestamp_df = klines_df[0] # open timestamp
        c_timestamp_df = klines_df[6] # close timestamp
        # create empty arrays for formatted datetime
        o_time = [] # open time
        c_time = [] # close time
        # convert epoch-millisecond timestamps to datetime
        for (o_timestamp, c_timestamp) in zip(o_timestamp_df, c_timestamp_df):
            o_time.append(datetime.fromtimestamp(int(o_timestamp/1000)))
            c_time.append(datetime.fromtimestamp(int(c_timestamp/1000)))
        # wrap converted times back into frames for column assignment
        o_timestamp_df = df(o_time)
        c_timestamp_df = df(c_time)
        # replace the original timestamps with the converted datetimes
        klines_df[0] = o_timestamp_df
        klines_df[6] = c_timestamp_df
        # drop column 11 (the API's unused "ignore" field), then label columns
        klines_df.pop(11)
        klines_df.columns = ['openTime', 'open', 'high', 'low', 'close',
                             'volume', 'closeTime', 'quoteAssetVol',
                             'no. of trades', 'taker_buy_baseAssetVol',
                             'taker_buy_quoteAssetVol']
        return klines_df
'''
return current price
1. for a symbol if symbol is specified
2. for all symbols
@param
optional - symbol: str
'''
def get_price(self, symbol=None):
# specifying parameters for request body
params = {
'symbol': symbol
}
# specifying url endpoint
url = self.base + self.endpoint['price_ticker']
# get api response
response = requests.get(url, params=params)
# convert json to dict
data = json.loads(response.text)
# convert dict to dataframe
if isinstance(data, list):
price_df = df(data)
else:
price_df = df([data])
return price_df
'''
return 24 hour ticker
1. for a symbol if symbol is specified
2. for all symbols
@param
optional - symbol: str
'''
def get_24hr_ticker(self, symbol=None):
# specify parameters for request body
params = {
'symbol': symbol
}
# specifying url endpoint
url = self.base + self.endpoint['24hr_ticker']
# request api response
response = requests.get(url, params=params)
# convert json to dict
data = json.loads(response.text)
# convert dict to dataframe
if isinstance(data, list):
ticker_df = df(data)
else:
ticker_df = df([data])
# get openTime and closeTime from ticker_df
open_time_df = ticker_df['openTime']
close_time_df = ticker_df['closeTime']
# create new empty arrays for openTime and closeTime
open_time = []
close_time = []
# convert timestamps to datetime format
for (o, c) in zip(open_time_df, close_time_df):
open_time.append(datetime.fromtimestamp(int(o/1000)))
close_time.append(datetime.fromtimestamp(int(c/1000)))
# convert timestamps to string format
open_time_df = df(open_time)
close_time_df = df(close_time)
# replace timestamps in ticker_df with formatted timestamps
ticker_df['openTime'] = open_time_df
ticker_df['closeTime'] = close_time_df
return ticker_df
'''
return list of historical trades
1. start from a specific trade if tradeId is specified upto
the specified amount of trade records
2. most recent trades if tradeId is not specified
a. most recent 500 trades if limit is not specified
b. the amount of trades specified by limit
@param
required - symbol: str
optional - limit: int, tradeId: long
'''
def get_historical_trade(self, symbol, limit=None, tradeId=None):
# specifying parameter for request body
params = {
'symbol': symbol,
'limit': limit,
'fromId': tradeId
}
# specifying url endpoint
url = self.base + self.endpoint['historical_trade']
# request api response
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
data = json.loads(response.text)
# when exception occurs
if not isinstance(data, list):
return data
# convert dict to dataframe
trade_df = df(data)
if not trade_df.empty:
# get time from trade_df
time_df = trade_df['time']
# make new empty array for time
_time = []
# convert timestamp to datetime format
for t in time_df:
_time.append(datetime.fromtimestamp(int(t/1000)))
# convert timestamp to string format
time_df = df(_time)
# replace timestamp in trade_df with formatted timestamp
trade_df['time'] = time_df
return trade_df
'''
get the status of an order
@param
required - symbol: str, orderId: long
'''
def get_query_order(self, symbol, orderId):
# specify parameters for request body
params = {
'symbol': symbol,
'orderId': orderId,
'timestamp': int(round(time.time()*1000))
}
# specify url endpoint
url = self.base + self.endpoint['order']
# sign request
self.sign_request(params)
# request api response
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
data = json.loads(response.text)
return data
'''
return list of open orders
1. of a symbol if symbol is specified
2. of all symbols if symbol is not specified
@param
optional - symbol: str
'''
def get_open_order(self, symbol=None):
# specify general paramenters for request body
params = {
'timestamp': int(round(time.time()*1000))
}
# specify optional parameters for request body
if symbol != None:
params['symbol'] = symbol
# specify url endpoint
url = self.base + self.endpoint['open_order']
# sign request
self.sign_request(params)
# request api response
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
# convert json to dict
data = json.loads(response.text)
# when exception occurs
if not isinstance(data, list):
return data
# convert dict to dataframe
open_order_df = df(data)
# if dataframe is not empty
if not open_order_df.empty:
# get time and updateTime form open_order_df
time_df = open_order_df['time'] # time
updateTime_df = open_order_df['updateTime'] # updateTime
# create new empty arrays for time and updateTime
_time = []
_updateTime = []
# convert time and updateTime to datetime format
for (t, u) in zip(time_df, updateTime_df):
_time.append(datetime.fromtimestamp(int(t/1000)))
_updateTime.append(datetime.fromtimestamp(int(u/1000)))
# convert time and updateTime to df
time_df = df(_time)
updateTime_df = df(_updateTime)
# replace original timestamps with formatted timestamps in open_order_df
open_order_df['time'] = time_df
open_order_df['updateTime'] = updateTime_df
return open_order_df
'''
return all orders of the specified symbol: active, canceled, filled
1. if orderId is specified, return orders with id >= orderId
2. else, return most recent orders for this symbol
@param
required - symbol: str
optional - orderId: long, limit: int
'''
def get_all_order(self, symbol, orderId=None, limit=None):
# specify the general parameters for request body
params = {
'symbol': symbol,
'timestamp': int(round(time.time()*1000))
}
# specify optional parameters for request body
if limit != None:
if orderId != None:
params['orderId'] = orderId
params['limit'] = limit
else:
params['limit'] = limit
else:
if orderId != None:
params['orderId'] = orderId
# specify url endpoint
url = self.base + self.endpoint['all_order']
# sign request
self.sign_request(params)
# request api response
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
# convert json to dict
data = json.loads(response.text)
# when exception occurs
if not isinstance(data, list):
return data
# convert data to dataframe
all_order_df = df(data)
# time and updateTime from all_order_df
time_df = all_order_df['time'] # time
updateTime_df = all_order_df['updateTime'] # updateTime
# create new empty arrays for time and updateTime
_time = []
_updateTime = []
# convert time and updateTime to datetime format
for (t, u) in zip(time_df, updateTime_df):
_time.append(datetime.fromtimestamp(int(t/1000)))
_updateTime.append(datetime.fromtimestamp(int(u/1000)))
# convert time and updateTime to df
time_df = df(_time)
updateTime_df = df(_updateTime)
# replace original timestamps with formatted timestamps in all_order_df
all_order_df['time'] = time_df
all_order_df['updateTime'] = updateTime_df
return all_order_df
'''
***********************************************************
POST METHODS
***********************************************************
'''
'''
make a new order
1. set test=True if want to test order
    2. set test=False if want to place order and the order is reflected on the account
@private
@params
required - symbol: str, side: enum, orderType: enum
'''
    def __new_order(self, symbol, side, orderType, test=True, timeInForce=None, quantity=None,
                    quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (test=False) or validate (test=True) a new order.

        Required: symbol (str), side (Order enum), orderType (Order enum).
        The optional parameters required depend on orderType (see the branch
        ladder below); quoteOrderQty is accepted but currently never sent.
        Returns the parsed JSON response from the API.
        """
        # specify the general parameters for request body
        params = {
            'symbol': symbol,
            'side': side.value,
            'type': orderType.value,
            'newOrderRespType': 'RESULT',
            'timestamp': int(round(time.time()*1000))
        }
        # specify option parameters for request body
        # (each order type mandates a different subset of fields)
        if orderType == Order.LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.MARKET:
            params['quantity'] = quantity
        elif orderType == Order.STOP_LOSS:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.STOP_LOSS_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.TAKE_PROFIT:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.TAKE_PROFIT_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.LIMIT_MAKER:
            params['quantity'] = quantity
            params['price'] = price
        else:
            raise Exception('Invalid order type.')
        # specify url endpoint
        # (the test endpoint validates the order without placing it)
        if test == True:
            url = self.base + self.endpoint['test_order']
        else:
            url = self.base + self.endpoint['order']
        # sign request
        self.sign_request(params)
        # initialize new order, request api response
        response = requests.post(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data
'''
make a new buy order
1. set test=True if want to test buy order
    2. set test=False if want to place buy order and the buy order is reflected on the account
@params
required - symbol: str, orderType: enum
'''
def buy(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
return self.__new_order(symbol, Order.BUY, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)
'''
make a new sell order
1. set test=True if want to test sell order
    2. set test=False if want to place sell order and the sell order is reflected on the account
@params
required - symbol: str, orderType: enum
'''
def sell(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
return self.__new_order(symbol, Order.SELL, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)
'''
***********************************************************
DELETE METHODS
***********************************************************
'''
'''
cancel an open order
@param
@require symbol: str, orderId: long
'''
def cancel_order(self, symbol, orderId):
# specify parameters for request body
params = {
'symbol': symbol,
'orderId': orderId,
'timestamp': int(round(time.time()*1000))
}
# specify url endpoint
url = self.base + self.endpoint['order']
# sign request
self.sign_request(params)
# initialize cancel order, request api response
response = requests.delete(url, params=params, headers={'X-MBX-APIKEY': self.key})
data = json.loads(response.text)
return data
'''
sign your request to Binance API
'''
def sign_request(self, params: dict):
#make a query string
query_string = '&'.join(["{}={}".format(d,params[d]) for d in params])
#hashing secret
signature = hmac.new(self.secret.encode('utf-8'),
query_string.encode('utf-8'),
hashlib.sha256)
# add your signature to the request body
params['signature'] = signature.hexdigest()
| 33.135084 | 120 | 0.554895 | import requests
import json
from datetime import datetime
import time
import pandas as pd
from pandas import DataFrame as df
import hmac
import hashlib
from interval_enum import Interval
from order_enum import Order
class BinanceClient:
    """Minimal REST client for the Binance spot API.

    Wraps the public market-data endpoints plus the signed account/order
    endpoints.  Epoch-millisecond timestamps returned by the API are
    converted to ``datetime`` objects in the returned DataFrames.
    """
    def __init__(self, api_key, api_secret):
        # API credentials used for signed endpoints
        self.key = api_key
        self.secret = api_secret
        self.base = 'https://api.binance.com'
        # REST endpoint paths, keyed by short name
        self.endpoint = {
            'klines': '/api/v1/klines',
            'price_ticker': '/api/v3/ticker/price',
            '24hr_ticker': '/api/v3/ticker/24hr',
            'historical_trade': '/api/v3/historicalTrades',
            'order': '/api/v3/order',
            'test_order': '/api/v3/order/test',
            'open_order': '/api/v3/openOrders',
            'all_order': '/api/v3/allOrders',
            'my_trade': '/api/v3/myTrades'
        }
    def get_klines(self, symbol, interval):
        """Return candlestick data for *symbol* at *interval* as a DataFrame."""
        params = {
            'symbol': symbol,
            'interval': interval.value
        }
        url = self.base + self.endpoint['klines']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        klines_df = df(data)
        # columns 0 and 6 hold open/close epoch-millisecond timestamps
        o_timestamp_df = klines_df[0]
        c_timestamp_df = klines_df[6]
        o_time = []
        c_time = []
        for (o_timestamp, c_timestamp) in zip(o_timestamp_df, c_timestamp_df):
            o_time.append(datetime.fromtimestamp(int(o_timestamp/1000)))
            c_time.append(datetime.fromtimestamp(int(c_timestamp/1000)))
        o_timestamp_df = df(o_time)
        c_timestamp_df = df(c_time)
        klines_df[0] = o_timestamp_df
        klines_df[6] = c_timestamp_df
        # drop the unused trailing column returned by the API
        klines_df.pop(11)
        klines_df.columns = ['openTime', 'open', 'high', 'low', 'close',
                             'volume', 'closeTime', 'quoteAssetVol',
                             'no. of trades', 'taker_buy_baseAssetVol',
                             'taker_buy_quoteAssetVol']
        return klines_df
    def get_price(self, symbol=None):
        """Return latest price(s); all symbols when *symbol* is None."""
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['price_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        # single-symbol queries return a dict rather than a list
        if isinstance(data, list):
            price_df = df(data)
        else:
            price_df = df([data])
        return price_df
    def get_24hr_ticker(self, symbol=None):
        """Return 24-hour ticker statistics with formatted timestamps."""
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['24hr_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        # single-symbol queries return a dict rather than a list
        if isinstance(data, list):
            ticker_df = df(data)
        else:
            ticker_df = df([data])
        # convert epoch-millisecond stamps to datetime objects
        open_time_df = ticker_df['openTime']
        close_time_df = ticker_df['closeTime']
        open_time = []
        close_time = []
        for (o, c) in zip(open_time_df, close_time_df):
            open_time.append(datetime.fromtimestamp(int(o/1000)))
            close_time.append(datetime.fromtimestamp(int(c/1000)))
        open_time_df = df(open_time)
        close_time_df = df(close_time)
        ticker_df['openTime'] = open_time_df
        ticker_df['closeTime'] = close_time_df
        return ticker_df
    def get_historical_trade(self, symbol, limit=None, tradeId=None):
        """Return historical trades (from *tradeId* when given, else most recent)."""
        params = {
            'symbol': symbol,
            'limit': limit,
            'fromId': tradeId
        }
        url = self.base + self.endpoint['historical_trade']
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # API errors come back as a dict rather than a list
        if not isinstance(data, list):
            return data
        trade_df = df(data)
        if not trade_df.empty:
            # convert epoch-millisecond stamps to datetime objects
            time_df = trade_df['time']
            _time = []
            for t in time_df:
                _time.append(datetime.fromtimestamp(int(t/1000)))
            time_df = df(_time)
            trade_df['time'] = time_df
        return trade_df
    def get_query_order(self, symbol, orderId):
        """Return the status of a single order (signed request)."""
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data
    def get_open_order(self, symbol=None):
        """Return open orders (all symbols when *symbol* is None)."""
        params = {
            'timestamp': int(round(time.time()*1000))
        }
        if symbol != None:
            params['symbol'] = symbol
        url = self.base + self.endpoint['open_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # API errors come back as a dict rather than a list
        if not isinstance(data, list):
            return data
        open_order_df = df(data)
        if not open_order_df.empty:
            # convert creation/update epoch-millisecond stamps to datetimes
            time_df = open_order_df['time']
            updateTime_df = open_order_df['updateTime']
            _time = []
            _updateTime = []
            for (t, u) in zip(time_df, updateTime_df):
                _time.append(datetime.fromtimestamp(int(t/1000)))
                _updateTime.append(datetime.fromtimestamp(int(u/1000)))
            time_df = df(_time)
            updateTime_df = df(_updateTime)
            open_order_df['time'] = time_df
            open_order_df['updateTime'] = updateTime_df
        return open_order_df
    def get_all_order(self, symbol, orderId=None, limit=None):
        """Return all orders (active, canceled, filled) for *symbol*."""
        params = {
            'symbol': symbol,
            'timestamp': int(round(time.time()*1000))
        }
        # optional filters: orderId and/or limit
        if limit != None:
            if orderId != None:
                params['orderId'] = orderId
                params['limit'] = limit
            else:
                params['limit'] = limit
        else:
            if orderId != None:
                params['orderId'] = orderId
        url = self.base + self.endpoint['all_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # API errors come back as a dict rather than a list
        if not isinstance(data, list):
            return data
        all_order_df = df(data)
        # NOTE(review): unlike get_open_order there is no .empty guard here;
        # an empty result list raises KeyError on 'time' -- confirm and fix.
        time_df = all_order_df['time']
        updateTime_df = all_order_df['updateTime']
        _time = []
        _updateTime = []
        for (t, u) in zip(time_df, updateTime_df):
            _time.append(datetime.fromtimestamp(int(t/1000)))
            _updateTime.append(datetime.fromtimestamp(int(u/1000)))
        time_df = df(_time)
        updateTime_df = df(_updateTime)
        all_order_df['time'] = time_df
        all_order_df['updateTime'] = updateTime_df
        return all_order_df
    def __new_order(self, symbol, side, orderType, test=True, timeInForce=None, quantity=None,
                    quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (test=False) or validate (test=True) an order.

        Required: symbol (str), side (Order enum), orderType (Order enum).
        Each order type mandates a different subset of the optional fields;
        quoteOrderQty is accepted but currently never sent.
        """
        params = {
            'symbol': symbol,
            'side': side.value,
            'type': orderType.value,
            'newOrderRespType': 'RESULT',
            'timestamp': int(round(time.time()*1000))
        }
        if orderType == Order.LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.MARKET:
            params['quantity'] = quantity
        elif orderType == Order.STOP_LOSS:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.STOP_LOSS_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.TAKE_PROFIT:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.TAKE_PROFIT_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.LIMIT_MAKER:
            params['quantity'] = quantity
            params['price'] = price
        else:
            raise Exception('Invalid order type.')
        # the test endpoint validates the order without placing it
        if test == True:
            url = self.base + self.endpoint['test_order']
        else:
            url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.post(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data
    def buy(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
            quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (or, with test=True, only validate) a BUY order."""
        return self.__new_order(symbol, Order.BUY, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)
    def sell(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
             quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (or, with test=True, only validate) a SELL order."""
        return self.__new_order(symbol, Order.SELL, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)
    def cancel_order(self, symbol, orderId):
        """Cancel an open order (signed request)."""
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.delete(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data
    def sign_request(self, params: dict):
        """Append an HMAC-SHA256 'signature' entry to *params* (in place)."""
        query_string = '&'.join(["{}={}".format(d,params[d]) for d in params])
        signature = hmac.new(self.secret.encode('utf-8'),
                             query_string.encode('utf-8'),
                             hashlib.sha256)
        params['signature'] = signature.hexdigest()
| true | true |
f7308a1085cb46493a7cbe0bd9834d3db9018f4f | 946 | py | Python | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | 1 | 2021-12-07T09:40:38.000Z | 2021-12-07T09:40:38.000Z | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | null | null | null | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | null | null | null | from time import sleep
from umqtt.simple import MQTTClient
from machine import Pin
from dht import DHT22
SERVER = 'ip address' # MQTT Server Address (Change to the IP address of your Pi)
CLIENT_ID = 'ESP32_DHT22_Sensor'
TOPIC = b'temp_humidity'
client = MQTTClient(CLIENT_ID, SERVER)
client.connect() # Connect to MQTT broker
sensor = DHT22(Pin(23, Pin.IN, Pin.PULL_UP)) # DHT-22 on GPIO 23 (input with internal pull-up resistor)
while True:
try:
sensor.measure() # Poll sensor
t = sensor.temperature()
h = sensor.humidity()
if isinstance(t, float) and isinstance(h, float): # Confirm sensor results are numeric
msg = (b'{0:3.1f},{1:3.1f}'.format(t, h))
client.publish(TOPIC, msg) # Publish sensor data to MQTT topic
print(msg)
else:
print('Invalid sensor readings.')
except OSError:
print('Failed to read sensor.')
sleep(4)
| 32.62069 | 105 | 0.651163 | from time import sleep
from umqtt.simple import MQTTClient
from machine import Pin
from dht import DHT22
SERVER = 'ip address'
CLIENT_ID = 'ESP32_DHT22_Sensor'
TOPIC = b'temp_humidity'
client = MQTTClient(CLIENT_ID, SERVER)
client.connect()
sensor = DHT22(Pin(23, Pin.IN, Pin.PULL_UP))
while True:
try:
sensor.measure()
t = sensor.temperature()
h = sensor.humidity()
if isinstance(t, float) and isinstance(h, float):
msg = (b'{0:3.1f},{1:3.1f}'.format(t, h))
client.publish(TOPIC, msg)
print(msg)
else:
print('Invalid sensor readings.')
except OSError:
print('Failed to read sensor.')
sleep(4)
| true | true |
f7308a5b65edfbcf8a90beaa071fcbcffe8b5c75 | 1,912 | py | Python | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | from datetime import datetime as dt
from pathlib import PosixPath
from typing import Any, Dict, List, Tuple, Union
# TODO: uncomment after release (causes flake8 to fail)
# from _echopype_version import version as ECHOPYPE_VERSION
from typing_extensions import Literal
ProcessType = Literal["conversion", "processing"]


def echopype_prov_attrs(process_type: ProcessType) -> Dict[str, str]:
    """
    Standard echopype software attributes for provenance

    Parameters
    ----------
    process_type : ProcessType
        Echopype process function type
    """
    # UTC timestamp with a trailing "Z" marking the timezone explicitly
    utc_stamp = dt.utcnow().isoformat(timespec="seconds") + "Z"
    # TODO: replace the hard-coded version with ECHOPYPE_VERSION after release
    return {
        f"{process_type}_software_name": "echopype",
        f"{process_type}_software_version": "0.6.0",
        f"{process_type}_time": utc_stamp,
    }
def source_files_vars(source_paths: Union[str, List[Any]]) -> Dict[str, Tuple]:
    """
    Create source_filenames provenance variable dict to be used for creating
    xarray dataarray.

    Parameters
    ----------
    source_paths: Union[str, List[Any]]
        Source file paths as either a single path string or a list of Path-type paths

    Returns
    -------
    source_files_var: Dict[str, Tuple]
        Single-element dict containing a tuple for creating the
        source_filenames xarray dataarray with filenames dimension
    """
    from pathlib import Path

    # BUGFIX: the original compared type() against (str, PosixPath), which
    # rejected WindowsPath (and any Path/str subclass) and then crashed trying
    # to iterate the single Path. isinstance against the platform-independent
    # Path base class behaves identically for all previously supported inputs.
    if isinstance(source_paths, (str, Path)):
        source_files = [str(source_paths)]
    else:
        source_files = [str(p) for p in source_paths]

    source_files_var = {
        "source_filenames": (
            "filenames",
            source_files,
            {"long_name": "Source filenames"},
        ),
    }
    return source_files_var
| 29.875 | 96 | 0.668933 | from datetime import datetime as dt
from pathlib import PosixPath
from typing import Any, Dict, List, Tuple, Union
from typing_extensions import Literal
ProcessType = Literal["conversion", "processing"]
def echopype_prov_attrs(process_type: ProcessType) -> Dict[str, str]:
prov_dict = {
f"{process_type}_software_name": "echopype",
f"{process_type}_software_version": "0.6.0",
f"{process_type}_time": dt.utcnow().isoformat(timespec="seconds") + "Z",
}
return prov_dict
def source_files_vars(source_paths: Union[str, List[Any]]) -> Dict[str, Tuple]:
    """Build the single-entry dict used to create the ``source_filenames``
    xarray DataArray (dimension: ``filenames``)."""
    # accept a plain string / single PosixPath, or a list of path-like values
    # NOTE(review): type() comparison rejects WindowsPath -- consider isinstance
    if type(source_paths) in (str, PosixPath):
        source_files = [str(source_paths)]
    else:
        source_files = [str(p) for p in source_paths]
    source_files_var = {
        "source_filenames": (
            "filenames",
            source_files,
            {"long_name": "Source filenames"},
        ),
    }
    return source_files_var
| true | true |
f7308d265ec40e3e5b63ae6f5eca1836d9790623 | 1,861 | py | Python | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeNotificationConfigurationsRequest(RpcRequest):
    """RPC request: query notification configurations of a scaling group
    (Aliyun ESS API, version 2014-08-28)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeNotificationConfigurations','ess')
        # Populate endpoint data from the shared table when the base class
        # exposes the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ResourceOwnerAccount(self):
        """Return the ResourceOwnerAccount query parameter."""
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        """Set the ResourceOwnerAccount query parameter."""
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_ScalingGroupId(self):
        """Return the ScalingGroupId query parameter."""
        return self.get_query_params().get('ScalingGroupId')

    def set_ScalingGroupId(self,ScalingGroupId):
        """Set the ScalingGroupId query parameter."""
        self.add_query_param('ScalingGroupId',ScalingGroupId)

    def get_OwnerId(self):
        """Return the OwnerId query parameter."""
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        """Set the OwnerId query parameter."""
        self.add_query_param('OwnerId',OwnerId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeNotificationConfigurationsRequest(RpcRequest):
    """RPC request wrapper for ESS DescribeNotificationConfigurations
    (API version 2014-08-28)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeNotificationConfigurations','ess')
        # copy shared endpoint data onto the instance when the base class
        # defines the corresponding attributes
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_ResourceOwnerAccount(self):
        """Return the ResourceOwnerAccount query parameter."""
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        """Set the ResourceOwnerAccount query parameter."""
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_ScalingGroupId(self):
        """Return the ScalingGroupId query parameter."""
        return self.get_query_params().get('ScalingGroupId')

    def set_ScalingGroupId(self,ScalingGroupId):
        """Set the ScalingGroupId query parameter."""
        self.add_query_param('ScalingGroupId',ScalingGroupId)

    def get_OwnerId(self):
        """Return the OwnerId query parameter."""
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        """Set the OwnerId query parameter."""
        self.add_query_param('OwnerId',OwnerId)
self.add_query_param('OwnerId',OwnerId) | true | true |
f7308d359d4660aec3e1f8d361278eb035139385 | 2,776 | py | Python | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | #-----------------------------------------------------------------------------------
# lexer.py
# Lexer for Java programming language in Python
# Reference: https://docs.oracle.com/javase/specs/jls/se7/html/jls-18.html
#-----------------------------------------------------------------------------------
import ply.lex as lex
# Reference: https://docs.oracle.com/javase/tutorial/java/nutsandbolts/_keywords.html
keywords = ('this', 'class', 'void', 'super', 'extends', 'implements', 'enum', 'interface',
'byte', 'short', 'int', 'long', 'char', 'float', 'double', 'boolean', 'null',
'true', 'false',
'final', 'public', 'protected', 'private', 'abstract', 'static', 'strictfp', 'transient', 'volatile',
'synchronized', 'native',
'throws', 'default',
'instanceof',
'if', 'else', 'while', 'for', 'switch', 'case', 'assert', 'do',
'break', 'continue', 'return', 'throw', 'try', 'catch', 'finally', 'new',
'package', 'import'
)
tokens = [
'NAME',
'NUM',
'CHAR_LITERAL',
'STRING_LITERAL',
'LINE_COMMENT', 'BLOCK_COMMENT',
'OR', 'AND',
'EQ', 'NEQ', 'GTEQ', 'LTEQ',
'LSHIFT', 'RSHIFT', 'RRSHIFT',
'TIMES_ASSIGN', 'DIVIDE_ASSIGN', 'REMAINDER_ASSIGN',
'PLUS_ASSIGN', 'MINUS_ASSIGN', 'LSHIFT_ASSIGN', 'RSHIFT_ASSIGN', 'RRSHIFT_ASSIGN',
'AND_ASSIGN', 'OR_ASSIGN', 'XOR_ASSIGN',
'PLUSPLUS', 'MINUSMINUS',
'ELLIPSIS'
] + [k.upper() for k in keywords]
literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
t_NUM = r'\.?[0-9][0-9eE_lLdDa-fA-F.xXpP]*'
t_CHAR_LITERAL = r'\'([^\\\n]|(\\.))*?\''
t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
t_ignore_LINE_COMMENT = '//.*'
def t_BLOCK_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # ply uses the docstring above as the token regex -- do not reformat it.
    # Discard the comment but keep line numbers accurate across its newlines.
    t.lexer.lineno += t.value.count('\n')
t_OR = r'\|\|'
t_AND = '&&'
t_EQ = '=='
t_NEQ = '!='
t_GTEQ = '>='
t_LTEQ = '<='
t_LSHIFT = '<<'
t_RSHIFT = '>>'
t_RRSHIFT = '>>>'
t_TIMES_ASSIGN = r'\*='
t_DIVIDE_ASSIGN = '/='
t_REMAINDER_ASSIGN = '%='
t_PLUS_ASSIGN = r'\+='
t_MINUS_ASSIGN = '-='
t_LSHIFT_ASSIGN = '<<='
t_RSHIFT_ASSIGN = '>>='
t_RRSHIFT_ASSIGN = '>>>='
t_AND_ASSIGN = '&='
t_OR_ASSIGN = r'\|='
t_XOR_ASSIGN = '\^='
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'\-\-'
t_ELLIPSIS = r'\.\.\.'
t_ignore = ' \t\f'
def t_NAME(t):
    '[A-Za-z_$][A-Za-z0-9_$]*'
    # ply uses the docstring above as the token regex -- do not reformat it.
    # Reclassify identifiers that are reserved words (e.g. 'class' -> CLASS).
    if t.value in keywords:
        t.type = t.value.upper()
    return t
def t_newline(t):
    r'\n+'
    # LF-only newlines: one character per line, so count them directly
    t.lexer.lineno += len(t.value)
def t_newline2(t):
    r'(\r\n)+'
    # Each CRLF pair is two characters but only one line.
    # BUGFIX: use floor division so lineno stays an int; true division ('/')
    # turned lineno into a float after the first CRLF run.
    t.lexer.lineno += len(t.value) // 2
def t_error(t):
    # Report the offending character (with its code point) and resynchronize
    # by skipping a single character.
    print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
    t.lexer.skip(1)
# Lexer build
lexer = lex.lex()
| 26.692308 | 113 | 0.507565 |
import ply.lex as lex
keywords = ('this', 'class', 'void', 'super', 'extends', 'implements', 'enum', 'interface',
'byte', 'short', 'int', 'long', 'char', 'float', 'double', 'boolean', 'null',
'true', 'false',
'final', 'public', 'protected', 'private', 'abstract', 'static', 'strictfp', 'transient', 'volatile',
'synchronized', 'native',
'throws', 'default',
'instanceof',
'if', 'else', 'while', 'for', 'switch', 'case', 'assert', 'do',
'break', 'continue', 'return', 'throw', 'try', 'catch', 'finally', 'new',
'package', 'import'
)
tokens = [
'NAME',
'NUM',
'CHAR_LITERAL',
'STRING_LITERAL',
'LINE_COMMENT', 'BLOCK_COMMENT',
'OR', 'AND',
'EQ', 'NEQ', 'GTEQ', 'LTEQ',
'LSHIFT', 'RSHIFT', 'RRSHIFT',
'TIMES_ASSIGN', 'DIVIDE_ASSIGN', 'REMAINDER_ASSIGN',
'PLUS_ASSIGN', 'MINUS_ASSIGN', 'LSHIFT_ASSIGN', 'RSHIFT_ASSIGN', 'RRSHIFT_ASSIGN',
'AND_ASSIGN', 'OR_ASSIGN', 'XOR_ASSIGN',
'PLUSPLUS', 'MINUSMINUS',
'ELLIPSIS'
] + [k.upper() for k in keywords]
literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
t_NUM = r'\.?[0-9][0-9eE_lLdDa-fA-F.xXpP]*'
t_CHAR_LITERAL = r'\'([^\\\n]|(\\.))*?\''
t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
t_ignore_LINE_COMMENT = '//.*'
def t_BLOCK_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # ply reads the docstring above as the token regex; it was lost when
    # comments were stripped and is required for this rule to work.
    t.lexer.lineno += t.value.count('\n')
t_OR = r'\|\|'
t_AND = '&&'
t_EQ = '=='
t_NEQ = '!='
t_GTEQ = '>='
t_LTEQ = '<='
t_LSHIFT = '<<'
t_RSHIFT = '>>'
t_RRSHIFT = '>>>'
t_TIMES_ASSIGN = r'\*='
t_DIVIDE_ASSIGN = '/='
t_REMAINDER_ASSIGN = '%='
t_PLUS_ASSIGN = r'\+='
t_MINUS_ASSIGN = '-='
t_LSHIFT_ASSIGN = '<<='
t_RSHIFT_ASSIGN = '>>='
t_RRSHIFT_ASSIGN = '>>>='
t_AND_ASSIGN = '&='
t_OR_ASSIGN = r'\|='
t_XOR_ASSIGN = '\^='
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'\-\-'
t_ELLIPSIS = r'\.\.\.'
t_ignore = ' \t\f'
def t_NAME(t):
    '[A-Za-z_$][A-Za-z0-9_$]*'
    # ply reads the docstring above as the token regex (restored here);
    # reclassify identifiers that are reserved words.
    if t.value in keywords:
        t.type = t.value.upper()
    return t
def t_newline(t):
    r'\n+'
    # regex docstring restored; ply requires it for token rules
    t.lexer.lineno += len(t.value)
def t_newline2(t):
    r'(\r\n)+'
    # regex docstring restored (ply requires it). Each CRLF pair is two
    # characters but one line; floor division keeps lineno an int --
    # true division ('/') made it a float.
    t.lexer.lineno += len(t.value) // 2
def t_error(t):
    # report the offending character (with its code point) and resynchronize
    # by skipping a single character
    print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
    t.lexer.skip(1)
lexer = lex.lex()
| true | true |
f7308dfb1be9d3dd6bfea4a978d12b10c84b88cb | 1,785 | py | Python | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 9 | 2019-09-30T04:24:39.000Z | 2021-07-15T06:08:20.000Z | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 6 | 2020-05-14T03:13:32.000Z | 2022-02-10T10:23:46.000Z | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 2 | 2020-12-19T07:12:01.000Z | 2021-05-24T02:21:15.000Z | import hashlib
import random
import string
import time
from django.core.cache import cache
import requests
from common.config import WECHAT_GET_JSSDK_TICKET_URL, WECHAT_GET_ACCESS_TOKEN_URL
class Signature:
    """
    Build the WeChat JS-SDK signature payload for a given page URL.
    """
    def __init__(self, url):
        # collect the four fields WeChat requires for jsapi signing
        self.ret = {
            'nonceStr': self.__make_nonce(),
            'jsapi_ticket': Base_authorization.get_ticket(),
            'timestamp': self.__now(),
            'url': url
        }

    def __make_nonce(self):
        # 15-character random alphanumeric nonce
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(15))

    def __now(self):
        return int(time.time())

    def sign(self):
        """Add the SHA-1 jsapi signature to self.ret and return it."""
        # keys are sorted and lower-cased per the WeChat signing spec
        pairs = ['%s=%s' % (field.lower(), self.ret[field]) for field in sorted(self.ret)]
        raw = '&'.join(pairs).encode('utf-8')
        self.ret['signature'] = hashlib.sha1(raw).hexdigest()
        return self.ret
class Base_authorization():
    """
    Fetch and cache the WeChat JS-SDK ticket and access token.
    Values are kept in the Django cache for 110 minutes (WeChat tokens
    expire after 120, leaving a safety margin).
    """
    @classmethod
    def get_ticket(cls):
        """Return the jsapi ticket, refreshing and re-caching it when absent."""
        key = 'ticket'
        # BUGFIX: cache.has_key() was removed in Django 1.9; use get() with a
        # None sentinel instead (equivalent for the cached string values here).
        ticket = cache.get(key)
        if ticket is None:
            access_token = cache.get('access_token')
            if access_token is None:
                access_token = cls.get_access_token()
            ticket = requests.get(WECHAT_GET_JSSDK_TICKET_URL + access_token).json()['ticket']
            cache.set(key, ticket, 110 * 60)
        return ticket

    @staticmethod
    def get_access_token():
        """Fetch a fresh access token from WeChat and cache it."""
        key = 'access_token'
        access_token = requests.get(WECHAT_GET_ACCESS_TOKEN_URL).json()['access_token']
        cache.set(key, access_token, 110 * 60)
        return access_token
import random
import string
import time
from django.core.cache import cache
import requests
from common.config import WECHAT_GET_JSSDK_TICKET_URL, WECHAT_GET_ACCESS_TOKEN_URL
class Signature:
    """Build the WeChat JS-SDK signature payload for a given page URL."""
    def __init__(self,url):
        # the four fields WeChat requires for jsapi signing
        self.ret = {
            'nonceStr': self.__create_nonce_str(),
            'jsapi_ticket': Base_authorization.get_ticket(),
            'timestamp': self.__create_timestamp(),
            'url': url
        }
    def __create_nonce_str(self):
        # 15-character random alphanumeric nonce
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
    def __create_timestamp(self):
        return int(time.time())
    def sign(self):
        """Add the SHA-1 jsapi signature to self.ret and return it."""
        # NOTE: this local name shadows the imported `string` module within the method
        string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)]).encode('utf-8')
        self.ret['signature'] = hashlib.sha1(string).hexdigest()
        return self.ret
class Base_authorization():
    """Fetch and cache the WeChat JS-SDK ticket and access token (Django cache)."""
    @classmethod
    def get_ticket(cls):
        """Return the jsapi ticket, refreshing and re-caching it when absent."""
        key = 'ticket'
        # NOTE(review): cache.has_key() was removed in Django 1.9 -- confirm the
        # deployed Django version, or migrate to cache.get() / `key in cache`.
        if cache.has_key(key):
            ticket = cache.get(key)
        else:
            if cache.has_key('access_token'):
                access_token = cache.get('access_token')
            else:
                access_token = cls.get_access_token()
            ticket = requests.get(WECHAT_GET_JSSDK_TICKET_URL+access_token).json()['ticket']
            # cache for 110 minutes (tokens expire after 120)
            cache.set(key,ticket,110*60)
        return ticket
    @staticmethod
    def get_access_token():
        """Fetch a fresh access token from WeChat and cache it."""
        key = 'access_token'
        access_token = requests.get(WECHAT_GET_ACCESS_TOKEN_URL).json()['access_token']
        # cache for 110 minutes (tokens expire after 120)
        cache.set(key,access_token,110*60)
        return access_token
f7308e9c4d895763eaa137bfcd7402eab1c334b6 | 388 | py | Python | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
def problem124():
    """Project Euler 124: return E(10000).

    E(k) is the k-th element (1-indexed) of the integers 1..100000 sorted
    by rad(n), the product of n's distinct prime factors.  The radicals
    are computed with a sieve: each prime multiplies the radical of every
    one of its multiples exactly once.
    """
    limit = 100000
    rads = [0] + [1] * limit
    for p in range(2, limit + 1):
        if rads[p] != 1:
            continue  # p already has a prime factor recorded, so not prime
        for multiple in range(p, limit + 1, p):
            rads[multiple] *= p
    # The (0, 0) placeholder sorts first, so index 10000 is E(10000).
    ordering = sorted((rad, n) for (n, rad) in enumerate(rads))
    return ordering[10000][1]
if __name__ == "__main__":
    print(problem124())
def problem124():
    """Project Euler 124: the 10000th integer in [1, 100000] ordered by radical."""
    LIMIT = 100000
    # Sieve of Eratosthenes variant: fold each prime factor once into the
    # radical of every multiple.
    rads = [1] * (LIMIT + 1)
    rads[0] = 0  # placeholder so indices line up with n
    for i in range(2, LIMIT + 1):
        if rads[i] == 1:  # still 1 => no smaller prime divides i => prime
            for j in range(i, LIMIT + 1, i):
                rads[j] *= i
    ranked = sorted((r, n) for n, r in enumerate(rads))
    return ranked[10000][1]
if __name__ == "__main__":
    print(problem124())
| true | true |
f7308f2ba96d7185ba98d55e78390dec75d685f8 | 1,923 | py | Python | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 76 | 2019-05-27T23:38:53.000Z | 2021-12-19T00:31:13.000Z | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 9 | 2019-05-29T21:01:32.000Z | 2020-07-30T12:00:02.000Z | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 35 | 2019-05-12T04:20:54.000Z | 2022-03-03T19:46:06.000Z | import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
from math import ceil
import numpy as np
import argparse
from functools import partial
import os
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers.merge import _Merge
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
from keras import backend as K
from keras.datasets import cifar10
def get_data():
    """Load CIFAR-10 and scale pixel values to [-1, 1].

    Returns:
        (X_train, X_test): float32 image arrays scaled from [0, 255] to
        [-1, 1]; the labels are discarded.
    """
    # load cifar10 data
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # convert train and test data to float32
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    # scale train and test data to [-1, 1]
    X_train = (X_train / 255) * 2 - 1
    # BUG FIX: the original line rescaled X_train again and assigned it to
    # X_test, returning the training set in place of the test set.
    X_test = (X_test / 255) * 2 - 1
    return X_train, X_test
def plot_images(images, filename):
    """Tile a batch of [-1, 1]-scaled images into one square grid and save it.

    NOTE(review): the reshape assumes the batch size is a perfect square
    (grid_size**2 images) -- confirm with callers.
    """
    # Map pixel values from [-1, 1] back to [0.0, 1.0] for imsave.
    scaled = (images + 1) / 2
    h, w, c = scaled.shape[1:]
    grid_size = ceil(np.sqrt(scaled.shape[0]))
    # Arrange the batch as a grid_size x grid_size mosaic.
    mosaic = scaled.reshape(grid_size, grid_size, h, w, c)
    mosaic = mosaic.transpose(0, 2, 1, 3, 4)
    mosaic = mosaic.reshape(grid_size * h, grid_size * w, c)
    plt.figure(figsize=(16, 16))
    plt.imsave(filename, mosaic)
    plt.close('all')
def plot_losses(losses_d, losses_g, filename):
    """Save a 2x2 figure of GAN loss curves to *filename*.

    losses_d is a sequence of (total, real, fake) discriminator loss
    triples; losses_g is a flat sequence of generator losses.
    """
    losses_d = np.array(losses_d)
    fig, axes = plt.subplots(2, 2, figsize=(8, 8))
    axes = axes.flatten()
    series = (losses_d[:, 0], losses_d[:, 1], losses_d[:, 2], losses_g)
    titles = ("losses_d", "losses_d_real", "losses_d_fake", "losses_g")
    for ax, data in zip(axes, series):
        ax.plot(data)
    for ax, title in zip(axes, titles):
        ax.set_title(title)
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
matplotlib.use("Agg")
import matplotlib.pylab as plt
from math import ceil
import numpy as np
import argparse
from functools import partial
import os
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers.merge import _Merge
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
from keras import backend as K
from keras.datasets import cifar10
def get_data():
    """Load CIFAR-10, cast to float32 and scale pixels to [-1, 1].

    Returns:
        (X_train, X_test): image arrays only; labels are dropped.
    """
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    X_train = (X_train / 255) * 2 - 1
    # BUG FIX: was `(X_train / 255) * 2 - 1`, which returned a rescaled
    # copy of the training set instead of the test set.
    X_test = (X_test / 255) * 2 - 1
    return X_train, X_test
def plot_images(images, filename):
    """Save a batch of [-1, 1]-scaled images as a single tiled image file."""
    rescaled = (images + 1) / 2  # back to [0, 1] for imsave
    h, w, c = rescaled.shape[1:]
    side = ceil(np.sqrt(rescaled.shape[0]))
    # NOTE(review): assumes a perfectly square batch (side * side images).
    grid = (rescaled.reshape(side, side, h, w, c)
            .transpose(0, 2, 1, 3, 4)
            .reshape(side * h, side * w, c))
    plt.figure(figsize=(16, 16))
    plt.imsave(filename, grid)
    plt.close('all')
def plot_losses(losses_d, losses_g, filename):
    """Plot discriminator (total/real/fake) and generator losses to *filename*."""
    d_hist = np.array(losses_d)  # columns: total, real, fake
    fig, axes = plt.subplots(2, 2, figsize=(8, 8))
    axes = axes.flatten()
    axes[0].plot(d_hist[:, 0])
    axes[1].plot(d_hist[:, 1])
    axes[2].plot(d_hist[:, 2])
    axes[3].plot(losses_g)
    axes[0].set_title("losses_d")
    axes[1].set_title("losses_d_real")
    axes[2].set_title("losses_d_fake")
    axes[3].set_title("losses_g")
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
f7308f9c00ebc9bbce9d89a7668d0310b9d0223d | 793 | py | Python | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 7 | 2021-09-29T09:46:36.000Z | 2022-03-24T08:30:41.000Z | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 1 | 2021-09-30T16:56:21.000Z | 2021-10-15T09:14:12.000Z | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 1 | 2021-09-29T22:34:05.000Z | 2021-09-29T22:34:05.000Z | import _plotly_utils.basevalidators
class SubplotsValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the 2-D ``layout.grid.subplots`` info array."""
    def __init__(self, plotly_name="subplots", parent_name="layout.grid", **kwargs):
        # Each grid cell holds an axis-pair id such as "x2y3", or "" for
        # an empty cell; the grid may grow freely in both dimensions.
        default_items = {
            "valType": "enumerated",
            "values": ["/^x([2-9]|[1-9][0-9]+)?y([2-9]|[1-9][0-9]+)?$/", ""],
            "editType": "plot",
        }
        super(SubplotsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dimensions=kwargs.pop("dimensions", 2),
            edit_type=kwargs.pop("edit_type", "plot"),
            free_length=kwargs.pop("free_length", True),
            items=kwargs.pop("items", default_items),
            **kwargs
        )
class SubplotsValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for ``layout.grid.subplots`` (generated plotly validator)."""
    def __init__(self, plotly_name="subplots", parent_name="layout.grid", **kwargs):
        dimensions = kwargs.pop("dimensions", 2)
        edit_type = kwargs.pop("edit_type", "plot")
        free_length = kwargs.pop("free_length", True)
        items = kwargs.pop(
            "items",
            {
                "valType": "enumerated",
                # axis-pair ids such as "x2y3", or "" for an empty cell
                "values": ["/^x([2-9]|[1-9][0-9]+)?y([2-9]|[1-9][0-9]+)?$/", ""],
                "editType": "plot",
            },
        )
        super(SubplotsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dimensions=dimensions,
            edit_type=edit_type,
            free_length=free_length,
            items=items,
            **kwargs
        )
f730903cf9f64cb688db3b16064ab4dafccd0ee0 | 297 | py | Python | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | import os
import csv
output_path = os.path.join(".", "output", "new.csv")
with open(output_path, "w", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(["First Name", "Last Name", "SSN"])
csvwriter.writerow(["Laura", "Raynes", "555-55-5555"])
| 29.7 | 58 | 0.653199 | import os
import csv
output_path = os.path.join(".", "output", "new.csv")
with open(output_path, "w", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(["First Name", "Last Name", "SSN"])
csvwriter.writerow(["Laura", "Raynes", "555-55-5555"])
| true | true |
f73090ca8723e36f89646c3925d6c5c1658f9fa9 | 24,872 | py | Python | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 5 | 2021-06-21T08:17:44.000Z | 2022-03-25T03:12:13.000Z | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 107 | 2021-06-07T07:31:14.000Z | 2022-03-30T07:24:33.000Z | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 17 | 2021-05-26T21:05:54.000Z | 2022-03-29T00:49:50.000Z | # Copyright (c) 2021 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
from harvester_e2e_tests import utils
import polling2
import time
import json
import pytest
pytest_plugins = [
'harvester_e2e_tests.fixtures.keypair',
'harvester_e2e_tests.fixtures.vm',
'harvester_e2e_tests.fixtures.volume',
'harvester_e2e_tests.fixtures.backuptarget'
]
def backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget):
    """
    Backup Restore Testing
    Covers:
        backup-and-restore-13-Restore Backup for VM that was live migrated
        backup-and-restore-14-Backup Single VM that has been live migrated
        before
    """
    # BUG FIX: this was initialized with utils.random_name(), a truthy
    # *string*, so any failure before the backup existed made the finally
    # block call delete_vm_backup() with a string instead of the backup
    # JSON object.  Start from None so cleanup only runs after creation.
    backup_json = None
    try:
        vm_name = vm_with_volume['metadata']['name']
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_before_migrate = vm_instance_json['status']['nodeName']
        # Pick any node other than the one currently hosting the VM.
        # (An unused GET on the current node was removed here.)
        resp = admin_session.get(harvester_api_endpoints.list_nodes)
        assert resp.status_code == 200, 'Failed to list nodes: %s' % (
            resp.content)
        nodes_json = resp.json()['data']
        for node in nodes_json:
            if node['metadata']['name'] != vm_node_before_migrate:
                node_to_migrate = node['metadata']['name']
        resp = admin_session.put(harvester_api_endpoints.migrate_vm % (
            vm_name),
            json={"nodeName": node_to_migrate})
        assert resp.status_code == 202, 'Failed to migrate VM to host %s' % (
            node_to_migrate)
        # give it some time for the VM to migrate
        time.sleep(120)
        def _check_vm_instance_migrated():
            # Poll helper: True once migrationState reports completed.
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    vm_name))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        'migrationState' in resp_json['status'] and
                        resp_json['status']['migrationState']['completed']):
                    return True
            return False
        success = polling2.poll(
            _check_vm_instance_migrated,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out as waiting for VM to migrate : %s' % (
            vm_name)
        vmi_json_after_migrate = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_after_migrate = vmi_json_after_migrate['status']['nodeName']
        assert vm_node_after_migrate != vm_node_before_migrate, (
            'Failed to Migrate as Host remains same. '
            'Node Before Migrate: %s; Node after Migrate: %s' % (
                vm_node_before_migrate, vm_node_after_migrate))
        # Create backup of Live migrated VM
        backup_name = utils.random_name()
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        # Stop VM: the API rejects restores while the VM is running
        utils.stop_vm(request, admin_session,
                      harvester_api_endpoints, vm_name)
        # Restore existing VM from backup
        restore_name = utils.random_name()
        utils.restore_vm_backup(request, admin_session,
                                harvester_api_endpoints,
                                name=restore_name,
                                vm_name=vm_name,
                                backup_name=backup_name)
        utils.assert_vm_ready(request, admin_session,
                              harvester_api_endpoints,
                              vm_name, running=True)
        resp = admin_session.get(harvester_api_endpoints.get_vm % (
            vm_name))
        assert resp.status_code == 200, 'Failed to get restored VM %s: %s' % (
            vm_name, resp.content)
        restored_vm_json = resp.json()
        restored_vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, restored_vm_json)
        restored_vm_node = restored_vm_instance_json['status']['nodeName']
        # the restored VM should come back on the post-migration node
        assert restored_vm_node == vm_node_after_migrate, (
            'Node of restored VM not same as Node after VM migration '
            'Node Of Restored VM: %s; VM Node after Migrate: %s' % (
                restored_vm_node, vm_node_after_migrate))
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if vm_with_volume:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, vm_with_volume)
            if backup_json:
                utils.delete_vm_backup(request, admin_session,
                                       harvester_api_endpoints,
                                       backuptarget, backup_json)
def update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget):
    """Create a backup of *basic_vm*, edit it via a YAML update, and verify.

    The backup's metadata.annotations are replaced and the update is sent
    with ``use_yaml=True`` so the YAML edit path of the API is exercised;
    the test then asserts the annotation round-trips.  The VM and backup
    are cleaned up unless --do-not-cleanup was given.
    """
    vm_name = basic_vm['metadata']['name']
    # sanity check: the VM must have a running instance before backing up
    utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    backup_name = utils.random_name()
    # stays None until creation succeeds; the finally block keys off it
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        backup_json['metadata']['annotations'] = {
            'test.harvesterhci.io': 'for-test-update'
        }
        resp = utils.poll_for_update_resource(
            request, admin_session,
            harvester_api_endpoints.update_vm_backup % (
                backup_json['metadata']['name']),
            backup_json,
            harvester_api_endpoints.get_vm_backup % (
                backup_json['metadata']['name']),
            use_yaml=True)
        updated_backup_data = resp.json()
        assert updated_backup_data['metadata']['annotations'].get(
            'test.harvesterhci.io') == 'for-test-update'
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget, backup_json)
@pytest.mark.virtual_machines_p1
@pytest.mark.p1
class TestVMActions:
    """
    Test Virtual Machines operations like restart, stop, start, pause,
    unpause
    Covers:
        virtual-machines-55-VM operations stop,start,restart,pause,unpause
        virtual-machines-50-VM Edit VM via YAML with CPU
    """
    def test_create_vm(self, admin_session, harvester_api_endpoints, basic_vm):
        """Validate a freshly created VM has an instance and a cdrom disk."""
        # make sure the VM instance is successfully created
        utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, basic_vm)
        # make sure it has a cdrom device
        devices = basic_vm['spec']['template']['spec']['domain']['devices']
        disks = devices['disks']
        found_cdrom = False
        for disk in disks:
            if 'cdrom' in disk:
                found_cdrom = True
                break
        assert found_cdrom, 'Expecting "cdrom" in the disks list.'
    def test_restart_vm(self, request, admin_session, harvester_api_endpoints,
                        basic_vm):
        """Restart the VM; utils.restart_vm waits for a new instance UID."""
        vm_name = basic_vm['metadata']['name']
        previous_uid = basic_vm['metadata']['uid']
        utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
                         vm_name, request.config.getoption('--wait-timeout'))
    def test_stop_vm(self, request, admin_session, harvester_api_endpoints,
                     basic_vm):
        """Stop the VM; utils.stop_vm polls until it is down."""
        vm_name = basic_vm['metadata']['name']
        utils.stop_vm(request, admin_session, harvester_api_endpoints,
                      vm_name)
    def test_start_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Start the stopped VM and poll until the instance phase is Running."""
        # NOTE: this step must be done after VM has stopped
        resp = admin_session.put(harvester_api_endpoints.start_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 202, (
            'Failed to start VM instance %s: %s' % (
                basic_vm['metadata']['name'], resp.content))
        # give it some time for the VM to start
        time.sleep(120)
        def _check_vm_instance_started():
            # poll helper: True once the instance reports phase Running
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        resp_json['status']['phase'] == 'Running'):
                    return True
            return False
        success = polling2.poll(
            _check_vm_instance_started,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Failed to get VM instance for: %s' % (
            basic_vm['metadata']['name'])
    def test_pause_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Pause the VM and poll for a Paused=True condition."""
        resp = admin_session.put(harvester_api_endpoints.pause_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 200, 'Failed to pause VM instance %s' % (
            basic_vm['metadata']['name'])
        # give it some time for the VM to pause
        time.sleep(60)
        def _check_vm_instance_paused():
            # poll helper: True once a Paused=True condition appears
            resp = admin_session.get(harvester_api_endpoints.get_vm % (
                basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if 'status' in resp_json:
                    for condition in resp_json['status']['conditions']:
                        if (condition['type'] == 'Paused' and
                                condition['status'] == 'True'):
                            return True
            return False
        success = polling2.poll(
            _check_vm_instance_paused,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out while waiting for VM to be paused.'
    def test_unpause_vm(self, request, admin_session, harvester_api_endpoints,
                        basic_vm):
        """Unpause the VM and poll until status.ready becomes truthy."""
        # NOTE: make sure to execute this step after _paused_vm()
        resp = admin_session.put(harvester_api_endpoints.unpause_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 200, 'Failed to unpause VM instance %s' % (
            basic_vm['metadata']['name'])
        # give it some time to unpause
        time.sleep(10)
        def _check_vm_instance_unpaused():
            # poll helper: True once the VM reports ready again
            resp = admin_session.get(harvester_api_endpoints.get_vm % (
                basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        'ready' in resp_json['status'] and
                        resp_json['status']['ready']):
                    return True
            return False
        success = polling2.poll(
            _check_vm_instance_unpaused,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out while waiting for VM to be unpaused.'
    def test_update_vm_cpu(self, request, admin_session,
                           harvester_api_endpoints, basic_vm):
        """Bump the VM's CPU core count via an API update, then restart it."""
        vm_name = basic_vm['metadata']['name']
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, basic_vm)
        previous_uid = vm_instance_json['metadata']['uid']
        domain_data = basic_vm['spec']['template']['spec']['domain']
        updated_cores = domain_data['cpu']['cores'] + 1
        domain_data['cpu']['cores'] = updated_cores
        resp = utils.poll_for_update_resource(
            request, admin_session,
            harvester_api_endpoints.update_vm % (vm_name),
            basic_vm,
            harvester_api_endpoints.get_vm % (vm_name))
        updated_vm_data = resp.json()
        updated_domain_data = (
            updated_vm_data['spec']['template']['spec']['domain'])
        assert updated_domain_data['cpu']['cores'] == updated_cores
        # restart the VM instance for the changes to take effect
        utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
                         vm_name, request.config.getoption('--wait-timeout'))
@pytest.mark.volumes_p2
@pytest.mark.volumes_p1
@pytest.mark.p2
@pytest.mark.p1
class TestVMVolumes:
    """Volume-related VM scenarios: external volumes, in-use protection,
    and volume cleanup after VM deletion."""
    def test_create_vm_with_external_volume(self, admin_session,
                                            harvester_api_endpoints,
                                            vm_with_volume):
        """
        Test virtual machines
        Covers:
            virtual-machines-11-Create VM with two disk volumes
        """
        # make sure the VM instance is successfully created
        utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        # make sure it's data volumes are in-use
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for volume in volumes:
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                volume['persistentVolumeClaim']['claimName']))
            assert resp.status_code == 200, (
                'Failed to lookup volume %s: %s' % (
                    volume['persistentVolumeClaim']['claimName'],
                    resp.content))
            volume_json = resp.json()
            # the owned-by annotation is a JSON-encoded list of owners
            owned_by = json.loads(
                volume_json['metadata']['annotations'].get(
                    'harvesterhci.io/owned-by'))
            expected_owner = '%s/%s' % (
                vm_with_volume['metadata']['namespace'],
                vm_with_volume['metadata']['name'])
            # make sure VM is one of the owners
            found = False
            for owner in owned_by:
                if (owner['schema'] == 'kubevirt.io.virtualmachine' and
                        expected_owner in owner['refs']):
                    found = True
                    break
            assert found, ('Expecting %s to be in volume %s owners list' % (
                expected_owner, volume['persistentVolumeClaim']['claimName']))
    def test_delete_volume_in_use(self, request, admin_session,
                                  harvester_api_endpoints, vm_with_volume):
        """
        Volume testing
        Covers:
            Negative vol-01-Delete Volume that is in use
            vol-13-Validate volume shows as in use when attached
        """
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for volume in volumes:
            # try to delete a volume in 'in-use' state and it should
            # fail
            resp = admin_session.delete(
                harvester_api_endpoints.delete_volume % (
                    volume['persistentVolumeClaim']['claimName']))
            assert resp.status_code not in [200, 201], (
                'Deleting "in-use" volumes should not be permitted: %s' % (
                    resp.content))
    def test_delete_vm_then_volumes(self, request, admin_session,
                                    harvester_api_endpoints,
                                    vm_with_volume, volume):
        """
        Volume testing
        Covers:
            vol-15-Delete volume that was attached to VM but now is not
        """
        # delete the VM but keep the volumes
        utils.delete_vm(request, admin_session, harvester_api_endpoints,
                        vm_with_volume, remove_all_disks=False)
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for data_vol in volumes:
            volume_name = data_vol['persistentVolumeClaim']['claimName']
            # the detached volume must still exist after VM deletion
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                volume_name))
            assert resp.status_code == 200, (
                'Failed to lookup data volume %s: %s' % (
                    volume_name, resp.content))
            # now cleanup the volume
            utils.delete_volume_by_name(request, admin_session,
                                        harvester_api_endpoints, volume_name)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_s3(request, admin_session,
                             harvester_api_endpoints, basic_vm,
                             backuptarget_s3):
    """
    Backup and Restore
    Covers:
        backup-and-restore-02-Backup Single VM s3
        backup-and-restore-07-Delete single Backup
        backup-and-restore-01-create backup target
    """
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    # stays None if creation fails, so cleanup is skipped in that case
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_s3,
                                             name=backup_name,
                                             vm_name=vm_name)
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_s3, backup_json)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_nfs(request, admin_session,
                              harvester_api_endpoints, basic_vm,
                              backuptarget_nfs):
    """
    Backup and Restore
    Covers:
        vol-02-Backup Single VM nfs
    """
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    # stays None if creation fails, so cleanup is skipped in that case
    backup_json = None
    try:
        # create_vm_backup asserts the backup reaches the ready state
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_nfs,
                                             name=backup_name,
                                             vm_name=vm_name)
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_nfs, backup_json)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_s3(request, admin_session,
                                       harvester_api_endpoints,
                                       vm_with_volume,
                                       backuptarget_s3):
    """
    Backup and Restore
    Covers:
        backup-and-restore-13-Restore Backup S3 for VM that was
        live migrated
        backup-and-restore-14-Backup single vm S3 for VM that was
        live migrated before
    """
    # thin wrapper: run the shared scenario against an S3 backup target
    backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget_s3)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_nfs(request, admin_session,
                                        harvester_api_endpoints,
                                        vm_with_volume,
                                        backuptarget_nfs):
    """
    Backup and Restore
    Covers:
        backup-and-restore-13-Restore Backup nfs for VM that was
        live migrated
        backup-and-restore-14-Backup single vm nfs for VM that was
        live migrated before
    """
    # thin wrapper: run the shared scenario against an NFS backup target
    backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget_nfs)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_nfs(request, admin_session,
                                harvester_api_endpoints, basic_vm,
                                backuptarget_nfs):
    """
    Backup Restore Testing
    Covers:
        backup-and-restore-11-Edit Backup nfs
    """
    # thin wrapper: edit-backup scenario against an NFS backup target
    update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget_nfs)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_s3(request, admin_session,
                               harvester_api_endpoints, basic_vm,
                               backuptarget_s3):
    """
    Backup Restore Testing
    Covers:
        backup-and-restore-11-Edit Backup s3
    """
    # thin wrapper: edit-backup scenario against an S3 backup target
    update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget_s3)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_restore_backup_vm_on(request, admin_session,
                              harvester_api_endpoints,
                              basic_vm, backuptarget_nfs):
    """
    Backup Restore Testing
    Covers:
        Negative backup-and-restore-08-Restore Backup Negative

    Restoring over an existing VM while it is still running must be
    rejected by the API with a "please stop the VM" message.
    """
    # make sure the VM instance is successfully created
    vm_instance_json = utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    vm_name = vm_instance_json['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_nfs,
                                             name=backup_name,
                                             vm_name=vm_name)
        # attempt a restore while the VM is still running (negative case)
        restore_name = utils.random_name()
        request_json = utils.get_json_object_from_template(
            'basic_vm_restore',
            name=restore_name,
            vm_name=vm_name,
            backup_name=backup_name
        )
        resp = admin_session.post(
            harvester_api_endpoints.create_vm_restore,
            json=request_json)
        content = resp.json()
        assert 'please stop the VM' in content['message']
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_nfs, backup_json)
| 41.801681 | 79 | 0.575064 |
from harvester_e2e_tests import utils
import polling2
import time
import json
import pytest
pytest_plugins = [
'harvester_e2e_tests.fixtures.keypair',
'harvester_e2e_tests.fixtures.vm',
'harvester_e2e_tests.fixtures.volume',
'harvester_e2e_tests.fixtures.backuptarget'
]
def backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget):
    """Live-migrate a VM, back it up, then restore and verify the node.

    Covers backup-and-restore-13/-14 (backup and restore of a VM that was
    live migrated).
    """
    # BUG FIX: was `utils.random_name()`, a truthy string, which made the
    # finally block pass a string to delete_vm_backup() when anything
    # failed before the backup was created.  Use None instead.
    backup_json = None
    try:
        vm_name = vm_with_volume['metadata']['name']
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_before_migrate = vm_instance_json['status']['nodeName']
        # pick any node other than the one currently hosting the VM
        # (an unused GET on the current node was removed here)
        resp = admin_session.get(harvester_api_endpoints.list_nodes)
        assert resp.status_code == 200, 'Failed to list nodes: %s' % (
            resp.content)
        nodes_json = resp.json()['data']
        for node in nodes_json:
            if node['metadata']['name'] != vm_node_before_migrate:
                node_to_migrate = node['metadata']['name']
        resp = admin_session.put(harvester_api_endpoints.migrate_vm % (
            vm_name),
            json={"nodeName": node_to_migrate})
        assert resp.status_code == 202, 'Failed to migrate VM to host %s' % (
            node_to_migrate)
        # give the migration some time before polling
        time.sleep(120)
        def _check_vm_instance_migrated():
            # poll helper: True once migrationState reports completed
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    vm_name))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        'migrationState' in resp_json['status'] and
                        resp_json['status']['migrationState']['completed']):
                    return True
            return False
        success = polling2.poll(
            _check_vm_instance_migrated,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out as waiting for VM to migrate : %s' % (
            vm_name)
        vmi_json_after_migrate = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_after_migrate = vmi_json_after_migrate['status']['nodeName']
        assert vm_node_after_migrate != vm_node_before_migrate, (
            'Failed to Migrate as Host remains same. '
            'Node Before Migrate: %s; Node after Migrate: %s' % (
                vm_node_before_migrate, vm_node_after_migrate))
        # back up the live-migrated VM
        backup_name = utils.random_name()
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        # restores are rejected while the VM is running, so stop it first
        utils.stop_vm(request, admin_session,
                      harvester_api_endpoints, vm_name)
        restore_name = utils.random_name()
        utils.restore_vm_backup(request, admin_session,
                                harvester_api_endpoints,
                                name=restore_name,
                                vm_name=vm_name,
                                backup_name=backup_name)
        utils.assert_vm_ready(request, admin_session,
                              harvester_api_endpoints,
                              vm_name, running=True)
        resp = admin_session.get(harvester_api_endpoints.get_vm % (
            vm_name))
        assert resp.status_code == 200, 'Failed to get restored VM %s: %s' % (
            vm_name, resp.content)
        restored_vm_json = resp.json()
        restored_vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, restored_vm_json)
        restored_vm_node = restored_vm_instance_json['status']['nodeName']
        # the restored VM should come back on the post-migration node
        assert restored_vm_node == vm_node_after_migrate, (
            'Node of restored VM not same as Node after VM migration '
            'Node Of Restored VM: %s; VM Node after Migrate: %s' % (
                restored_vm_node, vm_node_after_migrate))
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if vm_with_volume:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, vm_with_volume)
            if backup_json:
                utils.delete_vm_backup(request, admin_session,
                                       harvester_api_endpoints,
                                       backuptarget, backup_json)
def update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget):
    """Back up *basic_vm*, edit the backup via YAML update, verify the edit.

    Sends the modified annotations with ``use_yaml=True`` and asserts the
    annotation value round-trips through the API.
    """
    vm_name = basic_vm['metadata']['name']
    # sanity check: the VM must have a running instance before backing up
    utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    backup_name = utils.random_name()
    # None until the backup exists; cleanup is keyed off this value
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        backup_json['metadata']['annotations'] = {
            'test.harvesterhci.io': 'for-test-update'
        }
        resp = utils.poll_for_update_resource(
            request, admin_session,
            harvester_api_endpoints.update_vm_backup % (
                backup_json['metadata']['name']),
            backup_json,
            harvester_api_endpoints.get_vm_backup % (
                backup_json['metadata']['name']),
            use_yaml=True)
        updated_backup_data = resp.json()
        assert updated_backup_data['metadata']['annotations'].get(
            'test.harvesterhci.io') == 'for-test-update'
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget, backup_json)
@pytest.mark.virtual_machines_p1
@pytest.mark.p1
class TestVMActions:
    """Lifecycle tests for a basic VM: create, restart, stop, start,
    pause, unpause, and CPU update."""

    def test_create_vm(self, admin_session, harvester_api_endpoints, basic_vm):
        """A freshly created VM must expose a cdrom device in its disks."""
        utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, basic_vm)
        devices = basic_vm['spec']['template']['spec']['domain']['devices']
        disks = devices['disks']
        found_cdrom = False
        for disk in disks:
            if 'cdrom' in disk:
                found_cdrom = True
                break
        assert found_cdrom, 'Expecting "cdrom" in the disks list.'

    def test_restart_vm(self, request, admin_session, harvester_api_endpoints,
                        basic_vm):
        """Restart the VM; the helper waits for a new instance UID."""
        vm_name = basic_vm['metadata']['name']
        previous_uid = basic_vm['metadata']['uid']
        utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
                         vm_name, request.config.getoption('--wait-timeout'))

    def test_stop_vm(self, request, admin_session, harvester_api_endpoints,
                     basic_vm):
        """Stop the VM via the shared helper."""
        vm_name = basic_vm['metadata']['name']
        utils.stop_vm(request, admin_session, harvester_api_endpoints,
                      vm_name)

    def test_start_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Start the (stopped) VM and poll until the instance is Running."""
        resp = admin_session.put(harvester_api_endpoints.start_vm % (
            basic_vm['metadata']['name']))
        # the start API acknowledges asynchronously with 202 Accepted
        assert resp.status_code == 202, (
            'Failed to start VM instance %s: %s' % (
                basic_vm['metadata']['name'], resp.content))
        time.sleep(120)

        def _check_vm_instance_started():
            # True once the VM instance reports phase 'Running'
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        resp_json['status']['phase'] == 'Running'):
                    return True
            return False

        success = polling2.poll(
            _check_vm_instance_started,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Failed to get VM instance for: %s' % (
            basic_vm['metadata']['name'])

    def test_pause_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Pause the VM and poll until the 'Paused' condition is True."""
        resp = admin_session.put(harvester_api_endpoints.pause_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 200, 'Failed to pause VM instance %s' % (
            basic_vm['metadata']['name'])
        time.sleep(60)

        def _check_vm_instance_paused():
            # look for condition type 'Paused' with status 'True'
            resp = admin_session.get(harvester_api_endpoints.get_vm % (
                basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if 'status' in resp_json:
                    for condition in resp_json['status']['conditions']:
                        if (condition['type'] == 'Paused' and
                                condition['status'] == 'True'):
                            return True
            return False

        success = polling2.poll(
            _check_vm_instance_paused,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out while waiting for VM to be paused.'

    def test_unpause_vm(self, request, admin_session, harvester_api_endpoints,
                        basic_vm):
        """Unpause the VM and poll until status reports ready again."""
        resp = admin_session.put(harvester_api_endpoints.unpause_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 200, 'Failed to unpause VM instance %s' % (
            basic_vm['metadata']['name'])
        time.sleep(10)

        def _check_vm_instance_unpaused():
            # ready flag flips back to truthy once the VM is running again
            resp = admin_session.get(harvester_api_endpoints.get_vm % (
                basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        'ready' in resp_json['status'] and
                        resp_json['status']['ready']):
                    return True
            return False

        success = polling2.poll(
            _check_vm_instance_unpaused,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out while waiting for VM to be unpaused.'

    def test_update_vm_cpu(self, request, admin_session,
                           harvester_api_endpoints, basic_vm):
        """Bump the VM's core count by one and restart to apply it."""
        vm_name = basic_vm['metadata']['name']
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, basic_vm)
        previous_uid = vm_instance_json['metadata']['uid']
        domain_data = basic_vm['spec']['template']['spec']['domain']
        updated_cores = domain_data['cpu']['cores'] + 1
        domain_data['cpu']['cores'] = updated_cores
        resp = utils.poll_for_update_resource(
            request, admin_session,
            harvester_api_endpoints.update_vm % (vm_name),
            basic_vm,
            harvester_api_endpoints.get_vm % (vm_name))
        updated_vm_data = resp.json()
        updated_domain_data = (
            updated_vm_data['spec']['template']['spec']['domain'])
        assert updated_domain_data['cpu']['cores'] == updated_cores
        # restart so the running instance picks up the new CPU count
        utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
                         vm_name, request.config.getoption('--wait-timeout'))
@pytest.mark.volumes_p2
@pytest.mark.volumes_p1
@pytest.mark.p2
@pytest.mark.p1
class TestVMVolumes:
    """Tests around VM volume ownership and the volume lifecycle."""

    def test_create_vm_with_external_volume(self, admin_session,
                                            harvester_api_endpoints,
                                            vm_with_volume):
        """Every volume attached to the VM must list the VM as an owner."""
        utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for volume in volumes:
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                volume['persistentVolumeClaim']['claimName']))
            assert resp.status_code == 200, (
                'Failed to lookup volume %s: %s' % (
                    volume['persistentVolumeClaim']['claimName'],
                    resp.content))
            volume_json = resp.json()
            # the 'owned-by' annotation is a JSON-encoded list of owner refs
            owned_by = json.loads(
                volume_json['metadata']['annotations'].get(
                    'harvesterhci.io/owned-by'))
            expected_owner = '%s/%s' % (
                vm_with_volume['metadata']['namespace'],
                vm_with_volume['metadata']['name'])
            # make sure VM is one of the owners
            found = False
            for owner in owned_by:
                if (owner['schema'] == 'kubevirt.io.virtualmachine' and
                        expected_owner in owner['refs']):
                    found = True
                    break
            assert found, ('Expecting %s to be in volume %s owners list' % (
                expected_owner, volume['persistentVolumeClaim']['claimName']))

    def test_delete_volume_in_use(self, request, admin_session,
                                  harvester_api_endpoints, vm_with_volume):
        """Deleting a volume that is attached to a VM must be rejected."""
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for volume in volumes:
            # try to delete a volume in 'in-use' state and it should
            # fail
            resp = admin_session.delete(
                harvester_api_endpoints.delete_volume % (
                    volume['persistentVolumeClaim']['claimName']))
            assert resp.status_code not in [200, 201], (
                'Deleting "in-use" volumes should not be permitted: %s' % (
                    resp.content))

    def test_delete_vm_then_volumes(self, request, admin_session,
                                    harvester_api_endpoints,
                                    vm_with_volume, volume):
        """Volumes must survive VM deletion when disks are kept."""
        # delete the VM but keep the volumes
        utils.delete_vm(request, admin_session, harvester_api_endpoints,
                        vm_with_volume, remove_all_disks=False)
        volumes = vm_with_volume['spec']['template']['spec']['volumes']
        for data_vol in volumes:
            volume_name = data_vol['persistentVolumeClaim']['claimName']
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                volume_name))
            assert resp.status_code == 200, (
                'Failed to lookup data volume %s: %s' % (
                    volume_name, resp.content))
            # now cleanup the volume
            utils.delete_volume_by_name(request, admin_session,
                                        harvester_api_endpoints, volume_name)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_s3(request, admin_session,
                             harvester_api_endpoints, basic_vm,
                             backuptarget_s3):
    """Back up a single VM to an S3 backup target.

    Currently skipped pending the linked harvester issue.
    """
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_s3,
                                             name=backup_name,
                                             vm_name=vm_name)
    finally:
        # remove both the VM and its backup unless cleanup was disabled
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_s3, backup_json)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_nfs(request, admin_session,
                              harvester_api_endpoints, basic_vm,
                              backuptarget_nfs):
    """Back up a single VM to an NFS backup target."""
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_nfs,
                                             name=backup_name,
                                             vm_name=vm_name)
    finally:
        # remove both the VM and its backup unless cleanup was disabled
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_nfs, backup_json)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_s3(request, admin_session,
                                       harvester_api_endpoints,
                                       vm_with_volume,
                                       backuptarget_s3):
    """Backup and restore a migrated VM against an S3 backup target.

    Currently skipped pending the linked harvester issue.
    """
    backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget_s3)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_nfs(request, admin_session,
                                        harvester_api_endpoints,
                                        vm_with_volume,
                                        backuptarget_nfs):
    """Backup and restore a migrated VM against an NFS backup target.

    Currently skipped pending the linked harvester issue.
    """
    backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget_nfs)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_nfs(request, admin_session,
                                harvester_api_endpoints, basic_vm,
                                backuptarget_nfs):
    """Update a backup's YAML against an NFS backup target."""
    update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget_nfs)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_s3(request, admin_session,
                               harvester_api_endpoints, basic_vm,
                               backuptarget_s3):
    """Update a backup's YAML against an S3 backup target.

    Currently skipped pending the linked harvester issue.
    """
    update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget_s3)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_restore_backup_vm_on(request, admin_session,
                              harvester_api_endpoints,
                              basic_vm, backuptarget_nfs):
    """Restoring a backup into a running VM must be rejected.

    The restore API is expected to refuse and tell the caller to stop
    the VM first.
    """
    # make sure the VM instance is successfully created
    vm_instance_json = utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    vm_name = vm_instance_json['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget_nfs,
                                             name=backup_name,
                                             vm_name=vm_name)
        restore_name = utils.random_name()
        request_json = utils.get_json_object_from_template(
            'basic_vm_restore',
            name=restore_name,
            vm_name=vm_name,
            backup_name=backup_name
        )
        # attempt the restore while the VM is still running
        resp = admin_session.post(
            harvester_api_endpoints.create_vm_restore,
            json=request_json)
        content = resp.json()
        # API should refuse and ask the user to stop the VM first
        assert 'please stop the VM' in content['message']
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget_nfs, backup_json)
| true | true |
f7309124ed3ffe0189fdb1c51cf417808d3ed9f7 | 1,726 | py | Python | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains a method to load MNIST data. Based on David Larson's work at:
http://g.sweyla.com/blog/2012/mnist-numpy/a
"""
import numpy
import os, struct
from array import array as pyarray
def load_mnist(dataset="training", digits=numpy.arange(10), path="mnist"):
    """Load MNIST images and labels from the IDX files on disk.

    Based on David Larson's loader: http://g.sweyla.com/blog/2012/mnist-numpy/a

    :param dataset: 'training' or 'testing' depending on the files to load.
    :param digits: iterable of digit labels to keep (default: all ten).
        The default array is never mutated, so sharing it across calls is safe.
    :param path: path to the directory containing the IDX files.
    :return: (images, labels) -- a uint8 array of shape (N, rows, cols) and
        an int8 array of shape (N, 1), where N is the number of kept samples.
    :raises ValueError: if ``dataset`` is neither 'training' nor 'testing'.
    """
    if dataset == "training":
        fname_img = os.path.join(path, 'train-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    # Context managers guarantee the files are closed even if parsing fails;
    # the original open()/close() pairs leaked the handles on error.
    with open(fname_lbl, 'rb') as flbl:
        magic_nr, size = struct.unpack(">II", flbl.read(8))
        lbl = pyarray("b", flbl.read())
    with open(fname_img, 'rb') as fimg:
        magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = pyarray("B", fimg.read())

    wanted = set(digits)  # O(1) membership tests instead of scanning digits
    ind = [k for k in range(size) if lbl[k] in wanted]
    N = len(ind)

    images = numpy.zeros((N, rows, cols), dtype=numpy.uint8)
    labels = numpy.zeros((N, 1), dtype=numpy.int8)
    for i, k in enumerate(ind):
        images[i] = numpy.array(
            img[k * rows * cols:(k + 1) * rows * cols]).reshape((rows, cols))
        labels[i] = lbl[k]
    return images, labels
| 32.566038 | 115 | 0.634994 |
import numpy
import os, struct
from array import array as pyarray
def load_mnist(dataset="training", digits=numpy.arange(10), path="mnist"):
if dataset == "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset == "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = pyarray("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = pyarray("B", fimg.read())
fimg.close()
ind = [ k for k in range(size) if lbl[k] in digits ]
N = len(ind)
images = numpy.zeros((N, rows, cols), dtype=numpy.uint8)
labels = numpy.zeros((N, 1), dtype=numpy.int8)
for i in range(len(ind)):
images[i] = numpy.array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))
labels[i] = lbl[ind[i]]
return images, labels
| true | true |
f73092536f6b16f929d0669d98b5a821729b2f1a | 4,328 | py | Python | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 6 | 2021-04-06T19:50:52.000Z | 2022-01-19T17:42:33.000Z | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 491 | 2021-01-20T01:10:00.000Z | 2022-03-31T19:30:48.000Z | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 2 | 2021-03-12T02:33:55.000Z | 2021-03-16T20:18:43.000Z | # -*- coding: utf-8 -*-
"""
notification resources utils
-------------
"""
import json
from tests import utils as test_utils
# Base REST path of the notifications API under test.
PATH = '/api/v1/notifications/'
# Keys expected in a single-notification (detail) response.
EXPECTED_NOTIFICATION_KEYS = {
    'guid',
    'is_read',
    'message_type',
    'sender_name',
    'sender_guid',
    'message_values',
}
# Keys expected for each entry in a list response (no message_values).
EXPECTED_LIST_KEYS = {
    'guid',
    'is_read',
    'message_type',
    'sender_name',
    'sender_guid',
}
def create_notification(
    flask_app_client, user, data, expected_status_code=200, expected_error=''
):
    """POST a new notification and validate the response.

    When ``user`` is given, the request runs in a logged-in session with
    the ``notifications:write`` scope; otherwise it is sent anonymously.
    For 4xx expectations the response message must equal
    ``expected_error``.  Returns the raw Flask response.
    """
    if user:
        with flask_app_client.login(user, auth_scopes=('notifications:write',)):
            response = flask_app_client.post(
                '%s' % PATH,
                content_type='application/json',
                data=json.dumps(data),
            )
    else:
        # anonymous request -- no authentication at all
        response = flask_app_client.post(
            '%s' % PATH,
            content_type='application/json',
            data=json.dumps(data),
        )
    if expected_status_code == 200:
        test_utils.validate_dict_response(response, 200, {'guid'})
    elif 400 <= expected_status_code < 500:
        # client errors must carry the exact expected error message
        test_utils.validate_dict_response(
            response, expected_status_code, {'status', 'message'}
        )
        assert response.json['message'] == expected_error, response.json['message']
    else:
        test_utils.validate_dict_response(
            response, expected_status_code, {'status', 'message'}
        )
    return response
def patch_notification(
    flask_app_client,
    notification_guid,
    user,
    data,
    expected_status_code=200,
    expected_error=None,
):
    """PATCH a notification as ``user`` and validate the response.

    ``data`` is a list of JSON-patch operations.  Returns the response.
    """
    return test_utils.patch_via_flask(
        flask_app_client,
        user,
        scopes='notifications:write',
        path=f'{PATH}{notification_guid}',
        data=data,
        expected_status_code=expected_status_code,
        response_200={'guid'},
        expected_error=expected_error,
    )
def read_notification(
    flask_app_client, user, notification_guid, expected_status_code=200
):
    """GET a single notification and validate its response keys."""
    return test_utils.get_dict_via_flask(
        flask_app_client,
        user,
        scopes='notifications:read',
        path=f'{PATH}{notification_guid}',
        expected_status_code=expected_status_code,
        response_200=EXPECTED_NOTIFICATION_KEYS,
    )
def read_all_notifications(flask_app_client, user, expected_status_code=200):
    """GET the full notification list visible to ``user``."""
    return test_utils.get_list_via_flask(
        flask_app_client,
        user,
        scopes='notifications:read',
        path=PATH,
        expected_status_code=expected_status_code,
        expected_fields=EXPECTED_LIST_KEYS,
    )
def read_all_unread_notifications(flask_app_client, user, expected_status_code=200):
    """GET only the unread notifications visible to ``user``."""
    return test_utils.get_list_via_flask(
        flask_app_client,
        user,
        scopes='notifications:read',
        path=f'{PATH}unread',
        expected_status_code=expected_status_code,
        expected_fields=EXPECTED_LIST_KEYS,
    )
def get_unread_notifications(json_data, from_user_guid, notification_type):
    """Return the unread notifications of one type from one sender.

    Filters ``json_data`` (a list of notification dicts) down to entries
    whose ``message_type`` and ``sender_guid`` match and whose
    ``is_read`` flag is exactly ``False``.
    """
    return [
        notification
        for notification in json_data
        if notification['message_type'] == notification_type
        and notification['sender_guid'] == from_user_guid
        and notification['is_read'] is False
    ]
def mark_notification_as_read(
    flask_app_client, user, notif_guid, expected_status_code=200
):
    """Flip one notification's ``is_read`` flag to True via PATCH."""
    data = [test_utils.patch_replace_op('is_read', True)]
    patch_notification(flask_app_client, notif_guid, user, data, expected_status_code)
def mark_all_notifications_as_read(flask_app_client, user):
    """Mark every currently-unread notification for ``user`` as read."""
    unread_notifs = read_all_unread_notifications(flask_app_client, user)
    for notif in unread_notifs.json:
        mark_notification_as_read(flask_app_client, user, notif['guid'])
# Not a traditional util: this deletes every notification in the system. Once
# many notifications are marked as read they cannot be recreated (intentional
# by design), so leftover state can make the tests non-deterministic -- they
# may pass or fail depending on what happened before. This resets that state.
def delete_all_notifications(db):
    """Hard-delete every Notification row directly via the DB session."""
    # imported lazily to avoid pulling the app models in at module import time
    from app.modules.notifications.models import Notification
    notifs = Notification.query.all()
    for notif in notifs:
        with db.session.begin(subtransactions=True):
            db.session.delete(notif)
| 29.243243 | 106 | 0.676063 |
import json
from tests import utils as test_utils
PATH = '/api/v1/notifications/'
EXPECTED_NOTIFICATION_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
'message_values',
}
EXPECTED_LIST_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
}
def create_notification(
flask_app_client, user, data, expected_status_code=200, expected_error=''
):
if user:
with flask_app_client.login(user, auth_scopes=('notifications:write',)):
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
else:
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid'})
elif 400 <= expected_status_code < 500:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
assert response.json['message'] == expected_error, response.json['message']
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
return response
def patch_notification(
flask_app_client,
notification_guid,
user,
data,
expected_status_code=200,
expected_error=None,
):
return test_utils.patch_via_flask(
flask_app_client,
user,
scopes='notifications:write',
path=f'{PATH}{notification_guid}',
data=data,
expected_status_code=expected_status_code,
response_200={'guid'},
expected_error=expected_error,
)
def read_notification(
flask_app_client, user, notification_guid, expected_status_code=200
):
return test_utils.get_dict_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}{notification_guid}',
expected_status_code=expected_status_code,
response_200=EXPECTED_NOTIFICATION_KEYS,
)
def read_all_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=PATH,
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def read_all_unread_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}unread',
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def get_unread_notifications(json_data, from_user_guid, notification_type):
return list(
filter(
lambda notif: notif['message_type'] == notification_type
and notif['sender_guid'] == from_user_guid
and notif['is_read'] is False,
json_data,
)
)
def mark_notification_as_read(
flask_app_client, user, notif_guid, expected_status_code=200
):
data = [test_utils.patch_replace_op('is_read', True)]
patch_notification(flask_app_client, notif_guid, user, data, expected_status_code)
def mark_all_notifications_as_read(flask_app_client, user):
unread_notifs = read_all_unread_notifications(flask_app_client, user)
for notif in unread_notifs.json:
mark_notification_as_read(flask_app_client, user, notif['guid'])
def delete_all_notifications(db):
from app.modules.notifications.models import Notification
notifs = Notification.query.all()
for notif in notifs:
with db.session.begin(subtransactions=True):
db.session.delete(notif)
| true | true |
f730926324462a884c4e33f7f7f63d6260891d16 | 3,685 | py | Python | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
========================
Notebook styled examples
========================
The gallery is capable of transforming python files into reStructuredText files
with a notebook structure. For this to be used you need to respect some syntax
rules.
It makes a lot of sense to contrast this output rst file with the
:download:`original python script <plot_notebook.py>` to get a better feeling
for the necessary file structure.
Anything before the python script docstring is ignored by sphinx-gallery and
will not appear in the rst file, nor will it be executed.
This python docstring requires a reStructuredText title to name the file and
correctly build the reference links.
Once you close the docstring you will be writing python code. This code gets
executed by sphinx-gallery, which shows the plots and attaches the generating code.
Nevertheless you can break your code into blocks and give the rendered file
a notebook style. In this case you have to include a code comment breaker
a line of at least 20 hashes, and then every comment line starts with a new hash.
As in this example we start by first writing this module
style docstring, then for the first code block we write the example file author
and script license continued by the import modules instructions.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in
# html format. Keep in mind to always keep your comments together by
# starting each line with a comment hash. That means that to break a
# paragraph you still need to put a hash on the blank line.
#
# In this example the next block of code produces some plottable data. Code is
# executed, figure is saved and then code is presented next, followed by the
# inlined figure.
# Build a 300x300 grid over [-pi, pi] x [-pi, pi] and evaluate a 2-D cosine.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new python string. This
# time it introduces the next code block, which generates 2 separate figures.
# Same data rendered twice, one figure per colormap.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There's some subtle differences between rendered html rendered comment
# strings and code comment strings which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
    """Dummy function to make sure docstrings don't get rendered as text"""
    pass  # intentionally empty; only the docstring matters for this demo
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
| 37.989691 | 79 | 0.690638 |
import numpy as np
import matplotlib.pyplot as plt
| true | true |
f73092a62dc94f15d8005c38a7f97315b3879895 | 2,582 | py | Python | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | null | null | null | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | 1 | 2021-03-02T07:15:43.000Z | 2021-03-02T07:15:43.000Z | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | 1 | 2021-03-23T10:00:56.000Z | 2021-03-23T10:00:56.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from openstack_cli.commands.conf.keys.list import _keys_list
from openstack_cli.modules.apputils.terminal import Console
from openstack_cli.core.config import Configuration
from openstack_cli.modules.apputils.discovery import CommandMetaInfo
from openstack_cli.modules.openstack import VMKeypairItemValue, OpenStack
# Command registration metadata consumed by the apputils discovery framework.
__module__ = CommandMetaInfo("export", "Export ssh keys to disk")
__args__ = __module__.arg_builder\
  .add_default_argument("name", str, "Name of the key to be exported", default="")
def _keys_export(conf: Configuration, ostack: OpenStack, name: str):
  """Export the named ssh key pair to files in the current directory.

  When ``name`` is empty, the configured keys are listed and the user is
  asked to pick one.  Whichever halves of the key exist are written to
  ``<name>.public.key`` / ``<name>.private.key`` in the working directory.
  Errors are reported via the console instead of raising.
  """
  if not name:
    _keys = _keys_list(conf, ostack, True)
    item = Console.ask("Select key to export", _type=int)
    # reject out-of-range AND negative answers; the original `item > len-1`
    # check let a negative number silently index from the end of the list
    if item is None or not 0 <= item < len(_keys):
      Console.print_warning("Invalid selection, aborting")
      return
    name = _keys[item].name

  _key: VMKeypairItemValue
  try:
    _key = conf.get_key(name)
  except KeyError as e:
    Console.print_error(str(e))
    return

  d = os.getcwd()

  def _write_key(kind: str, content: str):
    """Write one half ('public' or 'private') of the pair; report IO errors."""
    file_path = os.path.join(d, f"{_key.name}.{kind}.key")
    try:
      with open(file_path, "w+", encoding="UTF-8") as f:
        f.write(content)
      Console.print(f"{kind.capitalize()} key: {file_path}")
    except IOError as e:
      Console.print_error(f"{_key.name}({kind}): {str(e)}")

  if _key.public_key:
    _write_key("public", _key.public_key)
  if _key.private_key:
    _write_key("private", _key.private_key)
def __init__(conf: Configuration, name: str):
  """Command entry point: build an OpenStack client and run the export."""
  ostack = OpenStack(conf)
  _keys_export(conf, ostack, name)
| 36.366197 | 82 | 0.72773 |
import os
from openstack_cli.commands.conf.keys.list import _keys_list
from openstack_cli.modules.apputils.terminal import Console
from openstack_cli.core.config import Configuration
from openstack_cli.modules.apputils.discovery import CommandMetaInfo
from openstack_cli.modules.openstack import VMKeypairItemValue, OpenStack
__module__ = CommandMetaInfo("export", "Export ssh keys to disk")
__args__ = __module__.arg_builder\
.add_default_argument("name", str, "Name of the key to be exported", default="")
def _keys_export(conf: Configuration, ostack: OpenStack, name: str):
if not name:
_keys = _keys_list(conf, ostack, True)
item = Console.ask("Select key to export", _type=int)
if item is None or item > len(_keys) - 1:
Console.print_warning("Invalid selection, aborting")
return
name = _keys[item].name
_key: VMKeypairItemValue
try:
_key = conf.get_key(name)
except KeyError as e:
Console.print_error(str(e))
return
d = os.getcwd()
_public_file_path = os.path.join(d, f"{_key.name}.public.key")
_private_file_path = os.path.join(d, f"{_key.name}.private.key")
if _key.public_key:
try:
with open(_public_file_path, "w+", encoding="UTF-8") as f:
f.write(_key.public_key)
Console.print(f"Public key: {_public_file_path}")
except IOError as e:
Console.print_error(f"{_key.name}(public): {str(e)}")
if _key.private_key:
try:
with open(_private_file_path, "w+", encoding="UTF-8") as f:
f.write(_key.private_key)
Console.print(f"Private key: {_private_file_path}")
except IOError as e:
Console.print_error(f"{_key.name}(private): {str(e)}")
def __init__(conf: Configuration, name: str):
ostack = OpenStack(conf)
_keys_export(conf, ostack, name)
| true | true |
f73093a92dabd28a9639dd0153472843f0f05b2b | 4,305 | py | Python | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class AccessoryLimitVo:
    """Model describing attachment-upload limits (generated SDK class).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'limit_count': 'str',
        'limit_size': 'str',
        'limit_file_type': 'str'
    }
    attribute_map = {
        'limit_count': 'limit_count',
        'limit_size': 'limit_size',
        'limit_file_type': 'limit_file_type'
    }
    def __init__(self, limit_count=None, limit_size=None, limit_file_type=None):
        """AccessoryLimitVo - a model defined in huaweicloud sdk"""
        self._limit_count = None
        self._limit_size = None
        self._limit_file_type = None
        self.discriminator = None
        if limit_count is not None:
            self.limit_count = limit_count
        if limit_size is not None:
            self.limit_size = limit_size
        if limit_file_type is not None:
            self.limit_file_type = limit_file_type
    @property
    def limit_count(self):
        """Gets the limit_count of this AccessoryLimitVo.

        Maximum number of attachment files allowed.

        :return: The limit_count of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_count
    @limit_count.setter
    def limit_count(self, limit_count):
        """Sets the limit_count of this AccessoryLimitVo.

        Maximum number of attachment files allowed.

        :param limit_count: The limit_count of this AccessoryLimitVo.
        :type: str
        """
        self._limit_count = limit_count
    @property
    def limit_size(self):
        """Gets the limit_size of this AccessoryLimitVo.

        Maximum file size; the unit is MB.

        :return: The limit_size of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_size
    @limit_size.setter
    def limit_size(self, limit_size):
        """Sets the limit_size of this AccessoryLimitVo.

        Maximum file size; the unit is MB.

        :param limit_size: The limit_size of this AccessoryLimitVo.
        :type: str
        """
        self._limit_size = limit_size
    @property
    def limit_file_type(self):
        """Gets the limit_file_type of this AccessoryLimitVo.

        Allowed file types (restriction on attachment file type).

        :return: The limit_file_type of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_file_type
    @limit_file_type.setter
    def limit_file_type(self, limit_file_type):
        """Sets the limit_file_type of this AccessoryLimitVo.

        Allowed file types (restriction on attachment file type).

        :param limit_file_type: The limit_file_type of this AccessoryLimitVo.
        :type: str
        """
        self._limit_file_type = limit_file_type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # sensitive attributes are masked instead of serialized
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AccessoryLimitVo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.090909 | 80 | 0.571196 |
import pprint
import re
import six
class AccessoryLimitVo:
sensitive_list = []
openapi_types = {
'limit_count': 'str',
'limit_size': 'str',
'limit_file_type': 'str'
}
attribute_map = {
'limit_count': 'limit_count',
'limit_size': 'limit_size',
'limit_file_type': 'limit_file_type'
}
def __init__(self, limit_count=None, limit_size=None, limit_file_type=None):
self._limit_count = None
self._limit_size = None
self._limit_file_type = None
self.discriminator = None
if limit_count is not None:
self.limit_count = limit_count
if limit_size is not None:
self.limit_size = limit_size
if limit_file_type is not None:
self.limit_file_type = limit_file_type
@property
def limit_count(self):
return self._limit_count
@limit_count.setter
def limit_count(self, limit_count):
self._limit_count = limit_count
@property
def limit_size(self):
return self._limit_size
@limit_size.setter
def limit_size(self, limit_size):
self._limit_size = limit_size
@property
def limit_file_type(self):
return self._limit_file_type
@limit_file_type.setter
def limit_file_type(self, limit_file_type):
self._limit_file_type = limit_file_type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AccessoryLimitVo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f730942a921d93e237889ddabf84129fe2aacd58 | 26 | py | Python | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | 1 | 2022-03-24T18:18:29.000Z | 2022-03-24T18:18:29.000Z | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | null | null | null | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | null | null | null | while True:
print("e") | 13 | 14 | 0.576923 | while True:
print("e") | true | true |
f7309493f11a75d980777cffc4ea62f00325b0e2 | 23,031 | py | Python | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 1 | 2020-04-07T10:09:00.000Z | 2020-04-07T10:09:00.000Z | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 13 | 2020-05-08T11:14:37.000Z | 2020-05-12T10:03:53.000Z | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 22 | 2020-02-10T09:17:20.000Z | 2020-07-10T10:33:26.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "epgc.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("enablezeromint=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser�' + str(n), 'rpcpass�' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "epgc.conf")):
with open(os.path.join(datadir, "epgc.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def connect_nodes_clique(nodes):
l = len(nodes)
for a in range(l):
for b in range(a, l):
connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
### EPGC specific utils ###
vZC_DENOMS = [1, 5, 10, 50, 100, 500, 1000, 5000]
DEFAULT_FEE = 0.01
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
"""Return Decimal from float for equality checks against rpc outputs"""
return Decimal("{:0.8f}".format(x))
| 38.00495 | 142 | 0.649516 |
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
nError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
t_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "epgc.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("enablezeromint=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser�' + str(n), 'rpcpass�' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "epgc.conf")):
with open(os.path.join(datadir, "epgc.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
    """Return the BIP9 softfork status entry for `key` reported by `node`."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the same mock time `t` on every node in `nodes`."""
    for n in nodes:
        n.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect `from_connection` from every peer whose subversion string
    identifies it as test node `node_num`, then wait (up to 5 seconds)
    until those peers are gone from getpeerinfo."""
    for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(addr)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Connect `from_connection` to local test node `node_num` and block
    until the version handshake with every peer has completed."""
    ip_port = ":".join(["127.0.0.1", str(p2p_port(node_num))])
    from_connection.addnode(ip_port, "onetry")
    # Wait for the handshake on all peers so callers do not race with
    # transaction relaying.
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def connect_nodes_clique(nodes):
    """Bidirectionally connect every pair of distinct nodes (full mesh).

    Fix: the inner loop previously started at `a`, which also issued a
    connect_nodes_bi(nodes, a, a) call, i.e. attempted to connect each
    node to its own port; starting at `a + 1` connects only distinct
    pairs.
    """
    count = len(nodes)
    for a in range(count):
        for b in range(a + 1, count):
            connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """Wait until every connection reports the same tip at the initial max
    height.

    Raises AssertionError when all nodes reach the height but report
    different block hashes, or when `timeout` seconds elapse first.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    # NOTE(review): fixed 5s settle delay before sampling heights —
    # presumably to let in-flight blocks land first; confirm it is needed.
    time.sleep(5)
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # waitforblockheight blocks for up to `wait` seconds (RPC takes ms).
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """Poll until every connection reports the same best block hash.

    Checks every `wait` seconds and raises AssertionError once `timeout`
    seconds pass without agreement.
    """
    remaining = timeout
    while remaining > 0:
        hashes = [conn.getbestblockhash() for conn in rpc_connections]
        if len(set(hashes)) <= 1:
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """Poll until every connection reports an identical raw mempool.

    `flush_scheduler` is accepted for API compatibility but not used in
    this implementation.  Raises AssertionError after `timeout` seconds
    without agreement.
    """
    remaining = timeout
    while remaining > 0:
        reference = set(rpc_connections[0].getrawmempool())
        if all(set(conn.getrawmempool()) == reference for conn in rpc_connections[1:]):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build the change-output dict for a transaction.

    When the change exceeds twice the spent amount (amount_out + fee) it
    is split across two fresh addresses, the first half rounded down to
    8 decimal places; otherwise a single change output is used.  Returns
    a {address: amount} dict (possibly empty when there is no change).
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > 2 * spent:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        first_address = from_node.getnewaddress()
        outputs[first_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send `amount` between two randomly chosen nodes with a random fee.

    The fee is min_fee plus a random multiple (0..fee_variants) of
    fee_increment.  Returns (txid, signed_hex, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def create_confirmed_utxos(fee, node, count):
    """Ensure `node` owns at least `count` confirmed UTXOs and return them.

    Mines blocks to obtain spendable coins, then repeatedly splits
    existing UTXOs into two equal halves (paying `fee` per split) until
    the target count is reached, finally mining until the mempool is
    empty so every split is confirmed.
    """
    # Mine enough blocks for spendable coins; the +101 presumably covers
    # the 100-block coinbase maturity — TODO confirm.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        # Split one UTXO into two halves (minus the fee).
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = float(satoshi_round(send_value / 2))
        outputs[addr2] = float(satoshi_round(send_value / 2))
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm every split transaction.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
def gen_return_txouts():
    """Return a hex blob of 128 large OP_RETURN outputs used to pad a
    raw transaction up to a large size.

    Each output carries a zero value and a 516-byte script: OP_RETURN,
    OP_PUSHDATA2 with a 512-byte all-0x01 payload.
    """
    # 6a = OP_RETURN, 4d0200 = OP_PUSHDATA2 pushing 512 (0x0200 LE) bytes.
    script_pubkey = "6a4d0200" + "01" * 512
    # "81" leads the blob; per the original comment the txouts are spliced
    # in before the change output (0x81 = 129 = 128 + change, presumably).
    parts = ["81"]
    for _ in range(128):
        # 8-byte zero amount, fd0402 = var-int 516 = script length.
        parts.append("0000000000000000" + "fd0402" + script_pubkey)
    return "".join(parts)
def create_tx(node, coinbase, to_address, amount):
    """Create and sign a 1-in/1-out raw tx spending vout 0 of `coinbase`
    to `to_address`; returns the signed hex (asserts signing completed)."""
    raw_tx = node.createrawtransaction(
        [{"txid": coinbase, "vout": 0}], {to_address: amount})
    sign_result = node.signrawtransaction(raw_tx)
    assert_equal(sign_result["complete"], True)
    return sign_result["hex"]
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Broadcast `num` oversized transactions padded with the `txouts` blob.

    Pops one UTXO per transaction, splices the OP_RETURN padding outputs
    into the raw hex, signs with sighash type "NONE" and sends.  Returns
    the list of txids.  Mutates `utxos` (pops entries).
    """
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = float(satoshi_round(change))
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the padding into the serialized tx hex: keep the first 92
        # chars, drop 2 (presumably the 1-byte output count, superseded by
        # the count inside `txouts`) — TODO confirm against tx format.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 oversized transactions and mine it.

    A caller-supplied `utxos` list is used (and mutated in place); it is
    refreshed from the node when it holds fewer than 14 entries.
    """
    num_txs = 14
    txouts = gen_return_txouts()
    spendable = utxos if utxos is not None else []
    if len(spendable) < num_txs:
        spendable.clear()
        spendable.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, spendable, num_txs, fee=fee)
    node.generate(1)
T_FEE = 0.01
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
return Decimal("{:0.8f}".format(x))
| true | true |
f7309534df4a2fc8176f000b46935824e18a7d45 | 6,764 | py | Python | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Kandleria vitulina.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:43:58.503677
The undirected graph Kandleria vitulina has 2015 nodes and 162591 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08013 and has 6 connected components, where the component with most
nodes has 2004 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 137, the mean node degree is 161.38, and
the node degree mode is 3. The top 5 most central nodes are 1410658.JHWI01000027_gene1818
(degree 942), 1410658.JHWI01000005_gene908 (degree 696), 1410658.JHWI01000008_gene614
(degree 672), 1410658.JHWI01000040_gene2018 (degree 626) and 1410658.JHWI01000014_gene961
(degree 598).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import KandleriaVitulina
# Then load the graph
graph = KandleriaVitulina()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def KandleriaVitulina(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Kandleria vitulina STRING graph.

    The graph is automatically downloaded from the STRING repository and
    cached under `cache_path`.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
    verbose: int = 2,
        Whether to show loading bars during retrieval and building.
    cache_path: str = "graphs/string",
        Where to store the downloaded graph files.
    additional_graph_kwargs: Dict,
        Additional keyword arguments forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Kandleria vitulina graph.

    References
    ---------------------
    Szklarczyk et al., "STRING v11: protein--protein association networks
    with increased coverage", Nucleic Acids Research, 47(D1), 2019.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="KandleriaVitulina",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.413613 | 223 | 0.706978 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def KandleriaVitulina(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Kandleria vitulina graph, automatically
    retrieved from the STRING repository and cached under `cache_path`.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
    verbose: int = 2,
        Whether to show loading bars during retrieval and building.
    cache_path: str = "graphs/string",
        Where to store the downloaded graph files.
    additional_graph_kwargs: Dict,
        Additional keyword arguments forwarded to the graph constructor.
    """
    return AutomaticallyRetrievedGraph(
        graph_name="KandleriaVitulina",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| true | true |
f73095a96bda44dd33e649a8f9b2f81b0f8a2d5e | 108 | py | Python | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | class NodeSocketInterfaceIntUnsigned:
default_value = None
max_value = None
min_value = None
| 13.5 | 37 | 0.722222 | class NodeSocketInterfaceIntUnsigned:
default_value = None
max_value = None
min_value = None
| true | true |
f7309823f58463b82e823f3fd4ecc77467f835fd | 11,759 | py | Python | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 183 | 2015-01-11T13:01:01.000Z | 2022-02-08T04:45:33.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 13 | 2015-05-12T17:39:42.000Z | 2018-07-29T18:01:38.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 166 | 2015-01-28T18:05:55.000Z | 2022-02-08T04:45:34.000Z | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
# Multiple Columns
def test_rolling_sum_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_sum(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])
def test_rolling_mean_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_mean(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)
def test_rolling_median_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_median(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])
def test_rolling_min_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_min(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])
def test_rolling_max_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_max(3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])
def test_rolling_std_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_std(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)
def test_rolling_var_on_multi_cols(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
col = 'rolling_var(3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.\
engineer('concat(c_1, c_2)').\
engineer('concat(c_1, n_2)').\
engineer('mult(n_2, n_3)').\
engineer('lg(n_2)').\
engineer('pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_call_semi_col_sep(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_chaining_single_with_arr_arg(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
], 'object')))
def test_long_method_chains(self):
df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
df2.engineer('mult(n_1,n_2);pow(n_1,3)')
df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort());
np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values);
np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)']);
np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))']);
np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))']);
np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))']);
| 46.478261 | 133 | 0.541628 | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
def test_concat(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
df.engineer('concat(c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
np.array(['ad', 'be', 'cf'], 'object')))
def test_concat_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
df.engineer('concat(c_3, c_1, c_2)')
self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
np.array(['had', 'ibe', 'jcf'], 'object')))
def test_concat_with_numerical_col(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
df.engineer('concat(c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
np.array(['a1', 'b2', 'c3'], 'object')))
def test_concat_with_numerical_col_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
df.engineer('concat(n_3,c_1,n_2)')
self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
np.array(['4a1', '5b2', '6c3'], 'object')))
def test_multiplication(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
np.array([4, 10, 18], long)))
def test_multiplication_3_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('mult(n_2, n_3, n_4)')
self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
np.array([4*7, 80, 18*9], long)))
def test_square_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 1*1, 4*4, 7*7],
['b', 2, 5, 8, 2*2, 5*5, 8*8],
['c', 3, 6, 9, 3*3, 6*6, 9*9],
], 'object'))
def test_square_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('pow(n_3, 2)')
np.testing.assert_array_equal(df.values,
np.array([
['a', 1, 4, 7, 4*4],
['b', 2, 5, 8, 5*5],
['c', 3, 6, 9, 6*6],
], 'object'))
def test_log_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
], 'object')))
def test_log_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('lg(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.log(4)],
['b', 2, 5, 8, math.log(5)],
['c', 3, 6, 9, math.log(6)],
], 'object')))
def test_sqrt_on_whole_data_frame(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt()')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
], 'object')))
def test_sqrt_on_cols(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
df.engineer('sqrt(n_3)')
self.assertTrue(np.array_equal(df.values,
np.array([
['a', 1, 4, 7, math.sqrt(4)],
['b', 2, 5, 8, math.sqrt(5)],
['c', 3, 6, 9, math.sqrt(6)],
], 'object')))
def test_rolling_sum_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_sum(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])
def test_rolling_mean_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_mean(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)
def test_rolling_median_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_median(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])
def test_rolling_min_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_min(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])
def test_rolling_max_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_max(n_1,3)'
df.engineer(col)
np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])
def test_rolling_std_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_std(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)
def test_rolling_var_on_single_col(self):
df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
col = 'rolling_var(n_1,3)'
df.engineer(col)
np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)
def test_rolling_sum_on_multi_cols(self):
    """Rolling sum applied to every numeric column, window size 3."""
    # NOTE(review): assumes a project extension adds DataFrame.engineer —
    # confirm against the library under test.
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_sum(3)')
    np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])


def test_rolling_mean_on_multi_cols(self):
    """Rolling mean applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_mean(3)')
    np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16],
                               df['n_rolling_mean(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666],
                               df['n_rolling_mean(n_2,3)'], rtol=1e-3)


def test_rolling_median_on_multi_cols(self):
    """Rolling median applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_median(3)')
    np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])


def test_rolling_min_on_multi_cols(self):
    """Rolling minimum applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_min(3)')
    np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])


def test_rolling_max_on_multi_cols(self):
    """Rolling maximum applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_max(3)')
    np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])


def test_rolling_std_on_multi_cols(self):
    """Rolling standard deviation applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_std(3)')
    np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371],
                               df['n_rolling_std(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547],
                               df['n_rolling_std(n_2,3)'], rtol=1e-3)


def test_rolling_var_on_multi_cols(self):
    """Rolling variance applied to every numeric column, window size 3."""
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    df.engineer('rolling_var(3)')
    np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268],
                               df['n_rolling_var(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333],
                               df['n_rolling_var(n_2,3)'], rtol=1e-3)
def test_method_chaining(self):
    """engineer() returns the frame itself, so calls can be chained."""
    # NOTE(review): assumes a project extension adds DataFrame.engineer —
    # confirm against the library under test.
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                       'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    (df.engineer('concat(c_1, c_2)')
       .engineer('concat(c_1, n_2)')
       .engineer('mult(n_2, n_3)')
       .engineer('lg(n_2)')
       .engineer('pow(n_3, 2)'))
    self.assertTrue(np.array_equal(df.values,
        np.array([
            ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
            ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
            ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))


def test_chaining_single_call_semi_col_sep(self):
    """A single engineer() call accepts multiple ';'-separated specs."""
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                       'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
    self.assertTrue(np.array_equal(df.values,
        np.array([
            ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
            ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
            ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))


def test_chaining_single_with_arr_arg(self):
    """engineer() also accepts a list of specs."""
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                       'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
    self.assertTrue(np.array_equal(df.values,
        np.array([
            ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
            ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
            ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))


def test_long_method_chains(self):
    """Nested feature specs expand to the same columns as building the
    same features step by step."""
    df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
    df2.engineer('mult(n_1,n_2);pow(n_1,3)')
    df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
    df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
    # ndarray.sort() sorts in place and returns None, so the original
    # comparison was vacuous (None == None); compare sorted copies instead.
    np.testing.assert_array_equal(np.sort(df1.columns.values), np.sort(df2.columns.values))
    np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values)
    np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])
    np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])
    np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))'])
    np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'],
                                  df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])
| true | true |
f73099b5d58d809058597f4eeb4632505a3407c8 | 404 | py | Python | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
] | 5 | 2016-09-08T08:33:47.000Z | 2020-02-10T12:31:15.000Z | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
] | 505 | 2016-08-09T07:41:03.000Z | 2021-02-08T20:26:46.000Z | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
class AzureBlobUrlModel(object):
    """Value object describing the location of a blob in Azure storage."""

    def __init__(self, storage_name, container_name, blob_name):
        """
        :param storage_name: (str) Azure storage name
        :param container_name: (str) Azure container name
        :param blob_name: (str) Azure Blob name
        """
        self.storage_name = storage_name
        self.container_name = container_name
        self.blob_name = blob_name
class AzureBlobUrlModel(object):
    """Location of a blob inside an Azure storage account: the account,
    the container within it, and the blob's name."""

    def __init__(self, storage_name, container_name, blob_name):
        """Store the three path components as plain string attributes."""
        self.storage_name = storage_name
        self.container_name = container_name
        self.blob_name = blob_name
| true | true |
f7309b8ab0ec6cc5672805a9ee0b86213e917ba4 | 964 | py | Python | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 5 | 2018-07-02T13:18:59.000Z | 2019-10-14T04:55:31.000Z | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 1 | 2019-10-16T20:54:25.000Z | 2019-10-16T20:54:25.000Z | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 1 | 2019-09-03T05:29:41.000Z | 2019-09-03T05:29:41.000Z | """Default configurations for various items in the test framework.
This module imports the following classes:
:class:`webdriver_test_tools.config.browser.BrowserConfig`
:class:`webdriver_test_tools.config.browser.BrowserStackConfig`
:class:`webdriver_test_tools.config.projectfiles.ProjectFilesConfig`
:class:`webdriver_test_tools.config.site.SiteConfig`
:class:`webdriver_test_tools.config.test.TestSuiteConfig`
:class:`webdriver_test_tools.config.webdriver.WebDriverConfig`
.. toctree::
webdriver_test_tools.config.browser
webdriver_test_tools.config.browserstack
webdriver_test_tools.config.projectfiles
webdriver_test_tools.config.site
webdriver_test_tools.config.test
webdriver_test_tools.config.webdriver
"""
from .projectfiles import ProjectFilesConfig
from .site import SiteConfig
from .test import TestSuiteConfig
from .webdriver import WebDriverConfig
from .browser import BrowserConfig, BrowserStackConfig
| 35.703704 | 72 | 0.823651 | from .projectfiles import ProjectFilesConfig
from .site import SiteConfig
from .test import TestSuiteConfig
from .webdriver import WebDriverConfig
from .browser import BrowserConfig, BrowserStackConfig
| true | true |
f7309bc8a28f4f8cdb7fb8535f464ad2bbe04bfe | 392 | py | Python | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-04-27T14:47:30.000Z | 2020-04-27T14:47:30.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-06-09T15:23:04.000Z | 2020-06-09T15:23:04.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-01-13T17:10:13.000Z | 2020-01-13T17:10:13.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
# Copyright (C) 2018-2019 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
def test_version():
    """Test version import: the package must expose a truthy __version__."""
    from invenio_circulation import __version__
    assert __version__
| 23.058824 | 77 | 0.701531 |
def test_version():
from invenio_circulation import __version__
assert __version__
| true | true |
f7309c92dce2b8723801c5e1bdb0c965492b3e58 | 12,782 | py | Python | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2015 kyuupichan@gmail
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import defaultdict, namedtuple
from math import floor, log10
import struct
from parkbyte import sha256, COIN, TYPE_ADDRESS
from transaction import Transaction
from util import NotEnoughFunds, PrintError, profiler
# A simple deterministic PRNG. Used to deterministically shuffle a
# set of coins - the same set of coins should produce the same output.
# Although choosing UTXOs "randomly" we want it to be deterministic,
# so if sending twice from the same UTXO set we choose the same UTXOs
# to spend. This prevents attacks on users by malicious or stale
# servers.
class PRNG:
    """A simple deterministic PRNG keyed on a SHA-256 stream.

    Used to deterministically shuffle a set of coins: the same UTXO set
    always produces the same selection, so repeated spend attempts from
    the same wallet state choose the same coins.  This prevents attacks
    on users by malicious or stale servers.
    """

    def __init__(self, seed):
        self.sha = sha256(seed)
        self.pool = bytearray()

    def get_bytes(self, n):
        """Return the next *n* bytes of the hash keystream."""
        while len(self.pool) < n:
            self.pool.extend(self.sha)
            self.sha = sha256(self.sha)
        result, self.pool = self.pool[:n], self.pool[n:]
        return result

    def random(self):
        """Return a random double in [0, 1)."""
        four = self.get_bytes(4)
        # "<I" (standard-size, little-endian uint32) instead of native "I":
        # the native format's size and byte order are platform dependent,
        # which would break cross-platform determinism of the stream.
        return struct.unpack("<I", four)[0] / 4294967296.0

    def randint(self, start, end):
        """Return a random integer in [start, end)."""
        return start + int(self.random() * (end - start))

    def choice(self, seq):
        """Return a random element of *seq*."""
        return seq[int(self.random() * len(seq))]

    def shuffle(self, x):
        """Shuffle the mutable sequence *x* in place (Fisher-Yates)."""
        # range() works on both Python 2 and 3 (xrange is Py2-only).
        for i in reversed(range(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = int(self.random() * (i + 1))
            x[i], x[j] = x[j], x[i]
Bucket = namedtuple('Bucket', ['desc', 'size', 'value', 'coins'])


def strip_unneeded(bkts, sufficient_funds):
    '''Remove buckets that are unnecessary in achieving the spend amount.

    Buckets are sorted by ascending value and the smallest ones are
    dropped for as long as the remainder still satisfies
    *sufficient_funds*.  Returns the (sorted) remaining buckets.
    '''
    bkts = sorted(bkts, key=lambda bkt: bkt.value)
    for i in range(len(bkts)):
        if not sufficient_funds(bkts[i + 1:]):
            return bkts[i:]
    # Shouldn't get here: even the empty selection was sufficient
    return bkts
class CoinChooserBase(PrintError):
    """Shared machinery for coin selection.

    Subclasses define how coins are grouped into buckets (``keys``) and
    how candidate selections are ranked (``penalty_func``); this class
    handles bucketizing, change computation and transaction assembly.
    """

    def keys(self, coins):
        """Return one bucketing key per coin; coins that share a key are
        always spent together.  Must be overridden."""
        raise NotImplementedError

    def bucketize_coins(self, coins):
        """Group *coins* by key and wrap each group in a Bucket."""
        keys = self.keys(coins)
        buckets = defaultdict(list)
        for key, coin in zip(keys, coins):
            buckets[key].append(coin)

        def make_Bucket(desc, coins):
            size = sum(Transaction.estimated_input_size(coin)
                       for coin in coins)
            value = sum(coin['value'] for coin in coins)
            return Bucket(desc, size, value, coins)

        # list() keeps this a real list under Python 3 too (callers index
        # and sort it); identical behaviour under Python 2.
        return list(map(make_Bucket, buckets.keys(), buckets.values()))

    def penalty_func(self, tx):
        """Return a scoring function for candidate bucket sets (lower is
        better).  The default treats all candidates as equal."""
        def penalty(candidate):
            return 0
        return penalty

    def change_amounts(self, tx, count, fee_estimator, dust_threshold):
        """Split the transaction's excess value into up to *count* change
        amounts whose magnitude and precision mimic the real outputs."""
        # Break change up if bigger than max_change
        output_amounts = [o[2] for o in tx.outputs()]
        # Don't split change of less than 0.02 PKB
        max_change = max(max(output_amounts) * 1.25, 0.02 * COIN)
        # Use N change outputs
        for n in range(1, count + 1):
            # How much is left if we add this many change outputs?
            change_amount = max(0, tx.get_fee() - fee_estimator(n))
            if change_amount // n <= max_change:
                break
        # Get a handle on the precision of the output amounts; round our
        # change to look similar
        def trailing_zeroes(val):
            s = str(val)
            return len(s) - len(s.rstrip('0'))
        # Materialized so both min() and max() can consume it (map() is a
        # one-shot iterator under Python 3); identical under Python 2.
        zeroes = list(map(trailing_zeroes, output_amounts))
        min_zeroes = min(zeroes)
        max_zeroes = max(zeroes)
        zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1)
        # Calculate change; randomize it a bit if using more than 1 output
        remaining = change_amount
        amounts = []
        while n > 1:
            average = remaining // n
            amount = self.p.randint(int(average * 0.7), int(average * 1.3))
            precision = min(self.p.choice(zeroes), int(floor(log10(amount))))
            amount = int(round(amount, -precision))
            amounts.append(amount)
            remaining -= amount
            n -= 1
        # Last change output.  Round down to maximum precision but lose
        # no more than 100 satoshis to fees (2dp)
        N = pow(10, min(2, zeroes[0]))
        amount = (remaining // N) * N
        amounts.append(amount)
        assert sum(amounts) <= change_amount
        return amounts

    def change_outputs(self, tx, change_addrs, fee_estimator, dust_threshold):
        """Build change output tuples; amounts below *dust_threshold* are
        dropped (left to the fee) rather than sent."""
        amounts = self.change_amounts(tx, len(change_addrs), fee_estimator,
                                      dust_threshold)
        assert min(amounts) >= 0
        assert len(change_addrs) >= len(amounts)
        # If change is above dust threshold after accounting for the
        # size of the change output, add it to the transaction.
        dust = sum(amount for amount in amounts if amount < dust_threshold)
        amounts = [amount for amount in amounts if amount >= dust_threshold]
        change = [(TYPE_ADDRESS, addr, amount)
                  for addr, amount in zip(change_addrs, amounts)]
        self.print_error('change:', change)
        if dust:
            self.print_error('not keeping dust', dust)
        return change

    def make_tx(self, coins, outputs, change_addrs, fee_estimator,
                dust_threshold):
        '''Select unspent coins to spend to pay outputs. If the change is
        greater than dust_threshold (after adding the change output to
        the transaction) it is kept, otherwise none is sent and it is
        added to the transaction fee.'''
        # Deterministic randomness from coins
        utxos = [c['prevout_hash'] + str(c['prevout_n']) for c in coins]
        self.p = PRNG(''.join(sorted(utxos)))

        # Copy the outputs so when adding change we don't modify "outputs"
        tx = Transaction.from_io([], outputs[:])
        # Size of the transaction with no inputs and no change
        base_size = tx.estimated_size()
        spent_amount = tx.output_value()

        def sufficient_funds(buckets):
            '''Given a list of buckets, return True if it has enough
            value to pay for the transaction'''
            total_input = sum(bucket.value for bucket in buckets)
            total_size = sum(bucket.size for bucket in buckets) + base_size
            return total_input >= spent_amount + fee_estimator(total_size)

        # Collect the coins into buckets, choose a subset of the buckets
        buckets = self.bucketize_coins(coins)
        buckets = self.choose_buckets(buckets, sufficient_funds,
                                      self.penalty_func(tx))

        tx.add_inputs([coin for b in buckets for coin in b.coins])
        tx_size = base_size + sum(bucket.size for bucket in buckets)

        # This takes a count of change outputs and returns a tx fee;
        # each pay-to-parkbyte-address output serializes as 34 bytes
        fee = lambda count: fee_estimator(tx_size + count * 34)
        change = self.change_outputs(tx, change_addrs, fee, dust_threshold)
        tx.add_outputs(change)

        self.print_error("using %d inputs" % len(tx.inputs()))
        self.print_error("using buckets:", [bucket.desc for bucket in buckets])
        return tx
class CoinChooserOldestFirst(CoinChooserBase):
    '''Maximize transaction priority. Select the oldest unspent
    transaction outputs in your wallet, that are sufficient to cover
    the spent amount. Then, remove any unneeded inputs, starting with
    the smallest in value.
    '''

    def keys(self, coins):
        """Bucket key: one bucket per UTXO ("txid:index")."""
        return [coin['prevout_hash'] + ':' + str(coin['prevout_n'])
                for coin in coins]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        '''Spend the oldest buckets first.'''
        # Unconfirmed coins are young, not old
        adj_height = lambda height: 99999999 if height == 0 else height
        buckets.sort(key=lambda b: max(adj_height(coin['height'])
                                       for coin in b.coins))
        selected = []
        for bucket in buckets:
            selected.append(bucket)
            if sufficient_funds(selected):
                return strip_unneeded(selected, sufficient_funds)
        # Even all buckets together cannot cover the spend
        raise NotEnoughFunds()
class CoinChooserRandom(CoinChooserBase):
    """Generate many random sufficient bucket sets and keep the one the
    penalty function likes best."""

    def bucket_candidates(self, buckets, sufficient_funds):
        '''Returns a list of bucket sets.'''
        candidates = set()

        # Add all singletons
        for n, bucket in enumerate(buckets):
            if sufficient_funds([bucket]):
                candidates.add((n, ))

        # And now some random ones
        attempts = min(100, (len(buckets) - 1) * 10 + 1)
        # Materialize as a list so the in-place shuffle works under
        # Python 3 as well; identical behaviour under Python 2.
        permutation = list(range(len(buckets)))
        for i in range(attempts):
            # Get a random permutation of the buckets, and
            # incrementally combine buckets until sufficient
            self.p.shuffle(permutation)
            bkts = []
            for count, index in enumerate(permutation):
                bkts.append(buckets[index])
                if sufficient_funds(bkts):
                    # Stored as sorted index tuples so the set() dedupes
                    # identical selections reached in different orders.
                    candidates.add(tuple(sorted(permutation[:count + 1])))
                    break
            else:
                # Even all buckets together cannot cover the spend
                raise NotEnoughFunds()

        candidates = [[buckets[n] for n in c] for c in candidates]
        return [strip_unneeded(c, sufficient_funds) for c in candidates]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        """Pick the candidate bucket set with the minimal penalty."""
        candidates = self.bucket_candidates(buckets, sufficient_funds)
        penalties = [penalty_func(cand) for cand in candidates]
        winner = candidates[penalties.index(min(penalties))]
        self.print_error("Bucket sets:", len(buckets))
        self.print_error("Winning penalty:", min(penalties))
        return winner
class CoinChooserPrivacy(CoinChooserRandom):
    '''Attempts to better preserve user privacy. First, if any coin is
    spent from a user address, all coins are. Compared to spending
    from other addresses to make up an amount, this reduces
    information leakage about sender holdings. It also helps to
    reduce blockchain UTXO bloat, and reduce future privacy loss that
    would come from reusing that address' remaining UTXOs. Second, it
    penalizes change that is quite different to the sent amount.
    Third, it penalizes change that is too big.'''

    def keys(self, coins):
        """Bucket key: the coin's address, so all coins of an address
        are spent together."""
        return [coin['address'] for coin in coins]

    def penalty_func(self, tx):
        """Score candidates on bucket count and on how "normal" the
        resulting change looks relative to the real outputs."""
        min_change = min(o[2] for o in tx.outputs()) * 0.75
        max_change = max(o[2] for o in tx.outputs()) * 1.33
        spent_amount = sum(o[2] for o in tx.outputs())

        def penalty(buckets):
            badness = len(buckets) - 1
            total_input = sum(bucket.value for bucket in buckets)
            change = float(total_input - spent_amount)
            # Penalize change not roughly in output range
            if change < min_change:
                badness += (min_change - change) / (min_change + 10000)
            elif change > max_change:
                badness += (change - max_change) / (max_change + 10000)
            # Penalize large change; 5 PKB excess ~= using 1 more input
            badness += change / (COIN * 5)
            return badness

        return penalty
# Registry of selectable coin-chooser strategies, keyed by config value.
COIN_CHOOSERS = {'Priority': CoinChooserOldestFirst,
                 'Privacy': CoinChooserPrivacy}


def get_name(config):
    """Return the configured coin-chooser name, falling back to
    'Priority' for missing or unknown values."""
    kind = config.get('coin_chooser')
    if kind not in COIN_CHOOSERS:
        kind = 'Priority'
    return kind
def get_coin_chooser(config):
    """Instantiate the coin-chooser class selected by *config*."""
    klass = COIN_CHOOSERS[get_name(config)]
    return klass()
| 39.819315 | 79 | 0.634408 |
from collections import defaultdict, namedtuple
from math import floor, log10
import struct
from parkbyte import sha256, COIN, TYPE_ADDRESS
from transaction import Transaction
from util import NotEnoughFunds, PrintError, profiler
class PRNG:
    """A simple deterministic PRNG keyed on a SHA-256 stream.

    Used to deterministically shuffle a set of coins: the same UTXO set
    always produces the same selection, so repeated spend attempts from
    the same wallet state choose the same coins.  This prevents attacks
    on users by malicious or stale servers.
    """

    def __init__(self, seed):
        self.sha = sha256(seed)
        self.pool = bytearray()

    def get_bytes(self, n):
        """Return the next *n* bytes of the hash keystream."""
        while len(self.pool) < n:
            self.pool.extend(self.sha)
            self.sha = sha256(self.sha)
        result, self.pool = self.pool[:n], self.pool[n:]
        return result

    def random(self):
        """Return a random double in [0, 1)."""
        four = self.get_bytes(4)
        # "<I" (standard-size, little-endian uint32) instead of native "I":
        # the native format's size and byte order are platform dependent,
        # which would break cross-platform determinism of the stream.
        return struct.unpack("<I", four)[0] / 4294967296.0

    def randint(self, start, end):
        """Return a random integer in [start, end)."""
        return start + int(self.random() * (end - start))

    def choice(self, seq):
        """Return a random element of *seq*."""
        return seq[int(self.random() * len(seq))]

    def shuffle(self, x):
        """Shuffle the mutable sequence *x* in place (Fisher-Yates)."""
        # range() works on both Python 2 and 3 (xrange is Py2-only).
        for i in reversed(range(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = int(self.random() * (i + 1))
            x[i], x[j] = x[j], x[i]
Bucket = namedtuple('Bucket', ['desc', 'size', 'value', 'coins'])


def strip_unneeded(bkts, sufficient_funds):
    """Remove buckets that are unnecessary in achieving the spend amount.

    Buckets are sorted by ascending value and the smallest ones are
    dropped for as long as the remainder still satisfies
    *sufficient_funds*.  Returns the (sorted) remaining buckets.
    """
    bkts = sorted(bkts, key=lambda bkt: bkt.value)
    for i in range(len(bkts)):
        if not sufficient_funds(bkts[i + 1:]):
            return bkts[i:]
    # Shouldn't get here: even the empty selection was sufficient
    return bkts
class CoinChooserBase(PrintError):
    """Shared machinery for coin selection.

    Subclasses define how coins are grouped into buckets (``keys``) and
    how candidate selections are ranked (``penalty_func``); this class
    handles bucketizing, change computation and transaction assembly.
    """

    def keys(self, coins):
        """Return one bucketing key per coin; coins that share a key are
        always spent together.  Must be overridden."""
        raise NotImplementedError

    def bucketize_coins(self, coins):
        """Group *coins* by key and wrap each group in a Bucket."""
        keys = self.keys(coins)
        buckets = defaultdict(list)
        for key, coin in zip(keys, coins):
            buckets[key].append(coin)

        def make_Bucket(desc, coins):
            size = sum(Transaction.estimated_input_size(coin)
                       for coin in coins)
            value = sum(coin['value'] for coin in coins)
            return Bucket(desc, size, value, coins)

        # list() keeps this a real list under Python 3 too (callers index
        # and sort it); identical behaviour under Python 2.
        return list(map(make_Bucket, buckets.keys(), buckets.values()))

    def penalty_func(self, tx):
        """Return a scoring function for candidate bucket sets (lower is
        better).  The default treats all candidates as equal."""
        def penalty(candidate):
            return 0
        return penalty

    def change_amounts(self, tx, count, fee_estimator, dust_threshold):
        """Split the transaction's excess value into up to *count* change
        amounts whose magnitude and precision mimic the real outputs."""
        # Break change up if bigger than max_change
        output_amounts = [o[2] for o in tx.outputs()]
        # Don't split change of less than 0.02 PKB
        max_change = max(max(output_amounts) * 1.25, 0.02 * COIN)
        for n in range(1, count + 1):
            # How much is left if we add this many change outputs?
            change_amount = max(0, tx.get_fee() - fee_estimator(n))
            if change_amount // n <= max_change:
                break
        # Round change to a precision similar to the real outputs
        def trailing_zeroes(val):
            s = str(val)
            return len(s) - len(s.rstrip('0'))
        # Materialized so both min() and max() can consume it (map() is a
        # one-shot iterator under Python 3); identical under Python 2.
        zeroes = list(map(trailing_zeroes, output_amounts))
        min_zeroes = min(zeroes)
        max_zeroes = max(zeroes)
        zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1)
        # Calculate change; randomize it a bit if using more than 1 output
        remaining = change_amount
        amounts = []
        while n > 1:
            average = remaining // n
            amount = self.p.randint(int(average * 0.7), int(average * 1.3))
            precision = min(self.p.choice(zeroes), int(floor(log10(amount))))
            amount = int(round(amount, -precision))
            amounts.append(amount)
            remaining -= amount
            n -= 1
        # Last change output: round down, losing at most 100 sats to fees
        N = pow(10, min(2, zeroes[0]))
        amount = (remaining // N) * N
        amounts.append(amount)
        assert sum(amounts) <= change_amount
        return amounts

    def change_outputs(self, tx, change_addrs, fee_estimator, dust_threshold):
        """Build change output tuples; amounts below *dust_threshold* are
        dropped (left to the fee) rather than sent."""
        amounts = self.change_amounts(tx, len(change_addrs), fee_estimator,
                                      dust_threshold)
        assert min(amounts) >= 0
        assert len(change_addrs) >= len(amounts)
        dust = sum(amount for amount in amounts if amount < dust_threshold)
        amounts = [amount for amount in amounts if amount >= dust_threshold]
        change = [(TYPE_ADDRESS, addr, amount)
                  for addr, amount in zip(change_addrs, amounts)]
        self.print_error('change:', change)
        if dust:
            self.print_error('not keeping dust', dust)
        return change

    def make_tx(self, coins, outputs, change_addrs, fee_estimator,
                dust_threshold):
        """Select unspent coins to pay *outputs*; change above
        *dust_threshold* is kept, otherwise it is left to the fee."""
        # Deterministic randomness derived from the UTXO set
        utxos = [c['prevout_hash'] + str(c['prevout_n']) for c in coins]
        self.p = PRNG(''.join(sorted(utxos)))

        # Copy the outputs so adding change doesn't modify "outputs"
        tx = Transaction.from_io([], outputs[:])
        # Size of the transaction with no inputs and no change
        base_size = tx.estimated_size()
        spent_amount = tx.output_value()

        def sufficient_funds(buckets):
            """True if *buckets* can pay the outputs plus estimated fee."""
            total_input = sum(bucket.value for bucket in buckets)
            total_size = sum(bucket.size for bucket in buckets) + base_size
            return total_input >= spent_amount + fee_estimator(total_size)

        # Collect the coins into buckets, choose a subset of the buckets
        buckets = self.bucketize_coins(coins)
        buckets = self.choose_buckets(buckets, sufficient_funds,
                                      self.penalty_func(tx))

        tx.add_inputs([coin for b in buckets for coin in b.coins])
        tx_size = base_size + sum(bucket.size for bucket in buckets)

        # This takes a count of change outputs and returns a tx fee;
        # each pay-to-parkbyte-address output serializes as 34 bytes
        fee = lambda count: fee_estimator(tx_size + count * 34)
        change = self.change_outputs(tx, change_addrs, fee, dust_threshold)
        tx.add_outputs(change)

        self.print_error("using %d inputs" % len(tx.inputs()))
        self.print_error("using buckets:", [bucket.desc for bucket in buckets])
        return tx
class CoinChooserOldestFirst(CoinChooserBase):
    """Maximize transaction priority: spend the oldest UTXOs that cover
    the spent amount, then drop unneeded inputs smallest-first."""

    def keys(self, coins):
        """Bucket key: one bucket per UTXO ("txid:index")."""
        return [coin['prevout_hash'] + ':' + str(coin['prevout_n'])
                for coin in coins]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        """Spend the oldest buckets first."""
        # Unconfirmed coins (height 0) are young, not old
        adj_height = lambda height: 99999999 if height == 0 else height
        buckets.sort(key=lambda b: max(adj_height(coin['height'])
                                       for coin in b.coins))
        selected = []
        for bucket in buckets:
            selected.append(bucket)
            if sufficient_funds(selected):
                return strip_unneeded(selected, sufficient_funds)
        # Even all buckets together cannot cover the spend
        raise NotEnoughFunds()
class CoinChooserRandom(CoinChooserBase):
    """Generate many random sufficient bucket sets and keep the one the
    penalty function likes best."""

    def bucket_candidates(self, buckets, sufficient_funds):
        """Return a list of sufficient bucket sets."""
        candidates = set()
        # Add all singletons
        for n, bucket in enumerate(buckets):
            if sufficient_funds([bucket]):
                candidates.add((n, ))
        # And now some random ones
        attempts = min(100, (len(buckets) - 1) * 10 + 1)
        # Materialize as a list so the in-place shuffle works under
        # Python 3 as well; identical behaviour under Python 2.
        permutation = list(range(len(buckets)))
        for i in range(attempts):
            # Get a random permutation of the buckets, and
            # incrementally combine buckets until sufficient
            self.p.shuffle(permutation)
            bkts = []
            for count, index in enumerate(permutation):
                bkts.append(buckets[index])
                if sufficient_funds(bkts):
                    # Stored as sorted index tuples so the set() dedupes
                    # identical selections reached in different orders.
                    candidates.add(tuple(sorted(permutation[:count + 1])))
                    break
            else:
                # Even all buckets together cannot cover the spend
                raise NotEnoughFunds()
        candidates = [[buckets[n] for n in c] for c in candidates]
        return [strip_unneeded(c, sufficient_funds) for c in candidates]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        """Pick the candidate bucket set with the minimal penalty."""
        candidates = self.bucket_candidates(buckets, sufficient_funds)
        penalties = [penalty_func(cand) for cand in candidates]
        winner = candidates[penalties.index(min(penalties))]
        self.print_error("Bucket sets:", len(buckets))
        self.print_error("Winning penalty:", min(penalties))
        return winner
class CoinChooserPrivacy(CoinChooserRandom):
    """Random selection with privacy-oriented penalties.

    Coins are bucketed by address, so spending from an address always
    spends all of its coins; the penalty discourages using many buckets,
    change far outside the range of the real outputs, and very large
    change.
    """

    def keys(self, coins):
        """Bucket key: the coin's address."""
        return [coin['address'] for coin in coins]

    def penalty_func(self, tx):
        """Score candidates on bucket count and on how "normal" the
        resulting change looks relative to the real outputs."""
        min_change = min(o[2] for o in tx.outputs()) * 0.75
        max_change = max(o[2] for o in tx.outputs()) * 1.33
        spent_amount = sum(o[2] for o in tx.outputs())

        def penalty(buckets):
            badness = len(buckets) - 1
            total_input = sum(bucket.value for bucket in buckets)
            change = float(total_input - spent_amount)
            # Penalize change not roughly in output range
            if change < min_change:
                badness += (min_change - change) / (min_change + 10000)
            elif change > max_change:
                badness += (change - max_change) / (max_change + 10000)
            # Penalize large change; 5 PKB excess ~= using 1 more input
            badness += change / (COIN * 5)
            return badness

        return penalty
# Registry of selectable coin-chooser strategies, keyed by config value.
COIN_CHOOSERS = {'Priority': CoinChooserOldestFirst,
                 'Privacy': CoinChooserPrivacy}


def get_name(config):
    """Return the configured coin-chooser name, falling back to
    'Priority' for missing or unknown values."""
    kind = config.get('coin_chooser')
    if kind not in COIN_CHOOSERS:
        kind = 'Priority'
    return kind
def get_coin_chooser(config):
    """Instantiate the coin-chooser class selected by *config*."""
    klass = COIN_CHOOSERS[get_name(config)]
    return klass()
| true | true |
f7309d29d60d2cbdb6763d9b77b9ea53b7fcf2ae | 631 | py | Python | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
# Note:
# - This is the Python code used in Script Manager. ##
# Compatible:
# - Win / Mac
# - R21
# ----------------------------------------------------------------------
import c4d
# Main function
def main():
    """Switch the palette to large icons and add a Soft Body tag.

    Command IDs are Cinema 4D R21 built-ins invoked via CallCommand.
    """
    c4d.CallCommand(100004708, 100004708)  # Large Icons
    c4d.CallCommand(180000042, 180000042)  # Soft Body Tag


# Execute main() when run from the Script Manager
if __name__ == '__main__':
    main()
import c4d
def main():
    """Run the Cinema 4D commands: Large Icons, then Soft Body Tag."""
    c4d.CallCommand(100004708, 100004708)  # Large Icons
    c4d.CallCommand(180000042, 180000042)  # Soft Body Tag


if __name__ == '__main__':
    main()
f7309e2c4b077ea8962cfa2d8cd82d87e8d4bf65 | 2,158 | py | Python | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | 1 | 2019-07-07T19:54:17.000Z | 2019-07-07T19:54:17.000Z | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | # HASPR - High-Altitude Solar Power Research
# Script to calculate CO2-equivalent offset given generation profiles
# Version 0.1
# Author: neyring
from os import walk
import haspr
from haspr import Result
from haspr import Dataset
import numpy as np
from numpy import genfromtxt
# PARAMETERS #
# path to .csv file of grid CI data (Wh, UTC, 1h res, no leap days):
ciPath = "D:\\00_Results\\04_CO2 Offset\\1_Swiss Grid CI - 1h - UTC.csv"
# directory containing generation profiles (1h res, Wh) to run our analyses on (without leap days):
inputDirectory = "D:\\00_Results\\Out"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\04_CO2 Offset\\Case 5 - 30 to 65 deg winter opt"
# OS path delimiter ("\\" for windows, "/" for unix)"
haspr.osPathDelimiter = "\\"

# extract carbon intensity data:
ci = Dataset("ci")
haspr.get_csv_data(ciPath, ci)
timestamps = []
ci_values = []
for p in ci.payload:
    timestamps.append(str(p[0]))
    ci_values.append(float(p[1]))
ci_values = np.array(ci_values)  # use numpy for efficient element-wise calculations

# get all file names in inputDirectory:
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    file_names.extend(filenames)

# cycle through files and build result objects:
results = []
for f in file_names:
    file_path = inputDirectory + haspr.osPathDelimiter + f

    # get generation profile (column 1 holds the generation values in Wh):
    extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
    gen_values = extracted_array[:, 1]  # we only want generation values

    # CO2-eq offset = hourly grid carbon intensity x hourly generation:
    carbon_offset = np.multiply(ci_values, gen_values)

    # build current result object:
    result_title = f[0:len(f) - 4] + " - CO2-eq offset"
    current_result = Result(result_title)
    current_result.payload.append("Time [UTC], CO2-eq offset [g]")
    # iterate over the actual series instead of a hard-coded 8760 hours,
    # so any profile length matching the CI data is handled correctly
    for ts, offset in zip(timestamps, carbon_offset):
        current_result.payload.append(ts + ", " + str(offset))
    results.append(current_result)

# dump all results:
for r in results:
    r.dump()
| 33.71875 | 100 | 0.696942 |
from os import walk
import haspr
from haspr import Result
from haspr import Dataset
import numpy as np
from numpy import genfromtxt
# PARAMETERS #
# path to .csv file of grid CI data (Wh, UTC, 1h res, no leap days):
ciPath = "D:\\00_Results\\04_CO2 Offset\\1_Swiss Grid CI - 1h - UTC.csv"
# directory containing generation profiles (1h res, Wh, no leap days):
inputDirectory = "D:\\00_Results\\Out"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\04_CO2 Offset\\Case 5 - 30 to 65 deg winter opt"
# OS path delimiter ("\\" for windows, "/" for unix)
haspr.osPathDelimiter = "\\"

# extract carbon intensity data:
ci = Dataset("ci")
haspr.get_csv_data(ciPath, ci)
timestamps = []
ci_values = []
for p in ci.payload:
    timestamps.append(str(p[0]))
    ci_values.append(float(p[1]))
ci_values = np.array(ci_values)  # use numpy for efficient element-wise calculations

# get all file names in inputDirectory:
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    file_names.extend(filenames)

# cycle through files and build result objects:
results = []
for f in file_names:
    file_path = inputDirectory + haspr.osPathDelimiter + f

    # get generation profile (column 1 holds the generation values in Wh):
    extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
    gen_values = extracted_array[:, 1]  # we only want generation values

    # CO2-eq offset = hourly grid carbon intensity x hourly generation:
    carbon_offset = np.multiply(ci_values, gen_values)

    # build current result object:
    result_title = f[0:len(f) - 4] + " - CO2-eq offset"
    current_result = Result(result_title)
    current_result.payload.append("Time [UTC], CO2-eq offset [g]")
    # iterate over the actual series instead of a hard-coded 8760 hours,
    # so any profile length matching the CI data is handled correctly
    for ts, offset in zip(timestamps, carbon_offset):
        current_result.payload.append(ts + ", " + str(offset))
    results.append(current_result)

# dump all results:
for r in results:
    r.dump()
| true | true |
f7309e42f5d8076d77031b024afee339b3886980 | 1,817 | py | Python | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from os.path import join, dirname, abspath
from setuptools import setup
def read(rel_path):
    """Return the full text of *rel_path*, resolved against this file's directory."""
    base_dir = abspath(dirname(__file__))
    with open(join(base_dir, rel_path)) as handle:
        return handle.read()
def get_version(rel_path):
    """Return the ``__version__`` string declared in *rel_path*.

    Scans the file line by line for an assignment starting with
    ``__version__`` and returns the quoted value (single or double quotes).
    Raises RuntimeError when no such line exists.
    """
    for raw in read(rel_path).splitlines():
        if not raw.startswith("__version__"):
            continue
        quote = '"' if '"' in raw else "'"
        return raw.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Runtime requirements and long description are read from sibling files so
# they are maintained in exactly one place.
REQUIREMENTS = read('requirements.txt').splitlines()
DESCRIPTION = read('README.md')
# Package metadata; the version is parsed out of CSVLibrary/__init__.py.
setup(name='robotframework-csvlibrary',
      version=get_version("CSVLibrary/__init__.py"),
      description='CSV library for Robot Framework',
      long_description=DESCRIPTION,
      long_description_content_type='text/markdown',
      author='Marcin Mierzejewski',
      author_email='<mmierz@gmail.com>',
      url='https://github.com/s4int/robotframework-CSVLibrary',
      license='Apache License 2.0',
      keywords='robotframework testing csv',
      platforms='any',
      classifiers=[
          "Development Status :: 4 - Beta",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development :: Testing",
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10',
          ],
      install_requires=REQUIREMENTS,
      packages=['CSVLibrary'],
      )
| 34.283019 | 63 | 0.624656 |
from os.path import join, dirname, abspath
from setuptools import setup
def read(rel_path):
    """Return the full text of *rel_path*, resolved against this file's directory."""
    here = abspath(dirname(__file__))
    with open(join(here, rel_path)) as fp:
        return fp.read()
def get_version(rel_path):
    """Return the ``__version__`` value declared in *rel_path* (single source of truth)."""
    for line in read(rel_path).splitlines():
        if line.startswith("__version__"):
            # Value may be quoted with either single or double quotes.
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    raise RuntimeError("Unable to find version string.")
# Requirements and long description come from sibling files.
REQUIREMENTS = read('requirements.txt').splitlines()
DESCRIPTION = read('README.md')
# Package metadata; version is parsed from CSVLibrary/__init__.py.
setup(name='robotframework-csvlibrary',
      version=get_version("CSVLibrary/__init__.py"),
      description='CSV library for Robot Framework',
      long_description=DESCRIPTION,
      long_description_content_type='text/markdown',
      author='Marcin Mierzejewski',
      author_email='<mmierz@gmail.com>',
      url='https://github.com/s4int/robotframework-CSVLibrary',
      license='Apache License 2.0',
      keywords='robotframework testing csv',
      platforms='any',
      classifiers=[
          "Development Status :: 4 - Beta",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development :: Testing",
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10',
          ],
      install_requires=REQUIREMENTS,
      packages=['CSVLibrary'],
      )
| true | true |
f7309efcb09c67631eaa89d0877022da259f67fd | 566 | py | Python | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | """
60.36%
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 83: remove duplicates from a sorted singly linked list."""

    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        node = head
        while node is not None and node.next is not None:
            if node.val == node.next.val:
                # Splice out the duplicate; stay on `node` so longer runs
                # of equal values are collapsed in one pass.
                node.next = node.next.next
            else:
                node = node.next
        return head
class Solution(object):
    def deleteDuplicates(self, head):
        """Collapse runs of equal values in a sorted list; return the head node."""
        cur = head
        while cur:
            nxt = cur.next
            if nxt is None:
                break
            if nxt.val == cur.val:
                # Drop the duplicate node and re-check from the same position.
                cur.next = nxt.next
            else:
                cur = nxt
        return head
f7309f8fa8176e6126bbfea93a4688ef973e5475 | 2,172 | py | Python | tests/util/test_rwlock.py | abichinger/pycasbin | 6166f298406d029a8540a12a4157d7b0072f8c8e | [
"Apache-2.0"
] | 915 | 2018-11-25T01:00:39.000Z | 2022-03-30T11:21:34.000Z | tests/util/test_rwlock.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | [
"Apache-2.0"
] | 231 | 2019-02-13T09:29:51.000Z | 2022-03-28T16:32:51.000Z | tests/util/test_rwlock.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | [
"Apache-2.0"
] | 173 | 2019-02-08T02:22:33.000Z | 2022-03-10T15:16:11.000Z | from unittest import TestCase
from casbin.util.rwlock import RWLockWrite
from concurrent.futures import ThreadPoolExecutor
import time
import queue
class TestRWLock(TestCase):
    """Timing-based tests for casbin's writer-preferring read/write lock."""
    def gen_locks(self):
        # Fresh lock pair per test: a reader handle and a writer handle
        # sharing one underlying RWLockWrite.
        rw_lock = RWLockWrite()
        rl = rw_lock.gen_rlock()
        wl = rw_lock.gen_wlock()
        return (rl, wl)
    def test_multiple_readers(self):
        """Readers run concurrently: total time far below the serial sum."""
        [rl, _] = self.gen_locks()
        delay = 5 / 1000  # 5 ms hold time per reader
        num_readers = 1000
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_readers)
        futures = [executor.submit(read) for i in range(num_readers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        # If readers were serialized this would take >= delay * num_readers.
        self.assertLess(exec_time, delay * num_readers)
    def test_single_writer(self):
        """Writers are mutually exclusive: total time at least the serial sum."""
        [_, wl] = self.gen_locks()
        delay = 5 / 1000  # 5 ms hold time per writer
        num_writers = 10
        start = time.time()
        def write():
            with wl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_writers)
        futures = [executor.submit(write) for i in range(num_writers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        self.assertGreaterEqual(exec_time, delay * num_writers)
    def test_writer_preference(self):
        """Pending writers run before late-arriving readers (r r r, w w w, r r r)."""
        [rl, wl] = self.gen_locks()
        q = queue.Queue()
        delay = 5 / 1000  # 5 ms hold time
        # NOTE(review): `start` is never used in this test.
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
                q.put("r")
        def write():
            with wl:
                time.sleep(delay)
                q.put("w")
        executor = ThreadPoolExecutor(10)
        # Submit 3 readers, then (while they hold the lock) 3 writers,
        # then 3 more readers; writer preference puts writers ahead of
        # the second reader batch.
        futures = [executor.submit(read) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(write) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(read) for i in range(3)]
        [future.result() for future in futures]
        sequence = ""
        while not q.empty():
            sequence += q.get()
        self.assertEqual(sequence, "rrrwwwrrr")
| 26.814815 | 70 | 0.563076 | from unittest import TestCase
from casbin.util.rwlock import RWLockWrite
from concurrent.futures import ThreadPoolExecutor
import time
import queue
class TestRWLock(TestCase):
    """Timing-based tests for casbin's writer-preferring read/write lock."""
    def gen_locks(self):
        # One shared RWLockWrite; return a (reader, writer) handle pair.
        rw_lock = RWLockWrite()
        rl = rw_lock.gen_rlock()
        wl = rw_lock.gen_wlock()
        return (rl, wl)
    def test_multiple_readers(self):
        """Readers overlap: wall time is far below the serial sum."""
        [rl, _] = self.gen_locks()
        delay = 5 / 1000  # 5 ms per reader
        num_readers = 1000
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_readers)
        futures = [executor.submit(read) for i in range(num_readers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        self.assertLess(exec_time, delay * num_readers)
    def test_single_writer(self):
        """Writers serialize: wall time is at least the serial sum."""
        [_, wl] = self.gen_locks()
        delay = 5 / 1000  # 5 ms per writer
        num_writers = 10
        start = time.time()
        def write():
            with wl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_writers)
        futures = [executor.submit(write) for i in range(num_writers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        self.assertGreaterEqual(exec_time, delay * num_writers)
    def test_writer_preference(self):
        """Queued writers run before late readers: order is rrr www rrr."""
        [rl, wl] = self.gen_locks()
        q = queue.Queue()
        delay = 5 / 1000  # 5 ms hold time
        # NOTE(review): `start` is never used in this test.
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
                q.put("r")
        def write():
            with wl:
                time.sleep(delay)
                q.put("w")
        executor = ThreadPoolExecutor(10)
        futures = [executor.submit(read) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(write) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(read) for i in range(3)]
        [future.result() for future in futures]
        sequence = ""
        while not q.empty():
            sequence += q.get()
        self.assertEqual(sequence, "rrrwwwrrr")
| true | true |
f7309fc308191a299ba7877254d0da7512e8a712 | 1,237 | py | Python | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 6 | 2019-08-29T19:00:57.000Z | 2020-06-15T14:55:16.000Z | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 1 | 2020-10-23T18:56:21.000Z | 2020-10-23T18:56:33.000Z | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 3 | 2018-06-29T17:19:21.000Z | 2020-12-11T07:50:33.000Z | import tvm
@tvm.target.generic_func
def mygeneric(data):
    """Default (fallback) implementation used when no target override matches."""
    return data + 1
# Override of `mygeneric` selected for "cuda" and "gpu" target keys.
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
    return data + 2
# Override of `mygeneric` selected for the "rocm" target key.
@mygeneric.register("rocm")
def rocm_func(data):
    return data + 3
# Override of `mygeneric` selected for the "cpu" target key.
# Renamed from `rocm_func`: the original reused the name of the "rocm"
# handler above, shadowing it at module level.  Dispatch is unaffected
# (registration happens via the decorator), only the module name changes.
@mygeneric.register("cpu")
def cpu_func(data):
    return data + 10
def test_target_dispatch():
    """Each target context selects the matching registered override of `mygeneric`."""
    with tvm.target.cuda():
        assert mygeneric(1) == 3
    with tvm.target.rocm():
        assert mygeneric(1) == 4
    with tvm.target.create("cuda"):
        assert mygeneric(1) == 3
    with tvm.target.arm_cpu():
        assert mygeneric(1) == 11
    with tvm.target.create("metal"):
        # "metal" carries the generic "gpu" key, so the cuda/gpu override fires.
        assert mygeneric(1) == 3
    # Outside any `with` block there is no current target.
    assert tvm.target.current_target() == None
def test_target_string_parse():
    """Target strings parse into name, options, keys and libs attributes."""
    target = tvm.target.create("cuda -libs=cublas,cudnn")
    assert target.target_name == "cuda"
    assert target.options == ['-libs=cublas,cudnn']
    assert target.keys == ['cuda', 'gpu']
    assert target.libs == ['cublas', 'cudnn']
    # String round-trip matches the helper-constructed equivalent target.
    assert str(target) == str(tvm.target.cuda("-libs=cublas,cudnn"))
    assert tvm.target.intel_graphics().device_name == "intel_graphics"
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    test_target_dispatch()
    test_target_string_parse()
| 22.490909 | 70 | 0.654002 | import tvm
@tvm.target.generic_func
def mygeneric(data):
    """Default (fallback) implementation used when no target override matches."""
    return data + 1
# Override of `mygeneric` for "cuda" and "gpu" target keys.
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
    return data + 2
# Override of `mygeneric` for the "rocm" target key.
@mygeneric.register("rocm")
def rocm_func(data):
    return data + 3
# Override of `mygeneric` for the "cpu" target key.
# Renamed from `rocm_func`: the original duplicated the "rocm" handler's
# name, shadowing it at module level; registration behavior is unchanged.
@mygeneric.register("cpu")
def cpu_func(data):
    return data + 10
def test_target_dispatch():
    """Each target context selects the matching registered override of `mygeneric`."""
    with tvm.target.cuda():
        assert mygeneric(1) == 3
    with tvm.target.rocm():
        assert mygeneric(1) == 4
    with tvm.target.create("cuda"):
        assert mygeneric(1) == 3
    with tvm.target.arm_cpu():
        assert mygeneric(1) == 11
    with tvm.target.create("metal"):
        assert mygeneric(1) == 3
    assert tvm.target.current_target() == None
def test_target_string_parse():
    """Target strings parse into name, options, keys and libs attributes."""
    target = tvm.target.create("cuda -libs=cublas,cudnn")
    assert target.target_name == "cuda"
    assert target.options == ['-libs=cublas,cudnn']
    assert target.keys == ['cuda', 'gpu']
    assert target.libs == ['cublas', 'cudnn']
    assert str(target) == str(tvm.target.cuda("-libs=cublas,cudnn"))
    assert tvm.target.intel_graphics().device_name == "intel_graphics"
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    test_target_dispatch()
    test_target_string_parse()
| true | true |
f730a1a680a5e31f14718b5fcf0fe938a393cf81 | 4,041 | py | Python | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | """
Plots the steady-state force coefficients of an inclined flat-plate with
aspect-ratio 2 at Reynolds number 100 for angles of attack between 0 and 90
degrees.
Compares with experimental results reported in Taira et al. (2007).
_References:_
* Taira, K., Dickson, W. B., Colonius,
T., Dickinson, M. H., & Rowley, C. W. (2007).
Unsteadiness in flow over a flat plate at angle-of-attack at low Reynolds
numbers.
AIAA Paper, 710, 2007.
"""
import os
import numpy
from matplotlib import pyplot
if not os.environ.get('PETIBM_EXAMPLES'):
raise KeyError('Environment variable PETIBM_EXAMPLES is not set; '
'Set PETIBM_EXAMPLES as the root directory of the examples.')
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.sep.join(script_dir.split(os.sep)[:-1])
# Read forces and computes mean values for each angle of inclination.
time_limits = (15.0, 20.0)
angles = numpy.arange(0, 90 + 1, 10, dtype=numpy.int32)
cd = numpy.zeros_like(angles, dtype=numpy.float64)
cl = numpy.zeros_like(angles, dtype=numpy.float64)
for i, angle in enumerate(angles):
filepath = os.path.join(root_dir, 'AoA{}'.format(angle), 'forces.txt')
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
mask = numpy.where(numpy.logical_and(data[0] >= time_limits[0],
data[0] <= time_limits[1]))[0]
cd[i], cl[i] = data[1][mask].mean(), data[2][mask].mean()
# Read experimental data from Taira et al. (2007).
directory = os.path.join(os.environ['PETIBM_EXAMPLES'], 'data')
taira = {'cd': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_CdvsAoA.dat'},
'cl': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_ClvsAoA.dat'}}
for key in taira.keys():
filepath = os.path.join(directory, taira[key]['filename'])
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
taira[key]['aoa'], taira[key]['values'] = data[0], data[1]
# Plots the force coefficients versus the angle-of-attack and compares with
# experimental results reported in Taira et al. (2007).
pyplot.style.use('seaborn-dark')
fig, ax = pyplot.subplots(2, figsize=(6.0, 6.0), sharex=True)
ax[0].grid(zorder=0)
ax[0].set_ylabel('$C_D$', fontname='DejaVu Serif', fontsize=16)
ax[0].scatter(angles, cd,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[0].scatter(taira['cd']['aoa'], taira['cd']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[0].set_ylim(0.0, 2.0)
ax[1].grid(zorder=0)
ax[1].set_xlabel('angle of attack (deg)',
fontname='DejaVu Serif', fontsize=16)
ax[1].set_ylabel('$C_L$', fontname='DejaVu Serif', fontsize=16)
ax[1].scatter(angles, cl,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[1].scatter(taira['cl']['aoa'], taira['cl']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[1].set_xlim(0.0, 90.0)
ax[1].set_ylim(0.0, 2.0)
for a in ax:
for method in ['get_xticklabels', 'get_yticklabels']:
for label in getattr(a, method)():
label.set_fontname('DejaVu Serif')
label.set_fontsize(14)
handles, labels = ax[0].get_legend_handles_labels()
fig.legend(handles, labels,
ncol=2, loc='center', prop={'family': 'serif', 'size': 14},
frameon=False, bbox_to_anchor=(0.54, 0.53))
fig.tight_layout()
# Save the figure.
figures_dir = os.path.join(root_dir, 'figures')
if not os.path.isdir(figures_dir):
os.makedirs(figures_dir)
filepath = os.path.join(figures_dir, 'forceCoefficients.png')
fig.savefig(filepath)
pyplot.show()
| 39.23301 | 78 | 0.644642 |
import os
import numpy
from matplotlib import pyplot
if not os.environ.get('PETIBM_EXAMPLES'):
raise KeyError('Environment variable PETIBM_EXAMPLES is not set; '
'Set PETIBM_EXAMPLES as the root directory of the examples.')
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.sep.join(script_dir.split(os.sep)[:-1])
time_limits = (15.0, 20.0)
angles = numpy.arange(0, 90 + 1, 10, dtype=numpy.int32)
cd = numpy.zeros_like(angles, dtype=numpy.float64)
cl = numpy.zeros_like(angles, dtype=numpy.float64)
for i, angle in enumerate(angles):
filepath = os.path.join(root_dir, 'AoA{}'.format(angle), 'forces.txt')
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
mask = numpy.where(numpy.logical_and(data[0] >= time_limits[0],
data[0] <= time_limits[1]))[0]
cd[i], cl[i] = data[1][mask].mean(), data[2][mask].mean()
directory = os.path.join(os.environ['PETIBM_EXAMPLES'], 'data')
taira = {'cd': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_CdvsAoA.dat'},
'cl': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_ClvsAoA.dat'}}
for key in taira.keys():
filepath = os.path.join(directory, taira[key]['filename'])
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
taira[key]['aoa'], taira[key]['values'] = data[0], data[1]
pyplot.style.use('seaborn-dark')
fig, ax = pyplot.subplots(2, figsize=(6.0, 6.0), sharex=True)
ax[0].grid(zorder=0)
ax[0].set_ylabel('$C_D$', fontname='DejaVu Serif', fontsize=16)
ax[0].scatter(angles, cd,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[0].scatter(taira['cd']['aoa'], taira['cd']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[0].set_ylim(0.0, 2.0)
ax[1].grid(zorder=0)
ax[1].set_xlabel('angle of attack (deg)',
fontname='DejaVu Serif', fontsize=16)
ax[1].set_ylabel('$C_L$', fontname='DejaVu Serif', fontsize=16)
ax[1].scatter(angles, cl,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[1].scatter(taira['cl']['aoa'], taira['cl']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[1].set_xlim(0.0, 90.0)
ax[1].set_ylim(0.0, 2.0)
for a in ax:
for method in ['get_xticklabels', 'get_yticklabels']:
for label in getattr(a, method)():
label.set_fontname('DejaVu Serif')
label.set_fontsize(14)
handles, labels = ax[0].get_legend_handles_labels()
fig.legend(handles, labels,
ncol=2, loc='center', prop={'family': 'serif', 'size': 14},
frameon=False, bbox_to_anchor=(0.54, 0.53))
fig.tight_layout()
figures_dir = os.path.join(root_dir, 'figures')
if not os.path.isdir(figures_dir):
os.makedirs(figures_dir)
filepath = os.path.join(figures_dir, 'forceCoefficients.png')
fig.savefig(filepath)
pyplot.show()
| true | true |
f730a1ae102d08afb1c37051c52914eb79ec8212 | 1,395 | py | Python | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | import urllib.request
import json
import requests
from data.config import *
def youtube_get_information(channel_id):
    """Return ``(title, url)`` of the most recent video on a YouTube channel.

    Queries the YouTube Data API v3 ``search`` endpoint for the channel,
    ordered by date with ``maxResults=1``.  Returns ``None`` implicitly when
    the returned item is not a video (e.g. a playlist or channel hit).
    """
    api_key = YOUTUBE_API_KEY
    base_search_url = "https://www.googleapis.com/youtube/v3/search?"
    base_video_link = "https://www.youtube.com/watch?v="
    first_url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"
    inp = urllib.request.urlopen(first_url)
    resp = json.load(inp)
    # Fixed: removed a leftover debug `print(i)` that dumped every raw API
    # item to stdout on each call.
    for i in resp["items"]:
        # Only video hits carry a videoId; other kinds are skipped.
        if i["id"]["kind"] == "youtube#video":
            video_link = base_video_link + i["id"]["videoId"]
            video_title = i["snippet"]["title"]
            return video_title, video_link
def twitch_get_information(channel_name):
    """Return True when the Twitch channel *channel_name* is currently live."""
    URL = f'https://api.twitch.tv/helix/streams?user_login={channel_name}'
    auth_url = 'https://id.twitch.tv/oauth2/token'
    # Client-credentials OAuth flow: exchange app id/secret for an access token.
    aut_params = {
        'client_id': CLIENT_ID,
        'client_secret': SECRET,
        'grant_type': 'client_credentials',
    }
    token_response = requests.post(url=auth_url, params=aut_params)
    access_token = token_response.json()['access_token']
    head = {
        'Client-ID': CLIENT_ID,
        'Authorization': "Bearer " + access_token,
    }
    # Helix returns an empty `data` list when the channel is offline.
    streams = requests.get(URL, headers=head).json()['data']
    if streams and streams[0]['type'] == 'live':
        return True
    return False
| 29.680851 | 113 | 0.629391 | import urllib.request
import json
import requests
from data.config import *
def youtube_get_information(channel_id):
    """Return ``(title, url)`` of the channel's most recent video via YouTube Data API v3."""
    api_key = YOUTUBE_API_KEY
    base_search_url = "https://www.googleapis.com/youtube/v3/search?"
    base_video_link = "https://www.youtube.com/watch?v="
    first_url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"
    inp = urllib.request.urlopen(first_url)
    resp = json.load(inp)
    for i in resp["items"]:
        # NOTE(review): leftover debug print — dumps every raw API item to stdout.
        print(i)
        # Only video hits carry a videoId; other kinds fall through (returns None).
        if i["id"]["kind"] == "youtube#video":
            video_link = base_video_link + i["id"]["videoId"]
            video_title = i["snippet"]["title"]
            return video_title, video_link
def twitch_get_information(channel_name):
    """Return True when the Twitch channel *channel_name* is currently live."""
    URL = f'https://api.twitch.tv/helix/streams?user_login={channel_name}'
    auth_url = 'https://id.twitch.tv/oauth2/token'
    # Client-credentials OAuth flow using app credentials from data.config.
    aut_params = {'client_id': CLIENT_ID,
                  'client_secret': SECRET,
                  'grant_type': 'client_credentials'
                  }
    aut_call = requests.post(url=auth_url, params=aut_params)
    access_token = aut_call.json()['access_token']
    head = {
        'Client-ID' : CLIENT_ID,
        'Authorization' : "Bearer " + access_token
    }
    # Helix returns an empty `data` list when the channel is offline.
    r = requests.get(URL, headers = head).json()['data']
    if r:
        if r[0]['type'] == 'live':
            return True
    return False
| true | true |
f730a1efa3b379ce51a02f804023a5cabbc94433 | 12,104 | py | Python | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 4 | 2021-07-21T07:48:02.000Z | 2022-03-16T00:42:33.000Z | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 3 | 2021-11-06T09:18:21.000Z | 2022-01-11T14:19:40.000Z | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 3 | 2021-12-26T03:05:06.000Z | 2022-01-14T07:54:56.000Z | # YOLOv5 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
    """YOLOv5 detection head.

    One 1x1 conv per input scale maps features to ``na * (nc + 5)`` channels,
    reshaped to ``(bs, na, ny, nx, no)``.  In eval mode the raw outputs are
    additionally decoded to absolute xywh boxes using grids and anchors.
    """
    stride = None  # per-layer strides; filled in during model build
    export = False  # ONNX export flag: forces the raw (training-style) output
    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # outputs per anchor: xywh + objectness + classes
        self.nl = len(anchors)  # number of detection layers (scales)
        self.na = len(anchors[0]) // 2  # anchors per layer
        self.grid = [torch.zeros(1)] * self.nl  # lazily (re)built cell grids
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv per scale
    def forward(self, x):
        """Run the head on the list of per-scale feature maps (mutates x in place)."""
        # x = x.copy()  # for profiling
        z = []  # decoded inference outputs
        self.training |= self.export
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # 1x1 conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference: decode to absolute boxes
                # Rebuild the grid only when the feature-map size changed.
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
                y = x[i].sigmoid()
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)
    @staticmethod
    def _make_grid(nx=20, ny=20):
        # Returns a (1, 1, ny, nx, 2) grid of (x, y) cell coordinates.
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    """YOLOv5 model built from a YAML config; wraps backbone + head + Detect."""
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # already-parsed model dict
        else:  # is *.yaml
            import yaml  # local import for torch hub compatibility
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
        # Define model; nc/anchors arguments override the YAML values.
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default class names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
        # Build strides and normalize anchors with a dummy forward pass.
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())
        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')
    def forward(self, x, augment=False, profile=False):
        """Forward pass; with augment=True runs test-time augmentation (scale + flip)."""
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale boxes back to the input resolution
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train
    def forward_once(self, x, profile=False):
        """Run all layers once; with profile=True prints per-layer FLOPS/latency."""
        y, dt = [], []  # saved outputs, per-layer timings
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output only if a later layer needs it
        if profile:
            print('%.1fms total' % sum(dt))
        return x
    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # per detection scale
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
    def _print_biases(self):
        # Debug helper: prints the mean Detect conv biases per scale.
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self
    def nms(self, mode=True):  # add or remove NMS module
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self
    def autoshape(self):  # add autoShape module
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m
    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Build an nn.Sequential from the YAML model dict.

    Returns (model, save) where `save` lists layer indices whose outputs
    later layers consume.  Module names in the YAML are resolved via eval().
    """
    logger.info('\n%3s%18s%3s%10s  %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass
        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3, C3TR]:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3TR]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f  %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
# CLI entry point: build a model from a YAML config (smoke test / profiling).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # check file
    set_logging()
    device = select_device(opt.device)
    # Create model
    model = Model(opt.cfg).to(device)
    model.train()
    # Profile
    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
    # y = model(img, profile=True)
    # Tensorboard
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter()
    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
| 43.539568 | 119 | 0.542052 |
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./')
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop
except ImportError:
thop = None
class Detect(nn.Module):
    """YOLOv5 detection head.

    Applies one 1x1 conv per input feature map and reshapes each result to
    (bs, na, ny, nx, no).  In eval mode it additionally decodes the raw
    predictions into absolute boxes using the per-level grid, anchors and
    strides, returning (decoded, raw).
    """
    stride = None  # per-level strides, assigned during Model construction
    export = False  # ONNX-export mode: forces the raw (training-style) output

    def __init__(self, nc=80, anchors=(), ch=()):
        """nc: number of classes; anchors: per-level flat (w, h) pairs; ch: input channels per level."""
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # outputs per anchor: xywh + objectness + class scores
        self.nl = len(anchors)  # number of detection levels
        self.na = len(anchors[0]) // 2  # anchors per level
        self.grid = [torch.zeros(1)] * self.nl  # cell grids, lazily (re)built in forward
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # (nl, na, 2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # broadcastable over (bs, na, ny, nx, 2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # one output conv per level

    def forward(self, x):
        """x: list of nl feature maps. Returns raw maps when training, else (torch.cat(decoded), raw)."""
        z = []  # inference outputs -- FIX: this initializer was missing, causing a NameError in eval mode
        self.training |= self.export  # export always uses the raw-output path
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # 1x1 conv
            bs, _, ny, nx = x[i].shape
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
                y = x[i].sigmoid()
                # Decode: xy relative to the grid cell (scaled by stride), wh relative to the anchor.
                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        """Return a (1, 1, ny, nx, 2) grid holding each cell's (x, y) index."""
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    """YOLOv5 model assembled from a YAML config (backbone + head ending in Detect)."""

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):
        """cfg: model dict or path to a model YAML; ch: input channels; nc/anchors: optional overrides."""
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict supplied directly
        else:
            import yaml  # local import: only needed when loading from file
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.SafeLoader)
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels, YAML value wins
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # layer list + savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default class names
        m = self.model[-1]
        if isinstance(m, Detect):
            # Derive per-level strides from a dummy forward pass, then express anchors in stride units.
            s = 256
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        """Run inference; with augment=True, merge predictions over scale/flip test-time augmentations."""
        if augment:
            img_size = x.shape[-2:]
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2 = up-down, 3 = left-right)
            y = []
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self.forward_once(xi)[0]
                yi[..., :4] /= si  # undo scaling on boxes
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # undo up-down flip
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # undo left-right flip
                y.append(yi)
            return torch.cat(y, 1), None
        else:
            return self.forward_once(x, profile)

    def forward_once(self, x, profile=False):
        """Single pass through the layer list, honoring each layer's 'from' index (m.f)."""
        y, dt = [], []  # saved intermediate outputs, per-layer timings
        for m in self.model:
            if m.f != -1:  # input comes from earlier layer(s), not the previous one
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # GFLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
            x = m(x)
            y.append(x if m.i in self.save else None)  # keep only outputs needed by later layers
        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):
        """Initialize Detect conv biases (objectness/class priors); cf: optional class-frequency tensor."""
        m = self.model[-1]
        for mi, s in zip(m.m, m.stride):
            b = mi.bias.view(m.na, -1)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj prior: ~8 objects per 640px image
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        """Debug helper: print mean Detect biases per anchor."""
        m = self.model[-1]
        for mi in m.m:
            b = mi.bias.detach().view(m.na, -1).T
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    def fuse(self):  # FIX: header line was garbled to "f):" in this copy; restored from the body's fuse_conv_and_bn usage
        """Fuse Conv2d + BatchNorm2d pairs in place for faster inference."""
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)
                delattr(m, 'bn')
                m.forward = m.fuseforward  # switch to the BN-free forward
        self.info()
        return self

    def nms(self, mode=True):
        """Add (mode=True) or remove (mode=False) a trailing NMS module."""
        present = type(self.model[-1]) is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()
            m.f = -1
            m.i = self.model[-1].i + 1
            self.model.add_module(name='%s' % m.i, module=m)
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]
        return self

    def autoshape(self):
        """Wrap the model in autoShape for preprocessing-included inference."""
        print('Adding autoShape... ')
        m = autoShape(self)
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())
        return m

    def info(self, verbose=False, img_size=640):
        """Log a model summary via utils.torch_utils.model_info."""
        model_info(self, verbose, img_size)
def parse_model(d, ch):
    # Build an nn.Sequential from the model dict d ('backbone' + 'head' rows of
    # (from, number, module, args)).  ch is the running list of per-layer output
    # channels (ch[-1] = channels feeding the current layer); returns (model, savelist).
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # anchors per level
    no = na * (nc + 5)  # Detect output channels
    layers, save, c2 = [], [], ch[-1]
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):
        # NOTE(review): eval() on YAML-provided strings — safe only for trusted config files.
        m = eval(m) if isinstance(m, str) else m
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a
            except:
                pass  # keep the string as-is (e.g. activation names)
        n = max(round(n * gd), 1) if n > 1 else n  # scale repeat count by depth multiple
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3, C3TR]:
            c1, c2 = ch[f], args[0]  # in/out channels
            if c2 != no:
                c2 = make_divisible(c2 * gw, 8)  # scale width, keep divisible by 8
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3TR]:
                args.insert(2, n)  # these modules take the repeat count as an arg
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])  # concat along channels
        elif m is Detect:
            args.append([ch[x] for x in f])  # input channels per level
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type name
        np = sum([x.numel() for x in m_.parameters()])  # number of params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []  # reset: first layer's input channels are no longer needed
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
# CLI smoke test (comment-stripped duplicate): build the model and leave it in train mode.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # resolve/verify the config path
    set_logging()
    device = select_device(opt.device)
    model = Model(opt.cfg).to(device)
    model.train()
| true | true |
f730a306db38384a6b876c13045c0824325f5a80 | 95 | py | Python | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | 1 | 2020-08-06T18:51:26.000Z | 2020-08-06T18:51:26.000Z | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | null | null | null | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | null | null | null | #import matplotlib
#print(matplotlib.__version__)
import tensorflow as tf
print(tf.__version__) | 23.75 | 30 | 0.842105 |
import tensorflow as tf  # comment-stripped duplicate of matversion.py
print(tf.__version__)
f730a4182acb8d42e7b585c4b972507dc5f1fbb5 | 5,276 | py | Python | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | null | null | null | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:48:05.000Z | 2022-02-10T06:48:05.000Z | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | null | null | null | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
"Segments longer than this value will be splited before extraction. "
"Then the splited embeddings will be averaged to get the final embedding. "
"L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
# Disable GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# In the GPU situation, it is difficult to know how to specify the GPU id.
# If the program is launched locally, you can set CUDA_VISIBLE_DEVICES to the id.
# However, if SGE is used, we cannot simply set CUDA_VISIBLE_DEVICES.
# So it is better to specify the GPU id outside the program.
# Give an arbitrary number (except for -1) to --gpu can enable it. Leave it blank if you want to disable gpu.
import tensorflow as tf
# Entry point: load a trained TF speaker-embedding model and extract one embedding
# per utterance from a Kaldi feature ark, writing Kaldi-format vectors to wspecifier.
if __name__ == '__main__':
    tf.reset_default_graph()
    tf.logging.set_verbosity(tf.logging.INFO)
    nnet_dir = os.path.join(args.model_dir, "nnet")
    config_json = os.path.join(args.model_dir, "nnet/config.json")
    if not os.path.isfile(config_json):
        sys.exit("Cannot find params.json in %s" % config_json)
    params = Params(config_json)
    # Change the output node if necessary
    if len(args.node) != 0:
        params.embedding_node = args.node
    tf.logging.info("Extract embedding from %s" % params.embedding_node)
    with open(os.path.join(nnet_dir, "feature_dim"), "r") as f:
        dim = int(f.readline().strip())
    #trainer = Trainer(params, args.model_dir, dim, single_cpu=True)
    trainer = Trainer(params, args.model_dir, dim)
    trainer.build("predict")
    if args.rspecifier.rsplit(".", 1)[1] == "scp":
        # The rspecifier cannot be scp
        sys.exit("The rspecifier must be ark or input pipe")
    fp_out = open_or_fd(args.wspecifier, "wb")
    # import pdb;pdb.set_trace()
    # args.rspecifier=args.rspecifier.replace('JOB', '1')
    for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
        if feature.shape[0] < args.min_chunk_size:
            tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
            continue
        if feature.shape[0] > args.chunk_size:
            # Long utterance: split into half-overlapping chunks, embed each, then
            # average the chunk embeddings weighted by chunk length.
            feature_array = []
            feature_length = []
            num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
            tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
            for i in range(num_chunks):
                start = int(i * (args.chunk_size / 2))  # 50% overlap between consecutive chunks
                this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
                feature_length.append(this_chunk_size)
                feature_array.append(feature[start:start+this_chunk_size])
            feature_length = np.expand_dims(np.array(feature_length), axis=1)
            # Except for the last feature, the length of other features should be the same (=chunk_size)
            embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
            embedding_last = trainer.predict(feature_array[-1])  # last chunk may be shorter, predict alone
            embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
            if args.normalize:
                embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))  # L2-normalize before averaging
            embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
        else:
            tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
            embedding = trainer.predict(feature)
        tf.logging.info("[INFO] Key %s finished predicting" % (key))
        if args.normalize:
            embedding /= np.sqrt(np.sum(np.square(embedding)))  # L2-normalize final embedding
        write_vec_flt(fp_out, embedding, key=key)
        tf.logging.info("[INFO] Key %s finished writing" % (key))
    fp_out.close()
    trainer.close()
| 52.237624 | 164 | 0.657127 | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
"Segments longer than this value will be splited before extraction. "
"Then the splited embeddings will be averaged to get the final embedding. "
"L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
if __name__ == '__main__':
tf.reset_default_graph()
tf.logging.set_verbosity(tf.logging.INFO)
nnet_dir = os.path.join(args.model_dir, "nnet")
config_json = os.path.join(args.model_dir, "nnet/config.json")
if not os.path.isfile(config_json):
sys.exit("Cannot find params.json in %s" % config_json)
params = Params(config_json)
if len(args.node) != 0:
params.embedding_node = args.node
tf.logging.info("Extract embedding from %s" % params.embedding_node)
with open(os.path.join(nnet_dir, "feature_dim"), "r") as f:
dim = int(f.readline().strip())
trainer = Trainer(params, args.model_dir, dim)
trainer.build("predict")
if args.rspecifier.rsplit(".", 1)[1] == "scp":
sys.exit("The rspecifier must be ark or input pipe")
fp_out = open_or_fd(args.wspecifier, "wb")
for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
if feature.shape[0] < args.min_chunk_size:
tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
continue
if feature.shape[0] > args.chunk_size:
feature_array = []
feature_length = []
num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
for i in range(num_chunks):
start = int(i * (args.chunk_size / 2))
this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
feature_length.append(this_chunk_size)
feature_array.append(feature[start:start+this_chunk_size])
feature_length = np.expand_dims(np.array(feature_length), axis=1)
embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
embedding_last = trainer.predict(feature_array[-1])
embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
if args.normalize:
embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
else:
tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
embedding = trainer.predict(feature)
tf.logging.info("[INFO] Key %s finished predicting" % (key))
if args.normalize:
embedding /= np.sqrt(np.sum(np.square(embedding)))
write_vec_flt(fp_out, embedding, key=key)
tf.logging.info("[INFO] Key %s finished writing" % (key))
fp_out.close()
trainer.close()
| true | true |
f730a42cacff927c57b8618033a20ff148ab5b71 | 1,637 | py | Python | config.py | rafael-carvalho/investigo-meraki | 3c75ebc54e38b822e5ae452f4faf12d527d95f4d | [
"Apache-2.0"
] | 2 | 2018-04-05T08:52:54.000Z | 2020-05-16T15:43:44.000Z | config.py | meraki/investigo-spark-bot | 34665143724522d463873d704dd8d77861748358 | [
"Apache-2.0"
] | null | null | null | config.py | meraki/investigo-spark-bot | 34665143724522d463873d704dd8d77861748358 | [
"Apache-2.0"
] | 2 | 2018-04-18T08:35:59.000Z | 2020-03-12T22:13:19.000Z | import os
class Config(object):
    """Base Flask configuration, populated from environment variables.

    SECRET_KEY and DATABASE_URL are read with os.environ[...] on purpose so the
    app fails fast at import time when they are missing.
    """
    BASE_DIR = os.path.abspath(os.path.dirname(__file__))
    DEBUG = False
    SECRET_KEY = os.environ['SECRET_KEY']  # required; KeyError if unset
    DATABASE_URL = os.environ['DATABASE_URL']  # required; KeyError if unset
    SQLALCHEMY_DATABASE_URI = DATABASE_URL
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    TROPO_API_KEY_TEXT = os.environ.get('TROPO_API_KEY_TEXT', "TEXT TOKEN NOT PROVIDED")
    TROPO_API_KEY_VOICE = os.environ.get('TROPO_API_KEY_VOICE', "VOICE TOKEN NOT PROVIDED")
    SPARK_TOKEN = os.environ.get('SPARK_TOKEN', "TOKEN-NOT-PROVIDED")
    ON_CISCO_NETWORK = os.environ.get('ON_CISCO_NETWORK', False)
    NOTIFICATION_SMS_PHONE_NUMBER = os.environ.get('NOTIFICATION_SMS_PHONE_NUMBER', False)
    SPARK_DEFAULT_ROOM_ID = os.environ.get('SPARK_DEFAULT_ROOM_ID', False)
    SMS_ENABLED = os.environ.get('SMS_ENABLED', False)
    # Env vars are strings; coerce the flag to a real bool ('True'/'TRUE' only).
    SMS_ENABLED = (SMS_ENABLED == 'True' or SMS_ENABLED == 'TRUE')
    SHOW_WEB_LINK = os.environ.get('SHOW_WEB_LINK', False)
    SHOW_WEB_LINK = (SHOW_WEB_LINK == 'True' or SHOW_WEB_LINK == 'TRUE')
    ADMIN_NAME = os.environ.get('ADMIN_NAME', '')
    MERAKI_VALIDATOR_TOKEN = os.environ.get('MERAKI_VALIDATOR', "TOKEN-NOT-PROVIDED")
    # Application threads. A common general assumption is
    # using 2 per available processor cores - to handle
    # incoming requests using one and performing background
    # operations using the other.
    THREADS_PER_PAGE = 2
class ProductionConfig(Config):
    """Production settings: debugging disabled."""
    DEBUG = False
class StagingConfig(Config):
    """Staging settings: development flags and debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class DevelopmentConfig(Config):
    """Local development settings: development flags and debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class TestingConfig(Config):
    """Test-suite settings: Flask TESTING mode enabled."""
    TESTING = True
| 34.829787 | 91 | 0.729994 | import os
class Config(object):
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
DATABASE_URL = os.environ['DATABASE_URL']
SQLALCHEMY_DATABASE_URI = DATABASE_URL
SQLALCHEMY_TRACK_MODIFICATIONS = True
TROPO_API_KEY_TEXT = os.environ.get('TROPO_API_KEY_TEXT', "TEXT TOKEN NOT PROVIDED")
TROPO_API_KEY_VOICE = os.environ.get('TROPO_API_KEY_VOICE', "VOICE TOKEN NOT PROVIDED")
SPARK_TOKEN = os.environ.get('SPARK_TOKEN', "TOKEN-NOT-PROVIDED")
ON_CISCO_NETWORK = os.environ.get('ON_CISCO_NETWORK', False)
NOTIFICATION_SMS_PHONE_NUMBER = os.environ.get('NOTIFICATION_SMS_PHONE_NUMBER', False)
SPARK_DEFAULT_ROOM_ID = os.environ.get('SPARK_DEFAULT_ROOM_ID', False)
SMS_ENABLED = os.environ.get('SMS_ENABLED', False)
SMS_ENABLED = (SMS_ENABLED == 'True' or SMS_ENABLED == 'TRUE')
SHOW_WEB_LINK = os.environ.get('SHOW_WEB_LINK', False)
SHOW_WEB_LINK = (SHOW_WEB_LINK == 'True' or SHOW_WEB_LINK == 'TRUE')
ADMIN_NAME = os.environ.get('ADMIN_NAME', '')
MERAKI_VALIDATOR_TOKEN = os.environ.get('MERAKI_VALIDATOR', "TOKEN-NOT-PROVIDED")
THREADS_PER_PAGE = 2
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
| true | true |
f730a48aa8629fddb68ef5a3a25e8da73942fea2 | 5,876 | py | Python | tensorflow/python/compat/compat.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/compat/compat.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:13:06.000Z | 2022-02-10T02:19:43.000Z | tensorflow/python/compat/compat.py | Hyperclaw79/tensorflow | 14c58e1d380b2001ffdf7ef782d44ad1a21f763c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 5)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the cached compatibility date number.

  Uses *date_to_override* when given; otherwise the module horizon, shifted by
  the TF_FORWARD_COMPATIBILITY_DELTA_DAYS environment variable if it is set.
  """
  global _FORWARD_COMPATIBILITY_DATE_NUMBER
  effective = date_to_override
  if not effective:
    effective = _FORWARD_COMPATIBILITY_HORIZON
    extra_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if extra_days:
      effective = effective + datetime.timedelta(days=int(extra_days))
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      effective.year, effective.month, effective.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return True if the forward-compatibility window ending at (year, month, day) has expired.

  Producers gate new graph-level features on this so that serialized graphs
  remain loadable by consumer binaries built from source up to ~3 weeks old.
  The date passed in is the day by which consumers are expected to have been
  rebuilt (typically >= 3 weeks after the feature lands).

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if serialized graphs produced now can be consumed by binaries built
    after (year, month, day).
  """
  return _date_to_date_number(year, month, day) < _FORWARD_COMPATIBILITY_DATE_NUMBER
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager that temporarily moves the forward-compatibility horizon.

  Lets unit tests exercise `forward_compatible`-gated code paths before the
  real window expires: inside the `with` block the horizon is (year, month,
  day); on exit it is restored to the module default.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  try:
    temporary_horizon = datetime.date(year, month, day)
    _update_forward_compatibility_date_number(temporary_horizon)
    yield
  finally:
    # Always restore the default horizon, even if the body raised.
    _update_forward_compatibility_date_number()
| 35.185629 | 82 | 0.747617 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 5)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
  # Pack the date into one int (day: low 5 bits, month: next 4, year above).
  return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the cached compatibility date number (optionally from an override date)."""
  global _FORWARD_COMPATIBILITY_DATE_NUMBER
  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    # Optional day offset from the environment, applied to the default horizon only.
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return True if the forward-compatibility window ending at (year, month, day) has expired."""
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
      year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager that temporarily moves the forward-compat horizon to (year, month, day)."""
  try:
    _update_forward_compatibility_date_number(datetime.date(year, month, day))
    yield
  finally:
    # Always restore the default horizon, even if the body raised.
    _update_forward_compatibility_date_number()
| true | true |
f730a4db01573a009901b98513880dc1ffb68715 | 807 | py | Python | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Tests for imageglobals module
"""
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
from .. import imageglobals as igs
def test_errorlevel():
    """ErrorLevel context manager sets igs.error_level inside the block and restores it after."""
    orig_level = igs.error_level
    for level in (10, 20, 30):
        with igs.ErrorLevel(level):
            assert_equal(igs.error_level, level)
    assert_equal(igs.error_level, orig_level)
| 33.625 | 78 | 0.545229 | true | true | |
f730a5be097b0dd193d78fd67d840787ea818e19 | 886 | py | Python | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 10 | 2018-04-19T21:54:31.000Z | 2021-07-22T12:46:33.000Z | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 9 | 2018-01-30T20:41:36.000Z | 2021-01-28T23:00:18.000Z | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 3 | 2019-08-20T21:43:53.000Z | 2022-01-20T14:39:34.000Z | import numpy as np
import matplotlib.pyplot as plt
def plot(base_name):
    """Plot cumulative motif-match ratio vs. diplotype-coverage threshold for one run directory."""
    def get_hist(s):
        # Scale the diplotype histogram by the first summary entry (total peak count).
        return s["summary"][0]*s["diplo_hist"]
    motif = np.load(base_name + "/limited_summits_alignments_motif_summary.npz")
    nonmotif = np.load(base_name + "/limited_summits_alignments_nonmotif_summary.npz")
    motif_hist = get_hist(motif)
    nonmotif_hist = get_hist(nonmotif)
    cum_motif = np.cumsum(motif_hist)
    cum_nonmotif = np.cumsum(nonmotif_hist)
    cum_total = cum_motif + cum_nonmotif
    # Avoid division by zero where no peaks fall below the threshold yet.
    ratio = np.where(cum_total == 0, 0, cum_motif/cum_total)
    # NOTE(review): assumes diplo_hist has exactly 100 bins spanning [0, 1] -- confirm upstream.
    plt.plot(np.linspace(0, 1, 100), ratio, label=base_name)
# CLI: plot one curve per comma-separated run directory, then show the figure.
if __name__ == "__main__":
    import sys
    paths = sys.argv[1].split(",")
    for path in paths:
        plot(path)
    plt.xlabel("Ratio of reads covered by diplotypes threshold")
    plt.ylabel("Motif match percentage")
    plt.legend()
    plt.show()
| 31.642857 | 86 | 0.691874 | import numpy as np
import matplotlib.pyplot as plt
def plot(base_name):
    """Plot cumulative motif-match ratio vs. diplotype-coverage threshold (comment-stripped duplicate)."""
    def get_hist(s):
        return s["summary"][0]*s["diplo_hist"]
    motif = np.load(base_name + "/limited_summits_alignments_motif_summary.npz")
    nonmotif = np.load(base_name + "/limited_summits_alignments_nonmotif_summary.npz")
    motif_hist = get_hist(motif)
    nonmotif_hist = get_hist(nonmotif)
    cum_motif = np.cumsum(motif_hist)
    cum_nonmotif = np.cumsum(nonmotif_hist)
    cum_total = cum_motif + cum_nonmotif
    ratio = np.where(cum_total == 0, 0, cum_motif/cum_total)
    # NOTE(review): assumes diplo_hist has exactly 100 bins spanning [0, 1].
    plt.plot(np.linspace(0, 1, 100), ratio, label=base_name)
# CLI: plot one curve per comma-separated run directory, then show the figure.
if __name__ == "__main__":
    import sys
    paths = sys.argv[1].split(",")
    for path in paths:
        plot(path)
    plt.xlabel("Ratio of reads covered by diplotypes threshold")
    plt.ylabel("Motif match percentage")
    plt.legend()
    plt.show()
| true | true |
f730a5caa27df176131385076f8a70b5752aa206 | 1,224 | py | Python | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | 2 | 2015-09-02T21:39:10.000Z | 2016-06-16T01:54:20.000Z | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | null | null | null | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.windows')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
| 31.384615 | 78 | 0.747549 |
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.windows')
_ = _translators.primary
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
| true | true |
f730a64dcfd7ad054c235267a643fa3816394fc1 | 261 | py | Python | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | 4 | 2021-01-07T14:25:15.000Z | 2021-02-01T10:36:10.000Z | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | null | null | null | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | null | null | null | """
练习1:在终端中输入一个疫情确诊人数再录入一个治愈人数,
打印治愈比例
格式:治愈比例为xx%
效果:
请输入确诊人数:500
请 输入治愈人数:495
治愈比例为99.0%
"""
# Read a confirmed case count and a cured case count from the terminal,
# then report the cure rate as a percentage, e.g. "治愈比例为99.0%".
diagnosed = int(input("请输入确诊人数:"))
recovered = int(input("请输入治愈人数:"))
cure_rate = recovered / diagnosed * 100
print(f"治愈比例为{cure_rate}%")
| 18.642857 | 34 | 0.59387 | confirmed = int(input("请输入确诊人数:"))
cure = int(input("请输入治愈人数:"))
result = cure / confirmed * 100
print("治愈比例为" + str(result) + "%")
| true | true |
f730a690b6b79400b52d582014d1ddf3b05f2bf6 | 369 | py | Python | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | 1 | 2022-02-19T18:55:43.000Z | 2022-02-19T18:55:43.000Z | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | null | null | null | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.1 on 2018-11-21 00:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('IoT_MaintOps', '0072_auto_20181115_0423'),
]
operations = [
migrations.AlterModelOptions(
name='equipmentinstancedailyriskscore',
options={'ordering': ()},
),
]
| 20.5 | 52 | 0.620596 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('IoT_MaintOps', '0072_auto_20181115_0423'),
]
operations = [
migrations.AlterModelOptions(
name='equipmentinstancedailyriskscore',
options={'ordering': ()},
),
]
| true | true |
f730a88b0aa1fb26bbd6dfb345fe135d202fd7b6 | 8,692 | py | Python | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-04-28T19:12:30.000Z | 2022-03-22T23:04:46.000Z | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 59 | 2020-06-12T12:12:52.000Z | 2022-03-28T18:14:50.000Z | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-25T21:22:08.000Z | 2021-08-30T20:06:18.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetCloudProviderSnapshotResult',
'AwaitableGetCloudProviderSnapshotResult',
'get_cloud_provider_snapshot',
]
@pulumi.output_type
class GetCloudProviderSnapshotResult:
    """
    A collection of values returned by getCloudProviderSnapshot.
    """
    def __init__(__self__, cluster_name=None, created_at=None, description=None, expires_at=None, id=None, master_key_uuid=None, mongod_version=None, project_id=None, snapshot_id=None, snapshot_type=None, status=None, storage_size_bytes=None, type=None):
        # Generated constructor: each argument is type-checked when truthy,
        # then stored via pulumi.set() so the @property getters below can
        # read it back with pulumi.get().
        if cluster_name and not isinstance(cluster_name, str):
            raise TypeError("Expected argument 'cluster_name' to be a str")
        pulumi.set(__self__, "cluster_name", cluster_name)
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if expires_at and not isinstance(expires_at, str):
            raise TypeError("Expected argument 'expires_at' to be a str")
        pulumi.set(__self__, "expires_at", expires_at)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if master_key_uuid and not isinstance(master_key_uuid, str):
            raise TypeError("Expected argument 'master_key_uuid' to be a str")
        pulumi.set(__self__, "master_key_uuid", master_key_uuid)
        if mongod_version and not isinstance(mongod_version, str):
            raise TypeError("Expected argument 'mongod_version' to be a str")
        pulumi.set(__self__, "mongod_version", mongod_version)
        if project_id and not isinstance(project_id, str):
            raise TypeError("Expected argument 'project_id' to be a str")
        pulumi.set(__self__, "project_id", project_id)
        if snapshot_id and not isinstance(snapshot_id, str):
            raise TypeError("Expected argument 'snapshot_id' to be a str")
        pulumi.set(__self__, "snapshot_id", snapshot_id)
        if snapshot_type and not isinstance(snapshot_type, str):
            raise TypeError("Expected argument 'snapshot_type' to be a str")
        pulumi.set(__self__, "snapshot_type", snapshot_type)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if storage_size_bytes and not isinstance(storage_size_bytes, int):
            raise TypeError("Expected argument 'storage_size_bytes' to be a int")
        pulumi.set(__self__, "storage_size_bytes", storage_size_bytes)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> str:
        """
        The name of the Atlas cluster that contains the snapshot.
        """
        return pulumi.get(self, "cluster_name")
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        UTC ISO 8601 formatted point in time when Atlas took the snapshot.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Description of the snapshot. Only present for on-demand snapshots.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="expiresAt")
    def expires_at(self) -> str:
        """
        UTC ISO 8601 formatted point in time when Atlas will delete the snapshot.
        """
        return pulumi.get(self, "expires_at")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="masterKeyUuid")
    def master_key_uuid(self) -> str:
        """
        Unique ID of the AWS KMS Customer Master Key used to encrypt the snapshot. Only visible for clusters using Encryption at Rest via Customer KMS.
        """
        return pulumi.get(self, "master_key_uuid")
    @property
    @pulumi.getter(name="mongodVersion")
    def mongod_version(self) -> str:
        """
        Version of the MongoDB server.
        """
        return pulumi.get(self, "mongod_version")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> str:
        """
        The project identifier, echoed back from the invocation arguments.
        """
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> str:
        """
        The unique identifier of the snapshot.
        """
        return pulumi.get(self, "snapshot_id")
    @property
    @pulumi.getter(name="snapshotType")
    def snapshot_type(self) -> str:
        """
        Specified the type of snapshot. Valid values are onDemand and scheduled.
        """
        return pulumi.get(self, "snapshot_type")
    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Current status of the snapshot. One of the following values: queued, inProgress, completed, failed.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="storageSizeBytes")
    def storage_size_bytes(self) -> int:
        """
        Specifies the size of the snapshot in bytes.
        """
        return pulumi.get(self, "storage_size_bytes")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of cluster: replicaSet or shardedCluster.
        """
        return pulumi.get(self, "type")
class AwaitableGetCloudProviderSnapshotResult(GetCloudProviderSnapshotResult):
    """
    Awaitable variant of GetCloudProviderSnapshotResult: awaiting an
    instance immediately produces a plain result object carrying the
    same field values.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks __await__ as a generator function,
        # which is what makes instances usable with `await`; it never
        # actually suspends.
        if False:
            yield self
        return GetCloudProviderSnapshotResult(
            cluster_name=self.cluster_name,
            created_at=self.created_at,
            description=self.description,
            expires_at=self.expires_at,
            id=self.id,
            master_key_uuid=self.master_key_uuid,
            mongod_version=self.mongod_version,
            project_id=self.project_id,
            snapshot_id=self.snapshot_id,
            snapshot_type=self.snapshot_type,
            status=self.status,
            storage_size_bytes=self.storage_size_bytes,
            type=self.type)
def get_cloud_provider_snapshot(cluster_name: Optional[str] = None,
                                project_id: Optional[str] = None,
                                snapshot_id: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudProviderSnapshotResult:
    """
    `CloudProviderSnapshot` provides a Cloud Backup Snapshot datasource. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service.
    > **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation.
    :param str cluster_name: The name of the Atlas cluster that contains the snapshot you want to retrieve.
    :param str project_id: The unique identifier of the project (group) that the cluster belongs to.
    :param str snapshot_id: The unique identifier of the snapshot you want to retrieve.
    """
    # Marshal the arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['projectId'] = project_id
    __args__['snapshotId'] = snapshot_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider's data-source function and unwrap its value.
    __ret__ = pulumi.runtime.invoke('mongodbatlas:index/getCloudProviderSnapshot:getCloudProviderSnapshot', __args__, opts=opts, typ=GetCloudProviderSnapshotResult).value
    return AwaitableGetCloudProviderSnapshotResult(
        cluster_name=__ret__.cluster_name,
        created_at=__ret__.created_at,
        description=__ret__.description,
        expires_at=__ret__.expires_at,
        id=__ret__.id,
        master_key_uuid=__ret__.master_key_uuid,
        mongod_version=__ret__.mongod_version,
        project_id=__ret__.project_id,
        snapshot_id=__ret__.snapshot_id,
        snapshot_type=__ret__.snapshot_type,
        status=__ret__.status,
        storage_size_bytes=__ret__.storage_size_bytes,
        type=__ret__.type)
| 40.0553 | 254 | 0.666015 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetCloudProviderSnapshotResult',
'AwaitableGetCloudProviderSnapshotResult',
'get_cloud_provider_snapshot',
]
@pulumi.output_type
class GetCloudProviderSnapshotResult:
def __init__(__self__, cluster_name=None, created_at=None, description=None, expires_at=None, id=None, master_key_uuid=None, mongod_version=None, project_id=None, snapshot_id=None, snapshot_type=None, status=None, storage_size_bytes=None, type=None):
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if expires_at and not isinstance(expires_at, str):
raise TypeError("Expected argument 'expires_at' to be a str")
pulumi.set(__self__, "expires_at", expires_at)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if master_key_uuid and not isinstance(master_key_uuid, str):
raise TypeError("Expected argument 'master_key_uuid' to be a str")
pulumi.set(__self__, "master_key_uuid", master_key_uuid)
if mongod_version and not isinstance(mongod_version, str):
raise TypeError("Expected argument 'mongod_version' to be a str")
pulumi.set(__self__, "mongod_version", mongod_version)
if project_id and not isinstance(project_id, str):
raise TypeError("Expected argument 'project_id' to be a str")
pulumi.set(__self__, "project_id", project_id)
if snapshot_id and not isinstance(snapshot_id, str):
raise TypeError("Expected argument 'snapshot_id' to be a str")
pulumi.set(__self__, "snapshot_id", snapshot_id)
if snapshot_type and not isinstance(snapshot_type, str):
raise TypeError("Expected argument 'snapshot_type' to be a str")
pulumi.set(__self__, "snapshot_type", snapshot_type)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if storage_size_bytes and not isinstance(storage_size_bytes, int):
raise TypeError("Expected argument 'storage_size_bytes' to be a int")
pulumi.set(__self__, "storage_size_bytes", storage_size_bytes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> str:
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expiresAt")
def expires_at(self) -> str:
return pulumi.get(self, "expires_at")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="masterKeyUuid")
def master_key_uuid(self) -> str:
return pulumi.get(self, "master_key_uuid")
@property
@pulumi.getter(name="mongodVersion")
def mongod_version(self) -> str:
return pulumi.get(self, "mongod_version")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> str:
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> str:
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> str:
return pulumi.get(self, "snapshot_type")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageSizeBytes")
def storage_size_bytes(self) -> int:
return pulumi.get(self, "storage_size_bytes")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetCloudProviderSnapshotResult(GetCloudProviderSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCloudProviderSnapshotResult(
cluster_name=self.cluster_name,
created_at=self.created_at,
description=self.description,
expires_at=self.expires_at,
id=self.id,
master_key_uuid=self.master_key_uuid,
mongod_version=self.mongod_version,
project_id=self.project_id,
snapshot_id=self.snapshot_id,
snapshot_type=self.snapshot_type,
status=self.status,
storage_size_bytes=self.storage_size_bytes,
type=self.type)
def get_cloud_provider_snapshot(cluster_name: Optional[str] = None,
project_id: Optional[str] = None,
snapshot_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudProviderSnapshotResult:
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['projectId'] = project_id
__args__['snapshotId'] = snapshot_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('mongodbatlas:index/getCloudProviderSnapshot:getCloudProviderSnapshot', __args__, opts=opts, typ=GetCloudProviderSnapshotResult).value
return AwaitableGetCloudProviderSnapshotResult(
cluster_name=__ret__.cluster_name,
created_at=__ret__.created_at,
description=__ret__.description,
expires_at=__ret__.expires_at,
id=__ret__.id,
master_key_uuid=__ret__.master_key_uuid,
mongod_version=__ret__.mongod_version,
project_id=__ret__.project_id,
snapshot_id=__ret__.snapshot_id,
snapshot_type=__ret__.snapshot_type,
status=__ret__.status,
storage_size_bytes=__ret__.storage_size_bytes,
type=__ret__.type)
| true | true |
f730a8d6bd9bfbf555172824425acb3083358392 | 686 | py | Python | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 7 | 2020-11-23T02:45:36.000Z | 2022-03-18T03:03:33.000Z | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 4 | 2021-02-23T03:03:51.000Z | 2021-11-13T00:07:38.000Z | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 2 | 2021-01-19T17:56:28.000Z | 2022-03-28T04:39:41.000Z | #!/usr/bin/env python
import sys
from TimeFileMaker import *
if __name__ == '__main__':
    # Command-line entry point: compare two `make TIMED=1` logs and emit a
    # sorted diff table (to stdout or to the optional output files).
    usage_text = 'Usage: %s [--sort-by=auto|absolute|diff] AFTER_FILE_NAME BEFORE_FILE_NAME [OUTPUT_FILE_NAME ..]' % sys.argv[0]
    help_text = r'''Formats timing information from the output of two invocations of `make TIMED=1` into a sorted table.
The input is expected to contain lines in the format:
FILE_NAME (...user: NUMBER_IN_SECONDS...)
'''
    sort_key, positional = parse_args(sys.argv, usage_text, help_text)
    after_times = get_times(positional[1])
    before_times = get_times(positional[2])
    diff_table = make_diff_table_string(after_times, before_times, sort_by=sort_key)
    print_or_write_table(diff_table, positional[3:])
| 40.352941 | 123 | 0.728863 |
import sys
from TimeFileMaker import *
if __name__ == '__main__':
USAGE = 'Usage: %s [--sort-by=auto|absolute|diff] AFTER_FILE_NAME BEFORE_FILE_NAME [OUTPUT_FILE_NAME ..]' % sys.argv[0]
HELP_STRING = r'''Formats timing information from the output of two invocations of `make TIMED=1` into a sorted table.
The input is expected to contain lines in the format:
FILE_NAME (...user: NUMBER_IN_SECONDS...)
'''
sort_by, args = parse_args(sys.argv, USAGE, HELP_STRING)
left_dict = get_times(args[1])
right_dict = get_times(args[2])
table = make_diff_table_string(left_dict, right_dict, sort_by=sort_by)
print_or_write_table(table, args[3:])
| true | true |
f730a93f581867bc217a894c85120b1515d73e81 | 2,516 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 125359744
"""
"""
random actions, total chaos
"""
# Replay of a recorded random-action scenario (test_random_actions,
# uuid 125359744) against the gamma game engine from part1.  The expected
# return values below were captured from a known-good implementation, so
# the exact statement order is the test.
# NOTE(review): gamma_move appears to take (board, player, x, y) and return
# 1 for a legal move, 0 otherwise -- confirm against part1's API.
board = gamma_new(3, 4, 4, 4)
assert board is not None
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 2, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
# First snapshot: check the full rendered board after the opening moves.
board860826090 = gamma_board(board)
assert board860826090 is not None
assert board860826090 == ("..1\n" "114\n" "33.\n" "324\n")
del board860826090
board860826090 = None
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_golden_possible(board, 4) == 1
# Second snapshot after player 3 took (0, 3).
board371322770 = gamma_board(board)
assert board371322770 is not None
assert board371322770 == ("3.1\n" "114\n" "33.\n" "324\n")
del board371322770
board371322770 = None
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
# Release the board when the replay is complete.
gamma_delete(board)
| 29.255814 | 58 | 0.678855 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
board = gamma_new(3, 4, 4, 4)
assert board is not None
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 2, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
board860826090 = gamma_board(board)
assert board860826090 is not None
assert board860826090 == ("..1\n" "114\n" "33.\n" "324\n")
del board860826090
board860826090 = None
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_golden_possible(board, 4) == 1
board371322770 = gamma_board(board)
assert board371322770 is not None
assert board371322770 == ("3.1\n" "114\n" "33.\n" "324\n")
del board371322770
board371322770 = None
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
gamma_delete(board)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.