#!/usr/local/bin/python
# Find all po files, and copy them to specified directory.
# Useful for uploading to Google Translator tool
#
# e.g.
# python copy_po_files_to_dir.py /tmp
import fnmatch
import os
from shutil import copyfile
import sys
from time import strftime
add_date = True
po_files = []
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*.po'):
pathname = os.path.join(root, filename)
        # Get language code (second path component, e.g. "./de/...")
        try:
            language_code = pathname.split(os.sep)[1]
        except IndexError:
            continue
if add_date:
iso_date = strftime("%Y%m%dT%H%M%S")
new_file_name = "%s_%s_%s" % (language_code, iso_date, filename)
else:
new_file_name = "%s_%s" % (language_code, filename)
destination_dir = sys.argv[1]
new_pathname = os.path.join(destination_dir, new_file_name)
copyfile(pathname, new_pathname)
print("%s --> %s" % (pathname, new_pathname))
|
"""
Test the endpoints in the ``/oauth2`` blueprint.
"""
import pytest
from fence.jwt.token import SCOPE_DESCRIPTION, CLIENT_ALLOWED_SCOPES
def test_all_scopes_have_description():
for scope in CLIENT_ALLOWED_SCOPES:
assert scope in SCOPE_DESCRIPTION
@pytest.mark.parametrize("method", ["GET", "POST"])
def test_oauth2_authorize(oauth_test_client, method):
"""Test ``/oauth2/authorize``."""
data = {"confirm": "yes"}
oauth_test_client.authorize(method=method, data=data)
@pytest.mark.parametrize("method", ["GET", "POST"])
def test_oauth2_authorize_get_public_client(oauth_test_client_public, method):
"""Test ``/oauth2/authorize`` with a public client."""
data = {"confirm": "yes"}
oauth_test_client_public.authorize(method=method, data=data)
def test_oauth2_token_post(oauth_test_client):
"""Test ``POST /oauth2/token``."""
data = {"confirm": "yes"}
oauth_test_client.authorize(data=data)
oauth_test_client.token()
def test_oauth2_token_post_public_client(oauth_test_client_public):
"""Test ``POST /oauth2/token`` for public client."""
data = {"confirm": "yes"}
oauth_test_client_public.authorize(data=data)
oauth_test_client_public.token()
def test_oauth2_token_refresh(oauth_test_client):
"""Test the refresh endpoint."""
data = {"confirm": "yes"}
oauth_test_client.authorize(data=data)
oauth_test_client.token()
oauth_test_client.refresh()
def test_oauth2_token_refresh_public_client(oauth_test_client_public):
"""Test the refresh endpoint for public client."""
data = {"confirm": "yes"}
oauth_test_client_public.authorize(data=data)
oauth_test_client_public.token()
oauth_test_client_public.refresh()
def test_oauth2_token_post_revoke(oauth_test_client):
"""
Test the following procedure:
- ``POST /oauth2/authorize`` successfully to obtain code
- ``POST /oauth2/token`` successfully to obtain token
- ``POST /oauth2/revoke`` to revoke the refresh token
- Refresh token should no longer be usable at this point.
"""
data = {"confirm": "yes"}
oauth_test_client.authorize(data=data)
oauth_test_client.token()
oauth_test_client.revoke()
# Try to use refresh token.
refresh_token = oauth_test_client.token_response.refresh_token
oauth_test_client.refresh(refresh_token, do_asserts=False)
response = oauth_test_client.refresh_response.response
assert response.status_code == 400
assert response.json["error"] == "invalid_request"
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
class CustomUserCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
model = get_user_model()
fields = ('username', 'email')
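
# Hypothetical usage sketch (not part of the original module): one way this
# form might be wired into a signup view. The "signup.html" template and the
# "login" URL name are assumptions for illustration only.
from django.shortcuts import redirect, render

def signup(request):
    # Bind POSTed data to the custom form and create the user on success.
    if request.method == "POST":
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("login")
    else:
        form = CustomUserCreationForm()
    return render(request, "signup.html", {"form": form})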
|
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
# features
"""Energy minimizer for molecular dynamics."""
from hoomd.md.minimize.fire import FIRE
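# Rough usage sketch (not part of this module; the exact FIRE constructor
# arguments may differ between HOOMD-blue releases): the minimizer is assigned
# as the simulation's integrator and stepped until it reports convergence.
#
#     fire = FIRE(dt=0.005, force_tol=1e-2, angmom_tol=1e-2, energy_tol=1e-7)
#     fire.methods.append(hoomd.md.methods.NVE(filter=hoomd.filter.All()))
#     simulation.operations.integrator = fire
#     while not fire.converged:
#         simulation.run(100)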
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from mock import call
from airflow.contrib.operators.cassandra_to_gcs import (
CassandraToGoogleCloudStorageOperator,
)
TMP_FILE_NAME = "temp-file"
class CassandraToGCSTest(unittest.TestCase):
@mock.patch("airflow.contrib.operators.cassandra_to_gcs.NamedTemporaryFile")
@mock.patch(
"airflow.contrib.operators.cassandra_to_gcs.GoogleCloudStorageHook.upload"
)
@mock.patch("airflow.contrib.operators.cassandra_to_gcs.CassandraHook")
def test_execute(self, mock_hook, mock_upload, mock_tempfile):
test_bucket = "test-bucket"
schema = "schema.json"
filename = "data.json"
gzip = True
mock_tempfile.return_value.name = TMP_FILE_NAME
operator = CassandraToGoogleCloudStorageOperator(
task_id="test-cas-to-gcs",
cql="select * from keyspace1.table1",
bucket=test_bucket,
filename=filename,
schema_filename=schema,
gzip=gzip,
)
operator.execute(None)
mock_hook.return_value.get_conn.assert_called_once_with()
call_schema = call(test_bucket, schema, TMP_FILE_NAME, "application/json", gzip)
call_data = call(test_bucket, filename, TMP_FILE_NAME, "application/json", gzip)
mock_upload.assert_has_calls([call_schema, call_data], any_order=True)
def test_convert_value(self):
op = CassandraToGoogleCloudStorageOperator
self.assertEqual(op.convert_value("None", None), None)
self.assertEqual(op.convert_value("int", 1), 1)
self.assertEqual(op.convert_value("float", 1.0), 1.0)
self.assertEqual(op.convert_value("str", "text"), "text")
self.assertEqual(op.convert_value("bool", True), True)
self.assertEqual(op.convert_value("dict", {"a": "b"}), {"a": "b"})
from datetime import datetime
now = datetime.now()
self.assertEqual(op.convert_value("datetime", now), str(now))
from cassandra.util import Date
date_str = "2018-01-01"
date = Date(date_str)
self.assertEqual(op.convert_value("date", date), str(date_str))
import uuid
from base64 import b64encode
test_uuid = uuid.uuid4()
encoded_uuid = b64encode(test_uuid.bytes).decode("ascii")
self.assertEqual(op.convert_value("uuid", test_uuid), encoded_uuid)
b = b"abc"
encoded_b = b64encode(b).decode("ascii")
self.assertEqual(op.convert_value("binary", b), encoded_b)
from decimal import Decimal
d = Decimal(1.0)
self.assertEqual(op.convert_value("decimal", d), float(d))
from cassandra.util import Time
time = Time(0)
self.assertEqual(op.convert_value("time", time), "00:00:00")
date_str_lst = ["2018-01-01", "2018-01-02", "2018-01-03"]
date_lst = [Date(d) for d in date_str_lst]
self.assertEqual(op.convert_value("list", date_lst), date_str_lst)
date_tpl = tuple(date_lst)
self.assertEqual(
op.convert_value("tuple", date_tpl),
{"field_0": "2018-01-01", "field_1": "2018-01-02", "field_2": "2018-01-03"},
)
|
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import same_doc_as
from ..models import ErrorEntity
from ..models import RewardCreate
from ..models import RewardInfo
from ..models import RewardPagingSlicedResult
from ..models import RewardUpdate
from ..models import ValidationErrorEntity
from ..operations.reward import CreateReward
from ..operations.reward import DeleteReward
from ..operations.reward import ExportRewards
from ..operations.reward import GetReward
from ..operations.reward import GetReward1
from ..operations.reward import ImportRewards
from ..operations.reward import QueryRewards
from ..operations.reward import QueryRewards1
from ..operations.reward import UpdateReward
@same_doc_as(CreateReward)
def create_reward(body: Optional[RewardCreate] = None, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = CreateReward.create(
body=body,
namespace=namespace,
)
return run_request(request)
@same_doc_as(DeleteReward)
def delete_reward(reward_id: str, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = DeleteReward.create(
reward_id=reward_id,
namespace=namespace,
)
return run_request(request)
@same_doc_as(ExportRewards)
def export_rewards(namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = ExportRewards.create(
namespace=namespace,
)
return run_request(request)
@same_doc_as(GetReward)
def get_reward(reward_id: str, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = GetReward.create(
reward_id=reward_id,
namespace=namespace,
)
return run_request(request)
@same_doc_as(GetReward1)
def get_reward_1(reward_id: str, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = GetReward1.create(
reward_id=reward_id,
namespace=namespace,
)
return run_request(request)
@same_doc_as(ImportRewards)
def import_rewards(replace_existing: bool, file: Optional[Any] = None, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = ImportRewards.create(
replace_existing=replace_existing,
file=file,
namespace=namespace,
)
return run_request(request)
@same_doc_as(QueryRewards)
def query_rewards(event_topic: Optional[str] = None, offset: Optional[int] = None, limit: Optional[int] = None, sort_by: Optional[str] = None, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = QueryRewards.create(
event_topic=event_topic,
offset=offset,
limit=limit,
sort_by=sort_by,
namespace=namespace,
)
return run_request(request)
@same_doc_as(QueryRewards1)
def query_rewards_1(event_topic: Optional[str] = None, offset: Optional[int] = None, limit: Optional[int] = None, sort_by: Optional[str] = None, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = QueryRewards1.create(
event_topic=event_topic,
offset=offset,
limit=limit,
sort_by=sort_by,
namespace=namespace,
)
return run_request(request)
@same_doc_as(UpdateReward)
def update_reward(reward_id: str, body: Optional[RewardUpdate] = None, namespace: Optional[str] = None):
if namespace is None:
namespace, error = get_services_namespace()
if error:
return None, error
request = UpdateReward.create(
reward_id=reward_id,
body=body,
namespace=namespace,
)
return run_request(request)
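
# Hypothetical usage sketch (not part of the generated module): every wrapper
# above returns a "(result, error)" pair, mirroring get_services_namespace(),
# so a caller might check the error before using the result, e.g.
#
#     result, error = export_rewards(namespace="my-namespace")
#     if error:
#         raise RuntimeError(error)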
|
"""
Quantilization functions and related stuff
"""
from functools import partial
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Index, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False, duplicates='raise'):
"""
Bin values into discrete intervals.
Use `cut` when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable. For example, `cut` could convert ages to groups of
age ranges. Supports binning into an equal number of bins, or a
pre-specified array of bins.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or pandas.IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used.
right : bool, default True
Indicates whether `bins` includes the rightmost edge or not. If
``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
indicate (1,2], (2,3], (3,4]. This argument is ignored when
`bins` is an IntervalIndex.
labels : array or bool, optional
Specifies the labels for the returned bins. Must be the same length as
the resulting bins. If False, returns only integer indicators of the
bins. This affects the type of the output container (see below).
This argument is ignored when `bins` is an IntervalIndex.
retbins : bool, default False
Whether to return the bins or not. Useful when bins is provided
as a scalar.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.23.0
Returns
-------
out : pandas.Categorical, Series, or ndarray
An array-like object representing the respective bin for each value
of `x`. The type depends on the value of `labels`.
* True (default) : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are Interval dtype.
* sequence of scalars : returns a Series for Series `x` or a
pandas.Categorical for all other inputs. The values stored within
are whatever the type in the sequence is.
* False : returns an ndarray of integers.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when `retbins=True`.
For scalar or sequence `bins`, this is an ndarray with the computed
        bins. If `duplicates='drop'` is set, non-unique bins are dropped. For
an IntervalIndex `bins`, this is equal to `bins`.
See Also
--------
qcut : Discretize variable into equal-sized buckets based on rank
or based on sample quantiles.
pandas.Categorical : Array type for storing data that come from a
fixed set of values.
Series : One-dimensional array with axis labels (including time series).
pandas.IntervalIndex : Immutable Index implementing an ordered,
sliceable set.
Notes
-----
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Series or pandas.Categorical object.
Examples
--------
Discretize into three equal-sized bins.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
... # doctest: +ELLIPSIS
[(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
array([0.994, 3. , 5. , 7. ]))
    Discovers the same bins, but assigns them specific labels. Notice that
    the returned Categorical's categories are `labels` and that it is ordered.
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
[bad, good, medium, medium, good, bad]
Categories (3, object): [bad < medium < good]
``labels=False`` implies you just want the bins back.
>>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3])
Passing a Series as an input returns a Series with categorical dtype:
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, 3)
... # doctest: +ELLIPSIS
a (1.992, 4.667]
b (1.992, 4.667]
c (4.667, 7.333]
d (7.333, 10.0]
e (7.333, 10.0]
dtype: category
Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...
    Passing a Series as input returns a Series with the mapped values;
    this is useful for mapping values numerically to intervals based on bins.
>>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 4.0
dtype: float64, array([0, 2, 4, 6, 8]))
    Use `duplicates='drop'` when the bin edges are not unique.
>>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
... right=False, duplicates='drop')
... # doctest: +ELLIPSIS
(a 0.0
b 1.0
c 2.0
d 3.0
e 3.0
dtype: float64, array([0, 2, 4, 6, 8]))
Passing an IntervalIndex for `bins` results in those categories exactly.
Notice that values not covered by the IntervalIndex are set to NaN. 0
is to the left of the first bin (which is closed on the right), and 1.5
falls between two bins.
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
>>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
[NaN, (0, 1], NaN, (2, 3], (4, 5]]
Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype,
duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : 1d ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
# Numpy 1.9 support: ensure this mask is a Numpy array
ids[np.asarray(x == bins[0])] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
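    """
    Strip trailing zeros, and any trailing decimal point, from a formatted
    number string, e.g. '1.500' -> '1.5' and '2.000' -> '2'.
    """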
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.datetime64
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.timedelta64
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
    Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64tz_dtype(dtype):
formatter = partial(Timestamp, tz=dtype.tz)
adjust = lambda x: x - Timedelta('1ns')
elif is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex([Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
# Check that the passed array is a Pandas or Numpy object
# We don't want to strip away a Pandas data-type here (e.g. datetimetz)
ndim = getattr(x, 'ndim', None)
if ndim is None:
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("Input array must be 1 dimensional")
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
bins = _convert_bin_to_datelike_type(bins, dtype)
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
|
#!/usr/bin/env python
# coding: utf-8
# 2D CNN Results: DescribeResult(nobs=20, minmax=(0.61764705, 0.9117647), mean=0.80882347, variance=0.007421234, skewness=-0.6640346646308899, kurtosis=-0.6421787948529669)
# 175.59881496429443
#
# 1D CNN Results: DescribeResult(nobs=20, minmax=(0.5, 0.7647059), mean=0.6014706, variance=0.004778274, skewness=0.3888928294181824, kurtosis=-0.02869078516654877)
# 123.57975792884827
#
# Random Forest Results: DescribeResult(nobs=20, minmax=(0.6176470588235294, 0.8823529411764706), mean=0.786764705882353, variance=0.004905754871608083, skewness=-0.7272137568240163, kurtosis=-0.03250413165303323)
# 5.735104084014893
#
# 2D CNN Results: DescribeResult(nobs=20, minmax=(0.64705884, 0.9117647), mean=0.8073529, variance=0.004778273, skewness=-0.5594719052314758, kurtosis=-0.329387229030631)
# 2D CNN + RF Results: DescribeResult(nobs=20, minmax=(0.7058823529411765, 0.9411764705882353), mean=0.8147058823529412, variance=0.004744126752868325, skewness=-0.08274438139518366, kurtosis=-1.007018099697539)
# 231.00545716285706
#
# 1D CNN Results: DescribeResult(nobs=20, minmax=(0.44117647, 0.7058824), mean=0.5897059, variance=0.005779913, skewness=-0.48788294196128845, kurtosis=-0.8254267041213263)
# 1D CNN + RF Results: DescribeResult(nobs=20, minmax=(0.6176470588235294, 0.9411764705882353), mean=0.7808823529411765, variance=0.009240120196685478, skewness=-0.014222735970944922, kurtosis=-0.9416469236922418)
# 130.11345314979553
#
# Fully Connected ANN Results: DescribeResult(nobs=20, minmax=(0.44117647, 0.7647059), mean=0.62647057, variance=0.006201056, skewness=-0.5996153354644775, kurtosis=0.13958413460101404)
# 75.08602714538574
#
# Dictionary Learning Results:
# In[ ]:
|
from pwn import *
from Crypto.Random import get_random_bytes
import base64
# fairly standard padding oracle attack
#
# the goal is to xor the ticket so that, decrypted, it reads
#
# ...junk.. "numbers:jackpot1,jackpot2,...jackpot5" "\x01"
# <--- at least 49 chars ---------------------->
# theoretically could be less, but chall has mean rng
#
# 'n' in 'numbers' falls onto block 2, so 1st and 2nd plaintext blocks get scrambled in last step
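#
# CBC malleability used throughout: with ciphertext blocks C[i-1], C[i] the
# plaintext block is P[i] = D(C[i]) XOR C[i-1], so XOR-ing a delta into
# C[i-1] XORs the same delta into the decrypted P[i] (and scrambles P[i-1]).
# getBlock() recovers one plaintext block byte-by-byte via the oracle, and the
# loop at the bottom rewrites each block of the ticket to the goal text.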
def connect():
while True: # wait for sufficiently short jackpot numbers (otherwise success chance too low)
r = remote("127.0.0.1", 25002)
r.recvuntil("raffle ticket:\n")
ticket64 = r.recvuntil("\n").strip()
ticket = base64.b64decode(ticket64)
r.recvuntil("numbers are:\n")
numbers = r.recvuntil("\n").strip()
for c in b"[] ":
numbers = numbers.replace(bytes([c]), b"")
if len(numbers) <= 40: return r, ticket, numbers
r.close()
def redeem(msg):
msg64 = base64.b64encode(msg)
r.recvuntil("Redeem a ticket:\n")
r.send(msg64 + b"\n")
while True:
in1 = r.recvuntil("\n")
if b"invalid" in in1:
return 0
if b"did not win" in in1:
return 1
if b"bctf" in in1:
print(in1) # print flag line
exit(0)
def xor(msg, pos, otp): # xor a segment in msg at pos
n = len(otp)
xorred = bytes( [ msg[pos + i] ^ otp[i] for i in range(n)] )
return msg[:pos] + xorred + msg[(pos + n):]
def getBlock(ticket0, start): # extract a block of plaintext
ptxt = b""
otp = b""
for pos in range(1, 17):
# naive solver, assumes first byte found gives 0x01 ending
for b in range(1, 257): # try 0 last
bb = bytes([ b & 0xff ])
ticket = xor(ticket0, start - pos, bb + otp)[:(start + 16)]
if redeem(ticket) == 1:
otp = bb + otp
ptxt = bytes([ (b ^ pos) & 0xff]) + ptxt
break
else:
print("bad")
return b"", b""
otp = bytes( [ v ^ pos ^ (pos + 1) for v in otp] )
return ptxt
r, ticket0, lst = connect()
print(f"jackpot nums: {lst} ({len(lst)})")
n0 = len(ticket0)
print(f"len(ticket0): {n0}")
code = redeem(ticket0)
print(f"original ticket check: {code}")
goal = b"numbers:" + lst + b"\x01"
Ngoal = len(goal)
print(Ngoal, goal)
# mutate ticket until we get 3 blocks of goal (last 48 chars)
for i in range(3):
ptxt0 = getBlock(ticket0, n0 - 16 * (i + 1))
goal0 = goal[Ngoal - 16*(i+1):Ngoal - 16*i]
print(i, goal0, ptxt0)
ticket0 = xor(ticket0, n0 - 16 * (i + 2), xor(ptxt0, 0, goal0))
print( redeem(ticket0) )
# do that first char by stochastically changing previous block
while True:
s = get_random_bytes(4)
ticket = xor(ticket0, n0 - 68, s)
redeem(ticket) # terminates if jackpot found
sys.stdout.write(".")
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Updates generated docs from Python doc comments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import os.path
import sys
import tensorflow as tf
from tensorflow.contrib import ffmpeg
from tensorflow.python import debug as tf_debug
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import docs
from tensorflow.python.framework import framework_lib
FLAGS = None
PREFIX_TEXT = """
Note: Functions taking `Tensor` arguments can also take anything accepted by
[`tf.convert_to_tensor`](framework.md#convert_to_tensor).
"""
def module_names():
return [
"tf",
"tf.errors",
"tf.image",
"tf.nn",
"tf.train",
"tf.python_io",
"tf.summary",
"tf.test",
"tf.contrib.bayesflow.entropy",
"tf.contrib.bayesflow.monte_carlo",
"tf.contrib.bayesflow.stochastic_graph",
"tf.contrib.bayesflow.stochastic_tensor",
"tf.contrib.bayesflow.variational_inference",
"tf.contrib.copy_graph",
"tf.contrib.crf",
"tf.contrib.distributions",
"tf.contrib.distributions.bijector",
"tf.contrib.ffmpeg",
"tf.contrib.framework",
"tf.contrib.graph_editor",
"tf.contrib.integrate",
"tf.contrib.layers",
"tf.contrib.learn",
"tf.contrib.learn.monitors",
"tf.contrib.legacy_seq2seq",
"tf.contrib.linalg",
"tf.contrib.losses",
"tf.contrib.metrics",
"tf.contrib.opt",
"tf.contrib.rnn",
"tf.contrib.solvers",
"tf.contrib.training",
"tf.contrib.util",
"tf_debug",
]
def find_module(base_module, name):
if name == "tf":
return base_module
# Special case for ffmpeg is needed since it's not linked in by default due
# to size concerns.
elif name == "tf.contrib.ffmpeg":
return ffmpeg
elif name == "tf_debug":
return tf_debug
elif name.startswith("tf."):
subname = name[3:]
subnames = subname.split(".")
parent_module = base_module
for s in subnames:
if not hasattr(parent_module, s):
raise ValueError(
"Module not found: {}. Submodule {} not found in parent module {}."
" Possible candidates are {}".format(
name, s, parent_module.__name__, dir(parent_module)))
parent_module = getattr(parent_module, s)
return parent_module
else:
raise ValueError(
"Invalid module name: {}. Module names must start with 'tf.'".format(
name))
def get_module_to_name(names):
return collections.OrderedDict([(find_module(tf, x), x) for x in names])
def all_libraries(module_to_name, members, documented):
"""Make a list of the individual files that we want to create.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
documented: Set of documented names to update.
Returns:
List of (filename, docs.Library) pairs.
"""
def library(name, title, module=None, **args):
if module is None:
module = sys.modules["tensorflow.python.ops." + name]
return (name + ".md", docs.Library(title=title,
module_to_name=module_to_name,
members=members,
documented=documented,
module=module,
**args))
return collections.OrderedDict([
# Splits of module 'tf'.
library("framework", "Building Graphs", framework_lib),
library("check_ops", "Asserts and boolean checks."),
library("constant_op", "Constants, Sequences, and Random Values",
constant_op, prefix=PREFIX_TEXT),
library("state_ops",
"Variables",
exclude_symbols=["create_partitioned_variables"],
prefix=PREFIX_TEXT),
library("array_ops",
"Tensor Transformations",
exclude_symbols=["list_diff"],
prefix=PREFIX_TEXT),
library("math_ops",
"Math",
exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
"lin_space", "sparse_segment_mean_grad"],
prefix=PREFIX_TEXT),
library("string_ops", "Strings",
prefix=PREFIX_TEXT),
library("histogram_ops", "Histograms"),
library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
library("functional_ops", "Higher Order Functions", prefix=PREFIX_TEXT),
library("tensor_array_ops", "TensorArray Operations", prefix=PREFIX_TEXT),
library("session_ops", "Tensor Handle Operations", prefix=PREFIX_TEXT),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
prefix=PREFIX_TEXT),
library("sparse_ops",
"Sparse Tensors",
exclude_symbols=["serialize_sparse", "serialize_many_sparse",
"deserialize_many_sparse"],
prefix=PREFIX_TEXT),
library("io_ops",
"Inputs and Readers",
exclude_symbols=["LookupTableBase", "HashTable",
"initialize_all_tables",
"parse_single_sequence_example",
"string_to_hash_bucket"],
prefix=PREFIX_TEXT),
library("python_io", "Data IO (Python functions)", tf.python_io),
library("nn",
"Neural Network",
tf.nn,
exclude_symbols=["conv2d_backprop_input",
"conv2d_backprop_filter", "avg_pool_grad",
"max_pool_grad", "max_pool_grad_with_argmax",
"batch_norm_with_global_normalization_grad",
"lrn_grad", "relu6_grad", "softplus_grad",
"softsign_grad", "xw_plus_b", "relu_layer",
"lrn", "batch_norm_with_global_normalization",
"batch_norm_with_global_normalization_grad",
"all_candidate_sampler", "seq2seq"],
prefix=PREFIX_TEXT),
library("client", "Running Graphs", client_lib),
library("train",
"Training",
tf.train,
exclude_symbols=["Feature", "Features", "BytesList", "FloatList",
"Int64List", "Example", "InferenceExample",
"FeatureList", "FeatureLists", "RankingExample",
"SequenceExample"]),
library("script_ops",
"Wraps python functions",
prefix=PREFIX_TEXT),
library("summary", "Summary Operations", tf.summary),
library("test", "Testing", tf.test),
library("contrib.bayesflow.entropy",
"BayesFlow Entropy (contrib)",
tf.contrib.bayesflow.entropy),
library("contrib.bayesflow.monte_carlo",
"BayesFlow Monte Carlo (contrib)",
tf.contrib.bayesflow.monte_carlo),
library("contrib.bayesflow.stochastic_graph",
"BayesFlow Stochastic Graph (contrib)",
tf.contrib.bayesflow.stochastic_graph),
library("contrib.bayesflow.stochastic_tensor",
"BayesFlow Stochastic Tensors (contrib)",
tf.contrib.bayesflow.stochastic_tensor),
library("contrib.bayesflow.variational_inference",
"BayesFlow Variational Inference (contrib)",
tf.contrib.bayesflow.variational_inference),
library("contrib.crf", "CRF (contrib)", tf.contrib.crf),
library("contrib.distributions", "Statistical Distributions (contrib)",
tf.contrib.distributions),
library("contrib.distributions.bijector",
"Random variable transformations (contrib)",
tf.contrib.distributions.bijector),
library("contrib.ffmpeg", "FFmpeg (contrib)", ffmpeg),
library("contrib.framework", "Framework (contrib)", tf.contrib.framework),
library("contrib.graph_editor", "Graph Editor (contrib)",
tf.contrib.graph_editor),
library("contrib.integrate", "Integrate (contrib)", tf.contrib.integrate),
library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
library("contrib.learn", "Learn (contrib)", tf.contrib.learn),
library("contrib.learn.monitors", "Monitors (contrib)",
tf.contrib.learn.monitors),
library("contrib.legacy_seq2seq", "Sequence to Sequence (contrib)",
tf.contrib.legacy_seq2seq),
library("contrib.linalg", "Linear Algebra (contrib)",
tf.contrib.linalg),
library("contrib.losses", "Losses (contrib)", tf.contrib.losses),
library("contrib.opt", "Optimization (contrib)", tf.contrib.opt),
library("contrib.rnn", "RNN and Cells (contrib)", tf.contrib.rnn),
library("contrib.metrics", "Metrics (contrib)", tf.contrib.metrics),
library("contrib.training", "Training (contrib)", tf.contrib.training),
library("contrib.util", "Utilities (contrib)", tf.contrib.util),
library("contrib.copy_graph", "Copying Graph Elements (contrib)",
tf.contrib.copy_graph),
library("tf_debug", "TensorFlow Debugger", tf_debug),
])
_hidden_symbols = ["Event", "LogMessage", "Summary", "SessionLog", "xrange",
"HistogramProto", "ConfigProto", "NodeDef", "GraphDef",
"GPUOptions", "GraphOptions", "RunOptions", "RunMetadata",
"SessionInterface", "BaseSession", "NameAttrList",
"AttrValue", "OptimizerOptions",
"CollectionDef", "MetaGraphDef", "QueueRunnerDef",
"SaverDef", "VariableDef", "TestCase", "GrpcServer",
"ClusterDef", "JobDef", "ServerDef"]
# TODO(skleinfeld, deannarubin) Address shortname
# conflict between tf.contrib.learn.NanLossDuringTrainingError and
# tf.contrib.learn.monitors.NanLossDuringTrainingError, arising due
# to imports in learn/python/learn/__init__.py
# TODO(wicke): Remove contrib.layers.relu* after shortnames are
# disabled. These conflict with tf.nn.relu*
EXCLUDE = frozenset(["tf.contrib.learn.monitors.NanLossDuringTrainingError",
"tf.contrib.layers.relu", "tf.contrib.layers.relu6",
"tf.contrib.framework.assert_global_step",
"tf.contrib.framework.get_global_step",
"tf.contrib.learn.NanLossDuringTrainingError",
"tf.contrib.layers.stack",
"tf.confusion_matrix"])
def main(unused_argv):
if not FLAGS.out_dir:
tf.logging.error("out_dir not specified")
return -1
# Document libraries
documented = set()
module_to_name = get_module_to_name(module_names())
members = docs.collect_members(module_to_name, exclude=EXCLUDE)
libraries = all_libraries(module_to_name, members, documented).items()
# Define catch_all library before calling write_libraries to avoid complaining
# about generically hidden symbols.
catch_all = docs.Library(title="Catch All", module=None,
exclude_symbols=_hidden_symbols,
module_to_name=module_to_name, members=members,
documented=documented)
# Write docs to files
docs.write_libraries(FLAGS.out_dir, libraries)
# Make it easy to search for hidden symbols
if FLAGS.print_hidden_regex:
hidden = set(_hidden_symbols)
for _, lib in libraries:
hidden.update(lib.exclude_symbols)
print(r"hidden symbols regex = r'\b(%s)\b'" % "|".join(sorted(hidden)))
# Verify that all symbols are mentioned in some library doc.
catch_all.assert_no_leftovers()
# Generate index
with open(os.path.join(FLAGS.out_dir, "index.md"), "w") as f:
docs.Index(module_to_name, members, libraries,
"../../api_docs/python/").write_markdown_to_file(f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--out_dir",
type=str,
default=None,
help="Directory to which docs should be written.")
parser.add_argument(
"--print_hidden_regex",
type="bool",
nargs="?",
const=True,
default=False,
help="Dump a regular expression matching any hidden symbol")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
# -*- coding: utf-8 -*-
"""
meepo_examples.tutorial.mysql
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A demo script on how to use meepo with mysql row-based binlog.
"""
import logging
import click
import pymysql
from meepo.utils import setup_logger
setup_logger()
logger = logging.getLogger("meepo_examples.tutorial.mysql")
from meepo._compat import urlparse
def db_prepare(dsn):
parsed = urlparse(dsn)
db_settings = {
"host": parsed.hostname,
"port": parsed.port or 3306,
"user": parsed.username,
"passwd": parsed.password
}
conn = pymysql.connect(**db_settings)
cursor = conn.cursor()
sql = """
DROP DATABASE IF EXISTS meepo_test;
CREATE DATABASE meepo_test;
DROP TABLE IF EXISTS meepo_test.test;
CREATE TABLE meepo_test.test (
id INT NOT NULL AUTO_INCREMENT,
data VARCHAR (256) NOT NULL,
PRIMARY KEY (id)
);
RESET MASTER;
"""
cursor.execute(sql)
logger.info("table created.")
    # generate binlog
sql = """
INSERT INTO test (data) VALUES ('a');
INSERT INTO test (data) VALUES ('b'), ('c'), ('d');
UPDATE test SET data = 'aa' WHERE id = 1;
UPDATE test SET data = 'bb' WHERE id = 2;
UPDATE test SET data = 'cc' WHERE id != 1;
DELETE FROM test WHERE id != 1;
DELETE FROM test WHERE id = 1;
"""
cursor.execute(sql)
cursor.close()
conn.commit()
conn.close()
logger.info("binlog created.")
@click.command()
@click.option('-m', '--mysql_dsn')
def main(mysql_dsn):
# make sure the user has permission to read binlog
mysql_dsn = mysql_dsn or "mysql+pymysql://root@localhost/meepo_test"
from meepo.sub.dummy import print_sub
print_sub(["test"])
from meepo.pub import mysql_pub
mysql_pub(mysql_dsn, ["test"])
if __name__ == "__main__":
main()
|
import matplotlib.pyplot as plt
import torch
from torch.cuda.amp import autocast
from tqdm import tqdm
from ..audio_zen.acoustics.feature import mag_phase, drop_band
from ..audio_zen.acoustics.mask import build_complex_ideal_ratio_mask, decompress_cIRM
from ..audio_zen.trainer.base_trainer import BaseTrainer
plt.switch_backend('agg')
class Trainer(BaseTrainer):
def __init__(self, dist, rank, config, resume, only_validation, model, loss_function, optimizer, train_dataloader, validation_dataloader):
super().__init__(dist, rank, config, resume, only_validation, model, loss_function, optimizer)
self.train_dataloader = train_dataloader
self.valid_dataloader = validation_dataloader
def _train_epoch(self, epoch):
loss_total = 0.0
progress_bar = None
if self.rank == 0:
progress_bar = tqdm(total=len(self.train_dataloader), desc=f"Training")
for noisy, clean in self.train_dataloader:
self.optimizer.zero_grad()
noisy = noisy.to(self.rank)
clean = clean.to(self.rank)
noisy_complex = self.torch_stft(noisy)
clean_complex = self.torch_stft(clean)
noisy_mag, _ = mag_phase(noisy_complex)
ground_truth_cIRM = build_complex_ideal_ratio_mask(noisy_complex, clean_complex) # [B, F, T, 2]
ground_truth_cIRM = drop_band(
ground_truth_cIRM.permute(0, 3, 1, 2), # [B, 2, F ,T]
self.model.module.num_groups_in_drop_band
).permute(0, 2, 3, 1)
with autocast(enabled=self.use_amp):
# [B, F, T] => [B, 1, F, T] => model => [B, 2, F, T] => [B, F, T, 2]
noisy_mag = noisy_mag.unsqueeze(1)
cRM = self.model(noisy_mag)
cRM = cRM.permute(0, 2, 3, 1)
loss = self.loss_function(ground_truth_cIRM, cRM)
self.scaler.scale(loss).backward()
self.scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_grad_norm_value)
self.scaler.step(self.optimizer)
self.scaler.update()
loss_total += loss.item()
if self.rank == 0:
progress_bar.update(1)
if self.rank == 0:
self.writer.add_scalar(f"Loss/Train", loss_total / len(self.train_dataloader), epoch)
@torch.no_grad()
def _validation_epoch(self, epoch):
progress_bar = None
if self.rank == 0:
progress_bar = tqdm(total=len(self.valid_dataloader), desc=f"Validation")
visualization_n_samples = self.visualization_config["n_samples"]
visualization_num_workers = self.visualization_config["num_workers"]
visualization_metrics = self.visualization_config["metrics"]
loss_total = 0.0
loss_list = {"With_reverb": 0.0, "No_reverb": 0.0, }
item_idx_list = {"With_reverb": 0, "No_reverb": 0, }
noisy_y_list = {"With_reverb": [], "No_reverb": [], }
clean_y_list = {"With_reverb": [], "No_reverb": [], }
enhanced_y_list = {"With_reverb": [], "No_reverb": [], }
validation_score_list = {"With_reverb": 0.0, "No_reverb": 0.0}
# speech_type in ("with_reverb", "no_reverb")
for i, (noisy, clean, name, speech_type) in enumerate(self.valid_dataloader):
assert len(name) == 1, "The batch size for the validation stage must be one."
name = name[0]
speech_type = speech_type[0]
noisy = noisy.to(self.rank)
clean = clean.to(self.rank)
noisy_complex = self.torch_stft(noisy)
clean_complex = self.torch_stft(clean)
noisy_mag, _ = mag_phase(noisy_complex)
cIRM = build_complex_ideal_ratio_mask(noisy_complex, clean_complex) # [B, F, T, 2]
noisy_mag = noisy_mag.unsqueeze(1)
cRM = self.model(noisy_mag)
cRM = cRM.permute(0, 2, 3, 1)
loss = self.loss_function(cIRM, cRM)
cRM = decompress_cIRM(cRM)
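            # Apply the mask as a complex multiplication with the noisy STFT:
            # (a + bj)(c + dj) = (ac - bd) + (ad + bc)j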
enhanced_real = cRM[..., 0] * noisy_complex.real - cRM[..., 1] * noisy_complex.imag
enhanced_imag = cRM[..., 1] * noisy_complex.real + cRM[..., 0] * noisy_complex.imag
enhanced_complex = torch.stack((enhanced_real, enhanced_imag), dim=-1)
enhanced = self.torch_istft(enhanced_complex, length=noisy.size(-1))
noisy = noisy.detach().squeeze(0).cpu().numpy()
clean = clean.detach().squeeze(0).cpu().numpy()
enhanced = enhanced.detach().squeeze(0).cpu().numpy()
assert len(noisy) == len(clean) == len(enhanced)
loss_total += loss
# Separated loss
loss_list[speech_type] += loss
item_idx_list[speech_type] += 1
if item_idx_list[speech_type] <= visualization_n_samples:
self.spec_audio_visualization(noisy, enhanced, clean, name, epoch, mark=speech_type)
noisy_y_list[speech_type].append(noisy)
clean_y_list[speech_type].append(clean)
enhanced_y_list[speech_type].append(enhanced)
if self.rank == 0:
progress_bar.update(1)
self.writer.add_scalar(f"Loss/Validation_Total", loss_total / len(self.valid_dataloader), epoch)
for speech_type in ("With_reverb", "No_reverb"):
self.writer.add_scalar(f"Loss/{speech_type}", loss_list[speech_type] / len(self.valid_dataloader), epoch)
validation_score_list[speech_type] = self.metrics_visualization(
noisy_y_list[speech_type], clean_y_list[speech_type], enhanced_y_list[speech_type],
visualization_metrics, epoch, visualization_num_workers, mark=speech_type
)
return validation_score_list["No_reverb"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a Gaussian unmixing method for IRM (isothermal remanent magnetisation)
acquisition curves, adapted from the astroML
1D Gaussian Mixture Example
---------------------------
see below and https://github.com/astroML/astroML/blob/master/book_figures/chapter4/fig_GMM_1D.py
"""
import os
import re
from scipy import interpolate
from matplotlib import pyplot as plt
import numpy as np
from sklearn.mixture import GMM
def loggaussfit(x_measure, y):
"""
将x, gradient(y) 当做是一组密度分布曲线, 根据曲线重新估计随机数列,然后对随机数列进行拟合,并转换坐标
"""
y_gradient = []
for i in np.gradient(y):
if i >0:
y_gradient.append(i)
else:
y_gradient.append(10**-11)
x_fit = np.log10(x_measure)
x_interp = np.linspace(x_fit.min(), x_fit.max(), 1000)
y_interp = interpolate.splev(x_interp, interpolate.splrep(x_fit, y_gradient))
x_fit = x_interp
y_gradient = y_interp
D = []
for i in np.arange(len(x_fit)):
xx = x_fit[i]
yx = y_gradient[i]
frequency = yx / sum(y_interp)
numbers = np.int(frequency * 10**5)
if numbers != 0:
D.extend([xx]*numbers)
X = []
for i in np.array(D):
X.append([i])
X = np.array(X)
#--------------------------------------
# Learn the best-fit GMM models
# Here we'll use GMM in the standard way: the fit() method
# uses an Expectation-Maximization approach to find the best
# mixture of Gaussians for the data
# fit models with 1-10 components
N = np.arange(1, 5)
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GMM(N[i]).fit(X)
# compute the AIC and the BIC
AIC = [m.aic(X) for m in models]
BIC = [m.bic(X) for m in models]
#------------------------------------------------------------
# Plot the results
# We'll use three panels:
# 1) data + best-fit mixture
# 2) AIC and BIC vs number of components
# 3) probability that a point came from each component
fig = plt.figure(figsize=(10, 8), dpi=100, facecolor='white')
fig.subplots_adjust(left=0.12, right=0.97,
bottom=0.21, top=0.9, wspace=0.5, hspace=0.5)
fig.suptitle(sample.split('.irmc')[0])
color = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00']
'''
----------------------------------------------------------------# plot 1: data + best-fit mixture
'''
ax = fig.add_subplot(222)
M_best = models[np.argmin(AIC)]
    print(M_best.params)
x = x_fit#np.linspace(X.min(), X.max(), 1000)#np.array(x_mid)
logprob, responsibilities = M_best.score_samples(x.reshape((-1,1)))#M_best.eval(x)
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
y_pdf = (np.mean(y_gradient))*(pdf / np.mean(pdf))
y_in = (np.mean(y_gradient))*(pdf_individual / (np.mean(pdf)))
#ax.hist(X, 50, normed=True, histtype='stepfilled', alpha=0.4)
ax.scatter(x_measure, np.gradient(y), facecolors='white', edgecolors='k', s=10, marker='s', alpha=1)
ax.plot(10**x, y_pdf, '-k')
for i in np.arange(y_in.shape[1]):
ax.plot(10**x, y_in[:, i], '--k', color=color[i])
ax.text(0.04, 0.96, "Best-fit Mixture",
ha='left', va='top', transform=ax.transAxes)
    ax.set_xlabel('Field (mT)')
ax.set_ylabel('IRM acquisition gradient')
ax.set_ylim(0, 10**-8)
ax.set_xscale('log')
ax.set_xlim(2, 3000)
ax.set_ylim(-y_pdf.max()*0.1, y_pdf.max()*1.3)
'''
---------------------------------------------------------------##plot add: irm aqcuisition curve:
'''
ax = fig.add_subplot(221)
ax.scatter(x_measure, y/y.max(), facecolors='white', edgecolors='k', s=10, marker='s', alpha=0.5)
y_irm = y_pdf / sum(y_pdf)
y_acumulation = []
m = 0
for i in y_irm:
m = m+i
y_acumulation.append(m)
ax.plot(10**x, y_acumulation, '-k')
y_irm = y_in / sum(y_pdf)
for i in np.arange(y_irm.shape[1]):
y_acumulation = []
m = 0
for n in y_irm[:, i]:
m = m + n
y_acumulation.append(m)
ax.plot(10**x, y_acumulation, '--k', color=color[i])
    ax.set_xlabel('Field (mT)')
ax.set_ylabel('normalized IRM')
ax.set_xscale('log')
ax.set_xlim(2, 3000)
ax.set_ylim(0, 1.1)
'''
--------------------------------------------------------------# plot 2: AIC and BIC
'''
ax = fig.add_subplot(223)
ax.plot(N, AIC, '-r', label='AIC', alpha=0.5)
ax.plot(N, BIC, '--k', label='BIC')
ax.set_xlabel('n. components')
ax.set_ylabel('information criterion')
ax.legend(loc=1, frameon=False)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
'''
--------------------------------------------------------------# plot 3: posterior probabilities for each component
'''
ax = fig.add_subplot(224)
p = M_best.predict_proba(x.reshape((-1,1)))#M_best.predict_proba(x)
#p = p[:, (1, 0, 2)] # rearrange order so the plot looks better
p = p.cumsum(1).T
ax.fill_between(10**x, 0, p[0], color=color[0], alpha=0.3)
for i in np.arange(1, p.shape[0]):
if i < p.shape[0] - 1:
ax.fill_between(10**x, p[i-1], p[i], color=color[i], alpha=0.3)
if i == p.shape[0] - 1:
ax.fill_between(10**x, p[i-1], 1, color=color[i], alpha=0.3)
#ax.set_xlim(0, 4)
ax.set_ylim(0, 1)
    ax.set_xlabel('Field (mT)')
ax.set_ylabel(r'posterior probabilities')
ax.set_xscale('log')
ax.set_xlim(3, 2000)
#ax.text(0, 3, 'class 1', rotation='vertical')
#ax.text(0.3, 3, 'class 2', rotation='vertical')
#ax.text(3, 5, 'class 3', rotation='vertical')
path = '/backup/jiabo/rockdata/MSM33-55-1/'
sample = 'MSM33-55-1_d99.irmc'
l = []
with open(path + sample) as dat:
data = dat.readlines()
data = data[87:-2]
for lines in data:
var = lines.split(',')
l.append(var)
L = np.array(l, dtype=np.float64).T
y = L[1]
x_measure = L[0]*10**3
loggaussfit(x_measure, y)
plt.show()
'''
for line in os.listdir(path):
if re.search(r'\Sirmc$', line):
sample = line.replace('.irmc', '')
loggaussfit(path, sample+'.irmc')
plt.savefig('/backup/jiabo/irmgraph/'+sample)
'''
'''
"""
1D Gaussian Mixture Example
---------------------------
Figure 4.2.
Example of a one-dimensional Gaussian mixture model with three components.
The left panel shows a histogram of the data, along with the best-fit model
for a mixture with three components. The center panel shows the model selection
criteria AIC (see Section 4.3) and BIC (see Section 5.4) as a function of the
number of components. Both are minimized for a three-component model. The
right panel shows the probability that a given point is drawn from each class
as a function of its position. For a given x value, the vertical extent of
each region is proportional to that probability. Note that extreme values
are most likely to belong to class 1.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
import numpy as np
from sklearn.mixture import GMM
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the dataset.
# We'll use scikit-learn's Gaussian Mixture Model to sample
# data from a mixture of Gaussians. The usual way of using
# this involves fitting the mixture to data: we'll see that
# below. Here we'll set the internal means, covariances,
# and weights by-hand.
np.random.seed(1)
gmm = GMM(3, n_iter=1)
gmm.means_ = np.array([[-1], [0], [3]])
gmm.covars_ = np.array([[1.5], [1], [0.5]]) ** 2
gmm.weights_ = np.array([0.3, 0.5, 0.2])
X = gmm.sample(1000)
#------------------------------------------------------------
# Learn the best-fit GMM models
# Here we'll use GMM in the standard way: the fit() method
# uses an Expectation-Maximization approach to find the best
# mixture of Gaussians for the data
# fit models with 1-10 components
N = np.arange(1, 11)
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GMM(N[i]).fit(X)
# compute the AIC and the BIC
AIC = [m.aic(X) for m in models]
BIC = [m.bic(X) for m in models]
#------------------------------------------------------------
# Plot the results
# We'll use three panels:
# 1) data + best-fit mixture
# 2) AIC and BIC vs number of components
# 3) probability that a point came from each component
fig = plt.figure(figsize=(5, 1.7))
fig.subplots_adjust(left=0.12, right=0.97,
bottom=0.21, top=0.9, wspace=0.5)
# plot 1: data + best-fit mixture
ax = fig.add_subplot(131)
M_best = models[np.argmin(AIC)]
x = np.linspace(-6, 6, 1000)
logprob, responsibilities = M_best.score_samples(x.reshape((-1, 1)))  # formerly eval(x)
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
ax.hist(X, 30, normed=True, histtype='stepfilled', alpha=0.4)
ax.plot(x, pdf, '-k')
ax.plot(x, pdf_individual, '--k')
ax.text(0.04, 0.96, "Best-fit Mixture",
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
# plot 2: AIC and BIC
ax = fig.add_subplot(132)
ax.plot(N, AIC, '-k', label='AIC')
ax.plot(N, BIC, '--k', label='BIC')
ax.set_xlabel('n. components')
ax.set_ylabel('information criterion')
ax.legend(loc=2)
# plot 3: posterior probabilities for each component
ax = fig.add_subplot(133)
p = M_best.predict_proba(x.reshape((-1, 1)))
p = p[:, (1, 0, 2)] # rearrange order so the plot looks better
p = p.cumsum(1).T
ax.fill_between(x, 0, p[0], color='gray', alpha=0.3)
ax.fill_between(x, p[0], p[1], color='gray', alpha=0.5)
ax.fill_between(x, p[1], 1, color='gray', alpha=0.7)
ax.set_xlim(-6, 6)
ax.set_ylim(0, 1)
ax.set_xlabel('$x$')
ax.set_ylabel(r'$p({\rm class}|x)$')
ax.text(-5, 0.3, 'class 1', rotation='vertical')
ax.text(0, 0.5, 'class 2', rotation='vertical')
ax.text(3, 0.3, 'class 3', rotation='vertical')
plt.show()
'''
|
import uuid
from flask import request
from flask_restful import Resource, marshal
from FlaskProject.extendsions import cache
from blog.models import UserBlog
from common import blog_fields
from common.status import HTTP_406_UNKNOW_ACCESS, HTTP_400_ERROR, HTTP_201_CREATE_OK, TIMEOUT, HTTP_200_OK
from .models import Users
class UserResource(Resource):
def post(self):
        token = request.args.get("token")
        user_id = cache.get(token)
        blog = UserBlog.query.filter_by(user_id=user_id).first()
if not blog:
data = {
"msg": "find error",
"status": HTTP_400_ERROR
}
return data
data = {
"msg": "find success",
"status": HTTP_200_OK,
"data": marshal(blog, blog_fields)
}
return data
def get(self):
action = request.args.get("action")
if action == "login":
return self.do_login()
elif action == "register":
return self.do_register()
else:
data = {
"msg": "unknow access",
"status": HTTP_406_UNKNOW_ACCESS
}
return data
def do_register(self):
username = request.args.get("username")
password = request.args.get("password")
email = request.args.get("email")
if username == "" or password == "" or email == "":
data = {
"msg": "not None",
"status": HTTP_400_ERROR
}
return data
user = Users()
user.username = username
user.password = password
user.email = email
if not user.save():
data = {
"msg": "save error",
"status": HTTP_400_ERROR
}
return data
data = {
"msg": "save success",
"status": HTTP_201_CREATE_OK
}
return data
def do_login(self):
username = request.args.get("username")
password = request.args.get("password")
user = Users.query.filter_by(username=username).first()
if not user:
data = {
"msg": "No user",
"status": HTTP_400_ERROR
}
return data
if not user.verify_password(password):
data = {
"msg": "Password error",
"status": HTTP_400_ERROR
}
return data
token = uuid.uuid4().hex
cache.set(token, user.id, TIMEOUT)
data = {
"msg": "login success",
"status": HTTP_200_OK,
"data": {
"token": token
}
}
return data
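# A minimal usage sketch (illustrative only): assuming this resource is
# registered at /user on a Flask-RESTful Api instance (the route is an
# assumption, not defined in this module), the flow looks like:
#
#   GET  /user?action=register&username=alice&password=s3cret&email=a@b.com
#   GET  /user?action=login&username=alice&password=s3cret
#        -> {"msg": "login success", "status": ..., "data": {"token": "..."}}
#   POST /user?token=<token returned by login>
#        -> the blog belonging to the logged-in user, or a "find error"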
|
from src import TrainPageGetter, StationsGetter, StationTimetableGetter, config
from flask import Flask, jsonify, send_from_directory
from flask_cors import CORS
from datetime import datetime, timedelta
from dateutil import tz, parser
app = Flask(__name__)
CORS(app)
# Generate the station lookup table
config.global_station_list = {}
stations = StationsGetter.get_stations()
for station in stations:
config.global_station_list[station["name"]] = station["station_id"]
@app.route('/static/<path:path>')
def send_res(path):
return send_from_directory('static', path)
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/train/<string:train_id>')
def get_train(train_id):
train_list = TrainPageGetter.get_train(train_id)
return jsonify(train_list)
@app.route('/get-stations/')
def get_stations():
return jsonify(stations)
@app.route('/station/<int:station_id>')
def get_timetable(station_id):
timetable = StationTimetableGetter.get_timetable(station_id)
return jsonify(timetable)
def timetable_departures_filter(timetable):
departures_timetable = []
for item in timetable:
if item['is_origin'] or item['is_stop']:
departures_timetable.append(item)
return departures_timetable
def timetable_arrivals_filter(timetable):
    arrivals_timetable = []
    for item in timetable:
        if item['is_destination'] or item['is_stop']:
            arrivals_timetable.append(item)
    return arrivals_timetable
def timestamp_current_filter(timetable):
    current_timetable = []
    # The window is the same for every item, so compute it once.
    timezone = tz.gettz('Europe/Bucharest')
    beginning = datetime.now(tz=timezone) - timedelta(hours=1)
    end = datetime.now(tz=timezone) + timedelta(hours=3)
    for item in timetable:
if item['arrival_timestamp']:
arrival = parser.isoparse(item['arrival_timestamp'])
if beginning <= arrival <= end:
current_timetable.append(item)
continue
if beginning <= arrival + timedelta(minutes=item['delay']) <= end:
current_timetable.append(item)
continue
if item['departure_timestamp']:
departure = parser.isoparse(item['departure_timestamp'])
if beginning <= departure <= end:
current_timetable.append(item)
continue
if beginning <= departure + timedelta(minutes=item['delay']) <= end:
current_timetable.append(item)
continue
return current_timetable
@app.route('/station/<int:station_id>/departures')
def get_departures_timetable(station_id):
timetable = StationTimetableGetter.get_timetable(station_id)
timetable = timetable_departures_filter(timetable)
return jsonify(timetable)
@app.route('/station/<int:station_id>/departures/current')
def get_current_departures_timetable(station_id):
timetable = StationTimetableGetter.get_timetable(station_id)
timetable = timestamp_current_filter(timetable)
timetable = timetable_departures_filter(timetable)
return jsonify(timetable)
@app.route('/station/<int:station_id>/arrivals')
def get_arrivals_timetable(station_id):
timetable = StationTimetableGetter.get_timetable(station_id)
timetable = timetable_arrivals_filter(timetable)
return jsonify(timetable)
@app.route('/station/<int:station_id>/arrivals/current')
def get_current_arrivals_timetable(station_id):
timetable = StationTimetableGetter.get_timetable(station_id)
timetable = timestamp_current_filter(timetable)
timetable = timetable_arrivals_filter(timetable)
return jsonify(timetable)
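# Endpoint overview (a summary of the routes defined above):
#
#   GET /train/<train_id>                  -> details for one train
#   GET /get-stations/                     -> all stations (name and id)
#   GET /station/<station_id>              -> full timetable for a station
#   GET /station/<station_id>/departures   -> items that originate or stop here
#   GET /station/<station_id>/arrivals     -> items that terminate or stop here
#   GET /station/<station_id>/departures/current, .../arrivals/current
#       -> same lists, limited to a window from 1 hour ago to 3 hours ahead
#          (delays included), per timestamp_current_filter() above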
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class SimInterfaceIPv6Config(Base):
"""Data associated with simulated IPv6 interface link configuration inside a Network Topology.
The SimInterfaceIPv6Config class encapsulates a list of simInterfaceIPv6Config resources that are managed by the system.
A list of resources can be retrieved from the server using the SimInterfaceIPv6Config.find() method.
"""
__slots__ = ()
_SDM_NAME = 'simInterfaceIPv6Config'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableIp': 'enableIp',
'FromIP': 'fromIP',
'Name': 'name',
'SubnetPrefixLength': 'subnetPrefixLength',
'ToIP': 'toIP',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(SimInterfaceIPv6Config, self).__init__(parent, list_op)
@property
def Ospfv3PseudoInterface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8.Ospfv3PseudoInterface): An instance of the Ospfv3PseudoInterface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8 import Ospfv3PseudoInterface
if self._properties.get('Ospfv3PseudoInterface', None) is not None:
return self._properties.get('Ospfv3PseudoInterface')
else:
return Ospfv3PseudoInterface(self)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable IPv6
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableIp']))
@property
def FromIP(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FromIP']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def SubnetPrefixLength(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Subnet Prefix Length
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubnetPrefixLength']))
@property
def ToIP(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ToIP']))
def update(self, Name=None):
# type: (str) -> SimInterfaceIPv6Config
"""Updates simInterfaceIPv6Config resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> SimInterfaceIPv6Config
"""Adds a new simInterfaceIPv6Config resource on the json, only valid with config assistant
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved simInterfaceIPv6Config resources using find and the newly added simInterfaceIPv6Config resources available through an iterator or index
Raises
------
        - Exception: if this function is not being used with the config assistant
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
# type: (int, str, str) -> SimInterfaceIPv6Config
"""Finds and retrieves simInterfaceIPv6Config resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve simInterfaceIPv6Config resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all simInterfaceIPv6Config resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching simInterfaceIPv6Config resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of simInterfaceIPv6Config data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the simInterfaceIPv6Config resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Abort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
abort(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, EnableIp=None, FromIP=None, SubnetPrefixLength=None, ToIP=None):
"""Base class infrastructure that gets a list of simInterfaceIPv6Config device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- EnableIp (str): optional regex of enableIp
- FromIP (str): optional regex of fromIP
- SubnetPrefixLength (str): optional regex of subnetPrefixLength
- ToIP (str): optional regex of toIP
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
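# A minimal usage sketch (illustrative, not vendor documentation): given an
# established uhd_restpy session in which `network_topology` is a parent node
# exposing this resource list (the variable name is an assumption), resources
# are retrieved with find() and then modified with update():
#
#   configs = network_topology.SimInterfaceIPv6Config.find(Name='^v6-link$')
#   configs.update(Name='v6-link-renamed')
#
# find() parameters are evaluated server-side as regexes, so the ^...$
# anchors request an exact name match (see the find() docstring above).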
|
"""
Module to dynamically generate a Starlette routing map based on a directory tree.
"""
import importlib
import inspect
import typing as t
from pathlib import Path
from starlette.routing import Route as StarletteRoute, BaseRoute, Mount
from nested_dict import nested_dict
from backend.route import Route
def construct_route_map_from_dict(route_dict: dict) -> list[BaseRoute]:
route_map = []
for mount, item in route_dict.items():
if inspect.isclass(item):
route_map.append(StarletteRoute(mount, item))
else:
route_map.append(Mount(mount, routes=construct_route_map_from_dict(item)))
# Order non-capturing routes before capturing routes
route_map.sort(key=lambda route: "{" in route.path)
return route_map
def is_route_class(member: t.Any) -> bool:
return inspect.isclass(member) and issubclass(member, Route) and member != Route
def route_classes() -> t.Iterator[tuple[Path, type[Route]]]:
routes_directory = Path("backend") / "routes"
for module_path in routes_directory.rglob("*.py"):
import_name = f"{'.'.join(module_path.parent.parts)}.{module_path.stem}"
route_module = importlib.import_module(import_name)
for _member_name, member in inspect.getmembers(route_module):
if is_route_class(member):
member.check_parameters()
yield (module_path, member)
def create_route_map() -> list[BaseRoute]:
route_dict = nested_dict()
for module_path, member in route_classes():
# module_path == Path("backend/routes/foo/bar/baz/bin.py")
# => levels == ["foo", "bar", "baz"]
levels = module_path.parent.parts[2:]
current_level = None
for level in levels:
if current_level is None:
current_level = route_dict[f"/{level}"]
else:
current_level = current_level[f"/{level}"]
if current_level is not None:
current_level[member.path] = member
else:
route_dict[member.path] = member
return construct_route_map_from_dict(route_dict.to_dict())
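# A minimal usage sketch (the Starlette wiring below is illustrative; only
# create_route_map() comes from this module):
#
#   from starlette.applications import Starlette
#
#   app = Starlette(routes=create_route_map())
#
# Because construct_route_map_from_dict() sorts capturing routes (paths
# containing "{") after non-capturing ones, a literal route such as
# /users/me is matched before a parameterised sibling such as /users/{id}.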
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from tempest.api_schema.queuing.v1 import queues as queues_schema
from tempest.common import rest_client
from tempest.common.utils import data_utils
from tempest import config
CONF = config.CONF
class QueuingClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(QueuingClientJSON, self).__init__(auth_provider)
self.service = CONF.queuing.catalog_type
self.version = '1'
self.uri_prefix = 'v{0}'.format(self.version)
client_id = data_utils.rand_uuid_hex()
self.headers = {'Client-ID': client_id}
def list_queues(self):
uri = '{0}/queues'.format(self.uri_prefix)
resp, body = self.get(uri)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(queues_schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=None)
return resp, body
def get_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
return resp, body
def head_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.head(uri)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp = self.delete(uri)
return resp
def get_queue_stats(self, queue_name):
uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
body = json.loads(body)
self.validate_response(queues_schema.queue_stats, resp, body)
return resp, body
def get_queue_metadata(self, queue_name):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
body = json.loads(body)
return resp, body
def set_queue_metadata(self, queue_name, rbody):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=json.dumps(rbody))
return resp, body
def post_messages(self, queue_name, rbody):
uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
return resp, body
def list_messages(self, queue_name):
uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
queue_name)
resp, body = self.get(uri, extra_headers=True, headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(queues_schema.list_messages, resp, body)
return resp, body
def get_single_message(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(queues_schema.get_single_message, resp,
body)
return resp, body
def get_multiple_messages(self, message_uri):
resp, body = self.get(message_uri, extra_headers=True,
headers=self.headers)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(queues_schema.get_multiple_messages,
resp,
body)
return resp, body
def delete_messages(self, message_uri):
resp, body = self.delete(message_uri)
        assert resp['status'] == '204'
return resp, body
def post_claims(self, queue_name, rbody, url_params=False):
uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
if url_params:
uri += '?%s' % urllib.urlencode(url_params)
resp, body = self.post(uri, body=json.dumps(rbody),
extra_headers=True,
headers=self.headers)
body = json.loads(body)
self.validate_response(queues_schema.claim_messages, resp, body)
return resp, body
def query_claim(self, claim_uri):
resp, body = self.get(claim_uri)
if resp['status'] != '204':
body = json.loads(body)
self.validate_response(queues_schema.query_claim, resp, body)
return resp, body
def update_claim(self, claim_uri, rbody):
resp, body = self.patch(claim_uri, body=json.dumps(rbody))
        assert resp['status'] == '204'
return resp, body
def release_claim(self, claim_uri):
resp, body = self.delete(claim_uri)
        assert resp['status'] == '204'
return resp, body
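# A minimal usage sketch (illustrative; `auth_provider` comes from tempest's
# auth machinery, and the message body format shown is an assumption based on
# the v1 queuing API this client targets):
#
#   client = QueuingClientJSON(auth_provider)
#   client.create_queue('demo-queue')
#   client.post_messages('demo-queue',
#                        rbody=[{'ttl': 300, 'body': {'event': 'created'}}])
#   resp, body = client.list_messages('demo-queue')
#   client.delete_queue('demo-queue')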
|
"""Test cases for base parser."""
import pytest
from lila.serialization.parser import Parser
def test_parse_field():
"""Check that NotImplementedError is raised if parse_field is called.
1. Create an instance of Parser class.
2. Try to parse a field.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_field(data={})
assert error_info.value.args[0] == "Parser does not support siren fields"
def test_parse_action():
"""Check that NotImplementedError is raised if parse_action is called.
1. Create an instance of Parser class.
2. Try to parse an action.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_action(data={})
assert error_info.value.args[0] == "Parser does not support siren actions"
def test_parse_link():
"""Check that NotImplementedError is raised if parse_link is called.
1. Create an instance of Parser class.
2. Try to parse a link.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_link(data={})
assert error_info.value.args[0] == "Parser does not support siren links"
def test_parse_embedded_link():
"""Check that NotImplementedError is raised if parse_embedded_link is called.
1. Create an instance of Parser class.
2. Try to parse an embedded link.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_embedded_link(data={})
assert error_info.value.args[0] == "Parser does not support embedded siren links"
def test_parse_entity():
"""Check that NotImplementedError is raised if parse_entity is called.
1. Create an instance of Parser class.
2. Try to parse an entity.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_entity(data={})
assert error_info.value.args[0] == "Parser does not support siren entities"
def test_parse_embedded_representation():
"""Check that NotImplementedError is raised if parse_embedded_representation is called.
1. Create an instance of Parser class.
2. Try to parse an embedded representation.
3. Check that NotImplementedError is raised.
4. Check the error message.
"""
with pytest.raises(NotImplementedError) as error_info:
Parser().parse_embedded_representation(data={})
assert error_info.value.args[0] == "Parser does not support siren embedded representations"
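# A minimal sketch of a concrete parser (illustrative; the real serialization
# classes live elsewhere in lila.serialization). Subclasses override the
# parse_* hooks instead of inheriting the NotImplementedError behaviour
# exercised by the tests above:
#
#   class FieldNameParser(Parser):
#       def parse_field(self, data):
#           return data.get("name")
#
#   FieldNameParser().parse_field(data={"name": "query"})  # -> "query"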
|
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = [] # unused
opmap = {}
opname = ['<%r>' % (op,) for op in range(256)]
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('ROT_FOUR', 6)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('LOGICAL_XOR', 35)
def_op('RERAISE', 48)
def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
def_op('IS_OP', 117)
def_op('CONTAINS_OP', 118)
jabs_op('JUMP_IF_NOT_EXC_MATCH', 121)
jrel_op('SETUP_FINALLY', 122) # Distance to target address
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args
def_op('MAKE_FUNCTION', 132) # Flags
def_op('BUILD_SLICE', 133) # Number of items
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs
def_op('CALL_FUNCTION_EX', 142) # Flags
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
jrel_op('SETUP_ASYNC_WITH', 154)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
name_op('LOAD_METHOD', 160)
def_op('CALL_METHOD', 161)
def_op('LIST_EXTEND', 162)
def_op('SET_UPDATE', 163)
def_op('DICT_MERGE', 164)
def_op('DICT_UPDATE', 165)
del def_op, name_op, jrel_op, jabs_op
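# A small self-check of the tables built above (a sketch, not part of the
# upstream module): opmap maps mnemonics to opcodes, opname is the inverse
# table, and the has* lists classify opcodes by the kind of argument.
#
#   >>> opmap['LOAD_CONST']
#   100
#   >>> opname[100]
#   'LOAD_CONST'
#   >>> 100 in hasconst and 124 in haslocal
#   True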
|
# -*- coding: utf-8 -*-
"""
"""
__title__ = "SweetRPG API Core"
__description__ = "Common code for API microservice applications"
__url__ = "https://github.com/sweetrpg/api-core"
__version__ = "0.0.81"
__build__ = 0x000000
__author__ = "Paul Schifferer"
__author_email__ = "dm@sweetrpg.com"
__license__ = "MIT"
__copyright__ = "Copyright 2021 SweetRPG"
__cake__ = "\u2728 \U0001f370 \u2728"
|
# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects import l3_hamode
from neutron.tests.unit.objects import test_base as base
from neutron.tests.unit import testlib_api
class L3HARouterAgentPortBindingIfaceObjectTestCase(
base.BaseObjectIfaceTestCase):
_test_class = l3_hamode.L3HARouterAgentPortBinding
class L3HARouterAgentPortBindingDbObjectTestCase(base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = l3_hamode.L3HARouterAgentPortBinding
def setUp(self):
super(L3HARouterAgentPortBindingDbObjectTestCase,
self).setUp()
_network_id = self._create_test_network_id()
def get_port():
return self._create_test_port_id(network_id=_network_id)
self.update_obj_fields({'port_id': get_port,
'router_id': self._create_test_router_id,
'l3_agent_id': self._create_test_agent_id})
class L3HARouterNetworkIfaceObjectTestCase(base.BaseObjectIfaceTestCase):
_test_class = l3_hamode.L3HARouterNetwork
class L3HARouterNetworkDbObjectTestCase(base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = l3_hamode.L3HARouterNetwork
def setUp(self):
super(L3HARouterNetworkDbObjectTestCase, self).setUp()
network = self._create_test_network()
self.update_obj_fields({'network_id': network.id})
class L3HARouterVRIdAllocationIfaceObjectTestCase(
base.BaseObjectIfaceTestCase):
_test_class = l3_hamode.L3HARouterVRIdAllocation
class L3HARouterVRIdAllocationDbObjectTestCase(base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = l3_hamode.L3HARouterVRIdAllocation
def setUp(self):
super(L3HARouterVRIdAllocationDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'network_id': lambda: self._create_test_network().id})
|
grocery_item = ["Potato", "Tomato", "Water", "Ginger", "Onion"]
print(grocery_item)
# for item in grocery_item:
# if item == 'Water':
# break
# print(item)
for i in range(0, len(grocery_item)):
print(grocery_item[i])
print("***Finished***")
|
"""
.. _tut_info_objects:
The :class:`Info <mne.Info>` data structure
==============================================
"""
from __future__ import print_function
import mne
import os.path as op
###############################################################################
# The :class:`Info <mne.Info>` data object is typically created
# when data is imported into MNE-Python and contains details such as:
#
# - date, subject information, and other recording details
# - the sampling rate
# - information about the data channels (name, type, position, etc.)
# - digitized points
# - sensor–head coordinate transformation matrices
#
# and so forth. See the :class:`mne.Info` API reference
# for a complete list of all data fields. Once created, this object is passed
# around throughout the data analysis pipeline.
#
# It behaves as a nested Python dictionary:
# Read the info object from an example recording
info = mne.io.read_info(
op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
'sample_audvis_raw.fif'), verbose=False)
###############################################################################
# List all the fields in the info object
print('Keys in info dictionary:\n', info.keys())
###############################################################################
# Obtain the sampling rate of the data
print(info['sfreq'], 'Hz')
###############################################################################
# List all information about the first data channel
print(info['chs'][0])
###############################################################################
# .. _picking_channels:
#
# Obtaining subsets of channels
# -----------------------------
#
# There are a number of convenience functions to obtain channel indices, given
# an :class:`mne.Info` object.
###############################################################################
# Get channel indices by name
channel_indices = mne.pick_channels(info['ch_names'], ['MEG 0312', 'EEG 005'])
###############################################################################
# Get channel indices by regular expression
channel_indices = mne.pick_channels_regexp(info['ch_names'], 'MEG *')
###############################################################################
# Get channel indices by type
channel_indices = mne.pick_types(info, meg=True) # MEG only
channel_indices = mne.pick_types(info, eeg=True) # EEG only
###############################################################################
# MEG gradiometers and EEG channels
channel_indices = mne.pick_types(info, meg='grad', eeg=True)
###############################################################################
# Get a dictionary of channel indices, grouped by channel type
channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
print('The first three magnetometers:', channel_indices_by_type['mag'][:3])
###############################################################################
# Obtaining information about channels
# ------------------------------------
# Channel type of a specific channel
channel_type = mne.io.pick.channel_type(info, 75)
print('Channel #75 is of type:', channel_type)
###############################################################################
# Channel types of a collection of channels
meg_channels = mne.pick_types(info, meg=True)[:10]
channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
print('First 10 MEG channels are of type:\n', channel_types)
###############################################################################
# Dropping channels from an info structure
# ----------------------------------------
#
# It is possible to limit the info structure to only include a subset of
# channels with the :func:`mne.pick_info` function:
# Only keep EEG channels
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
reduced_info = mne.pick_info(info, eeg_indices)
print(reduced_info)
|
# -*- coding:utf-8 -*-
# Created by Machine (Fan Jin built the code-generator)
import MySQLdb
import tornado.web
class form3Handler(tornado.web.RequestHandler):
def get(self):
print('----------------------------Get form3--------------------------')
        # get_argument returns the default when the argument is missing.
        edit = self.get_argument("edit", "0")
self.patient_id = self.get_argument("patient_id", "")
exist = 0
# Get the patient's data status from the database
conn = MySQLdb.connect( host = 'localhost',
user = 'debian-sys-maint',
passwd = 'fmvKL0UlQ558lKWG',
db = 'MData',
charset= 'utf8')
conn.autocommit(1)
self.cur = conn.cursor()
        # Parameterized query keeps user input out of the SQL string.
        self.cur.execute('SELECT form3 FROM data_status WHERE patient_id=%s',
                         (self.patient_id,))
for row in self.cur:
exist = row[0]
self.cur.close()
res = [self.patient_id, "", "", "", "", "", "", "", "", "", "", "", "", "", ""]
if (exist==1):
# Get the data from the database
self.cur = conn.cursor()
            self.cur.execute('SELECT patient_id,xm,xb,xb_other,nl,sj_0,sj_1,sj_2,sj_3,sj_4,'
                             'lx_0,lx_1,lx_2,lx_3,lx_4 FROM form3 WHERE patient_id=%s',
                             (self.patient_id,))
for row in self.cur:
res = row
if (exist==1 and edit=="0"):
self.render("../html/read_form3_page.html", patient_id=self.patient_id, xm=res[1], xb=res[2], xb_other=res[3], nl=res[4], sj_0=res[5], sj_1=res[6], sj_2=res[7], sj_3=res[8], sj_4=res[9], lx_0=res[10], lx_1=res[11], lx_2=res[12], lx_3=res[13], lx_4=res[14])
else:
self.render("../html/edit_form3_page.html", patient_id=self.patient_id, xm=res[1], xb=res[2], xb_other=res[3], nl=res[4], sj_0=res[5], sj_1=res[6], sj_2=res[7], sj_3=res[8], sj_4=res[9], lx_0=res[10], lx_1=res[11], lx_2=res[12], lx_3=res[13], lx_4=res[14])
def post(self):
print('----------------------------Submit----------------------------')
self.patient_id = self.get_body_argument("patient_id")
xm = self.get_body_argument("xm")
xb = self.get_body_argument("xb")
xb_other = self.get_body_argument("xb_other")
nl = self.get_body_argument("nl")
sj_0 = self.get_body_argument("sj_0")
sj_1 = self.get_body_argument("sj_1")
sj_2 = self.get_body_argument("sj_2")
sj_3 = self.get_body_argument("sj_3")
sj_4 = self.get_body_argument("sj_4")
lx_0 = self.get_body_argument("lx_0")
lx_1 = self.get_body_argument("lx_1")
lx_2 = self.get_body_argument("lx_2")
lx_3 = self.get_body_argument("lx_3")
lx_4 = self.get_body_argument("lx_4")
conn = MySQLdb.connect( host = 'localhost',
user = 'debian-sys-maint',
passwd = 'fmvKL0UlQ558lKWG',
db = 'MData',
charset= 'utf8')
conn.autocommit(1)
# Insert the data into the database
self.cur = conn.cursor()
        # Parameterized REPLACE avoids SQL injection via the form values.
        sqls = ('REPLACE INTO form3 (patient_id, xm, xb, xb_other, nl, sj_0, sj_1, sj_2, sj_3, sj_4, '
                'lx_0, lx_1, lx_2, lx_3, lx_4) '
                'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
        self.cur.execute(sqls, (self.patient_id, xm, xb, xb_other, nl,
                                sj_0, sj_1, sj_2, sj_3, sj_4,
                                lx_0, lx_1, lx_2, lx_3, lx_4))
sqls = "SELECT * FROM data_status WHERE patient_id='" + self.patient_id + "'"
self.cur.execute(sqls)
exist_data = 0
for row in self.cur:
exist_data = row
exist_data = 1
        if exist_data == 1:
            self.cur.execute('UPDATE data_status SET form3=1 WHERE patient_id=%s',
                             (self.patient_id,))
        else:
            self.cur.execute('REPLACE INTO data_status (patient_id, form3) VALUES (%s, 1)',
                             (self.patient_id,))
self.cur.close()
self.render("../html/read_form3_page.html", patient_id=self.patient_id, xm=xm, xb=xb, xb_other=xb_other, nl=nl, sj_0=sj_0, sj_1=sj_1, sj_2=sj_2, sj_3=sj_3, sj_4=sj_4, lx_0=lx_0, lx_1=lx_1, lx_2=lx_2, lx_3=lx_3, lx_4=lx_4)
|
# Write a program that reads something from the keyboard and shows its
# primitive type and all the possible information about it.
thing = str(input('Type something: '))
print(f'Is it alphanumeric?: {thing.isalnum()}')
print(f'Is it alphabetic?: {thing.isalpha()}')
print(f'Is it lowercase?: {thing.islower()}')
print(f'Is it uppercase?: {thing.isupper()}')
print(f'Is it whitespace?: {thing.isspace()}')
print(f'Is it numeric?: {thing.isnumeric()}')
|
#! /usr/bin/env python3
import sys
import re
def vet_nucleotide_sequence(sequence):
"""
Return None if `sequence` is a valid RNA or DNA sequence, else raise exception.
Parameters
----------
sequence : str
A string representing a DNA or RNA sequence (upper or lower-case)
Returns
-------
None
Return nothing (None) if sequence is valid, otherwise raise an
exception.
Examples
--------
>>> vet_nucleotide_sequence('ACGTACGT') == None
True
>>> vet_nucleotide_sequence('not a valid sequence')
Traceback (most recent call last):
...
Exception: Invalid sequence: 'not a valid sequence'
Don't allow mixing of DNA and RNA!
>>> vet_nucleotide_sequence('AUTGC')
Traceback (most recent call last):
...
Exception: Invalid sequence: 'AUTGC'
Don't allow whitespace (or other characters) before, within, or after!
>>> vet_nucleotide_sequence(' ACGT ACGT ')
Traceback (most recent call last):
...
Exception: Invalid sequence: ' ACGT ACGT '
But, an empty string should be deemed valid
>>> vet_nucleotide_sequence('') == None
True
"""
    ##########################################################################
    # `rna_pattern_str` and `dna_pattern_str` are regular expressions that
    # match any string of zero or more RNA or DNA bases, respectively (and
    # only such strings). The character classes accept upper and lower-case
    # bases; the ^...$ anchors reject whitespace or any other characters
    # before, within, or after the bases, while still allowing the empty
    # string (see the docstring above).
    rna_pattern_str = r'^[ACGUacgu]*$'
    dna_pattern_str = r'^[ACGTacgt]*$'
##########################################################################
rna_pattern = re.compile(rna_pattern_str)
dna_pattern = re.compile(dna_pattern_str)
if rna_pattern.match(sequence):
return
if dna_pattern.match(sequence):
return
else:
raise Exception("Invalid sequence: {0!r}".format(sequence))
def vet_codon(codon):
"""
Return None if `codon` is a valid RNA codon, else raise an exception.
Parameters
----------
codon : str
A string representing a codon (upper or lower-case)
Returns
-------
None
Return nothing (None) if codon is valid, otherwise raise an
exception.
Examples
--------
Valid codon
>>> vet_codon('AUG') == None
True
    lower-case is also valid
>>> vet_codon('aug') == None
True
DNA is not valid
>>> vet_codon('ATG')
Traceback (most recent call last):
...
Exception: Invalid codon: 'ATG'
A codon must be exactly 3 RNA bases long
>>> vet_codon('AUGG')
Traceback (most recent call last):
...
Exception: Invalid codon: 'AUGG'
"""
    ##########################################################################
    # `codon_pattern_str` matches exactly one codon: three RNA bases, upper
    # or lower-case, anchored so that shorter or longer strings (and DNA
    # bases such as T) are rejected.
    codon_pattern_str = r'^[ACGUacgu]{3}$'
##########################################################################
codon_pattern = re.compile(codon_pattern_str)
if codon_pattern.match(codon):
return
else:
raise Exception("Invalid codon: {0!r}".format(codon))
def find_first_orf(sequence,
start_codons = ['AUG'],
stop_codons = ['UAA', 'UAG', 'UGA']):
"""
Return the first open-reading frame in the DNA or RNA `sequence`.
An open-reading frame (ORF) is the part of an RNA sequence that is
translated into a peptide. It must begin with a start codon, followed by
zero or more codons (triplets of nucleotides), and end with a stop codon.
If there are no ORFs in the sequence, an empty string is returned.
Parameters
----------
sequence : str
A string representing a DNA or RNA sequence (upper or lower-case)
start_codons : list of strings
All possible start codons. Each codon must be a string of 3 RNA bases,
upper or lower-case.
stop_codons : list of strings
All possible stop codons. Each codon must be a string of 3 RNA bases,
upper or lower-case.
Returns
-------
str
An uppercase string of the first ORF found in the `sequence` that
starts with any one of the `start_codons` and ends with any one of the
`stop_codons`. If no ORF is found an empty string is returned.
Examples
--------
When the whole RNA sequence is an ORF:
>>> find_first_orf('AUGGUAUAA', ['AUG'], ['UAA'])
'AUGGUAUAA'
When the whole DNA sequence is an ORF:
>>> find_first_orf('ATGGTATAA', ['AUG'], ['UAA'])
'AUGGUAUAA'
When there is no ORF:
>>> find_first_orf('CUGGUAUAA', ['AUG'], ['UAA'])
''
    When there are bases before and after the ORF:
>>> find_first_orf('CCAUGGUAUAACC', ['AUG'], ['UAA'])
'AUGGUAUAA'
"""
# Make sure the sequence is valid
vet_nucleotide_sequence(sequence)
# Make sure the codons are valid
for codon in start_codons:
vet_codon(codon)
for codon in stop_codons:
vet_codon(codon)
# Get copies of everything in uppercase
seq = sequence.upper()
starts = [c.upper() for c in start_codons]
stops = [c.upper() for c in stop_codons]
# Make sure seq is RNA
seq = seq.replace('T', 'U')
    ##########################################################################
    # `orf_pattern_str` matches an open reading frame within a string of
    # uppercase RNA bases (at this point `seq` contains only A, C, G, and U):
    # one of the start codons, zero or more codons (triplets), then one of
    # the stop codons. The inner group is non-greedy so the match ends at the
    # first in-frame stop codon; building the pattern from `starts` and
    # `stops` generalizes it to whatever codons the caller passes.
    orf_pattern_str = ('(?:' + '|'.join(starts) + ')'
                       + '(?:[ACGU]{3})*?'
                       + '(?:' + '|'.join(stops) + ')')
##########################################################################
# Create the regular expression object
orf_pattern = re.compile(orf_pattern_str)
# Search the sequence
match_object = orf_pattern.search(seq)
if match_object:
return match_object.group()
return ''
def parse_sequence_from_path(path):
# Try to open the path to read from it, and handle exceptions if they
    # arise
try:
file_stream = open(path, 'r')
except FileNotFoundError as e:
sys.stderr.write("Sorry, couldn't find path {}".format(path))
raise e
except IsADirectoryError as e:
sys.stderr.write("Sorry, path {} appears to be a directory".format(
path))
raise e
except:
sys.stderr.write("Sorry, something went wrong when trying to open {}".format(
path))
raise
# If we've reached here, the file is open and ready to read
sequence = ''
# A for loop to visit each line in the file
for line in file_stream:
# Strip whitespace from the line and concatenate it to the end of the
# sequence
sequence += line.strip()
return sequence
def main():
import argparse
# Create a command-line parser object
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
# Tell the parser what command-line arguments this script can receive
parser.add_argument('sequence',
metavar = 'SEQUENCE',
type = str,
help = ('The sequence to search for an open-reading frame. '
'If the path flag (\'-p\'/\'--path\') is specified, '
'then this should be a path to a file containing the '
'sequence to be searched.'))
parser.add_argument('-p', '--path',
action = 'store_true',
            help = ('The sequence argument should be treated as a path to a '
                    'file containing the sequence to be searched.'))
parser.add_argument('-s', '--start-codons',
type = str,
nargs = '+', # one or more arguments
default = ['AUG'],
help = ('One or more possible start codons.'))
parser.add_argument('-x', '--stop-codons',
type = str,
nargs = '+', # one or more arguments
default = ['UAA', 'UAG', 'UGA'],
help = ('One or more possible stop codons.'))
# Parse the command-line arguments into a 'dict'-like container
args = parser.parse_args()
# Check to see if the path option was set to True by the caller. If so, parse
# the sequence from the path
if args.path:
sequence = parse_sequence_from_path(args.sequence)
else:
sequence = args.sequence
orf = find_first_orf(sequence = sequence,
start_codons = args.start_codons,
stop_codons = args.stop_codons)
sys.stdout.write('{}\n'.format(orf))
if __name__ == '__main__':
main()
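# A minimal usage sketch (the script filename is hypothetical; the expected
# output follows the doctests above):
#
#   $ python find_orf.py CCAUGGUAUAACC
#   AUGGUAUAA
#
#   $ python find_orf.py -p sequence.txt      # read the sequence from a file
#   $ python find_orf.py AUGGUAUGA -x UGA     # use a custom stop codon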
|
import torch
from bindsnet.network import Network
from bindsnet.network.monitors import Monitor, NetworkMonitor
from bindsnet.network.nodes import Input, IFNodes
from bindsnet.network.topology import Connection
class TestMonitor:
"""
Testing Monitor object.
"""
network = Network()
inpt = Input(75)
network.add_layer(inpt, name="X")
_if = IFNodes(25)
network.add_layer(_if, name="Y")
conn = Connection(inpt, _if, w=torch.rand(inpt.n, _if.n))
network.add_connection(conn, source="X", target="Y")
inpt_mon = Monitor(inpt, state_vars=["s"])
network.add_monitor(inpt_mon, name="X")
_if_mon = Monitor(_if, state_vars=["s", "v"])
network.add_monitor(_if_mon, name="Y")
network.run(inputs={"X": torch.bernoulli(torch.rand(100, inpt.n))}, time=100)
assert inpt_mon.get("s").size() == torch.Size([100, 1, inpt.n])
assert _if_mon.get("s").size() == torch.Size([100, 1, _if.n])
assert _if_mon.get("v").size() == torch.Size([100, 1, _if.n])
del network.monitors["X"], network.monitors["Y"]
inpt_mon = Monitor(inpt, state_vars=["s"], time=500)
network.add_monitor(inpt_mon, name="X")
_if_mon = Monitor(_if, state_vars=["s", "v"], time=500)
network.add_monitor(_if_mon, name="Y")
network.run(inputs={"X": torch.bernoulli(torch.rand(500, inpt.n))}, time=500)
assert inpt_mon.get("s").size() == torch.Size([500, 1, inpt.n])
assert _if_mon.get("s").size() == torch.Size([500, 1, _if.n])
assert _if_mon.get("v").size() == torch.Size([500, 1, _if.n])
class TestNetworkMonitor:
"""
Testing NetworkMonitor object.
"""
network = Network()
inpt = Input(25)
network.add_layer(inpt, name="X")
_if = IFNodes(75)
network.add_layer(_if, name="Y")
conn = Connection(inpt, _if, w=torch.rand(inpt.n, _if.n))
network.add_connection(conn, source="X", target="Y")
mon = NetworkMonitor(network, state_vars=["s", "v", "w"])
network.add_monitor(mon, name="monitor")
network.run(inputs={"X": torch.bernoulli(torch.rand(50, inpt.n))}, time=50)
recording = mon.get()
assert recording["X"]["s"].size() == torch.Size([50, 1, inpt.n])
assert recording["Y"]["s"].size() == torch.Size([50, 1, _if.n])
assert recording["Y"]["s"].size() == torch.Size([50, 1, _if.n])
del network.monitors["monitor"]
mon = NetworkMonitor(network, state_vars=["s", "v", "w"], time=50)
network.add_monitor(mon, name="monitor")
network.run(inputs={"X": torch.bernoulli(torch.rand(50, inpt.n))}, time=50)
recording = mon.get()
assert recording["X"]["s"].size() == torch.Size([50, 1, inpt.n])
assert recording["Y"]["s"].size() == torch.Size([50, 1, _if.n])
assert recording["Y"]["s"].size() == torch.Size([50, 1, _if.n])
if __name__ == "__main__":
tm = TestMonitor()
tnm = TestNetworkMonitor()
|
from cogs.fun.fun import Fun
def setup(bot):
bot.add_cog(Fun(bot))
|
import copy
import json
from datetime import datetime
import pytest
from flask import url_for
from freezegun import freeze_time
from app.main.views.dashboard import (
aggregate_notifications_stats,
aggregate_status_types,
aggregate_template_usage,
format_monthly_stats_to_list,
get_dashboard_totals,
get_tuples_of_financial_years,
)
from tests import (
organisation_json,
service_json,
validate_route_permission,
validate_route_permission_with_client,
)
from tests.conftest import (
ORGANISATION_ID,
SERVICE_ONE_ID,
create_active_caseworking_user,
create_active_user_view_permissions,
normalize_spaces,
)
stub_template_stats = [
{
'template_type': 'sms',
'template_name': 'one',
'template_id': 'id-1',
'status': 'created',
'count': 50,
'is_precompiled_letter': False
},
{
'template_type': 'email',
'template_name': 'two',
'template_id': 'id-2',
'status': 'created',
'count': 100,
'is_precompiled_letter': False
},
{
'template_type': 'email',
'template_name': 'two',
'template_id': 'id-2',
'status': 'technical-failure',
'count': 100,
'is_precompiled_letter': False
},
{
'template_type': 'letter',
'template_name': 'three',
'template_id': 'id-3',
'status': 'delivered',
'count': 300,
'is_precompiled_letter': False
},
{
'template_type': 'sms',
'template_name': 'one',
'template_id': 'id-1',
'status': 'delivered',
'count': 50,
'is_precompiled_letter': False
},
{
'template_type': 'letter',
'template_name': 'four',
'template_id': 'id-4',
'status': 'delivered',
'count': 400,
'is_precompiled_letter': True
},
{
'template_type': 'letter',
'template_name': 'four',
'template_id': 'id-4',
'status': 'cancelled',
'count': 5,
'is_precompiled_letter': True
},
{
'template_type': 'letter',
'template_name': 'thirty-three',
'template_id': 'id-33',
'status': 'cancelled',
'count': 5,
'is_precompiled_letter': False
},
]
@pytest.mark.parametrize('user', (
create_active_user_view_permissions(),
create_active_caseworking_user(),
))
def test_redirect_from_old_dashboard(
client_request,
user,
mocker,
):
mocker.patch('app.user_api_client.get_user', return_value=user)
expected_location = '/services/{}'.format(SERVICE_ONE_ID)
client_request.get_url(
'/services/{}/dashboard'.format(SERVICE_ONE_ID),
_expected_redirect=expected_location,
)
assert expected_location == url_for('main.service_dashboard', service_id=SERVICE_ONE_ID)
def test_redirect_caseworkers_to_templates(
client_request,
mocker,
active_caseworking_user,
):
mocker.patch('app.user_api_client.get_user', return_value=active_caseworking_user)
client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
_expected_status=302,
_expected_redirect=url_for(
'main.choose_template',
service_id=SERVICE_ONE_ID,
)
)
def test_get_started(
client_request,
mocker,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mocker.patch(
'app.template_statistics_client.get_template_statistics_for_service',
return_value=copy.deepcopy(stub_template_stats)
)
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_get_service_templates_when_no_templates_exist.assert_called_once_with(SERVICE_ONE_ID)
assert 'Get started' in page.text
def test_get_started_is_hidden_once_templates_exist(
client_request,
mocker,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mocker.patch(
'app.template_statistics_client.get_template_statistics_for_service',
return_value=copy.deepcopy(stub_template_stats)
)
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_get_service_templates.assert_called_once_with(SERVICE_ONE_ID)
assert not page.find('h2', string='Get started')
def test_inbound_messages_not_visible_to_service_without_permissions(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
service_one['permissions'] = []
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
assert not page.select('.big-number-meta-wrapper')
assert mock_get_inbound_sms_summary.called is False
def test_inbound_messages_shows_count_of_messages_when_there_are_messages(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
service_one['permissions'] = ['inbound_sms']
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
banner = page.select('a.banner-dashboard')[1]
assert normalize_spaces(
banner.text
) == '9,999 text messages received latest message just now'
assert banner['href'] == url_for(
'main.inbox', service_id=SERVICE_ONE_ID
)
def test_inbound_messages_shows_count_of_messages_when_there_are_no_messages(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary_with_no_messages,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
service_one['permissions'] = ['inbound_sms']
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
banner = page.select('a.banner-dashboard')[1]
assert normalize_spaces(banner.text) == '0 text messages received'
assert banner['href'] == url_for(
'main.inbox', service_id=SERVICE_ONE_ID
)
@pytest.mark.parametrize('index, expected_row', enumerate([
'07900 900000 message-1 1 hour ago',
'07900 900000 message-2 1 hour ago',
'07900 900000 message-3 1 hour ago',
'07900 900002 message-4 3 hours ago',
'+33 1 12 34 56 78 message-5 5 hours ago',
'+1 202-555-0104 message-6 7 hours ago',
'+1 202-555-0104 message-7 9 hours ago',
'+682 12345 message-8 9 hours ago',
]))
def test_inbox_showing_inbound_messages(
client_request,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_most_recent_inbound_sms,
index,
expected_row,
):
service_one['permissions'] = ['inbound_sms']
page = client_request.get(
'main.inbox',
service_id=SERVICE_ONE_ID,
)
rows = page.select('tbody tr')
assert len(rows) == 8
assert normalize_spaces(rows[index].text) == expected_row
assert page.select_one('a[download]')['href'] == url_for(
'main.inbox_download',
service_id=SERVICE_ONE_ID,
)
def test_get_inbound_sms_shows_page_links(
client_request,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_most_recent_inbound_sms,
mock_get_inbound_number_for_service,
):
service_one['permissions'] = ['inbound_sms']
page = client_request.get(
'main.inbox',
service_id=SERVICE_ONE_ID,
page=2,
)
assert 'Next page' in page.find('li', {'class': 'next-page'}).text
assert 'Previous page' in page.find('li', {'class': 'previous-page'}).text
def test_empty_inbox(
client_request,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_most_recent_inbound_sms_with_no_messages,
mock_get_inbound_number_for_service,
):
service_one['permissions'] = ['inbound_sms']
page = client_request.get(
'main.inbox',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select('tbody tr')) == (
'When users text your service’s phone number (0781239871) you’ll see the messages here'
)
assert not page.select('a[download]')
assert not page.select('li.next-page')
assert not page.select('li.previous-page')
@pytest.mark.parametrize('endpoint', [
'main.inbox',
'main.inbox_updates',
])
def test_inbox_not_accessible_to_service_without_permissions(
client_request,
service_one,
endpoint,
):
service_one['permissions'] = []
client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
_expected_status=403,
)
def test_anyone_can_see_inbox(
client_request,
api_user_active,
service_one,
mocker,
mock_get_most_recent_inbound_sms_with_no_messages,
mock_get_inbound_number_for_service,
):
service_one['permissions'] = ['inbound_sms']
validate_route_permission_with_client(
mocker,
client_request,
'GET',
200,
url_for('main.inbox', service_id=service_one['id']),
['view_activity'],
api_user_active,
service_one,
)
def test_view_inbox_updates(
client_request,
service_one,
mocker,
mock_get_most_recent_inbound_sms_with_no_messages,
):
service_one['permissions'] += ['inbound_sms']
mock_get_partials = mocker.patch(
'app.main.views.dashboard.get_inbox_partials',
return_value={'messages': 'foo'},
)
response = client_request.get_response(
'main.inbox_updates', service_id=SERVICE_ONE_ID,
)
assert json.loads(response.get_data(as_text=True)) == {'messages': 'foo'}
mock_get_partials.assert_called_once_with(SERVICE_ONE_ID)
@freeze_time("2016-07-01 13:00")
def test_download_inbox(
client_request,
mock_get_inbound_sms,
):
response = client_request.get_response(
'main.inbox_download',
service_id=SERVICE_ONE_ID,
)
assert response.headers['Content-Type'] == (
'text/csv; '
'charset=utf-8'
)
assert response.headers['Content-Disposition'] == (
'inline; '
'filename="Received text messages 2016-07-01.csv"'
)
assert response.get_data(as_text=True) == (
'Phone number,Message,Received\r\n'
'07900 900000,message-1,2016-07-01 13:00\r\n'
'07900 900000,message-2,2016-07-01 12:59\r\n'
'07900 900000,message-3,2016-07-01 12:59\r\n'
'07900 900002,message-4,2016-07-01 10:59\r\n'
'+33 1 12 34 56 78,message-5,2016-07-01 08:59\r\n'
'+1 202-555-0104,message-6,2016-07-01 06:59\r\n'
'+1 202-555-0104,message-7,2016-07-01 04:59\r\n'
'+682 12345,message-8,2016-07-01 04:59\r\n'
)
@freeze_time("2016-07-01 13:00")
@pytest.mark.parametrize('message_content, expected_cell', [
('=2+5', '2+5'),
('==2+5', '2+5'),
('-2+5', '2+5'),
('+2+5', '2+5'),
('@2+5', '2+5'),
('looks safe,=2+5', '"looks safe,=2+5"'),
])
def test_download_inbox_strips_formulae(
mocker,
client_request,
fake_uuid,
message_content,
expected_cell,
):
mocker.patch(
'app.service_api_client.get_inbound_sms',
return_value={
'has_next': False,
'data': [{
'user_number': 'elevenchars',
'notify_number': 'foo',
'content': message_content,
'created_at': datetime.utcnow().isoformat(),
'id': fake_uuid,
}]
},
)
response = client_request.get_response(
'main.inbox_download',
service_id=SERVICE_ONE_ID,
)
assert expected_cell in response.get_data(as_text=True).split('\r\n')[1]
def test_returned_letters_not_visible_if_service_has_no_returned_letters(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
assert not page.select('#total-returned-letters')
@pytest.mark.parametrize('reporting_date, expected_message', (
('2020-01-10 00:00:00.000000', (
'4,000 returned letters latest report today'
)),
('2020-01-09 23:59:59.000000', (
'4,000 returned letters latest report yesterday'
)),
('2020-01-08 12:12:12.000000', (
'4,000 returned letters latest report 2 days ago'
)),
('2019-12-10 00:00:00.000000', (
'4,000 returned letters latest report 1 month ago'
)),
))
@freeze_time('2020-01-10 12:34:00.000000')
def test_returned_letters_shows_count_of_recently_returned_letters(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
reporting_date,
expected_message,
):
mocker.patch(
'app.service_api_client.get_returned_letter_statistics',
return_value={
'returned_letter_count': 4000,
'most_recent_report': reporting_date,
},
)
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
banner = page.select_one('#total-returned-letters')
assert normalize_spaces(banner.text) == expected_message
assert banner['href'] == url_for(
'main.returned_letter_summary', service_id=SERVICE_ONE_ID
)
@pytest.mark.parametrize('reporting_date, count, expected_message', (
('2020-02-02', 1, (
'1 returned letter latest report today'
)),
('2020-02-01', 1, (
'1 returned letter latest report yesterday'
)),
('2020-01-31', 1, (
'1 returned letter latest report 2 days ago'
)),
('2020-01-26', 1, (
'1 returned letter latest report 7 days ago'
)),
('2020-01-25', 0, (
'0 returned letters latest report 8 days ago'
)),
('2020-01-01', 0, (
'0 returned letters latest report 1 month ago'
)),
('2019-09-09', 0, (
'0 returned letters latest report 4 months ago'
)),
('2010-10-10', 0, (
'0 returned letters latest report 9 years ago'
)),
))
@freeze_time('2020-02-02')
def test_returned_letters_only_counts_recently_returned_letters(
client_request,
mocker,
service_one,
mock_get_service_templates_when_no_templates_exist,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_service_statistics,
mock_get_template_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary_with_no_messages,
reporting_date,
count,
expected_message,
):
mocker.patch(
'app.service_api_client.get_returned_letter_statistics',
return_value={
'returned_letter_count': count,
'most_recent_report': reporting_date,
},
)
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
banner = page.select_one('#total-returned-letters')
assert normalize_spaces(banner.text) == expected_message
assert banner['href'] == url_for(
'main.returned_letter_summary', service_id=SERVICE_ONE_ID
)
def test_should_show_recent_templates_on_dashboard(
client_request,
mocker,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mock_template_stats = mocker.patch('app.template_statistics_client.get_template_statistics_for_service',
return_value=copy.deepcopy(stub_template_stats))
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_template_stats.assert_called_once_with(SERVICE_ONE_ID, limit_days=7)
headers = [header.text.strip() for header in page.find_all('h2') + page.find_all('h1')]
assert 'In the last 7 days' in headers
table_rows = page.find_all('tbody')[0].find_all('tr')
assert len(table_rows) == 4
assert 'Provided as PDF' in table_rows[0].find_all('th')[0].text
assert 'Letter' in table_rows[0].find_all('th')[0].text
assert '400' in table_rows[0].find_all('td')[0].text
assert 'three' in table_rows[1].find_all('th')[0].text
assert 'Letter template' in table_rows[1].find_all('th')[0].text
assert '300' in table_rows[1].find_all('td')[0].text
assert 'two' in table_rows[2].find_all('th')[0].text
assert 'Email template' in table_rows[2].find_all('th')[0].text
assert '200' in table_rows[2].find_all('td')[0].text
assert 'one' in table_rows[3].find_all('th')[0].text
assert 'Text message template' in table_rows[3].find_all('th')[0].text
assert '100' in table_rows[3].find_all('td')[0].text
@pytest.mark.parametrize('stats', (
pytest.param(
[stub_template_stats[0]],
),
pytest.param(
[stub_template_stats[0], stub_template_stats[1]],
marks=pytest.mark.xfail(raises=AssertionError),
)
))
def test_should_not_show_recent_templates_on_dashboard_if_only_one_template_used(
client_request,
mocker,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
stats,
):
mock_template_stats = mocker.patch(
'app.template_statistics_client.get_template_statistics_for_service',
return_value=stats,
)
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
main = page.select_one('main').text
mock_template_stats.assert_called_once_with(SERVICE_ONE_ID, limit_days=7)
assert stats[0]['template_name'] == 'one'
assert stats[0]['template_name'] not in main
# count appears as total, but not per template
expected_count = stats[0]['count']
assert expected_count == 50
assert normalize_spaces(
page.select_one('#total-sms .big-number-smaller').text
) == (
'{} text messages sent'.format(expected_count)
)
@freeze_time("2016-07-01 12:00") # 4 months into 2016 financial year
@pytest.mark.parametrize('extra_args', [
{},
{'year': '2016'},
])
def test_should_show_redirect_from_template_history(
client_request,
extra_args,
):
client_request.get(
'main.template_history',
service_id=SERVICE_ONE_ID,
_expected_status=301,
**extra_args,
)
@freeze_time("2016-07-01 12:00") # 4 months into 2016 financial year
@pytest.mark.parametrize('extra_args', [
{},
{'year': '2016'},
])
def test_should_show_monthly_breakdown_of_template_usage(
client_request,
mock_get_monthly_template_usage,
extra_args,
):
page = client_request.get(
'main.template_usage',
service_id=SERVICE_ONE_ID,
**extra_args
)
mock_get_monthly_template_usage.assert_called_once_with(SERVICE_ONE_ID, 2016)
table_rows = page.select('tbody tr')
assert ' '.join(table_rows[0].text.split()) == (
'My first template '
'Text message template '
'2'
)
assert len(table_rows) == len(['April'])
assert len(page.select('.table-no-data')) == len(['May', 'June', 'July'])
def test_anyone_can_see_monthly_breakdown(
client_request,
api_user_active,
service_one,
mocker,
mock_get_monthly_notification_stats,
):
validate_route_permission_with_client(
mocker,
client_request,
'GET',
200,
url_for('main.monthly', service_id=service_one['id']),
['view_activity'],
api_user_active,
service_one,
)
def test_monthly_shows_letters_in_breakdown(
client_request,
service_one,
mock_get_monthly_notification_stats,
):
page = client_request.get(
'main.monthly',
service_id=service_one['id']
)
columns = page.select('.table-field-left-aligned .big-number-label')
assert normalize_spaces(columns[0].text) == 'emails'
assert normalize_spaces(columns[1].text) == 'text messages'
assert normalize_spaces(columns[2].text) == 'letters'
@pytest.mark.parametrize('endpoint', [
'main.monthly',
'main.template_usage',
])
@freeze_time("2015-01-01 15:15:15.000000")
def test_stats_pages_show_last_3_years(
client_request,
endpoint,
mock_get_monthly_notification_stats,
mock_get_monthly_template_usage,
):
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('.pill').text) == (
'2014 to 2015 financial year '
'2013 to 2014 financial year '
'2012 to 2013 financial year'
)
def test_monthly_has_equal_length_tables(
client_request,
service_one,
mock_get_monthly_notification_stats,
):
page = client_request.get(
'main.monthly',
service_id=service_one['id']
)
assert page.select_one('.table-field-headings th').get('width') == "25%"
@freeze_time("2016-01-01 11:09:00.061258")
def test_should_show_upcoming_jobs_on_dashboard(
client_request,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_get_jobs.assert_called_once_with(SERVICE_ONE_ID)
mock_get_scheduled_job_stats.assert_called_once_with(SERVICE_ONE_ID)
assert normalize_spaces(
page.select_one('main h2').text
) == (
'In the next few days'
)
assert normalize_spaces(
page.select_one('a.banner-dashboard').text
) == (
'2 files waiting to send '
'sending starts today at 11:09am'
)
assert page.select_one('a.banner-dashboard')['href'] == url_for(
'main.uploads', service_id=SERVICE_ONE_ID
)
def test_should_not_show_upcoming_jobs_on_dashboard_if_count_is_0(
mocker,
client_request,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_has_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mocker.patch('app.job_api_client.get_scheduled_job_stats', return_value={
'count': 0,
'soonest_scheduled_for': None,
})
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_has_jobs.assert_called_once_with(SERVICE_ONE_ID)
assert 'In the next few days' not in page.select_one('main').text
assert 'files waiting to send ' not in page.select_one('main').text
def test_should_not_show_upcoming_jobs_on_dashboard_if_service_has_no_jobs(
mocker,
client_request,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_has_no_jobs,
mock_get_scheduled_job_stats,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
mock_has_no_jobs.assert_called_once_with(SERVICE_ONE_ID)
assert mock_get_scheduled_job_stats.called is False
assert 'In the next few days' not in page.select_one('main').text
assert 'files waiting to send ' not in page.select_one('main').text
@pytest.mark.parametrize('permissions', (
['email', 'sms'],
['email', 'sms', 'letter'],
))
@pytest.mark.parametrize('totals', [
(
{
'email': {'requested': 0, 'delivered': 0, 'failed': 0},
'sms': {'requested': 99999, 'delivered': 0, 'failed': 0},
'letter': {'requested': 99999, 'delivered': 0, 'failed': 0}
},
),
(
{
'email': {'requested': 0, 'delivered': 0, 'failed': 0},
'sms': {'requested': 0, 'delivered': 0, 'failed': 0},
'letter': {'requested': 100000, 'delivered': 0, 'failed': 0},
},
),
])
def test_correct_font_size_for_big_numbers(
client_request,
mocker,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
service_one,
permissions,
totals,
):
service_one['permissions'] = permissions
mocker.patch(
'app.main.views.dashboard.get_dashboard_totals',
return_value=totals
)
page = client_request.get(
'main.service_dashboard',
service_id=service_one['id'],
)
assert (
len(page.select_one('[data-key=totals]').select('.govuk-grid-column-one-third'))
) == (
len(page.select_one('[data-key=usage]').select('.govuk-grid-column-one-third'))
) == (
len(page.select('.big-number-with-status .big-number-smaller'))
) == 3
def test_should_not_show_jobs_on_dashboard_for_users_with_uploads_page(
client_request,
service_one,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_jobs,
mock_get_scheduled_job_stats,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
for filename in {
"export 1/1/2016.xls",
"all email addresses.xlsx",
"applicants.ods",
"thisisatest.csv",
}:
assert filename not in page.select_one('main').text
@freeze_time("2012-03-31 12:12:12")
def test_usage_page(
client_request,
mock_get_annual_usage_for_service,
mock_get_monthly_usage_for_service,
mock_get_free_sms_fragment_limit
):
page = client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
)
mock_get_monthly_usage_for_service.assert_called_once_with(SERVICE_ONE_ID, 2011)
mock_get_annual_usage_for_service.assert_called_once_with(SERVICE_ONE_ID, 2011)
mock_get_free_sms_fragment_limit.assert_called_with(SERVICE_ONE_ID, 2011)
nav = page.find('ul', {'class': 'pill'})
unselected_nav_links = nav.select('a:not(.pill-item--selected)')
assert normalize_spaces(nav.find('a', {'aria-current': 'page'}).text) == '2011 to 2012 financial year'
assert normalize_spaces(unselected_nav_links[0].text) == '2010 to 2011 financial year'
assert normalize_spaces(unselected_nav_links[1].text) == '2009 to 2010 financial year'
annual_usage = page.find_all('div', {'class': 'govuk-grid-column-one-third'})
    # annual stats are shown in two rows, each with three columns; email is col 1
email_column = normalize_spaces(annual_usage[0].text + annual_usage[3].text)
assert 'Emails' in email_column
assert '1,000 sent' in email_column
sms_column = normalize_spaces(annual_usage[1].text + annual_usage[4].text)
assert 'Text messages' in sms_column
assert '251,800 sent' in sms_column
assert '250,000 free allowance' in sms_column
assert '0 free allowance remaining' in sms_column
assert '£29.85 spent' in sms_column
assert '1,500 at 1.65 pence' in sms_column
assert '300 at 1.70 pence' in sms_column
letter_column = normalize_spaces(annual_usage[2].text + annual_usage[5].text)
assert 'Letters' in letter_column
assert '100 sent' in letter_column
assert '£30.00 spent' in letter_column
@freeze_time("2012-03-31 12:12:12")
def test_usage_page_no_sms_spend(
mocker,
client_request,
mock_get_monthly_usage_for_service,
mock_get_free_sms_fragment_limit
):
mocker.patch('app.billing_api_client.get_annual_usage_for_service', return_value=[
{
"notification_type": "sms",
"chargeable_units": 1000,
"charged_units": 0,
"rate": 0.0165,
"cost": 0
}
])
page = client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
)
annual_usage = page.find_all('div', {'class': 'govuk-grid-column-one-third'})
sms_column = normalize_spaces(annual_usage[1].text + annual_usage[4].text)
assert 'Text messages' in sms_column
assert '250,000 free allowance' in sms_column
assert '249,000 free allowance remaining' in sms_column
assert '£0.00 spent' in sms_column
assert 'pence per message' not in sms_column
@freeze_time("2012-03-31 12:12:12")
def test_usage_page_monthly_breakdown(
client_request,
service_one,
mock_get_annual_usage_for_service,
mock_get_monthly_usage_for_service,
mock_get_free_sms_fragment_limit
):
page = client_request.get('main.usage', service_id=SERVICE_ONE_ID)
monthly_breakdown = normalize_spaces(page.find('table').text)
assert 'April' in monthly_breakdown
assert '249,860 free text messages' in monthly_breakdown
assert 'February' in monthly_breakdown
assert '£29.55' in monthly_breakdown
assert '140 free text messages' in monthly_breakdown
assert '960 text messages at 1.65p' in monthly_breakdown
assert '33 text messages at 1.70p' in monthly_breakdown
assert '5 first class letters at 33p' in monthly_breakdown
assert '10 second class letters at 31p' in monthly_breakdown
assert '3 international letters at 55p' in monthly_breakdown
assert '7 international letters at 84p' in monthly_breakdown
assert 'March' in monthly_breakdown
assert '£20.91' in monthly_breakdown
assert '1,230 text messages at 1.70p' in monthly_breakdown
@pytest.mark.parametrize(
'now, expected_number_of_months', [
(freeze_time("2017-03-31 11:09:00.061258"), 12),
(freeze_time("2017-01-01 11:09:00.061258"), 10)
]
)
def test_usage_page_monthly_breakdown_shows_months_so_far(
client_request,
service_one,
mock_get_annual_usage_for_service,
mock_get_monthly_usage_for_service,
mock_get_free_sms_fragment_limit,
now,
expected_number_of_months
):
with now:
page = client_request.get('main.usage', service_id=SERVICE_ONE_ID)
rows = page.find('table').find_all('tr', class_='table-row')
assert len(rows) == expected_number_of_months
@freeze_time("2012-03-31 12:12:12")
def test_usage_page_letter_breakdown_ordered_by_postage_and_rate(
client_request,
service_one,
mock_get_monthly_usage_for_service,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit
):
page = client_request.get('main.usage', service_id=SERVICE_ONE_ID)
row_for_feb = page.find('table').find_all('tr', class_='table-row')[10]
postage_details = row_for_feb.find_all('li', class_='tabular-numbers')
assert normalize_spaces(postage_details[3].text) == '5 first class letters at 33p'
assert normalize_spaces(postage_details[4].text) == '10 second class letters at 31p'
assert normalize_spaces(postage_details[5].text) == '3 international letters at 55p'
assert normalize_spaces(postage_details[6].text) == '7 international letters at 84p'
def test_usage_page_with_0_free_allowance(
mocker,
client_request,
mock_get_annual_usage_for_service,
mock_get_monthly_usage_for_service,
):
mocker.patch(
'app.billing_api_client.get_free_sms_fragment_limit_for_year',
return_value=0,
)
page = client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
year=2020,
)
annual_usage = page.select('main .govuk-grid-column-one-third')
sms_column = normalize_spaces(annual_usage[1].text)
assert '0 free allowance' in sms_column
assert 'free allowance remaining' not in sms_column
def test_usage_page_with_year_argument(
client_request,
mock_get_annual_usage_for_service,
mock_get_monthly_usage_for_service,
mock_get_free_sms_fragment_limit,
):
client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
year=2000,
)
mock_get_monthly_usage_for_service.assert_called_once_with(SERVICE_ONE_ID, 2000)
mock_get_annual_usage_for_service.assert_called_once_with(SERVICE_ONE_ID, 2000)
mock_get_free_sms_fragment_limit.assert_called_with(SERVICE_ONE_ID, 2000)
def test_usage_page_for_invalid_year(
client_request,
):
client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
year='abcd',
_expected_status=404,
)
@freeze_time("2012-03-31 12:12:12")
def test_future_usage_page(
client_request,
mock_get_annual_usage_for_service_in_future,
mock_get_monthly_usage_for_service_in_future,
mock_get_free_sms_fragment_limit
):
client_request.get(
'main.usage',
service_id=SERVICE_ONE_ID,
year=2014,
)
mock_get_monthly_usage_for_service_in_future.assert_called_once_with(SERVICE_ONE_ID, 2014)
mock_get_annual_usage_for_service_in_future.assert_called_once_with(SERVICE_ONE_ID, 2014)
mock_get_free_sms_fragment_limit.assert_called_with(SERVICE_ONE_ID, 2014)
def _test_dashboard_menu(client_request, mocker, usr, service, permissions):
usr['permissions'][str(service['id'])] = permissions
usr['services'] = [service['id']]
mocker.patch('app.user_api_client.check_verify_code', return_value=(True, ''))
mocker.patch('app.service_api_client.get_services', return_value={'data': [service]})
mocker.patch('app.user_api_client.get_user', return_value=usr)
mocker.patch('app.user_api_client.get_user_by_email', return_value=usr)
mocker.patch('app.service_api_client.get_service', return_value={'data': service})
client_request.login(usr)
return client_request.get('main.service_dashboard', service_id=service['id'])
def test_menu_send_messages(
client_request,
mocker,
notify_admin,
api_user_active,
service_one,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_inbound_sms_summary,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
service_one['permissions'] = ['email', 'sms', 'letter', 'upload_letters']
page = _test_dashboard_menu(
client_request,
mocker,
api_user_active,
service_one,
['view_activity', 'send_texts', 'send_emails', 'send_letters']
)
page = str(page)
assert url_for(
'main.choose_template',
service_id=service_one['id'],
) in page
assert url_for('main.uploads', service_id=service_one['id']) in page
assert url_for('main.manage_users', service_id=service_one['id']) in page
assert url_for('main.service_settings', service_id=service_one['id']) not in page
assert url_for('main.api_keys', service_id=service_one['id']) not in page
assert url_for('main.view_providers') not in page
def test_menu_send_messages_when_service_does_not_have_upload_letters_permission(
client_request,
mocker,
api_user_active,
service_one,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_inbound_sms_summary,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = _test_dashboard_menu(
client_request,
mocker,
api_user_active,
service_one,
['view_activity', 'send_texts', 'send_emails', 'send_letters'])
assert page.select_one('.navigation')
assert url_for('main.uploads', service_id=service_one['id']) not in page.select_one('.navigation')
def test_menu_manage_service(
client_request,
mocker,
api_user_active,
service_one,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
mock_get_free_sms_fragment_limit,
):
page = _test_dashboard_menu(
client_request,
mocker,
api_user_active,
service_one,
['view_activity', 'manage_templates', 'manage_users', 'manage_settings'])
page = str(page)
assert url_for(
'main.choose_template',
service_id=service_one['id'],
) in page
assert url_for('main.manage_users', service_id=service_one['id']) in page
assert url_for('main.service_settings', service_id=service_one['id']) in page
assert url_for('main.api_keys', service_id=service_one['id']) not in page
def test_menu_manage_api_keys(
client_request,
mocker,
api_user_active,
service_one,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
mock_get_free_sms_fragment_limit,
):
page = _test_dashboard_menu(
client_request,
mocker,
api_user_active,
service_one,
['view_activity', 'manage_api_keys'])
page = str(page)
assert url_for('main.choose_template', service_id=service_one['id'],) in page
assert url_for('main.manage_users', service_id=service_one['id']) in page
assert url_for('main.service_settings', service_id=service_one['id']) in page
assert url_for('main.api_integration', service_id=service_one['id']) in page
def test_menu_all_services_for_platform_admin_user(
client_request,
mocker,
platform_admin_user,
service_one,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
mock_get_free_sms_fragment_limit,
):
page = _test_dashboard_menu(
client_request,
mocker,
platform_admin_user,
service_one,
[])
page = str(page)
assert url_for('main.choose_template', service_id=service_one['id']) in page
assert url_for('main.manage_users', service_id=service_one['id']) in page
assert url_for('main.service_settings', service_id=service_one['id']) in page
assert url_for('main.view_notifications', service_id=service_one['id'], message_type='email') in page
assert url_for('main.view_notifications', service_id=service_one['id'], message_type='sms') in page
assert url_for('main.api_keys', service_id=service_one['id']) not in page
def test_route_for_service_permissions(
mocker,
notify_admin,
api_user_active,
service_one,
mock_get_service,
mock_get_user,
mock_get_service_templates,
mock_has_no_jobs,
mock_get_template_statistics,
mock_get_service_statistics,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
with notify_admin.test_request_context():
validate_route_permission(
mocker,
notify_admin,
"GET",
200,
url_for('main.service_dashboard', service_id=service_one['id']),
['view_activity'],
api_user_active,
service_one)
def test_aggregate_template_stats():
expected = aggregate_template_usage(copy.deepcopy(stub_template_stats))
assert len(expected) == 4
assert expected[0]['template_name'] == 'four'
assert expected[0]['count'] == 400
assert expected[0]['template_id'] == 'id-4'
assert expected[0]['template_type'] == 'letter'
assert expected[1]['template_name'] == 'three'
assert expected[1]['count'] == 300
assert expected[1]['template_id'] == 'id-3'
assert expected[1]['template_type'] == 'letter'
assert expected[2]['template_name'] == 'two'
assert expected[2]['count'] == 200
assert expected[2]['template_id'] == 'id-2'
assert expected[2]['template_type'] == 'email'
assert expected[3]['template_name'] == 'one'
assert expected[3]['count'] == 100
assert expected[3]['template_id'] == 'id-1'
assert expected[3]['template_type'] == 'sms'
def test_aggregate_notifications_stats():
expected = aggregate_notifications_stats(copy.deepcopy(stub_template_stats))
assert expected == {
"sms": {"requested": 100, "delivered": 50, "failed": 0},
"letter": {"requested": 700, "delivered": 700, "failed": 0},
"email": {"requested": 200, "delivered": 0, "failed": 100}
}
def test_service_dashboard_updates_gets_dashboard_totals(
mocker,
client_request,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_service_statistics,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_inbound_sms_summary,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mocker.patch('app.main.views.dashboard.get_dashboard_totals', return_value={
'email': {'requested': 123, 'delivered': 0, 'failed': 0},
'sms': {'requested': 456, 'delivered': 0, 'failed': 0}
})
page = client_request.get(
'main.service_dashboard',
service_id=SERVICE_ONE_ID,
)
numbers = [number.text.strip() for number in page.find_all('span', class_='big-number-number')]
assert '123' in numbers
assert '456' in numbers
def test_get_dashboard_totals_adds_percentages():
stats = {
'sms': {
'requested': 3,
'delivered': 0,
'failed': 2
},
'email': {
'requested': 0,
'delivered': 0,
'failed': 0
}
}
assert get_dashboard_totals(stats)['sms']['failed_percentage'] == '66.7'
assert get_dashboard_totals(stats)['email']['failed_percentage'] == '0'
@pytest.mark.parametrize(
'failures,expected', [
(2, False),
(3, False),
(4, True)
]
)
def test_get_dashboard_totals_adds_warning(failures, expected):
stats = {
'sms': {
'requested': 100,
'delivered': 0,
'failed': failures
}
}
assert get_dashboard_totals(stats)['sms']['show_warning'] == expected
def test_format_monthly_stats_empty_case():
assert format_monthly_stats_to_list({}) == []
def test_format_monthly_stats_labels_month():
resp = format_monthly_stats_to_list({'2016-07': {}})
assert resp[0]['name'] == 'July'
def test_format_monthly_stats_has_stats_with_failure_rate():
resp = format_monthly_stats_to_list({
'2016-07': {'sms': _stats(3, 1, 2)}
})
assert resp[0]['sms_counts'] == {
'failed': 2,
'failed_percentage': '66.7',
'requested': 3,
'show_warning': True,
}
def test_format_monthly_stats_works_for_email_letter():
resp = format_monthly_stats_to_list({
'2016-07': {
'sms': {},
'email': {},
'letter': {},
}
})
assert isinstance(resp[0]['sms_counts'], dict)
assert isinstance(resp[0]['email_counts'], dict)
assert isinstance(resp[0]['letter_counts'], dict)
def _stats(requested, delivered, failed):
return {'requested': requested, 'delivered': delivered, 'failed': failed}
@pytest.mark.parametrize('dict_in, expected_failed, expected_requested', [
(
{},
0,
0
),
(
{'temporary-failure': 1, 'permanent-failure': 1, 'technical-failure': 1},
3,
3,
),
(
{'created': 1, 'pending': 1, 'sending': 1, 'delivered': 1},
0,
4,
),
])
def test_aggregate_status_types(dict_in, expected_failed, expected_requested):
sms_counts = aggregate_status_types({'sms': dict_in})['sms_counts']
assert sms_counts['failed'] == expected_failed
assert sms_counts['requested'] == expected_requested
def test_get_tuples_of_financial_years():
assert list(get_tuples_of_financial_years(
lambda year: 'http://example.com?year={}'.format(year),
start=2040,
end=2041,
)) == [
('financial year', 2041, 'http://example.com?year=2041', '2041 to 2042'),
('financial year', 2040, 'http://example.com?year=2040', '2040 to 2041'),
]
def test_get_tuples_of_financial_years_defaults_to_2015():
assert 2015 in list(get_tuples_of_financial_years(
lambda year: 'http://example.com?year={}'.format(year),
end=2040,
))[-1]
def test_org_breadcrumbs_do_not_show_if_service_has_no_org(
client_request,
mock_get_template_statistics,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert not page.select('.navigation-organisation-link')
def test_org_breadcrumbs_do_not_show_if_user_is_not_an_org_member(
mocker,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
active_caseworking_user,
client_request,
mock_get_template_folders,
mock_get_returned_letter_statistics_with_no_returned_letters,
mock_get_api_keys,
):
# active_caseworking_user is not an org member
service_one_json = service_json(SERVICE_ONE_ID,
users=[active_caseworking_user['id']],
restricted=False,
organisation_id=ORGANISATION_ID)
mocker.patch('app.service_api_client.get_service', return_value={'data': service_one_json})
client_request.login(active_caseworking_user, service=service_one_json)
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID, _follow_redirects=True)
assert not page.select('.navigation-organisation-link')
def test_org_breadcrumbs_show_if_user_is_a_member_of_the_services_org(
mocker,
mock_get_template_statistics,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
active_user_with_permissions,
client_request,
):
# active_user_with_permissions (used by the client_request) is an org member
service_one_json = service_json(SERVICE_ONE_ID,
users=[active_user_with_permissions['id']],
restricted=False,
organisation_id=ORGANISATION_ID)
mocker.patch('app.service_api_client.get_service', return_value={'data': service_one_json})
mocker.patch('app.organisations_client.get_organisation', return_value=organisation_json(
id_=ORGANISATION_ID,
))
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert page.select_one('.navigation-organisation-link')['href'] == url_for(
'main.organisation_dashboard',
org_id=ORGANISATION_ID,
)
def test_org_breadcrumbs_do_not_show_if_user_is_a_member_of_the_services_org_but_service_is_in_trial_mode(
mocker,
mock_get_template_statistics,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
active_user_with_permissions,
client_request,
):
# active_user_with_permissions (used by the client_request) is an org member
service_one_json = service_json(SERVICE_ONE_ID,
users=[active_user_with_permissions['id']],
organisation_id=ORGANISATION_ID)
mocker.patch('app.service_api_client.get_service', return_value={'data': service_one_json})
mocker.patch('app.models.service.Organisation')
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert not page.select('.navigation-breadcrumb')
def test_org_breadcrumbs_show_if_user_is_platform_admin(
mocker,
mock_get_template_statistics,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
platform_admin_user,
client_request,
):
service_one_json = service_json(SERVICE_ONE_ID,
users=[platform_admin_user['id']],
organisation_id=ORGANISATION_ID)
mocker.patch('app.service_api_client.get_service', return_value={'data': service_one_json})
mocker.patch('app.organisations_client.get_organisation', return_value=organisation_json(
id_=ORGANISATION_ID,
))
client_request.login(platform_admin_user, service_one_json)
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert page.select_one('.navigation-organisation-link')['href'] == url_for(
'main.organisation_dashboard',
org_id=ORGANISATION_ID,
)
def test_breadcrumb_shows_if_service_is_suspended(
mocker,
mock_get_template_statistics,
mock_get_service_templates_when_no_templates_exist,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
active_user_with_permissions,
client_request,
):
service_one_json = service_json(
SERVICE_ONE_ID,
active=False,
users=[active_user_with_permissions['id']],
)
mocker.patch('app.service_api_client.get_service', return_value={'data': service_one_json})
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert 'Suspended' in page.select_one('.navigation-service-name').text
@pytest.mark.parametrize('permissions', (
['email', 'sms'],
['email', 'sms', 'letter'],
))
def test_service_dashboard_shows_usage(
client_request,
service_one,
mock_get_service_templates,
mock_get_template_statistics,
mock_has_no_jobs,
mock_get_annual_usage_for_service,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
permissions,
):
service_one['permissions'] = permissions
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
assert normalize_spaces(
page.select_one('[data-key=usage]').text
) == (
'Unlimited '
'free email allowance '
'£29.85 '
'spent on text messages '
'£30.00 '
'spent on letters'
)
def test_service_dashboard_shows_free_allowance(
mocker,
client_request,
service_one,
mock_get_service_templates,
mock_get_template_statistics,
mock_has_no_jobs,
mock_get_free_sms_fragment_limit,
mock_get_returned_letter_statistics_with_no_returned_letters,
):
mocker.patch('app.billing_api_client.get_annual_usage_for_service', return_value=[
{
"notification_type": "sms",
"chargeable_units": 1000,
"charged_units": 0,
"rate": 0.0165,
"cost": 0
}
])
page = client_request.get('main.service_dashboard', service_id=SERVICE_ONE_ID)
usage_text = normalize_spaces(page.select_one('[data-key=usage]').text)
assert 'spent on text messages' not in usage_text
assert '249,000 free text messages left' in usage_text
|
# -*- coding: utf-8 -*-
import os
import re
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
INSTALL_REQUIRES = [
'six>=1.9.0',
'enum34>=1.0.4',
'invoke>=0.10.1',
'requests>=2.6.2',
'decorator>=3.4.2',
'inflection>=0.3.0',
'schematics>=1.0.4,<2.0.0',
'python-dateutil>=2.4.2',
]
TEST_REQUIRES = [
'pytest',
'responses',
]
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--verbose']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
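# For example, given a line like __version__ = '1.0.0' in betfair/__init__.py,
# find_version('betfair/__init__.py') returns '1.0.0' (the value itself is
# illustrative).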
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
setup(
name='betfair.py',
version=find_version('betfair/__init__.py'),
description='Python client for the Betfair API '
'(https://api.developer.betfair.com/)',
long_description=open('README.rst').read(),
author='Joshua Carp',
author_email='jm.carp@gmail.com',
url='https://github.com/jmcarp/betfair.py',
packages=find_packages(exclude=('test*', )),
package_dir={'betfair': 'betfair'},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
test_suite='tests',
tests_require=TEST_REQUIRES,
cmdclass={'test': PyTest}
)
|
import os
import pkg_resources
DEFAULT_ENDPOINTS_PATH = "endpoints.yml"
DEFAULT_CREDENTIALS_PATH = "credentials.yml"
DEFAULT_CONFIG_PATH = "config.yml"
DEFAULT_DOMAIN_PATH = "domain.yml"
DEFAULT_ACTIONS_PATH = "actions"
DEFAULT_MODELS_PATH = "models"
DEFAULT_DATA_PATH = "data"
DEFAULT_RESULTS_PATH = "results"
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
DOCS_BASE_URL = "https://rasa.com/docs/rasa"
LEGACY_DOCS_BASE_URL = "https://legacy-docs.rasa.com"
FALLBACK_CONFIG_PATH = pkg_resources.resource_filename(
__name__, "cli/default_config.yml"
)
CONFIG_MANDATORY_KEYS_CORE = ["policies"]
CONFIG_MANDATORY_KEYS_NLU = ["language", "pipeline"]
CONFIG_MANDATORY_KEYS = CONFIG_MANDATORY_KEYS_CORE + CONFIG_MANDATORY_KEYS_NLU
MINIMUM_COMPATIBLE_VERSION = "1.0.0rc1"
GLOBAL_USER_CONFIG_PATH = os.path.expanduser("~/.config/rasa/global.yml")
DEFAULT_LOG_LEVEL = "INFO"
DEFAULT_LOG_LEVEL_RASA_X = "WARNING"
DEFAULT_LOG_LEVEL_LIBRARIES = "ERROR"
ENV_LOG_LEVEL = "LOG_LEVEL"
ENV_LOG_LEVEL_LIBRARIES = "LOG_LEVEL_LIBRARIES"
|
import inspect
import pprint
class ParametrizedObject(object):
"""
Get the object configuration from the __init__ method.
The same as is done in the sklearn package.
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the recommender"""
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default, kwonlyargs, kwonlydefaults, annotations = inspect.getfullargspec(init)
if varargs is not None:
raise RuntimeError(
"No varargs allowed in __init__ of model class!"
"Please correct: %{cls}".format(cls=cls)
)
args.pop(0) # remove self
args.sort()
return args
def get_params(self):
"""Get parameters for this model."""
return {
k: v.get_config() if isinstance(v, ParametrizedObject) else v
for k, v in [(key, getattr(self, key, None)) for key in self._get_param_names()]
}
def get_config(self):
"""
Returns dictionary representation for model configuration
:return: dict
"""
conf = dict(name=self.__class__.__name__)
conf.update(self.get_params())
return conf
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, pprint.pformat(self.get_params())[1:-1])
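# A minimal usage sketch: the configuration is read straight from the __init__
# signature. DemoRecommender below is purely illustrative and not part of any
# real package.
if __name__ == "__main__":
    class DemoRecommender(ParametrizedObject):
        def __init__(self, factors=10, regularization=0.01):
            self.factors = factors
            self.regularization = regularization
    model = DemoRecommender(factors=32)
    print(model.get_params())  # {'factors': 32, 'regularization': 0.01}
    print(model.get_config())  # same dict plus 'name': 'DemoRecommender'
    print(repr(model))         # DemoRecommender('factors': 32, 'regularization': 0.01)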
|
#!/usr/bin/env python
# $Id: rst2newlatex.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX using
the new LaTeX writer.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. This writer is EXPERIMENTAL and should not be used '
'in a production environment. ' + default_description)
publish_cmdline(writer_name='newlatex2e', description=description)
|
from random import random
import logging
from telegram.ext import JobQueue, Job, run_async
from typing import *
import re
import maya
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, Message, Bot
import captions
import settings
import util
from custom_botlistbot import BotListBot
from dialog import messages
from settings import SELF_CHANNEL_USERNAME
from logzero import logger as log
def slang_datetime(dt) -> str:
maya_date = maya.MayaDT(dt.timestamp())
return maya_date.slang_time()
def find_bots_in_text(text: str, first=False):
matches = re.findall(settings.REGEX_BOT_ONLY, text)
if not matches:
return None
try:
return matches[0] if first else matches
except:
return None
def format_name(entity):
res = entity.first_name or ""
if entity.first_name and entity.last_name:
res += " " + entity.last_name
elif entity.last_name:
res = entity.last_name
return res
def validate_username(username: str):
if len(username) < 3:
return False
if username[0] != "@":
username = "@" + username
match = re.match(settings.REGEX_BOT_ONLY, username)
return username if match else False
def get_commands():
commands = ""
try:
with open("files/commands.txt", "rb") as file:
for command in file.readlines():
commands += "/" + command.decode("utf-8")
return commands
except FileNotFoundError:
log.error("File could not be opened.")
def get_channel():
from models import Channel
try:
return Channel.get(Channel.username == SELF_CHANNEL_USERNAME)
except Channel.DoesNotExist:
return False
def botlist_url_for_category(category):
return "http://t.me/{}/{}".format(
get_channel().username, category.current_message_id
)
def format_keyword(kw):
kw = kw[1:] if kw[0] == "#" else kw
kw = kw.replace(" ", "_")
kw = kw.replace("-", "_")
kw = kw.replace("'", "_")
kw = kw.lower()
return kw
def reroute_private_chat(
bot, update, quote, action, message, redirect_message=None, reply_markup=None
):
cid = update.effective_chat.id
mid = util.mid_from_update(update)
if redirect_message is None:
redirect_message = messages.REROUTE_PRIVATE_CHAT
if util.is_group_message(update):
update.message.reply_text(
redirect_message,
quote=quote,
parse_mode=ParseMode.MARKDOWN,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
captions.SWITCH_PRIVATE,
url="https://t.me/{}?start={}".format(
settings.SELF_BOT_NAME, action
),
),
InlineKeyboardButton(
"🔎 Switch to inline", switch_inline_query=action
),
]
]
),
)
else:
if mid:
bot.formatter.send_or_edit(cid, message, mid, reply_markup=reply_markup)
else:
update.message.reply_text(
message,
quote=quote,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup,
)
def make_sticker(filename, out_file, max_height=512, transparent=True):
return # TODO: fix
from PIL import Image
image = Image.open(filename)
# resize sticker to match new max height
# optimize image dimensions for stickers
if max_height == 512:
resize_ratio = min(512 / image.width, 512 / image.height)
image = image.resize(
(int(image.width * resize_ratio), int(image.height * resize_ratio))
)
else:
image.thumbnail((512, max_height), Image.ANTIALIAS)
if transparent:
canvas = Image.new("RGBA", (512, image.height))
else:
canvas = Image.new("RGB", (512, image.height), color="white")
pos = (0, 0)
try:
canvas.paste(image, pos, mask=image)
except ValueError:
canvas.paste(image, pos)
canvas.save(out_file)
return out_file
@run_async
def try_delete_after(
job_queue: JobQueue,
messages: Union[List[Union[Message, int]], Union[Message, int]],
delay: Union[float, int],
):
if isinstance(messages, (Message, int)):
_messages = [messages]
else:
_messages = messages
@run_async
def delete_messages(*args, **kwargs):
# noinspection PyTypeChecker
bot: BotListBot = job_queue.bot
for m in _messages:
bot.delete_message(m.chat_id, m.message_id, timeout=10, safe=True)
job_queue.run_once(delete_messages, delay, name="try_delete_after")
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utils for transpiler."""
import os
from .passmanager import PassManager, FlowController
from .propertyset import PropertySet
from .exceptions import TranspilerError, TranspilerAccessError
from .fencedobjs import FencedDAGCircuit, FencedPropertySet
from .basepasses import AnalysisPass, TransformationPass
from .coupling import CouplingMap
from .layout import Layout
from .transpile_circuit import transpile_circuit
|
from nltk.sentiment.util import mark_negation
from nltk.util import trigrams
import re
import validators
from .happy_tokenizer import Tokenizer
class SentimentTokenizer(object):
def __init__(self):
self.tknzr = Tokenizer()
@staticmethod
def reduce_lengthening(text):
"""
Replace repeated character sequences of length 3 or greater with sequences
of length 3.
"""
pattern = re.compile(r"(.)\1{2,}")
return pattern.sub(r"\1\1\1", text)
@staticmethod
def replace_username(token):
return '@__user__' if token.startswith('@') else token
@staticmethod
def replace_link(token):
return '__url__' if validators.url(token) else token
def __call__(self, t):
t = self.reduce_lengthening(t)
tokens = t.split(' ')
cleaned_tokens = []
for token in tokens:
token = self.replace_username(token)
token = self.replace_link(token)
cleaned_tokens.append(token)
rebuild_str = ' '.join(cleaned_tokens)
negated_tokens = mark_negation(list(self.tknzr.tokenize(rebuild_str)))
list_of_trigrams = list([' '.join(s) for s in trigrams(negated_tokens)])
return list_of_trigrams
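# A minimal usage sketch (the sentence is illustrative; the exact trigrams
# depend on the happy_tokenizer tokenisation and NLTK's mark_negation):
if __name__ == "__main__":
    tokenize = SentimentTokenizer()
    features = tokenize("I don't like it at aaaallll @someone https://example.com")
    # Each feature is a space-joined trigram of tokens, with repeated letters
    # collapsed to three, the username replaced by '@__user__', the URL by
    # '__url__', and tokens after the negation tagged with '_NEG'.
    print(features)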
|
import smbus2 as smbus
class BH1750:
    def __init__(self, bus=0, address=0x23):
        self.bus = smbus.SMBus(bus)
        self.address = address
    def get_data(self, type="lux"):
        # The sensor sends the high byte first; swap the two bytes so the
        # little-endian conversion below reconstructs the raw 16-bit count.
        source = self.__read()
        temp = source[0]
        source[0] = source[1]
        source[1] = temp
        # Per the BH1750 datasheet, lux = raw count / 1.2.
        lux = int.from_bytes(source, byteorder='little') / 1.2
        return {"lux": lux}
def __read(self):
## By default, set HIGH RESOLUTION MODE 1
data = self.bus.read_i2c_block_data(self.address,0x20,2)
return data
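# A minimal usage sketch, assuming the sensor is wired to I2C bus 1 at the
# default address 0x23 (bus number and wiring are board-specific assumptions):
if __name__ == "__main__":
    sensor = BH1750(bus=1)
    print("Ambient light: %.1f lux" % sensor.get_data()["lux"])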
|
#!/usr/bin/env python
import itertools, multiprocessing, os, random, sys, time
from optparse import OptionParser
import numpy as np
# Save people from having to set PYTHONPATH
import os
sys.path.insert(0, os.path.dirname(__file__))
from pygr import util, dnaseq, divsufsort, powrs
# The following functions allow us to parallelize the search with multiprocessing:
def set_globals(*args):
# This is ugly, but prevents us pickling this (static) data over and over again for multiprocessing
global data
data, = args
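# One way these helpers can be wired together: pass set_globals as the Pool
# initializer so the shared data is sent to each worker once rather than being
# pickled with every task (the seed list below is illustrative):
#
#   pool = multiprocessing.Pool(initializer=set_globals, initargs=(data,))
#   motifs = pool.map(calc_seed_kmers, [(m, ii, len(seeds)) for ii, m in enumerate(seeds)])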
def calc_seed_kmers((motif, ii, nseeds)):
kmer = "".join(motif)
retval = powrs.Motif(kmer, False, data)
if ii % 100 == 0 and nseeds > 0:
pct = float(ii) / nseeds
bar = "=" * int(50*pct)
space = " " * (50 - len(bar))
print "[%s%s] %4.1f%%" % (bar, space, 100*pct)
#print "[%s%s] %4.1f%% %s %.2f" % (bar, space, 100*pct, kmer, retval.score) # DEBUG
return retval
def calc_improved_motif(best_m):
# Returns (new_motif, is_finished)
alt_ms = [powrs.Motif(k, best_m.both_strands, data, parent=best_m) for k in best_m.other_nbrs]
alt_ms.append(powrs.Motif(best_m.center, not best_m.both_strands, data, parent=best_m))
alt_ms.sort(reverse=True)
if alt_ms[0].score <= best_m.score:
return (best_m, True) # can't improve this further
else:
return (alt_ms[0], False) # we made an improvement
#@util.profile("wedmi.prof")
def main(argv):
'''usage: %prog [options] IN_GROUP.fa[.gz] OUT_GROUP.fa[.gz]
POWRS (POsition-sensitive WoRd Set) motif identification algorithm
[Formerly known as WEDMI (word edit-distance motif identifier).]
It simultaneously finds motifs and regions that distinguish
in-group sequences from out-group sequences.
Motifs are modeled as a central k-mer of fixed length,
and some or all of the k-mers one mutation away from it,
on one or both strands.
Input FASTA files should look like this:
>ATxGxxxx 3
ACTGACTG...
The "identifier" field should be used for a gene name,
and the "name" field should indicate how many copies of that gene are in the file.
Multiple copies generally occur when there are multiple gene models (transcripts)
for a single gene, and all of them get included rather than one picked at random.
Output is to stdout and includes 8 fields:
1. Score of the motif, -log_10(p-value)
2. Number of in-group genes matching the motif in the specified window
3. Seed word (k-mer)
4. Reverse complement of the seed, if motif occurs on both strands, otherwise dashes
5. Window start (bp from left edge, modified by -5 and -l)
6. Window end (bp from left edge, modified by -5 and -l)
7. Full motif pattern (alternate bases in lower case)
8. powrs.Motif rank
All selected seeds are optimized together, one cycle at a time.
Output is shown for all seeds during these intermediate cycles,
with cycles separated by a double line (=====================).
For the final output, any motif whose seed is part of a higher-ranking motif
is not printed; thus, some ranks will be "skipped" in last cycle of output.
'''
parser = OptionParser(usage=main.__doc__, version="%prog 1.0")
#parser.add_option("-l", "--long-name",
# action="store|store_true|store_false|store_const|append|count|callback",
# dest="var_name",
# type="int|string|float|...",
# default=True,
# metavar="FILE",
# help="munge FILE (default %default)"
# )
parser.add_option("-k", "--seed-size", type=int, default=8,
help="Initial seed k-mer size (default %default)")
parser.add_option("-g", "--midgap", type=int, default=0,
help="Search with (%default) bp gap in the middle of k-mer")
parser.add_option("-w", "--window-width", type=int, default=25,
help="Granularity for optimizing motif region (default %default bp)")
parser.add_option("-b", "--bins", type=int, default=1,
help="Number of bins to use in correcting sequence composition bias (%default)")
parser.add_option("-p", "--permute-evidence", default=False, action="store_true",
help="Randomly permute the group assignments, for estimating the null distribution of scores.")
parser.add_option("-A", "--min-genes", type=int, default=50,
help="Minimum number of sequences that a valid motif will match (default %default)")
parser.add_option("-B", "--max-genes-frac", type=float, default=0.20,
help="Maximum fraction of sequences that a valid motif will match (default %default)")
parser.add_option("-5", "--align-5p", action="store_true", default=False,
help="Align on 5' edge of sequences instead of 3' edge")
parser.add_option('-l', '--length', type=int, default=0,
help='Maximum promoter length -- only used for formatting output (default %default)')
parser.add_option('-i', '--improve-limit', type=int, default=1000,
                      help='After trying to improve all motifs, discard all but N of them before starting a new cycle (default %default)')
parser.add_option('-I', '--improve-score-limit', type=float, default=0.1,
help="Don't bother trying to improve motifs that score below this (default %default)")
parser.add_option('-c', '--cluster-limit', type=int, default=200,
help='Only try to cluster the top N motifs (default %default)')
parser.add_option('-C', '--cluster-score-limit', type=float, default=6,
help="Don't bother trying to cluster motifs that score below this (default %default)")
parser.add_option('-P', '--parallel', action='store_true', default=False,
help='Use all available processors in parallel to speed computation')
parser.add_option('-v', '--verbose', action='store_true', default=False)
parser.add_option('--save', help='For debugging only.')
parser.add_option('--load', help='For debugging only.')
(options, args) = parser.parse_args(argv)
if len(args) == 2:
ingrp_file = util.gzopen(args[0], 'rb')
outgrp_file = util.gzopen(args[1], 'rb')
else:
parser.print_help()
print "Too many/few arguments!"
return 1
print "Loading sequences ..."
T = time.time()
fasta = list(dnaseq.read_fasta(ingrp_file))
out_fasta = list(dnaseq.read_fasta(outgrp_file))
fasta += out_fasta
evidence = np.zeros(len(fasta))
evidence[:-len(out_fasta)] = 1.
assert len(evidence) == len(fasta), "%i != %i" % (len(evidence), len(fasta))
if options.permute_evidence:
fasta_ids = [i for i,n,s in fasta]
if set(fasta_ids[:-len(out_fasta)]) & set(fasta_ids[-len(out_fasta):]):
print "In-group and out-group sequence IDs overlap -- reverting to simple shuffle!"
# In the worst case of complete overlap, all evidence gets set to 0 using the "smart" algo!
np.random.shuffle(evidence)
# TODO: this could screw up the weights, if they're not all equal to start with...
else:
print "Shuffling evidence by sequence ID ..."
# Randomly shuffle the evidence based on gene IDs, but so that all gene models
# for the same gene retain the same evidence. Thus weights are unaffected.
ev_map = dict((i,e) for (i,n,s), e in zip(fasta, evidence))
keys = ev_map.keys()
random.shuffle(keys)
ev_map = dict(zip(keys, ev_map.values()))
for ii, (ident, name, seq) in enumerate(fasta):
evidence[ii] = float(ev_map[ident])
# Need weights because some genes are represented by multiple gene models.
evidence_weights = np.array([1./int(n) for i,n,s in fasta])
# Evidence is pre-multiplied by the weights to save computations:
evidence *= evidence_weights
# Pre-calculate constants used in the cERMIT score function:
G = evidence_weights.sum() # total "number" of genes
mu = evidence.sum() / G # average evidence for all genes
print "G = %f mu = %f" % (G, mu)
# G and mu "should" be vectors or matrices to account for the fact that some genes
# are not full length. However, the number of genes within any window is not constant
# across the length of the window, particularly if it's long.
# So for now, I'm just going to ignore this problem. Results are still reasonable.
# This will be input to numpy.searchsorted() --
# add one to each length to account for the newlines in the file.
# Using searchsorted() is expensive, but padding all sequences to
# the same length with N's can make suffix array creation VERY expensive.
# Whether we're searching both strands or just one, we only write one sequence to the suffix array.
# Writing the sequence and its reverse complement makes indexing complicated,
# so instead we take the reverse complement of the search motif, which is simpler here.
seq_lens = np.array([len(s)+1 for i,n,s in fasta])
seq_offsets = seq_lens.cumsum() - 1 # the 0-based index within the file at which each sequence ends (just past last base)
# As an alternative to binary search, without padding all genes to the same length --
# maintain a lookup table that maps positions in the file to sequence numbers.
# This table will require one int per byte in the file, or about 4x as large as the sequence data.
# To reduce the size, pad all sequences so their total length (plus trailing newline)
# is evenly divisible by e.g. 32. Then divide indexes from the suffix array search
# by 32 before doing the lookup, and thus the lookup table can be 32x smaller,
# while ensuring that no sequence is padded with more than 31 "N" bases.
# However, I don't think that searchsorted() is a major bottleneck anymore, and so
# this scheme hasn't been implemented yet because the performance gains would be small.
# Cluster sequences ala Linhart et al for binned enrichment.
# This can (partially) correct for differences in base composition between in-group and out-group.
bins = powrs.SeqBins(fasta, evidence, evidence_weights, n_bins=options.bins)
# Sequences now held in memory instead of being written to a tmp file:
out_seq = []
for ident, name, seq in fasta:
out_seq.append(seq)
out_seq.append("\n")
out_seq = "".join(out_seq)
print time.time() - T, "seconds"
print "Building suffix array ..."
T = time.time()
suf = divsufsort.DivSufSort(out_seq)
print time.time() - T, "seconds"
# "global" data needed for calculating scores
data = dict(
bins=bins,
evidence=evidence,
ev_wts=evidence_weights,
G=G, mu=mu,
suf=suf,
seq_lens=seq_lens, seq_offsets=seq_offsets,
options=options)
# Set up for multiprocessing
if options.parallel:
os.nice(10) # reduce our priority, in case the user forgot to run us with "nice"
pool = multiprocessing.Pool(None, set_globals, [data])
map_func = lambda f,i: pool.imap_unordered(f,i)
else:
set_globals(data)
map_func = itertools.imap
if options.load:
all_motifs = util.gzunpickle(options.load)
else:
# Seed our search with small k-mers
print "Searching for all k-mers ..."
T = time.time()
seeds_iter = list(enumerate(itertools.product("ACGT", repeat=options.seed_size)))
if options.verbose: nseeds = 4.**options.seed_size
else: nseeds = 0 # don't print progress
motif_iter = map_func(calc_seed_kmers, [(motif, ii, nseeds) for ii, motif in seeds_iter])
# filtering by score on the fly reduces memory consumption when --allow-N and --seed-size are large
all_motifs = [motif for motif in motif_iter if motif.score >= options.improve_score_limit]
if len(all_motifs) < options.improve_limit:
print "*** Only %i motifs are candidates for improvement ***" % len(all_motifs)
print "*** Lower --improve-score-limit or --improve-limit ***"
# Refine the best seeds until they can't be further improved
print "Refining best motifs ..."
def print_best(prune=False, bar=True):
used_kmers = set()
for ii, best_m in enumerate(all_motifs[:options.improve_limit]):
if prune and best_m.center in used_kmers: continue
print "%s #%i" % (best_m, ii+1)
used_kmers.update(best_m.all_kmers)
if bar: print "="*80
finished_motifs = set()
while True:
all_motifs.sort(reverse=True)
if options.verbose: print_best()
improved_motifs = list(map_func(calc_improved_motif, set(all_motifs[:options.improve_limit]) - finished_motifs))
new_motifs = list(finished_motifs) # we won't adjust them, but they take up slots
keep_going = False
for best_m, is_finished in improved_motifs:
if is_finished: finished_motifs.add(best_m)
else: keep_going = True
new_motifs.append(best_m)
all_motifs = new_motifs
if not keep_going: break
all_motifs.sort(reverse=True)
# Unpruned (final) output can be helpful, even if we're not verbose
if not options.verbose: print_best()
# Final print-out, eliminating close relatives
print_best(prune=True)
if options.save:
util.gzpickle(all_motifs, options.save)
# end save/load block
# begin clustering
# Opportunistic clustering -- highest-scoring clusters get first crack at improving themselves.
all_clust = [powrs.Cluster(m, data) for m in all_motifs if m.score >= options.cluster_score_limit]
all_clust = all_clust[:options.cluster_limit]
merge_memo = {} # {(clust1, clust2) : new_clust}
print "Trying to merge %i motifs ..." % len(all_clust)
while True:
for clust1, clust2 in itertools.combinations(all_clust, 2):
lost_motifs = []
if (clust1, clust2) in merge_memo:
new_clust = merge_memo[clust1, clust2]
elif (clust2, clust1) in merge_memo:
assert False, "I don't think this can ever happen"
new_clust = merge_memo[clust2, clust1]
else:
new_clust = merge_memo[clust1, clust2] = clust1.try_merge(clust2)
# Single stranded motifs are dropped when we take the revcomp,
# so they will be "lost" to the clustering process at this stage,
# unless we capture them and return them to the pool.
if new_clust is None and clust2.revcomp is not None:
new_clust = merge_memo[clust1, clust2] = clust1.try_merge(clust2.revcomp)
lost_motifs = clust2.revcomp.lost_motifs
if new_clust is None and clust1.revcomp is not None:
new_clust = merge_memo[clust1, clust2] = clust1.revcomp.try_merge(clust2)
lost_motifs = clust1.revcomp.lost_motifs
if new_clust is None: continue
all_clust.remove(clust1)
all_clust.remove(clust2)
all_clust.append(new_clust)
# If we dropped single stranded motifs from the pool due to a revcomp,
# return them to the pool now.
for m in lost_motifs:
all_clust.append(powrs.Cluster(m, data))
all_clust.sort(reverse=True)
if options.verbose: print "Merged %s and %s" % (clust1, clust2)
break
else:
break # nothing was merged, quit trying
used_seeds = set()
used_kmers = set()
cluster_num = 1
for clust in all_clust:
if len(clust) == 1:
motif = clust.motifs[0] # the only one
# Suppress singleton rev. comps. of motifs that were clustered:
if motif.center in used_seeds: continue
# These are mostly uninteresting -- could have been merged, but failed to improve the score.
# (Or had a non-overlapping range, but that's pretty unlikely.)
if motif.center in used_kmers: continue
print "-"*80
print motif
else:
max_off = max(clust.offsets)
spacer = " " * (2*(options.seed_size + max_off) + 3)
fname = "cluster_%03i_%i_%i.seq" % (cluster_num, clust.start_user, clust.end_user)
print "-"*80
print "%8.2f [%6.1f] %s %6i %6i (%i motifs in %s)" % (
clust.score, clust.evidence, spacer, clust.start_user, clust.end_user, len(clust), fname)
for motif, offset in zip(clust.motifs, clust.offsets):
print motif.as_str(offset, max_off)
used_seeds.add(motif.center)
if motif.both_strands: used_seeds.add(dnaseq.reverse_complement(motif.center))
f = open(fname, "wb")
for seq in powrs.extract_positive_seqs(clust, evidence, suf, seq_lens, seq_offsets, options.align_5p, options.midgap):
f.write(seq)
f.write("\n")
f.close()
cluster_num += 1
used_kmers.update(clust.all_kmers)
print
print "Try: for f in cluster_*.seq; do seqlogo.sh $f ${f/.seq/.pdf}; done"
if np.isneginf(all_motifs[0].score):
print
print "*** Try adjusting -A and -B to eliminate -inf scores ***"
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import os
import timeit
import collections
from pyspark import SparkContext
from pyspark.sql import functions as fn
from pyspark.sql.functions import lit, col, udf, collect_list, concat_ws, first, create_map, monotonically_increasing_id, row_number
from pyspark.sql.window import Window
from pyspark.sql.types import IntegerType, ArrayType, StringType, LongType, BooleanType
from pyspark.sql import HiveContext
from datetime import datetime, timedelta
from util import write_to_table, write_to_table_with_partition, print_batching_info, resolve_placeholder, load_config, load_batch_config, load_df
from itertools import chain
MAX_USER_IN_BUCKET = 10**9
def date_to_timestamp(dt):
epoch = datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
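# e.g. date_to_timestamp(datetime(1970, 1, 2)) == 86400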
def generate_trainready(hive_context, batch_config,
interval_time_in_seconds,
logs_table_name, trainready_table, aid_bucket_num):
def remove_no_show_records(df):
w = Window.partitionBy('aid', 'interval_starting_time', 'keyword_index')
df = df.withColumn('_show_counts', fn.sum(fn.when(col('is_click') == 0, 1).otherwise(0)).over(w))
df = df.filter(fn.udf(lambda x: x > 0, BooleanType())(df._show_counts))
return df
def group_batched_logs(df_logs):
        # group logs by aid + interval_time + keyword.
        # group 1: group by aid + interval_starting_time + keyword_index
df = df_logs.groupBy('aid', 'interval_starting_time', 'keyword_index').agg(
first('keyword').alias('keyword'),
first('age').alias('age'),
first('gender_index').alias('gender_index'),
first('aid_bucket').alias('aid_bucket'),
fn.sum(col('is_click')).alias('kw_clicks_count'),
fn.sum(fn.when(col('is_click') == 0, 1).otherwise(0)).alias('kw_shows_count'),
)
# df = df.orderBy('keyword_index')
df = df.withColumn('kwi_clicks_count', concat_ws(":", col('keyword_index'), col('kw_clicks_count')))
df = df.withColumn('kwi_shows_count', concat_ws(":", col('keyword_index'), col('kw_shows_count')))
df = df.withColumn('kw_clicks_count', concat_ws(":", col('keyword'), col('kw_clicks_count')))
df = df.withColumn('kw_shows_count', concat_ws(":", col('keyword'), col('kw_shows_count')))
# group 2: group by did + interval_starting_time
df = df.groupBy('aid', 'interval_starting_time').agg(
concat_ws(",", collect_list('keyword_index')).alias('kwi'),
concat_ws(",", collect_list('kwi_clicks_count')).alias('kwi_click_counts'),
concat_ws(",", collect_list('kwi_shows_count')).alias('kwi_show_counts'),
concat_ws(",", collect_list('keyword')).alias('interval_keywords'),
concat_ws(",", collect_list('kw_clicks_count')).alias('kw_click_counts'),
concat_ws(",", collect_list('kw_shows_count')).alias('kw_show_counts'),
first('age').alias('age'),
first('gender_index').alias('gender_index'),
first('aid_bucket').alias('aid_bucket')
)
return df
def sort_kwi_counts(unsorted_x):
unsorted_x = "{" + unsorted_x + "}"
d = eval(unsorted_x)
f = collections.OrderedDict(sorted(d.items()))
k = [str(i) + ':' + str(j) for i, j in f.iteritems()]
sorted_x = ','.join(k)
return sorted_x
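    # e.g. sort_kwi_counts("14:2,3:5") -> "3:5,14:2" (sorted by integer keyword index)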
def sort_kwi(unsorted_kwi):
l = [int(el) for el in unsorted_kwi.split(",")]
l.sort()
l = [str(item) for item in l]
sorted_kwi=','.join(l)
return sorted_kwi
def collect_trainready(df_trainready_batched_temp):
        # group 3: group by aid with the temp batched aid-interval rows.
df = df_trainready_batched_temp
features = ['interval_starting_time', 'interval_keywords', 'kwi', 'kwi_click_counts', 'kwi_show_counts']
agg_attr_list = list(chain(*[(lit(attr), col(attr)) for attr in df.columns if attr in features]))
df = df.withColumn('attr_map', create_map(agg_attr_list))
df = df.groupBy('aid').agg(
collect_list('attr_map').alias('attr_map_list'),
first('age').alias('age'),
first('gender_index').alias('gender_index'),
first('aid_bucket').alias('aid_bucket')
)
return df
def build_feature_array(df):
'''
df['attr_map_list']=
[{u'kwi': u'14', u'interval_starting_time': u'1576713600', u'kwi_show_counts': u'14:2', u'kwi_click_counts': u'14:0', u'interval_keywords': u'info'},
{u'kwi': u'14,29', u'interval_starting_time': u'1576886400', u'kwi_show_counts': u'14:2,29:4', u'kwi_click_counts': u'14:0,29:0', u'interval_keywords': u'info,video'},
{u'kwi': u'14', u'interval_starting_time': u'1576800000', u'kwi_show_counts': u'14:4', u'kwi_click_counts': u'14:0', u'interval_keywords': u'info'}],
'''
def udf_function(attr_map_list):
tmp_list = []
for _dict in attr_map_list:
tmp_list.append((_dict['interval_starting_time'], _dict))
tmp_list.sort(reverse=True, key=lambda x: x[0])
interval_starting_time = []
interval_keywords = []
kwi = []
kwi_show_counts = []
kwi_click_counts = []
for time, _dict in tmp_list:
interval_starting_time.append(str(time))
interval_keywords.append(_dict['interval_keywords'])
kwi.append(_dict['kwi'])
kwi_show_counts.append(_dict['kwi_show_counts'])
kwi_click_counts.append(_dict['kwi_click_counts'])
return [interval_starting_time, interval_keywords, kwi, kwi_show_counts, kwi_click_counts]
df = df.withColumn('metrics_list', udf(udf_function, ArrayType(ArrayType(StringType())))(col('attr_map_list')))
return df
trainready_table_temp = trainready_table + '_temp'
timer_start = timeit.default_timer()
'''
    1. Find the intervals per user aid.
    2. Aggregate on time and keywords so that we have one record per user for each interval.
e.g.
interval = day
unique users per day = 100m
number of records per interval = 100m
'''
start_date, end_date, load_minutes = batch_config
starting_time = datetime.strptime(start_date, "%Y-%m-%d")
ending_time = datetime.strptime(end_date, "%Y-%m-%d")
all_intervals = set()
st = date_to_timestamp(starting_time)
et = date_to_timestamp(ending_time)
x = st
while x < et:
interval_point = x - x % interval_time_in_seconds
all_intervals.add(interval_point)
x += interval_time_in_seconds
all_intervals = list(all_intervals)
all_intervals.sort()
batched_round = 1
for aid_bucket in range(aid_bucket_num):
for interval_point in all_intervals:
'''
We need the days since we have days partitions.
'''
day_lower = datetime.fromtimestamp(interval_point).strftime("%Y-%m-%d")
day_upper = datetime.fromtimestamp(interval_point+interval_time_in_seconds).strftime("%Y-%m-%d")
command = """SELECT *
FROM {}
WHERE
day >= '{}' AND day <= '{}' AND
interval_starting_time = '{}' AND
aid_bucket= '{}' """
df_logs = hive_context.sql(command.format(logs_table_name, day_lower, day_upper, interval_point, aid_bucket))
df_logs = remove_no_show_records(df_logs)
df_trainready = group_batched_logs(df_logs)
df_trainready = df_trainready.withColumn('kwi_click_counts', udf(sort_kwi_counts, StringType())(df_trainready.kwi_click_counts))
df_trainready = df_trainready.withColumn('kwi_show_counts', udf(sort_kwi_counts, StringType())(df_trainready.kwi_show_counts))
df_trainready = df_trainready.withColumn('kwi', udf(sort_kwi, StringType())(df_trainready.kwi))
mode = 'overwrite' if batched_round == 1 else 'append'
write_to_table_with_partition(df_trainready, trainready_table_temp, partition=('aid_bucket'), mode=mode)
batched_round += 1
'''
    Now we need to aggregate per user over all days to create the whole record.
e.g.
For
100 days
100M unique users per day
10 User buckets
We need cluster that can fit 1000M=1G records.
If not possible we need to increase user bucket number.
'''
shift = 0
batched_round = 1
for aid_bucket in range(aid_bucket_num):
command = """SELECT *
FROM {}
WHERE
aid_bucket= '{}' """
df = hive_context.sql(command.format(trainready_table_temp, aid_bucket))
df = collect_trainready(df)
df = build_feature_array(df)
'''
at this point df is like below
[Row(age=6, gender=0, aid=u'773e03d2bc89d49c0c9c60270ee650e555abdf32cf5305c9fe27f081e1e64d91', metrics_list=[[u'1576800000'], [u'25'], [u'25:1'], [u'25:0']], aid_bucket=u'0')]
'''
for i, feature_name in enumerate(['interval_starting_time', 'interval_keywords', 'kwi', 'kwi_show_counts', 'kwi_click_counts']):
df = df.withColumn(feature_name, col('metrics_list').getItem(i))
        # Keep only users with more than 10 active intervals
df = df.filter(udf(lambda x: len(x) > 10, BooleanType())(df.interval_starting_time))
        # Add aid_index
w = Window.orderBy("aid_bucket", "aid")
df = df.withColumn('row_number', row_number().over(w))
df = df.withColumn('aid_index', udf(lambda x: shift + x, LongType())(col('row_number')))
# df = df.withColumn('aid_index', udf(lambda x: aid_bucket * (MAX_USER_IN_BUCKET) + x, LongType())(col('row_number')))
df = df.select('age', 'gender_index', 'aid', 'aid_index', 'interval_starting_time', 'interval_keywords',
'kwi', 'kwi_show_counts', 'kwi_click_counts', 'aid_bucket')
mode = 'overwrite' if batched_round == 1 else 'append'
write_to_table_with_partition(df, trainready_table, partition=('aid_bucket'), mode=mode)
batched_round += 1
shift += df.count()
return
def run(hive_context, cfg):
cfg_logs = cfg['pipeline']['main_logs']
cfg_clean = cfg['pipeline']['main_clean']
logs_table_name = cfg_logs['logs_output_table_name']
interval_time_in_seconds = cfg_logs['interval_time_in_seconds']
cfg_train = cfg['pipeline']['main_trainready']
trainready_table = cfg_train['trainready_output_table']
aid_bucket_num = cfg_clean['did_bucket_num']
batch_config = load_batch_config(cfg)
generate_trainready(hive_context, batch_config, interval_time_in_seconds, logs_table_name, trainready_table, aid_bucket_num)
if __name__ == "__main__":
"""
    This program performs the following:
        adds normalized data by adding indexes of features
        groups data into time intervals and aids (labeled by aid)
"""
sc, hive_context, cfg = load_config(description="pre-processing train ready data")
hive_context.setConf("hive.exec.dynamic.partition", "true")
hive_context.setConf("hive.exec.dynamic.partition.mode", "nonstrict")
resolve_placeholder(cfg)
run(hive_context=hive_context, cfg=cfg)
sc.stop()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional, Any, Final
import boto3
from fbpcp.decorator.error_handler import error_handler
from fbpcp.decorator.metrics import request_counter, duration_time, error_counter
from fbpcp.entity.cluster_instance import Cluster
from fbpcp.entity.container_definition import ContainerDefinition
from fbpcp.entity.container_instance import ContainerInstance
from fbpcp.error.pcp import PcpError
from fbpcp.gateway.aws import AWSGateway
from fbpcp.mapper.aws import (
map_ecstask_to_containerinstance,
map_esccluster_to_clusterinstance,
map_ecstaskdefinition_to_containerdefinition,
)
from fbpcp.metrics.emitter import MetricsEmitter
from fbpcp.metrics.getter import MetricsGetter
METRICS_RUN_TASK_COUNT = "aws.ecs.run_task.count"
METRICS_RUN_TASK_ERROR_COUNT = "aws.ecs.run_task.error.count"
METRICS_RUN_TASK_DURATION = "aws.ecs.run_task.duration"
class ECSGateway(AWSGateway, MetricsGetter):
def __init__(
self,
region: str,
access_key_id: Optional[str] = None,
access_key_data: Optional[str] = None,
config: Optional[Dict[str, Any]] = None,
metrics: Optional[MetricsEmitter] = None,
) -> None:
super().__init__(region, access_key_id, access_key_data, config)
# pyre-ignore
self.client = self.create_ecs_client()
self.metrics: Final[Optional[MetricsEmitter]] = metrics
def has_metrics(self) -> bool:
return self.metrics is not None
def get_metrics(self) -> MetricsEmitter:
if not self.metrics:
raise PcpError("ECSGateway doesn't have metrics emitter")
return self.metrics
# TODO: Create an interface to create a client per environment
def create_ecs_client(
self,
) -> boto3.client: # pyre-fixme boto3.client is not recognized
return boto3.client("ecs", region_name=self.region, **self.config)
@error_counter(METRICS_RUN_TASK_ERROR_COUNT)
@request_counter(METRICS_RUN_TASK_COUNT)
@duration_time(METRICS_RUN_TASK_DURATION)
@error_handler
def run_task(
self,
task_definition: str,
container: str,
cmd: str,
cluster: str,
subnets: List[str],
env_vars: Optional[Dict[str, str]] = None,
) -> ContainerInstance:
environment = []
if env_vars:
environment = [
{"name": env_name, "value": env_value}
for env_name, env_value in env_vars.items()
]
response = self.client.run_task(
taskDefinition=task_definition,
cluster=cluster,
networkConfiguration={
"awsvpcConfiguration": {
"subnets": subnets,
"assignPublicIp": "ENABLED",
}
},
overrides={
"containerOverrides": [
{
"name": container,
"command": [cmd],
"environment": environment,
}
]
},
)
if not response["tasks"]:
# common failures: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html
failure = response["failures"][0]
self.logger.error(f"ECSGateway failed to create a task. Failure: {failure}")
raise PcpError(f"ECS failure: reason: {failure['reason']}")
return map_ecstask_to_containerinstance(response["tasks"][0])
@error_handler
def describe_tasks(
self, cluster: str, tasks: List[str]
) -> List[Optional[ContainerInstance]]:
response = self.client.describe_tasks(
cluster=cluster, tasks=tasks
) # not necessarily in order of `tasks`
arn_to_instance: Dict[str, Optional[ContainerInstance]] = {}
for resp_task_dict in response["tasks"]:
arn_to_instance[
resp_task_dict["taskArn"]
] = map_ecstask_to_containerinstance(resp_task_dict)
for failure in response["failures"]:
self.logger.error(
f"ECSGateway failed to describe a task {failure['arn']}, reason: {failure['reason']}"
)
return [arn_to_instance.get(arn, None) for arn in tasks]
@error_handler
def describe_task(self, cluster: str, task: str) -> Optional[ContainerInstance]:
return self.describe_tasks(cluster, [task])[0]
@error_handler
def list_tasks(self, cluster: str) -> List[str]:
return self.client.list_tasks(cluster=cluster)["taskArns"]
@error_handler
def stop_task(self, cluster: str, task_id: str) -> None:
self.client.stop_task(
cluster=cluster,
task=task_id,
)
@error_handler
def describe_clusters(
self,
clusters: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[Cluster]:
if not clusters:
clusters = self.list_clusters()
response = self.client.describe_clusters(clusters=clusters, include=["TAGS"])
cluster_instances = [
map_esccluster_to_clusterinstance(cluster)
for cluster in response["clusters"]
]
if tags:
return list(
filter(
lambda cluster_instance: tags.items()
<= cluster_instance.tags.items(),
cluster_instances,
)
)
return cluster_instances
@error_handler
def describe_cluster(self, cluster: str) -> Cluster:
return self.describe_clusters(clusters=[cluster])[0]
@error_handler
def list_clusters(self) -> List[str]:
return self.client.list_clusters()["clusterArns"]
@error_handler
    def describe_task_definition(self, task_definition: str) -> ContainerDefinition:
        return self._describe_task_definition_core(self.client, task_definition)
    def _describe_task_definition_core(
        self,
        client: boto3.client,
        task_definition: str,
    ) -> ContainerDefinition:
        response = client.describe_task_definition(
            taskDefinition=task_definition, include=["TAGS"]
        )
return map_ecstaskdefinition_to_containerdefinition(
response["taskDefinition"], response["tags"]
)
@error_handler
def list_task_definitions(self) -> List[str]:
return self.client.list_task_definitions()["taskDefinitionArns"]
@error_handler
def describe_task_definitions(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
for arn in task_definitions:
container_definition = self.describe_task_definition(arn)
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
@error_handler
def describe_task_definitions_in_parallel(
self,
task_definitions: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
max_workers: int = 8,
) -> List[ContainerDefinition]:
if not task_definitions:
task_definitions = self.list_task_definitions()
container_definitions = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
input_arguments = [
(
self.create_ecs_client(),
definition,
)
for definition in task_definitions
]
results = executor.map(
lambda args: self._describe_task_definition_core(*args),
input_arguments,
)
for container_definition in results:
if tags is None or tags.items() <= container_definition.tags.items():
container_definitions.append(container_definition)
return container_definitions
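# A minimal usage sketch (the region is a placeholder; credentials are taken
# from the default AWS chain):
if __name__ == "__main__":
    gateway = ECSGateway(region="us-west-2")
    print(gateway.list_clusters())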
|
import pysmurf
# The SmurfControl session below is the original (machine-specific) setup
# line, re-enabled so that `S` is defined for the bias ramp that follows.
S = pysmurf.SmurfControl(make_logfile=False, setup=False, epics_root='test_epics', cfg_file='/usr/local/controls/Applications/smurf/pysmurf/pysmurf/cfg_files/experiment_fp28_smurfsrv04.cfg')
import numpy as np
import time
Vrange=np.linspace(0,0.195/6.,100)+S.get_tes_bias_bipolar(3)
Vrange=[Vrange,Vrange[::-1]]
Vrange=np.array(Vrange).flatten()
while True:
for Vtes in Vrange:
S.set_tes_bias_bipolar(7,Vtes)
time.sleep(0.005)
|
from setuptools import setup, find_packages
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
name='botlander',
author="Lucas Eliaquim",
author_email='lucas_m-santos@hotmail.com',
version='1.0',
    description='Your package short description.',
include_package_data=True,
url='https://gitlab.com/LEMSantos/botlander',
zip_safe=False,
packages=find_packages(include=['botlander', 'botlander.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
install_requires=[
'flask',
],
)
|
import bball
def main():
team1 = bball.Team('lakers')
team2 = bball.Team('celtics')
team1.detailed_players_info()
team2.detailed_players_info()
game = bball.Game(team1, team2)
game.start()
game.save_stats()
if __name__ == '__main__':
main()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""saved_model_estimator python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import saved_model_estimator
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
saved_model_estimator.__all__ = [
s for s in dir(saved_model_estimator) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.saved_model_estimator import *
|
# -*- coding: utf-8 -*-
import os
import json
import warnings
import numpy as np
from torch.utils.data import Dataset
warnings.filterwarnings('ignore')
def pc_normalize(pc):
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = pc / m
return pc
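# pc_normalize centers the cloud at its centroid and scales it into the unit
# sphere: pc' = (pc - mean(pc)) / max_i ||pc_i - mean(pc)||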
class PartNormalDataset(Dataset):
def __init__(self,root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal', npoints=2500, split='train', class_choice=None, normal_channel=False):
self.npoints = npoints
self.root = root
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.normal_channel = normal_channel
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
self.cat = {k: v for k, v in self.cat.items()}
self.classes_original = dict(zip(self.cat, range(len(self.cat))))
        if class_choice is not None:
self.cat = {k:v for k,v in self.cat.items() if k in class_choice}
# print(self.cat)
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
# print('category', item)
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item])
fns = sorted(os.listdir(dir_point))
# print(fns[0][0:-4])
if split == 'trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif split == 'train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif split == 'val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif split == 'test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..' % (split))
exit(-1)
# print(os.path.basename(fns))
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append(os.path.join(dir_point, token + '.txt'))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn))
self.classes = {}
for i in self.cat.keys():
self.classes[i] = self.classes_original[i]
# Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels
self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],
'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],
'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],
'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
# for cat in sorted(self.seg_classes.keys()):
# print(cat, self.seg_classes[cat])
self.cache = {} # from index to (point_set, cls, seg) tuple
self.cache_size = 20000
def __getitem__(self, index):
if index in self.cache:
point_set, cls, seg = self.cache[index]
else:
fn = self.datapath[index]
cat = self.datapath[index][0]
cls = self.classes[cat]
cls = np.array([cls]).astype(np.int32)
data = np.loadtxt(fn[1]).astype(np.float32)
if not self.normal_channel:
point_set = data[:, 0:3]
else:
point_set = data[:, 0:6]
seg = data[:, -1].astype(np.int32)
if len(self.cache) < self.cache_size:
self.cache[index] = (point_set, cls, seg)
point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
choice = np.random.choice(len(seg), self.npoints, replace=True)
# resample
point_set = point_set[choice, :]
seg = seg[choice]
return point_set, cls, seg
def __len__(self):
return len(self.datapath)
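# A minimal usage sketch, assuming the ShapeNetPart benchmark has been
# downloaded to the default `root` path above:
if __name__ == '__main__':
    dataset = PartNormalDataset(split='train', npoints=2500)
    point_set, cls, seg = dataset[0]
    print(len(dataset), point_set.shape, cls, seg.shape)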
|
__author__ = 'palmer'
|
import json
import mmap
from tqdm import tqdm
import string
from .text_dataset import TextDataset, TextDatasetCache
from typing import Tuple, List
import os
import tarfile
class CFQ(TextDataset):
URL = "https://storage.cloud.google.com/cfq_dataset/cfq1.1.tar.gz"
def tokenize_punctuation(self, text):
# From https://github.com/google-research/google-research/blob/master/cfq/preprocess.py
text = map(lambda c: ' %s ' % c if c in string.punctuation else c, text)
return ' '.join(''.join(text).split())
def preprocess_sparql(self, query):
# From https://github.com/google-research/google-research/blob/master/cfq/preprocess.py
"""Do various preprocessing on the SPARQL query."""
# Tokenize braces.
query = query.replace('count(*)', 'count ( * )')
tokens = []
for token in query.split():
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens).replace('\\n', ' ')
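    # e.g. preprocess_sparql("SELECT count(*) WHERE { ns:m.0abc a ns:film.film }")
    #      -> "SELECT count ( * ) WHERE { m_0abc a film.film }"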
def load_data(self, fname: str) -> Tuple[List[str], List[str]]:
# Split the JSON manually, otherwise it requires infinite RAM and is very slow.
pin = "complexityMeasures".encode()
offset = 1
cnt = 0
inputs = []
outputs = []
with open(fname, "r") as f:
data = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
pbar = tqdm(total=len(data))
pbar.update(offset)
while True:
pos = data.find(pin, offset+6)
if pos < 0:
this = data[offset: len(data)-2]
else:
this = data[offset: pos-5]
new_offset = pos - 4
pbar.update(new_offset - offset)
offset = new_offset
d = json.loads(this.decode())
inputs.append(self.tokenize_punctuation(d["questionPatternModEntities"]))
outputs.append(self.preprocess_sparql(d["sparqlPatternModEntities"]))
cnt += 1
if pos < 0:
break
return inputs, outputs
def build_cache(self) -> TextDatasetCache:
index_table = {}
if not os.path.isdir(os.path.join(self.cache_dir, "cfq")):
gzfile = os.path.join(self.cache_dir, os.path.basename(self.URL))
if not os.path.isfile(gzfile):
assert False, f"Please download {self.URL} and place it in the {os.path.abspath(self.cache_dir)} "\
"folder. Google login needed."
with tarfile.open(gzfile, "r") as tf:
tf.extractall(path=self.cache_dir)
splitdir = os.path.join(self.cache_dir, "cfq", "splits")
for f in os.listdir(splitdir):
if not f.endswith(".json"):
continue
name = f[:-5].replace("_split", "")
with open(os.path.join(splitdir, f), "r") as f:
ind = json.loads(f.read())
index_table[name] = {
"train": ind["trainIdxs"],
"val": ind["devIdxs"],
"test": ind["testIdxs"]
}
in_sentences, out_sentences = self.load_data(os.path.join(self.cache_dir, "cfq/dataset.json"))
assert len(in_sentences) == len(out_sentences)
return TextDatasetCache().build(index_table, in_sentences, out_sentences, split_punctuation=False)
|
# -*- coding: utf-8 -*-
#
# Copyright IBM Corp. - Confidential Information
#
# Util classes for Splunk
#
import splunklib.client as splunk_client
import splunklib.results as splunk_results
import time
import requests
import urllib
from xml.dom import minidom
import json
import logging
LOG = logging.getLogger(__name__)
# Constants
SPLUNK_SECTION = "splunk_integration"
class SearchFailure(Exception):
""" Search failed to execute """
def __init__(self, search_id, search_status):
fail_msg = "Query [{}] failed with status [{}]".format(search_id, search_status)
super(SearchFailure, self).__init__(fail_msg)
self.search_status = search_status
class SearchTimeout(Exception):
""" Query failed to complete in time specified """
def __init__(self, search_id, search_status):
fail_msg = "Query [{}] timed out. Final Status was [{}]".format(search_id, search_status)
super(SearchTimeout, self).__init__(fail_msg)
self.search_status = search_status
class SearchJobFailure(Exception):
""" Search job creation failure"""
def __init__(self, query):
fail_msg = "Failed to create search job for query [{}] ".format(query)
super(SearchJobFailure, self).__init__(fail_msg)
class RequestError(Exception):
""" Request error"""
def __init__(self, url, message):
fail_msg = "Request to url [{}] throws exception. Error [{}]".format(url, message)
super(RequestError, self).__init__(fail_msg)
class DeleteError(Exception):
""" Request error"""
def __init__(self, url, message):
fail_msg = "Delete request to url [{}] throws exception. Error [{}]".format(url, message)
super(DeleteError, self).__init__(fail_msg)
class SplunkClient(object):
""" Wrapper of splunklib.client"""
# member variables
splunk_service = None
time_out = 600
polling_interval = 5
max_return = 0
def __init__(self, host, port, username, password, verify=True):
"""Init splunk_service"""
self.splunk_service = self.connect(host, port, username, password, verify)
@staticmethod
def connect(host, port, username, password, verify):
"""
Connect to Splunk
:param host: hostname for splunk
:param port: port for splunk
:param username: user name to login
:param password: password to login
:param verify: True to validate the SSL cert
:return:
"""
LOG.info("Splunk SDK verify flag is {}".format(verify))
return splunk_client.connect(host=host,
port=port,
username=username,
password=password,
verify=verify)
def set_timeout(self, timeout):
self.time_out = timeout
def set_polling_interval(self, pollinginterval):
self.polling_interval = pollinginterval
    def set_max_return(self, max_return):
        self.max_return = max_return
def start_search(self, query, job_ttl=None):
"""Start a search for a query"""
query_args = {"search_mode": "normal",
"enable_lookups": True}
if self.max_return:
query_args["max_count"] = self.max_return
job = None
try:
job = self.splunk_service.jobs.create(query, **query_args)
if job_ttl:
job.set_ttl(job_ttl)
except Exception as e:
LOG.exception("Search job creation failed")
#
# If we failed to create a search job, it does not make sense to go further
#
raise SearchJobFailure(query)
return job
def execute_query(self, query):
"""
Execute splunk query
:param query: query string
:return:
"""
result = dict()
LOG.debug("Query: {}" .format(query))
splunk_job = self.start_search(query)
# Poll Splunk for result
start_time = time.time()
done = False
while not done:
            if splunk_job.is_ready():
                splunk_job.refresh()
                done = splunk_job["dispatchState"] in ("FAILED", "DONE")
stats = {"name": splunk_job.name,
"isDone": splunk_job.isDone,
"scanCount": int(splunk_job["scanCount"]),
"eventCount": int(splunk_job["eventCount"]),
"doneProgress": float(splunk_job["doneProgress"]) * 100,
"resultCount": int(splunk_job["resultCount"])}
status = ("\r%(doneProgress)03.1f%% %(scanCount)d scanned "
"%(eventCount)d matched %(resultCount)d results") % stats
LOG.debug(status)
if not done:
                if self.time_out != 0:
if time.time() - start_time > self.time_out:
#
# old sdk
#splunk_client.cancel_search(splunk_job)
#
splunk_job.cancel()
raise SearchTimeout(splunk_job.name, splunk_job["dispatchState"])
time.sleep(self.polling_interval)
if splunk_job["dispatchState"] != "DONE" or splunk_job["isFailed"] == True:
raise SearchFailure(splunk_job.name, splunk_job["dispatchState"] + u", " + unicode(splunk_job["messages"]))
reader = splunk_results.ResultsReader(splunk_job.results())
result = {"events": [row for row in reader]}
return result
class SplunkUtils(object):
""" Use python requests to call Splunk REST API"""
# Member variables
session_key = ""
base_url = ""
SUPPORTED_THREAT_TYPE = ["ip_intel", "file_intel", "user_intel", "http_intel",
"email_intel", "service_intel", "process_intel",
"registry_intel", "certificate_intel"]
def __init__(self, host, port, username, password, verify):
self.base_url = "https://{}:{}".format(host, port)
self.get_session_key(username, password, verify)
def get_session_key(self, username, password, verify):
"""
Get session_key from Splunk server
:param username: user name for splunk login
:param password: password for splunk login
:param verify: verify HTTPS cert or not
:return:
"""
headers = dict()
headers["Accept"] = "application/html"
url = self.base_url + "/services/auth/login"
try:
resp = requests.post(url,
headers=headers,
data=urllib.urlencode({"username": username,
"password": password}),
verify=verify)
#
# This one we only allows 200. Otherwise login failed
#
if resp.status_code == 200:
# docs.splunk.com/Documentation/Splunk/7.0.2/RESTTUT/RESTsearches
self.session_key = minidom.parseString(resp.content).getElementsByTagName("sessionKey")[0].childNodes[
0].nodeValue
else:
error_msg = "Splunk login failed for user {} with status {}".format(username, resp.status_code)
raise RequestError(url, error_msg)
except Exception as e:
raise e
return
def update_notable(self, event_id, comment, status, cafile):
"""
Update notable event
:param event_id: event_id for notable event to be updated
:param comment: comment to add to the notable event
:param status: status of the notable event to change to
:param cafile: Verify HTTPS cert or not
:return:
"""
headers = dict()
headers["Authorization"] = "Splunk {}".format(self.session_key)
args = dict()
args["comment"] = comment
args["status"] = status
args["ruleUIDs"] = [event_id]
ret = None
url = self.base_url + "/services/notable_update"
try:
resp = requests.post(url,
headers=headers,
data=args,
verify=cafile)
#
# We shall just return the response in json and let the post process
# to make decision.
#
ret = {"status_code": resp.status_code,
"content": resp.json()}
except requests.ConnectionError as e:
raise RequestError(url, "Connection error. " + str(e))
except requests.HTTPError as e:
raise RequestError(url, "An HTTP error. " + str(e))
except requests.URLRequired as e:
raise RequestError(url, "An valid URL is required.")
except requests.TooManyRedirects as e:
raise RequestError(url, "Too many redirects")
except requests.RequestException as e:
raise RequestError(url, "Ambiguous exception when handling request. " + str(e))
return ret
def delete_threat_intel_item(self, threat_type, item_key, cafile):
"""
Delete an item from the threat_intel collections.
:param threat_type: ip_intel, file_intel, user_intel, http_intel, email_intel, service_intel
process_intel, registry_intel, or certificate_intel
:param item_key: the _key for ite to delete
:param cafile: CA cert or False to skip cert verification
:return:
"""
headers = dict()
headers["Authorization"] = "Splunk {}".format(self.session_key)
url = "{0}/services/data/threat_intel/item/{1}/{2}".format(self.base_url, threat_type, item_key)
if threat_type not in self.SUPPORTED_THREAT_TYPE:
raise RequestError(url, "{} is not supported")
ret = {}
try:
resp = requests.delete(url,
headers=headers,
verify=cafile)
#
# We shall just return the response in json and let the post process
# to make decision.
#
ret = {"status_code": resp.status_code,
"content": resp.json()}
except Exception as e:
raise DeleteError(url, "Failed to delete: {}".format(str(e)))
return ret
def add_threat_intel_item(self, threat_type, threat_dict, cafile):
"""
Add a new threat intel item to the ThreatIntelligence collections
:param threat_type: ip_intel, file_intel, user_intel, http_intel, email_intel, service_intel
process_intel, registry_intel, or certificate_intel
:param threat_dict:
:param cafile:
:return:
"""
headers = dict()
headers["Authorization"] = "Splunk {}".format(self.session_key)
url = self.base_url + "/services/data/threat_intel/item/" + threat_type
if threat_type not in self.SUPPORTED_THREAT_TYPE:
raise RequestError(url, "{} is not supported")
item = {"item": json.dumps(threat_dict)}
try:
resp = requests.post(url,
headers=headers,
data=item,
verify=cafile)
#
# We shall just return the response in json and let the post process
# to make decision.
#
ret = {"status_code": resp.status_code,
"content": resp.json()}
except requests.ConnectionError as e:
raise RequestError(url, "Connection error. " + str(e))
except requests.HTTPError as e:
raise RequestError(url, "An HTTP error. " + str(e))
except requests.URLRequired as e:
raise RequestError(url, "An valid URL is required.")
except requests.TooManyRedirects as e:
raise RequestError(url, "Too many redirects")
except requests.RequestException as e:
raise RequestError(url, "Ambiguous exception when handling request. " + str(e))
return ret
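# A minimal usage sketch (host, credentials and the query are placeholders):
if __name__ == "__main__":
    client = SplunkClient("splunk.example.com", 8089, "admin", "changeme")
    client.set_timeout(120)
    result = client.execute_query('search index=_internal | head 5')
    print(len(result["events"]))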
|
import tensorflow as tf
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
def visualize(**images):
"""PLot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
plt.show()
def standard_totflite(val):
    # Standard float32 input (normalization mode 0)
    return val.numpy().astype(np.float32)
def symmetric_totflite(val):
    # Symmetric normalization (mode 1); currently identical to the standard path
    return val.numpy().astype(np.float32)
def ui8_totflite(val):
    # Scale [0, 1] floats to uint8 for fully-quantized models (mode -1)
    return (val * 255).numpy().astype(np.uint8)
totflite_dict = {}
totflite_dict[-1] = ui8_totflite
totflite_dict[0] = standard_totflite
totflite_dict[1] = symmetric_totflite
class Quantizer():
def __init__(self, dataset, model, name, append_datetime=True, batches=1, weights_checkpoint_name=None):
self.dataset = dataset
self.model = model
if append_datetime:
self.name = f'{name}_{datetime.now().strftime("%Y%m%d_%H%M%S")}_'
else:
self.name = name + '_'
self.saved_model_dirname = ''
self.batches = batches
self.tflite_ui8_model = None
self.tflite_f16_model = None
self.normalization = 0
self.weights_checkpoint_name = weights_checkpoint_name
def quantize(self):
def representative_data_gen():
# for input_value in tf.data.Dataset.from_tensor_slices(test_input).batch(1).take(1):
# yield [tf.cast(input_value, tf.float32) /255.]
for i in range(self.batches):
                vals = next(iter(self.dataset))[0]
for val in vals:
yield [tf.expand_dims(tf.cast(val, tf.float32), axis=0)]
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
if self.weights_checkpoint_name is not None:
self.model.load_weights(self.weights_checkpoint_name)
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
self.tflite_ui8_model = converter.convert()
with open(f'{self.name}quant_ui8.tflite', 'wb') as f:
f.write(self.tflite_ui8_model)
if isinstance(self.model, str):
loaded_model = tf.keras.models.load_model(
self.model,
compile=False)
loaded_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(loaded_model)
self.saved_model_dirname = self.model
else:
self.model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
self.saved_model_dirname = self.name + 'saved_model'
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
# Ensure that if any ops can't be quantized, the converter throws an error
self.tflite_f16_model = converter.convert()
with open(f'{self.name}quant_f16.tflite', 'wb') as f:
f.write(self.tflite_f16_model)
'''params = tf.experimental.tensorrt.ConversionParams(
precision_mode='INT8',
maximum_cached_engines=1,
use_calibration=True)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert(calibration_input_fn=representative_data_gen)
converter.save(self.name + 'tensorrt_ui8')
params = tf.experimental.tensorrt.ConversionParams(
precision_mode='FP16',
maximum_cached_engines=4)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=self.saved_model_dirname, conversion_params=params)
converter.convert()
converter.save(self.name + 'tensorrt_f16')'''
def vizualize_ui8_results(self, num_images):
self.vizualize_results(num_images, self.tflite_ui8_model, -1)
def vizualize_f16_results(self, num_images):
self.vizualize_results(
num_images, self.tflite_f16_model, self.normalization)
def vizualize_results(self, num_images, model, normalization):
interpreter = tf.lite.Interpreter(model_content=model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
        it = next(iter(self.dataset))
images = it[0]
labels = it[1]
fig = plt.figure(figsize=(22, 22))
for i in range(num_images):
interpreter.set_tensor(input_details[0]['index'], np.expand_dims(
totflite_dict[normalization](images[i]), axis=0))
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
prediction = np.argmax(output_data, axis=3)[0]
visualize(
image=images[i],
predicted_mask=prediction*255,
reference_mask=np.argmax(labels[i], axis=-1)*255,
)
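# A minimal usage sketch with a tiny synthetic dataset and model, purely for
# illustration; real use passes your own tf.data.Dataset and Keras model:
if __name__ == '__main__':
    images = tf.random.uniform((8, 32, 32, 3))
    labels = tf.one_hot(tf.zeros((8, 32, 32), dtype=tf.int32), depth=2)
    ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(4)
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(2, 3, padding='same', input_shape=(32, 32, 3)),
        tf.keras.layers.Softmax(axis=-1),
    ])
    quantizer = Quantizer(ds, model, name='demo', batches=1)
    quantizer.quantize()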
|
import datetime
import unittest
from hft.backtesting import backtest
import numpy as np
from hft.backtesting.output import StorageOutput
from hft.backtesting.readers import ListReader
from hft.units.metrics.instant import VWAP_volume
from hft.units.metrics.time import TradeMetric
from hft.utils.consts import TradeSides
from hft.utils.data import OrderBook, Trade
from test_utils import TestStrategy
class StrategyTest(unittest.TestCase):
@unittest.skip("works only alone")
def test_balance(self):
callables = [
('_trades volume_total', lambda trades: sum(map(lambda x: x.volume, trades))),
('_trades length', lambda trades: len(trades))
]
instant_metrics = [
VWAP_volume(volumes=[50000, 500000])
]
instant_metric_names = [metric.names() for metric in instant_metrics]
reader = ListReader([
OrderBook('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 200),
                np.array([9.5, 9.0, 8.5, 8.0]), np.array([1000, 100, 100, 100]),
np.array([10.0, 10.5, 11.0, 12.0]), np.array([100, 100, 100, 100])),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 300), TradeSides.SELL, 9.5, 100),
OrderBook('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 300),
np.array([9.5, 9.0, 8.5, 8.0]), np.array([900, 100, 100, 100]),
np.array([10.0, 10.5, 11.0, 12.0]), np.array([100, 100, 100, 100])),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 400), TradeSides.SELL, 9.5, 400),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 400), TradeSides.SELL, 9.5, 500),
OrderBook('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 400),
np.array([9.0, 8.5, 8.0, 7.0]), np.array([100, 100, 100, 200]),
np.array([10.0, 10.5, 11.0, 12.0]), np.array([100, 100, 100, 100])),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 500), TradeSides.SELL, 9.0, 550),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 500), TradeSides.SELL, 9.0, 200),
Trade('test', datetime.datetime(2020, 3, 10, 8, 10, 30, 500), TradeSides.SELL, 9.0, 1300),
])
time_metrics = [TradeMetric(callables, 60), TradeMetric(callables, 30)]
simulation = TestStrategy(instant_metrics, time_metrics_trade=time_metrics, reader=reader)
output = StorageOutput(instant_metric_names=instant_metric_names,
time_metric_names=[metric.metric_names for metric in time_metrics])
backtester = backtest.Backtest(reader, simulation, output)
initial_balance = dict(simulation.balance)
backtester._process_event(reader[0], type(reader[0]) == OrderBook)
for event in reader[:-2]:
backtester._process_event(event, type(event) == OrderBook)
self.assertEqual(initial_balance['USD'] - 550, simulation.balance['USD'])
self.assertAlmostEqual((450 + 100) / 9.5, simulation.balance['test'], delta=1e-3)
for event in reader[-2:]:
backtester._process_event(event, type(event) == OrderBook)
self.assertEqual(initial_balance['USD'] - 650, simulation.balance['USD'])
self.assertAlmostEqual(650.0 / 9.5, simulation.balance['test'], delta=1e-3)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""inference process of text2sql task
"""
import sys
import os
import traceback
import logging
import json
from argparse import ArgumentParser
import numpy as np
from text2sql.framework import register
from text2sql.framework.predictor import Predictor
from text2sql import cli_args
from text2sql.datalib import json_dataset
from text2sql.models.text2sql_model import Text2SQL
if __name__ == "__main__":
args = cli_args.init_args()
logging.basicConfig(level=logging.INFO,
format="%(levelname)s: %(asctime)s %(filename)s"
" [%(funcName)s:%(lineno)d][%(process)d] %(message)s",
datefmt="%m-%d %H:%M:%S",
filename=args.log_file,
filemode='a')
try:
param_dict = cli_args.init_config(args, args.config, args.db_max_len)
# Ignore the corner case where the dirname of save_predict_file happens to be an existing file
if 'save_predict_file' in param_dict["predictor"] and \
not os.path.isdir(os.path.dirname(param_dict["predictor"]['save_predict_file'])):
os.mkdir(os.path.dirname(param_dict["predictor"]['save_predict_file']))
register.import_modules()
dataset = json_dataset.T2SDataSet(param_dict["dataset_reader"])
model = Text2SQL(param_dict["model"], param_dict["predictor"]["save_predict_file"])
predictor = Predictor(param_dict["predictor"], dataset, model)
predictor.do_predict()
except Exception as e:
traceback.print_exc()
if args.log_file is not None:
logging.critical(traceback.format_exc())
sys.exit(-1)
|
# -*- coding: utf-8 -*-
"""
Exceptions raised by MTH5
Created on Wed May 13 19:07:21 2020
@author: jpeacock
"""
# Schema Error
class MTSchemaError(Exception):
pass
class MTTimeError(Exception):
pass
class MTH5Error(Exception):
pass
class MTH5TableError(Exception):
pass
class MTTSError(Exception):
pass
|
#@+leo-ver=5-thin
#@+node:ekr.20140726091031.18152: * @file ../plugins/writers/__init__.py
# A dummy file to make leo.plugins.writers a package.
#@-leo
|
# -*- coding: utf-8 -*-
from .base64analyzer import Base64Analyzer
from base64 import b64decode
import binascii
class Base64AsciiAnalyzer(Base64Analyzer):
"""Analyzer to match base64 strings which decode to valid ASCII"""
name = 'Base64AsciiAnalyzer'
def __init__(self, actions, min_len=1, decode=False):
super().__init__(actions, min_len)
self.decode = decode
def verify(self, results):
"""Method to determine if found base64 decodes to valid ASCII"""
# find valid base64 strings with the parent class
validated_strings = super().verify(results)
# go through each base64 string and attempt to decode
base64_ascii_strings = []
for validated_string in validated_strings:
# Check if the string is valid base64
try:
decoded_string = b64decode(validated_string)
except binascii.Error:
# The string is not valid base64
continue
# Check if the valid base64 decodes to plain ascii
try:
b64_ascii_string = decoded_string.decode('ascii')
except UnicodeDecodeError:
continue
if self.decode:
base64_ascii_strings.append(b64_ascii_string)
else:
base64_ascii_strings.append(validated_string)
return base64_ascii_strings
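# --- Hedged sketch (not part of the original file) ---------------------------
# Minimal standalone restatement of the check performed in verify() above:
# a candidate string is kept only if it is valid base64 AND decodes to ASCII.
def _is_base64_ascii(candidate):
    try:
        b64decode(candidate).decode('ascii')
        return True
    except (binascii.Error, UnicodeDecodeError):
        return False
# Example: 'aGVsbG8=' decodes to 'hello' (ASCII) and would be kept, while
# '/////w==' decodes to non-ASCII bytes and would be dropped.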
|
import numpy as np
from chainer import cuda
from chainercv.links.model.faster_rcnn.utils.bbox2loc import bbox2loc
from chainercv.transforms.image.resize import resize
from chainercv.utils.bbox.bbox_iou import bbox_iou
class ProposalTargetCreator(object):
"""Assign ground truth classes, bounding boxes and masks to given RoIs.
The :meth:`__call__` of this class generates training targets
for each object proposal.
This is used to train FCIS [#FCIS]_.
.. [#FCIS] Yi Li, Haozhi Qi, Jifeng Dai, Xiangyang Ji, Yichen Wei. \
Fully Convolutional Instance-aware Semantic Segmentation. CVPR 2017.
Args:
n_sample (int): The number of sampled regions.
pos_ratio (float): Fraction of regions that is labeled as a
foreground.
pos_iou_thresh (float): IoU threshold for a RoI to be considered as a
foreground.
neg_iou_thresh_hi (float): RoI is considered to be the background
if IoU is in
[:obj:`neg_iou_thresh_lo`, :obj:`neg_iou_thresh_hi`).
neg_iou_thresh_lo (float): See above.
binary_thresh (float): Threshold for resized mask.
"""
def __init__(
self, n_sample=128,
pos_ratio=0.25, pos_iou_thresh=0.5,
neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.1,
binary_thresh=0.4):
self.n_sample = n_sample
self.pos_ratio = pos_ratio
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh_hi = neg_iou_thresh_hi
self.neg_iou_thresh_lo = neg_iou_thresh_lo
self.binary_thresh = binary_thresh
def __call__(
self, roi, mask, label, bbox,
loc_normalize_mean=(0., 0., 0., 0.),
loc_normalize_std=(0.2, 0.2, 0.5, 0.5),
mask_size=(21, 21),
):
"""Assigns ground truth to sampled proposals.
This function samples a total of :obj:`self.n_sample` RoIs
from the combination of :obj:`roi`, :obj:`mask`, :obj:`label`
and :obj:`bbox`. The RoIs are assigned the ground truth class
labels as well as bounding box offsets and scales to match the ground
truth bounding boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs
are sampled as foregrounds.
Offsets and scales of bounding boxes are calculated using
:func:`chainercv.links.model.faster_rcnn.bbox2loc`.
Also, the types of the input and output arrays are the same.
Here are notations.
* :math:`S` is the total number of sampled RoIs, which equals \
:obj:`self.n_sample`.
* :math:`L` is number of object classes possibly including the \
background.
* :math:`H` is the image height.
* :math:`W` is the image width.
* :math:`RH` is the mask height.
* :math:`RW` is the mask width.
Args:
roi (array): Region of Interests (RoIs) from which we sample.
Its shape is :math:`(R, 4)`
mask (array): The coordinates of ground truth masks.
Its shape is :math:`(R', H, W)`.
label (array): Ground truth bounding box labels. Its shape
is :math:`(R',)`. Its range is :math:`[0, L - 1]`, where
:math:`L` is the number of foreground classes.
bbox (array): The coordinates of ground truth bounding boxes.
Its shape is :math:`(R', 4)`.
loc_normalize_mean (tuple of four floats): Mean values to normalize
coordinates of bounding boxes.
loc_normalize_std (tuple of four floats): Standard deviation of
the coordinates of bounding boxes.
mask_size (tuple of int or int): Generated mask size, which is
equal to :math:`(RH, RW)`.
Returns:
(array, array, array, array):
* **sample_roi**: Regions of interests that are sampled. \
Its shape is :math:`(S, 4)`.
* **gt_roi_mask**: Masks assigned to sampled RoIs. Its shape is \
:math:`(S, RH, RW)`.
* **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \
:math:`(S,)`. Its range is :math:`[0, L]`. The label with \
value 0 is the background.
* **gt_roi_loc**: Offsets and scales to match \
the sampled RoIs to the ground truth bounding boxes. \
Its shape is :math:`(S, 4)`.
"""
xp = cuda.get_array_module(roi)
roi = cuda.to_cpu(roi)
mask = cuda.to_cpu(mask)
label = cuda.to_cpu(label)
bbox = cuda.to_cpu(bbox)
if not isinstance(mask_size, tuple):
mask_size = (mask_size, mask_size)
n_bbox, _ = bbox.shape
roi = np.concatenate((roi, bbox), axis=0)
if self.n_sample is None:
n_sample = roi.shape[0]
else:
n_sample = self.n_sample
pos_roi_per_image = np.round(n_sample * self.pos_ratio)
iou = bbox_iou(roi, bbox)
gt_assignment = iou.argmax(axis=1)
max_iou = iou.max(axis=1)
# Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].
# The label with value 0 is the background.
gt_roi_label = label[gt_assignment] + 1
# Select foreground RoIs as those with >= pos_iou_thresh IoU.
pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
if pos_index.size > 0:
pos_index = np.random.choice(
pos_index, size=pos_roi_per_this_image, replace=False)
# Select background RoIs as those within
# [neg_iou_thresh_lo, neg_iou_thresh_hi).
neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &
(max_iou >= self.neg_iou_thresh_lo))[0]
neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
neg_roi_per_this_image = int(min(neg_roi_per_this_image,
neg_index.size))
if neg_index.size > 0:
neg_index = np.random.choice(
neg_index, size=neg_roi_per_this_image, replace=False)
# The indices that we're selecting (both foreground and background).
keep_index = np.append(pos_index, neg_index)
gt_roi_label = gt_roi_label[keep_index]
gt_roi_label[pos_roi_per_this_image:] = 0 # negative labels --> 0
sample_roi = roi[keep_index]
# locs
# Compute offsets and scales to match sampled RoIs to the GTs.
loc_normalize_mean = np.array(loc_normalize_mean, np.float32)
loc_normalize_std = np.array(loc_normalize_std, np.float32)
gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
gt_roi_loc = gt_roi_loc - loc_normalize_mean
gt_roi_loc = gt_roi_loc / loc_normalize_std
# masks
gt_roi_mask = -1 * np.ones(
(len(keep_index), mask_size[0], mask_size[1]),
dtype=np.int32)
for i, pos_ind in enumerate(pos_index):
bb = np.round(sample_roi[i]).astype(np.int)
gt_msk = mask[gt_assignment[pos_ind]]
gt_roi_msk = gt_msk[bb[0]:bb[2], bb[1]:bb[3]]
gt_roi_msk = resize(
gt_roi_msk.astype(np.float32)[None], mask_size)[0]
gt_roi_msk = (gt_roi_msk >= self.binary_thresh).astype(np.int)
gt_roi_mask[i] = gt_roi_msk
if xp != np:
sample_roi = cuda.to_gpu(sample_roi)
gt_roi_mask = cuda.to_gpu(gt_roi_mask)
gt_roi_label = cuda.to_gpu(gt_roi_label)
gt_roi_loc = cuda.to_gpu(gt_roi_loc)
return sample_roi, gt_roi_mask, gt_roi_label, gt_roi_loc
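# --- Hedged usage sketch (not part of the original file) ---------------------
# Illustrative shapes only; the arrays below are random placeholders rather
# than data from any dataset, and S is however many RoIs end up sampled.
if __name__ == '__main__':
    np.random.seed(0)
    roi = np.array([[0., 0., 16., 16.], [8., 8., 24., 24.]], dtype=np.float32)
    bbox = np.array([[0., 0., 16., 16.]], dtype=np.float32)
    label = np.array([2], dtype=np.int32)
    mask = (np.random.uniform(size=(1, 32, 32)) > 0.5).astype(np.int32)
    creator = ProposalTargetCreator(n_sample=4)
    sample_roi, gt_roi_mask, gt_roi_label, gt_roi_loc = creator(
        roi, mask, label, bbox, mask_size=(7, 7))
    # sample_roi: (S, 4), gt_roi_mask: (S, 7, 7),
    # gt_roi_label: (S,), gt_roi_loc: (S, 4)
    print(sample_roi.shape, gt_roi_mask.shape,
          gt_roi_label.shape, gt_roi_loc.shape)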
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.fsm import StateData
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import *
from direct.task import Task
import math
from . import CCharPaths
from toontown.toonbase import ToontownGlobals
class CharNeutralState(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('CharNeutralState')
def __init__(self, doneEvent, character):
StateData.StateData.__init__(self, doneEvent)
self.__doneEvent = doneEvent
self.character = character
StateData.StateData.load(self)
def enter(self, startTrack = None, playRate = None):
StateData.StateData.enter(self)
self.notify.debug('Neutral ' + self.character.getName() + '...')
self.__neutralTrack = Sequence(name=self.character.getName() + '-neutral')
if startTrack:
self.__neutralTrack.append(startTrack)
if playRate:
self.__neutralTrack.append(Func(self.character.setPlayRate, playRate, 'neutral'))
self.__neutralTrack.append(Func(self.character.loop, 'neutral'))
self.__neutralTrack.start()
def exit(self):
StateData.StateData.exit(self)
self.__neutralTrack.finish()
def __doneHandler(self):
doneStatus = {}
doneStatus['state'] = 'walk'
doneStatus['status'] = 'done'
messenger.send(self.__doneEvent, [doneStatus])
return Task.done
class CharWalkState(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('CharWalkState')
def __init__(self, doneEvent, character, diffPath = None):
StateData.StateData.__init__(self, doneEvent)
self.doneEvent = doneEvent
self.character = character
if diffPath is None:
self.paths = CCharPaths.getPaths(character.getName(), character.getCCLocation())
else:
self.paths = CCharPaths.getPaths(diffPath, character.getCCLocation())
self.speed = character.walkSpeed()
self.offsetX = 0
self.offsetY = 0
self.oldOffsetX = 0
self.oldOffsetY = 0
self.walkTrack = None
StateData.StateData.load(self)
return
def enter(self, startTrack = None, playRate = None):
StateData.StateData.enter(self)
self.notify.debug('Walking ' + self.character.getName() + '... from ' + str(self.walkInfo[0]) + ' to ' + str(self.walkInfo[1]))
posPoints = CCharPaths.getPointsFromTo(self.walkInfo[0], self.walkInfo[1], self.paths)
lastPos = posPoints[-1]
newLastPos = Point3(lastPos[0] + self.offsetX, lastPos[1] + self.offsetY, lastPos[2])
posPoints[-1] = newLastPos
firstPos = posPoints[0]
newFirstPos = Point3(firstPos[0] + self.oldOffsetX, firstPos[1] + self.oldOffsetY, firstPos[2])
posPoints[0] = newFirstPos
self.walkTrack = Sequence(name=self.character.getName() + '-walk')
if startTrack:
self.walkTrack.append(startTrack)
self.character.setPos(posPoints[0])
raycast = CCharPaths.getRaycastFlag(self.walkInfo[0], self.walkInfo[1], self.paths)
moveTrack = self.makePathTrack(self.character, posPoints, self.speed, raycast)
if playRate:
self.walkTrack.append(Func(self.character.setPlayRate, playRate, 'walk'))
self.walkTrack.append(Func(self.character.loop, 'walk'))
self.walkTrack.append(moveTrack)
doneEventName = self.character.getName() + 'WalkDone'
self.walkTrack.append(Func(messenger.send, doneEventName))
ts = globalClockDelta.localElapsedTime(self.walkInfo[2])
self.accept(doneEventName, self.doneHandler)
self.notify.debug('walkTrack.start(%s)' % ts)
self.walkTrack.start(ts)
def makePathTrack(self, nodePath, posPoints, velocity, raycast = 0):
track = Sequence()
if raycast:
track.append(Func(nodePath.enableRaycast, 1))
startHpr = nodePath.getHpr()
for pointIndex in range(len(posPoints) - 1):
startPoint = posPoints[pointIndex]
endPoint = posPoints[pointIndex + 1]
track.append(Func(nodePath.setPos, startPoint))
distance = Vec3(endPoint - startPoint).length()
duration = distance / velocity
curHpr = nodePath.getHpr()
nodePath.headsUp(endPoint[0], endPoint[1], endPoint[2])
destHpr = nodePath.getHpr()
reducedCurH = reduceAngle(curHpr[0])
reducedCurHpr = Vec3(reducedCurH, curHpr[1], curHpr[2])
reducedDestH = reduceAngle(destHpr[0])
shortestAngle = closestDestAngle(reducedCurH, reducedDestH)
shortestHpr = Vec3(shortestAngle, destHpr[1], destHpr[2])
turnTime = abs(shortestAngle) / 270.0
nodePath.setHpr(shortestHpr)
if duration - turnTime > 0.01:
track.append(Parallel(Func(nodePath.loop, 'walk'), LerpHprInterval(nodePath, turnTime, shortestHpr, startHpr=reducedCurHpr, name='lerp' + nodePath.getName() + 'Hpr'), LerpPosInterval(nodePath, duration=duration - turnTime, pos=Point3(endPoint), startPos=Point3(startPoint), fluid=1)))
nodePath.setHpr(startHpr)
if raycast:
track.append(Func(nodePath.enableRaycast, 0))
return track
def doneHandler(self):
doneStatus = {}
doneStatus['state'] = 'walk'
doneStatus['status'] = 'done'
messenger.send(self.doneEvent, [doneStatus])
return Task.done
def exit(self):
StateData.StateData.exit(self)
self.ignore(self.character.getName() + 'WalkDone')
if self.walkTrack:
self.walkTrack.finish()
self.walkTrack = None
return
def setWalk(self, srcNode, destNode, timestamp, offsetX = 0, offsetY = 0):
self.oldOffsetX = self.offsetX
self.oldOffsetY = self.offsetY
self.walkInfo = (srcNode, destNode, timestamp)
self.offsetX = offsetX
self.offsetY = offsetY
class CharFollowChipState(CharWalkState):
notify = DirectNotifyGlobal.directNotify.newCategory('CharFollowChipState')
completeRevolutionDistance = 13
def __init__(self, doneEvent, character, chipId):
CharWalkState.__init__(self, doneEvent, character)
self.offsetDict = {'a': (ToontownGlobals.DaleOrbitDistance, 0)}
self.chipId = chipId
def setWalk(self, srcNode, destNode, timestamp, offsetX = 0, offsetY = 0):
self.offsetDict[destNode] = (offsetX, offsetY)
self.srcNode = srcNode
self.destNode = destNode
self.orbitDistance = ToontownGlobals.DaleOrbitDistance
if (srcNode, destNode) in CCharPaths.DaleOrbitDistanceOverride:
self.orbitDistance = CCharPaths.DaleOrbitDistanceOverride[srcNode, destNode]
elif (destNode, srcNode) in CCharPaths.DaleOrbitDistanceOverride:
self.orbitDistance = CCharPaths.DaleOrbitDistanceOverride[destNode, srcNode]
CharWalkState.setWalk(self, srcNode, destNode, timestamp, offsetX, offsetY)
def makePathTrack(self, nodePath, posPoints, velocity, raycast = 0):
retval = Sequence()
if raycast:
retval.append(Func(nodePath.enableRaycast, 1))
chip = base.cr.doId2do.get(self.chipId)
self.chipPaths = CCharPaths.getPaths(chip.getName(), chip.getCCLocation())
self.posPoints = posPoints
chipDuration = chip.walk.walkTrack.getDuration()
self.notify.debug('chipDuration = %f' % chipDuration)
chipDistance = CCharPaths.getWalkDistance(self.srcNode, self.destNode, ToontownGlobals.ChipSpeed, self.chipPaths)
self.revolutions = chipDistance / self.completeRevolutionDistance
srcOffset = (0, 0)
if self.srcNode in self.offsetDict:
srcOffset = self.offsetDict[self.srcNode]
srcTheta = math.atan2(srcOffset[1], srcOffset[0])
if srcTheta < 0:
srcTheta += 2 * math.pi
if srcTheta > 0:
srcRev = (2 * math.pi - srcTheta) / (2 * math.pi)
else:
srcRev = 0
self.srcTheta = srcTheta
destOffset = (0, 0)
if self.destNode in self.offsetDict:
destOffset = self.offsetDict[self.destNode]
destTheta = math.atan2(destOffset[1], destOffset[0])
if destTheta < 0:
destTheta += 2 * math.pi
self.destTheta = destTheta
self.revolutions += srcRev
endingTheta = srcTheta + self.revolutions % 1.0 * 2 * math.pi
diffTheta = destTheta - endingTheta
destRev = diffTheta / (2 * math.pi)
self.revolutions += destRev
while self.revolutions < 1:
self.revolutions += 1
def positionDale(t):
self.orbitChip(t)
retval.append(LerpFunctionInterval(positionDale, chipDuration))
if raycast:
retval.append(Func(nodePath.enableRaycast, 0))
return retval
def orbitChip(self, t):
srcOffset = (0, 0)
if self.srcNode in self.offsetDict:
srcOffset = self.offsetDict[self.srcNode]
chipSrcPos = Point3(self.posPoints[0][0] - srcOffset[0], self.posPoints[0][1] - srcOffset[1], self.posPoints[0][2])
destOffset = (0, 0)
if self.destNode in self.offsetDict:
destOffset = self.offsetDict[self.destNode]
chipDestPos = Point3(self.posPoints[-1][0] - destOffset[0], self.posPoints[-1][1] - destOffset[1], self.posPoints[-1][2])
displacement = chipDestPos - chipSrcPos
displacement *= t
chipPos = chipSrcPos + displacement
diffTheta = t * self.revolutions * 2 * math.pi
curTheta = self.srcTheta + diffTheta
newOffsetX = math.cos(curTheta) * self.orbitDistance
newOffsetY = math.sin(curTheta) * self.orbitDistance
dalePos = Point3(chipPos[0] + newOffsetX, chipPos[1] + newOffsetY, chipPos[2])
self.character.setPos(dalePos)
newHeading = rad2Deg(curTheta)
newHeading %= 360
self.character.setH(newHeading)
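# --- Hedged sketch (not part of the original file) ---------------------------
# Plain-math restatement of orbitChip() above (offsets omitted): Dale's
# position is Chip's linearly interpolated position plus a point on a circle
# of radius orbit_distance whose angle sweeps from src_theta through
# `revolutions` full turns as t goes from 0 to 1.
def _orbit_position(chip_src, chip_dest, src_theta, revolutions,
                    orbit_distance, t):
    x = chip_src[0] + (chip_dest[0] - chip_src[0]) * t
    y = chip_src[1] + (chip_dest[1] - chip_src[1]) * t
    theta = src_theta + t * revolutions * 2 * math.pi
    return (x + math.cos(theta) * orbit_distance,
            y + math.sin(theta) * orbit_distance)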
|
import os
from distutils.util import strtobool
from urllib.parse import urlparse
from aiohttp.web import Response
from vortex.config import DOMAIN
from vortex.middlewares import middleware
ALLOWED_ORIGINS = os.getenv("VORTEX_ALLOWED_ORIGINS", "localhost")
DISABLE_ORIGIN_CHECK = strtobool(os.getenv("VORTEX_DISABLE_ORIGIN_CHECK", "False"))
ACCEPT = [
"text/html",
"application/xhtml+xml",
"application/xml",
"application/json;q=0.9",
"*/*;q=0.8",
]
@middleware
async def headers_middleware(request, handler):
origin = request.headers.get("Origin")
if origin:
parsed = urlparse(origin)
request.domain = parsed.hostname
else:
request.domain = DOMAIN or urlparse(str(request.url)).hostname
if request.method != "OPTIONS":
response = await handler(request)
else:
response = Response()
if origin and (
DISABLE_ORIGIN_CHECK
or (request.domain and request.domain.endswith(ALLOWED_ORIGINS))
):
response.headers["Access-Control-Allow-Origin"] = origin
response.headers["Access-Control-Allow-Credentials"] = "true"
response.headers[
"Access-Control-Allow-Headers"
] = "Origin, X-Requested-With, Content-Type, Accept, Authorization"
response.headers["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS, DELETE, PUT"
response.headers["Accept"] = ",".join(ACCEPT)
response.headers["Accept-Language"] = "en-us,en;q=0.5"
response.headers["Accept-Encoding"] = "gzip,deflate"
response.headers["Accept-Charset"] = "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
return response
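# --- Hedged usage sketch (not part of the original file) ---------------------
# How a middleware like the one above is typically attached to an aiohttp
# application; the /ping handler is a placeholder, and this assumes vortex's
# @middleware decorator is compatible with aiohttp's middleware protocol.
if __name__ == "__main__":
    from aiohttp import web

    async def ping(request):
        return web.json_response({"ok": True})

    app = web.Application(middlewares=[headers_middleware])
    app.router.add_get("/ping", ping)
    web.run_app(app)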
|
import os
from subprocess import check_output
import logging
LOGGER = logging.getLogger('PYWPS')
def ncdump(dataset):
'''
Returns the metadata of the dataset
Code taken from https://github.com/ioos/compliance-checker-web
'''
try:
output = check_output(['ncdump', '-h', dataset])
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split('\n')
# replace the filename for safety
dataset_id = os.path.basename(dataset) # 'uploaded-file'
lines[0] = 'netcdf {} {{'.format(dataset_id)
# re-append the newline characters stripped by split()
filtered_lines = ['{}\n'.format(line) for line in lines]
except Exception as err:
LOGGER.error("Could not generate ncdump: {}".format(err))
return "Error: generating ncdump failed"
return filtered_lines
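# --- Hedged usage sketch (not part of the original file) ---------------------
# Requires the `ncdump` binary on PATH; 'example.nc' is a placeholder filename.
if __name__ == '__main__':
    header = ncdump('example.nc')
    print(''.join(header) if isinstance(header, list) else header)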
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.python.framework import ops
from lingvo.core import gpipe
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import quant_utils
from lingvo.core import test_utils
class ActivationsTest(test_utils.TestCase):
def testGeluActivation(self):
with self.session(use_gpu=True):
inputs = tf.constant(
np.linspace(-10.0, 10.0, num=21, dtype='float32'), dtype=tf.float32)
grads_gelu = tf.gradients(layers.Gelu(inputs), inputs)
grads_relu = tf.gradients(tf.nn.relu(inputs), inputs)
self.assertEqual(0.0,
layers.Gelu(tf.constant(-10.0, dtype='float32')).eval())
self.assertEqual(0.0,
layers.Gelu(tf.constant(0.0, dtype='float32')).eval())
self.assertEqual(10.0,
layers.Gelu(tf.constant(10.0, dtype='float32')).eval())
actual_grads_gelu = grads_gelu[0].eval()
actual_grads_relu = grads_relu[0].eval()
self.assertAllClose(actual_grads_gelu[-5:], actual_grads_relu[-5:])
self.assertAllClose(actual_grads_gelu[:5], actual_grads_relu[:5])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_grads_gelu = [
-7.69459925e-22, -9.25176121e-18, -4.04182472e-14, -6.39430453e-11,
-3.64552299e-08, -7.13557529e-06, -5.03641320e-04, -1.19456425e-02,
-8.52318183e-02, -8.33154917e-02, 5.00000000e-01, 1.08331549e+00,
1.08523178e+00, 1.01194561e+00, 1.00050366e+00, 1.00000715e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_grads_gelu, actual_grads_gelu)
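# --- Hedged reference sketch (not part of the original test) -----------------
# One common exact form of GELU is x * Phi(x) (the Gaussian CDF); the NumPy
# helper below reproduces the endpoint values checked in testGeluActivation
# (gelu(-10) ~ 0, gelu(0) = 0, gelu(10) ~ 10). It is illustrative only and
# makes no claim about which GELU variant layers.Gelu implements.
def _reference_gelu(x):
    x = np.asarray(x, dtype=np.float64)
    return x * 0.5 * (1.0 + np.vectorize(math.erf)(x / math.sqrt(2.0)))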
class BatchNormLayerTest(test_utils.TestCase):
def testBatchNormLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayer.Params()
params.name = 'bn'
params.dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
layers.BatchNormLayer(params)
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
'bn/beta/var:0', 'bn/gamma/var:0', 'bn/moving_mean/var:0',
'bn/moving_variance/var:0'
]
self.assertEqual(expected_var_names, bn_var_names)
def testBatchNormLayerMoments(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
in_padding1 = tf.zeros([2, 2, 8, 1], dtype=tf.float32)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 2, 8, 2]), dtype=tf.float32)
mean1, var1 = layers.BatchNormLayer._Moments(bn_in1, 1.0 - in_padding1)
mean2, var2 = tf.nn.moments(bn_in1, [0, 1, 2])
in_padding2 = tf.ones([2, 2, 8, 1], dtype=tf.float32)
bn_in2 = tf.constant(
np.random.normal(-0.3, 1.0, [2, 2, 8, 2]), dtype=tf.float32)
in_padding3 = tf.concat([in_padding1, in_padding2], 1)
bn_in3 = tf.concat([bn_in1, bn_in2], 1)
mean3, var3 = layers.BatchNormLayer._Moments(bn_in3, 1.0 - in_padding3)
mean4, var4 = tf.nn.moments(bn_in3, [0, 1, 2])
mean_diff = tf.reduce_sum(tf.square(mean3 - mean4))
var_diff = tf.reduce_sum(tf.square(var3 - var4))
tf.global_variables_initializer().run()
self.assertAllClose(mean2.eval(), mean1.eval())
self.assertAllClose(var2.eval(), var1.eval())
self.assertAllClose(mean3.eval(), mean1.eval())
self.assertAllClose(var3.eval(), var1.eval())
# Since tf.nn.moments() doesn't support padding, it is expected to produce
# different results than our own implementation (of moments).
self.assertAllClose(0.095987, mean_diff.eval())
self.assertAllClose(0.364456, var_diff.eval())
def testBatchNormLayerFProp(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayer.Params()
params.name = 'bn'
params.dim = 3
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayer(params)
in_padding1 = tf.zeros([2, 8, 1], dtype=tf.float32)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 3]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1, in_padding1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
tf.global_variables_initializer().run()
self.assertAllClose(0.0, sig1.eval(), atol=1e-5)
self.assertAllClose(47.8371887, sig2.eval())
def testBatchNormLayerFPropUseGlobalStatsForTraining(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayer.Params()
params.name = 'bn'
params.dim = 3
params.use_moving_avg_in_training = True
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayer(params)
in_padding1 = tf.zeros([2, 8, 1], dtype=tf.float32)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 3]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1, in_padding1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
tf.global_variables_initializer().run()
self.assertAllClose(2.6593573, sig1.eval(), atol=1e-5)
self.assertAllClose(15.464208, sig2.eval())
def testBatchNormLayerMomentsForConv(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
in_padding1 = tf.zeros([2, 8, 1, 1], dtype=tf.float32)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 4, 3]), dtype=tf.float32)
mean1, var1 = layers.BatchNormLayer._Moments(bn_in1, 1.0 - in_padding1)
mean2, var2 = tf.nn.moments(bn_in1, [0, 1, 2])
in_padding2 = tf.ones([2, 8, 1, 1], dtype=tf.float32)
bn_in2 = tf.constant(
np.random.normal(-0.3, 1.0, [2, 8, 4, 3]), dtype=tf.float32)
in_padding3 = tf.concat([in_padding1, in_padding2], 1)
bn_in3 = tf.concat([bn_in1, bn_in2], 1)
mean3, var3 = layers.BatchNormLayer._Moments(bn_in3, 1.0 - in_padding3)
mean4, var4 = tf.nn.moments(bn_in3, [0, 1, 2])
mean_diff = tf.reduce_sum(tf.square(mean3 - mean4))
var_diff = tf.reduce_sum(tf.square(var3 - var4))
tf.global_variables_initializer().run()
self.assertAllClose(mean2.eval(), mean1.eval())
self.assertAllClose(var2.eval(), var1.eval())
self.assertAllClose(mean3.eval(), mean1.eval())
self.assertAllClose(var3.eval(), var1.eval())
self.assertAllClose(0.1726295, mean_diff.eval())
self.assertAllClose(0.5592572093009949, var_diff.eval())
def testBatchNormLayerFPropForConv(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayer.Params()
params.name = 'bn_conv'
params.dim = 32
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayer(params)
in_padding1 = tf.zeros([2, 8, 1, 1], dtype=tf.float32)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 4, 32]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1, in_padding1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
tf.global_variables_initializer().run()
self.assertAllClose(0.0, sig1.eval(), atol=1e-4)
self.assertAllClose(2039.398681, sig2.eval())
class ConvLayerTest(test_utils.TestCase):
"""Tests conv layers.
Note that there are multiple subclasses of BaseConv2DLayer and most cases
are tested via the concrete Conv2DLayer. Other tests are done against
other subclasses to cover key differences.
"""
def testConv2DLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.Conv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
layers.Conv2DLayer(params)
conv_vars = tf.get_collection('Conv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
expected_var_names = ['conv/w/var:0']
self.assertEqual(expected_var_names, conv_var_names)
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
'conv/beta/var:0', 'conv/gamma/var:0', 'conv/moving_mean/var:0',
'conv/moving_variance/var:0'
]
self.assertEqual(expected_var_names, bn_var_names)
def testDepthwiseConv2DLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.DepthwiseConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
layers.DepthwiseConv2DLayer(params)
conv_vars = tf.get_collection('DepthwiseConv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
expected_var_names = ['conv/w/var:0']
self.assertEqual(expected_var_names, conv_var_names)
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
'conv/beta/var:0', 'conv/gamma/var:0', 'conv/moving_mean/var:0',
'conv/moving_variance/var:0'
]
self.assertEqual(expected_var_names, bn_var_names)
def testSeparableConv2DLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.SeparableConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
params.Instantiate()
# Vars for the outer conv layer.
conv_vars = tf.get_collection('SeparableConv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
expected_var_names = ['conv/w/var:0']
self.assertSetEqual(set(expected_var_names), set(conv_var_names))
# Vars for the inner depthwise layer.
conv_vars = tf.get_collection('DepthwiseConv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
expected_var_names = ['conv/depthwise_conv/w/var:0']
self.assertSetEqual(set(expected_var_names), set(conv_var_names))
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
# Outer conv batchnorm.
'conv/beta/var:0',
'conv/gamma/var:0',
'conv/moving_mean/var:0',
'conv/moving_variance/var:0',
# Inner depthwise batchnorm.
'conv/depthwise_conv/beta/var:0',
'conv/depthwise_conv/gamma/var:0',
'conv/depthwise_conv/moving_mean/var:0',
'conv/depthwise_conv/moving_variance/var:0',
]
self.assertSetEqual(set(expected_var_names), set(bn_var_names))
def testConv2DLayerWithBiasConstruction(self):
"""Tests Conv2DLayer with only bias and without batch normalization."""
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.Conv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
params.bias = True
params.batch_norm = False
layers.Conv2DLayer(params)
conv_vars = tf.get_collection('Conv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
# Has both 'w' and 'b'.
expected_var_names = ['conv/w/var:0', 'conv/b/var:0']
self.assertEqual(expected_var_names, conv_var_names)
# No BatchNorm variables.
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = []
self.assertEqual(expected_var_names, bn_var_names)
def testDepthwiseConv2DLayerWithBiasConstruction(self):
"""Tests DepthwiseConv2D with only bias and without batch normalization."""
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.DepthwiseConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
params.bias = True
params.batch_norm = False
layers.DepthwiseConv2DLayer(params)
conv_vars = tf.get_collection('DepthwiseConv2DLayer_vars')
conv_var_names = [x.name for x in conv_vars]
# Has both 'w' and 'b'.
expected_var_names = ['conv/w/var:0', 'conv/b/var:0']
self.assertEqual(expected_var_names, conv_var_names)
# No BatchNorm variables.
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = []
self.assertEqual(expected_var_names, bn_var_names)
def testConv2DLayerOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.Conv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.Conv2DLayer(params)
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 5, 32])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 10, 5, 32])
def testDepthwiseConv2DLayerOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.DepthwiseConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.DepthwiseConv2DLayer(params)
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 5, 96])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 10, 5, 96])
def testSeparableConv2DLayerOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.SeparableConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = params.Instantiate()
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 5, 32])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 10, 5, 32])
def testConv2DLayerWithDilationOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.Conv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [1, 1]
params.dilation_rate = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.Conv2DLayer(params)
# dilation_rate does not change output shape.
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 10, 32])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 20, 10, 32])
def testDepthwiseConv2DLayerWithDilationOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.DepthwiseConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [1, 1]
params.dilation_rate = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.DepthwiseConv2DLayer(params)
# dilation_rate does not change output shape.
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 10, 96])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 20, 10, 96])
def testSeparableConv2DLayerWithDilationOutShape(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.SeparableConv2DLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 32]
params.filter_stride = [1, 1]
params.dilation_rate = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = params.Instantiate()
# dilation_rate does not change output shape.
in_shape = [None, None, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, None, 10, 32])
in_shape = [None, 20, 10, 3]
out_shape = conv_layer.OutShape(in_shape)
self.assertEqual(out_shape, [None, 20, 10, 32])
def testConvPoolComputeOutPadding(self):
with self.session(use_gpu=True):
in_padding = tf.constant(
[[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]],
dtype=tf.float32)
out_padding = layers._ComputeConvOutputPadding(in_padding, 2, 2)
expected_out_padding = [[1, 1, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]]
tf.global_variables_initializer().run()
self.assertAllClose(expected_out_padding, out_padding.eval().tolist())
def testConvPoolComputeOutPaddingUnevenStride(self):
with self.session(use_gpu=True):
in_padding = tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]],
dtype=tf.float32)
out_padding = layers._ComputeConvOutputPadding(in_padding, 3, 3)
expected_out_padding = [[0, 0, 0, 0, 1], [0, 0, 0, 1, 1], [0, 0, 1, 1, 1]]
tf.global_variables_initializer().run()
self.assertAllClose(expected_out_padding, out_padding.eval().tolist())
def _checkConvLayerShapes(self,
input_shape,
filter_shape,
filter_stride,
dilation_rate=None,
depth_multiplier=None,
params_builder=layers.Conv2DLayer.Params):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(398847392)
np.random.seed(12345)
params = params_builder()
params.name = 'conv'
params.filter_shape = filter_shape
params.filter_stride = filter_stride
if dilation_rate:
params.dilation_rate = dilation_rate
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
if depth_multiplier is not None:
params.depth_multiplier = depth_multiplier
conv_layer = params.Instantiate()
inp = tf.random_uniform(input_shape)
inp_pad = tf.floor(0.5 + tf.random_uniform(input_shape[:2]))
out, out_pad = conv_layer.FPropDefaultTheta(inp, inp_pad)
with self.session(use_gpu=True, graph=g) as sess:
tf.global_variables_initializer().run()
out, out_pad = sess.run([out, out_pad])
print(out.shape, out_pad.shape)
# We expect conv_layer.OutShape can compute the actual output shape.
self.assertAllEqual(out.shape, conv_layer.OutShape(inp.shape.as_list()))
# We expect out_pad.shape matches the 1st 2 dimensions of out.
self.assertAllEqual(out.shape[:2], out_pad.shape)
def testConv2DLayerOutputShapes(self):
self._checkConvLayerShapes([2, 4, 4, 3], [3, 3, 3, 32], [1, 1])
self._checkConvLayerShapes([2, 4, 4, 3], [3, 3, 3, 32], [2, 2])
self._checkConvLayerShapes([2, 10, 4, 3], [3, 3, 3, 32], [3, 3])
self._checkConvLayerShapes([2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[2, 2])
self._checkConvLayerShapes([2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[3, 3])
def testDepthwiseConv2DLayerOutputShapes(self):
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [1, 1],
params_builder=layers.DepthwiseConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [2, 2],
params_builder=layers.DepthwiseConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [3, 3],
params_builder=layers.DepthwiseConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[2, 2],
params_builder=layers.DepthwiseConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[3, 3],
params_builder=layers.DepthwiseConv2DLayer.Params)
def testSeparableConv2DLayerOutputShapes(self):
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [1, 1],
params_builder=layers.SeparableConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [2, 2],
params_builder=layers.SeparableConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [3, 3],
params_builder=layers.SeparableConv2DLayer.Params)
# Dilations.
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[2, 2],
params_builder=layers.SeparableConv2DLayer.Params)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [1, 1],
dilation_rate=[3, 3],
params_builder=layers.SeparableConv2DLayer.Params)
# Depth multiplier.
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [1, 1],
params_builder=layers.SeparableConv2DLayer.Params,
depth_multiplier=2)
self._checkConvLayerShapes(
[2, 4, 4, 3], [3, 3, 3, 32], [2, 2],
params_builder=layers.SeparableConv2DLayer.Params,
depth_multiplier=6)
self._checkConvLayerShapes(
[2, 10, 4, 3], [3, 3, 3, 32], [3, 3],
params_builder=layers.SeparableConv2DLayer.Params,
depth_multiplier=12)
def _evalConvLayerFProp(self,
params_builder=layers.Conv2DLayer.Params,
batch_norm=True,
weight_norm=False,
bias=False,
activation='RELU',
conv_last=False,
strides=(2, 2),
dilation_rate=(1, 1),
bn_fold_weights=False,
is_eval=False,
quantized=False):
self._ClearCachedSession()
tf.reset_default_graph()
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = params_builder()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 2]
params.filter_stride = strides
params.dilation_rate = dilation_rate
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.conv_last = conv_last
params.batch_norm = batch_norm
params.bn_fold_weights = bn_fold_weights
params.weight_norm = weight_norm
params.bias = bias
params.activation = activation
params.is_eval = is_eval
if quantized:
params.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
conv_layer = params.Instantiate()
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 4, 3]), dtype=tf.float32)
output1, _ = conv_layer.FPropDefaultTheta(inputs1, in_padding1)
output2, _ = conv_layer.FPropDefaultTheta(inputs1)
tf.global_variables_initializer().run()
v1, v2 = sess.run([output1, output2])
self.assertAllClose(v1, v2)
return v1
def testConv2DLayerFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0.36669245, 0.91488785],
[ 0.07532132, 0. ]],
[[ 0.34952009, 0. ],
[ 1.91783941, 0. ]]],
[[[ 0.28304493, 0. ],
[ 0. , 0. ]],
[[ 0. , 0.86575812],
[ 0. , 1.60203481]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp()
print('actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
def testDepthwiseConv2DLayerFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0.93514717, 0.35602099, 0. , 0.51261222, 0. ,
1.4310323 ],
[ 0. , 0. , 0.49176404, 0. , 1.01494753,
0.51337928]],
[[ 0.62087697, 0.34572476, 0. , 0.19352221, 0.47142431,
0. ],
[ 0.81119895, 1.00890303, 0.90471351, 0. , 1.22736526,
0. ]]],
[[[ 0. , 0. , 0.48927376, 0. , 0.74019426,
0. ],
[ 0. , 0. , 1.49952257, 0. , 0. ,
0. ]],
[[ 0.29156703, 0. , 0. , 1.14509106, 0. ,
0.74238932],
[ 0.91312039, 1.39783907, 0. , 1.47650909, 0. ,
0.37969294]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(
params_builder=layers.DepthwiseConv2DLayer.Params)
print('actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
def testSeparableConv2DLayerFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 =[
[[[ 0.39866772, 0. ],
[ 1.36471784, 0. ]],
[[ 0. , 0. ],
[ 0. , 0. ]]],
[[[ 1.15356529, 0.1036691 ],
[ 0.12865055, 0.61244327]],
[[ 0.03609803, 1.81620765],
[ 0. , 0.23052886]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(
params_builder=layers.SeparableConv2DLayer.Params)
print('actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
def testConv2DLayerWithDilationFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0. , 0.48857123],
[ 1.07320869, 0. ],
[ 0. , 0.1550007 ],
[ 0. , 1.59097648]],
[[ 0. , 0. ],
[ 0.20024362, 0. ],
[ 0. , 0.64265913],
[ 1.52903616, 0. ]],
[[ 0.099805 , 0. ],
[ 0. , 0.61720949],
[ 1.31608474, 0. ],
[ 0. , 0. ]],
[[ 0.0175612 , 0. ],
[ 0. , 0.17234094],
[ 0.21719536, 0. ],
[ 1.68514931, 0. ]]],
[[[ 1.45240796, 0. ],
[ 0. , 0. ],
[ 0.72675145, 1.971596 ],
[ 0. , 0.01062769]],
[[ 0. , 1.70299017],
[ 1.36936104, 1.29897082],
[ 1.40132439, 1.74345171],
[ 0.02585058, 0.29061913]],
[[ 0. , 0. ],
[ 0.32962656, 0.05025356],
[ 0. , 0. ],
[ 0. , 0. ]],
[[ 0.97244394, 0. ],
[ 0.23401484, 0.5722279 ],
[ 0. , 0.40940297],
[ 0. , 0.52711827]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(strides=[1, 1], dilation_rate=[2, 2])
print('testConvLayerWithDilationFProp actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
def testSeparableConv2DLayerWithDilationFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0.21535617, 0.86965537],
[ 2.11499524, 1.2463783 ],
[ 0. , 0.39275286],
[ 0. , 0. ]],
[[ 1.12706482, 1.37450278],
[ 0. , 0. ],
[ 0. , 0. ],
[ 1.2390101 , 0.22932449]],
[[ 0. , 0. ],
[ 0.15051894, 1.32616639],
[ 0. , 0. ],
[ 0.72912866, 0.47753802]],
[[ 0.91655868, 0. ],
[ 0.88526261, 0.26690534],
[ 0. , 0.26084688],
[ 0.42923039, 0. ]]],
[[[ 0.82440329, 0. ],
[ 0.49015623, 0.52662987],
[ 0. , 0. ],
[ 0.35344127, 0. ]],
[[ 0. , 0. ],
[ 0. , 0. ],
[ 0.43848675, 0. ],
[ 0. , 1.21124518]],
[[ 1.1026746 , 1.39578998],
[ 0. , 0. ],
[ 0.34652925, 0. ],
[ 0. , 1.26868236]],
[[ 0.91519427, 0.09030763],
[ 0. , 0.59271163],
[ 0. , 0.54207176],
[ 0. , 0. ]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(
strides=[1, 1],
dilation_rate=[2, 2],
params_builder=layers.SeparableConv2DLayer.Params)
print('testConvLayerWithDilationFProp actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
def testConv2DLayerConvFirstVsLastFProp(self):
"""Compare results of conv first vs. last."""
# ... with batch_norm and activation disabled.
self.assertAllClose(
self._evalConvLayerFProp(
batch_norm=False, activation='NONE', conv_last=False),
self._evalConvLayerFProp(
batch_norm=False, activation='NONE', conv_last=True))
def testConv2DLayerFPropConvLast(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0.22165056, 0.20731729],
[ 0.09577402, -0.15359652]],
[[ 0.07151584, 0.03027298],
[ 0.05370769, 0.0143405 ]]],
[[[-0.08854639, 0.06143938],
[-0.37708873, 0.00889082]],
[[-0.58154356, 0.30798748],
[-0.37575331, 0.54729235]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(conv_last=True)
print(['ConvLast actual = ', np.array_repr(actual)])
self.assertAllClose(expected_output1, actual)
def testConv2DLayerConvWithBias(self):
"""Compare results with bias vs. with neither batch_norm nor bias."""
# Results should match since bias is initialized to be 0.
self.assertAllClose(
self._evalConvLayerFProp(batch_norm=False, bias=False),
self._evalConvLayerFProp(batch_norm=False, bias=True))
def testConv2DLayerWeightNormFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [
[[[ 0.37172362, 0.92405349],
[ 0.07635488, 0.]],
[[ 0.35431579, 0.],
[ 1.94415355, 0.]]],
[[[ 0.28692839, 0.],
[ 0. , 0.]],
[[ 0. , 0.87443149],
[ 0. , 1.61808443]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(weight_norm=True)
print('actual1 = ', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testDepthwiseConv2DLayerWeightNormFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [
[[[ 0.97023201, 0.37429881, 0. , 0.53157473, 0. ,
1.60764372],
[ 0. , 0. , 0.50401598, 0. , 1.07683432,
0.57673818]],
[[ 0.644171 , 0.36347377, 0. , 0.20068097, 0.50016963,
0. ],
[ 0.8416335 , 1.06069875, 0.92725372, 0. , 1.30220449,
0. ]]],
[[[ 0. , 0. , 0.50146359, 0. , 0.78532791,
0. ],
[ 0. , 0. , 1.53688192, 0. , 0. ,
0. ]],
[[ 0.302506 , 0. , 0. , 1.18745029, 0. ,
0.83401161],
[ 0.94737887, 1.46960247, 0. , 1.53112805, 0. ,
0.42655289]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(
weight_norm=True, params_builder=layers.DepthwiseConv2DLayer.Params)
print('actual1 = ', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testSeparableConv2DLayerWeightNormFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [
[[[ 0.41837293, 0. ],
[ 1.39592457, 0. ]],
[[ 0. , 0. ],
[ 0. , 0. ]]],
[[[ 1.20513153, 0.11938372],
[ 0.1284119 , 0.6927582 ]],
[[ 0.0227453 , 2.05591369],
[ 0. , 0.26530063]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalConvLayerFProp(
weight_norm=True, params_builder=layers.SeparableConv2DLayer.Params)
print('actual1 = ', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testConv2DLayerFoldedBatchNormFProp(self):
actual_unfolded = self._evalConvLayerFProp(
batch_norm=True, bn_fold_weights=False)
actual_folded = self._evalConvLayerFProp(
batch_norm=True, bn_fold_weights=True)
print('testConvLayerFoldedBatchNormFProp folded = ',
np.array_repr(actual_folded))
print('testConvLayerFoldedBatchNormFProp unfolded = ',
np.array_repr(actual_unfolded))
self.assertAllClose(actual_folded, actual_unfolded)
def testDepthwiseConv2DLayerFoldedBatchNormFProp(self):
actual_unfolded = self._evalConvLayerFProp(
batch_norm=True,
bn_fold_weights=False,
params_builder=layers.DepthwiseConv2DLayer.Params)
actual_folded = self._evalConvLayerFProp(
batch_norm=True,
bn_fold_weights=True,
params_builder=layers.DepthwiseConv2DLayer.Params)
print('testDepthwiseConvLayerFoldedBatchNormFProp folded = ',
np.array_repr(actual_folded))
print('testDepthwiseConvLayerFoldedBatchNormFProp unfolded = ',
np.array_repr(actual_unfolded))
self.assertAllClose(actual_folded, actual_unfolded)
def testSeparableConv2DLayerFoldedBatchNormFProp(self):
actual_unfolded = self._evalConvLayerFProp(
batch_norm=True,
bn_fold_weights=False,
params_builder=layers.SeparableConv2DLayer.Params)
actual_folded = self._evalConvLayerFProp(
batch_norm=True,
bn_fold_weights=True,
params_builder=layers.SeparableConv2DLayer.Params)
print('testSeparableConvLayerFoldedBatchNormFProp folded = ',
np.array_repr(actual_folded))
print('testSeparableConvLayerFoldedBatchNormFProp unfolded = ',
np.array_repr(actual_unfolded))
self.assertAllClose(actual_folded, actual_unfolded)
def testConvLayerFoldedBatchNormFPropEval(self):
actual_unfolded = self._evalConvLayerFProp(
batch_norm=True, bn_fold_weights=False, is_eval=True)
actual_folded = self._evalConvLayerFProp(
batch_norm=True, bn_fold_weights=True, is_eval=True)
print('testConvLayerFoldedBatchNormFPropEval folded = ',
np.array_repr(actual_folded))
print('testConvLayerFoldedBatchNormFPropEval unfolded = ',
np.array_repr(actual_unfolded))
self.assertAllClose(actual_folded, actual_unfolded)
def testConv2DLayerNoPadding(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(24332)
p = layers.Conv2DLayerNoPadding.Params().Set(
name='test', filter_shape=(3, 3, 3, 5), filter_stride=(2, 2))
l = p.Instantiate()
x = tf.random_normal(shape=[17, 64, 64, 3])
y = l.FPropDefaultTheta(x)
with self.session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
y_val = sess.run(y)
self.assertEqual(y_val.shape, (17, 32, 32, 5))
def testConvLayerFoldedBatchNormFPropQuantized(self):
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [
[[[ 0.36997819, 0.91361964],
[ 0.07550576, 0. ]],
[[ 0.35487702, 0. ],
[ 1.92539668, 0. ]]],
[[[ 0.27937129, 0. ],
[ 0. , 0. ]],
[[ 0. , 0.86831617],
[ 0. , 1.59317136]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual_folded = self._evalConvLayerFProp(
batch_norm=True, bn_fold_weights=True, quantized=True)
print('testConvLayerFoldedBatchNormFPropQuantized folded = ',
np.array_repr(actual_folded))
self.assertAllClose(actual_folded, expected_output)
def testCausalConvLayerFProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ConvLayer.Params()
params.name = 'conv'
params.filter_shape = [2, 1, 3, 2]
params.filter_stride = [1, 1]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
params.causal_convolution = True
params.activation = 'NONE'
params.batch_norm = False
conv_layer = layers.ConvLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 3, 3]), dtype=tf.float32)
# Change the input for the last two steps.
inputs2 = tf.concat([inputs1[:, :2, :, :], inputs1[:, 2:, :, :] + 0.5], 1)
output1, _ = conv_layer.FPropDefaultTheta(inputs1, in_padding1)
output2, _ = conv_layer.FPropDefaultTheta(inputs2, in_padding1)
tf.global_variables_initializer().run()
v1, v2 = sess.run([output1, output2])
tf.logging.info('CausalConv output: %s', np.array_repr(v1))
# pylint: disable=bad-whitespace,bad-continuation,line-too-long
self.assertAllClose(v1, [
[[[-0.01093466, 0.00369835],
[ 0.03474921, 0.01418608],
[ 0.01887876, -0.00763734]],
[[-0.06922598, -0.04526342],
[-0.02428233, 0.02042499],
[-0.04504267, -0.01260209]],
[[-0.14253227, -0.11353028],
[-0.09067881, 0.03742362],
[ 0.01281691, 0.00644186]],
[[-0.06524619, -0.0555004 ],
[-0.18850081, -0.05325979],
[ 0.04960757, 0.05512709]]],
[[[-0.01077277, 0.03013588],
[ 0.00325067, -0.0223705 ],
[-0.00895232, 0.03310337]],
[[ 0.03113075, -0.02388876],
[ 0.03238059, 0.00590346],
[ 0.12839797, -0.02194144]],
[[-0.09115655, -0.06798521],
[-0.09801255, -0.01440183],
[-0.04321899, 0.00340509]],
[[-0.089603 , -0.07257183],
[-0.04469771, -0.0389927 ],
[-0.01747611, 0.00903451]]]
]) # pyformat: disable
# pylint: enable=bad-whitespace,bad-continuation,line-too-long
self.assertAllClose(v1[:, :2, :, :], v2[:, :2, :, :])
with self.assertRaises(AssertionError):
self.assertAllClose(v1[:, 2:, :, :], v2[:, 2:, :, :])
def testConvLayerBackProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ConvLayer.Params()
params.name = 'conv'
params.filter_shape = [3, 3, 3, 2]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.ConvLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 4, 3]), dtype=tf.float32)
output1, _ = conv_layer.FPropDefaultTheta(inputs1, in_padding1)
loss = tf.reduce_sum(output1)
all_vars = tf.trainable_variables()
self.assertEqual(3, len(all_vars))
grads = tf.gradients(loss, all_vars)
tf.global_variables_initializer().run()
sym_grads = [sg.eval() for sg in grads]
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v) for v in all_vars
]
for sg, ng in zip(sym_grads, num_grads):
self.assertAllClose(sg, ng, rtol=1e-02, atol=1e-02)
def testConvLayerFPropTanh(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ConvLayer.Params()
params.activation = 'TANH'
params.name = 'conv'
params.filter_shape = [3, 3, 3, 2]
params.filter_stride = [2, 2]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
conv_layer = layers.ConvLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 4, 3]), dtype=tf.float32)
output1, _ = conv_layer.FPropDefaultTheta(inputs1, in_padding1)
tf.global_variables_initializer().run()
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output1 = [
[[[ 0.35109526, 0.72346997],
[ 0.0751792 , -0.84315312]],
[[ 0.33594984, -0.18976833],
[ 0.95773894, -0.28015777]]],
[[[ 0.27572086, -0.26577294],
[-0.38503852, -0.88501388]],
[[-0.92332661, 0.69921255],
[-0.75103623, 0.9219743 ]]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = output1.eval()
print(['actual = ', actual])
self.assertAllClose(expected_output1, actual)
def testConvSetLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ConvSetLayer.Params()
params.name = 'conv_set'
params.filter_shapes = [[3, 3, 3, 32], [8, 5, 3, 64]]
params.cnn_tpl.filter_stride = [2, 2]
params.cnn_tpl.params_init = py_utils.WeightInit.Gaussian(0.1)
params.cnn_tpl.is_eval = False
layers.ConvSetLayer(params)
def _evalConvSetLayerFProp(self,
batch_norm=True,
bn_fold_weights=False,
weight_norm=False,
bias=False,
activation='RELU',
conv_last=False,
strides=(2, 2),
dilation_rate=(1, 1),
quantized=False,
dump_graphdef=False):
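    # Helper for the ConvSetLayer FProp tests below: builds a two-filter
    # ConvSetLayer with the given options, runs FProp on fixed random inputs
    # and returns the concatenated output as a numpy array. Enable
    # 'dump_graphdef' only for local debugging; the assert below prevents
    # submitting with it turned on.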
self._ClearCachedSession()
ops.reset_default_graph()
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ConvSetLayer.Params()
params.name = 'conv_set'
params.filter_shapes = [[2, 2, 6, 1], [3, 5, 6, 3]]
params.cnn_tpl.filter_stride = strides
params.cnn_tpl.dilation_rate = dilation_rate
params.cnn_tpl.params_init = py_utils.WeightInit.Gaussian(0.1)
params.cnn_tpl.conv_last = conv_last
params.cnn_tpl.batch_norm = batch_norm
params.cnn_tpl.bn_fold_weights = bn_fold_weights
params.cnn_tpl.weight_norm = weight_norm
params.cnn_tpl.bias = bias
params.cnn_tpl.activation = activation
params.cnn_tpl.is_eval = False
if quantized:
params.qdomain.default = quant_utils.PassiveAsymQDomain.Params()
conv_set_layer = layers.ConvSetLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 4, 6]), dtype=tf.float32)
output1, _ = conv_set_layer.FPropDefaultTheta(inputs1, in_padding1)
tf.global_variables_initializer().run()
if dump_graphdef:
print('ConvSet GraphDef:', sess.graph.as_graph_def())
assert False, 'Disable "dump_graphdef" before submit'
return output1.eval()
def testConvSetLayerFProp(self):
# pyformat: disable
# pylint: disable=bad-whitespace,bad-continuation
expected_output1 = [
[[[ 1.04307961, 0. , 1.27613628, 0. ],
[ 0. , 0. , 0. , 1.21081829 ]],
[[ 0. , 0.18475296, 0. , 0. ],
[ 1.34087086 , 2.2726357 , 0. , 0. ]]],
[[[ 0. , 0.25231963, 0. , 0. ],
[ 1.13677704 , 0. , 0.996117 , 1.836285 ]],
[[ 0. , 0. , 1.04101253, 0. ],
[ 0.12628449 , 0.37599814, 0.3134549 , 0.51208746 ]]]
]
# pyformat: enable
# pylint: enable=bad-whitespace,bad-continuation
actual = self._evalConvSetLayerFProp()
print(['actual = ', np.array_repr(actual)])
self.assertAllClose(expected_output1, actual)
def testConvSetLayerFPropQuantized(self):
# pyformat: disable
# pylint: disable=bad-whitespace,bad-continuation
expected_output1 = [
[[[ 1.04016984, 0. , 1.28103447, 0. ],
[ 0. , 0. , 0. , 1.20986581]],
[[ 0. , 0.18681753, 0. , 0. ],
[ 1.35328221, 2.26849842, 0. , 0. ]]],
[[[ 0. , 0.24909003, 0. , 0. ],
[ 1.14100266, 0. , 0.98746401, 1.83259094]],
[[ 0. , 0. , 1.04084051, 0. ],
[ 0.12736773, 0.38253111, 0.32025862, 0.5159722 ]]]]
# pyformat: enable
# pylint: enable=bad-whitespace,bad-continuation
actual = self._evalConvSetLayerFProp(bn_fold_weights=True, quantized=True)
# Note that we don't have many ways to verify in a unit test that the
# quant nodes were added properly; however, if their placement changes,
# it will very likely perturb the golden values above. If digging deeper,
# add 'dump_graphdef=True' to the above call and inspect the graphdef:
# There should be one layer of fake_quant* nodes before the ConcatV2.
print('actual = ', np.array_repr(actual))
self.assertAllClose(expected_output1, actual)
# TODO(yonghui): Add more tests for the convolution layer.
class PoolingLayerTest(test_utils.TestCase):
def testPoolLayerFProp(self):
with self.session(use_gpu=True):
params = layers.PoolingLayer.Params()
params.name = 'pool'
params.window_shape = [3, 3]
params.window_stride = [1, 2]
params.is_eval = False
pool_layer = layers.PoolingLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.arange(96, dtype='float32').reshape([2, 4, 4, 3]),
dtype=tf.float32)
output1, _ = pool_layer.FPropDefaultTheta(inputs1, in_padding1)
tf.global_variables_initializer().run()
print([np.array_repr(output1.eval())])
# pyformat: disable
expected_output1 = [
[[[18., 19., 20.],
[21., 22., 23.]],
[[30., 31., 32.],
[33., 34., 35.]],
[[42., 43., 44.],
[45., 46., 47.]],
[[42., 43., 44.],
[45., 46., 47.]]],
[[[66., 67., 68.],
[69., 70., 71.]],
[[78., 79., 80.],
[81., 82., 83.]],
[[90., 91., 92.],
[93., 94., 95.]],
[[90., 91., 92.],
[93., 94., 95.]]]]
# pyformat: enable
self.assertAllClose(expected_output1, output1.eval())
def testPoolLayerMoreShapes(self):
with self.session(use_gpu=True):
for window_shape, window_stride in [
[[3, 3], [1, 2]],
[[2, 2], [1, 2]],
[[3, 4], [1, 3]],
]:
params = layers.PoolingLayer.Params()
params.name = 'pool'
params.window_shape = window_shape
params.window_stride = window_stride
params.is_eval = False
pool_layer = layers.PoolingLayer(params)
in_padding1 = tf.zeros([2, 4], dtype=tf.float32)
inputs1 = tf.constant(
np.arange(96, dtype='float32').reshape([2, 4, 4, 3]),
dtype=tf.float32)
output1, _ = pool_layer.FPropDefaultTheta(inputs1, in_padding1)
output2 = tf.nn.max_pool(inputs1, [1] + params.window_shape + [1],
[1] + params.window_stride + [1], 'SAME')
predicted_out_shape = pool_layer.OutShape(inputs1.shape.as_list())
tf.global_variables_initializer().run()
output1_v = output1.eval()
self.assertAllClose(output2.eval(), output1_v)
self.assertAllClose(predicted_out_shape, output1_v.shape)
class ProjectionLayerTest(test_utils.TestCase):
def testProjectionLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ProjectionLayer.Params()
params.name = 'proj'
params.input_dim = 2
params.output_dim = 3
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
layers.ProjectionLayer(params)
proj_vars = tf.get_collection('ProjectionLayer_vars')
proj_var_names = [x.name for x in proj_vars]
self.assertEqual(['proj/w/var:0'], proj_var_names)
bn_vars = tf.get_collection('BatchNormLayer_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
'proj/beta/var:0', 'proj/gamma/var:0', 'proj/moving_mean/var:0',
'proj/moving_variance/var:0'
]
self.assertEqual(expected_var_names, bn_var_names)
def _evalProjectionLayer(self,
reshape_to_2d=False,
batch_norm=True,
weight_norm=False,
activation='RELU',
affine_last=False,
input_dim=3,
output_dim=2,
quantized=False,
has_bias=False,
bn_fold_weights=None,
expect_bn_fold_weights=None,
is_eval=False,
layer_callback=None):
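    # Helper for the ProjectionLayer tests: builds a projection layer with the
    # given options (batch norm, weight norm, bias, quantization, ...), runs
    # FProp on fixed random inputs and returns the result as a numpy array.
    # When 'expect_bn_fold_weights' is set, it also asserts whether the layer
    # decided to fold the batch-norm statistics into the projection weights.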
self._ClearCachedSession()
tf.reset_default_graph()
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ProjectionLayer.Params()
params.name = 'proj'
params.input_dim = input_dim
params.output_dim = output_dim
params.has_bias = has_bias
if has_bias:
params.bias_init = 5.0
params.activation = activation
params.batch_norm = batch_norm
params.weight_norm = weight_norm
params.affine_last = affine_last
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.bn_fold_weights = bn_fold_weights
if quantized:
cc_schedule = quant_utils.FakeQuantizationSchedule.Params().Set(
clip_end_step=1, quant_start_step=1)
qdomain_default = quant_utils.SymmetricScheduledClipQDomain.Params(
).Set(cc_schedule=cc_schedule.Copy())
params.qdomain.default = qdomain_default.Copy()
params.is_eval = is_eval
in_padding = tf.zeros([2, 4, 1], dtype=tf.float32)
inputs = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 3]), dtype=tf.float32)
if reshape_to_2d:
in_padding = tf.reshape(in_padding, [-1, 1])
inputs = tf.reshape(inputs, [-1, 3])
proj_layer = layers.ProjectionLayer(params)
if layer_callback:
layer_callback(proj_layer)
if expect_bn_fold_weights is not None:
self.assertEqual(expect_bn_fold_weights, proj_layer._is_bn_folded)
output = proj_layer.FPropDefaultTheta(inputs, in_padding)
tf.global_variables_initializer().run()
if quantized:
# Put it in the fully quantized range.
sess.run(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 5))
return output.eval()
def testProjectionLayerFProp(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0. , 0.33779466],
[ 0.4527415 , 0.99911398],
[ 0.44320837, 0. ],
[ 0. , 0.04557215]],
[[ 0.69273949, 0. ],
[ 0.30908319, 0. ],
[ 0. , 0. ],
[ 0. , 1.54578114]]]
# pyformat: enable
# pylint: enable=bad-whitespace
for reshape_to_2d in (False, True):
actual = self._evalProjectionLayer(
reshape_to_2d=reshape_to_2d, expect_bn_fold_weights=False)
if reshape_to_2d:
expected_output = np.reshape(np.array(expected_output), (-1, 2))
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testProjectionLayerFPropWithBias(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 4.98987579, 5.03493643],
[ 5.01192808, 5.0917592 ],
[ 5.01156807, 4.99741936],
[ 4.96849394, 5.00982761]],
[[ 5.02098131, 4.98014927],
[ 5.00650883, 4.87676954],
[ 4.98995209, 4.91770315],
[ 4.95948696, 5.138731 ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
    # Tested without batch_norm because batch norm would mostly cancel out
    # the effect of the bias.
actual = self._evalProjectionLayer(
has_bias=True,
batch_norm=False,
expect_bn_fold_weights=False,
activation='RELU6')
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
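  # The ExplicitFolding tests below verify that folding batch norm into the
  # projection weights (bn_fold_weights=True) gives the same outputs as
  # applying batch norm as a separate step, in training and eval mode and with
  # weight normalization, and that folding reports itself as disabled when
  # there is no batch norm to fold.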
def testProjectionLayerExplicitFolding(self):
unfolded = self._evalProjectionLayer(
bn_fold_weights=False, expect_bn_fold_weights=False)
folded = self._evalProjectionLayer(
bn_fold_weights=True, expect_bn_fold_weights=True)
tf.logging.info('unfolded = %s', np.array_repr(unfolded))
tf.logging.info('folded = %s', np.array_repr(folded))
self.assertAllClose(folded, unfolded)
def testProjectionLayerExplicitFoldingEval(self):
unfolded = self._evalProjectionLayer(
bn_fold_weights=False, expect_bn_fold_weights=False, is_eval=True)
folded = self._evalProjectionLayer(
bn_fold_weights=True, expect_bn_fold_weights=True, is_eval=True)
tf.logging.info('unfolded = %s', np.array_repr(unfolded))
tf.logging.info('folded = %s', np.array_repr(folded))
self.assertAllClose(folded, unfolded)
def testProjectionLayerExplicitFoldingNoBatchNorm(self):
unfolded = self._evalProjectionLayer(
batch_norm=False, bn_fold_weights=False, expect_bn_fold_weights=False)
# Note that weight folding will report as disabled because batch norm is
# disabled.
folded = self._evalProjectionLayer(
batch_norm=False, bn_fold_weights=True, expect_bn_fold_weights=False)
tf.logging.info('unfolded = %s', np.array_repr(unfolded))
tf.logging.info('folded = %s', np.array_repr(folded))
self.assertAllClose(folded, unfolded)
def testProjectionLayerExplicitFoldingWithWeightNorm(self):
unfolded = self._evalProjectionLayer(
weight_norm=True, bn_fold_weights=False, expect_bn_fold_weights=False)
folded = self._evalProjectionLayer(
weight_norm=True, bn_fold_weights=True, expect_bn_fold_weights=True)
tf.logging.info('unfolded = %s', np.array_repr(unfolded))
tf.logging.info('folded = %s', np.array_repr(folded))
self.assertAllClose(folded, unfolded)
def testProjectionLayerWeightNorm(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0. , 0.36285588],
[ 0.82909501, 1.07323885],
[ 0.81163716, 0. ],
[ 0. , 0.04895319]],
[[ 1.26859784, 0. ],
[ 0.56601691, 0. ],
[ 0. , 0. ],
[ 0. , 1.66046333]]]
# pyformat: enable
# pylint: enable=bad-whitespace
for reshape_to_2d in (False, True):
actual = self._evalProjectionLayer(
reshape_to_2d=reshape_to_2d, weight_norm=True)
if reshape_to_2d:
expected_output = np.reshape(np.array(expected_output), (-1, 2))
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testProjectionLayerAffineFirstVsLastFProp(self):
"""Compare results of affine first vs. last."""
# ... with batch_norm and activation disabled.
self.assertAllClose(
self._evalProjectionLayer(
batch_norm=False, activation='NONE', affine_last=False),
self._evalProjectionLayer(
batch_norm=False, activation='NONE', affine_last=True))
def testProjectionLayerAffineLastFProp(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output1 = [
[[ 0. , 0. ],
[ 0.03410175, 0.04741348],
[ 0.02665393, -0.02072855],
[-0.01116518, -0.06280501]],
[[ 0.04615254, -0.03589247],
[-0.00376316, -0.0464084 ],
[-0.01111402, -0.13706152],
[-0.02596203, 0.16340451]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = self._evalProjectionLayer(affine_last=True)
print(['actual = ', np.array_repr(actual)])
self.assertAllClose(expected_output1, actual)
def testProjectionLayerBackProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.ProjectionLayer.Params()
params.name = 'proj'
params.dtype = tf.float64
params.input_dim = 3
params.output_dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.is_eval = False
proj_layer = layers.ProjectionLayer(params)
in_padding1 = tf.zeros([2, 4, 1], dtype=tf.float64)
inputs1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 3]), dtype=tf.float64)
output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
loss = tf.reduce_sum(output1)
all_vars = tf.trainable_variables()
self.assertEqual(3, len(all_vars))
grads = tf.gradients(loss, all_vars)
tf.global_variables_initializer().run()
sym_grads = [sg.eval() for sg in grads]
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v, 1e-6)
for v in all_vars
]
for sg, ng in zip(sym_grads, num_grads):
self.assertAllClose(sg, ng, rtol=1e-06, atol=1e-06)
def testProjectionLayerFPropQuantizedWithUnfusedActivation(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[-0.1328125, 0.3125 ],
[ 0.421875 , 0.734375 ],
[ 0.421875 , -0.109375 ],
[-0.6015625, 0.0078125]],
[[ 0.6015625, -0.3046875],
[ 0.3046875, -0.7578125],
[-0.125 , -0.7578125],
[-0.734375 , 0.7578125]]]
# pyformat: enable
# pylint: enable=bad-whitespace
def CheckLayer(proj_layer):
# Should not error because this qtensor is defined.
proj_layer.QTensor('activation', tf.convert_to_tensor(0.))
# The intermediate tensor should be defined.
proj_layer.QTensor('affine_matmul', tf.convert_to_tensor(0.))
# When quantization enabled, batchnorm folding should auto enable.
# TANH is unfused.
actual = self._evalProjectionLayer(
activation='TANH',
quantized=True,
expect_bn_fold_weights=True,
layer_callback=CheckLayer)
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testProjectionLayerFPropQuantizedWithFusedActivation(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0. , 0.3203125],
[ 0.453125 , 0.9375 ],
[ 0.4453125, 0. ],
[ 0. , 0.0078125]],
[[ 0.6953125, 0. ],
[ 0.3125 , 0. ],
[ 0. , 0. ],
[ 0. , 0.9921875]]]
# pyformat: enable
# pylint: enable=bad-whitespace
def CheckLayer(proj_layer):
# Should not error because this qtensor is defined.
proj_layer.QTensor('activation', tf.convert_to_tensor(0.))
with self.assertRaises(AssertionError):
# The intermediate tensor should *not* be quantized.
proj_layer.QTensor('affine_matmul', tf.convert_to_tensor(0.))
# When quantization enabled, batchnorm folding should auto enable.
# RELU6 is fused.
actual = self._evalProjectionLayer(
activation='RELU6',
quantized=True,
expect_bn_fold_weights=True,
layer_callback=CheckLayer)
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testProjectionLayerFPropQuantizedOnlyMatmul(self):
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[-0.0078125, 0.0390625],
[ 0.0078125, 0.09375 ],
[ 0.0078125, 0. ],
[-0.03125 , 0.015625 ]],
[[ 0.015625 , -0.015625 ],
[ 0.0078125, -0.125 ],
[-0.0078125, -0.078125 ],
[-0.0390625, 0.1484375]]]
# pyformat: enable
# pylint: enable=bad-whitespace
def CheckLayer(proj_layer):
# Should not error because this qtensor is defined.
proj_layer.QTensor('affine_matmul', tf.convert_to_tensor(0.))
actual = self._evalProjectionLayer(
activation='NONE',
quantized=True,
batch_norm=False,
expect_bn_fold_weights=False,
layer_callback=CheckLayer)
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testProjectionLayerFPropQuantizedOnlyMatmulBias(self):
# pylint: disable=bad-whitespace
# pyformat: disable
# Saturated because of the out of range bias.
expected_output = [[[0.9921875, 0.9921875], [0.9921875, 0.9921875],
[0.9921875, 0.9921875], [0.9921875, 0.9921875]],
[[0.9921875, 0.9921875], [0.9921875, 0.9921875],
[0.9921875, 0.9921875], [0.9921875, 0.9921875]]]
# pyformat: enable
# pylint: enable=bad-whitespace
def CheckLayer(proj_layer):
# Should not error because this qtensor is defined.
proj_layer.QTensor('affine_matmul', tf.convert_to_tensor(0.))
actual = self._evalProjectionLayer(
activation='NONE',
quantized=True,
has_bias=True,
batch_norm=False,
expect_bn_fold_weights=False,
layer_callback=CheckLayer)
tf.logging.info('expected = %s', expected_output)
tf.logging.info('actual = %s', np.array_repr(actual))
self.assertAllClose(expected_output, actual)
def testFCLayerConstruction(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.FCLayer.Params()
params.name = 'fc'
params.input_dim = 2
params.output_dim = 3
params.params_init = py_utils.WeightInit.Gaussian(0.1)
layers.FCLayer(params)
proj_vars = tf.get_collection('FCLayer_vars')
proj_var_names = [x.name for x in proj_vars]
expected_var_names = ['fc/w/var:0', 'fc/b/var:0']
self.assertEqual(expected_var_names, proj_var_names)
def testFCLayerFProp(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.FCLayer.Params()
params.name = 'fc'
params.input_dim = 3
params.output_dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.1)
proj_layer = layers.FCLayer(params)
inputs = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 3]), dtype=tf.float32)
output = proj_layer.FPropDefaultTheta(inputs)
tf.global_variables_initializer().run()
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0. , 0.04883499],
[ 0.17094055, 0. ],
[ 0.09287541, 0. ],
[ 0. , 0.19471419]],
[[ 0.15290432, 0. ],
[ 0. , 0. ],
[ 0. , 0.10548697],
[ 0. , 0.22610095]]]
# pyformat: enable
# pylint: enable=bad-whitespace
actual = output.eval()
print(['actual = ', np.array_repr(actual)])
self.assertAllClose(expected_output, actual)
def testFCLayerBackProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.FCLayer.Params()
params.name = 'fc'
params.dtype = tf.float64
params.input_dim = 3
params.output_dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.01)
proj_layer = layers.FCLayer(params)
inputs = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 3]), dtype=tf.float64)
output = proj_layer.FPropDefaultTheta(inputs)
loss = tf.reduce_sum(output)
all_vars = tf.trainable_variables()
self.assertEqual(2, len(all_vars))
grads = tf.gradients(loss, all_vars)
tf.global_variables_initializer().run()
sym_grads = [sg.eval() for sg in grads]
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v, 1e-6)
for v in all_vars
]
for sg, ng in zip(sym_grads, num_grads):
self.assertAllClose(sg, ng, rtol=1e-06, atol=1e-06)
def testStackingOverTimeFProp(self):
with self.session(use_gpu=True):
params = layers.StackingOverTime.Params()
params.name = 'stackingOverTime'
params.left_context = 2
params.right_context = 0
params.stride = 2
stacker = layers.StackingOverTime(params)
self.assertEqual(stacker.window_size, 3)
      inputs = tf.constant(
          [
              [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]],  # batch 0
              [[7, 7], [8, 8], [0, 0], [0, 0], [0, 0], [0, 0]]  # batch 1
          ],
          dtype=tf.float32)
      paddings = tf.constant(
          [
              [[0], [0], [0], [0], [0], [0]],  # batch 0
              [[0], [0], [1], [1], [1], [1]]  # batch 1
          ],
          dtype=tf.float32)
outputs, output_paddings = stacker.FProp(inputs, paddings)
tf.global_variables_initializer().run()
print([np.array_repr(outputs.eval())])
      expected_outputs = [
          [[0, 0, 0, 0, 1, 1], [1, 1, 2, 2, 3, 3], [3, 3, 4, 4, 5, 5]],  # batch 0
          [[0, 0, 0, 0, 7, 7], [7, 7, 8, 8, 0, 0], [0, 0, 0, 0, 0, 0]]  # batch 1
      ]
self.assertAllClose(expected_outputs, outputs.eval())
expected_output_paddings = [
[[0], [0], [0]], # batch 0
[[0], [0], [1]] # batch 1
]
self.assertAllClose(expected_output_paddings, output_paddings.eval())
def testStackingOverTimeFProp2(self):
with self.session(use_gpu=True) as sess:
params = layers.StackingOverTime.Params()
params.name = 'stackingOverTime'
params.left_context = 0
params.right_context = 1
params.stride = 2
stacker = layers.StackingOverTime(params)
self.assertEqual(stacker.window_size, 2)
inputs = tf.random_normal([2, 21, 16], seed=78123)
paddings = 1.0 - tf.sequence_mask([9, 14], 21, tf.float32)
paddings = tf.expand_dims(paddings, -1)
outputs, output_paddings = stacker.FProp(inputs, paddings)
tf.global_variables_initializer().run()
inputs_v, outputs_v, paddings_v = sess.run(
[inputs, outputs, output_paddings])
      # Check the unpadded output lengths: ceil(9 / 2) = 5 and ceil(14 / 2) = 7.
self.assertAllEqual([5, 7], np.sum(1.0 - paddings_v, (1, 2)))
# input and output sums are equal
self.assertAllClose(np.sum(inputs_v, (1, 2)), np.sum(outputs_v, (1, 2)))
def testStackingOverTimeIdentityFProp(self):
with self.session(use_gpu=True):
params = layers.StackingOverTime.Params()
params.name = 'stackingOverTime'
params.left_context = 0
params.right_context = 0
params.stride = 1
stacker = layers.StackingOverTime(params)
self.assertEqual(stacker.window_size, 1)
inputs = tf.constant([[[1], [2], [3], [4], [5]]], dtype=tf.float32)
paddings = tf.zeros([1, 5, 1], dtype=tf.float32)
outputs, output_paddings = stacker.FProp(inputs, paddings)
tf.global_variables_initializer().run()
print([np.array_repr(outputs.eval())])
expected_outputs = [[[1], [2], [3], [4], [5]]]
self.assertAllClose(expected_outputs, outputs.eval())
expected_output_paddings = [[[0], [0], [0], [0], [0]]]
self.assertAllClose(expected_output_paddings, output_paddings.eval())
def _testUnstack(self, params, inputs):
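    # Round-trip helper: stacks 'inputs' with the given StackingOverTime
    # params, unstacks the result, and checks that the first expected_length
    # frames of the original sequence are recovered exactly. Frames past the
    # start of the last stride window may be dropped by stacking, which is
    # what the expected_length computation below accounts for.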
with self.session(use_gpu=True) as sess:
stacker = params.Instantiate()
stacked, _ = stacker.FProp(inputs)
unstacked = stacker.Unstack(stacked)
inputs, stacked, unstacked = sess.run([inputs, stacked, unstacked])
expected_length = (
inputs.shape[1] - (inputs.shape[1] - 1) % stacker.params.stride)
self.assertAllClose(inputs[:, :expected_length, :], unstacked)
def testStackingOverTimeUnstack(self):
params = layers.StackingOverTime.Params()
params.name = 'stackingOverTime'
batch_size = 2
length = 7
depth = 3
inputs = tf.reshape(
tf.range(batch_size * length * depth), [batch_size, length, depth])
self._testUnstack(params.Set(left_context=2, stride=1), inputs)
self._testUnstack(params.Set(stride=2), inputs)
self._testUnstack(params.Set(stride=2, right_context=3), inputs)
self._testUnstack(params.Set(stride=3), inputs)
self._testUnstack(params.Set(stride=4, right_context=3), inputs)
class EmbeddingLayerTest(test_utils.TestCase):
def testEmbeddingLayer(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
params = layers.EmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 80000
params.embedding_dim = 128
params.max_num_shards = 4
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
emb_layer = layers.EmbeddingLayer(params)
ids = tf.constant([[89], [100]])
embs = emb_layer.EmbLookupDefaultTheta(ids)
embs_sum = tf.reduce_sum(embs)
tf.global_variables_initializer().run()
test_utils.CompareToGoldenSingleFloat(self, 0.234941, embs_sum.eval())
def testCheckedIds(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
params = layers.EmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 16
params.embedding_dim = 128
params.max_num_shards = 4
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
emb_layer = layers.EmbeddingLayer(params)
neg_ids = tf.constant([[-1]])
neg_embs = emb_layer.EmbLookupDefaultTheta(neg_ids)
oov_ids = tf.constant([[params.vocab_size]])
oov_embs = emb_layer.EmbLookupDefaultTheta(oov_ids)
tf.global_variables_initializer().run()
with self.assertRaises(tf.errors.InvalidArgumentError):
neg_embs.eval()
with self.assertRaises(tf.errors.InvalidArgumentError):
oov_embs.eval()
def testEmbeddingLayerScaling(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
params = layers.EmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 80000
params.embedding_dim = 128
params.max_num_shards = 4
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.scale_sqrt_depth = True
emb_layer = layers.EmbeddingLayer(params)
ids = tf.constant([[89], [100]])
embs = emb_layer.EmbLookupDefaultTheta(ids)
embs_sum = tf.reduce_sum(embs)
tf.global_variables_initializer().run()
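      # With scale_sqrt_depth=True the looked-up embeddings are scaled by
      # sqrt(embedding_dim), so the expected sum is the unscaled golden value
      # from testEmbeddingLayer multiplied by sqrt(128).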
self.assertAllClose(0.23494134843349457 * params.embedding_dim**0.5,
sess.run(embs_sum))
def testEmbeddingLayerWithVN(self):
with self.session(use_gpu=True):
tf.set_random_seed(398847392)
params = layers.EmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 80000
params.embedding_dim = 128
params.max_num_shards = 4
params.params_init = py_utils.WeightInit.Gaussian(0.01, seed=398847392)
params.vn.global_vn = True
params.vn.per_step_vn = False
params.vn.scale = 0.5
params.vn.seed = 398847392
emb_layer = layers.EmbeddingLayer(params)
self.assertEqual(len(emb_layer.vars.Flatten()), 4)
ids = tf.constant([[89], [100]])
embs = emb_layer.EmbLookupDefaultTheta(ids)
embs_sum = tf.reduce_sum(embs)
tf.global_variables_initializer().run()
test_utils.CompareToGoldenSingleFloat(self, -6.807296, embs_sum.eval())
def _testSimpleEmbeddingLayer(self, use_matmul, use_3d_weight_tensor,
fprop_mode):
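    # Helper that builds a SimpleEmbeddingLayer with the requested lookup
    # implementation and verifies that both the default lookup and the
    # CPU-only lookup return the correct rows of the embedding matrix.
    # A 'fprop_mode' of None means the layer picks 'matmul' or 'gather'
    # itself based on 'use_matmul'.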
g = tf.Graph()
with g.as_default():
tf.set_random_seed(398847392)
params = layers.SimpleEmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 8000
params.embedding_dim = 128
params.use_matmul = use_matmul
params.fprop_mode = fprop_mode
params.use_3d_weight_tensor = use_3d_weight_tensor
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
emb_layer = layers.SimpleEmbeddingLayer(params)
expected_fprop_mode = fprop_mode
if expected_fprop_mode is None:
expected_fprop_mode = 'matmul' if use_matmul else 'gather'
self.assertEqual(emb_layer._fprop_mode, expected_fprop_mode)
emb_matrix = emb_layer.vars.wm
ids = tf.constant([[89], [100]])
outputs = emb_layer.EmbLookupDefaultTheta(ids)
fast_outputs = emb_layer.EmbLookupDefaultThetaOnCpu(ids)
with self.session(use_gpu=True, graph=g) as sess:
tf.global_variables_initializer().run()
emb_matrix_val, ids_val, outputs_val, fast_outputs_val = sess.run(
[emb_matrix, ids, outputs, fast_outputs])
self.assertEqual(emb_matrix_val.shape, (8000, 128))
self.assertEqual(ids_val.shape, (2, 1))
self.assertEqual(outputs_val.shape, (2, 1, 128))
self.assertAllClose(emb_matrix_val[89, :], outputs_val[0, 0, :])
self.assertAllClose(emb_matrix_val[100, :], outputs_val[1, 0, :])
self.assertEqual(fast_outputs_val.shape, (2, 1, 128))
self.assertAllClose(emb_matrix_val[89, :], fast_outputs_val[0, 0, :])
self.assertAllClose(emb_matrix_val[100, :], fast_outputs_val[1, 0, :])
def testSimpleEmbeddingLayerForLoop(self):
self._testSimpleEmbeddingLayer(False, True, None)
def testSimpleEmbeddingLayerForLoop2D(self):
self._testSimpleEmbeddingLayer(False, False, None)
def testSimpleEmbeddingLayerMatmul(self):
self._testSimpleEmbeddingLayer(True, False, None)
def testSimpleEmbeddingLayerGather(self):
self._testSimpleEmbeddingLayer(False, False, 'gather')
def testSimpleEmbeddingLayerMasked(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(398847392)
params = layers.SimpleEmbeddingLayer.Params()
      params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 10
params.embedding_dim = 5
params.fprop_mode = 'gather'
params.use_3d_weight_tensor = False
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.apply_pruning = True
emb_layer = layers.SimpleEmbeddingLayer(params)
emb_matrix = emb_layer.vars.wm
ids = tf.constant([[1], [2]])
outputs = emb_layer.EmbLookupDefaultTheta(ids)
self.assertTrue('wm' in emb_layer.vars.wm.name)
self.assertTrue('mask' in emb_layer.vars.mask.name)
self.assertTrue('threshold' in emb_layer.vars.threshold.name)
self.assertEqual(emb_layer.theta.wm.get_shape(), tf.TensorShape([10, 5]))
self.assertEqual(emb_layer.theta.mask.get_shape(), tf.TensorShape([10,
5]))
self.assertEqual(emb_layer.theta.threshold.get_shape(),
tf.TensorShape([]))
embedding_var_count = 1
wts = tf.get_collection('SimpleEmbeddingLayer_vars')
self.assertEqual(embedding_var_count, len(wts))
embedding_mask_count = 1
masks = tf.get_collection('masks')
self.assertEqual(embedding_mask_count, len(masks))
      embedding_threshold_count = 1
      threshold = tf.get_collection('thresholds')
      self.assertEqual(embedding_threshold_count, len(threshold))
with self.session(use_gpu=False, graph=g) as sess:
tf.global_variables_initializer().run()
emb_matrix_val, _, outputs_val = sess.run([emb_matrix, ids, outputs])
self.assertAllClose(emb_matrix_val[1:3], outputs_val[:, 0, :])
def _testSimpleEmbeddingLayerGrad(self, use_matmul, use_3d_weight_tensor):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(398847392)
params = layers.SimpleEmbeddingLayer.Params()
params.name = 'emb'
params.dtype = tf.float32
params.vocab_size = 8000
params.embedding_dim = 128
params.use_matmul = use_matmul
params.use_3d_weight_tensor = use_3d_weight_tensor
params.params_init = py_utils.WeightInit.Gaussian(0.01)
params.vn.global_vn = False
params.vn.per_step_vn = False
emb_layer = layers.SimpleEmbeddingLayer(params)
ids = tf.constant([89, 100, 89, 89])
embs = emb_layer.EmbLookupDefaultTheta(ids) * tf.constant([[0.1], [0.2],
[0.3], [0.4]])
embs_sum = tf.reduce_sum(embs)
emb_weight = emb_layer.vars.wm
emb_grad, = tf.gradients(ys=[embs_sum], xs=[emb_weight])
with self.session(use_gpu=True, graph=g) as sess:
tf.global_variables_initializer().run()
emb_grad_val = sess.run(emb_grad)
if not use_matmul:
# tf.embedding_lookup's gradient is a sparse representation.
# For testing, we convert it to a dense representation.
o_grad_matrix = np.zeros((8000, 128))
for i in range(emb_grad_val.indices.shape[0]):
o_grad_matrix[emb_grad_val.indices[i], :] += emb_grad_val.values[i, :]
emb_grad_val = o_grad_matrix
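      # Each looked-up row is scaled before the reduce_sum, so the gradient
      # for an embedding row is the sum of its scale factors: id 89 appears
      # with scales 0.1 + 0.3 + 0.4 = 0.8 and id 100 with scale 0.2.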
expected_emb_grad = np.zeros(shape=(8000, 128))
expected_emb_grad[89, :] = 0.8
expected_emb_grad[100, :] = 0.2
self.assertAllClose(expected_emb_grad, emb_grad_val)
def testSimpleEmbeddingLayerGradForLoop(self):
self._testSimpleEmbeddingLayerGrad(False, True)
def testSimpleEmbeddingLayerGradForLoop2D(self):
self._testSimpleEmbeddingLayerGrad(False, False)
def testSimpleEmbeddingLayerGradMatmul(self):
self._testSimpleEmbeddingLayerGrad(True, False)
def testCompareEmbeddingLayers(self):
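    # Checks that SimpleEmbeddingLayer and the sharded EmbeddingLayer produce
    # identical lookups and gradients when the original layer is fed the
    # simple layer's weight matrix as its single shard.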
classes = 8000
dims = 128
g = tf.Graph()
with g.as_default():
ids = tf.placeholder(tf.int32)
def CreateSimple():
tf.set_random_seed(398847392)
p = layers.SimpleEmbeddingLayer.Params()
p.name = 'emb'
p.dtype = tf.float32
p.vocab_size = classes
p.embedding_dim = dims
p.params_init = py_utils.WeightInit.Gaussian(0.01)
p.vn.global_vn = False
p.vn.per_step_vn = False
return layers.SimpleEmbeddingLayer(p)
simple = CreateSimple()
simple_outs = simple.EmbLookupDefaultTheta(ids)
simple_grad = tf.gradients(simple_outs, simple.vars.wm)[0]
def CreateOriginal():
tf.set_random_seed(398847392)
p = layers.EmbeddingLayer.Params()
p.name = 'emb'
p.dtype = tf.float32
p.vocab_size = classes
p.embedding_dim = dims
p.max_num_shards = 1
p.params_init = py_utils.WeightInit.Gaussian(0.01)
p.vn.global_vn = False
p.vn.per_step_vn = False
return layers.EmbeddingLayer(p)
original = CreateOriginal()
weight = tf.identity(simple.vars.wm)
theta = py_utils.NestedMap()
theta.wm = [weight]
original_outs = original.EmbLookup(theta, ids)
original_grad = tf.gradients(original_outs, weight)[0]
ids_val = np.random.randint(0, high=classes, size=(4000,))
with self.session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
s_outs, s_grad, o_outs, o_grad = sess.run(
[simple_outs, simple_grad, original_outs, original_grad],
feed_dict={ids: ids_val})
self.assertAllClose(s_outs, o_outs)
self.assertAllClose(s_grad, o_grad)
def testPositionalEmbeddingLayer(self):
with self.session(use_gpu=False) as sess:
p = layers.PositionalEmbeddingLayer.Params()
p.name = 'position_emb'
p.min_timescale = 1
p.max_timescale = 7
p.embedding_dim = 4
seq_length = 11
pos_emb_layer = layers.PositionalEmbeddingLayer(p)
position_embs = pos_emb_layer.FPropDefaultTheta(seq_length)
actual_position_embs, = sess.run([position_embs])
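      # With embedding_dim=4, min_timescale=1 and max_timescale=7 the golden
      # values below follow the usual sinusoidal position encoding: for
      # position t the columns are [sin(t/1), sin(t/7), cos(t/1), cos(t/7)].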
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0.14112 , 0.4155719 , -0.9899925 , 0.90956032],
[-0.7568025 , 0.54083425, -0.65364361, 0.84112918],
[-0.95892417, 0.65507787, 0.28366217, 0.75556135],
[-0.27941549, 0.75597537, 0.96017027, 0.65460002],
[ 0.65698659, 0.84147096, 0.7539022 , 0.54030228],
[ 0.98935831, 0.90982294, -0.14550003, 0.41499668],
[ 0.41211855, 0.9596386 , -0.91113025, 0.28123617],
[-0.54402113, 0.98990309, -0.83907151, 0.14174587]]
# pyformat: enable
# pylint: enable=bad-whitespace
print('expected_position_embs:', expected_output)
print('actual_position_embs:', actual_position_embs)
self.assertAllClose(actual_position_embs, expected_output)
def testPositionalEmbeddingLayerWithPosition(self):
with self.session(use_gpu=False) as sess:
p = layers.PositionalEmbeddingLayer.Params()
p.name = 'position_emb'
p.min_timescale = 1
p.max_timescale = 7
p.embedding_dim = 4
pos_tensor = tf.constant(
np.asarray([[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3],
[0, 1, 2, 0, 1, 2, 3, 4, 0, 1, 0]]),
dtype=tf.int32)
pos_emb_layer = layers.PositionalEmbeddingLayer(p)
position_embs = pos_emb_layer.FPropWithPosition(pos_emb_layer.theta,
pos_tensor)
actual_position_embs, = sess.run([position_embs])
# pylint: disable=bad-whitespace,bad-continuation
# pyformat: disable
expected_output = [
[[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0.14112 , 0.4155719 , -0.9899925 , 0.90956032],
[-0.7568025 , 0.54083425, -0.65364361, 0.84112918],
[-0.95892417, 0.65507787, 0.28366217, 0.75556135],
[-0.27941549, 0.75597537, 0.96017027, 0.65460002],
[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0.14112 , 0.4155719 , -0.9899925 , 0.90956032]],
[[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0.14112 , 0.4155719 , -0.9899925 , 0.90956032],
[-0.7568025 , 0.54083425, -0.65364361, 0.84112918],
[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0. , 0. , 1. , 1. ]]
]
# pyformat: enable
# pylint: enable=bad-whitespace,bad-continuation
print('expected_position_embs:', expected_output)
print('actual_position_embs:', actual_position_embs)
self.assertAllClose(actual_position_embs, expected_output)
def testPositionalEmbeddingLayerWithScaling(self):
with self.session(use_gpu=False) as sess:
p = layers.PositionalEmbeddingLayer.Params()
p.name = 'position_emb'
p.min_timescale = 1
p.max_timescale = 7
p.embedding_dim = 4
p.trainable_scaling = True
p.trainable_scaling_init = 1.0 / np.sqrt(p.embedding_dim)
seq_length = 11
pos_emb_layer = layers.PositionalEmbeddingLayer(p)
position_embs = pos_emb_layer.FPropDefaultTheta(seq_length)
tf.global_variables_initializer().run()
actual_position_embs, = sess.run([position_embs])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[ 0. , 0. , 1. , 1. ],
[ 0.84147096, 0.14237173, 0.54030228, 0.98981327],
[ 0.90929741, 0.28184283, -0.41614676, 0.95946062],
[ 0.14112 , 0.4155719 , -0.9899925 , 0.90956032],
[-0.7568025 , 0.54083425, -0.65364361, 0.84112918],
[-0.95892417, 0.65507787, 0.28366217, 0.75556135],
[-0.27941549, 0.75597537, 0.96017027, 0.65460002],
[ 0.65698659, 0.84147096, 0.7539022 , 0.54030228],
[ 0.98935831, 0.90982294, -0.14550003, 0.41499668],
[ 0.41211855, 0.9596386 , -0.91113025, 0.28123617],
[-0.54402113, 0.98990309, -0.83907151, 0.14174587]]
# pyformat: enable
# pylint: enable=bad-whitespace
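      # The trainable scaling is initialized to 1/sqrt(embedding_dim), so at
      # initialization the output is simply the base sinusoidal encoding
      # scaled by that factor, which is what the assert below checks.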
self.assertAllClose(expected_output / np.sqrt(p.embedding_dim),
actual_position_embs)
class SoftmaxLayerTest(test_utils.TestCase):
def _RunSimpleFullSoftmax(self,
num_shards=1,
chunk_size=0,
inputs=None,
class_ids=None,
class_weights=None,
class_probabilities=None,
num_samples=0,
default_qdomain=None,
training_step=-1,
seed=None,
dtype=tf.float32,
fprop_dtype=None,
apply_pruning=False):
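    # Helper for most SimpleFullSoftmax tests: builds a 10-input, 32-class
    # softmax with the given sharding/chunking/sampling options, runs FProp
    # on small random inputs and returns the evaluated cross-entropy output
    # (total_xent, avg_xent, logits and the per_example_* fields used below).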
if fprop_dtype is None:
fprop_dtype = dtype
with self.session(use_gpu=True, graph=tf.Graph()) as sess:
if seed is not None:
tf.set_random_seed(seed)
if class_ids is None:
class_ids = tf.constant([[1], [5], [10]], dtype=tf.int32)
else:
class_ids = tf.constant(class_ids)
if class_weights is None:
class_weights = tf.constant([1.0, 0.4, 0.8], dtype=fprop_dtype)
else:
class_weights = tf.constant(class_weights)
np.random.seed(12345)
if inputs is None:
inputs = [tf.constant(np.random.rand(3, 10), dtype=fprop_dtype)]
else:
inputs = [tf.constant(inputs, dtype=fprop_dtype)]
params = layers.SimpleFullSoftmax.Params()
params.dtype = dtype
params.fprop_dtype = fprop_dtype
params.name = 'softmax'
params.input_dim = 10
params.num_classes = 32
params.num_shards = num_shards
params.chunk_size = chunk_size
params.apply_pruning = apply_pruning
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
params.random_seed = 12345678
if default_qdomain is not None:
params.qdomain.default = default_qdomain
if num_samples > 0:
# Turn on sampled soft-max; the asserts need to hold for it to be used.
params.num_sampled = num_samples
assert class_probabilities is None
assert chunk_size == 0
assert params.is_eval is not True
params.vn.global_vn = False
softmax = layers.SimpleFullSoftmax(params)
xent_loss = softmax.FProp(
softmax.theta,
inputs,
class_weights=class_weights,
class_ids=class_ids,
class_probabilities=class_probabilities)
all_vars = tf.get_collection('SimpleFullSoftmax_vars')
expected_var_names = []
for i in range(num_shards):
expected_var_names.append(u'softmax/weight_%d/var:0' % i)
expected_var_names.append(u'softmax/bias_%d/var:0' % i)
all_var_names = [v.name for v in all_vars]
self.assertEqual(sorted(expected_var_names), sorted(all_var_names))
tf.global_variables_initializer().run()
if training_step >= 0:
sess.run(tf.assign(py_utils.GetOrCreateGlobalStepVar(), training_step))
return sess.run(xent_loss)
def testSimpleFullSoftmaxMasked(self):
num_shards = 2
apply_pruning = True
params = layers.SimpleFullSoftmax.Params()
params.name = 'softmax'
params.dtype = tf.float32
params.input_dim = 10
params.num_classes = 32
params.fprop_dtype = tf.float32
params.num_shards = num_shards
params.apply_pruning = apply_pruning
params.random_seed = 12345678
softmax_layer = layers.SimpleFullSoftmax(params)
self.assertTrue('weight_0' in softmax_layer.vars.weight_0.name)
self.assertTrue('weight_1' in softmax_layer.vars.weight_1.name)
self.assertTrue('mask_0' in softmax_layer.vars.mask_0.name)
self.assertTrue('mask_1' in softmax_layer.vars.mask_1.name)
self.assertTrue('threshold_0' in softmax_layer.vars.threshold_0.name)
self.assertTrue('threshold_1' in softmax_layer.vars.threshold_1.name)
self.assertEqual(softmax_layer.theta.weight_0.get_shape(),
tf.TensorShape([10, 16]))
self.assertEqual(softmax_layer.theta.weight_1.get_shape(),
tf.TensorShape([10, 16]))
self.assertEqual(softmax_layer.theta.mask_0.get_shape(),
tf.TensorShape([10, 16]))
self.assertEqual(softmax_layer.theta.mask_1.get_shape(),
tf.TensorShape([10, 16]))
self.assertEqual(softmax_layer.theta.threshold_0.get_shape(),
tf.TensorShape([]))
    self.assertEqual(softmax_layer.theta.threshold_1.get_shape(),
                     tf.TensorShape([]))
softmax_var_count = 4 # 2 each for weights and biases (we have 2 shards)
wts = tf.get_collection('SimpleFullSoftmax_vars')
self.assertEqual(softmax_var_count, len(wts))
softmax_mask_count = 2
masks = tf.get_collection('masks')
self.assertEqual(softmax_mask_count, len(masks))
softmax_threshold_count = 2
threshold = tf.get_collection('thresholds')
self.assertEqual(softmax_threshold_count, len(threshold))
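    # The remaining checks re-run the sampled, sharded, non-2D and chunked
    # softmax scenarios with apply_pruning=True to verify that adding the
    # pruning masks/thresholds does not change the golden losses or shapes.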
# Sampled and Masked
xent_loss = self._RunSimpleFullSoftmax(
num_samples=32, seed=12345, apply_pruning=True)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
self.assertNear(loss, 8.681571, 1e-5)
self.assertNear(log_perplexity, 3.946169, 1e-5)
# Sharded and Masked
xent_loss = self._RunSimpleFullSoftmax(num_shards=2, apply_pruning=True)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
self.assertNear(loss, 6.14888, 1e-5)
self.assertNear(log_perplexity, 2.79495, 1e-5)
# Non_2D and Masked
xent_loss = self._RunSimpleFullSoftmax(
inputs=np.random.rand(4, 3, 10),
class_weights=np.ones((4, 3)),
class_ids=np.random.randint(32, size=(4, 3)),
apply_pruning=True)
self.assertEqual(xent_loss.logits.shape, (4, 3, 32))
self.assertEqual(xent_loss.per_example_xent.shape, (4, 3))
self.assertEqual(xent_loss.per_example_weight.shape, (4, 3))
xent_loss = self._RunSimpleFullSoftmax(
inputs=np.random.rand(4, 3, 10),
class_weights=np.ones((4, 3)),
class_probabilities=np.random.uniform(size=(4, 3, 32)),
apply_pruning=True)
self.assertEqual(xent_loss.logits.shape, (4, 3, 32))
self.assertEqual(xent_loss.per_example_xent.shape, (4, 3))
self.assertEqual(xent_loss.per_example_weight.shape, (4, 3))
# Chunked and Masked
for chunk_size in (0, 1, 2, 3, 4, 5):
print('chunk_size = ', chunk_size)
xent_output = self._RunSimpleFullSoftmax(
chunk_size=chunk_size, apply_pruning=True)
loss = xent_output.total_xent
log_perplexity = xent_output.avg_xent
print('xent_output ', xent_output)
print('xent_output.per_example_argmax.dtype ',
xent_output.per_example_argmax.dtype)
self.assertAllClose(loss, 6.22425)
self.assertAllClose(log_perplexity, 2.82920)
self.assertAllEqual(xent_output.per_example_argmax,
np.argmax(xent_output.logits, axis=1))
def testSimpleFullSoftmax_Sampled(self):
xent_loss = self._RunSimpleFullSoftmax(num_samples=32, seed=12345)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
self.assertNear(loss, 8.681571, 1e-5)
self.assertNear(log_perplexity, 3.946169, 1e-5)
def testSimpleFullSoftmax_SampledAndSharded(self):
xent_loss = self._RunSimpleFullSoftmax(
num_shards=4, num_samples=32, seed=12345)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
self.assertNear(loss, 8.510439, 1e-5)
self.assertNear(log_perplexity, 3.868381, 1e-5)
def testSimpleFullSoftmax_Non2D(self):
xent_loss = self._RunSimpleFullSoftmax(
inputs=np.random.rand(4, 3, 10),
class_weights=np.ones((4, 3)),
class_ids=np.random.randint(32, size=(4, 3)))
self.assertEqual(xent_loss.logits.shape, (4, 3, 32))
self.assertEqual(xent_loss.per_example_xent.shape, (4, 3))
self.assertEqual(xent_loss.per_example_weight.shape, (4, 3))
xent_loss = self._RunSimpleFullSoftmax(
inputs=np.random.rand(4, 3, 10),
class_weights=np.ones((4, 3)),
class_probabilities=np.random.uniform(size=(4, 3, 32)))
self.assertEqual(xent_loss.logits.shape, (4, 3, 32))
self.assertEqual(xent_loss.per_example_xent.shape, (4, 3))
self.assertEqual(xent_loss.per_example_weight.shape, (4, 3))
def _testSimpleFullSoftmax_Basic_Helper(self, dtype, fprop_dtype):
xent_loss = self._RunSimpleFullSoftmax(dtype=dtype, fprop_dtype=fprop_dtype)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
print(['loss', loss])
print(['log_perplexity', log_perplexity])
err = 1e-5
if fprop_dtype == tf.float16 or fprop_dtype == tf.bfloat16:
err = 1e-2
self.assertNear(loss, 6.22425, err=err)
self.assertNear(log_perplexity, 2.8292, err=err)
self.assertAllEqual(xent_loss.per_example_argmax,
np.argmax(xent_loss.logits, axis=1))
def testSimpleFullSoftmax_Basic_Float32(self):
self._testSimpleFullSoftmax_Basic_Helper(
dtype=tf.float32, fprop_dtype=tf.float32)
def testSimpleFullSoftmax_Basic_Float32Float16(self):
self._testSimpleFullSoftmax_Basic_Helper(
dtype=tf.float32, fprop_dtype=tf.float16)
def testSimpleFullSoftmax_Sharded(self):
xent_loss = self._RunSimpleFullSoftmax(2)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
print(['loss', loss])
print(['log_perplexity', log_perplexity])
self.assertNear(loss, 6.14888, 1e-5)
self.assertNear(log_perplexity, 2.79495, 1e-5)
def testSimpleFullSoftmax_Chunked(self):
for chunk_size in (0, 1, 2, 3, 4, 5):
print('chunk_size = ', chunk_size)
xent_output = self._RunSimpleFullSoftmax(chunk_size=chunk_size)
loss = xent_output.total_xent
log_perplexity = xent_output.avg_xent
print('xent_output ', xent_output)
print('xent_output.per_example_argmax.dtype ',
xent_output.per_example_argmax.dtype)
self.assertAllClose(loss, 6.22425)
self.assertAllClose(log_perplexity, 2.82920)
self.assertAllEqual(xent_output.per_example_argmax,
np.argmax(xent_output.logits, axis=1))
def testSimpleFullSoftmax_Basic_Distributions(self):
with self.session(use_gpu=False) as sess:
class_ids = tf.constant([1, 5, 10], dtype=tf.int32)
class_weights = tf.constant([1.0, 0.4, 0.8], dtype=tf.float32)
np.random.seed(12345)
inputs = [tf.constant(np.random.rand(3, 10), dtype=tf.float32)]
params = layers.SimpleFullSoftmax.Params()
params.name = 'softmax'
params.input_dim = 10
params.num_classes = 32
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
params.vn.global_vn = False
softmax = layers.SimpleFullSoftmax(params)
xent_loss = softmax.XentLoss(
inputs,
class_weights=class_weights,
class_probabilities=tf.one_hot(class_ids, params.num_classes))
tf.global_variables_initializer().run()
loss = sess.run(xent_loss.total_xent)
log_perplexity = sess.run(xent_loss.avg_xent)
print(['loss', loss])
print(['log_perplexity', log_perplexity])
self.assertNear(loss, 6.22425, 1e-5)
self.assertNear(log_perplexity, 2.8292, 1e-5)
def testSimpleFullSoftmax_GlobalVN(self):
with self.session(use_gpu=False) as sess:
class_ids = tf.constant([1, 5, 10], dtype=tf.int32)
class_weights = tf.constant([1.0, 0.4, 0.8], dtype=tf.float32)
np.random.seed(12345)
inputs = [tf.constant(np.random.rand(3, 10), dtype=tf.float32)]
params = layers.SimpleFullSoftmax.Params()
params.name = 'softmax'
params.input_dim = 10
params.num_classes = 32
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
params.vn.global_vn = True
params.vn.seed = 23456
params.vn.scale = 1.0
softmax = layers.SimpleFullSoftmax(params)
xent_loss = softmax.XentLoss(
inputs, class_weights=class_weights, class_ids=class_ids)
tf.global_variables_initializer().run()
loss = sess.run(xent_loss.total_xent)
log_perplexity = sess.run(xent_loss.avg_xent)
print(['testSimpleFullSoftmax_GlobalVN loss', loss])
print(['testSimpleFullSoftmax_GlobalVN log_perplexity', log_perplexity])
self.assertNear(loss, 19.9612, 1e-4)
self.assertNear(log_perplexity, 3.46426, 1e-4)
def testSimpleFullSoftmax_PerStepVN(self):
with self.session(use_gpu=False) as sess:
class_ids = tf.constant([1, 5, 10], dtype=tf.int32)
class_weights = tf.constant([1.0, 0.4, 0.8], dtype=tf.float32)
np.random.seed(12345)
inputs = [tf.constant(np.random.rand(3, 10), dtype=tf.float32)]
params = layers.SimpleFullSoftmax.Params()
params.name = 'softmax'
params.input_dim = 10
params.num_classes = 32
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
params.vn.global_vn = False
params.vn.per_step_vn = True
params.vn.seed = 23456
params.vn.scale = 1.0
softmax = layers.SimpleFullSoftmax(params)
xent_loss = softmax.XentLoss(
inputs, class_weights=class_weights, class_ids=class_ids)
tf.global_variables_initializer().run()
loss = sess.run(xent_loss.total_xent)
log_perplexity = sess.run(xent_loss.avg_xent)
      print(['testSimpleFullSoftmax_PerStepVN loss', loss])
      print(['testSimpleFullSoftmax_PerStepVN log_perplexity', log_perplexity])
self.assertNear(loss, 19.9612, 1e-4)
self.assertNear(log_perplexity, 3.46426, 1e-4)
def testSimpleFullSoftmax_FakeQuantized(self):
default_qdomain = quant_utils.SymmetricScheduledClipQDomain.Params()
default_qdomain.cc_schedule = quant_utils.FakeQuantizationSchedule.Params(
).Set(
clip_start_step=0, clip_end_step=2, quant_start_step=2)
xent_loss = self._RunSimpleFullSoftmax(
default_qdomain=default_qdomain, training_step=5)
loss = xent_loss.total_xent
log_perplexity = xent_loss.avg_xent
print(['loss', loss])
print(['log_perplexity', log_perplexity])
self.assertNear(loss, 6.285590, 1e-5)
self.assertNear(log_perplexity, 2.857086, 1e-5)
def _RunSimpleFullSoftmaxGradientChecker(self, batch_size, num_classes,
chunk_size, num_shards):
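    # Compares the symbolic gradients of the total cross-entropy w.r.t. the
    # softmax variables against numeric finite-difference gradients, with a
    # looser tolerance for the float32/GPU configuration.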
for (dtype, use_gpu, tolerance) in [(tf.float32, True, 1e-2),
(tf.float64, False, 1e-6)]:
tf.logging.info('dtype %s tolerance %g', dtype, tolerance)
with self.session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
input_dim = 10
np.random.seed(12345)
class_ids = tf.constant(
np.random.randint(num_classes, size=(batch_size, 1)),
dtype=tf.int32)
class_weights = tf.constant(np.random.rand(batch_size), dtype=dtype)
inputs = [
tf.constant(np.random.rand(batch_size, input_dim), dtype=dtype)
]
params = layers.SimpleFullSoftmax.Params()
params.name = 'softmax'
params.dtype = dtype
params.input_dim = input_dim
params.num_classes = num_classes
params.num_shards = num_shards
params.chunk_size = chunk_size
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
params.vn.global_vn = False
softmax = layers.SimpleFullSoftmax(params)
xent_loss = softmax.XentLoss(
inputs, class_weights=class_weights, class_ids=class_ids)
softmax_vars = softmax.vars.Flatten()
# Now add the backward graph.
grads = tf.gradients(xent_loss.total_xent, softmax_vars)
tf.global_variables_initializer().run()
assert len(softmax_vars) == len(grads)
for x, grad_x in zip(softmax_vars, grads):
grad_symbolic = sess.run(grad_x)
grad_numeric = test_utils.ComputeNumericGradient(
sess, xent_loss.total_xent, x)
self.assertAllClose(
grad_symbolic, grad_numeric, atol=tolerance, rtol=tolerance)
def testSimpleFullSoftmaxGradientChecker(self):
self._RunSimpleFullSoftmaxGradientChecker(3, 4, 0, 1)
self._RunSimpleFullSoftmaxGradientChecker(3, 4, 0, 2)
self._RunSimpleFullSoftmaxGradientChecker(3, 4, 2, 2)
self._RunSimpleFullSoftmaxGradientChecker(3, 4, 5, 2)
class SoftmaxLayerLogitsTest(test_utils.TestCase):
"""Testing SoftmaxLayer.Logits()."""
def _Logits(self, params, batch_size=2, seq_length=None):
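    # Helper: instantiates the given softmax params (filling in defaults for
    # input_dim and num_classes), feeds random 2D or 3D inputs to Logits()
    # and returns the evaluated logits after checking their shape.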
with self.session(use_gpu=True, graph=tf.Graph()):
np.random.seed(12345)
tf.set_random_seed(1234)
params.name = 'softmax'
if not params.input_dim:
params.input_dim = 3
if not params.num_classes:
params.num_classes = 4
params.params_init = py_utils.WeightInit.Gaussian(0.5, 123456)
softmax = params.Instantiate()
input_dim = params.input_dim
if seq_length:
inputs = np.random.rand(batch_size, seq_length, input_dim)
else:
inputs = np.random.rand(batch_size, input_dim)
inputs = tf.constant(inputs, dtype=py_utils.FPropDtype(params))
logits = softmax.Logits(softmax.theta, inputs)
if seq_length:
logits = py_utils.HasShape(logits,
[batch_size, seq_length, params.num_classes])
else:
logits = py_utils.HasShape(logits, [batch_size, params.num_classes])
tf.global_variables_initializer().run()
return logits.eval()
def testConvSoftmaxLogits(self):
params = layers.ConvSoftmax.Params()
self.assertAllClose([[0.52536774, -0.17598523, 0.38314393, -0.36068222],
[0.75792629, -0.18001975, 0.42298675, -0.35423514]],
self._Logits(params))
def testSimpleFullSoftmax(self):
params = layers.SimpleFullSoftmax.Params()
self.assertAllClose([[0.52536774, -0.17598523, 0.38314393, -0.36068222],
[0.75792629, -0.18001975, 0.42298675, -0.35423514]],
self._Logits(params))
def testConvSoftmaxLogitsWith3DInputs(self):
params = layers.ConvSoftmax.Params()
logits = self._Logits(params, seq_length=5)
self.assertAllClose(6.9934864, np.sum(logits))
class FeedForwardNetTest(test_utils.TestCase):
def testFeedForwardNetConstruction(self):
with self.session(use_gpu=False):
p = layers.FeedForwardNet.Params().Set(
name='ffn',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=True,
activation='TANH',
params_init=py_utils.WeightInit.Uniform(1.0))
p.dropout.keep_prob = 0.5
proj_l = p.Instantiate()
a = tf.constant(1.0, shape=[20, 10])
proj_l.FPropDefaultTheta(a)
p = layers.FeedForwardNet.Params().Set(
name='ffn2',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=True,
activation='TANH',
params_init=py_utils.WeightInit.Uniform(1.0))
p.dropout = [
layers.DropoutLayer.Params().Set(keep_prob=0.5),
layers.DropoutLayer.Params().Set(keep_prob=0.9)
]
proj_l = p.Instantiate()
a = tf.constant(1.0, shape=[20, 10])
proj_l.FPropDefaultTheta(a)
p = layers.FeedForwardNet.Params().Set(
name='ffn3',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=[True, False],
activation=['TANH', 'RELU'],
params_init=py_utils.WeightInit.Uniform(1.0))
p.dropout = [
layers.DropoutLayer.Params().Set(keep_prob=0.5),
layers.DropoutLayer.Params().Set(keep_prob=0.9)
]
proj_l = p.Instantiate()
a = tf.constant(1.0, shape=[20, 10])
proj_l.FPropDefaultTheta(a)
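  # The next three tests check that FeedForwardNet is equivalent to a stack of
  # individually constructed ProjectionLayers sharing the same initialization:
  # plain, quantized, and with folded batch norm.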
def testFeedForwardNet(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.FeedForwardNet.Params().Set(
name='ffn',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=False,
activation=['RELU', 'NONE'])
params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=837465638)
p.params_init = params_init
feedforward_net = p.Instantiate()
p1 = layers.ProjectionLayer.Params().Set(
name='p1',
input_dim=10,
output_dim=20,
activation='RELU',
batch_norm=False)
p1.params_init = params_init
p1_l = p1.Instantiate()
p2 = layers.ProjectionLayer.Params().Set(
name='p2',
input_dim=20,
output_dim=30,
activation='NONE',
batch_norm=False)
p2.params_init = params_init
p2_l = p2.Instantiate()
a = tf.constant(np.random.rand(5, 10), dtype=tf.float32)
out1 = feedforward_net.FPropDefaultTheta(a)
out2 = p2_l.FPropDefaultTheta(p1_l.FPropDefaultTheta(a))
tf.global_variables_initializer().run()
out1_v, out2_v = sess.run([out1, out2])
self.assertAllClose(out1_v, out2_v)
def testFeedForwardNetQuantized(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
cc_schedule = quant_utils.FakeQuantizationSchedule.Params().Set(
clip_start_step=1,
clip_end_step=2,
quant_start_step=2,
start_cap=8.0,
end_cap=2.0)
proj_qdomain = quant_utils.SymmetricScheduledClipQDomain.Params().Set(
cc_schedule=cc_schedule)
p = layers.FeedForwardNet.Params().Set(
name='ffn',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=False,
activation=['RELU', 'NONE'])
p.qdomain.default = proj_qdomain.Copy()
params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=837465638)
p.params_init = params_init
feedforward_net = p.Instantiate()
p1 = layers.ProjectionLayer.Params().Set(
name='p1',
input_dim=10,
output_dim=20,
activation='RELU',
batch_norm=False)
p1.qdomain.default = proj_qdomain.Copy()
p1.params_init = params_init
p1_l = p1.Instantiate()
p2 = layers.ProjectionLayer.Params().Set(
name='p2',
input_dim=20,
output_dim=30,
activation='NONE',
batch_norm=False)
p2.params_init = params_init
p2.qdomain.default = proj_qdomain.Copy()
p2_l = p2.Instantiate()
a = tf.constant(np.random.rand(5, 10), dtype=tf.float32)
out1 = feedforward_net.FPropDefaultTheta(a)
out2 = p2_l.FPropDefaultTheta(p1_l.FPropDefaultTheta(a))
tf.global_variables_initializer().run()
sess.run(tf.assign(py_utils.GetOrCreateGlobalStepVar(), 5))
out1_v, out2_v = sess.run([out1, out2])
self.assertAllClose(out1_v, out2_v)
def testFeedForwardNetBnFolded(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.FeedForwardNet.Params().Set(
name='ffn',
input_dim=10,
hidden_layer_dims=[20, 30],
batch_norm=True,
bn_fold_weights=True,
activation=['RELU', 'NONE'])
params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=837465638)
p.params_init = params_init
feedforward_net = p.Instantiate()
p1 = layers.ProjectionLayer.Params().Set(
name='p1',
input_dim=10,
output_dim=20,
activation='RELU',
batch_norm=True,
bn_fold_weights=True)
p1.params_init = params_init
p1_l = p1.Instantiate()
p2 = layers.ProjectionLayer.Params().Set(
name='p2',
input_dim=20,
output_dim=30,
activation='NONE',
batch_norm=True,
bn_fold_weights=True)
p2.params_init = params_init
p2_l = p2.Instantiate()
a = tf.constant(np.random.rand(5, 10), dtype=tf.float32)
out1 = feedforward_net.FPropDefaultTheta(a)
out2 = p2_l.FPropDefaultTheta(p1_l.FPropDefaultTheta(a))
tf.global_variables_initializer().run()
out1_v, out2_v = sess.run([out1, out2])
self.assertAllClose(out1_v, out2_v)
def testFeedForwardNetSmokeTest(self):
with self.session(use_gpu=False):
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.FeedForwardNet.Params().Set(
name='ffn',
input_dim=10,
hidden_layer_dims=[20, 30],
activation=['RELU', 'NONE'])
params_init = py_utils.WeightInit.Xavier(scale=1.0, seed=837465638)
p.params_init = params_init
feedforward_net = p.Instantiate()
a = tf.constant(np.random.rand(5, 10), dtype=tf.float32)
out = tf.reduce_sum(feedforward_net.FPropDefaultTheta(a))
out_abs = tf.reduce_sum(tf.abs(feedforward_net.FPropDefaultTheta(a)))
tf.global_variables_initializer().run()
# pyformat: disable
test_utils.CompareToGoldenSingleFloat(self, 8.190775, out.eval(), atol=1e-5) # pylint: disable=line-too-long
# pyformat: enable
test_utils.CompareToGoldenSingleFloat(self, 36.773586, out_abs.eval())
def testDropoutLayerTrain(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(3980847392)
p = layers.DropoutLayer.Params()
p.keep_prob = 0.5
p.random_seed = 1234
p.name = 'dropout'
dl = p.Instantiate()
x = tf.random_normal([10, 10, 10, 3])
xd = dl.FPropDefaultTheta(x)
x, xd = sess.run([x, xd])
self.assertGreater((xd == 0).mean(), 0.3)
self.assertLess((xd == 0).mean(), 0.7)
self.assertAllClose(xd[xd != 0], x[xd != 0] / p.keep_prob)
def testDropoutLayerEval(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(3980847392)
p = layers.DropoutLayer.Params()
p.keep_prob = 0.5
p.random_seed = 1234
p.name = 'dropout'
p.is_eval = True
dl = p.Instantiate()
x = tf.random_normal([10, 10, 10, 3])
xd = dl.FPropDefaultTheta(x)
x, xd = sess.run([x, xd])
self.assertAllEqual(xd, x)
class AddingAccumulatorTest(test_utils.TestCase):
"""Test for AddingAccumulator."""
def testAddingAccumulator(self):
with self.session():
layer_p = layers.IdentityLayer.Params()
layer_p.name = 'test'
layer = layer_p.Instantiate()
layer.RegisterAccumulator('acc1', layers.AddingAccumulator([],
tf.float32))
# Initial value.
self.assertEqual(0.0, layer.accumulators.acc1.GetValue().eval())
# Update/merge.
layer.accumulators.acc1.Update(1.0)
layer.accumulators.acc1.Update(1.0)
self.assertEqual(2.0, layer.accumulators.acc1.GetValue().eval())
# Reset.
layer.accumulators.Transform(lambda acc: acc.Reset())
self.assertEqual(0.0, layer.accumulators.acc1.GetValue().eval())
class BatchNormLayerNoPaddingTest(test_utils.TestCase, parameterized.TestCase):
def testBatchNormLayerNoPaddingConstruction(self):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayerNoPadding.Params()
params.name = 'bn'
params.dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
layers.BatchNormLayerNoPadding(params)
bn_vars = tf.get_collection('BatchNormLayerNoPadding_vars')
bn_var_names = [x.name for x in bn_vars]
expected_var_names = [
'bn/beta/var:0', 'bn/gamma/var:0', 'bn/moving_mean/var:0',
'bn/moving_variance/var:0'
]
self.assertEqual(expected_var_names, bn_var_names)
@parameterized.named_parameters({
'testcase_name': '_eval',
'is_eval': True,
}, {
'testcase_name': '_train',
'is_eval': False,
})
def testBatchNormLayerNoPaddingFProp(self, is_eval):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayerNoPadding.Params()
params.name = 'bn'
params.dim = 3
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = is_eval
bn_layer = layers.BatchNormLayerNoPadding(params)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 3]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
expected_sig1 = 2.6593573 if is_eval else 0
expected_sig2 = 15.4642076 if is_eval else 47.850193
with self.session(use_gpu=True):
tf.global_variables_initializer().run()
self.assertAllClose(expected_sig1, sig1.eval(), atol=1e-5)
self.assertAllClose(expected_sig2, sig2.eval(), atol=1e-5)
def testBatchNormLayerNoPaddingFPropUseGlobalStatsForTraining(self):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayerNoPadding.Params()
params.name = 'bn'
params.dim = 3
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayerNoPadding(params)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 3]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
with self.session(use_gpu=True):
tf.global_variables_initializer().run()
self.assertAllClose(1.19209289551e-06, sig1.eval(), atol=1e-5)
self.assertAllClose(47.8501930237, sig2.eval(), atol=1e-5)
def testBatchNormLayerNoPaddingPostTrainingStepUpdate(self):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayerNoPadding.Params()
params.name = 'bn'
params.dim = 2
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayerNoPadding(params)
bn_layer.accumulators.counts.Update(0.0)
bn_layer.accumulators.mean_ss.Update([1.0, 1.0])
bn_layer.accumulators.variance_ss.Update([5.0, 5.0])
bn_updates = bn_layer.PostTrainingStepUpdate(tf.constant(100))
with self.session(use_gpu=True) as sess:
tf.global_variables_initializer().run()
sess.run(bn_updates)
moving_mean = sess.run(bn_layer.vars.moving_mean)
moving_std = sess.run(bn_layer.vars.moving_variance)
self.assertAllClose([0.0, 0.0], moving_mean)
self.assertAllClose([1.0, 1.0], moving_std)
def testBatchNormLayerNoPaddingFPropForConv(self):
tf.set_random_seed(398847392)
np.random.seed(12345)
params = layers.BatchNormLayerNoPadding.Params()
params.name = 'bn_conv'
params.dim = 32
params.params_init = py_utils.WeightInit.Gaussian(0.1)
params.is_eval = False
bn_layer = layers.BatchNormLayerNoPadding(params)
bn_in1 = tf.constant(
np.random.normal(0.1, 0.5, [2, 8, 4, 32]), dtype=tf.float32)
bn_out = bn_layer.FPropDefaultTheta(bn_in1)
sig1 = tf.reduce_sum(bn_out)
sig2 = tf.reduce_sum(bn_out * bn_out)
with self.session(use_gpu=True):
tf.global_variables_initializer().run()
self.assertAllClose(0.0, sig1.eval(), atol=1e-4)
self.assertAllClose(2039.398681, sig2.eval())
def _BuildDummyStackedBNLayer(self, splits):
num_micro_batches = 8
if splits == 0:
endpoint = layers.BatchNormLayerNoPadding.Params().Set(
decay=0.997, name='bn', dim=1)
else:
cell_tpl = []
for split in range(splits):
nets_to_split = [
layers.BatchNormLayerNoPadding.Params().Set(
decay=0.997, name='bn_{}'.format(split), dim=1),
]
split_layer = gpipe.FeatureExtractionLayer.Params().Set(
name='split_{}'.format(split), sub=nets_to_split)
cell_tpl.append(split_layer)
endpoint = gpipe.PipeliningLayer.Params().Set(
name='pipeline',
num_micro_batches=num_micro_batches,
cell_tpl=cell_tpl,
before_tpl=[])
layer = endpoint.Instantiate()
return layer
@parameterized.named_parameters({
'testcase_name': '_baseline',
'splits': 0,
}, {
'testcase_name': '_two_splits',
'splits': 2,
}, {
'testcase_name': '_four_splits',
'splits': 4,
})
def testBatchNormLayerNoPaddingAccumulators(self, splits):
batch_size = 1024
with self.session(graph=tf.Graph()) as sess:
# Construct a network where loss = w * x + b
inputs = tf.concat([
tf.ones([batch_size // 2, 1, 1, 1]),
tf.zeros([batch_size // 2, 1, 1, 1])
],
axis=0)
net = self._BuildDummyStackedBNLayer(splits)
logits = net.FPropDefaultTheta(inputs)
loss = tf.reduce_mean(logits)
grads = tf.gradients(loss, tf.trainable_variables())
# Check the accumulator values
counts = []
means = []
variances = []
for i in range(splits):
l = net.children['split_{}'.format(i)].children['bn_{}'.format(i)]
counts.append(l.accumulators.counts.GetValue())
means.append(l.accumulators.mean_ss.GetValue())
variances.append(l.accumulators.variance_ss.GetValue())
if splits == 0:
counts.append(net.accumulators.counts.GetValue())
means.append(net.accumulators.mean_ss.GetValue())
variances.append(net.accumulators.variance_ss.GetValue())
post_training_step_updates = net.PostTrainingStepUpdate(
net.theta.global_step)
sess.run(tf.global_variables_initializer())
_, count_vals, mean_vals, var_vals = sess.run(
[grads, counts, means, variances])
self.assertSameElements(count_vals, {batch_size})
self.assertEqual(batch_size // 2, mean_vals[0])
if len(mean_vals) > 1:
self.assertSameElements(mean_vals[1:], {0})
self.assertEqual(batch_size // 2, var_vals[0])
if len(var_vals) > 1:
self.assertSameElements(var_vals[1:], {0})
sess.run(post_training_step_updates)
moving_vars = sess.run(tf.get_collection('moving_vars'))
self.assertEqual(0.0015, moving_vars[0])
self.assertNear(0.997750, moving_vars[1], err=1.0e-6)
class LayerNormTest(test_utils.TestCase):
def testLayerNormFProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.LayerNorm.Params()
p.name = 'ln'
p.input_dim = 3
layer_norm = layers.LayerNorm(p)
npy_input = np.random.normal(1.0, 0.5,
[2, 4, 4, p.input_dim]).astype('float32')
inputs = tf.constant(npy_input, dtype=tf.float32)
output = layer_norm.FPropDefaultTheta(inputs)
tf.global_variables_initializer().run()
sym_output = sess.run(output)
# Mean should be zero and variance should be close to one.
self.assertNear(0.0, sym_output.sum(), 1e-5)
self.assertNear(1.0, np.var(sym_output), 1e-4)
# Compare with numpy.
mean = npy_input.mean(-1, keepdims=True)
variance = np.mean(np.square(npy_input - mean), -1, keepdims=True)
npy_output = (npy_input - mean) / np.sqrt(variance + p.epsilon)
self.assertAllClose(sym_output, npy_output)
def testLayerNormBProp(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.LayerNorm.Params()
p.name = 'ln'
p.input_dim = 3
layer_norm = layers.LayerNorm(p)
inputs = tf.constant(
np.random.normal(0.1, 0.5, [2, 4, 4, p.input_dim]), dtype=tf.float32)
output = layer_norm.FPropDefaultTheta(inputs)
loss = tf.reduce_sum(output)
all_vars = tf.trainable_variables()
self.assertEqual(2, len(all_vars))
grads = tf.gradients(loss, all_vars)
tf.global_variables_initializer().run()
sym_grads = [sg.eval() for sg in grads]
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v) for v in all_vars
]
for sg, ng in zip(sym_grads, num_grads):
self.assertAllClose(sg, ng, rtol=1e-02, atol=1e-02)
class DeterministicDropoutTest(test_utils.TestCase, parameterized.TestCase):
def testDeterministicDropoutLayer(self):
params = layers.DeterministicDropoutLayer.Params().Set(keep_prob=0.7)
params.name = 'drop'
dropout = layers.DeterministicDropoutLayer(params)
x = tf.ones([4, 6], dtype=tf.float32)
x_expected = np.array([
[1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 1, 0],
[1, 0, 0, 1, 1, 1],
]) / 0.7
with self.session():
tf.assign(py_utils.GetOrCreateGlobalStepVar(), 1234).eval()
py_utils.ResetStepSeed(seed=5678)
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertAllClose(x_expected, x_val)
self.assertEqual(5679, py_utils.GetStepSeed().eval())
# Different step seed gives different result.
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertNotAllClose(x_expected, x_val)
# Different global step gives different result
tf.assign(py_utils.GetOrCreateGlobalStepVar(), 1235).eval()
py_utils.ResetStepSeed(seed=5678)
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertNotAllClose(x_expected, x_val)
# The same seeds in the same session is consistent.
tf.assign(py_utils.GetOrCreateGlobalStepVar(), 1234).eval()
py_utils.ResetStepSeed(seed=5678)
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertAllClose(x_expected, x_val)
# The same seeds in a different session is consistent.
with self.session():
tf.assign(py_utils.GetOrCreateGlobalStepVar(), 1234).eval()
py_utils.ResetStepSeed(seed=5678)
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertAllClose(x_expected, x_val)
def testNoiseShapeBroadcastDims(self):
params = layers.DeterministicDropoutLayer.Params().Set(
keep_prob=0.7, noise_shape_broadcast_dims=[-1])
params.name = 'drop'
dropout = layers.DeterministicDropoutLayer(params)
x = tf.ones([4, 6])
x_expected = np.array([
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]) / 0.7
with self.session():
tf.assign(py_utils.GetOrCreateGlobalStepVar(), 1234).eval()
self.assertEqual(1234, dropout.theta.global_step.eval())
py_utils.ResetStepSeed(seed=5678)
x_val = dropout.FPropDefaultTheta(x).eval()
self.assertEqual(5679, py_utils.GetStepSeed().eval())
self.assertAllClose(x_expected, x_val)
@parameterized.named_parameters(
{
'testcase_name': 'baseline',
'splits': 1,
'num_micro_batches': 1
},
{
'testcase_name': 'OneSplitTwoMicroBatches',
'splits': 1,
'num_micro_batches': 2
},
{
'testcase_name': 'TwoSplitsOneMicroBatch',
'splits': 2,
'num_micro_batches': 1
},
{
'testcase_name': 'TwoSplitsTwoMicroBatches',
'splits': 2,
'num_micro_batches': 2
},
)
def testDropoutInRecurrent(self, splits=1, num_micro_batches=1):
"""Test to verify the drop mask used in fprop and bprop is identical."""
assert splits in [1, 2, 4]
with self.session() as sess:
tf.set_random_seed(12345)
num_layers = 4
# Build a model with 4 dropout layers.
blocks = []
for l in range(num_layers):
blocks.append(layers.DeterministicDropoutLayer.Params().Set(
name='dropout_{}'.format(l), keep_prob=0.7))
# Divide the model into splits partitions.
cell_tpl = []
blocks_per_split = num_layers // splits
for i in range(splits):
sub = blocks[i * blocks_per_split:(i + 1) * blocks_per_split]
cell_tpl.append(gpipe.FeatureExtractionLayer.Params().Set(
name='cell_{}'.format(i), sub=sub))
# Parallelize partitions using pipeline.
p = gpipe.PipeliningLayer.Params().Set(
name='pipeline',
num_micro_batches=num_micro_batches,
cell_tpl=cell_tpl)
# Fake input
x = tf.ones([2, 3])
# Construct weights.
w = tf.get_variable(
'w', shape=[2, 3], initializer=tf.constant_initializer([[1] * 3] * 2))
mdl = p.Instantiate()
y = mdl.FPropDefaultTheta(x * w)
# Construct loss function such that gradients = final activation.
loss = tf.reduce_sum(y)
grads = py_utils.ComputeGradients(loss, py_utils.NestedMap(w=w))
tf.global_variables_initializer().run()
y_val = sess.run(y)
grads_val = sess.run(grads)['w'][1]
self.assertAllClose(y_val, grads_val)
class GradNormTrackerTest(test_utils.TestCase):
def testGradNormTracker(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.GradNormTracker.Params().Set(
name='grad_norm_tracker', clip_threshold=3.0)
grad_norm_tracker = p.Instantiate()
grad_norm = tf.placeholder(tf.float32)
grad_norm_clip = grad_norm_tracker.FPropDefaultTheta(grad_norm)
tf.global_variables_initializer().run()
random_normal = np.exp(np.random.normal(5.0, 1.0, size=10000))
# We expect roughly 16% of the outliers to be rejected.
outliers = np.exp(np.random.normal(7.0, 1.0, size=100))
total_rejections = 0
for i in range(100):
for j in range(100):
sess.run([grad_norm_clip], {grad_norm: random_normal[i * 100 + j]})
clip = sess.run([grad_norm_clip], {grad_norm: outliers[i]})[0]
if clip == 0.0:
total_rejections += 1
# Q(yonghui): Why is total_rejections not deterministic?
print('total_rejections', total_rejections)
self.assertGreater(total_rejections, 5)
def testGradNormTrackerClipCapMin(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.GradNormTracker.Params().Set(
name='grad_norm_tracker',
clip_threshold=3.0,
grad_norm_clip_cap_min=math.exp(10.0))
grad_norm_tracker = p.Instantiate()
grad_norm = tf.placeholder(tf.float32)
grad_norm_clip = grad_norm_tracker.FPropDefaultTheta(grad_norm)
tf.global_variables_initializer().run()
random_normal = np.exp(np.random.normal(5.0, 1.0, size=10000))
# We expect no outliers to be rejected, due to the grad_norm_clip_cap_min.
outliers = np.exp(np.random.normal(7.0, 1.0, size=100))
total_rejections = 0
for i in range(100):
for j in range(100):
sess.run([grad_norm_clip], {grad_norm: random_normal[i * 100 + j]})
clip = sess.run([grad_norm_clip], {grad_norm: outliers[i]})[0]
if clip == 0.0:
total_rejections += 1
print('total_rejections', total_rejections)
self.assertEqual(total_rejections, 0)
def testGradNormTrackerHasNan(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.GradNormTracker.Params().Set(
name='grad_norm_tracker', clip_threshold=3.0)
grad_norm_tracker = p.Instantiate()
grad_norm = tf.placeholder(tf.float32)
has_nan = tf.cast(tf.ones([]), dtype=tf.bool)
grad_norm_clip = grad_norm_tracker.FPropDefaultTheta(grad_norm, has_nan)
tf.global_variables_initializer().run()
random_normal = np.exp(np.random.normal(5.0, 1.0, size=10000))
outliers = np.exp(np.random.normal(7.0, 1.0, size=100))
total_rejections = 0
for i in range(100):
for j in range(100):
sess.run([grad_norm_clip], {grad_norm: random_normal[i * 100 + j]})
clip = sess.run([grad_norm_clip], {grad_norm: outliers[i]})[0]
if clip == 0.0:
total_rejections += 1
self.assertEqual(total_rejections, 100)
class HighwaySkipLayerTest(test_utils.TestCase):
def testHighwaySkipLayerConstruction(self):
with self.session(use_gpu=False):
p = layers.HighwaySkipLayer.Params().Set(
name='gffn',
input_dim=10,
carry_bias_init=1.0,
couple_carry_transform_gates=True,
batch_norm=False,
params_init=py_utils.WeightInit.Uniform(1.0))
proj_l = p.Instantiate()
a = tf.constant(1.0, shape=[20, 10])
b = tf.constant(-2.0, shape=[20, 10])
proj_l.FPropDefaultTheta(a, b)
def testHighwaySkipLayerCarryGate(self):
with self.session(use_gpu=False) as sess:
tf.set_random_seed(398847392)
np.random.seed(12345)
p = layers.HighwaySkipLayer.Params().Set(
name='gffn',
input_dim=10,
carry_bias_init=1000.0,
couple_carry_transform_gates=True,
batch_norm=False,
params_init=py_utils.WeightInit.Uniform(1.0))
proj_l = p.Instantiate()
a = tf.constant(1.0, shape=[20, 10])
b = tf.constant(-2.0, shape=[20, 10])
out = proj_l.FPropDefaultTheta(a, b)
tf.global_variables_initializer().run()
a, out = sess.run([a, out])
self.assertAllClose(a, out)
class UniformLabelSmootherTest(test_utils.TestCase):
def testUniformLabelSmoother(self):
with self.session(use_gpu=False):
params = layers.UniformLabelSmoother.Params()
params.name = 'uls'
params.num_classes = 5
params.uncertainty = 0.1
smooth_layer = layers.UniformLabelSmoother(params)
target_labels = tf.constant([[0, 1, 2, 3, 3, 3, 4]], dtype=tf.int32)
target_ids = tf.constant([[0, 0, 1, 2, 3, 3, 3]], dtype=tf.int32)
target_paddings = tf.zeros(tf.shape(target_ids))
output = smooth_layer.FPropDefaultTheta(target_paddings, target_labels,
target_ids)
tf.global_variables_initializer().run()
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [[
[0.89999998, 0.025 , 0.025 , 0.025 , 0.025 ],
[0.025 , 0.89999998, 0.025 , 0.025 , 0.025 ],
[0.025 , 0.025 , 0.89999998, 0.025 , 0.025 ],
[0.025 , 0.025 , 0.025 , 0.89999998, 0.025 ],
[0.025 , 0.025 , 0.025 , 0.89999998, 0.025 ],
[0.025 , 0.025 , 0.025 , 0.89999998, 0.025 ],
[0.025 , 0.025 , 0.025 , 0.025 , 0.89999998]
]]
# pyformat: enable
# pylint: enable=bad-whitespace
output_v = output.eval()
self.assertAllClose(expected_output, output_v, atol=1e-2, rtol=1e-2)
self.assertAllClose(np.ones(output_v.shape[:-1]), output_v.sum(-1))
def testUniformLabelSmootherLargerToken(self):
with self.session(use_gpu=False):
params = layers.UniformLabelSmoother.Params()
params.name = 'uls'
params.num_classes = 5
params.uncertainty = 0.1
params.uncertainty_larger = 0.2
params.token_id_uncertainty_larger = 4
smooth_layer = layers.UniformLabelSmoother(params)
target_labels = tf.constant([[0, 1, 2, 3, 3, 3, 3]], dtype=tf.int32)
target_ids = tf.constant([[0, 0, 1, 2, 4, 4, 4]], dtype=tf.int32)
target_paddings = tf.zeros(tf.shape(target_ids))
output = smooth_layer.FPropDefaultTheta(target_paddings, target_labels,
target_ids)
tf.global_variables_initializer().run()
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [[
[0.89999998, 0.025 , 0.025 , 0.025 , 0.025 ],
[0.025 , 0.89999998, 0.025 , 0.025 , 0.025 ],
[0.025 , 0.025 , 0.89999998, 0.025 , 0.025 ],
[0.025 , 0.025 , 0.025 , 0.89999998, 0.025 ],
[0.05 , 0.05 , 0.05 , 0.80000001, 0.05 ],
[0.05 , 0.05 , 0.05 , 0.80000001, 0.05 ],
[0.05 , 0.05 , 0.05 , 0.80000001, 0.05 ]
]]
# pyformat: enable
# pylint: enable=bad-whitespace
output_v = output.eval()
self.assertAllClose(expected_output, output_v, atol=1e-2, rtol=1e-2)
self.assertAllClose(np.ones(output_v.shape[:-1]), output_v.sum(-1))
class WeightedSumLayerTest(test_utils.TestCase):
def testWeightedSumLayer(self):
with self.session(use_gpu=True) as sess:
np.random.seed(505837249)
depth = 4
batch = 2
n_sources = 3
ctxs = [[[1.0, 2.0, 3.0, 4.0], [2.0, 3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0, 6.0], [6.0, 7.0, 8.0, 9.0]],
[[4.0, 5.0, 6.0, 7.0], [7.0, 8.0, 1.0, 2.0]]]
p = layers.WeightedSumLayer.Params()
p.name = 'transparent_layer'
p.num_sources = n_sources
p.random_seed = 505837249
merger = p.Instantiate()
ctxs = [tf.expand_dims(i, 2) for i in ctxs]
ctx = tf.squeeze(merger.FProp(merger.theta, ctxs), 2)
tf.global_variables_initializer().run()
actual_ctx = sess.run(ctx)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [[ 2.66666675, 3.66666675, 4.66666698, 5.66666698],
[ 5.0, 6.0, 4.33333349, 5.33333349]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertEqual(actual_ctx.shape, (batch, depth))
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
def testWeightedSumLayerGlobalWeightAndMinimalProb(self):
with self.session(use_gpu=True) as sess:
np.random.seed(505837249)
depth = 4
batch = 2
n_sources = 3
ctxs = [[[1.0, 2.0, 3.0, 4.0], [2.0, 3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0, 6.0], [6.0, 7.0, 8.0, 9.0]],
[[4.0, 5.0, 6.0, 7.0], [7.0, 8.0, 1.0, 2.0]]]
p = layers.WeightedSumLayer.Params()
p.name = 'transparent_layer'
p.num_sources = n_sources
p.random_seed = 505837249
p.minimal_prob = 0.01
p.global_weight_scale = 10.0
merger = p.Instantiate()
ctxs = [tf.expand_dims(i, 2) for i in ctxs]
ctx = tf.squeeze(merger.FProp(merger.theta, ctxs), 2)
tf.global_variables_initializer().run()
actual_ctx = sess.run(ctx)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [[ 2.66666675, 3.66666675, 4.66666698, 5.66666698],
[ 5.0, 6.0, 4.33333349, 5.33333349]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertEqual(actual_ctx.shape, (batch, depth))
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
class GatedAverageLayerTest(test_utils.TestCase):
def testGatedAverageLayer(self):
with self.session(use_gpu=True) as sess:
np.random.seed(505837249)
depth = 4
batch = 2
num_inputs = 3
inp_1 = np.asarray([[0.0, 0.0, 0.0, 0.0], [-1.0, -1.0, 1.0, 1.0]],
dtype=np.float32)
inp_2 = np.asarray([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, 1.0, 1.0]],
dtype=np.float32)
inp_3 = np.asarray([[-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, 1.0, 1.0]],
dtype=np.float32)
p = layers.GatedAverageLayer.Params()
p.name = 'gated_avg_layer'
p.num_inputs = num_inputs
p.num_nodes = depth
p.random_seed = 505837249
g_avg = p.Instantiate()
avg = g_avg.FProp(g_avg.theta, [inp_1, inp_2, inp_3])
tf.global_variables_initializer().run()
actual_avg = sess.run(avg)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_avg = [
[ 0.13070658, 0.13070658, 0.13070658, 0.13070658],
[ -1.0, -1.0, 1.0 , 1.0]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertEqual(actual_avg.shape, (batch, depth))
self.assertAllClose(expected_avg, actual_avg, rtol=1e-05, atol=1e-05)
class LHUCLayerTest(test_utils.TestCase):
def testLHUCLayer(self):
with self.session(use_gpu=True) as sess:
np.random.seed(505837249)
depth = 4
batch = 2
inp = np.asarray([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]],
dtype=np.float32)
p = layers.LHUCLayer.Params()
p.name = 'lhuc_layer'
p.input_dim = depth
p.random_seed = 505837249
lhuc = p.Instantiate()
lhuc = lhuc.FProp(lhuc.theta, inp)
tf.global_variables_initializer().run()
actual_avg = sess.run(lhuc)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_avg = [[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertEqual(actual_avg.shape, (batch, depth))
self.assertAllClose(expected_avg, actual_avg, rtol=1e-05, atol=1e-05)
class ResidualAdapterLayerTest(test_utils.TestCase):
def testResidualAdapterLayer(self):
with self.session(use_gpu=True) as sess:
np.random.seed(505837249)
depth = 4
batch = 2
inp = np.asarray([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]],
dtype=np.float32)
p = layers.ResidualAdapterLayer.Params()
p.name = 'resadap_layer'
p.input_dim = depth
p.bottleneck_dim = 2
p.random_seed = 505837249
resadap = p.Instantiate()
resadap = resadap.FProp(resadap.theta, inp)
tf.global_variables_initializer().run()
actual_avg = sess.run(resadap)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_avg = [[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertEqual(actual_avg.shape, (batch, depth))
self.assertAllClose(expected_avg, actual_avg, rtol=1e-05, atol=1e-05)
class GluLayerTest(test_utils.TestCase):
def testGlu(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(3980847392)
inputs = tf.random_normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers.GluLayer.Params()
p.name = 'glu_layers'
p.input_dim = 3
glu_layer = layers.GluLayer(p)
h = glu_layer.FPropDefaultTheta(inputs, paddings)
tf.global_variables_initializer().run()
actual_layer_output = sess.run(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ -1.84272185e-01, -3.82728219e-01, 8.69752645e-01],
[ 4.42533880e-01, 1.51665461e+00, 3.26201534e+00]],
[[ -7.06624031e-01, -6.52632236e-01, 1.22156203e+00],
[ 1.66484845e+00, 5.98078966e-01, 1.14039946e+00]],
[[ 3.26439053e-01, 2.47359693e-01, -1.14889514e+00],
[ 7.71084905e-01, 1.07083774e+00, 1.74589559e-01]],
[[ 5.70576251e-01, 7.95466423e-01, -4.07778949e-01],
[ -8.71581078e-01, -5.38501918e-01, -2.50373930e-01]],
[[ -3.88817638e-01, 5.84501982e-01, -6.60797715e-01],
[ -1.34579837e+00, -2.18637614e-03, 1.55258143e+00]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
def testGluWithoutResidual(self):
with self.session(use_gpu=True) as sess:
tf.set_random_seed(3980847392)
inputs = tf.random_normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers.GluLayer.Params()
p.name = 'glu_layers'
p.input_dim = 3
p.output_dim = 4
p.apply_residual = False
glu_layer = layers.GluLayer(p)
h = glu_layer.FPropDefaultTheta(inputs, paddings)
tf.global_variables_initializer().run()
actual_layer_output = sess.run(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0.2498899 , 0. , 0.62683833, 0. ],
[ 0.34115699, 0. , 0.38020864, 0. ]],
[[ 0.3014423 , 0. , 0.59274423, 0. ],
[ 0. , 0.35897657, 0.2908403 , 0.03678071]],
[[ 0. , 0.78786391, 0. , 0.38839644],
[ 0. , 0.44012907, 0. , 0.41553062]],
[[ 0. , 0.61838603, 0. , 0.41521466],
[ 0.34117079, 0. , 0.0372162 , 0. ]],
[[ 0. , 0. , 0. , 0.28136203],
[ 0.34413674, 0. , 0.30943182, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
if __name__ == '__main__':
tf.test.main()
|
import souvlaki as sv
def mutate_word(word, style):
if style.startswith('$'):
if len(word) < 2:
return word.upper()
return word[0].title() + word[1:]
elif style.islower():
return word.lower()
elif style.istitle():
return word.title()
elif style.isupper():
return word.upper()
else:
raise ValueError('Could not infer capitalization style from word: ' + str(style))
def generate_word(token, word_source):
if token[0] == 'NOUN':
return(mutate_word(word_source.noun(), token[1]))
elif token[0] == 'ADJ':
return(mutate_word(word_source.adjective(), token[1]))
elif token[0] == 'PREFIX':
return(mutate_word(word_source.prefix(), token[1]))
else:
raise ValueError('Invalid part of speech: ' + str(token[0]))
def generate_from_tokens(tokens, word_source):
if len(tokens) == 0:
return ''
num_names = 1
if tokens[0][0] == 'INTEGER':
if len(tokens) < 2:
raise ValueError('Integer token must be followed by a space')
num_names = int(tokens[0][1])
tokens = tokens[2:]
if num_names == 0:
raise ValueError('Number of names to generate must be nonzero')
names = []
for i in range(num_names):
name = ''
for token in tokens:
if token[0] == 'DELIMITER':
if not token[1] == '&':
name += token[1]
else:
name += generate_word(token, word_source)
names.append(name)
if num_names == 1:
return names[0]
return names
def generate(string, word_source):
return generate_from_tokens(sv.parse(string), word_source)
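# Illustrative usage sketch (not part of the original module): generate_from_tokens
# expects (PART_OF_SPEECH, style_or_value) tuples and a word source exposing
# noun(), adjective() and prefix(). The stub word source below is hypothetical.
if __name__ == '__main__':
    class _StubWordSource(object):
        def noun(self):
            return 'falcon'

        def adjective(self):
            return 'crimson'

        def prefix(self):
            return 'aero'

    tokens = [('ADJ', 'Title'), ('DELIMITER', ' '), ('NOUN', 'Title')]
    print(generate_from_tokens(tokens, _StubWordSource()))  # -> "Crimson Falcon"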
|
from app import db
class Temperature(db.Model):
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Text)
degrees = db.Column(db.Float)
|
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\
.abstract_timing_dependence import AbstractTimingDependence
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\
.synapse_structure_weight_only import SynapseStructureWeightOnly
from spynnaker.pyNN.models.neuron.plasticity.stdp.common \
import plasticity_helpers
import logging
logger = logging.getLogger(__name__)
# Constants
LOOKUP_TAU_SIZE = 256
LOOKUP_TAU_SHIFT = 0
class TimingDependenceVogels2011(AbstractTimingDependence):
def __init__(self, alpha, tau=20.0):
AbstractTimingDependence.__init__(self)
self._alpha = alpha
self._tau = tau
self._synapse_structure = SynapseStructureWeightOnly()
@property
def tau(self):
return self._tau
def is_same_as(self, other):
if (other is None) or (not isinstance(
other, TimingDependenceVogels2011)):
return False
return ((self._tau == other._tau) and (self._alpha == other._alpha))
@property
def vertex_executable_suffix(self):
return "vogels_2011"
@property
def pre_trace_n_bytes(self):
# Trace entries consist of a single 16-bit number
return 2
def get_parameters_sdram_usage_in_bytes(self):
return 4 + (2 * LOOKUP_TAU_SIZE)
@property
def n_weight_terms(self):
return 1
def write_parameters(self, spec, machine_time_step, weight_scales):
# Check timestep is valid
if machine_time_step != 1000:
raise NotImplementedError("STDP LUT generation currently only "
"supports 1ms timesteps")
# Write alpha to spec
fixed_point_alpha = plasticity_helpers.float_to_fixed(
self._alpha, plasticity_helpers.STDP_FIXED_POINT_ONE)
spec.write_value(data=fixed_point_alpha, data_type=DataType.INT32)
# Write lookup table
plasticity_helpers.write_exp_lut(
spec, self.tau, LOOKUP_TAU_SIZE, LOOKUP_TAU_SHIFT)
@property
def synaptic_structure(self):
return self._synapse_structure
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
from typing import Dict, List, Optional
import frappe
from frappe.core.doctype.doctype.doctype import (
CannotIndexedError,
DoctypeLinkError,
HiddenAndMandatoryWithoutDefaultError,
IllegalMandatoryError,
InvalidFieldNameError,
UniqueFieldnameError,
WrongOptionsDoctypeLinkError,
validate_links_table_fieldnames,
)
# test_records = frappe.get_test_records('DocType')
class TestDocType(unittest.TestCase):
def tearDown(self):
frappe.db.rollback()
def test_validate_name(self):
self.assertRaises(frappe.NameError, new_doctype("_Some DocType").insert)
self.assertRaises(frappe.NameError, new_doctype("8Some DocType").insert)
self.assertRaises(frappe.NameError, new_doctype("Some (DocType)").insert)
self.assertRaises(
frappe.NameError,
new_doctype("Some Doctype with a name whose length is more than 61 characters").insert,
)
for name in ("Some DocType", "Some_DocType", "Some-DocType"):
if frappe.db.exists("DocType", name):
frappe.delete_doc("DocType", name)
doc = new_doctype(name).insert()
doc.delete()
def test_doctype_unique_constraint_dropped(self):
if frappe.db.exists("DocType", "With_Unique"):
frappe.delete_doc("DocType", "With_Unique")
dt = new_doctype("With_Unique", unique=1)
dt.insert()
doc1 = frappe.new_doc("With_Unique")
doc2 = frappe.new_doc("With_Unique")
doc1.some_fieldname = "Something"
doc1.name = "one"
doc2.some_fieldname = "Something"
doc2.name = "two"
doc1.insert()
self.assertRaises(frappe.UniqueValidationError, doc2.insert)
frappe.db.rollback()
dt.fields[0].unique = 0
dt.save()
doc2.insert()
doc1.delete()
doc2.delete()
def test_validate_search_fields(self):
doc = new_doctype("Test Search Fields")
doc.search_fields = "some_fieldname"
doc.insert()
self.assertEqual(doc.name, "Test Search Fields")
# check if invalid fieldname is allowed or not
doc.search_fields = "some_fieldname_1"
self.assertRaises(frappe.ValidationError, doc.save)
# check that fields which hold no value (e.g. HTML fields) are not allowed in search fields
field = doc.append("fields", {})
field.fieldname = "some_html_field"
field.fieldtype = "HTML"
field.label = "Some HTML Field"
doc.search_fields = "some_fieldname,some_html_field"
self.assertRaises(frappe.ValidationError, doc.save)
def test_depends_on_fields(self):
doc = new_doctype("Test Depends On", depends_on="eval:doc.__islocal == 0")
doc.insert()
# check that the assignment operator ('=') is not allowed in depends_on
field = doc.fields[0]
field.depends_on = "eval:doc.__islocal = 0"
self.assertRaises(frappe.ValidationError, doc.save)
def test_all_depends_on_fields_conditions(self):
import re
docfields = frappe.get_all(
"DocField",
or_filters={
"ifnull(depends_on, '')": ("!=", ""),
"ifnull(collapsible_depends_on, '')": ("!=", ""),
"ifnull(mandatory_depends_on, '')": ("!=", ""),
"ifnull(read_only_depends_on, '')": ("!=", ""),
},
fields=[
"parent",
"depends_on",
"collapsible_depends_on",
"mandatory_depends_on",
"read_only_depends_on",
"fieldname",
"fieldtype",
],
)
pattern = r'[\w\.:_]+\s*={1}\s*[\w\.@\'"]+'
for field in docfields:
for depends_on in [
"depends_on",
"collapsible_depends_on",
"mandatory_depends_on",
"read_only_depends_on",
]:
condition = field.get(depends_on)
if condition:
self.assertFalse(re.match(pattern, condition))
def test_data_field_options(self):
doctype_name = "Test Data Fields"
valid_data_field_options = frappe.model.data_field_options + ("",)
invalid_data_field_options = ("Invalid Option 1", frappe.utils.random_string(5))
for field_option in valid_data_field_options + invalid_data_field_options:
test_doctype = frappe.get_doc(
{
"doctype": "DocType",
"name": doctype_name,
"module": "Core",
"custom": 1,
"fields": [
{"fieldname": "{0}_field".format(field_option), "fieldtype": "Data", "options": field_option}
],
}
)
if field_option in invalid_data_field_options:
# assert that only data options in frappe.model.data_field_options are valid
self.assertRaises(frappe.ValidationError, test_doctype.insert)
else:
test_doctype.insert()
self.assertEqual(test_doctype.name, doctype_name)
test_doctype.delete()
def test_sync_field_order(self):
import os
from frappe.modules.import_file import get_file_path
# create test doctype
test_doctype = frappe.get_doc(
{
"doctype": "DocType",
"module": "Core",
"fields": [
{"label": "Field 1", "fieldname": "field_1", "fieldtype": "Data"},
{"label": "Field 2", "fieldname": "field_2", "fieldtype": "Data"},
{"label": "Field 3", "fieldname": "field_3", "fieldtype": "Data"},
{"label": "Field 4", "fieldname": "field_4", "fieldtype": "Data"},
],
"permissions": [{"role": "System Manager", "read": 1}],
"name": "Test Field Order DocType",
"__islocal": 1,
}
)
path = get_file_path(test_doctype.module, test_doctype.doctype, test_doctype.name)
initial_fields_order = ["field_1", "field_2", "field_3", "field_4"]
frappe.delete_doc_if_exists("DocType", "Test Field Order DocType")
if os.path.isfile(path):
os.remove(path)
try:
frappe.flags.allow_doctype_export = 1
test_doctype.save()
# assert that field_order list is being created with the default order
test_doctype_json = frappe.get_file_json(path)
self.assertTrue(test_doctype_json.get("field_order"))
self.assertEqual(len(test_doctype_json["fields"]), len(test_doctype_json["field_order"]))
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]], test_doctype_json["field_order"]
)
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]], initial_fields_order
)
self.assertListEqual(test_doctype_json["field_order"], initial_fields_order)
# remove field_order to test reload_doc/sync/migrate is backwards compatible without field_order
del test_doctype_json["field_order"]
with open(path, "w+") as txtfile:
txtfile.write(frappe.as_json(test_doctype_json))
# assert that field_order is actually removed from the json file
test_doctype_json = frappe.get_file_json(path)
self.assertFalse(test_doctype_json.get("field_order"))
# make sure that migrate/sync is backwards compatible without field_order
frappe.reload_doctype(test_doctype.name, force=True)
test_doctype.reload()
# assert that field_order list is being created with the default order again
test_doctype.save()
test_doctype_json = frappe.get_file_json(path)
self.assertTrue(test_doctype_json.get("field_order"))
self.assertEqual(len(test_doctype_json["fields"]), len(test_doctype_json["field_order"]))
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]], test_doctype_json["field_order"]
)
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]], initial_fields_order
)
self.assertListEqual(test_doctype_json["field_order"], initial_fields_order)
# reorder fields: swap row 1 and 3
test_doctype.fields[0], test_doctype.fields[2] = test_doctype.fields[2], test_doctype.fields[0]
for i, f in enumerate(test_doctype.fields):
f.idx = i + 1
# assert that reordering fields only affects `field_order` rather than `fields` attr
test_doctype.save()
test_doctype_json = frappe.get_file_json(path)
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]], initial_fields_order
)
self.assertListEqual(
test_doctype_json["field_order"], ["field_3", "field_2", "field_1", "field_4"]
)
# reorder `field_order` in the json file: swap row 2 and 4
test_doctype_json["field_order"][1], test_doctype_json["field_order"][3] = (
test_doctype_json["field_order"][3],
test_doctype_json["field_order"][1],
)
with open(path, "w+") as txtfile:
txtfile.write(frappe.as_json(test_doctype_json))
# assert that reordering `field_order` from json file is reflected in DocType upon migrate/sync
frappe.reload_doctype(test_doctype.name, force=True)
test_doctype.reload()
self.assertListEqual(
[f.fieldname for f in test_doctype.fields], ["field_3", "field_4", "field_1", "field_2"]
)
# insert row in the middle and remove first row (field 3)
test_doctype.append("fields", {"label": "Field 5", "fieldname": "field_5", "fieldtype": "Data"})
test_doctype.fields[4], test_doctype.fields[3] = test_doctype.fields[3], test_doctype.fields[4]
test_doctype.fields[3], test_doctype.fields[2] = test_doctype.fields[2], test_doctype.fields[3]
test_doctype.remove(test_doctype.fields[0])
for i, f in enumerate(test_doctype.fields):
f.idx = i + 1
test_doctype.save()
test_doctype_json = frappe.get_file_json(path)
self.assertListEqual(
[f["fieldname"] for f in test_doctype_json["fields"]],
["field_1", "field_2", "field_4", "field_5"],
)
self.assertListEqual(
test_doctype_json["field_order"], ["field_4", "field_5", "field_1", "field_2"]
)
except:
raise
finally:
frappe.flags.allow_doctype_export = 0
def test_unique_field_name_for_two_fields(self):
doc = new_doctype("Test Unique Field")
field_1 = doc.append("fields", {})
field_1.fieldname = "some_fieldname_1"
field_1.fieldtype = "Data"
field_2 = doc.append("fields", {})
field_2.fieldname = "some_fieldname_1"
field_2.fieldtype = "Data"
self.assertRaises(UniqueFieldnameError, doc.insert)
def test_fieldname_is_not_name(self):
doc = new_doctype("Test Name Field")
field_1 = doc.append("fields", {})
field_1.label = "Name"
field_1.fieldtype = "Data"
doc.insert()
self.assertEqual(doc.fields[1].fieldname, "name1")
doc.fields[1].fieldname = "name"
self.assertRaises(InvalidFieldNameError, doc.save)
def test_illegal_mandatory_validation(self):
doc = new_doctype("Test Illegal mandatory")
field_1 = doc.append("fields", {})
field_1.fieldname = "some_fieldname_1"
field_1.fieldtype = "Section Break"
field_1.reqd = 1
self.assertRaises(IllegalMandatoryError, doc.insert)
def test_link_with_wrong_and_no_options(self):
doc = new_doctype("Test link")
field_1 = doc.append("fields", {})
field_1.fieldname = "some_fieldname_1"
field_1.fieldtype = "Link"
self.assertRaises(DoctypeLinkError, doc.insert)
field_1.options = "wrongdoctype"
self.assertRaises(WrongOptionsDoctypeLinkError, doc.insert)
def test_hidden_and_mandatory_without_default(self):
doc = new_doctype("Test hidden and mandatory")
field_1 = doc.append("fields", {})
field_1.fieldname = "some_fieldname_1"
field_1.fieldtype = "Data"
field_1.reqd = 1
field_1.hidden = 1
self.assertRaises(HiddenAndMandatoryWithoutDefaultError, doc.insert)
def test_field_can_not_be_indexed_validation(self):
doc = new_doctype("Test index")
field_1 = doc.append("fields", {})
field_1.fieldname = "some_fieldname_1"
field_1.fieldtype = "Long Text"
field_1.search_index = 1
self.assertRaises(CannotIndexedError, doc.insert)
def test_cancel_link_doctype(self):
import json
from frappe.desk.form.linked_with import cancel_all_linked_docs, get_submitted_linked_docs
# create doctype
link_doc = new_doctype("Test Linked Doctype")
link_doc.is_submittable = 1
for data in link_doc.get("permissions"):
data.submit = 1
data.cancel = 1
link_doc.insert()
doc = new_doctype("Test Doctype")
doc.is_submittable = 1
field_2 = doc.append("fields", {})
field_2.label = "Test Linked Doctype"
field_2.fieldname = "test_linked_doctype"
field_2.fieldtype = "Link"
field_2.options = "Test Linked Doctype"
for data in link_doc.get("permissions"):
data.submit = 1
data.cancel = 1
doc.insert()
# create doctype data
data_link_doc = frappe.new_doc("Test Linked Doctype")
data_link_doc.some_fieldname = "Data1"
data_link_doc.insert()
data_link_doc.save()
data_link_doc.submit()
data_doc = frappe.new_doc("Test Doctype")
data_doc.some_fieldname = "Data1"
data_doc.test_linked_doctype = data_link_doc.name
data_doc.insert()
data_doc.save()
data_doc.submit()
docs = get_submitted_linked_docs(link_doc.name, data_link_doc.name)
dump_docs = json.dumps(docs.get("docs"))
cancel_all_linked_docs(dump_docs)
data_link_doc.cancel()
data_doc.load_from_db()
self.assertEqual(data_link_doc.docstatus, 2)
self.assertEqual(data_doc.docstatus, 2)
# delete doctype record
data_doc.delete()
data_link_doc.delete()
# delete doctype
link_doc.delete()
doc.delete()
frappe.db.commit()
def test_ignore_cancelation_of_linked_doctype_during_cancel(self):
import json
from frappe.desk.form.linked_with import cancel_all_linked_docs, get_submitted_linked_docs
# create linked doctype
link_doc = new_doctype("Test Linked Doctype 1")
link_doc.is_submittable = 1
for data in link_doc.get("permissions"):
data.submit = 1
data.cancel = 1
link_doc.insert()
# create first parent doctype
test_doc_1 = new_doctype("Test Doctype 1")
test_doc_1.is_submittable = 1
field_2 = test_doc_1.append("fields", {})
field_2.label = "Test Linked Doctype 1"
field_2.fieldname = "test_linked_doctype_a"
field_2.fieldtype = "Link"
field_2.options = "Test Linked Doctype 1"
for data in test_doc_1.get("permissions"):
data.submit = 1
data.cancel = 1
test_doc_1.insert()
# create second parent doctype
doc = new_doctype("Test Doctype 2")
doc.is_submittable = 1
field_2 = doc.append("fields", {})
field_2.label = "Test Linked Doctype 1"
field_2.fieldname = "test_linked_doctype_a"
field_2.fieldtype = "Link"
field_2.options = "Test Linked Doctype 1"
for data in link_doc.get("permissions"):
data.submit = 1
data.cancel = 1
doc.insert()
# create doctype data
data_link_doc_1 = frappe.new_doc("Test Linked Doctype 1")
data_link_doc_1.some_fieldname = "Data1"
data_link_doc_1.insert()
data_link_doc_1.save()
data_link_doc_1.submit()
data_doc_2 = frappe.new_doc("Test Doctype 1")
data_doc_2.some_fieldname = "Data1"
data_doc_2.test_linked_doctype_a = data_link_doc_1.name
data_doc_2.insert()
data_doc_2.save()
data_doc_2.submit()
data_doc = frappe.new_doc("Test Doctype 2")
data_doc.some_fieldname = "Data1"
data_doc.test_linked_doctype_a = data_link_doc_1.name
data_doc.insert()
data_doc.save()
data_doc.submit()
docs = get_submitted_linked_docs(link_doc.name, data_link_doc_1.name)
dump_docs = json.dumps(docs.get("docs"))
cancel_all_linked_docs(dump_docs, ignore_doctypes_on_cancel_all=["Test Doctype 2"])
# checking that doc for Test Doctype 2 is not canceled
self.assertRaises(frappe.LinkExistsError, data_link_doc_1.cancel)
data_doc.load_from_db()
data_doc_2.load_from_db()
self.assertEqual(data_link_doc_1.docstatus, 2)
# linked doc is canceled
self.assertEqual(data_doc_2.docstatus, 2)
# ignored doctype 2 during cancel
self.assertEqual(data_doc.docstatus, 1)
# delete doctype record
data_doc.cancel()
data_doc.delete()
data_doc_2.delete()
data_link_doc_1.delete()
# delete doctype
link_doc.delete()
doc.delete()
test_doc_1.delete()
frappe.db.commit()
def test_links_table_fieldname_validation(self):
doc = new_doctype("Test Links Table Validation")
# check valid data
doc.append("links", {"link_doctype": "User", "link_fieldname": "first_name"})
validate_links_table_fieldnames(doc) # no error
doc.links = [] # reset links table
# check invalid doctype
doc.append("links", {"link_doctype": "User2", "link_fieldname": "first_name"})
self.assertRaises(frappe.DoesNotExistError, validate_links_table_fieldnames, doc)
doc.links = [] # reset links table
# check invalid fieldname
doc.append("links", {"link_doctype": "User", "link_fieldname": "a_field_that_does_not_exists"})
self.assertRaises(InvalidFieldNameError, validate_links_table_fieldnames, doc)
def test_create_virtual_doctype(self):
"""Test virtual DOcTYpe."""
virtual_doc = new_doctype("Test Virtual Doctype")
virtual_doc.is_virtual = 1
virtual_doc.insert()
virtual_doc.save()
doc = frappe.get_doc("DocType", "Test Virtual Doctype")
self.assertEqual(doc.is_virtual, 1)
self.assertFalse(frappe.db.table_exists("Test Virtual Doctype"))
def test_default_fieldname(self):
fields = [
{"label": "title", "fieldname": "title", "fieldtype": "Data", "default": "{some_fieldname}"}
]
dt = new_doctype("DT with default field", fields=fields)
dt.insert()
dt.delete()
def test_autoincremented_doctype_transition(self):
frappe.delete_doc("testy_autoinc_dt")
dt = new_doctype("testy_autoinc_dt", autoname="autoincrement").insert(ignore_permissions=True)
dt.autoname = "hash"
try:
dt.save(ignore_permissions=True)
except frappe.ValidationError as e:
self.assertEqual(e.args[0], "Cannot change to/from Autoincrement naming rule")
else:
self.fail("Shouldnt be possible to transition autoincremented doctype to any other naming rule")
finally:
# cleanup
dt.delete(ignore_permissions=True)
def new_doctype(
name, unique: bool = False, depends_on: str = "", fields: Optional[List[Dict]] = None, **kwargs
):
doc = frappe.get_doc(
{
"doctype": "DocType",
"module": "Core",
"custom": 1,
"fields": [
{
"label": "Some Field",
"fieldname": "some_fieldname",
"fieldtype": "Data",
"unique": unique,
"depends_on": depends_on,
}
],
"permissions": [
{
"role": "System Manager",
"read": 1,
}
],
"name": name,
**kwargs,
}
)
if fields:
for f in fields:
doc.append("fields", f)
return doc
|
from random import choice
languages = [
'python',
'C++',
'JavaScript',
'Java',
'go',
'ruby',
'Kotlin',
'Dart',
'Swift',
]
print('----------------------')
print(choice(languages))
print('----------------------')
|
# hash_filter.py
import hashlib
def j2_hash_filter(value, hash_type="sha1"):
"""
Example filter providing custom Jinja2 filter - hash
Hash type defaults to 'sha1' if one is not specified
:param value: value to be hashed
:param hash_type: valid hash type
:return: computed hash as a hexadecimal string
"""
hash_func = getattr(hashlib, hash_type, None)
if hash_func:
computed_hash = hash_func(value.encode("utf-8")).hexdigest()
else:
raise AttributeError(
"No hashing function named {hname}".format(hname=hash_type)
)
return computed_hash
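# Illustrative registration sketch (not part of the original module; assumes the
# jinja2 package is available): expose j2_hash_filter as a custom "hash" filter.
if __name__ == "__main__":
    import jinja2

    env = jinja2.Environment()
    env.filters["hash"] = j2_hash_filter
    template = env.from_string("{{ 'interface Gi0/1' | hash('md5') }}")
    print(template.render())  # prints the MD5 hex digest of the string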
|
"""
random
"""
import random
start = 0
end = 100
seed = 123
rnd = random.Random(seed) # random object with seed
print(rnd.random())
print(random.random())
print(random.randint(start, end))
print(random.randrange(end))
print(random.randrange(start+2, end))
print(random.randrange(start+2, end, step=2))
students = [
["student 1", random.randint(0, 20), random.randint(0, 20), random.randint(0, 20)],
["student 2", random.randint(0, 20), random.randint(0, 20), random.randint(0, 20)],
["student 3", random.randint(0, 20), random.randint(0, 20), random.randint(0, 20)],
["student 4", random.randint(0, 20), random.randint(0, 20), random.randint(0, 20)]
]
print(students)
random.shuffle(students)
print(students)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, CTERA Networks Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ctera_filer_volume
short_description: CTERA-Networks Filer volume configuration and management
description:
- Create, modify and delete volumes.
extends_documentation_fragment:
- ctera.ctera.ctera
author:
- Saimon Michelson (@saimonation)
- Ygal Blum (@ygalblum)
options:
state:
description:
- Whether the specified volume should exist or not.
type: str
choices: ['present', 'absent']
default: 'present'
name:
description: The name of the volume
required: True
type: str
filesystem:
description: Filesystem to use, defaults to xfs
required: False
type: str
size:
description: Size of the volume in MBs; if not set, the entire disk will be used
required: False
type: int
device:
description: Name of the device to use for the volume; can be left as None if the gateway has only one
required: False
type: str
passphrase:
description: Passphrase for the volume
required: False
type: str
requirements:
- cterasdk
'''
EXAMPLES = '''
- name: create new volume
ctera_filer_volume:
name: main
ctera_host: "{{ ctera_filer_hostname }}"
ctera_user: "{{ ctera_filer_user }}"
ctera_password: "{{ ctera_filer_password }}"
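# Additional illustrative example (not part of the original module documentation):
# remove a volume by setting state to absent
- name: delete volume
  ctera_filer_volume:
    name: main
    state: absent
    ctera_host: "{{ ctera_filer_hostname }}"
    ctera_user: "{{ ctera_filer_user }}"
    ctera_password: "{{ ctera_filer_password }}"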
'''
RETURN = '''
name:
description: Name of the newly created volume
returned: when state is present
type: str
sample: main
size:
description: Size of the newly created volume
returned: when state is present
type: str
sample: 1024
'''
import ansible_collections.ctera.ctera.plugins.module_utils.ctera_common as ctera_common
from ansible_collections.ctera.ctera.plugins.module_utils.ctera_filer_base import CteraFilerBase
try:
from cterasdk import CTERAException
except ImportError: # pragma: no cover
pass # caught by ctera_common
class CteraFilerVolume(CteraFilerBase):
_create_params = ['name', 'size', 'filesystem', 'device', 'passphrase']
def __init__(self):
super().__init__(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
name=dict(type='str', required=True),
size=dict(type='int', required=False),
filesystem=dict(type='str', required=False),
device=dict(type='str', required=False),
passphrase=dict(type='str', required=False, no_log=True)
))
@property
def _generic_failure_message(self): # pragma: no cover
return 'Volume management failed'
def _execute(self):
state = self.parameters.pop('state')
volume = self._get_volume()
if state == 'present':
self._ensure_present(volume)
else:
self._ensure_absent(volume)
def _ensure_present(self, volume):
if volume:
modified_attributes = ctera_common.get_modified_attributes(volume, self.parameters)
if modified_attributes:
desired_size = modified_attributes.get('size')
if desired_size is not None:
self._ctera_filer.volumes.modify(self.parameters['name'], size=desired_size)
self.ansible_module.ctera_return_value().changed().msg('Volume modified').put(
name=self.parameters['name'], size=desired_size)
else:
self.ansible_module.ctera_return_value().skipped().msg('Currently you can only modify the volume size').put(
name=self.parameters['name'])
else:
self.ansible_module.ctera_return_value().skipped().msg('Volume details did not change').put(name=self.parameters['name'])
else:
create_params = {k: v for k, v in self.parameters.items() if k in CteraFilerVolume._create_params}
self._ctera_filer.volumes.add(**create_params)
self.ansible_module.ctera_return_value().changed().msg('Volume created').put(**create_params)
def _ensure_absent(self, volume):
if volume:
self._ctera_filer.volumes.delete(self.parameters['name'])
self.ansible_module.ctera_return_value().changed().msg('Volume deleted').put(name=self.parameters['name'])
else:
self.ansible_module.ctera_return_value().skipped().msg('Volume already does not exist').put(name=self.parameters['name'])
def _get_volume(self):
volume = None
try:
volume = self._ctera_filer.volumes.get(name=self.parameters['name'])
except CTERAException as error:
if error.response.code != 404: # pylint: disable=no-member
raise
return self._to_volume_dict(volume) if volume else {}
@staticmethod
def _to_volume_dict(volume):
volume_dict = {k: v for k, v in volume.__dict__.items() if not k.startswith("_")}
return volume_dict
def main(): # pragma: no cover
CteraFilerVolume().run()
if __name__ == '__main__': # pragma: no cover
main()
|
#!/usr/bin/python -i
import sys
import xml.etree.ElementTree as etree
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
#############################
# vuid_mapping.py script
#
# VUID Mapping Details
# The Vulkan spec creation process automatically generates string-based unique IDs for each Valid Usage statement
# For implicit VUs, the format is VUID-<func|struct>-[<param_name>]-<type>
# func|struct is the name of the API function or structure that the VU is under
# param_name is an optional entry with the name of the function or struct parameter
# type is the type of implicit check, see table below for possible values
#
# For explicit VUs, the format is VUID-<func|struct>-[<param_name>]-<uniqueid>
# All fields are the same as implicit VUs except the last parameter is a globally unique integer ID instead of a string type
#
# The values below are used to map the strings into unique integers that are used for the unique enum values returned by debug callbacks
# Here's how the bits of the numerical unique ID map to the ID type and values
# 31:21 - 11 bits that map to unique value for the function/struct
# 20:1 - 20 bits that map to param-type combo for implicit VU and uniqueid for explicit VU
# 0 - 1 bit on for implicit VU or off for explicit VU
#
# For implicit VUs 20:1 is split into 20:9 for parameter and 8:1 for type
FUNC_STRUCT_SHIFT = 21
EXPLICIT_ID_SHIFT = 1
IMPLICIT_TYPE_SHIFT = 1
IMPLICIT_PARAM_SHIFT = 9
explicit_bit0 = 0x0 # All explicit IDs are even
implicit_bit0 = 0x1 # All implicit IDs are odd
# Implicit type values, shifted up by ID_SHIFT bits in final ID
implicit_type_map = {
'parameter' : 0,
'requiredbitmask' : 1,
'zerobitmask' : 2,
'parent' : 3,
'commonparent' : 4,
'sType' : 5,
'pNext' : 6,
'unique' : 7,
'queuetype' : 8,
'recording' : 9,
'cmdpool' : 10,
'renderpass' : 11,
'bufferlevel' : 12,
'arraylength' : 13,
}
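# Illustrative sketch (not part of the original script) of how the bit fields
# described above combine into an implicit VUID: bits 31:21 hold the func/struct
# id (taken from func_struct_id_map below), 20:9 the parameter index, 8:1 the
# implicit check type, and bit 0 is set to mark the ID as implicit.
def _example_implicit_vuid(func_struct_id, param_idx, implicit_type):
    return ((func_struct_id << FUNC_STRUCT_SHIFT) |
            (param_idx << IMPLICIT_PARAM_SHIFT) |
            (implicit_type_map[implicit_type] << IMPLICIT_TYPE_SHIFT) |
            implicit_bit0)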
# Function/struct value mappings, shifted up FUNC_STRUCT_SHIFT bits in final ID
func_struct_id_map = {
'VkAcquireNextImageInfoKHX' : 0,
'VkAllocationCallbacks' : 1,
'VkAndroidSurfaceCreateInfoKHR' : 2,
'VkApplicationInfo' : 3,
'VkAttachmentDescription' : 4,
'VkAttachmentReference' : 5,
'VkBindBufferMemoryInfoKHR' : 6,
'VkBindImageMemoryInfoKHR' : 7,
'VkBindImageMemorySwapchainInfoKHX' : 8,
'VkBindSparseInfo' : 9,
'VkBufferCreateInfo' : 10,
'VkBufferImageCopy' : 11,
'VkBufferMemoryBarrier' : 12,
'VkBufferViewCreateInfo' : 13,
'VkClearAttachment' : 14,
'VkClearDepthStencilValue' : 15,
'VkClearValue' : 16,
'VkCmdProcessCommandsInfoNVX' : 17,
'VkCmdReserveSpaceForCommandsInfoNVX' : 18,
'VkCommandBufferAllocateInfo' : 19,
'VkCommandBufferBeginInfo' : 20,
'VkCommandBufferInheritanceInfo' : 21,
'VkCommandPoolCreateInfo' : 22,
'VkComponentMapping' : 23,
'VkComputePipelineCreateInfo' : 24,
'VkCopyDescriptorSet' : 25,
'VkD3D12FenceSubmitInfoKHR' : 26,
'VkDebugMarkerMarkerInfoEXT' : 27,
'VkDebugMarkerObjectNameInfoEXT' : 28,
'VkDebugMarkerObjectTagInfoEXT' : 29,
'VkDebugReportCallbackCreateInfoEXT' : 30,
'VkDedicatedAllocationBufferCreateInfoNV' : 31,
'VkDedicatedAllocationImageCreateInfoNV' : 32,
'VkDedicatedAllocationMemoryAllocateInfoNV' : 33,
'VkDescriptorBufferInfo' : 34,
'VkDescriptorImageInfo' : 35,
'VkDescriptorPoolCreateInfo' : 36,
'VkDescriptorPoolSize' : 37,
'VkDescriptorSetAllocateInfo' : 38,
'VkDescriptorSetLayoutBinding' : 39,
'VkDescriptorSetLayoutCreateInfo' : 40,
'VkDescriptorUpdateTemplateCreateInfoKHR' : 41,
'VkDescriptorUpdateTemplateEntryKHR' : 42,
'VkDeviceCreateInfo' : 43,
'VkDeviceEventInfoEXT' : 44,
'VkDeviceGeneratedCommandsFeaturesNVX' : 45,
'VkDeviceGeneratedCommandsLimitsNVX' : 46,
'VkDeviceGroupBindSparseInfoKHX' : 47,
'VkDeviceGroupCommandBufferBeginInfoKHX' : 48,
'VkDeviceGroupDeviceCreateInfoKHX' : 49,
'VkDeviceGroupPresentInfoKHX' : 50,
'VkDeviceGroupRenderPassBeginInfoKHX' : 51,
'VkDeviceGroupSubmitInfoKHX' : 52,
'VkDeviceGroupSwapchainCreateInfoKHX' : 53,
'VkDeviceQueueCreateInfo' : 54,
'VkDispatchIndirectCommand' : 55,
'VkDisplayEventInfoEXT' : 56,
'VkDisplayModeCreateInfoKHR' : 57,
'VkDisplayPowerInfoEXT' : 58,
'VkDisplayPresentInfoKHR' : 59,
'VkDisplaySurfaceCreateInfoKHR' : 60,
'VkDrawIndexedIndirectCommand' : 61,
'VkDrawIndirectCommand' : 62,
'VkEventCreateInfo' : 63,
'VkExportMemoryAllocateInfoKHR' : 64,
'VkExportMemoryAllocateInfoNV' : 65,
'VkExportMemoryWin32HandleInfoKHR' : 66,
'VkExportMemoryWin32HandleInfoNV' : 67,
'VkExportSemaphoreCreateInfoKHR' : 68,
'VkExportSemaphoreWin32HandleInfoKHR' : 69,
'VkExternalMemoryBufferCreateInfoKHR' : 70,
'VkExternalMemoryImageCreateInfoKHR' : 71,
'VkExternalMemoryImageCreateInfoNV' : 72,
'VkFenceCreateInfo' : 73,
'VkFramebufferCreateInfo' : 74,
'VkGraphicsPipelineCreateInfo' : 75,
'VkIOSSurfaceCreateInfoMVK' : 76,
'VkImageBlit' : 77,
'VkImageCopy' : 78,
'VkImageCreateInfo' : 79,
'VkImageMemoryBarrier' : 80,
'VkImageResolve' : 81,
'VkImageSubresource' : 82,
'VkImageSubresourceLayers' : 83,
'VkImageSubresourceRange' : 84,
'VkImageSwapchainCreateInfoKHX' : 85,
'VkImageViewCreateInfo' : 86,
'VkImportMemoryFdInfoKHR' : 87,
'VkImportMemoryWin32HandleInfoKHR' : 88,
'VkImportMemoryWin32HandleInfoNV' : 89,
'VkImportSemaphoreFdInfoKHR' : 90,
'VkImportSemaphoreWin32HandleInfoKHR' : 91,
'VkIndirectCommandsLayoutCreateInfoNVX' : 92,
'VkIndirectCommandsLayoutTokenNVX' : 93,
'VkIndirectCommandsTokenNVX' : 94,
'VkInstanceCreateInfo' : 95,
'VkMacOSSurfaceCreateInfoMVK' : 96,
'VkMappedMemoryRange' : 97,
'VkMemoryAllocateFlagsInfoKHX' : 98,
'VkMemoryAllocateInfo' : 99,
'VkMemoryBarrier' : 100,
'VkMirSurfaceCreateInfoKHR' : 101,
'VkObjectTableCreateInfoNVX' : 102,
'VkObjectTableDescriptorSetEntryNVX' : 103,
'VkObjectTableEntryNVX' : 104,
'VkObjectTableIndexBufferEntryNVX' : 105,
'VkObjectTablePipelineEntryNVX' : 106,
'VkObjectTablePushConstantEntryNVX' : 107,
'VkObjectTableVertexBufferEntryNVX' : 108,
'VkPhysicalDeviceDiscardRectanglePropertiesEXT' : 109,
'VkPhysicalDeviceExternalBufferInfoKHR' : 110,
'VkPhysicalDeviceExternalImageFormatInfoKHR' : 111,
'VkPhysicalDeviceExternalSemaphoreInfoKHR' : 112,
'VkPhysicalDeviceFeatures' : 113,
'VkPhysicalDeviceFeatures2KHR' : 114,
'VkPhysicalDeviceImageFormatInfo2KHR' : 115,
'VkPhysicalDeviceMultiviewFeaturesKHX' : 116,
'VkPhysicalDevicePushDescriptorPropertiesKHR' : 117,
'VkPhysicalDeviceSparseImageFormatInfo2KHR' : 118,
'VkPhysicalDeviceSurfaceInfo2KHR' : 119,
'VkPipelineCacheCreateInfo' : 120,
'VkPipelineColorBlendAttachmentState' : 121,
'VkPipelineColorBlendStateCreateInfo' : 122,
'VkPipelineDepthStencilStateCreateInfo' : 123,
'VkPipelineDiscardRectangleStateCreateInfoEXT' : 124,
'VkPipelineDynamicStateCreateInfo' : 125,
'VkPipelineInputAssemblyStateCreateInfo' : 126,
'VkPipelineLayoutCreateInfo' : 127,
'VkPipelineMultisampleStateCreateInfo' : 128,
'VkPipelineRasterizationStateCreateInfo' : 129,
'VkPipelineRasterizationStateRasterizationOrderAMD' : 130,
'VkPipelineShaderStageCreateInfo' : 131,
'VkPipelineTessellationStateCreateInfo' : 132,
'VkPipelineVertexInputStateCreateInfo' : 133,
'VkPipelineViewportStateCreateInfo' : 134,
'VkPipelineViewportSwizzleStateCreateInfoNV' : 135,
'VkPipelineViewportWScalingStateCreateInfoNV' : 136,
'VkPresentInfoKHR' : 137,
'VkPresentRegionKHR' : 138,
'VkPresentRegionsKHR' : 139,
'VkPresentTimesInfoGOOGLE' : 140,
'VkPushConstantRange' : 141,
'VkQueryPoolCreateInfo' : 142,
'VkRectLayerKHR' : 143,
'VkRenderPassBeginInfo' : 144,
'VkRenderPassCreateInfo' : 145,
'VkRenderPassMultiviewCreateInfoKHX' : 146,
'VkSamplerCreateInfo' : 147,
'VkSemaphoreCreateInfo' : 148,
'VkShaderModuleCreateInfo' : 149,
'VkSparseBufferMemoryBindInfo' : 150,
'VkSparseImageMemoryBind' : 151,
'VkSparseImageMemoryBindInfo' : 152,
'VkSparseImageOpaqueMemoryBindInfo' : 153,
'VkSparseMemoryBind' : 154,
'VkSpecializationInfo' : 155,
'VkSpecializationMapEntry' : 156,
'VkStencilOpState' : 157,
'VkSubmitInfo' : 158,
'VkSubpassDependency' : 159,
'VkSubpassDescription' : 160,
'VkSurfaceCapabilities2EXT' : 161,
'VkSwapchainCounterCreateInfoEXT' : 162,
'VkSwapchainCreateInfoKHR' : 163,
'VkValidationFlagsEXT' : 164,
'VkVertexInputAttributeDescription' : 165,
'VkVertexInputBindingDescription' : 166,
'VkViSurfaceCreateInfoNN' : 167,
'VkViewport' : 168,
'VkViewportSwizzleNV' : 169,
'VkWaylandSurfaceCreateInfoKHR' : 170,
'VkWin32KeyedMutexAcquireReleaseInfoKHR' : 171,
'VkWin32KeyedMutexAcquireReleaseInfoNV' : 172,
'VkWin32SurfaceCreateInfoKHR' : 173,
'VkWriteDescriptorSet' : 174,
'VkXcbSurfaceCreateInfoKHR' : 175,
'VkXlibSurfaceCreateInfoKHR' : 176,
'vkAcquireNextImage2KHX' : 177,
'vkAcquireNextImageKHR' : 178,
'vkAcquireXlibDisplayEXT' : 179,
'vkAllocateCommandBuffers' : 180,
'vkAllocateDescriptorSets' : 181,
'vkAllocateMemory' : 182,
'vkBeginCommandBuffer' : 183,
'vkBindBufferMemory' : 184,
'vkBindBufferMemory2KHR' : 185,
'vkBindImageMemory' : 186,
'vkBindImageMemory2KHR' : 187,
'vkCmdBeginQuery' : 188,
'vkCmdBeginRenderPass' : 189,
'vkCmdBindDescriptorSets' : 190,
'vkCmdBindIndexBuffer' : 191,
'vkCmdBindPipeline' : 192,
'vkCmdBindVertexBuffers' : 193,
'vkCmdBlitImage' : 194,
'vkCmdClearAttachments' : 195,
'vkCmdClearColorImage' : 196,
'vkCmdClearDepthStencilImage' : 197,
'vkCmdCopyBuffer' : 198,
'vkCmdCopyBufferToImage' : 199,
'vkCmdCopyImage' : 200,
'vkCmdCopyImageToBuffer' : 201,
'vkCmdCopyQueryPoolResults' : 202,
'vkCmdDebugMarkerBeginEXT' : 203,
'vkCmdDebugMarkerEndEXT' : 204,
'vkCmdDebugMarkerInsertEXT' : 205,
'vkCmdDispatch' : 206,
'vkCmdDispatchBaseKHX' : 207,
'vkCmdDispatchIndirect' : 208,
'vkCmdDraw' : 209,
'vkCmdDrawIndexed' : 210,
'vkCmdDrawIndexedIndirect' : 211,
'vkCmdDrawIndexedIndirectCountAMD' : 212,
'vkCmdDrawIndirect' : 213,
'vkCmdDrawIndirectCountAMD' : 214,
'vkCmdEndQuery' : 215,
'vkCmdEndRenderPass' : 216,
'vkCmdExecuteCommands' : 217,
'vkCmdFillBuffer' : 218,
'vkCmdNextSubpass' : 219,
'vkCmdPipelineBarrier' : 220,
'vkCmdProcessCommandsNVX' : 221,
'vkCmdPushConstants' : 222,
'vkCmdPushDescriptorSetKHR' : 223,
'vkCmdPushDescriptorSetWithTemplateKHR' : 224,
'vkCmdReserveSpaceForCommandsNVX' : 225,
'vkCmdResetEvent' : 226,
'vkCmdResetQueryPool' : 227,
'vkCmdResolveImage' : 228,
'vkCmdSetBlendConstants' : 229,
'vkCmdSetDepthBias' : 230,
'vkCmdSetDepthBounds' : 231,
'vkCmdSetDeviceMaskKHX' : 232,
'vkCmdSetDiscardRectangleEXT' : 233,
'vkCmdSetEvent' : 234,
'vkCmdSetLineWidth' : 235,
'vkCmdSetScissor' : 236,
'vkCmdSetStencilCompareMask' : 237,
'vkCmdSetStencilReference' : 238,
'vkCmdSetStencilWriteMask' : 239,
'vkCmdSetViewport' : 240,
'vkCmdSetViewportWScalingNV' : 241,
'vkCmdUpdateBuffer' : 242,
'vkCmdWaitEvents' : 243,
'vkCmdWriteTimestamp' : 244,
'vkCreateAndroidSurfaceKHR' : 245,
'vkCreateBuffer' : 246,
'vkCreateBufferView' : 247,
'vkCreateCommandPool' : 248,
'vkCreateComputePipelines' : 249,
'vkCreateDebugReportCallbackEXT' : 250,
'vkCreateDescriptorPool' : 251,
'vkCreateDescriptorSetLayout' : 252,
'vkCreateDescriptorUpdateTemplateKHR' : 253,
'vkCreateDevice' : 254,
'vkCreateDisplayModeKHR' : 255,
'vkCreateDisplayPlaneSurfaceKHR' : 256,
'vkCreateEvent' : 257,
'vkCreateFence' : 258,
'vkCreateFramebuffer' : 259,
'vkCreateGraphicsPipelines' : 260,
'vkCreateIOSSurfaceMVK' : 261,
'vkCreateImage' : 262,
'vkCreateImageView' : 263,
'vkCreateIndirectCommandsLayoutNVX' : 264,
'vkCreateInstance' : 265,
'vkCreateMacOSSurfaceMVK' : 266,
'vkCreateMirSurfaceKHR' : 267,
'vkCreateObjectTableNVX' : 268,
'vkCreatePipelineCache' : 269,
'vkCreatePipelineLayout' : 270,
'vkCreateQueryPool' : 271,
'vkCreateRenderPass' : 272,
'vkCreateSampler' : 273,
'vkCreateSemaphore' : 274,
'vkCreateShaderModule' : 275,
'vkCreateSharedSwapchainsKHR' : 276,
'vkCreateSwapchainKHR' : 277,
'vkCreateViSurfaceNN' : 278,
'vkCreateWaylandSurfaceKHR' : 279,
'vkCreateWin32SurfaceKHR' : 280,
'vkCreateXcbSurfaceKHR' : 281,
'vkCreateXlibSurfaceKHR' : 282,
'vkDebugMarkerSetObjectNameEXT' : 283,
'vkDebugMarkerSetObjectTagEXT' : 284,
'vkDebugReportMessageEXT' : 285,
'vkDestroyBuffer' : 286,
'vkDestroyBufferView' : 287,
'vkDestroyCommandPool' : 288,
'vkDestroyDebugReportCallbackEXT' : 289,
'vkDestroyDescriptorPool' : 290,
'vkDestroyDescriptorSetLayout' : 291,
'vkDestroyDescriptorUpdateTemplateKHR' : 292,
'vkDestroyDevice' : 293,
'vkDestroyEvent' : 294,
'vkDestroyFence' : 295,
'vkDestroyFramebuffer' : 296,
'vkDestroyImage' : 297,
'vkDestroyImageView' : 298,
'vkDestroyIndirectCommandsLayoutNVX' : 299,
'vkDestroyInstance' : 300,
'vkDestroyObjectTableNVX' : 301,
'vkDestroyPipeline' : 302,
'vkDestroyPipelineCache' : 303,
'vkDestroyPipelineLayout' : 304,
'vkDestroyQueryPool' : 305,
'vkDestroyRenderPass' : 306,
'vkDestroySampler' : 307,
'vkDestroySemaphore' : 308,
'vkDestroyShaderModule' : 309,
'vkDestroySurfaceKHR' : 310,
'vkDestroySwapchainKHR' : 311,
'vkDeviceWaitIdle' : 312,
'vkDisplayPowerControlEXT' : 313,
'vkEndCommandBuffer' : 314,
'vkEnumerateDeviceExtensionProperties' : 315,
'vkEnumerateDeviceLayerProperties' : 316,
'vkEnumerateInstanceExtensionProperties' : 317,
'vkEnumerateInstanceLayerProperties' : 318,
'vkEnumeratePhysicalDeviceGroupsKHX' : 319,
'vkEnumeratePhysicalDevices' : 320,
'vkFlushMappedMemoryRanges' : 321,
'vkFreeCommandBuffers' : 322,
'vkFreeDescriptorSets' : 323,
'vkFreeMemory' : 324,
'vkGetBufferMemoryRequirements' : 325,
'vkGetDeviceGroupPeerMemoryFeaturesKHX' : 326,
'vkGetDeviceGroupPresentCapabilitiesKHX' : 327,
'vkGetDeviceGroupSurfacePresentModesKHX' : 328,
'vkGetDeviceMemoryCommitment' : 329,
'vkGetDeviceProcAddr' : 330,
'vkGetDeviceQueue' : 331,
'vkGetDisplayModePropertiesKHR' : 332,
'vkGetDisplayPlaneCapabilitiesKHR' : 333,
'vkGetDisplayPlaneSupportedDisplaysKHR' : 334,
'vkGetEventStatus' : 335,
'vkGetFenceStatus' : 336,
'vkGetImageMemoryRequirements' : 337,
'vkGetImageSparseMemoryRequirements' : 338,
'vkGetImageSubresourceLayout' : 339,
'vkGetInstanceProcAddr' : 340,
'vkGetMemoryFdKHR' : 341,
'vkGetMemoryFdPropertiesKHR' : 342,
'vkGetMemoryWin32HandleKHR' : 343,
'vkGetMemoryWin32HandleNV' : 344,
'vkGetMemoryWin32HandlePropertiesKHR' : 345,
'vkGetPastPresentationTimingGOOGLE' : 346,
'vkGetPhysicalDeviceDisplayPlanePropertiesKHR' : 347,
'vkGetPhysicalDeviceDisplayPropertiesKHR' : 348,
'vkGetPhysicalDeviceExternalBufferPropertiesKHR' : 349,
'vkGetPhysicalDeviceExternalImageFormatPropertiesNV' : 350,
'vkGetPhysicalDeviceExternalSemaphorePropertiesKHR' : 351,
'vkGetPhysicalDeviceFeatures' : 352,
'vkGetPhysicalDeviceFeatures2KHR' : 353,
'vkGetPhysicalDeviceFormatProperties' : 354,
'vkGetPhysicalDeviceFormatProperties2KHR' : 355,
'vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX' : 356,
'vkGetPhysicalDeviceImageFormatProperties' : 357,
'vkGetPhysicalDeviceImageFormatProperties2KHR' : 358,
'vkGetPhysicalDeviceMemoryProperties' : 359,
'vkGetPhysicalDeviceMemoryProperties2KHR' : 360,
'vkGetPhysicalDeviceMirPresentationSupportKHR' : 361,
'vkGetPhysicalDevicePresentRectanglesKHX' : 362,
'vkGetPhysicalDeviceProperties' : 363,
'vkGetPhysicalDeviceProperties2KHR' : 364,
'vkGetPhysicalDeviceQueueFamilyProperties' : 365,
'vkGetPhysicalDeviceQueueFamilyProperties2KHR' : 366,
'vkGetPhysicalDeviceSparseImageFormatProperties' : 367,
'vkGetPhysicalDeviceSparseImageFormatProperties2KHR' : 368,
'vkGetPhysicalDeviceSurfaceCapabilities2EXT' : 369,
'vkGetPhysicalDeviceSurfaceCapabilities2KHR' : 370,
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR' : 371,
'vkGetPhysicalDeviceSurfaceFormats2KHR' : 372,
'vkGetPhysicalDeviceSurfaceFormatsKHR' : 373,
'vkGetPhysicalDeviceSurfacePresentModesKHR' : 374,
'vkGetPhysicalDeviceSurfaceSupportKHR' : 375,
'vkGetPhysicalDeviceWaylandPresentationSupportKHR' : 376,
'vkGetPhysicalDeviceWin32PresentationSupportKHR' : 377,
'vkGetPhysicalDeviceXcbPresentationSupportKHR' : 378,
'vkGetPhysicalDeviceXlibPresentationSupportKHR' : 379,
'vkGetPipelineCacheData' : 380,
'vkGetQueryPoolResults' : 381,
'vkGetRandROutputDisplayEXT' : 382,
'vkGetRefreshCycleDurationGOOGLE' : 383,
'vkGetRenderAreaGranularity' : 384,
'vkGetSemaphoreFdKHR' : 385,
'vkGetSemaphoreWin32HandleKHR' : 386,
'vkGetSwapchainCounterEXT' : 387,
'vkGetSwapchainImagesKHR' : 388,
'vkGetSwapchainStatusKHR' : 389,
'vkImportSemaphoreFdKHR' : 390,
'vkImportSemaphoreWin32HandleKHR' : 391,
'vkInvalidateMappedMemoryRanges' : 392,
'vkMapMemory' : 393,
'vkMergePipelineCaches' : 394,
'vkQueueBindSparse' : 395,
'vkQueuePresentKHR' : 396,
'vkQueueSubmit' : 397,
'vkQueueWaitIdle' : 398,
'vkRegisterDeviceEventEXT' : 399,
'vkRegisterDisplayEventEXT' : 400,
'vkRegisterObjectsNVX' : 401,
'vkReleaseDisplayEXT' : 402,
'vkResetCommandBuffer' : 403,
'vkResetCommandPool' : 404,
'vkResetDescriptorPool' : 405,
'vkResetEvent' : 406,
'vkResetFences' : 407,
'vkSetEvent' : 408,
'vkSetHdrMetadataEXT' : 409,
'vkTrimCommandPoolKHR' : 410,
'vkUnmapMemory' : 411,
'vkUnregisterObjectsNVX' : 412,
'vkUpdateDescriptorSetWithTemplateKHR' : 413,
'vkUpdateDescriptorSets' : 414,
'vkWaitForFences' : 415,
'VkPhysicalDeviceProperties2KHR' : 416,
'VkFormatProperties2KHR' : 417,
'VkImageFormatProperties2KHR' : 418,
'VkPhysicalDeviceMemoryProperties2KHR' : 419,
'VkSurfaceCapabilities2KHR' : 420,
'VkDeviceGroupPresentCapabilitiesKHX' : 421,
'VkExternalBufferPropertiesKHR' : 422,
'VkMemoryWin32HandlePropertiesKHR' : 423,
'VkMemoryFdPropertiesKHR' : 424,
'VkExternalSemaphorePropertiesKHR' : 425,
'VkQueueFamilyProperties2KHR' : 426,
'VkSparseImageFormatProperties2KHR' : 427,
'VkSurfaceFormat2KHR' : 428,
'VkTextureLODGatherFormatPropertiesAMD' : 429,
'VkPhysicalDeviceMultiviewPropertiesKHX' : 430,
'VkPhysicalDeviceGroupPropertiesKHX' : 431,
'VkExternalImageFormatPropertiesKHR' : 432,
'VkPhysicalDeviceIDPropertiesKHR' : 433,
'VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX' : 434,
'VkHdrMetadataEXT' : 435,
'VkExternalMemoryPropertiesKHR' : 436,
'VkFormatProperties' : 437,
'VkImageFormatProperties' : 438,
'VkPhysicalDeviceLimits' : 439,
'VkQueueFamilyProperties' : 440,
'VkMemoryType' : 441,
'VkMemoryHeap' : 442,
'VkSparseImageFormatProperties' : 443,
'VkSurfaceCapabilitiesKHR' : 444,
'VkDisplayPropertiesKHR' : 445,
'VkDisplayPlaneCapabilitiesKHR' : 446,
'VkSharedPresentSurfaceCapabilitiesKHR' : 447,
'VkExternalImageFormatPropertiesNV' : 448,
'VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT' : 449,
'VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT' : 450,
'VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT' : 451,
'VkPipelineColorBlendAdvancedStateCreateInfoEXT' : 452,
'VkPipelineCoverageModulationStateCreateInfoNV' : 453,
'VkPipelineCoverageToColorStateCreateInfoNV' : 454,
'VkSamplerReductionModeCreateInfoEXT' : 455,
'VkPhysicalDeviceProperties' : 456,
'VkSurfaceFormatKHR' : 457,
'VkExportFenceCreateInfoKHR' : 458,
'VkPhysicalDeviceExternalFenceInfoKHR' : 459,
'VkExternalFencePropertiesKHR' : 460,
'vkGetPhysicalDeviceExternalFencePropertiesKHR' : 461,
'VkImportFenceFdInfoKHR' : 462,
'VkFenceGetFdInfoKHR' : 463,
'vkImportFenceFdKHR' : 464,
'vkGetFenceFdKHR' : 465,
'VkImportFenceWin32HandleInfoKHR' : 466,
'VkExportFenceWin32HandleInfoKHR' : 467,
'VkFenceGetWin32HandleInfoKHR' : 468,
'vkImportFenceWin32HandleKHR' : 469,
'vkGetFenceWin32HandleKHR' : 470,
'VkSemaphoreGetFdInfoKHR' : 471,
'VkSemaphoreGetWin32HandleInfoKHR' : 472,
'VkMemoryGetFdInfoKHR' : 473,
'VkMemoryGetWin32HandleInfoKHR' : 474,
'VkMemoryDedicatedRequirementsKHR' : 475,
'VkMemoryDedicatedAllocateInfoKHR' : 476,
'VkBufferMemoryRequirementsInfo2KHR' : 477,
'VkImageMemoryRequirementsInfo2KHR' : 478,
'VkImageSparseMemoryRequirementsInfo2KHR' : 479,
'VkMemoryRequirements2KHR' : 480,
'VkSparseImageMemoryRequirements2KHR' : 481,
'vkGetImageMemoryRequirements2KHR' : 482,
'vkGetBufferMemoryRequirements2KHR' : 483,
'vkGetImageSparseMemoryRequirements2KHR' : 484,
'VkPhysicalDevice16BitStorageFeaturesKHR' : 485,
'VkPhysicalDeviceVariablePointerFeaturesKHR' : 486,
'VkSampleLocationsInfoEXT' : 487,
'VkRenderPassSampleLocationsBeginInfoEXT' : 488,
'VkPipelineSampleLocationsStateCreateInfoEXT' : 489,
'VkPhysicalDeviceSampleLocationsPropertiesEXT' : 490,
'VkMultisamplePropertiesEXT' : 491,
'vkGetPhysicalDeviceMultisamplePropertiesEXT' : 492,
'VkValidationCacheCreateInfoEXT' : 493,
'VkShaderModuleValidationCacheCreateInfoEXT' : 494,
'vkCreateValidationCacheEXT' : 495,
'vkGetValidationCacheDataEXT' : 496,
'vkCmdSetSampleLocationsEXT' : 497,
'vkDestroyValidationCacheEXT' : 498,
'vkMergeValidationCachesEXT' : 499,
'VkAttachmentSampleLocationsEXT' : 500,
'VkSubpassSampleLocationsEXT' : 501,
'VkPhysicalDevicePointClippingPropertiesKHR' : 502,
'VkInputAttachmentAspectReferenceKHR' : 503,
'VkRenderPassInputAttachmentAspectCreateInfoKHR' : 504,
'VkImageViewUsageCreateInfoKHR' : 505,
'VkPipelineTessellationDomainOriginStateCreateInfoKHR' : 506,
'VkImageFormatListCreateInfoKHR' : 507,
'VkSamplerYcbcrConversionCreateInfoKHR' : 508,
'VkBindImagePlaneMemoryInfoKHR' : 509,
'VkImagePlaneMemoryRequirementsInfoKHR' : 510,
'vkCreateSamplerYcbcrConversionKHR' : 511,
'VkBindBufferMemoryDeviceGroupInfoKHX' : 512,
'VkBindImageMemoryDeviceGroupInfoKHX' : 513,
'vkDestroySamplerYcbcrConversionKHR' : 514,
'VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR' : 515,
'VkSamplerYcbcrConversionImageFormatPropertiesKHR' : 516,
'VkSamplerYcbcrConversionInfoKHR' : 517,
'VkDeviceQueueGlobalPriorityCreateInfoEXT' : 518,
'vkGetShaderInfoAMD' : 519,
'VkShaderStatisticsInfoAMD' : 520,
'VkImportMemoryHostPointerInfoEXT' : 521,
'VkMemoryHostPointerPropertiesEXT' : 522,
'VkPhysicalDeviceExternalMemoryHostPropertiesEXT' : 523,
'vkGetMemoryHostPointerPropertiesEXT' : 524,
'VkPhysicalDeviceConservativeRasterizationPropertiesEXT' : 525,
'VkPipelineRasterizationConservativeStateCreateInfoEXT' : 526,
'vkCmdWriteBufferMarkerAMD' : 527,
### ADD New func/struct mappings above this line
}
# Mapping of params to unique IDs
implicit_param_map = {
'a' : 0,
'addressModeU' : 1,
'addressModeV' : 2,
'addressModeW' : 3,
'alphaBlendOp' : 4,
'alphaMode' : 5,
'aspectMask' : 6,
'attachmentCount' : 7,
'b' : 8,
'back' : 9,
'bindCount' : 10,
'bindInfoCount' : 11,
'bindingCount' : 12,
'buffer' : 13,
'bufferView' : 14,
'callback' : 15,
'colorBlendOp' : 16,
'colorWriteMask' : 17,
'commandBuffer' : 18,
'commandBufferCount' : 19,
'commandPool' : 20,
'compareOp' : 21,
'components' : 22,
'compositeAlpha' : 23,
'connection' : 24,
'contents' : 25,
'countBuffer' : 26,
'counter' : 27,
'createInfoCount' : 28,
'cullMode' : 29,
'dataSize' : 30,
'dependencyFlags' : 31,
'depthCompareOp' : 32,
'depthFailOp' : 33,
'descriptorCount' : 34,
'descriptorPool' : 35,
'descriptorSet' : 36,
'descriptorSetCount' : 37,
'descriptorSetLayout' : 38,
'descriptorType' : 39,
'descriptorUpdateEntryCount' : 40,
'descriptorUpdateTemplate' : 41,
'descriptorWriteCount' : 42,
'device' : 43,
'deviceEvent' : 44,
'disabledValidationCheckCount' : 45,
'discardRectangleCount' : 46,
'discardRectangleMode' : 47,
'display' : 48,
'displayEvent' : 49,
'displayMode' : 50,
'dpy' : 51,
'dstAccessMask' : 52,
'dstAlphaBlendFactor' : 53,
'dstBuffer' : 54,
'dstCache' : 55,
'dstColorBlendFactor' : 56,
'dstImage' : 57,
'dstImageLayout' : 58,
'dstSet' : 59,
'dstStageMask' : 60,
'dstSubresource' : 61,
'dynamicStateCount' : 62,
'event' : 63,
'eventCount' : 64,
'externalHandleType' : 65,
'faceMask' : 66,
'failOp' : 67,
'fence' : 68,
'fenceCount' : 69,
'filter' : 70,
'finalLayout' : 71,
'flags' : 72,
'format' : 73,
'framebuffer' : 74,
'front' : 75,
'frontFace' : 76,
'g' : 77,
'handleType' : 78,
'handleTypes' : 79,
'image' : 80,
'imageColorSpace' : 81,
'imageFormat' : 82,
'imageLayout' : 83,
'imageSharingMode' : 84,
'imageSubresource' : 85,
'imageType' : 86,
'imageUsage' : 87,
'imageView' : 88,
'indexType' : 89,
'indirectCommandsLayout' : 90,
'indirectCommandsTokenCount' : 91,
'initialLayout' : 92,
'inputRate' : 93,
'instance' : 94,
'layout' : 95,
'level' : 96,
'loadOp' : 97,
'magFilter' : 98,
'memory' : 99,
'memoryRangeCount' : 100,
'minFilter' : 101,
'mipmapMode' : 102,
'mode' : 103,
'modes' : 104,
'module' : 105,
'newLayout' : 106,
'objectCount' : 107,
'objectTable' : 108,
'objectType' : 109,
'oldLayout' : 110,
'oldSwapchain' : 111,
'pAcquireInfo' : 112,
'pAcquireKeys' : 113,
'pAcquireSyncs' : 114,
'pAcquireTimeoutMilliseconds' : 115,
'pAcquireTimeouts' : 116,
'pAllocateInfo' : 117,
'pAllocator' : 118,
'pApplicationInfo' : 119,
'pApplicationName' : 120,
'pAttachments' : 121,
'pAttributes' : 122,
'pBeginInfo' : 123,
'pBindInfo' : 124,
'pBindInfos' : 125,
'pBindings' : 126,
'pBinds' : 127,
'pBuffer' : 128,
'pBufferBinds' : 129,
'pBufferMemoryBarriers' : 130,
'pBuffers' : 131,
'pCallback' : 132,
'pCapabilities' : 133,
'pCode' : 134,
'pColor' : 135,
'pColorAttachments' : 136,
'pCommandBufferDeviceMasks' : 137,
'pCommandBuffers' : 138,
'pCommandPool' : 139,
'pCommittedMemoryInBytes' : 140,
'pCorrelationMasks' : 141,
'pCounterValue' : 142,
'pCreateInfo' : 143,
'pCreateInfos' : 144,
'pData' : 145,
'pDataSize' : 146,
'pDependencies' : 147,
'pDepthStencil' : 148,
'pDepthStencilAttachment' : 149,
'pDescriptorCopies' : 150,
'pDescriptorPool' : 151,
'pDescriptorSets' : 152,
'pDescriptorUpdateEntries' : 153,
'pDescriptorUpdateTemplate' : 154,
'pDescriptorWrites' : 155,
'pDevice' : 156,
'pDeviceEventInfo' : 157,
'pDeviceGroupPresentCapabilities' : 158,
'pDeviceIndices' : 159,
'pDeviceMasks' : 160,
'pDeviceRenderAreas' : 161,
'pDisabledValidationChecks' : 162,
'pDiscardRectangles' : 163,
'pDisplay' : 164,
'pDisplayCount' : 165,
'pDisplayEventInfo' : 166,
'pDisplayPowerInfo' : 167,
'pDisplayTimingProperties' : 168,
'pDisplays' : 169,
'pDynamicOffsets' : 170,
'pDynamicState' : 171,
'pDynamicStates' : 172,
'pEnabledFeatures' : 173,
'pEngineName' : 174,
'pEvent' : 175,
'pEvents' : 176,
'pExternalBufferInfo' : 177,
'pExternalBufferProperties' : 178,
'pExternalImageFormatProperties' : 179,
'pExternalSemaphoreInfo' : 180,
'pExternalSemaphoreProperties' : 181,
'pFd' : 182,
'pFeatures' : 183,
'pFence' : 184,
'pFences' : 185,
'pFormatInfo' : 186,
'pFormatProperties' : 187,
'pFramebuffer' : 188,
'pGranularity' : 189,
'pHandle' : 190,
'pImage' : 191,
'pImageBinds' : 192,
'pImageFormatInfo' : 193,
'pImageFormatProperties' : 194,
'pImageIndex' : 195,
'pImageIndices' : 196,
'pImageMemoryBarriers' : 197,
'pImageOpaqueBinds' : 198,
'pImportSemaphoreFdInfo' : 199,
'pImportSemaphoreWin32HandleInfo' : 200,
'pIndirectCommandsLayout' : 201,
'pIndirectCommandsTokens' : 202,
'pInitialData' : 203,
'pInputAssemblyState' : 204,
'pInputAttachments' : 205,
'pInstance' : 206,
'pLayerName' : 207,
'pLayerPrefix' : 208,
'pLayout' : 209,
'pLimits' : 210,
'pMarkerInfo' : 211,
'pMarkerName' : 212,
'pMemory' : 213,
'pMemoryBarriers' : 214,
'pMemoryFdProperties' : 215,
'pMemoryProperties' : 216,
'pMemoryRanges' : 217,
'pMemoryRequirements' : 218,
'pMemoryWin32HandleProperties' : 219,
'pMessage' : 220,
'pMetadata' : 221,
'pMode' : 222,
'pModes' : 223,
'pName' : 224,
'pNameInfo' : 225,
'pNext' : 226,
'pObjectEntryCounts' : 227,
'pObjectEntryTypes' : 228,
'pObjectEntryUsageFlags' : 229,
'pObjectIndices' : 230,
'pObjectName' : 231,
'pObjectTable' : 232,
'pOffsets' : 233,
'pPeerMemoryFeatures' : 234,
'pPhysicalDeviceCount' : 235,
'pPhysicalDeviceGroupCount' : 236,
'pPhysicalDeviceGroupProperties' : 237,
'pPhysicalDevices' : 238,
'pPipelineCache' : 239,
'pPipelineLayout' : 240,
'pPipelines' : 241,
'pPoolSizes' : 242,
'pPresentInfo' : 243,
'pPresentModeCount' : 244,
'pPresentModes' : 245,
'pPresentationTimingCount' : 246,
'pPresentationTimings' : 247,
'pPreserveAttachments' : 248,
'pProcessCommandsInfo' : 249,
'pProperties' : 250,
'pPropertyCount' : 251,
'pPushConstantRanges' : 252,
'pQueryPool' : 253,
'pQueue' : 254,
'pQueueCreateInfos' : 255,
'pQueueFamilyProperties' : 256,
'pQueueFamilyPropertyCount' : 257,
'pQueuePriorities' : 258,
'pRanges' : 259,
'pRasterizationState' : 260,
'pRectCount' : 261,
'pRectangles' : 262,
'pRects' : 263,
'pRegions' : 264,
'pReleaseKeys' : 265,
'pReleaseSyncs' : 266,
'pRenderPass' : 267,
'pRenderPassBegin' : 268,
'pReserveSpaceInfo' : 269,
'pResolveAttachments' : 270,
'pResults' : 271,
'pSFRRects' : 272,
'pSampleMask' : 273,
'pSampler' : 274,
'pScissors' : 275,
'pSemaphore' : 276,
'pSetLayout' : 277,
'pSetLayouts' : 278,
'pShaderModule' : 279,
'pSignalSemaphoreDeviceIndices' : 280,
'pSignalSemaphoreValues' : 281,
'pSignalSemaphores' : 282,
'pSparseMemoryRequirementCount' : 283,
'pSparseMemoryRequirements' : 284,
'pSpecializationInfo' : 285,
'pSrcCaches' : 286,
'pStages' : 287,
'pSubmits' : 288,
'pSubpasses' : 289,
'pSubresource' : 290,
'pSupported' : 291,
'pSurface' : 292,
'pSurfaceCapabilities' : 293,
'pSurfaceFormatCount' : 294,
'pSurfaceFormats' : 295,
'pSurfaceInfo' : 296,
'pSwapchain' : 297,
'pSwapchainImageCount' : 298,
'pSwapchainImages' : 299,
'pSwapchains' : 300,
'pTag' : 301,
'pTagInfo' : 302,
'pTimes' : 303,
'pTokens' : 304,
'pValues' : 305,
'pVertexAttributeDescriptions' : 306,
'pVertexBindingDescriptions' : 307,
'pVertexInputState' : 308,
'pView' : 309,
'pViewMasks' : 310,
'pViewOffsets' : 311,
'pWaitDstStageMask' : 312,
'pWaitSemaphoreDeviceIndices' : 313,
'pWaitSemaphoreValues' : 314,
'pWaitSemaphores' : 315,
'passOp' : 316,
'physicalDevice' : 317,
'pipeline' : 318,
'pipelineBindPoint' : 319,
'pipelineCache' : 320,
'pipelineLayout' : 321,
'pipelineStage' : 322,
'polygonMode' : 323,
'poolSizeCount' : 324,
'powerState' : 325,
'ppData' : 326,
'ppEnabledExtensionNames' : 327,
'ppEnabledLayerNames' : 328,
'ppObjectTableEntries' : 329,
'preTransform' : 330,
'presentMode' : 331,
'queryPool' : 332,
'queryType' : 333,
'queue' : 334,
'queueCount' : 335,
'queueCreateInfoCount' : 336,
'r' : 337,
'rangeCount' : 338,
'rasterizationOrder' : 339,
'rasterizationSamples' : 340,
'rectCount' : 341,
'regionCount' : 342,
'renderPass' : 343,
'sType' : 344,
'sampler' : 345,
'samples' : 346,
'scissorCount' : 347,
'semaphore' : 348,
'sequencesCountBuffer' : 349,
'sequencesIndexBuffer' : 350,
'shaderModule' : 351,
'sharingMode' : 352,
'size' : 353,
'srcAccessMask' : 354,
'srcAlphaBlendFactor' : 355,
'srcBuffer' : 356,
'srcCacheCount' : 357,
'srcColorBlendFactor' : 358,
'srcImage' : 359,
'srcImageLayout' : 360,
'srcSet' : 361,
'srcStageMask' : 362,
'srcSubresource' : 363,
'stage' : 364,
'stageCount' : 365,
'stageFlags' : 366,
'stageMask' : 367,
'stencilLoadOp' : 368,
'stencilStoreOp' : 369,
'storeOp' : 370,
'subpassCount' : 371,
'subresource' : 372,
'subresourceRange' : 373,
'surface' : 374,
'surfaceCounters' : 375,
'swapchain' : 376,
'swapchainCount' : 377,
'tagSize' : 378,
'targetCommandBuffer' : 379,
'templateType' : 380,
'tiling' : 381,
'tokenCount' : 382,
'tokenType' : 383,
'topology' : 384,
'transform' : 385,
'type' : 386,
'usage' : 387,
'viewType' : 388,
'viewportCount' : 389,
'w' : 390,
'window' : 391,
'x' : 392,
'y' : 393,
'z' : 394,
'externalMemoryFeatures' : 395,
'compatibleHandleTypes' : 396,
'exportFromImportedHandleTypes' : 397,
'linearTilingFeatures' : 398,
'optimalTilingFeatures' : 399,
'bufferFeatures' : 400,
'sampleCounts' : 401,
'framebufferColorSampleCounts' : 402,
'framebufferDepthSampleCounts' : 403,
'framebufferStencilSampleCounts' : 404,
'framebufferNoAttachmentsSampleCounts' : 405,
'sampledImageColorSampleCounts' : 406,
'sampledImageIntegerSampleCounts' : 407,
'sampledImageDepthSampleCounts' : 408,
'sampledImageStencilSampleCounts' : 409,
'storageImageSampleCounts' : 410,
'queueFlags' : 411,
'propertyFlags' : 412,
'supportedTransforms' : 413,
'currentTransform' : 414,
'supportedCompositeAlpha' : 415,
'supportedUsageFlags' : 416,
'supportedAlpha' : 417,
'sharedPresentSupportedUsageFlags' : 418,
'externalSemaphoreFeatures' : 419,
'supportedSurfaceCounters' : 420,
'blendOverlap' : 421,
'coverageModulationMode' : 422,
'coverageModulationTableCount' : 423,
'reductionMode' : 424,
'enabledLayerCount' : 425,
'enabledExtensionCount' : 426,
'waitSemaphoreCount' : 427,
'signalSemaphoreCount' : 428,
'bufferBindCount' : 429,
'imageOpaqueBindCount' : 430,
'imageBindCount' : 431,
'codeSize' : 432,
'initialDataSize' : 433,
'vertexBindingDescriptionCount' : 434,
'vertexAttributeDescriptionCount' : 435,
'setLayoutCount' : 436,
'pushConstantRangeCount' : 437,
'inputAttachmentCount' : 438,
'colorAttachmentCount' : 439,
'preserveAttachmentCount' : 440,
'dependencyCount' : 441,
'dynamicOffsetCount' : 442,
'rectangleCount' : 443,
'correlationMaskCount' : 444,
'acquireCount' : 445,
'releaseCount' : 446,
'deviceIndexCount' : 447,
'SFRRectCount' : 448,
'deviceRenderAreaCount' : 449,
'physicalDeviceCount' : 450,
'waitSemaphoreValuesCount' : 451,
'signalSemaphoreValuesCount' : 452,
'deviceType' : 453,
'colorSpace' : 454,
'pfnAllocation' : 455,
'pfnReallocation' : 456,
'pfnFree' : 457,
'blendConstants' : 458,
'displayName' : 459,
'pfnCallback' : 460,
'externalFenceFeatures' : 461,
'pInfo' : 462,
'pGetFdInfo' : 463,
'pGetWin32HandleInfo' : 464,
'pExternalFenceInfo' : 465,
'pExternalFenceProperties' : 466,
'pImportFenceProperties' : 467,
'pImportFenceFdInfo' : 468,
'pImportFenceWin32HandleInfo' : 469,
'basePipelineHandle' : 470,
'pImmutableSamplers' : 471,
'pTexelBufferView' : 472,
'sampleLocationsPerPixel' : 473,
'sampleLocationsCount' : 474,
'pSampleLocations' : 475,
'attachmentInitialSampleLocationsCount' : 476,
'pAttachmentInitialSampleLocations' : 477,
'postSubpassSampleLocationsCount' : 478,
'pSubpassSampleLocations' : 479,
'sampleLocationSampleCounts' : 480,
'pValidationCache' : 481,
'validationCache' : 482,
'sampleLocationsInfo' : 483,
'pSampleLocationsInfo' : 484,
'pMultisampleProperties' : 485,
'pointClippingBehavior' : 486,
'aspectReferenceCount' : 487,
'pAspectReferences' : 488,
'domainOrigin' : 489,
'ycbcrModel' : 490,
'ycbcrRange' : 491,
'xChromaOffset' : 492,
'yChromaOffset' : 493,
'chromaFilter' : 494,
'planeAspect' : 495,
'pYcbcrConversion' : 496,
'ycbcrConversion' : 497,
'pViewFormats' : 498,
'conversion' : 499,
'pPostSubpassSampleLocations' : 500,
'globalPriority' : 501,
'shaderStage' : 502,
'infoType' : 503,
'pInfoSize' : 504,
'shaderStageMask' : 505,
'pMemoryHostPointerProperties' : 506,
'pHostPointer' : 507,
'conservativeRasterizationMode' : 508,
'pViewports' : 509,
'pViewportWScalings' : 510,
### ADD New implicit param mappings above this line
}
uniqueid_set = set() # store uniqueid to make sure we don't have duplicates
# Convert a string VUID into numerical value
# See "VUID Mapping Details" comment above for more info
def convertVUID(vuid_string):
"""Convert a string-based VUID into a numberical value"""
#func_struct_update = False
#imp_param_update = False
if vuid_string in ['', None]:
return -1
vuid_parts = vuid_string.split('-')
if vuid_parts[1] not in func_struct_id_map:
print ("ERROR: Missing func/struct map value for '%s'!" % (vuid_parts[1]))
print (" TODO: Need to add mapping for this to end of func_struct_id_map")
print (" replace '### ADD New func/struct mappings above this line' line with \"'%s' : %d,\"" % (vuid_parts[1], len(func_struct_id_map)))
func_struct_id_map[vuid_parts[1]] = len(func_struct_id_map)
#func_struct_update = True
sys.exit(1)
uniqueid = func_struct_id_map[vuid_parts[1]] << FUNC_STRUCT_SHIFT
if vuid_parts[-1].isdigit(): # explicit VUID has int on the end
explicit_id = int(vuid_parts[-1])
# For explicit case, id is explicit_base + func/struct mapping + unique id
uniqueid = uniqueid + (explicit_id << EXPLICIT_ID_SHIFT) + explicit_bit0
else: # implicit case
if vuid_parts[-1] not in implicit_type_map:
print("ERROR: Missing mapping for implicit type '%s'!\nTODO: Please add new mapping." % (vuid_parts[-1]))
sys.exit(1)
else:
param_id = 0 # Default when no param is available
if vuid_parts[-2] != vuid_parts[1]: # we have a parameter
if vuid_parts[-2] in implicit_param_map:
param_id = implicit_param_map[vuid_parts[-2]]
else:
print ("ERROR: Missing param '%s' from implicit_param_map\n TODO: Please add new mapping." % (vuid_parts[-2]))
print (" replace '### ADD New implicit param mappings above this line' line with \"'%s' : %d,\"" % (vuid_parts[-2], len(implicit_param_map)))
implicit_param_map[vuid_parts[-2]] = len(implicit_param_map)
#imp_param_update = True
sys.exit(1)
uniqueid = uniqueid + (param_id << IMPLICIT_PARAM_SHIFT) + (implicit_type_map[vuid_parts[-1]] << IMPLICIT_TYPE_SHIFT) + implicit_bit0
else: # No parameter so that field is 0
uniqueid = uniqueid + (implicit_type_map[vuid_parts[-1]] << IMPLICIT_TYPE_SHIFT) + implicit_bit0
# if uniqueid in uniqueid_set:
# print ("ERROR: Uniqueid %d for string id %s is a duplicate!" % (uniqueid, vuid_string))
# print (" TODO: Figure out what caused the dupe and fix it")
#sys.exit()
# print ("Storing uniqueid %d for unique string %s" % (uniqueid, vuid_string))
uniqueid_set.add(uniqueid)
# if func_struct_update:
# print ("func_struct_id_map updated, here's new structure")
# print ("func_struct_id_map = {")
# fs_id = 0
# for fs in sorted(func_struct_id_map):
# print ("'%s' : %d," % (fs, fs_id))
# fs_id = fs_id + 1
# print ("### ADD New func/struct mappings above this line")
# print ("}")
# if imp_param_update:
# print ("implicit_param_map updated, here's new structure")
# print ("implicit_param_map = {")
# ip_id = 0
# for ip in sorted(implicit_param_map):
# print ("'%s' : %d," % (ip, ip_id))
# ip_id = ip_id + 1
# print ("### ADD New implicit param mappings above this line")
# print ("}")
return uniqueid
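# Minimal usage sketch (illustrative; the sample VUID strings below are assumptions
# chosen to exercise both the implicit and explicit code paths, not values taken
# from the Vulkan spec):
if __name__ == '__main__':
    for _example_vuid in ('VUID-vkCmdDraw-commandBuffer-parameter',  # implicit
                          'VUID-vkCmdDraw-None-02721'):              # explicit
        print("%s -> 0x%08x" % (_example_vuid, convertVUID(_example_vuid)))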
|
#!venv/bin/python
from app import app
from flaskext.actions import Manager
manager = Manager(app)
if __name__ == '__main__':
manager.run()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyReplicaRecoveryModeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyReplicaRecoveryMode','rds')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_RecoveryMode(self):
return self.get_query_params().get('RecoveryMode')
def set_RecoveryMode(self,RecoveryMode):
self.add_query_param('RecoveryMode',RecoveryMode)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ReplicaId(self):
return self.get_query_params().get('ReplicaId')
def set_ReplicaId(self,ReplicaId):
self.add_query_param('ReplicaId',ReplicaId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
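# Usage sketch (assumption only; the client import and call below follow the common
# aliyunsdkcore pattern, and every value is a placeholder rather than a real ID):
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = ModifyReplicaRecoveryModeRequest()
# request.set_ReplicaId('<replica-id>')
# request.set_RecoveryMode('<recovery-mode>')
# response = client.do_action_with_exception(request)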
|
import pytest
from starlette.testclient import TestClient
# from sqlalchemy import create_engine
# from sqlalchemy_utils import database_exists, create_database, drop_database
# from alembic import command
# from alembic.config import Config
from app.main import v1
from app.config import DATABASE_URL
# @pytest.fixture(scope="session", autouse=True)
# def create_test_database():
# url = DATABASE_URL
# engine = create_engine(url)
# assert not database_exists(url), 'Test database already exists. Aborting tests.'
# create_database(url) # Create the test database.
# config = Config("alembic.ini") # Run the migrations.
# config.set_main_option('sqlalchemy.url', DATABASE_URL)
# command.upgrade(config, "head")
# yield # Run the tests.
# drop_database(url) # Drop the test database.
# # https://www.starlette.io/database/#migrations
@pytest.fixture(scope="module")
def test_app():
client = TestClient(v1)
yield client # testing happens here
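# Usage sketch (assumption: the service exposes a route at "/"; adjust the path
# and expected status for the real API):
#
# def test_read_root(test_app):
#     response = test_app.get("/")
#     assert response.status_code == 200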
|
#!/usr/bin/env python
from distutils.core import setup
try:
    from subprocess import getoutput  # Python 3
except ImportError:
    from commands import getoutput  # Python 2 fallback
version = getoutput('git describe --always') or '1.0'
setup(name='unifi',
version=version,
description='API towards Ubiquity Networks UniFi controller',
author='Jakob Borg',
author_email='jakob@nym.se',
url='https://github.com/calmh/unifi-api',
packages=['unifi'],
scripts=['unifi-low-snr-reconnect', 'unifi-ls-clients'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Networking']
)
|
def greetings(msg):
print("hello")
|
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selectinf.tests.instance import gaussian_instance
from selectinf.learning.utils import full_model_inference, pivot_plot
from selectinf.learning.core import normal_sampler, keras_fit
def generate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, **ignored):
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
return X, y, truth
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000):
# description of statistical problem
X, y, truth = generate(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(X, XTXi, resid, sampler):
n, p = X.shape
rho = 0.8
S = sampler(scale=0.) # deterministic with scale=0
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
Xnew = rho * X + np.sqrt(1 - rho**2) * np.random.standard_normal(X.shape)
X_full = np.hstack([X, Xnew])
beta_full = np.linalg.pinv(X_full).dot(ynew)
winners = np.fabs(beta_full)[:p] > np.fabs(beta_full)[p:]
return set(np.nonzero(winners)[0])
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
success_params=(8, 10),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':20, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
opts = dict(n=200, p=100, s=10, signal=(0.5, 1),
sigma=2, alpha=0.1, B=5000)
R2 = []
for _ in range(100):
X, y, truth = generate(**opts)
R2.append((np.linalg.norm(y-X.dot(truth))**2, np.linalg.norm(y)**2))
R2 = np.array(R2)
R2mean = 1 - np.mean(R2[:,0]) / np.mean(R2[:,1])
print('R2', R2mean)
iseed = int(np.fabs(np.random.standard_normal() * 50000))
for i in range(2000):
df = simulate(**opts)
csvfile = __file__[:-3] + '_200.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
f = pivot_plot(df, outbase)[1]
plt.close(f)
|
#
# PySNMP MIB module DES3028P-L2MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DES3028P-L2MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:40:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Integer32, Unsigned32, Gauge32, IpAddress, Bits, NotificationType, Counter64, ObjectIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Integer32", "Unsigned32", "Gauge32", "IpAddress", "Bits", "NotificationType", "Counter64", "ObjectIdentity", "MibIdentifier")
TextualConvention, DisplayString, TruthValue, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "TruthValue", "RowStatus")
des3028p, = mibBuilder.importSymbols("SWPRIMGMT-DES30XXP-MIB", "des3028p")
swL2MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2))
if mibBuilder.loadTexts: swL2MgmtMIB.setLastUpdated('1008030000Z')
if mibBuilder.loadTexts: swL2MgmtMIB.setOrganization('D-Link, Inc.')
if mibBuilder.loadTexts: swL2MgmtMIB.setContactInfo('http://support.dlink.com')
if mibBuilder.loadTexts: swL2MgmtMIB.setDescription('The Structure of Layer 2 Network Management Information for enterprise.')
class VlanId(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094)
class PortList(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 127)
class MacAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
fixedLength = 6
swL2DevMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1))
swL2PortMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2))
swL2QOSMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3))
swL2TrunkMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4))
swPortMirrorPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 6))
swIGMPPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7))
swL2TrafficSegMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 12))
swL2PortSecurityMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15))
swL2CosMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17))
swL2DhcpRelayMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18))
swL2MgmtMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20))
swL2LoopDetectMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21))
swL2MultiFilter = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22))
swL2VlanMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23))
swL2DhcpLocalRelayMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24))
swL2FloodMAC = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25))
swL2DevInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 1))
swL2DevInfoFrontPanelLedStatus = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2DevInfoFrontPanelLedStatus.setStatus('current')
if mibBuilder.loadTexts: swL2DevInfoFrontPanelLedStatus.setDescription('This object is a set of system LED indications. On DES3028/DES3052 systems, which do not support PoE, the first two octets are defined as system LEDs: the first is the power LED and the second is the console LED. On DES3028p/DES3052p systems, which support PoE, the first four octets are defined as system LEDs: in normal mode the first octet is 0x02 and the second octet is 0x01, whereas in PoE mode the first octet is 0x01 and the second octet is 0x02; the third octet indicates the power LED and the fourth octet indicates the console LED. The remaining octets, starting at the third or the fifth octet depending on whether the system supports PoE, indicate the logical port LEDs (following swL2BasePort ordering). Every two bytes represent one port: the first byte represents the Link/Activity LED and the second byte represents the Speed LED. Link/Activity LED: the most significant bit indicates blink/solid (8 = the LED blinks); the second most significant bit indicates link status (1 = link fail, 2 = link pass). Speed LED: 01 = 10Mbps, 02 = 100Mbps, 03 = 1000Mbps. The four remaining bits are currently unused and must be 0.')
swL2DevCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2))
swL2DevCtrlSystemReboot = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("reboot", 2), ("save-config-and-reboot", 3), ("reboot-and-load-factory-default-config", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlSystemReboot.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlSystemReboot.setDescription('This object indicates the agent system reboot state. The agent always returns other(1) when this object is read.')
swL2DevCtrlSystemIP = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlSystemIP.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlSystemIP.setDescription('This object indicates system ip.')
swL2DevCtrlSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlSubnetMask.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlSubnetMask.setDescription('This object indicates system subnet mask.')
swL2DevCtrlDefaultGateway = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlDefaultGateway.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlDefaultGateway.setDescription('This object indicates system default gateway.')
swL2DevCtrlManagementVlanId = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 5), VlanId()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlManagementVlanId.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlManagementVlanId.setDescription('This object controls which VLAN includes the system IP. The VLAN must already have been created.')
swL2DevCtrlIGMPSnooping = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlIGMPSnooping.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlIGMPSnooping.setDescription('This object indicates whether the layer 2 Internet Group Management Protocol (IGMP) capture function is enabled or disabled.')
swL2DevCtrlCleanAllStatisticCounter = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("active", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlCleanAllStatisticCounter.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCleanAllStatisticCounter.setDescription('When this object is set to active, all the statistics counters will be cleared. If set to normal, nothing is done.')
swL2DevCtrlSnmpEnableAuthenTraps = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlSnmpEnableAuthenTraps.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlSnmpEnableAuthenTraps.setDescription('Indicates whether the SNMP entity is permitted to generate authenticationFailure traps. The value of this object overrides any configuration information; as such, it provides a means whereby all authenticationFailure traps may be disabled. Note that it is strongly recommended that this object be stored in non-volatile memory so that it remains constant across re-initializations of the network management system.')
swL2DevCtrlRmonState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlRmonState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlRmonState.setDescription('This object enables or disables RMON.')
swL2DevCtrlIpAutoConfig = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlIpAutoConfig.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlIpAutoConfig.setDescription('Indicates the status of automatically getting the configuration from a TFTP server on the device.')
swL2MACNotifyState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyState.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyState.setDescription('This object enables or disables MAC Notification.')
swL2MACNotifyHistorySize = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyHistorySize.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyHistorySize.setDescription('This object indicates the history size of MAC address variations in the address table. The default value is 1.')
swL2MACNotifyInterval = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyInterval.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyInterval.setDescription('This object indicates the time interval, in seconds, for triggering the MAC notification message.')
swL2DevCtrlLLDPState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlLLDPState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlLLDPState.setDescription('Specifies the state of the LLDP function. When this function is enabled, the switch can start to transmit LLDP packets and to receive and process LLDP packets. The specific function of each port depends on the per-port LLDP setting. For the advertisement of LLDP packets, the switch announces the information to its neighbors through its ports. For the reception of LLDP packets, the switch learns the information advertised by its neighbors and stores it in the neighbor table.')
swL2DevCtrlLLDPForwardMessageState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlLLDPForwardMessageState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlLLDPForwardMessageState.setDescription('When LLDP is disabled and LLDP forward_message is enabled, received LLDP Data Unit packets will be forwarded.')
swL2DevCtrlAsymVlanState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlAsymVlanState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlAsymVlanState.setDescription('This object enables or disables the asymmetric VLAN feature of the device.')
swL2IGMPSnoopingMulticastVlanState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPSnoopingMulticastVlanState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPSnoopingMulticastVlanState.setDescription('This indicates the global state of the igmp_snoop multicast_vlan.')
swL2DevCtrlVLANTrunkState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlVLANTrunkState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlVLANTrunkState.setDescription('This indicates the global state of the VLAN trunking feature of the device.')
swL2DevCtrlWeb = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 27))
swL2DevCtrlWebState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 27, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlWebState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlWebState.setDescription('This object controls Web status.')
swL2DevCtrlTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 28))
swL2DevCtrlTelnetState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 2, 28, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlTelnetState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlTelnetState.setDescription('This object controls the Telnet status.')
swL2DevAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 3))
swL2DevAlarmNewRoot = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmNewRoot.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmNewRoot.setDescription('When the device has become the new root of the Spanning Tree, this object decides whether to send a new root trap.')
swL2DevAlarmTopologyChange = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmTopologyChange.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmTopologyChange.setDescription('This object determines whether or not to send a trap message when the switch topology changes. If the object is enabled (3), the Topology Change trap is sent by the device when any of its configured ports transition from the Learning state to the Forwarding state, or from the Forwarding state to the Blocking state. For the same port transition, the device does not send the trap if this object value is disabled or in another state.')
swL2DevAlarmLinkChange = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 1, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmLinkChange.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmLinkChange.setDescription('This object determines whether or not to send a trap message when the link changes. If the object is enabled (3), the Link Change trap is sent by the device when any of its port links change. The device does not send the trap if this object value is disabled or in another state.')
swL2PortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1), )
if mibBuilder.loadTexts: swL2PortInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoTable.setDescription('A table that contains information about every port.')
swL2PortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2PortInfoPortIndex"), (0, "DES3028P-L2MGMT-MIB", "swL2PortInfoMediumType"))
if mibBuilder.loadTexts: swL2PortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoEntry.setDescription('A list of information for each port of the device.')
swL2PortInfoPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoPortIndex.setDescription("This object indicates the module's port number (1..max port number in the module).")
swL2PortInfoMediumType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(100, 101))).clone(namedValues=NamedValues(("copper", 100), ("fiber", 101)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoMediumType.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoMediumType.setDescription('This object indicates the port type: fiber or copper.')
swL2PortInfoLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("link-pass", 2), ("link-fail", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoLinkStatus.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoLinkStatus.setDescription('This object indicates the port link status.')
swL2PortInfoNwayStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("auto", 1), ("half-10Mbps", 2), ("full-10Mbps", 3), ("half-100Mbps", 4), ("full-100Mbps", 5), ("half-1Gigabps", 6), ("full-1Gigabps", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoNwayStatus.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoNwayStatus.setDescription('This object indicates the port speed and duplex mode.')
swL2PortCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2), )
if mibBuilder.loadTexts: swL2PortCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlTable.setDescription('A table that contains control information about every port.')
swL2PortCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2PortCtrlPortIndex"), (0, "DES3028P-L2MGMT-MIB", "swL2PortCtrlPortMediumType"))
if mibBuilder.loadTexts: swL2PortCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlEntry.setDescription('A list of control information for each port of the device.')
swL2PortCtrlPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCtrlPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlPortIndex.setDescription("This object indicates the module's port number (1..max port number in the module).")
swL2PortCtrlPortMediumType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(100, 101))).clone(namedValues=NamedValues(("copper", 100), ("fiber", 101)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlPortMediumType.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlPortMediumType.setDescription('This object indicates the port type: fiber or copper.')
swL2PortCtrlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlAdminState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlAdminState.setDescription('This object decides whether the port is enabled or disabled.')
swL2PortCtrlNwayState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 7, 8, 9))).clone(namedValues=NamedValues(("nway-auto", 1), ("nway-disabled-10Mbps-Half", 2), ("nway-disabled-10Mbps-Full", 3), ("nway-disabled-100Mbps-Half", 4), ("nway-disabled-100Mbps-Full", 5), ("nway-disabled-1Gigabps-Full", 7), ("nway-disabled-1Gigabps-Full-Master", 8), ("nway-disabled-1Gigabps-Full-Slave", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlNwayState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlNwayState.setDescription('This object configures the port speed, duplex mode, and N-Way (auto-negotiation) mode.')
swL2PortCtrlFlowCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlFlowCtrlState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlFlowCtrlState.setDescription('This object enables or disables the flow control function.')
swL2PortCtrlDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlDescription.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlDescription.setDescription('This object provides a textual description of the port.')
swL2PortCtrlAddressLearning = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlAddressLearning.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlAddressLearning.setDescription('This object decides whether address learning is enabled or disabled.')
swL2PortCtrlMACNotifyState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlMACNotifyState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMACNotifyState.setDescription("This object sets each port's MAC notification state.")
swL2PortCtrlMulticastfilter = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("forward-unregistered-groups", 2), ("filter-unregistered-groups", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlMulticastfilter.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMulticastfilter.setDescription('This object decides the multicast packet filtering mode on this port. ')
swL2PortCtrlMDIXState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("auto", 1), ("normal", 2), ("cross", 3), ("other", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlMDIXState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMDIXState.setDescription("This object configures the MDIX setting of the port. The value 'other' is for those entries in which MDIX is not applicable.")
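# --- Illustrative example (not part of the generated MIB) ---------------------
# A minimal manager-side sketch of writing one of the read-write columns above,
# assuming the classic synchronous pysnmp hlapi; the host address, community
# string, and helper name are hypothetical placeholders.
from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, setCmd)

def set_port_admin_state(host, community, port, state):
    # swL2PortCtrlAdminState is indexed by (port index, medium type);
    # copper(100) is assumed here.  state: disabled(2) or enabled(3).
    oid = (1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 2, 1, 3, port, 100)
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity(oid), Integer32(state))))
    return error_indication or error_status

# Usage (hypothetical target): set_port_admin_state('192.0.2.1', 'private', 5, 2)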
swL2PortErrTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3), )
if mibBuilder.loadTexts: swL2PortErrTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrTable.setDescription('A table that contains information about err-disabled ports.')
swL2PortErrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2PortErrPortIndex"))
if mibBuilder.loadTexts: swL2PortErrEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrEntry.setDescription('A list of information for each err-disabled port of the device.')
swL2PortErrPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortErrPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrPortIndex.setDescription("This object indicates the module's port number (1..max port number in the module).")
swL2PortErrPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortErrPortState.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrPortState.setDescription('This object indicates whether the port state is enabled or disabled.')
swL2PortErrPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("err-disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortErrPortStatus.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrPortStatus.setDescription('This object indicates whether the port status is err-disabled.')
swL2PortErrPortReason = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("stp-lbd", 1), ("storm-control", 2), ("ddm", 3), ("duld", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortErrPortReason.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrPortReason.setDescription('This object indicates the module which disabled the port.')
swL2PortErrDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 2, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortErrDescription.setStatus('current')
if mibBuilder.loadTexts: swL2PortErrDescription.setDescription('This object provides a textual description of the port.')
swL2QOSBandwidthControlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1), )
if mibBuilder.loadTexts: swL2QOSBandwidthControlTable.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthControlTable.setDescription('A table that contains bandwidth control (rate limiting) information for each port.')
swL2QOSBandwidthControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2QOSBandwidthPortIndex"))
if mibBuilder.loadTexts: swL2QOSBandwidthControlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthControlEntry.setDescription('A list of information contained in swL2QOSBandwidthControlTable.')
swL2QOSBandwidthPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 650))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOSBandwidthPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthPortIndex.setDescription('Indicates the port.')
swL2QOSBandwidthRxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(64, 1024000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOSBandwidthRxRate.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthRxRate.setDescription('Indicates the Rx rate (Mbit/sec) of the specified port. A value of 1024000 means no limit.')
swL2QOSBandwidthTxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(64, 1024000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOSBandwidthTxRate.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthTxRate.setDescription('Indicates the Tx rate (Mbit/sec) of the specified port. A value of 1024000 means no limit.')
swL2QOSBandwidthRadiusRxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOSBandwidthRadiusRxRate.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthRadiusRxRate.setDescription('The Rx rate value comes from the RADIUS server. If an 802.1X port is authenticated, this value will overwrite the locally configured Rx rate.')
swL2QOSBandwidthRadiusTxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOSBandwidthRadiusTxRate.setStatus('current')
if mibBuilder.loadTexts: swL2QOSBandwidthRadiusTxRate.setDescription('The Tx rate value comes from the RADIUS server. If an 802.1X port is authenticated, this value will overwrite the locally configured Tx rate.')
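# --- Illustrative example (not part of the generated MIB) ---------------------
# The four rate columns above use 1024000 as a sentinel meaning "no limit".
# A tiny display helper built on that convention:
NO_LIMIT = 1024000

def format_rate(value):
    # Values outside the 64..1024000 constraint would be rejected by the agent.
    return 'no limit' if value == NO_LIMIT else '%d Mbit/sec' % value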
swL2QOSSchedulingTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 2), )
if mibBuilder.loadTexts: swL2QOSSchedulingTable.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingTable.setDescription('A table that contains the scheduling settings of each hardware priority queue.')
swL2QOSSchedulingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2QOSSchedulingClassIndex"))
if mibBuilder.loadTexts: swL2QOSSchedulingEntry.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingEntry.setDescription('A list of information contained in the swL2QOSSchedulingTable.')
swL2QOSSchedulingClassIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOSSchedulingClassIndex.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingClassIndex.setDescription('Indicates the hardware queue number.')
swL2QOSSchedulingMaxWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 55))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOSSchedulingMaxWeight.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingMaxWeight.setDescription('The maximum weight of this hardware queue when the weighted fair scheduling mechanism is used.')
swL2QOSSchedulingMechanism = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("strict", 1), ("roundrobin", 2), ("weightfair", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOSSchedulingMechanism.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingMechanism.setDescription('Indicates the mechanism of QOS scheduling.')
swL2QOSSchedulingMechanismCtrl = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("strict", 1), ("weightfair", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOSSchedulingMechanismCtrl.setStatus('current')
if mibBuilder.loadTexts: swL2QOSSchedulingMechanismCtrl.setDescription('This object controls the QoS scheduling mechanism.')
swL2QOS8021pUserPriorityTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 3), )
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityTable.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityTable.setDescription('A table that maps each 802.1p user priority to a hardware priority queue.')
swL2QOS8021pUserPriorityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2QOS8021pUserPriorityIndex"))
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityEntry.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityEntry.setDescription('A list of information contained in the swL2QOS8021pUserPriorityTable.')
swL2QOS8021pUserPriorityIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityIndex.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityIndex.setDescription('The 802.1p user priority.')
swL2QOS8021pUserPriorityClass = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityClass.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pUserPriorityClass.setDescription("The number of the switch's hardware priority queue. The switch has four hardware priority queues available. They are numbered between 0 (the lowest priority) and 3 (the highest priority).")
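# --- Illustrative example (not part of the generated MIB) ---------------------
# The column above maps each of the eight 802.1p user priorities onto one of
# the four hardware queues (0 = lowest, 3 = highest).  The mapping below is
# the IEEE 802.1D-recommended assignment for four queues, shown purely for
# illustration; the device default may differ.
PRIORITY_TO_QUEUE = {1: 0, 2: 0, 0: 1, 3: 1, 4: 2, 5: 2, 6: 3, 7: 3}

def queue_for_priority(user_priority):
    return PRIORITY_TO_QUEUE[user_priority & 0x7]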
swL2QOS8021pDefaultPriorityTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 4), )
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityTable.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityTable.setDescription('A table that contains the 802.1p default priority of each port.')
swL2QOS8021pDefaultPriorityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 4, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2QOS8021pDefaultPriorityIndex"))
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityEntry.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityEntry.setDescription('A list of information contained in the swL2QOS8021pDefaultPriorityTable.')
swL2QOS8021pDefaultPriorityIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 650))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityIndex.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriorityIndex.setDescription('Indicates the port number.')
swL2QOS8021pDefaultPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriority.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pDefaultPriority.setDescription('The priority value to assign to untagged packets received on the switch ports.')
swL2QOS8021pRadiusPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 3, 4, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2QOS8021pRadiusPriority.setStatus('current')
if mibBuilder.loadTexts: swL2QOS8021pRadiusPriority.setDescription('Indicates the 802.1p value that comes from the RADIUS server. If an 802.1X port is authenticated, this value will overwrite the locally configured value.')
swPortTrunkMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkMaxEntries.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkMaxEntries.setDescription('The maximum number of entries in the swPortTrunkTable.')
swPortTrunkMaxPortMembers = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkMaxPortMembers.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkMaxPortMembers.setDescription('The maximum number of ports allowed in a trunk.')
swPortTrunkTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3), )
if mibBuilder.loadTexts: swPortTrunkTable.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkTable.setDescription('This table specifies the port membership for each logical link.')
swPortTrunkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swPortTrunkIndex"))
if mibBuilder.loadTexts: swPortTrunkEntry.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkEntry.setDescription('A list of information that specifies which port group forms a single logical link.')
swPortTrunkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkIndex.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkIndex.setDescription('The index of logical port trunk.')
swPortTrunkMasterPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 2), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swPortTrunkMasterPort.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkMasterPort.setDescription('The object indicates the master port number of the port trunk entry. The first port of the trunk is implicitly configured to be the master logical port. When using a port trunk, you cannot configure the other ports of the group; their configuration must be the same as the master port (e.g. speed, duplex, enabled/disabled, flow control, and so on).')
swPortTrunkPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 3), PortList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swPortTrunkPortList.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkPortList.setDescription('Indicates the member ports of a logical trunk.')
swPortTrunkType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("static", 1), ("lacp", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swPortTrunkType.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkType.setDescription('This object indicates the type of this entry.')
swPortTrunkActivePort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 5), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkActivePort.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkActivePort.setDescription('This object indicates the active ports of this entry.')
swPortTrunkState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swPortTrunkState.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkState.setDescription('This object indicates the status of this entry. When the state is set to createAndGo (4), the trunk type is static (1); when the state is set to createAndWait (5), the trunk type is lacp (2).')
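# --- Illustrative example (not part of the generated MIB) ---------------------
# Per the description above, writing createAndGo(4) to this RowStatus column
# creates a static trunk, while createAndWait(5) creates an LACP trunk.  A
# minimal sketch, again assuming the classic synchronous pysnmp hlapi with
# hypothetical host/community placeholders.
CREATE_AND_GO, CREATE_AND_WAIT = 4, 5  # static(1) vs. lacp(2) trunk type

def create_trunk(host, community, trunk_index, row_status=CREATE_AND_GO):
    oid = (1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 6, trunk_index)
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity(oid), Integer32(row_status))))
    return error_indication or error_status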
swPortTrunkFloodingPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swPortTrunkFloodingPort.setStatus('current')
if mibBuilder.loadTexts: swPortTrunkFloodingPort.setDescription('The flooding port of every trunk.')
swL2TrunkAlgorithm = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("mac-source", 2), ("mac-destination", 3), ("mac-source-dest", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkAlgorithm.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkAlgorithm.setDescription('This object configures part of the packet to be examined by the switch when selecting the egress port for transmitting load-sharing data.')
swL2TrunkLACPPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 5), )
if mibBuilder.loadTexts: swL2TrunkLACPPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortTable.setDescription('This table specifies how a set of ports (up to 8) is formed into a single logical LACP link.')
swL2TrunkLACPPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 5, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2TrunkLACPPortIndex"))
if mibBuilder.loadTexts: swL2TrunkLACPPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortEntry.setDescription('A list of information specifying how a set of ports (up to 8) is formed into a single logical LACP link.')
swL2TrunkLACPPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkLACPPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortIndex.setDescription('The index of the LACP logical port.')
swL2TrunkLACPPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("passive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkLACPPortState.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortState.setDescription('The LACP state (active or passive) of the logical port.')
swL2TrunkVLANTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 6), )
if mibBuilder.loadTexts: swL2TrunkVLANTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANTable.setDescription('This table is used to manage the VLAN trunking feature of the device.')
swL2TrunkVLANEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 6, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2TrunkVLANPort"))
if mibBuilder.loadTexts: swL2TrunkVLANEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANEntry.setDescription('This object is used to configure the VLAN trunking settings for each port.')
swL2TrunkVLANPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkVLANPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANPort.setDescription('This object indicates the port being configured.')
swL2TrunkVLANState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 4, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkVLANState.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANState.setDescription('The VLAN trunking state of the port.')
swPortMirrorRxPortList = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 6, 2), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortMirrorRxPortList.setStatus('current')
if mibBuilder.loadTexts: swPortMirrorRxPortList.setDescription('This object indicates the Rx port list of ports to be sniffed.')
swPortMirrorTxPortList = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 6, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortMirrorTxPortList.setStatus('current')
if mibBuilder.loadTexts: swPortMirrorTxPortList.setDescription('This object indicates the Tx port list of ports to be sniffed.')
swPortMirrorTargetPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 6, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortMirrorTargetPort.setStatus('current')
if mibBuilder.loadTexts: swPortMirrorTargetPort.setDescription('This object indicates the target port, which will sniff (mirror) the traffic of the source ports. A trunk port member cannot be configured as a target mirroring port. The port number is the sequential (logical) number, which is also applied to the bridge MIB, etc.')
swPortMirrorState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 6, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swPortMirrorState.setStatus('current')
if mibBuilder.loadTexts: swPortMirrorState.setDescription('This object indicates the status of this entry.')
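# --- Illustrative example (not part of the generated MIB) ---------------------
# The Rx/Tx mirror lists above are PortList octet strings: one bit per port,
# with the most significant bit of the first octet standing for the lowest
# numbered port (see the swL2IGMPPortMap description further below).  A small
# sketch for building such a bitmap from a list of port numbers:
def ports_to_portlist(ports, n_octets=4):
    octets = bytearray(n_octets)
    for port in ports:
        octets[(port - 1) // 8] |= 0x80 >> ((port - 1) % 8)
    return bytes(octets)

# e.g. ports_to_portlist([1, 3, 28]) == b'\xa0\x00\x00\x10'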
swL2IGMPMaxSupportedVlans = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMaxSupportedVlans.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMaxSupportedVlans.setDescription('The maximum number of VLANs in the layer 2 IGMP control table (swL2IGMPCtrlTable).')
swL2IGMPMaxIpGroupNumPerVlan = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMaxIpGroupNumPerVlan.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMaxIpGroupNumPerVlan.setDescription('The maximum number of multicast IP groups per VLAN in the layer 2 IGMP information table (swL2IGMPQueryInfoTable).')
swL2IGMPCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3), )
if mibBuilder.loadTexts: swL2IGMPCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPCtrlTable.setDescription("The table controls the VLAN's IGMP function. Its scale depends on the current VLAN state (swL2VlanInfoStatus). If the VLAN mode is disabled, there is only one entry in the table, with index 1. If VLAN is in Port-Based or 802.1q mode, the number of entries can be up to 12, with an index range from 1 to 12.")
swL2IGMPCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPCtrlVid"))
if mibBuilder.loadTexts: swL2IGMPCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPCtrlEntry.setDescription('An entry in the IGMP control table (swL2IGMPCtrlTable). The entry is effective only when IGMP snooping (swL2DevCtrlIGMPSnooping) is enabled on the switch.')
swL2IGMPCtrlVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPCtrlVid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPCtrlVid.setDescription("This object indicates the IGMP control entry's VLAN ID. If VLAN is disabled, the VID is always 0 and cannot be changed by management users. If VLAN is in Port-Based mode, the VID is arranged from 1 to 12 in fixed form. If VLAN is in 802.1q mode, the VID can be set by the management user to any value from 1 to 4094, and the VID in each entry must be unique within the IGMP Control Table.")
swL2IGMPQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(125)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPQueryInterval.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPQueryInterval.setDescription('The frequency at which IGMP Host-Query packets are transmitted on this switch.')
swL2IGMPMaxResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMaxResponseTime.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMaxResponseTime.setDescription('The maximum query response time on this switch.')
swL2IGMPRobustness = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPRobustness.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRobustness.setDescription('The Robustness Variable allows tuning for the expected packet loss on a subnet. If a subnet is expected to be lossy, the Robustness Variable may be increased. IGMP is robust to (Robustness Variable-1) packet losses.')
swL2IGMPLastMemberQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPLastMemberQueryInterval.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPLastMemberQueryInterval.setDescription('The Last Member Query Interval is the Max Response Time inserted into Group-Specific Queries sent in response to Leave Group messages, and is also the amount of time between Group-Specific Query messages.')
swL2IGMPHostTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16711450)).clone(260)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPHostTimeout.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPHostTimeout.setDescription('The timer value for sending IGMP query packets when none have been sent by a host on the LAN. The timer works on a per-VLAN basis. The device will send a query message when the timer expires. See RFC 2236.')
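# --- Illustrative example (not part of the generated MIB) ---------------------
# RFC 2236 defines the Group Membership Interval as
#     (Robustness Variable x Query Interval) + Max Response Time,
# which with the defaults above (2 x 125 + 10) gives 260 seconds -- the
# default value of swL2IGMPHostTimeout.  A one-line check of that relationship:
def group_membership_interval(robustness=2, query_interval=125, max_response=10):
    return robustness * query_interval + max_response

assert group_membership_interval() == 260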
swL2IGMPRouteTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16711450)).clone(260)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPRouteTimeout.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouteTimeout.setDescription('The Router Timeout is how long a host must wait after hearing a Query before it may send any IGMPv2 messages.')
swL2IGMPLeaveTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16711450)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPLeaveTimer.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPLeaveTimer.setDescription('When a querier receives a Leave Group message for a group that has group members on the reception interface, it sends Group-Specific Queries every swL2IGMPLeaveTimer to the group being left.')
swL2IGMPQueryState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPQueryState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPQueryState.setDescription('This object decides if the IGMP query is enabled or disabled.')
swL2IGMPCurrentState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("querier", 2), ("non-querier", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPCurrentState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPCurrentState.setDescription('This object indicates the current IGMP query state.')
swL2IGMPCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPCtrlState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPCtrlState.setDescription('This object indicates the status of this entry. other (1) - This entry is currently in use but the conditions under which it will remain so are different from each of the following values. disable (2) - IGMP function is disabled for this entry. enable (3) - IGMP function is enabled for this entry.')
swL2IGMPFastLeave = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPFastLeave.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPFastLeave.setDescription('This object enables or disables the IGMP fast leave function for this entry.')
swL2IGMPDynIPMultVlanState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPDynIPMultVlanState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPDynIPMultVlanState.setDescription('This object is used to disable or enable the dynamic IP multicast feature in this VLAN.')
swL2IGMPDynIPMultVlanAge = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPDynIPMultVlanAge.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPDynIPMultVlanAge.setDescription('This object is used to enable or disable aging on the dynamic IP multicast entry in this VLAN.')
swL2IGMPQueryInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 4), )
if mibBuilder.loadTexts: swL2IGMPQueryInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPQueryInfoTable.setDescription('The table contains the number of IGMP query packets captured by this device, as well as the number of IGMP query packets sent by the device.')
swL2IGMPQueryInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 4, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPInfoVid"))
if mibBuilder.loadTexts: swL2IGMPQueryInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPQueryInfoEntry.setDescription('Information about current IGMP queries, provided that swL2DevCtrlIGMPSnooping and the swL2IGMPCtrlState of the associated VLAN entry are both enabled.')
swL2IGMPInfoVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPInfoVid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPInfoVid.setDescription('This object indicates the VID of the associated IGMP info table entry. It follows swL2IGMPCtrlVid in the associated entry of IGMP control table (swL2IGMPCtrlTable).')
swL2IGMPInfoQueryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPInfoQueryCount.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPInfoQueryCount.setDescription('This object indicates the number of query packets received since the IGMP function was enabled, on a per-VLAN basis.')
swL2IGMPInfoTxQueryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPInfoTxQueryCount.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPInfoTxQueryCount.setDescription('This object indicates the send count of IGMP query messages, on a per-VLAN basis. When the IGMP timer expires, the switch sends IGMP query packets to the related VLAN member ports and increments this object by 1.')
swL2IGMPInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5), )
if mibBuilder.loadTexts: swL2IGMPInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPInfoTable.setDescription('The table containing current IGMP information captured by this device, provided that swL2DevCtrlIGMPSnooping and the swL2IGMPCtrlState of the associated VLAN entry are both enabled. Note that the priority of IGMP table entries is lower than that of the Filtering Table: if there is a hash collision between an IGMP Table entry and a Filtering Table entry inside the switch H/W address table, the Filtering Table entry overwrites the colliding IGMP Table entry. Also see the swL2FilterMgmt description.')
swL2IGMPInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPVid"), (0, "DES3028P-L2MGMT-MIB", "swL2IGMPGroupIpAddr"))
if mibBuilder.loadTexts: swL2IGMPInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPInfoEntry.setDescription('Current IGMP information captured by this device, provided that swL2DevCtrlIGMPSnooping and the swL2IGMPCtrlState of the associated VLAN entry are both enabled.')
swL2IGMPVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPVid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPVid.setDescription('This object indicates the VID of an individual IGMP table entry. It shows the VID of IGMP report information captured on the network.')
swL2IGMPGroupIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPGroupIpAddr.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPGroupIpAddr.setDescription('This object identifies the group IP address captured from the IGMP packet, on a per-VLAN basis.')
swL2IGMPMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMacAddr.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMacAddr.setDescription('This object identifies the MAC address corresponding to swL2IGMPGroupIpAddr, on a per-VLAN basis.')
swL2IGMPPortMap = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1, 4), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPPortMap.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPPortMap.setDescription("This object indicates which ports belong to the same multicast group, on a per-VLAN basis. Each multicast group has an octet string to indicate the port map. The most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the switch is represented by a single bit within the value of this object. If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0' (note that the setting of the bit corresponding to the port from which a frame is received is irrelevant). Four octets represent the ports of one unit according to their logical port numbers; if the unit has fewer than 32 ports, the remaining bits are filled with zeros.")
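# --- Illustrative example (not part of the generated MIB) ---------------------
# The inverse of the bitmap encoding described above: expand a PortMap octet
# string into the member port numbers (MSB of the first octet = lowest port).
def portlist_to_ports(octets):
    ports = []
    for i, octet in enumerate(octets):
        for bit in range(8):
            if octet & (0x80 >> bit):
                ports.append(i * 8 + bit + 1)
    return ports

# e.g. portlist_to_ports(b'\xa0\x00\x00\x10') == [1, 3, 28]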
swL2IGMPIpGroupReportCount = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPIpGroupReportCount.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPIpGroupReportCount.setDescription('This object indicates how many report packets corresponding to this entry were received by the device while the IGMP function was enabled, on a per-VLAN basis.')
swL2IGMPRouterPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6), )
if mibBuilder.loadTexts: swL2IGMPRouterPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortTable.setDescription('Information about the router port table.')
swL2IGMPRouterPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPRouterPortVlanid"))
if mibBuilder.loadTexts: swL2IGMPRouterPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortEntry.setDescription('The entry of the swL2IGMPRouterPortTable.')
swL2IGMPRouterPortVlanid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPRouterPortVlanid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortVlanid.setDescription('This object indicates the VLAN ID of the router port entry.')
swL2IGMPRouterPortVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPRouterPortVlanName.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortVlanName.setDescription('This object indicates the VLAN name of the router port entry.')
swL2IGMPRouterPortStaticPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPRouterPortStaticPortList.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortStaticPortList.setDescription('This object indicates the static portlist of the router port entry.')
swL2IGMPRouterPortDynamicPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1, 4), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPRouterPortDynamicPortList.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortDynamicPortList.setDescription('This object indicates the dynamic portlist of the router port entry.')
swL2IGMPRouterPortForbiddenPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 6, 1, 5), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPRouterPortForbiddenPortList.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPRouterPortForbiddenPortList.setDescription('This object indicates the forbidden portlist of the router port entry.')
swL2IGMPAccessAuthTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 7), )
if mibBuilder.loadTexts: swL2IGMPAccessAuthTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPAccessAuthTable.setDescription('This table is used to manage the IGMP Access Authentication configurations of the device.')
swL2IGMPAccessAuthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 7, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPAccessAuthPort"))
if mibBuilder.loadTexts: swL2IGMPAccessAuthEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPAccessAuthEntry.setDescription('A list of manageable entities for IGMP Access Authentication. The configuration is done per port.')
swL2IGMPAccessAuthPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 7, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPAccessAuthPort.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPAccessAuthPort.setDescription('The index of the swL2IGMPAccessAuthTable. This object corresponds to the port being configured.')
swL2IGMPAccessAuthState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPAccessAuthState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPAccessAuthState.setDescription('This object denotes the status of IGMP Access Authentication of the port.')
swL2IGMPMulticastVlanTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8), )
if mibBuilder.loadTexts: swL2IGMPMulticastVlanTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanTable.setDescription('Information about the IGMP snooping multicast VLAN table.')
swL2IGMPMulticastVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPMulticastVlanid"))
if mibBuilder.loadTexts: swL2IGMPMulticastVlanEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanEntry.setDescription('The entry of swL2IGMPMulticastVlanTable.')
swL2IGMPMulticastVlanid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanid.setDescription('This object indicates the VLAN ID of the IGMP snooping multicast VLAN entry.')
swL2IGMPMulticastVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanName.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanName.setDescription('This object indicates the VLAN name of the IGMP snooping multicast VLAN entry.')
swL2IGMPMulticastVlanSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanSourcePort.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanSourcePort.setDescription('This object indicates the port list of the source ports of the IGMP snooping multicast VLAN. The source ports will be set as tag ports of the VLAN entry and the IGMP control messages received from the member ports will be forwarded to the source ports.')
swL2IGMPMulticastVlanMemberPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 4), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanMemberPort.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanMemberPort.setDescription('This object indicates the port list of the member ports of the IGMP snooping multicast VLAN. The member ports will be set as untagged ports of the VLAN entry, and the IGMP control messages received from the member ports will be forwarded to the source ports.')
swL2IGMPMulticastVlanTagMemberPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 5), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanTagMemberPort.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanTagMemberPort.setDescription('This object indicates the port list of the tag member ports of the IGMP snooping multicast VLAN.')
swL2IGMPMulticastVlanState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanState.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanState.setDescription('This object can be used to enable or disable the IGMP snooping multicast VLAN.')
swL2IGMPMulticastVlanReplaceSourceIp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanReplaceSourceIp.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanReplaceSourceIp.setDescription('The replacement source IP of this multicast VLAN.')
swL2IGMPMulticastVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRowStatus.setDescription('This object indicates the status of this entry.')
swL2IGMPMulticastVlanRemoveAllMcastAddrListAction = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("start", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRemoveAllMcastAddrListAction.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRemoveAllMcastAddrListAction.setDescription('This object indicates whether to remove all multicast address lists from the IGMP multicast VLAN.')
swL2IGMPMulticastVlanUntagSourcePort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 10), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanUntagSourcePort.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanUntagSourcePort.setDescription('This object indicates the untagged member ports to add to the multicast VLAN.')
swL2IGMPMulticastVlanRemapPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8)).clone(8)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRemapPriority.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanRemapPriority.setDescription("The priority value (0 to 7) to be associated with the data traffic to be forwarded on the multicast VLAN. When set to 8, the packet's original priority will be used.")
swL2IGMPMulticastVlanReplacePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 8, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanReplacePriority.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanReplacePriority.setDescription("Specifies that a packet's priority will be changed by the switch based on the remap priority. This flag will only take effect when remap priority is set.")
swL2IGMPMulticastVlanGroupTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9), )
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupTable.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupTable.setDescription('The table containing the IGMP snooping multicast VLAN group information.')
swL2IGMPMulticastVlanGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPMulticastVlanGroupVid"), (0, "DES3028P-L2MGMT-MIB", "swL2IGMPMulticastVlanGroupFromIp"), (0, "DES3028P-L2MGMT-MIB", "swL2IGMPMulticastVlanGroupToIp"))
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupEntry.setDescription('Information about the current IGMP snooping multicast VLAN group.')
swL2IGMPMulticastVlanGroupVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupVid.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupVid.setDescription('This object indicates the VID of the IGMP snooping multicast VLAN group.')
swL2IGMPMulticastVlanGroupFromIp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupFromIp.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupFromIp.setDescription('Specifies the start of the multicast address range for this VLAN.')
swL2IGMPMulticastVlanGroupToIp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupToIp.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupToIp.setDescription('Specifies the end of the multicast address range for this VLAN.')
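# --- Illustrative example (not part of the generated MIB) ---------------------
# The From/To columns above define an inclusive multicast address range.  A
# small range check using the standard library:
from ipaddress import IPv4Address

def group_in_range(group, from_ip, to_ip):
    return IPv4Address(from_ip) <= IPv4Address(group) <= IPv4Address(to_ip)

# e.g. group_in_range('239.1.1.5', '239.1.1.0', '239.1.1.255') is True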
swL2IGMPMulticastVlanGroupStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 9, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupStatus.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPMulticastVlanGroupStatus.setDescription('This object indicates the status of this entry.')
swL2IGMPv3Table = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 10), )
if mibBuilder.loadTexts: swL2IGMPv3Table.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPv3Table.setDescription('This table contains the IGMP snooping V3 information.')
swL2IGMPv3Entry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 10, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2IGMPVid"), (0, "DES3028P-L2MGMT-MIB", "swL2IGMPGroupIpAddr"), (0, "DES3028P-L2MGMT-MIB", "swL2IGMPv3SourceIPAddr"))
if mibBuilder.loadTexts: swL2IGMPv3Entry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPv3Entry.setDescription('Information about the current IGMP snooping V3.')
swL2IGMPv3SourceIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 10, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPv3SourceIPAddr.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPv3SourceIPAddr.setDescription('This object identifies the source IP addresses of the group, as captured from IGMP packets, on a per-VLAN basis.')
swL2IGMPv3Forwarding = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPv3Forwarding.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPv3Forwarding.setDescription('This object indicates whether packets from the source IP addresses are forwarded or not.')
swL2IGMPv3ExpireTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2IGMPv3ExpireTimer.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPv3ExpireTimer.setDescription('This object indicates the time remaining before the source entry expires.')
swIGMPSnoopingGroupTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11), )
if mibBuilder.loadTexts: swIGMPSnoopingGroupTable.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupTable.setDescription('The table contains the current IGMP snooping group information captured by the device.')
swIGMPSnoopingGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swIGMPSnoopingGroupVid"), (0, "DES3028P-L2MGMT-MIB", "swIGMPSnoopingGroupGroupAddr"), (0, "DES3028P-L2MGMT-MIB", "swIGMPSnoopingGroupSourceAddr"))
if mibBuilder.loadTexts: swIGMPSnoopingGroupEntry.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupEntry.setDescription('Information about the current IGMP snooping group information which has been captured by the device.')
swIGMPSnoopingGroupVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupVid.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupVid.setDescription('This object indicates the VID of the individual IGMP snooping group table entry.')
swIGMPSnoopingGroupGroupAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupGroupAddr.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupGroupAddr.setDescription('This object identifies the group IP address which has been captured from the IGMP packet, on a per-VLAN basis.')
swIGMPSnoopingGroupSourceAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupSourceAddr.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupSourceAddr.setDescription('This object identifies the source addresses.')
swIGMPSnoopingGroupIncludePortMap = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 4), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupIncludePortMap.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupIncludePortMap.setDescription('This object indicates the port list under INCLUDE mode.')
swIGMPSnoopingGroupExcludePortMap = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 5), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupExcludePortMap.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupExcludePortMap.setDescription('This object indicates the port list under EXCLUDE mode.')
swIGMPSnoopingGroupReportCount = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupReportCount.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupReportCount.setDescription('This object indicates how many report packets corresponding to this entry were received by the device with the IGMP function enabled, on a per-group basis.')
swIGMPSnoopingGroupUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupUpTime.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupUpTime.setDescription('This object indicates how long since the group was detected.')
swIGMPSnoopingGroupExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupExpiryTime.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupExpiryTime.setDescription('This object indicates the time left before this group will be aged out.')
swIGMPSnoopingGroupRouterPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 11, 1, 9), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swIGMPSnoopingGroupRouterPorts.setStatus('current')
if mibBuilder.loadTexts: swIGMPSnoopingGroupRouterPorts.setDescription('This object indicates the port number of the router ports.')
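# --- Illustrative sketch (not part of the generated MIB) ---------------------
# The table above (swIGMPSnoopingGroupTable) can be walked with pysnmp's
# high-level API.  The helper below is a minimal sketch, assuming this compiled
# module is on the pysnmp MIB search path; the agent address '192.0.2.1' and
# the community string 'public' are placeholder assumptions, not values taken
# from this MIB.
def _walk_igmp_snooping_groups(host='192.0.2.1', community='public'):
    # Hypothetical helper: print the group and source address of every row in
    # swIGMPSnoopingGroupTable.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    iterator = nextCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swIGMPSnoopingGroupGroupAddr')),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swIGMPSnoopingGroupSourceAddr')),
        lexicographicMode=False)
    for errorIndication, errorStatus, errorIndex, varBinds in iterator:
        if errorIndication or errorStatus:
            break  # stop on the first SNMP error
        for varBind in varBinds:
            print(varBind.prettyPrint())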
swL2IGMPDynIpMultMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12))
swL2IGMPDynIPMultMaxEntry = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPDynIPMultMaxEntry.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPDynIPMultMaxEntry.setDescription('This object specifies the maximum number of entries which can be learned by dynamic IP multicast.')
swL2IGMPSnoopingClearDynIpMult = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12, 2))
swL2IGMPSnoopingClearDynIpMultVID = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12, 2, 1), VlanId()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultVID.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultVID.setDescription('This object indicates the VLAN identifier where the data driven entries will be removed from.')
swL2IGMPSnoopingClearDynIpMultIP = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12, 2, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultIP.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultIP.setDescription('This object indicates the IP address of the IGMP snooping group from which the data driven entries will be removed.')
swL2IGMPSnoopingClearDynIpMultAction = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 7, 12, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("all", 1), ("vlan", 2), ("group", 3), ("other", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultAction.setStatus('current')
if mibBuilder.loadTexts: swL2IGMPSnoopingClearDynIpMultAction.setDescription('Setting this object will clear the data driven entries. all (1) - Remove all learned data driven groups. vlan (2) - Clear all data driven entries in the VLAN specified by swL2IGMPSnoopingClearDynIpMultVID. group (3) - Clear the group with the address specified by swL2IGMPSnoopingClearDynIpMultIP in the VLAN specified by swL2IGMPSnoopingClearDynIpMultVID.')
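# --- Illustrative sketch (not part of the generated MIB) ---------------------
# A minimal sketch of the clear sequence described above: write the VID and
# then set the action scalar to vlan(2) in a single set request.  The host,
# community and VID defaults are placeholder assumptions.
def _clear_data_driven_groups(host='192.0.2.1', community='private', vid=1):
    # Hypothetical helper: clear all data driven IGMP snooping entries in one VLAN.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2IGMPSnoopingClearDynIpMultVID', 0), vid),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2IGMPSnoopingClearDynIpMultAction', 0), 2)))
    return errorIndication, errorStatus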
swL2TrafficSegTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 12, 1), )
if mibBuilder.loadTexts: swL2TrafficSegTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegTable.setDescription('This table specifies that a port can only forward traffic to the ports in its forwarding port list.')
swL2TrafficSegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 12, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2TrafficSegPort"))
if mibBuilder.loadTexts: swL2TrafficSegEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegEntry.setDescription('A list of information specifying a port and the port list to which it can forward traffic.')
swL2TrafficSegPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 12, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrafficSegPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegPort.setDescription('The port number of the logical port.')
swL2TrafficSegForwardPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 12, 1, 1, 2), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrafficSegForwardPorts.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegForwardPorts.setDescription('The list of ports to which the specified port can forward traffic.')
swL2PortSecurityControlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1), )
if mibBuilder.loadTexts: swL2PortSecurityControlTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityControlTable.setDescription('The port security feature controls the address learning capability and the traffic forwarding decision. Each port can have this function enabled or disabled. When it is enabled with a given number N, which allows N addresses to be learned at this port, the first N learned addresses are locked at this port as static entries. Once the number of learned addresses reaches N, any incoming packet whose source address has not been learned is discarded (i.e. dropped), and no more new addresses can be learned at this port.')
swL2PortSecurityControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2PortSecurityPortIndex"))
if mibBuilder.loadTexts: swL2PortSecurityControlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityControlEntry.setDescription('A list of information contained in the swL2PortSecurityControlTable.')
swL2PortSecurityPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortSecurityPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityPortIndex.setDescription('Indicates a secured port to lock address learning.')
swL2PortSecurityMaxLernAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityMaxLernAddr.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityMaxLernAddr.setDescription('Indicates the allowable number of addresses to be learned at this port.')
swL2PortSecurityMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("permanent", 2), ("deleteOnTimeout", 3), ("deleteOnReset", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityMode.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityMode.setDescription('Indicates the mode of locking addresses. In deleteOnTimeout (3) mode, the locked addresses can be aged out after the aging timer expires; whenever a locked address is aged out, the number of addresses that can be learned is increased by one. In deleteOnReset (4) mode, the locked addresses are never aged out unless the system is restarted, which prevents port movement or intrusion.')
swL2PortSecurityAdmState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enable", 2), ("disable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityAdmState.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityAdmState.setDescription('Indicates an administration state of locking address.')
swL2PortSecurityTrapLogState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enable", 2), ("disable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityTrapLogState.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityTrapLogState.setDescription("When enabled (2), whenever there's a new MAC that violates the pre-defined port security configuration, a trap will be sent out and the relevant information will be logged in the system.")
swL2PortSecurityDelCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 3))
swL2PortSecurityDelVlanName = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityDelVlanName.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityDelVlanName.setDescription('Indicates the VLAN name.')
swL2PortSecurityDelPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 768))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityDelPort.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityDelPort.setDescription('Indicates the port. 0 means the function does not work now.')
swL2PortSecurityDelMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 3, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityDelMacAddress.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityDelMacAddress.setDescription('Specifies the MAC address.')
swL2PortSecurityDelActivity = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 15, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("start", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityDelActivity.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityDelActivity.setDescription('When set to start (2), the entry specified by the objects above will be deleted.')
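# --- Illustrative sketch (not part of the generated MIB) ---------------------
# A minimal sketch of the delete sequence implied by the swL2PortSecurityDelCtrl
# scalars above: fill in the VLAN name, port and MAC address, then write
# start(2) to swL2PortSecurityDelActivity.  All argument defaults (host,
# community, VLAN name, port, MAC) are placeholder assumptions.
def _delete_port_security_entry(host='192.0.2.1', community='private',
                                vlan_name='default', port=1,
                                mac_hex='001122334455'):
    # Hypothetical helper: remove one locked MAC address learned by port security.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd,
                              OctetString)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2PortSecurityDelVlanName', 0), vlan_name),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2PortSecurityDelPort', 0), port),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2PortSecurityDelMacAddress', 0),
                   OctetString(hexValue=mac_hex)),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2PortSecurityDelActivity', 0), 2)))
    return errorIndication, errorStatus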
swL2CosPriorityCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3))
swL2CosPriorityTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1), )
if mibBuilder.loadTexts: swL2CosPriorityTable.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityTable.setDescription('Used to show and configure per port priority-based QoS features on the switch.')
swL2CosPriorityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2CosPriorityPort"))
if mibBuilder.loadTexts: swL2CosPriorityEntry.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityEntry.setDescription('A list of information contained in the swL2CosPriorityTable.')
swL2CosPriorityPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2CosPriorityPort.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityPort.setDescription('The port number of CoS Priority.')
swL2CosPriorityPortPRI = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosPriorityPortPRI.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityPortPRI.setDescription('Indicates the port_priority state for CoS.')
swL2CosPriorityEtherPRI = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("ether8021p", 2), ("macBase", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosPriorityEtherPRI.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityEtherPRI.setDescription('Enables Ethernet frame based priority. When set to ether8021p (2), 802.1p QoS is enabled; when set to macBase (3), MAC-based QoS is enabled; when set to disable (1), Ethernet frame based priority is disabled.')
swL2CosPriorityIpPRI = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("tos", 2), ("dscp", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosPriorityIpPRI.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityIpPRI.setDescription('Enables IP priority QoS. When set to tos (2), TOS based QoS is enabled; when set to dscp (3), DSCP based QoS is enabled; when set to disable (1), IP priority QoS is disabled.')
swL2CosPriorityNone = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("valid", 1), ("invalid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosPriorityNone.setStatus('current')
if mibBuilder.loadTexts: swL2CosPriorityNone.setDescription('When read, it always returns invalid (2); when valid (1) is written, all priorities in this table are disabled.')
swL2CosPortPRITable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 2), )
if mibBuilder.loadTexts: swL2CosPortPRITable.setStatus('current')
if mibBuilder.loadTexts: swL2CosPortPRITable.setDescription('Used to show port-to-class mappings and map specific port to one of the hardware queues available on the switch.')
swL2CosPortPRIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2CosPortPRIIndex"))
if mibBuilder.loadTexts: swL2CosPortPRIEntry.setStatus('current')
if mibBuilder.loadTexts: swL2CosPortPRIEntry.setDescription('A list of information contained in the swL2CosPortPRITable.')
swL2CosPortPRIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2CosPortPRIIndex.setStatus('current')
if mibBuilder.loadTexts: swL2CosPortPRIIndex.setDescription('Indicates the port index of the CoS port priority mapping.')
swL2CosPortPRIClass = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosPortPRIClass.setStatus('current')
if mibBuilder.loadTexts: swL2CosPortPRIClass.setDescription("The number of the switch's hardware priority queue. The switch has 4 hardware priority queues available, numbered from 0 (the lowest priority queue) to 3 (the highest priority queue). Setting this object requires administrator privileges, and only the values 0 and 3 can be set; 1 and 2 cannot.")
swL2CosMacBasePRITable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 3), )
if mibBuilder.loadTexts: swL2CosMacBasePRITable.setStatus('current')
if mibBuilder.loadTexts: swL2CosMacBasePRITable.setDescription('Used to show MAC priority map to traffic class and map the destination MAC address in incoming packet to one of the hardware queues available on the switch.')
swL2CosMacBasePRIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2CosMacBasePRIIndex"))
if mibBuilder.loadTexts: swL2CosMacBasePRIEntry.setStatus('current')
if mibBuilder.loadTexts: swL2CosMacBasePRIEntry.setDescription('A list of information contained in the swL2CosMacBasePRITable.')
swL2CosMacBasePRIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 3, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2CosMacBasePRIIndex.setStatus('current')
if mibBuilder.loadTexts: swL2CosMacBasePRIIndex.setDescription('Indicates the destination MAC address used as the index of the CoS MAC-based priority mapping.')
swL2CosMacBasePRIClass = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosMacBasePRIClass.setStatus('current')
if mibBuilder.loadTexts: swL2CosMacBasePRIClass.setDescription("The number of the switch's hardware priority queue. The switch has 4 hardware priority queues available. They are numbered between 0 (the lowest priority queue) and 3 (the highest priority queue). If you want to set one, you must have administrator privileges.")
swL2CosTosPRITable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 4), )
if mibBuilder.loadTexts: swL2CosTosPRITable.setStatus('current')
if mibBuilder.loadTexts: swL2CosTosPRITable.setDescription('Used to show TOS value to traffic class mapping and map the TOS value in the IP header of incoming packet to one of the four hardware queues available on the switch.')
swL2CosTosPRIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 4, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2CosTosPRIIndex"))
if mibBuilder.loadTexts: swL2CosTosPRIEntry.setStatus('current')
if mibBuilder.loadTexts: swL2CosTosPRIEntry.setDescription('A list of information contained in the swL2CosTosPRITable.')
swL2CosTosPRIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2CosTosPRIIndex.setStatus('current')
if mibBuilder.loadTexts: swL2CosTosPRIIndex.setDescription('Indicates the TOS value used as the index of the CoS TOS priority mapping.')
swL2CosTosPRIClass = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosTosPRIClass.setStatus('current')
if mibBuilder.loadTexts: swL2CosTosPRIClass.setDescription("The number of the switch's hardware priority queue. The switch has 4 hardware priority queues available. They are numbered between 0 (the lowest priority queue) and 3 (the highest priority queue). If you want to set one, you must have administrator privileges.")
swL2CosDscpPRITable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 5), )
if mibBuilder.loadTexts: swL2CosDscpPRITable.setStatus('current')
if mibBuilder.loadTexts: swL2CosDscpPRITable.setDescription('Used to show DSCP value to traffic class mapping and map the DSCP value in the IP header of incoming packet to one of the hardware queues available on the switch.')
swL2CosDscpPRIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 5, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2CosDscpPRIIndex"))
if mibBuilder.loadTexts: swL2CosDscpPRIEntry.setStatus('current')
if mibBuilder.loadTexts: swL2CosDscpPRIEntry.setDescription('A list of information contained in the swL2CosDscpPRITable.')
swL2CosDscpPRIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2CosDscpPRIIndex.setStatus('current')
if mibBuilder.loadTexts: swL2CosDscpPRIIndex.setDescription('Indicates the DSCP value used as the index of the CoS DSCP priority mapping.')
swL2CosDscpPRIClass = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 17, 3, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2CosDscpPRIClass.setStatus('current')
if mibBuilder.loadTexts: swL2CosDscpPRIClass.setDescription("The number of the switch's hardware priority queue. The switch has 4 hardware priority queues available. They are numbered between 0 (the lowest priority queue) and 3 (the highest priority queue). If you want to set one, you must have administrator privileges.")
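# --- Illustrative sketch (not part of the generated MIB) ---------------------
# A minimal sketch of a DSCP-to-queue mapping set based on the table above:
# the DSCP value is the table index and the written value selects one of the
# four hardware queues (0..3).  Host, community, DSCP and queue defaults are
# placeholder assumptions.
def _map_dscp_to_queue(host='192.0.2.1', community='private', dscp=46, queue=3):
    # Hypothetical helper: write swL2CosDscpPRIClass for one DSCP value.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2CosDscpPRIClass', dscp), queue)))
    return errorIndication, errorStatus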
swL2DhcpRelayState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayState.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayState.setDescription('This object indicates whether the DHCP relay function is enabled or disabled.')
swL2DhcpRelayHopCount = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayHopCount.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayHopCount.setDescription('This object indicates the maximum number of router hops that the DHCP packets can cross.')
swL2DhcpRelayTimeThreshold = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayTimeThreshold.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayTimeThreshold.setDescription('This object indicates the minimum time in seconds within which the switch must relay the DHCP request. If this time is exceeded, the switch will drop the DHCP packet.')
swL2DhcpRelayOption82State = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayOption82State.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayOption82State.setDescription('This object indicates whether the DHCP relay agent information option 82 function is enabled or disabled.')
swL2DhcpRelayOption82Check = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayOption82Check.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayOption82Check.setDescription('This object indicates whether the checking mechanism of DHCP relay agent information option 82 is enabled or disabled.')
swL2DhcpRelayOption82Policy = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("replace", 1), ("drop", 2), ("keep", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayOption82Policy.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayOption82Policy.setDescription('This object indicates the reforwarding policy of DHCP relay agent information option 82. replace (1) - Replace the existing option 82 field in messages. drop (2) - Discard messages with an existing option 82 field. keep (3) - Retain the existing option 82 field in messages.')
swL2DhcpRelayCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 7), )
if mibBuilder.loadTexts: swL2DhcpRelayCtrlTable.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayCtrlTable.setDescription('This table specifies the IP address as a destination to forward (relay) DHCP packets to.')
swL2DhcpRelayCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 7, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2DhcpRelayCtrlInterfaceName"), (0, "DES3028P-L2MGMT-MIB", "swL2DhcpRelayCtrlServer"))
if mibBuilder.loadTexts: swL2DhcpRelayCtrlEntry.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayCtrlEntry.setDescription('A list of information specifies the IP address as a destination to forward (relay) DHCP packets to.')
swL2DhcpRelayCtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 7, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2DhcpRelayCtrlInterfaceName.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayCtrlInterfaceName.setDescription('The name of the IP interface.')
swL2DhcpRelayCtrlServer = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 7, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2DhcpRelayCtrlServer.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpRelayCtrlServer.setDescription('The DHCP server IP address.')
swL2DhcpRelayCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 18, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("valid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpRelayCtrlState.setStatus('obsolete')
if mibBuilder.loadTexts: swL2DhcpRelayCtrlState.setDescription('This object indicates the status of this entry. other (1) - This entry is currently in use, but the conditions under which it will remain so differ from the following values. invalid (2) - Writing this value to the object removes the corresponding entry from the table. valid (3) - This entry resides in the table.')
swL2MgmtMIBTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0))
swL2PortSecurityViolationTrap = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0, 1)).setObjects(("DES3028P-L2MGMT-MIB", "swL2PortSecurityPortIndex"), ("DES3028P-L2MGMT-MIB", "swL2PortSecurityViolationMac"))
if mibBuilder.loadTexts: swL2PortSecurityViolationTrap.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityViolationTrap.setDescription("When the port_security trap is enabled, if there's a new MAC that violates the pre-defined port security configuration, a trap will be sent out.")
swL2macNotification = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0, 2)).setObjects(("DES3028P-L2MGMT-MIB", "swL2macNotifyInfo"))
if mibBuilder.loadTexts: swL2macNotification.setStatus('current')
if mibBuilder.loadTexts: swL2macNotification.setDescription(' This trap indicates the MAC address variations in the address table. ')
swL2FloodMacDetectedTrap = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0, 3)).setObjects(("DES3028P-L2MGMT-MIB", "swL2FloodMacDetectedMacVid"), ("DES3028P-L2MGMT-MIB", "swL2FloodMacDetectedMacAddress"))
if mibBuilder.loadTexts: swL2FloodMacDetectedTrap.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMacDetectedTrap.setDescription('If a new flooding MAC address is detected, a trap will be sent out.')
swL2PortLoopOccurred = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0, 4)).setObjects(("DES3028P-L2MGMT-MIB", "swL2LoopDetectPortIndex"))
if mibBuilder.loadTexts: swL2PortLoopOccurred.setStatus('current')
if mibBuilder.loadTexts: swL2PortLoopOccurred.setDescription('The trap is sent when a Port loop occurs.')
swL2PortLoopRestart = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 0, 5)).setObjects(("DES3028P-L2MGMT-MIB", "swL2LoopDetectPortIndex"))
if mibBuilder.loadTexts: swL2PortLoopRestart.setStatus('current')
if mibBuilder.loadTexts: swL2PortLoopRestart.setDescription('The trap is sent when a Port loop restarts after the interval time.')
swl2PortSecurityBindings = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 1))
swL2PortSecurityViolationMac = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 1, 1), MacAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: swL2PortSecurityViolationMac.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityViolationMac.setDescription('This object indicates the MAC address that violates the port security configuration.')
swl2NotificationBindings = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 2))
swL2macNotifyInfo = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 2, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2macNotifyInfo.setStatus('current')
if mibBuilder.loadTexts: swL2macNotifyInfo.setDescription('This object carries the MAC notification information.')
swL2FloodMacDetectedMacVid = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMacDetectedMacVid.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMacDetectedMacVid.setDescription('This object indicates the VID of the flooding MAC.')
swL2FloodMacDetectedMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 20, 2, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMacDetectedMacAddress.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMacDetectedMacAddress.setDescription('This object indicates the MAC address of the flooding MAC.')
swL2LoopDetectCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 1))
swL2LoopDetectAdminState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2LoopDetectAdminState.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectAdminState.setDescription('This object indicates the loopback detection status for the system.')
swL2LoopDetectInterval = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2LoopDetectInterval.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectInterval.setDescription('This object indicates the interval value; the range is from 1 to 32767 seconds.')
swL2LoopDetectRecoverTime = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2LoopDetectRecoverTime.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectRecoverTime.setDescription('This object indicates the recover time; the range is from 60 to 1000000. The value of 0 disables the recover function.')
swL2LoopDetectTrapMode = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("loop_detected", 2), ("loop_cleared", 3), ("both", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2LoopDetectTrapMode.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectTrapMode.setDescription('This object indicates the loopback detection trap mode for the system.')
swL2LoopDetectPortMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2))
swL2LoopDetectPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2, 1), )
if mibBuilder.loadTexts: swL2LoopDetectPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectPortTable.setDescription('The table specifies the loopback detection function specified by port.')
swL2LoopDetectPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2LoopDetectPortIndex"))
if mibBuilder.loadTexts: swL2LoopDetectPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectPortEntry.setDescription('The table specifies the loopback detection function specified by port.')
swL2LoopDetectPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2LoopDetectPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectPortIndex.setDescription("This object indicates the module's port number. The range is from 1 to the maximum port number specified in the module.")
swL2LoopDetectPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2LoopDetectPortState.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectPortState.setDescription('This object indicates the loopback detection function state on the port.')
swL2LoopDetectPortLoopStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 21, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("loop", 2), ("error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2LoopDetectPortLoopStatus.setStatus('current')
if mibBuilder.loadTexts: swL2LoopDetectPortLoopStatus.setDescription('This object indicates the port status.')
swL2McastFilterTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2), )
if mibBuilder.loadTexts: swL2McastFilterTable.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterTable.setDescription(' A table that contains information about the multicast filter address.')
swL2McastFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2McastFilterProfileIndex"))
if mibBuilder.loadTexts: swL2McastFilterEntry.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterEntry.setDescription('A list of multicast filter mode information for each profile ID. ')
swL2McastFilterProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2McastFilterProfileIndex.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterProfileIndex.setDescription('The index for each profile.')
swL2McastFilterProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterProfileName.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterProfileName.setDescription('The multicast filter description.')
swL2McastFilterAddOrDelState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("add", 2), ("delete", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterAddOrDelState.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterAddOrDelState.setDescription('This object controls adding or deleting the multicast filter addresses specified in swL2McastFilterGroupList.')
swL2McastFilterGroupList = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterGroupList.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterGroupList.setDescription('The multicast filter address group list.')
swL2McastFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2McastFilterStatus.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterStatus.setDescription('This object indicates the status of this entry.')
swL2McastFilterPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 3), )
if mibBuilder.loadTexts: swL2McastFilterPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortTable.setDescription(' A table that is used to bind port to profile ID.')
swL2McastFilterPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 3, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2McastFilterPortGroupPortIndex"))
if mibBuilder.loadTexts: swL2McastFilterPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortEntry.setDescription('A list of information that is used to bind port to profile ID. ')
swL2McastFilterPortGroupPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2McastFilterPortGroupPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortGroupPortIndex.setDescription('The port index.')
swL2McastFilterPortProfileAddOrDelState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("add", 2), ("delete", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterPortProfileAddOrDelState.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortProfileAddOrDelState.setDescription('This object controls adding or deleting the multicast filter profile bound to the port.')
swL2McastFilterPortProfileID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterPortProfileID.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortProfileID.setDescription('This object indicates the profile ID of this entry. When read, it is always 0. When set, 0 cannot be set.')
swL2McastFilterPortMaxGroupTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 4), )
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupTable.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupTable.setDescription(' A table that contains information about the max group number based on port.')
swL2McastFilterPortMaxGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 4, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2McastFilterPortMaxGroupPortIndex"))
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupEntry.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupEntry.setDescription('A list of max group number information for each port.')
swL2McastFilterPortMaxGroupPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroupPortIndex.setDescription('The port index.')
swL2McastFilterPortMaxGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 256))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroup.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortMaxGroup.setDescription('The maximum number of groups. The default is 256.')
swL2McastFilterPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 5), )
if mibBuilder.loadTexts: swL2McastFilterPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortInfoTable.setDescription(' A table that contains information about all of the multicast groups for the ports.')
swL2McastFilterPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 5, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2McastFilterPortInfoPortIndex"))
if mibBuilder.loadTexts: swL2McastFilterPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortInfoEntry.setDescription('A list of information about all of the multicast groups for each port.')
swL2McastFilterPortInfoPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2McastFilterPortInfoPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortInfoPortIndex.setDescription('The port index.')
swL2McastFilterPortInfoProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 22, 5, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2McastFilterPortInfoProfileName.setStatus('current')
if mibBuilder.loadTexts: swL2McastFilterPortInfoProfileName.setDescription('The multicast filter address profile Name.')
swL2VlanAdvertisementTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 1), )
if mibBuilder.loadTexts: swL2VlanAdvertisementTable.setStatus('current')
if mibBuilder.loadTexts: swL2VlanAdvertisementTable.setDescription('A table containing the advertisement state for each VLAN configured into the device by (local or network) management.')
swL2VlanAdvertisementEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2VlanIndex"))
if mibBuilder.loadTexts: swL2VlanAdvertisementEntry.setStatus('current')
if mibBuilder.loadTexts: swL2VlanAdvertisementEntry.setDescription('The advertisement state for each VLAN configured into the device.')
swL2VlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2VlanIndex.setStatus('current')
if mibBuilder.loadTexts: swL2VlanIndex.setDescription('The VLAN-ID or other identifier referring to this VLAN.')
swL2VlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2VlanName.setStatus('current')
if mibBuilder.loadTexts: swL2VlanName.setDescription('An administratively assigned string, which may be used to identify the VLAN.')
swL2VlanAdvertiseState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanAdvertiseState.setStatus('current')
if mibBuilder.loadTexts: swL2VlanAdvertiseState.setDescription('This object indicates the advertise status of this VLAN entry.')
swL2VlanMultiplyMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2))
swL2VlanMultiplyVlanList = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanMultiplyVlanList.setStatus('current')
if mibBuilder.loadTexts: swL2VlanMultiplyVlanList.setDescription('This object specifies the VLAN ID List.')
swL2VlanMultiplyAdvertisement = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enabled", 2), ("disabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanMultiplyAdvertisement.setStatus('current')
if mibBuilder.loadTexts: swL2VlanMultiplyAdvertisement.setDescription('This object specifies the advertisement state.')
swL2VlanMultiplyPortList = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanMultiplyPortList.setStatus('current')
if mibBuilder.loadTexts: swL2VlanMultiplyPortList.setDescription('Specifies the port list.')
swL2VlanMultiplyPortListAction = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("add-tagged", 2), ("add-untagged", 3), ("add-forbidden", 4), ("delete", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanMultiplyPortListAction.setStatus('current')
if mibBuilder.loadTexts: swL2VlanMultiplyPortListAction.setDescription('Specifies the action for the port list specified by swL2VlanMultiplyPortList.')
swL2VlanMultiplyAction = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 23, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("create", 2), ("configure", 3), ("delete", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VlanMultiplyAction.setStatus('current')
if mibBuilder.loadTexts: swL2VlanMultiplyAction.setDescription('Specifies the action for VLANs. other: no action. create: the VLANs specified by swL2VlanMultiplyVlanList will be created at one time. configure: the VLANs specified by swL2VlanMultiplyVlanList will be configured at one time. delete: the VLANs specified by swL2VlanMultiplyVlanList will be deleted at one time.')
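# --- Illustrative sketch (not part of the generated MIB) ---------------------
# A minimal sketch of the bulk VLAN creation described above: the VLAN ID list,
# the member port list and both action scalars are written in a single set
# request.  The host, community, VLAN range and PortList bitmap defaults are
# placeholder assumptions.
def _create_vlans_in_bulk(host='192.0.2.1', community='private',
                          vlan_list='100-110', port_list_hex='ff000000'):
    # Hypothetical helper: create several VLANs at one time and add the ports
    # in port_list_hex as untagged members (add-untagged(3), create(2)).
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd,
                              OctetString)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2VlanMultiplyVlanList', 0), vlan_list),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2VlanMultiplyPortList', 0),
                   OctetString(hexValue=port_list_hex)),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2VlanMultiplyPortListAction', 0), 3),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2VlanMultiplyAction', 0), 2)))
    return errorIndication, errorStatus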
swL2DhcpLocalRelayState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpLocalRelayState.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpLocalRelayState.setDescription('This object indicates the status of the DHCP local relay function of the switch.')
swL2DhcpLocalRelayVLANTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24, 2), )
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANTable.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANTable.setDescription('This table is used to manage the DHCP local relay status for each VLAN.')
swL2DhcpLocalRelayVLANEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2DhcpLocalRelayVLANID"))
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANEntry.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANEntry.setDescription('This object lists the current VLANs in the switch and their corresponding DHCP local relay status.')
swL2DhcpLocalRelayVLANID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANID.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANID.setDescription('This object shows the VIDs of the current VLANs in the switch.')
swL2DhcpLocalRelayVLANState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 24, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANState.setStatus('current')
if mibBuilder.loadTexts: swL2DhcpLocalRelayVLANState.setDescription('This object indicates the status of the DHCP relay function of the VLAN.')
swL2FloodMACMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1))
swL2FloodMACGlobalSettings = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 1))
swL2FloodMACState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2FloodMACState.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACState.setDescription('This object indicates the status of Flooding MAC function.')
swL2FloodMACLogState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2FloodMACLogState.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACLogState.setDescription('This object indicates whether logs are generated when a flooding MAC is detected.')
swL2FloodMACTrapState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2FloodMACTrapState.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACTrapState.setDescription('This object indicates whether traps are generated when a flooding MAC is detected.')
swL2FloodMACClearFDB = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no-action", 1), ("start", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2FloodMACClearFDB.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACClearFDB.setDescription('When set to start(2), this object will clear the entries of swL2FloodMACFDBTable. After the device finishes clearing the entries, it will return to its default value, no-action(1). Setting this object to no-action(1) will not have any effect.')
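# --- Illustrative sketch (not part of the generated MIB) ---------------------
# A minimal sketch of the clear operation described above: writing start(2) to
# swL2FloodMACClearFDB empties swL2FloodMACFDBTable, after which the agent
# returns the object to no-action(1).  Host and community defaults are
# placeholder assumptions.
def _clear_flood_mac_fdb(host='192.0.2.1', community='private'):
    # Hypothetical helper: trigger clearing of the flooding MAC FDB table.
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('DES3028P-L2MGMT-MIB', 'swL2FloodMACClearFDB', 0), 2)))
    return errorIndication, errorStatus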
swL2FloodMACAutoFDBCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 2), )
if mibBuilder.loadTexts: swL2FloodMACAutoFDBCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBCtrlTable.setDescription('A table containing a list of configured IP addresses to which the Auto FDB function will discover the corresponding VLAN, MAC address and port and have a corresponding static FDB entry created automatically.')
swL2FloodMACAutoFDBCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2FloodMACAutoFDBIPAddress"))
if mibBuilder.loadTexts: swL2FloodMACAutoFDBCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBCtrlEntry.setDescription('Information containing the configured Auto FDB IP address.')
swL2FloodMACAutoFDBIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 2, 1, 1), IpAddress())
if mibBuilder.loadTexts: swL2FloodMACAutoFDBIPAddress.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBIPAddress.setDescription('The Auto FDB IP address.')
swL2FloodMACAutoFDBRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 1, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2FloodMACAutoFDBRowStatus.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBRowStatus.setDescription('This object indicates the status of this entry.')
swL2FloodMACInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2))
swL2FloodMACFDBTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1), )
if mibBuilder.loadTexts: swL2FloodMACFDBTable.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBTable.setDescription('A table containing a list of current and historical flooding MAC entries.')
swL2FloodMACFDBEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2FloodMACFDBIndex"), (0, "DES3028P-L2MGMT-MIB", "swL2FloodMACFDBVID"), (0, "DES3028P-L2MGMT-MIB", "swL2FloodMACFDBMacAddress"))
if mibBuilder.loadTexts: swL2FloodMACFDBEntry.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBEntry.setDescription('Information containing the flooding MAC address.')
swL2FloodMACFDBIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: swL2FloodMACFDBIndex.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBIndex.setDescription('The hardware address table index of the flooding MAC entry.')
swL2FloodMACFDBVID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: swL2FloodMACFDBVID.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBVID.setDescription('The VLAN identifier of the flooding MAC entry.')
swL2FloodMACFDBMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1, 3), MacAddress())
if mibBuilder.loadTexts: swL2FloodMACFDBMacAddress.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBMacAddress.setDescription('The MAC address of the flooding MAC entry.')
swL2FloodMACFDBStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACFDBStatus.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBStatus.setDescription("The status of the flooding MAC entry. When the value is 'active', this means the entry is currently present in the hardware address table, otherwise, the value is 'inactive'.")
swL2FloodMACFDBTimestamp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACFDBTimestamp.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACFDBTimestamp.setDescription('A number that correlates to a relative time the entry was detected by the Flooding MAC function.')
swL2FloodMACAutoFDBTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2), )
if mibBuilder.loadTexts: swL2FloodMACAutoFDBTable.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBTable.setDescription('A table containing the discovered VLAN, MAC address and port of the host with an IP address created in swL2FloodMACAutoFDBCtrlTable.')
swL2FloodMACAutoFDBEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2, 1), ).setIndexNames((0, "DES3028P-L2MGMT-MIB", "swL2FloodMACAutoFDBIPAddress"))
if mibBuilder.loadTexts: swL2FloodMACAutoFDBEntry.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBEntry.setDescription('Information containing the discovered VLAN, MAC address and port of an Auto FDB entry.')
swL2FloodMACAutoFDBVID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACAutoFDBVID.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBVID.setDescription('The VLAN identifier of the Auto FDB entry.')
swL2FloodMACAutoFDBMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACAutoFDBMacAddress.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBMacAddress.setDescription('The MAC address of the Auto FDB entry.')
swL2FloodMACAutoFDBPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACAutoFDBPort.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBPort.setDescription('The port number of the Auto FDB entry.')
swL2FloodMACAutoFDBTimestamp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 63, 7, 2, 25, 2, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2FloodMACAutoFDBTimestamp.setStatus('current')
if mibBuilder.loadTexts: swL2FloodMACAutoFDBTimestamp.setDescription('A number that represents the relative time when the Auto FDB learned this entry.')
mibBuilder.exportSymbols("DES3028P-L2MGMT-MIB", swL2IGMPMulticastVlanGroupToIp=swL2IGMPMulticastVlanGroupToIp, swL2McastFilterPortProfileAddOrDelState=swL2McastFilterPortProfileAddOrDelState, swL2IGMPRouterPortVlanid=swL2IGMPRouterPortVlanid, swL2IGMPRouterPortStaticPortList=swL2IGMPRouterPortStaticPortList, swL2McastFilterProfileIndex=swL2McastFilterProfileIndex, swPortTrunkEntry=swPortTrunkEntry, swL2IGMPRouterPortTable=swL2IGMPRouterPortTable, swL2FloodMACFDBEntry=swL2FloodMACFDBEntry, swL2FloodMACAutoFDBTable=swL2FloodMACAutoFDBTable, swL2IGMPRouteTimeout=swL2IGMPRouteTimeout, swPortMirrorPackage=swPortMirrorPackage, swl2PortSecurityBindings=swl2PortSecurityBindings, swL2IGMPIpGroupReportCount=swL2IGMPIpGroupReportCount, swPortTrunkMaxEntries=swPortTrunkMaxEntries, swL2McastFilterPortTable=swL2McastFilterPortTable, swL2PortInfoPortIndex=swL2PortInfoPortIndex, swL2FloodMACAutoFDBTimestamp=swL2FloodMACAutoFDBTimestamp, swL2FloodMACAutoFDBCtrlEntry=swL2FloodMACAutoFDBCtrlEntry, swL2McastFilterTable=swL2McastFilterTable, swL2QOSBandwidthControlTable=swL2QOSBandwidthControlTable, swL2PortInfoMediumType=swL2PortInfoMediumType, swL2PortCtrlMACNotifyState=swL2PortCtrlMACNotifyState, swL2DhcpRelayCtrlServer=swL2DhcpRelayCtrlServer, swL2QOS8021pUserPriorityTable=swL2QOS8021pUserPriorityTable, swL2PortSecurityViolationMac=swL2PortSecurityViolationMac, swL2IGMPAccessAuthState=swL2IGMPAccessAuthState, swL2IGMPDynIPMultMaxEntry=swL2IGMPDynIPMultMaxEntry, swPortMirrorTargetPort=swPortMirrorTargetPort, swL2CosDscpPRITable=swL2CosDscpPRITable, swL2PortInfoNwayStatus=swL2PortInfoNwayStatus, swL2DevCtrlSubnetMask=swL2DevCtrlSubnetMask, swL2PortCtrlPortMediumType=swL2PortCtrlPortMediumType, swL2TrunkLACPPortState=swL2TrunkLACPPortState, swIGMPSnoopingGroupUpTime=swIGMPSnoopingGroupUpTime, swL2PortMgmt=swL2PortMgmt, swIGMPSnoopingGroupGroupAddr=swIGMPSnoopingGroupGroupAddr, swL2DhcpRelayCtrlState=swL2DhcpRelayCtrlState, swL2VlanMgmt=swL2VlanMgmt, swL2PortSecurityAdmState=swL2PortSecurityAdmState, swL2QOSSchedulingMechanism=swL2QOSSchedulingMechanism, swL2VlanAdvertisementTable=swL2VlanAdvertisementTable, swL2FloodMACGlobalSettings=swL2FloodMACGlobalSettings, swL2FloodMACClearFDB=swL2FloodMACClearFDB, swL2DhcpRelayMgmt=swL2DhcpRelayMgmt, swL2FloodMACAutoFDBVID=swL2FloodMACAutoFDBVID, swL2CosTosPRIIndex=swL2CosTosPRIIndex, swL2LoopDetectPortEntry=swL2LoopDetectPortEntry, swL2McastFilterPortEntry=swL2McastFilterPortEntry, swL2FloodMACAutoFDBMacAddress=swL2FloodMACAutoFDBMacAddress, swL2McastFilterPortProfileID=swL2McastFilterPortProfileID, swL2DhcpRelayTimeThreshold=swL2DhcpRelayTimeThreshold, swL2LoopDetectInterval=swL2LoopDetectInterval, swL2MgmtMIB=swL2MgmtMIB, swl2NotificationBindings=swl2NotificationBindings, swL2TrafficSegMgmt=swL2TrafficSegMgmt, swL2PortErrDescription=swL2PortErrDescription, swL2FloodMACAutoFDBIPAddress=swL2FloodMACAutoFDBIPAddress, swL2IGMPv3Forwarding=swL2IGMPv3Forwarding, swL2CosMgmt=swL2CosMgmt, swL2McastFilterProfileName=swL2McastFilterProfileName, swL2IGMPMulticastVlanGroupVid=swL2IGMPMulticastVlanGroupVid, swL2IGMPRouterPortDynamicPortList=swL2IGMPRouterPortDynamicPortList, swL2CosMacBasePRIIndex=swL2CosMacBasePRIIndex, swL2IGMPRouterPortVlanName=swL2IGMPRouterPortVlanName, swL2PortSecurityMode=swL2PortSecurityMode, swL2IGMPCtrlTable=swL2IGMPCtrlTable, swL2IGMPCtrlEntry=swL2IGMPCtrlEntry, swL2IGMPVid=swL2IGMPVid, swL2CosDscpPRIClass=swL2CosDscpPRIClass, swL2DevAlarmLinkChange=swL2DevAlarmLinkChange, swL2IGMPMaxIpGroupNumPerVlan=swL2IGMPMaxIpGroupNumPerVlan, 
swPortTrunkType=swPortTrunkType, swL2FloodMACInfo=swL2FloodMACInfo, swL2QOS8021pUserPriorityIndex=swL2QOS8021pUserPriorityIndex, swL2TrafficSegForwardPorts=swL2TrafficSegForwardPorts, swL2McastFilterAddOrDelState=swL2McastFilterAddOrDelState, swL2PortSecurityDelVlanName=swL2PortSecurityDelVlanName, swL2DevCtrlTelnet=swL2DevCtrlTelnet, swL2DhcpRelayCtrlInterfaceName=swL2DhcpRelayCtrlInterfaceName, swL2DhcpLocalRelayVLANEntry=swL2DhcpLocalRelayVLANEntry, swL2CosTosPRITable=swL2CosTosPRITable, VlanId=VlanId, swL2DevCtrlManagementVlanId=swL2DevCtrlManagementVlanId, swL2QOSSchedulingTable=swL2QOSSchedulingTable, swL2VlanAdvertisementEntry=swL2VlanAdvertisementEntry, swL2PortCtrlEntry=swL2PortCtrlEntry, swL2IGMPQueryInfoEntry=swL2IGMPQueryInfoEntry, swPortTrunkFloodingPort=swPortTrunkFloodingPort, swL2IGMPMulticastVlanName=swL2IGMPMulticastVlanName, swL2IGMPMulticastVlanRowStatus=swL2IGMPMulticastVlanRowStatus, swL2DevCtrlWeb=swL2DevCtrlWeb, swL2DhcpRelayOption82Policy=swL2DhcpRelayOption82Policy, swL2FloodMACMgmt=swL2FloodMACMgmt, swL2DevAlarmNewRoot=swL2DevAlarmNewRoot, swL2IGMPQueryInfoTable=swL2IGMPQueryInfoTable, swL2IGMPRouterPortEntry=swL2IGMPRouterPortEntry, swL2QOSSchedulingMaxWeight=swL2QOSSchedulingMaxWeight, swL2LoopDetectTrapMode=swL2LoopDetectTrapMode, swL2DhcpRelayOption82State=swL2DhcpRelayOption82State, swL2FloodMACTrapState=swL2FloodMACTrapState, swL2MACNotifyInterval=swL2MACNotifyInterval, swL2IGMPPortMap=swL2IGMPPortMap, swL2VlanMultiplyPortListAction=swL2VlanMultiplyPortListAction, swL2CosMacBasePRIEntry=swL2CosMacBasePRIEntry, swL2QOS8021pUserPriorityClass=swL2QOS8021pUserPriorityClass, swL2DhcpLocalRelayVLANID=swL2DhcpLocalRelayVLANID, swL2LoopDetectRecoverTime=swL2LoopDetectRecoverTime, swL2McastFilterPortGroupPortIndex=swL2McastFilterPortGroupPortIndex, swL2FloodMACFDBIndex=swL2FloodMACFDBIndex, swL2IGMPQueryState=swL2IGMPQueryState, swL2LoopDetectPortTable=swL2LoopDetectPortTable, swL2McastFilterStatus=swL2McastFilterStatus, swPortTrunkActivePort=swPortTrunkActivePort, swL2LoopDetectAdminState=swL2LoopDetectAdminState, swL2QOSMgmt=swL2QOSMgmt, swL2MultiFilter=swL2MultiFilter, swL2TrunkVLANEntry=swL2TrunkVLANEntry, swL2DevCtrlSnmpEnableAuthenTraps=swL2DevCtrlSnmpEnableAuthenTraps, swL2IGMPMulticastVlanEntry=swL2IGMPMulticastVlanEntry, swL2IGMPSnoopingClearDynIpMultAction=swL2IGMPSnoopingClearDynIpMultAction, swIGMPSnoopingGroupSourceAddr=swIGMPSnoopingGroupSourceAddr, swL2IGMPMulticastVlanSourcePort=swL2IGMPMulticastVlanSourcePort, swL2McastFilterPortInfoEntry=swL2McastFilterPortInfoEntry, swL2IGMPSnoopingClearDynIpMult=swL2IGMPSnoopingClearDynIpMult, swL2FloodMACState=swL2FloodMACState, swL2IGMPAccessAuthPort=swL2IGMPAccessAuthPort, swL2DevCtrlCleanAllStatisticCounter=swL2DevCtrlCleanAllStatisticCounter, swL2MgmtMIBTrapPrefix=swL2MgmtMIBTrapPrefix, swPortTrunkPortList=swPortTrunkPortList, swL2QOSSchedulingClassIndex=swL2QOSSchedulingClassIndex, swPortTrunkIndex=swPortTrunkIndex, swL2DevCtrlTelnetState=swL2DevCtrlTelnetState, swL2PortErrPortStatus=swL2PortErrPortStatus, swL2IGMPGroupIpAddr=swL2IGMPGroupIpAddr, swL2FloodMACFDBMacAddress=swL2FloodMACFDBMacAddress, swL2PortSecurityPortIndex=swL2PortSecurityPortIndex, swL2PortSecurityDelMacAddress=swL2PortSecurityDelMacAddress, swL2IGMPMulticastVlanGroupTable=swL2IGMPMulticastVlanGroupTable, swL2TrunkVLANTable=swL2TrunkVLANTable, swL2QOSBandwidthPortIndex=swL2QOSBandwidthPortIndex, swL2PortCtrlMDIXState=swL2PortCtrlMDIXState, swL2PortSecurityControlTable=swL2PortSecurityControlTable, 
swL2PortErrPortReason=swL2PortErrPortReason, swL2DhcpLocalRelayState=swL2DhcpLocalRelayState, swL2PortErrEntry=swL2PortErrEntry, swL2LoopDetectPortIndex=swL2LoopDetectPortIndex, swL2PortInfoTable=swL2PortInfoTable, PYSNMP_MODULE_ID=swL2MgmtMIB, swL2DhcpRelayState=swL2DhcpRelayState, swL2PortCtrlMulticastfilter=swL2PortCtrlMulticastfilter, swIGMPSnoopingGroupRouterPorts=swIGMPSnoopingGroupRouterPorts, swL2IGMPMulticastVlanGroupStatus=swL2IGMPMulticastVlanGroupStatus, swL2IGMPv3SourceIPAddr=swL2IGMPv3SourceIPAddr, swL2TrunkLACPPortEntry=swL2TrunkLACPPortEntry, swL2CosPriorityEtherPRI=swL2CosPriorityEtherPRI, swL2CosPriorityTable=swL2CosPriorityTable, swL2IGMPMulticastVlanReplaceSourceIp=swL2IGMPMulticastVlanReplaceSourceIp, swL2PortSecurityDelCtrl=swL2PortSecurityDelCtrl, swL2IGMPRouterPortForbiddenPortList=swL2IGMPRouterPortForbiddenPortList, swL2DevAlarm=swL2DevAlarm, swL2macNotification=swL2macNotification, swL2FloodMAC=swL2FloodMAC, swL2DevCtrlWebState=swL2DevCtrlWebState, swL2DevInfo=swL2DevInfo, swL2PortCtrlFlowCtrlState=swL2PortCtrlFlowCtrlState, swL2IGMPLeaveTimer=swL2IGMPLeaveTimer, swL2DevCtrlIGMPSnooping=swL2DevCtrlIGMPSnooping, swL2DhcpLocalRelayMgmt=swL2DhcpLocalRelayMgmt, swL2TrunkLACPPortIndex=swL2TrunkLACPPortIndex, swL2IGMPAccessAuthTable=swL2IGMPAccessAuthTable, swL2IGMPv3ExpireTimer=swL2IGMPv3ExpireTimer, swL2IGMPInfoTable=swL2IGMPInfoTable, swL2FloodMacDetectedTrap=swL2FloodMacDetectedTrap, swL2PortErrTable=swL2PortErrTable, swL2IGMPCurrentState=swL2IGMPCurrentState, swL2DevCtrlDefaultGateway=swL2DevCtrlDefaultGateway, swL2VlanMultiplyAdvertisement=swL2VlanMultiplyAdvertisement, swL2McastFilterPortInfoTable=swL2McastFilterPortInfoTable, swL2VlanMultiplyVlanList=swL2VlanMultiplyVlanList, swIGMPSnoopingGroupVid=swIGMPSnoopingGroupVid, swIGMPSnoopingGroupExcludePortMap=swIGMPSnoopingGroupExcludePortMap, swL2McastFilterPortInfoProfileName=swL2McastFilterPortInfoProfileName, swL2IGMPSnoopingMulticastVlanState=swL2IGMPSnoopingMulticastVlanState, swL2MACNotifyState=swL2MACNotifyState, swIGMPSnoopingGroupIncludePortMap=swIGMPSnoopingGroupIncludePortMap, swL2QOS8021pUserPriorityEntry=swL2QOS8021pUserPriorityEntry, swL2CosTosPRIEntry=swL2CosTosPRIEntry, swL2DevCtrlLLDPState=swL2DevCtrlLLDPState, swL2PortCtrlDescription=swL2PortCtrlDescription, swL2PortCtrlAddressLearning=swL2PortCtrlAddressLearning, swL2IGMPMulticastVlanState=swL2IGMPMulticastVlanState, swL2VlanAdvertiseState=swL2VlanAdvertiseState, swL2QOS8021pDefaultPriority=swL2QOS8021pDefaultPriority, swL2IGMPMulticastVlanTable=swL2IGMPMulticastVlanTable, swL2VlanIndex=swL2VlanIndex, swIGMPSnoopingGroupEntry=swIGMPSnoopingGroupEntry, swL2DhcpLocalRelayVLANState=swL2DhcpLocalRelayVLANState, swL2VlanMultiplyAction=swL2VlanMultiplyAction, swL2IGMPDynIPMultVlanState=swL2IGMPDynIPMultVlanState, swL2QOS8021pDefaultPriorityIndex=swL2QOS8021pDefaultPriorityIndex, swL2QOSSchedulingEntry=swL2QOSSchedulingEntry, swL2McastFilterPortInfoPortIndex=swL2McastFilterPortInfoPortIndex, swL2IGMPAccessAuthEntry=swL2IGMPAccessAuthEntry, swL2LoopDetectPortMgmt=swL2LoopDetectPortMgmt, swL2PortSecurityViolationTrap=swL2PortSecurityViolationTrap, swPortTrunkMaxPortMembers=swPortTrunkMaxPortMembers, swL2QOSBandwidthControlEntry=swL2QOSBandwidthControlEntry, swL2TrunkVLANState=swL2TrunkVLANState, swL2FloodMACAutoFDBRowStatus=swL2FloodMACAutoFDBRowStatus, swL2TrunkMgmt=swL2TrunkMgmt, swL2IGMPFastLeave=swL2IGMPFastLeave, swL2IGMPMacAddr=swL2IGMPMacAddr, swL2FloodMACFDBTable=swL2FloodMACFDBTable, swPortTrunkMasterPort=swPortTrunkMasterPort, 
PortList=PortList, swL2DevCtrl=swL2DevCtrl, swL2PortErrPortState=swL2PortErrPortState, MacAddress=MacAddress, swL2QOSBandwidthRxRate=swL2QOSBandwidthRxRate, swL2DevCtrlIpAutoConfig=swL2DevCtrlIpAutoConfig, swL2QOSSchedulingMechanismCtrl=swL2QOSSchedulingMechanismCtrl, swL2QOS8021pDefaultPriorityEntry=swL2QOS8021pDefaultPriorityEntry, swL2IGMPv3Table=swL2IGMPv3Table, swL2IGMPv3Entry=swL2IGMPv3Entry, swL2LoopDetectPortState=swL2LoopDetectPortState, swPortMirrorState=swPortMirrorState, swL2QOSBandwidthTxRate=swL2QOSBandwidthTxRate, swL2DevAlarmTopologyChange=swL2DevAlarmTopologyChange, swL2IGMPInfoTxQueryCount=swL2IGMPInfoTxQueryCount, swL2CosPortPRIClass=swL2CosPortPRIClass, swL2VlanName=swL2VlanName, swL2PortSecurityDelPort=swL2PortSecurityDelPort, swIGMPSnoopingGroupTable=swIGMPSnoopingGroupTable, swL2IGMPMaxSupportedVlans=swL2IGMPMaxSupportedVlans, swL2IGMPCtrlVid=swL2IGMPCtrlVid, swL2IGMPInfoEntry=swL2IGMPInfoEntry, swL2FloodMACAutoFDBCtrlTable=swL2FloodMACAutoFDBCtrlTable, swL2DevMgmt=swL2DevMgmt, swL2MACNotifyHistorySize=swL2MACNotifyHistorySize, swL2IGMPDynIpMultMgmt=swL2IGMPDynIpMultMgmt, swL2LoopDetectMgmt=swL2LoopDetectMgmt, swL2DevInfoFrontPanelLedStatus=swL2DevInfoFrontPanelLedStatus, swL2LoopDetectCtrl=swL2LoopDetectCtrl, swL2TrunkAlgorithm=swL2TrunkAlgorithm, swL2PortCtrlAdminState=swL2PortCtrlAdminState, swL2DevCtrlSystemIP=swL2DevCtrlSystemIP, swL2CosPortPRIEntry=swL2CosPortPRIEntry, swL2PortInfoEntry=swL2PortInfoEntry, swL2PortCtrlPortIndex=swL2PortCtrlPortIndex, swL2QOS8021pRadiusPriority=swL2QOS8021pRadiusPriority, swL2IGMPSnoopingClearDynIpMultIP=swL2IGMPSnoopingClearDynIpMultIP, swL2FloodMACAutoFDBPort=swL2FloodMACAutoFDBPort, swL2FloodMACFDBVID=swL2FloodMACFDBVID, swL2DevCtrlAsymVlanState=swL2DevCtrlAsymVlanState)
mibBuilder.exportSymbols("DES3028P-L2MGMT-MIB", swL2PortErrPortIndex=swL2PortErrPortIndex, swL2QOSBandwidthRadiusRxRate=swL2QOSBandwidthRadiusRxRate, swL2McastFilterPortMaxGroupTable=swL2McastFilterPortMaxGroupTable, swL2PortSecurityMaxLernAddr=swL2PortSecurityMaxLernAddr, swL2IGMPMulticastVlanGroupEntry=swL2IGMPMulticastVlanGroupEntry, swL2VlanMultiplyPortList=swL2VlanMultiplyPortList, swL2PortLoopOccurred=swL2PortLoopOccurred, swL2McastFilterGroupList=swL2McastFilterGroupList, swL2DhcpRelayCtrlEntry=swL2DhcpRelayCtrlEntry, swIGMPSnoopingGroupReportCount=swIGMPSnoopingGroupReportCount, swL2CosPriorityPortPRI=swL2CosPriorityPortPRI, swL2DevCtrlSystemReboot=swL2DevCtrlSystemReboot, swL2McastFilterPortMaxGroupPortIndex=swL2McastFilterPortMaxGroupPortIndex, swIGMPPackage=swIGMPPackage, swL2QOS8021pDefaultPriorityTable=swL2QOS8021pDefaultPriorityTable, swL2FloodMACLogState=swL2FloodMACLogState, swL2IGMPInfoVid=swL2IGMPInfoVid, swL2PortCtrlTable=swL2PortCtrlTable, swL2CosMacBasePRIClass=swL2CosMacBasePRIClass, swL2LoopDetectPortLoopStatus=swL2LoopDetectPortLoopStatus, swL2macNotifyInfo=swL2macNotifyInfo, swL2MgmtMIBTraps=swL2MgmtMIBTraps, swL2FloodMACFDBTimestamp=swL2FloodMACFDBTimestamp, swL2TrunkVLANPort=swL2TrunkVLANPort, swL2PortSecurityTrapLogState=swL2PortSecurityTrapLogState, swL2IGMPRobustness=swL2IGMPRobustness, swL2IGMPCtrlState=swL2IGMPCtrlState, swL2TrafficSegTable=swL2TrafficSegTable, swPortMirrorTxPortList=swPortMirrorTxPortList, swL2IGMPMulticastVlanMemberPort=swL2IGMPMulticastVlanMemberPort, swL2DevCtrlRmonState=swL2DevCtrlRmonState, swPortMirrorRxPortList=swPortMirrorRxPortList, swL2IGMPHostTimeout=swL2IGMPHostTimeout, swL2CosPriorityPort=swL2CosPriorityPort, swL2IGMPMulticastVlanRemoveAllMcastAddrListAction=swL2IGMPMulticastVlanRemoveAllMcastAddrListAction, swL2DhcpRelayHopCount=swL2DhcpRelayHopCount, swL2IGMPMaxResponseTime=swL2IGMPMaxResponseTime, swL2CosPortPRITable=swL2CosPortPRITable, swL2DhcpRelayOption82Check=swL2DhcpRelayOption82Check, swL2CosPortPRIIndex=swL2CosPortPRIIndex, swL2IGMPMulticastVlanTagMemberPort=swL2IGMPMulticastVlanTagMemberPort, swL2CosMacBasePRITable=swL2CosMacBasePRITable, swL2PortLoopRestart=swL2PortLoopRestart, swL2VlanMultiplyMgmt=swL2VlanMultiplyMgmt, swL2FloodMACAutoFDBEntry=swL2FloodMACAutoFDBEntry, swL2PortCtrlNwayState=swL2PortCtrlNwayState, swL2McastFilterEntry=swL2McastFilterEntry, swL2PortSecurityControlEntry=swL2PortSecurityControlEntry, swL2DhcpRelayCtrlTable=swL2DhcpRelayCtrlTable, swL2PortSecurityMgmt=swL2PortSecurityMgmt, swL2IGMPInfoQueryCount=swL2IGMPInfoQueryCount, swL2IGMPMulticastVlanUntagSourcePort=swL2IGMPMulticastVlanUntagSourcePort, swIGMPSnoopingGroupExpiryTime=swIGMPSnoopingGroupExpiryTime, swL2IGMPMulticastVlanReplacePriority=swL2IGMPMulticastVlanReplacePriority, swPortTrunkTable=swPortTrunkTable, swL2DevCtrlVLANTrunkState=swL2DevCtrlVLANTrunkState, swL2IGMPDynIPMultVlanAge=swL2IGMPDynIPMultVlanAge, swL2IGMPLastMemberQueryInterval=swL2IGMPLastMemberQueryInterval, swPortTrunkState=swPortTrunkState, swL2FloodMacDetectedMacVid=swL2FloodMacDetectedMacVid, swL2CosDscpPRIEntry=swL2CosDscpPRIEntry, swL2IGMPMulticastVlanGroupFromIp=swL2IGMPMulticastVlanGroupFromIp, swL2IGMPMulticastVlanRemapPriority=swL2IGMPMulticastVlanRemapPriority, swL2CosTosPRIClass=swL2CosTosPRIClass, swL2TrunkLACPPortTable=swL2TrunkLACPPortTable, swL2McastFilterPortMaxGroupEntry=swL2McastFilterPortMaxGroupEntry, swL2FloodMACFDBStatus=swL2FloodMACFDBStatus, swL2PortSecurityDelActivity=swL2PortSecurityDelActivity, swL2CosPriorityIpPRI=swL2CosPriorityIpPRI, 
swL2DhcpLocalRelayVLANTable=swL2DhcpLocalRelayVLANTable, swL2FloodMacDetectedMacAddress=swL2FloodMacDetectedMacAddress, swL2TrafficSegPort=swL2TrafficSegPort, swL2McastFilterPortMaxGroup=swL2McastFilterPortMaxGroup, swL2IGMPQueryInterval=swL2IGMPQueryInterval, swL2CosPriorityEntry=swL2CosPriorityEntry, swL2IGMPMulticastVlanid=swL2IGMPMulticastVlanid, swL2CosDscpPRIIndex=swL2CosDscpPRIIndex, swL2IGMPSnoopingClearDynIpMultVID=swL2IGMPSnoopingClearDynIpMultVID, swL2PortInfoLinkStatus=swL2PortInfoLinkStatus, swL2CosPriorityCtrl=swL2CosPriorityCtrl, swL2DevCtrlLLDPForwardMessageState=swL2DevCtrlLLDPForwardMessageState, swL2CosPriorityNone=swL2CosPriorityNone, swL2TrafficSegEntry=swL2TrafficSegEntry, swL2QOSBandwidthRadiusTxRate=swL2QOSBandwidthRadiusTxRate)
|
import logging
import os
import yaml
import sys
from ClusterShell import NodeSet
class Config:
POSSIBLE_ATTRS = ["ipmi_user", "ipmi_pass", "model", "snmp_oids", "ro_community"]
def __init__(self, yamlConfigFilePath):
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
if os.path.isfile(yamlConfigFilePath):
with open(yamlConfigFilePath, 'r') as config_file:
conf = yaml.safe_load(config_file)
else:
raise Exception("config.py: No config file found")
"""
we iterate over each item in the endpoints list; each item looks like:
{'names': 'n[15-27]', 'managers': 'n[15-27]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n3', 'managers': 'n3-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[1,2,4-14]', 'managers': 'n[1,2,4-14]-ipmi', 'ipmi_user': 'root', 'ipmi_pass': 'NoWay', 'model': 'intel_v1'}
{'names': 'n[28]', 'managers': 'n[28]-ipmi', 'ipmi_user': 'ADMIN', 'ipmi_pass': 'NoWay', 'model': 'supermicro_v1'}
{'names': 'isw1', 'managers': 'isw1', 'model': 'snmp', 'snmp_oids': ['.1.3.6.1.2.1.99.1.1.1.4.602240030', '.1.3.6.1.2.1.99.1.1.1.4.601240030']}
"""
self.dataDict = {}
self.db_host = conf['influxdb']['db_host']
self.db_port = conf['influxdb']['db_port']
self.http_user = conf['influxdb']['http_user']
self.http_pass = conf['influxdb']['http_pass']
self.db = conf['influxdb']['db']
self.accountlog = conf['accounting']['logfile']
self.accountperiod = conf['accounting']['logperiod']
for endpointDef in conf['endpoints']:
namesList = NodeSet.expand(endpointDef['names'])
managersList = NodeSet.expand(endpointDef['managers'])
if len(namesList) != len(managersList):
raise Exception(f"Configuration error: Nodesets {endpointDef['names']} and {endpointDef['managers']} have different size.")
for name, manager in zip(namesList,managersList):
self.dataDict[name] = {}
self.dataDict[name]['manager'] = manager
for attr in self.POSSIBLE_ATTRS:
if attr in endpointDef:
self.dataDict[name][attr] = endpointDef[attr]
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
"""
A BitBucket Builds template for deploying an application revision to AWS CodeDeploy
narshiva@amazon.com
v1.0.0
"""
from __future__ import print_function
import os
import sys
from time import strftime, sleep
import boto3
from botocore.exceptions import ClientError
from botocore.client import Config
config = Config(
retries=dict(
max_attempts=10
)
)
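# The calls below read their parameters from the build environment; this
# template expects APPLICATION_NAME, S3_BUCKET, DEPLOYMENT_GROUP_NAME and
# DEPLOYMENT_CONFIG to be set in the BitBucket Pipelines configuration.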
VERSION_LABEL = strftime("%Y%m%d%H%M%S")
BUCKET_KEY = os.getenv('APPLICATION_NAME') + '/' + VERSION_LABEL + \
'-bitbucket_builds.zip'
def upload_to_s3(artifact):
"""
Uploads an artifact to Amazon S3
"""
try:
client = boto3.client('s3', config=config)
except ClientError as err:
print("Failed to create boto3 client.\n" + str(err))
return False
try:
with open(artifact, 'rb') as artifact_file:
client.put_object(
Body=artifact_file,
Bucket=os.getenv('S3_BUCKET'),
Key=BUCKET_KEY
)
except ClientError as err:
print("Failed to upload artifact to S3.\n" + str(err))
return False
except IOError as err:
print("Failed to access artifact.zip in this directory.\n" + str(err))
return False
return True
def deploy_new_revision():
"""
Deploy a new application revision to AWS CodeDeploy Deployment Group
"""
try:
client = boto3.client('codedeploy', config=config)
except ClientError as err:
print("Failed to create boto3 client.\n" + str(err))
return False
try:
response = client.create_deployment(
applicationName=str(os.getenv('APPLICATION_NAME')),
deploymentGroupName=str(os.getenv('DEPLOYMENT_GROUP_NAME')),
revision={
'revisionType': 'S3',
's3Location': {
'bucket': os.getenv('S3_BUCKET'),
'key': BUCKET_KEY,
'bundleType': 'zip'
}
},
deploymentConfigName=str(os.getenv('DEPLOYMENT_CONFIG')),
description='New deployment from BitBucket',
ignoreApplicationStopFailures=True
)
except ClientError as err:
print("Failed to deploy application revision.\n" + str(err))
return False
"""
Wait for deployment to complete
"""
while 1:
try:
deploymentResponse = client.get_deployment(
deploymentId=str(response['deploymentId'])
)
deploymentStatus=deploymentResponse['deploymentInfo']['status']
if deploymentStatus == 'Succeeded':
print ("Deployment Succeeded")
return True
elif (deploymentStatus == 'Failed') or (deploymentStatus == 'Stopped') :
print ("Deployment Failed")
return False
elif (deploymentStatus == 'InProgress') or (deploymentStatus == 'Queued') or (deploymentStatus == 'Created'):
continue
except ClientError as err:
print("Failed to deploy application revision.\n" + str(err))
return False
return True
def main():
if not upload_to_s3('/tmp/artifact.zip'):
sys.exit(1)
if not deploy_new_revision():
sys.exit(1)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from collections import deque
from qt.QtCore import (
Qt,
QMetaObject,
QThread,
QObject,
Slot,
Q_ARG,
QMutex,
QMutexLocker
)
import heapq
import itertools
class Action(QObject):
def __init__(self, impl, finished=None, failed=None, main=False, priority=0):
super(Action, self).__init__()
self.impl = impl
self._finished = finished
self._failed = failed
self.main = main
self.priority = priority
@Slot()
def finished(self):
if self._finished:
self._finished()
self.deleteLater()
@Slot(object)
def failed(self, e):
if self._failed:
self._failed(e)
self.deleteLater()
@Slot()
def run(self):
try:
self.impl()
if self.main:
self.finished()
else:
QMetaObject.invokeMethod(
self,
'finished',
Qt.QueuedConnection
)
except Exception as e:
if self.main:
self.failed(e)
else:
QMetaObject.invokeMethod(
self,
'failed',
Qt.QueuedConnection,
Q_ARG(object, e)
)
class Make(QObject):
def __init__(self, impl, finished=None, failed=None, main=False, priority=0):
super(Make, self).__init__()
self.impl = impl
self._finished = finished
self._failed = failed
self.main = main
self.priority = priority
@Slot(object)
def finished(self, result):
if self._finished:
self._finished(result)
self.deleteLater()
@Slot(object)
def failed(self, e):
if self._failed:
self._failed(e)
self.deleteLater()
@Slot()
def run(self):
try:
result = self.impl()
if self.main:
self.finished(result)
else:
QMetaObject.invokeMethod(
self,
'finished',
Qt.QueuedConnection,
Q_ARG(object, result)
)
except Exception as e:
if self.main:
self.failed(e)
else:
QMetaObject.invokeMethod(
self,
'failed',
Qt.QueuedConnection,
Q_ARG(object, e)
)
class Job(QObject):
def __init__(self, impl):
super(Job, self).__init__()
self.impl = impl
@Slot()
def run(self):
self.impl.run()
self.deleteLater()
class Pool(object):
def __init__(self):
self.thread = QThread()
self.thread.start()
self.local = QObject()
self.remote = QObject()
self.remote.moveToThread(self.thread)
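# Design note: jobs passed to start() stay parented to `self.local` on the
# calling thread so clear() can find and cancel them, while the Job wrapper
# is moved to the worker thread and triggered via a queued invokeMethod.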
def start(self, job):
job.setParent(self.local)
job = Job(job)
job.moveToThread(self.thread)
job.setParent(self.remote)
QMetaObject.invokeMethod(
job,
'run',
Qt.QueuedConnection
)
def clear(self):
for job in self.local.children():
if hasattr(job, 'cancel'):
job.cancel()
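# Heap below follows the "Priority Queue Implementation Notes" recipe from
# the Python heapq documentation: entries are [priority, count, task] lists,
# and removed tasks are marked with a placeholder instead of re-heapifying.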
class Heap(object):
REMOVED = '<removed-task>' # placeholder for a removed task
def __init__(self):
self.mutex = QMutex()
self.clear()
def push(self, task, priority=0):
'Add a new task or update the priority of an existing task'
with QMutexLocker(self.mutex):
if task in self.entry_finder:
# Mark the stale entry in place; calling remove() here would
# deadlock because QMutex is not recursive by default.
self._remove_unlocked(task)
count = next(self.counter)
entry = [priority, count, task]
self.entry_finder[task] = entry
heapq.heappush(self.pq, entry)
def remove(self, task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
with QMutexLocker(self.mutex):
self._remove_unlocked(task)
def _remove_unlocked(self, task):
'Mark an entry as REMOVED; the caller must already hold the mutex.'
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
def pop(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
with QMutexLocker(self.mutex):
while self.pq:
priority, count, task = heapq.heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
def clear(self):
self.pq = [] # list of entries arranged in a heap
self.entry_finder = {} # mapping of tasks to entries
self.counter = itertools.count() # unique sequence count
def __bool__(self):
return bool(self.pq)
class Worker(QObject):
def __init__(self):
super(Worker, self).__init__()
#self.q = deque()
self.q = Heap()
self.pool = Pool()
@Slot()
def deal(self):
try:
job = self.q.pop()
except KeyError:  # the queue may be empty
pass
else:
if _getattr(job, 'main', False):
job.run()
else:
self.pool.start(job)
if self.q:
self.delay_deal()
def clear(self):
self.q.clear()
self.pool.clear()
def delay_deal(self):
QMetaObject.invokeMethod(
self,
'deal',
Qt.QueuedConnection
)
def do(self, **kargs):
"""Do some asyne job, maybe in main thread."""
if 'make' in kargs:
make = kargs['make']
catch = kargs.get('catch', lambda x: x)
job = Make(
make,
catch,
main=kargs.get('main', False),
priority=kargs.get('priority', 0)
)
elif 'action' in kargs:
action = kargs['action']
react = kargs.get('react', lambda: None)
job = Action(
action,
react,
main=kargs.get('main', False),
priority=kargs.get('priority', 0)
)
elif 'job' in kargs:
job = kargs['job']
_check_job(job)
else:
assert False, 'Wrong arguments.'
self.q.push(job, -_getattr(job, 'priority', 0))
self.delay_deal()
def _getattr(target, name, default):
return getattr(target, name) if hasattr(target, name) else default
def _check_job(job):
assert isinstance(job, QObject)
assert hasattr(job, 'run')
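# Illustrative usage sketch (the helper names are assumptions, not part of
# this module): run work off the main thread and consume results back on it.
#
#   worker = Worker()
#   worker.do(make=lambda: expensive_computation(),    # runs in the pool
#             catch=lambda result: update_ui(result),  # delivered via queued slot
#             priority=1)
#   worker.do(action=lambda: save_file(), react=lambda: None)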
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import logging
import re
import sys
from difflib import unified_diff
from pants.base.address import BuildFileAddress, SyntheticAddress
logger = logging.getLogger(__name__)
class BuildTargetParseError(Exception): pass
class DependencySpec(object):
"""A representation of a single dependency spec, including comments around it.
This is a helper class to aid in deduplicating, sorting, forcing, and formatting
dependency specs in a BUILD target's dependencies section.
"""
def __init__(self, spec, comments_above=None, side_comment=None):
self.spec = spec
self.comments_above = comments_above or []
self.side_comment = side_comment
def comments_above_lines(self):
for line in self.comments_above:
line = line.strip()
if line:
yield '# {line}'.format(line=line)
else:
yield ''
def indented_lines(self, lines, indent=4):
indent_spaces = ' ' * indent
for line in lines:
line = line.strip()
if not line:
yield ''
else:
yield '{indent_spaces}{line}'.format(indent_spaces=indent_spaces, line=line)
def lines(self, indent=4):
spec_line = "'{0}',".format(self.spec)
if self.side_comment is not None:
spec_line = '{spec_line} # {comment}'.format(spec_line=spec_line,
comment=self.side_comment)
comments_above = list(self.comments_above_lines())
lines = comments_above + [spec_line]
return list(self.indented_lines(lines, indent))
def has_comment(self):
# If all of the comments above are whitespace, don't consider this forced,
# but keep the whitespace.
return bool(any(self.comments_above_lines()) or self.side_comment)
def __repr__(self):
return '\n'.join(self.lines())
class BuildFileManipulator(object):
"""A class to load, represent, and change the dependencies of a given target.
Use BuildFileManipulator.load(...) for construction, rather than constructing it directly.
"""
@classmethod
def load(cls, build_file, name, target_aliases):
"""A BuildFileManipulator factory class method.
Note that BuildFileManipulator requires a very strict formatting of target declaration.
In particular, it wants to see a newline after `target_type(`, `dependencies = [`, and
the last param to the target constructor before the trailing `)`. There are further
restrictions as well--see the comments below or check out the example targets in
the tests for this class.
:param build_file: A FilesystemBuildFile instance to operate on.
:param name: The name of the target (without the spec path or colon) to operate on.
:param target_aliases: The callables injected into the build file context that we should treat
as target declarations.
"""
with open(build_file.full_path, 'r') as f:
source = f.read()
source_lines = source.split('\n')
tree = ast.parse(source)
# Since we're not told what the last line of an expression is, we have
# to figure it out based on the start of the expression after it.
# The interval that we consider occupied by a given expression is
# [expr.lineno, next_expr.lineno). For the last expression in the
# file, its end is the number of lines in the file.
# Also note that lineno is 1-indexed, so we subtract 1 from everything.
intervals = [t.lineno - 1 for t in tree.body]
intervals.append(len(source_lines))
# Candidate target declarations
top_level_exprs = [t for t in tree.body if isinstance(t, ast.Expr)]
top_level_calls = [e.value for e in top_level_exprs if isinstance(e.value, ast.Call)]
# Just in case someone is tricky and assigns the result of a target
# declaration to a variable, though in general this is not useful
assigns = [t for t in tree.body if isinstance(t, ast.Assign)]
assigned_calls = [t.value for t in assigns if isinstance(t.value, ast.Call)]
# Final candidate declarations
calls = top_level_calls + assigned_calls
# Filter out calls that don't have a simple name as the function
# i.e. keep `foo()` but not `(some complex expr)()`
calls = [call for call in calls if isinstance(call.func, ast.Name)]
# Now actually get all of the calls to known aliases for targets
# TODO(pl): Log these
target_calls = [call for call in calls if call.func.id in target_aliases]
# We now have enough information to instantiate a BuildFileTarget for
# any one of these, but we're only interested in the one with name `name`
def name_from_call(call):
for keyword in call.keywords:
if keyword.arg == 'name':
if isinstance(keyword.value, ast.Str):
return keyword.value.s
else:
logger.warn('Saw a non-string-literal name argument to a target while '
'looking through {build_file}. Target type was {target_type}. '
'Name value was {name_value}.'
.format(build_file=build_file,
target_type=call.func.id,
name_value=keyword.value))
raise BuildTargetParseError('Could not find name parameter to target call '
'with target type {target_type}'
.format(target_type=call.func.id))
calls_by_name = dict((name_from_call(call), call) for call in target_calls)
if name not in calls_by_name:
raise BuildTargetParseError('Could not find target named {name} in {build_file}'
.format(name=name, build_file=build_file))
target_call = calls_by_name[name]
# lineno is 1-indexed
target_interval_index = intervals.index(target_call.lineno - 1)
target_start = intervals[target_interval_index]
target_end = intervals[target_interval_index + 1]
def is_whitespace(line):
return line.strip() == ''
def is_comment(line):
return line.strip().startswith('#')
def is_ignored_line(line):
return is_whitespace(line) or is_comment(line)
# Walk the end back so we don't have any trailing whitespace
while is_ignored_line(source_lines[target_end - 1]):
target_end -= 1
target_source_lines = source_lines[target_start:target_end]
# TODO(pl): This would be good logging
# print(astpp.dump(target_call))
# print("Target source lines")
# for line in target_source_lines:
# print(line)
if target_call.args:
raise BuildTargetParseError('Targets cannot be called with non-keyword args. Target was '
'{name} in {build_file}'
.format(name=name, build_file=build_file))
# TODO(pl): This should probably be an assertion. In order for us to have extracted
# this target_call by name, it must have had at least one kwarg (name)
if not target_call.keywords:
raise BuildTargetParseError('Targets must have at least one kwarg. Target type was '
'{target_type} in {build_file}'
.format(target_type=target_call.func.id, build_file=build_file))
if target_call.lineno == target_call.keywords[0].value.lineno:
raise BuildTargetParseError('Arguments to a target cannot be on the same line as the '
'target type. Target type was {target_type} in {build_file} '
'on line number {lineno}.'
.format(target_type=target_call.func.id,
build_file=build_file,
lineno=target_call.lineno))
for keyword in target_call.keywords:
kw_str = keyword.arg
kw_start_line = keyword.value.lineno
source_line = source_lines[kw_start_line - 1]
kwarg_line_re = re.compile(r'\s*?{kw_str}\s*?=\s*?\S'.format(kw_str=kw_str))
if not kwarg_line_re.match(source_line):
raise BuildTargetParseError('kwarg line is malformed. The value of a kwarg to a target '
'must start after the equals sign of the line with the key. '
'Build file was: {build_file}. Line number was: {lineno}'
.format(build_file=build_file, lineno=keyword.value.lineno))
# Same setup as for getting the target's interval
target_call_intervals = [t.value.lineno - target_call.lineno for t in target_call.keywords]
target_call_intervals.append(len(target_source_lines))
last_kwarg = target_call.keywords[-1]
last_interval_index = target_call_intervals.index(last_kwarg.value.lineno - target_call.lineno)
last_kwarg_start = target_call_intervals[last_interval_index]
last_kwarg_end = target_call_intervals[last_interval_index + 1]
last_kwarg_lines = target_source_lines[last_kwarg_start:last_kwarg_end]
if last_kwarg_lines[-1].strip() != ')':
raise BuildTargetParseError('All targets must end with a trailing ) on its own line. It '
"cannot go at the end of the last argument's line. Build file "
'was {build_file}. Target name was {target_name}. Line number '
'was {lineno}'
.format(build_file=build_file,
target_name=name,
lineno=last_kwarg_end + target_call.lineno))
# Now that we've double checked that we have the ) in the proper place,
# remove that line from the lines owned by the last kwarg
target_call_intervals[-1] -= 1
# TODO(pl): Also good logging
# for t in target_call.keywords:
# interval_index = target_call_intervals.index(t.value.lineno - target_call.lineno)
# print("interval_index:", interval_index)
# start = target_call_intervals[interval_index]
# end = target_call_intervals[interval_index + 1]
# print("interval: %s, %s" % (start, end))
# print("lines:")
# print('\n'.join(target_source_lines[start:end]))
# print('\n\n')
# print(target_call_intervals)
def get_dependencies_node(target_call):
for keyword in target_call.keywords:
if keyword.arg == 'dependencies':
return keyword.value
return None
dependencies_node = get_dependencies_node(target_call)
dependencies = []
if dependencies_node:
if not isinstance(dependencies_node, ast.List):
raise BuildTargetParseError('Found non-list dependencies argument on target {name} '
'in build file {build_file}. Argument had invalid type '
'{node_type}'
.format(name=name,
build_file=build_file,
node_type=type(dependencies_node)))
last_lineno = dependencies_node.lineno
for dep_node in dependencies_node.elts:
if not dep_node.lineno > last_lineno:
raise BuildTargetParseError('On line number {lineno} of build file {build_file}, found '
'dependencies declaration where the dependencies argument '
'and dependencies themselves were not all on separate lines.'
.format(lineno=dep_node.lineno, build_file=build_file))
# First, we peek up and grab any whitespace/comments above us
peek_lineno = dep_node.lineno - 1
comments_above = []
while peek_lineno > last_lineno:
peek_str = source_lines[peek_lineno - 1].strip()
if peek_str == '' or peek_str.startswith('#'):
comments_above.insert(0, peek_str.lstrip(' #'))
else:
spec = dependencies[-1].spec if dependencies else None
raise BuildTargetParseError('While parsing the dependencies of {target_name}, '
'encountered an unusual line while trying to extract '
'comments. This probably means that a dependency at '
'line {lineno} in {build_file} is missing a trailing '
'comma. The string in question was {spec}'
.format(target_name=name,
lineno=peek_lineno,
build_file=build_file,
spec=spec))
peek_lineno -= 1
# Done peeking for comments above us, now capture a possible inline side-comment
dep_str = source_lines[dep_node.lineno - 1]
dep_with_comments = dep_str.split('#', 1)
side_comment = None
if len(dep_with_comments) == 2:
side_comment = dep_with_comments[1].strip()
dep = DependencySpec(dep_node.s,
comments_above=comments_above,
side_comment=side_comment)
# TODO(pl): Logging here
dependencies.append(dep)
last_lineno = dep_node.lineno
deps_interval_index = target_call_intervals.index(dependencies_node.lineno -
target_call.lineno)
deps_start = target_call_intervals[deps_interval_index]
deps_end = target_call_intervals[deps_interval_index + 1]
# Finally, like we did for the target intervals above, we're going to roll back
# the end of the deps interval so we don't stomp on any comments after it.
while is_ignored_line(target_source_lines[deps_end - 1]):
deps_end -= 1
else:
# If there isn't already a place defined for dependencies, we use
# the line interval just before the trailing ) that ends the target
deps_start = -1
deps_end = -1
return cls(name=name,
build_file=build_file,
build_file_source_lines=source_lines,
target_source_lines=target_source_lines,
target_interval=(target_start, target_end),
dependencies=dependencies,
dependencies_interval=(deps_start, deps_end))
def __init__(self,
name,
build_file,
build_file_source_lines,
target_source_lines,
target_interval,
dependencies,
dependencies_interval):
"""See BuildFileManipulator.load() for how to construct one as a user."""
self.name = name
self.build_file = build_file
self.target_address = BuildFileAddress(build_file, name)
self._build_file_source_lines = build_file_source_lines
self._target_source_lines = target_source_lines
self._target_interval = target_interval
self._dependencies_interval = dependencies_interval
self._dependencies_by_address = {}
for dep in dependencies:
dep_address = SyntheticAddress.parse(dep.spec, relative_to=build_file.spec_path)
if dep_address in self._dependencies_by_address:
raise BuildTargetParseError('The address {dep_address} occurred multiple times in the '
'dependency specs for target {name} in {build_file}. '
.format(dep_address=dep_address.spec,
name=name,
build_file=build_file))
self._dependencies_by_address[dep_address] = dep
def add_dependency(self, address):
"""Add a dependency to this target. This will deduplicate existing dependencies."""
if address in self._dependencies_by_address:
if self._dependencies_by_address[address].has_comment():
logger.warn('BuildFileManipulator would have added {address} as a dependency of '
'{target_address}, but that dependency was already forced with a comment.'
.format(address=address.spec, target_address=self.target_address.spec))
return
spec = address.reference(referencing_path=self.build_file.spec_path)
self._dependencies_by_address[address] = DependencySpec(spec)
def clear_unforced_dependencies(self):
"""Remove all dependencies not forced by a comment.
This is useful when existing analysis can infer exactly what the correct dependencies should
be. Typical use is to call `clear_unforced_dependencies`, then call `add_dependency` for each
dependency inferred from analysis. The resulting dependency set should be the pruned set
of all dependencies, plus dependencies hand forced by a user comment.
"""
self._dependencies_by_address = dict(
(address, dep) for address, dep in self._dependencies_by_address.items()
if dep.has_comment()
)
def dependency_lines(self):
"""The formatted dependencies=[...] lines for this target.
If there are no dependencies, this returns an empty list.
"""
deps = sorted(self._dependencies_by_address.values(), key=lambda d: d.spec)
def dep_lines():
yield ' dependencies = ['
for dep in deps:
for line in dep.lines():
yield line
yield ' ],'
return list(dep_lines()) if deps else []
def target_lines(self):
"""The formatted target_type(...) lines for this target.
This is just a convenience method for extracting and re-injecting the changed
`dependency_lines` into the target text.
"""
target_lines = self._target_source_lines[:]
deps_begin, deps_end = self._dependencies_interval
target_lines[deps_begin:deps_end] = self.dependency_lines()
return target_lines
def build_file_lines(self):
"""Like `target_lines`, the entire BUILD file's lines after dependency manipulation."""
build_file_lines = self._build_file_source_lines[:]
target_begin, target_end = self._target_interval
build_file_lines[target_begin:target_end] = self.target_lines()
return build_file_lines
def diff_lines(self):
"""A diff between the original BUILD file and the resulting BUILD file."""
start_lines = self._build_file_source_lines[:]
end_lines = self.build_file_lines()
diff_generator = unified_diff(start_lines,
end_lines,
fromfile=self.build_file.relpath,
tofile=self.build_file.relpath,
lineterm='')
return list(diff_generator)
def write(self, dry_run=True):
"""Write out the changes made to the BUILD file, and print the diff to stderr.
:param dry_run: Don't actually write out the BUILD file, but do print the diff to stderr.
"""
start_lines = self._build_file_source_lines[:]
end_lines = self.build_file_lines()
diff_generator = unified_diff(start_lines,
end_lines,
fromfile=self.build_file.relpath,
tofile=self.build_file.relpath,
lineterm='')
if dry_run:
msg = 'DRY RUN, would have written this diff:'
else:
msg = 'REAL RUN, about to write the following diff:'
sys.stderr.write(msg + '\n')
sys.stderr.write('*' * 40 + '\n')
sys.stderr.write('target at: ')
sys.stderr.write(str(self.target_address) + '\n')
for line in diff_generator:
sys.stderr.write(line + '\n')
sys.stderr.write('*' * 40 + '\n')
if not dry_run:
with open(self.build_file.full_path, 'w') as bf:
bf.write('\n'.join(end_lines))
sys.stderr.write('WROTE to {full_path}\n'.format(full_path=self.build_file.full_path))
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module is used to perform any operations on nested structures, which can be
specified as sequences that contain non-sequence elements or other sequences.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e. no references in the structure of the input of these functions
should be recursive.
@@assert_same_structure
@@is_sequence
@@flatten
@@flatten_dict_items
@@pack_sequence_as
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, collections.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_flat_nest(nest):
for n in nest:
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def is_sequence(seq):
"""Returns a true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the input is not a string and is a collections.Sequence.
"""
return (isinstance(seq, collections.Sequence)
and not isinstance(seq, six.string_types))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
Args:
nest: an arbitrarily nested structure.
Returns:
The flattened version of the input.
Raises:
TypeError: If the input is not a sequence.
"""
if not is_sequence(nest):
raise TypeError("input must be a sequence, but received %s" % nest)
return _sequence_like(nest, list(_yield_flat_nest(nest)))
def _recursive_assert_same_structure(nest1, nest2):
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure. "
"First structure: %s, second structure: %s." % (nest1, nest2))
if is_sequence_nest1:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
for n1, n2 in zip(nest1, nest2):
_recursive_assert_same_structure(n1, n2)
def assert_same_structure(nest1, nest2):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements. First structure: %s, second structure: %s."
% (nest1, nest2))
_recursive_assert_same_structure(nest1, nest2)
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure, or if keys are
not unique.
"""
if not isinstance(dictionary, dict):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, packed), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
TypeError: If structure or flat_sequence is not a tuple or list.
ValueError: If `flat_sequence` and `structure` have different element counts.
"""
if not is_sequence(structure):
raise TypeError("structure must be a sequence")
if not is_sequence(flat_sequence):
raise TypeError("flat_sequence must be a sequence")
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
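# Illustrative round trip (the values are made up): flatten() preserves the
# outermost sequence type, and pack_sequence_as() rebuilds the full nest.
#
#   structure = ((1, 2), [3, (4,)])
#   flatten(structure)                             # -> (1, 2, 3, 4)
#   pack_sequence_as(structure, [10, 20, 30, 40])  # -> ((10, 20), [30, (40,)])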
|
import unittest
from gaussian_system import System, time_matrix, wang_landau
from gaussian_system.thermodynamic_integration import generate_samples_mcmc
import numpy as np
from scipy.stats import multivariate_normal
class TestLikelihood(unittest.TestCase):
def test_trivial(self):
self.assertEqual("a", "a")
def test_corr_z(self):
system = System(0.1, 0.2, 0.3, 0.4)
n_dim = 100
t = time_matrix(n_dim, 0.1)
c_z = system.corr_z(t)
c_ss = system.corr_ss(t)
c_sx = system.corr_sx(t)
c_xs = system.corr_xs(t)
c_xx = system.corr_xx(t)
np.testing.assert_allclose(c_z[:n_dim, :n_dim], c_ss)
np.testing.assert_allclose(c_z[n_dim:, :n_dim], c_sx)
np.testing.assert_allclose(c_z[:n_dim, n_dim:], c_xs)
np.testing.assert_allclose(c_z[n_dim:, n_dim:], c_xx)
def test_log_likelihood(self):
system = System(0.1, 0.2, 0.3, 0.4)
x = np.random.random_sample((100, 100))
s = np.random.random_sample((100, 100))
t = time_matrix(100, 0.1)
result = system.log_likelihood(x, s, t)
c_ss = system.corr_ss(t)
c_sx = system.corr_sx(t)
c_xs = system.corr_xs(t)
c_xx = system.corr_xx(t)
regression_coef = c_sx @ np.linalg.inv(c_ss)
p_x_given_s_cov = c_xx - regression_coef @ c_xs
test = []
# Avoid shadowing the sample arrays inside the loop.
for x_i, s_i in zip(x, s):
likelihood_distr = multivariate_normal(
cov=p_x_given_s_cov, mean=regression_coef @ s_i
)
likelihood = likelihood_distr.logpdf(x_i)
test.append(likelihood)
for val1, val2 in zip(result, test):
self.assertAlmostEqual(val1, val2)
def test_mutual_information(self):
system = System(0.1, 0.2, 0.3, 0.4)
t = time_matrix(100, 0.1)
self.assertAlmostEqual(
system.mutual_information(t),
system.marginal_entropy(t) - system.conditional_entropy(t),
)
def test_distributions(self):
system = System(0.1, 0.2, 0.3, 0.4)
t = time_matrix(100, 0.1)
sample = np.random.random_sample((100, 200))
s = sample[:, :100]
x = sample[:, 100:]
# three mathematically identical ways to compute the joint logpdf
# log(P(s,x)) = log(P(x|s)) + log(P(s)) = log(P(s|x)) + log(P(x))
val1 = system.log_likelihood(x, s, t) + system.log_prior(s, t)
val2 = system.log_posterior(s, x, t) + system.log_marginal(x, t)
val3 = system.log_joint(s, x, t)
for s1, s2, s3 in zip(val1, val2, val3):
self.assertAlmostEqual(s1, s2)
self.assertAlmostEqual(s1, s3)
def test_wang_landau(self):
system = System(0.1, 0.2, 0.3, 0.4)
n_dim = 3
t = time_matrix(n_dim, 0.1)
joint = multivariate_normal(cov=system.corr_z(t))
sample = joint.rvs(1).reshape((2, n_dim))
signal = sample[0]
response = sample[1]
bins = np.linspace(0, 100, 10)
wang_landau(response, signal, system, t, 0.5, bins, 1, 0.8)
def test_thermodynamic_integration(self):
system = System(0.1, 0.2, 0.3, 0.4)
t = time_matrix(100, 0.1)
c_z = system.corr_z(t)
scale = 1
initial_conf = np.random.random_sample(200)
num_samples = 100
generator = generate_samples_mcmc(
initial_conf, c_z, scale, equilibrate=1000
)
for _ in zip(range(num_samples), generator):
pass
if __name__ == "__main__":
unittest.main()
|
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from .symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
An Interpreter executes an FX graph Node-by-Node. This pattern
can be useful for many things, including writing code
transformations as well as analysis passes.
Methods in the Interpreter class can be overridden to customize
the behavior of execution. The map of overrideable methods
in terms of call hierarchy::
run()
+-- run_node
+-- placeholder()
+-- get_attr()
+-- call_function()
+-- call_method()
+-- call_module()
+-- output()
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass Interpreter like so::
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
torch.testing.assert_allclose(result, torch.neg(input).sigmoid())
Args:
module (GraphModule): The module to be executed
garbage_collect_values (bool): Whether to delete values after their last
use within the Module's execution. This ensures optimal memory usage during
execution. This can be disabled to, for example, examine all of the intermediate
values in the execution by looking at the ``Interpreter.env`` attribute.
"""
def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
assert isinstance(module, GraphModule)
self.module = module
self.submodules = dict(self.module.named_modules())
self.env : Dict[Node, Any] = {}
self.garbage_collect_values = garbage_collect_values
if self.garbage_collect_values:
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
self.user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
self.user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None) -> Any:
"""
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
Returns:
Any: The value returned from executing the Module
"""
self.env = initial_env if initial_env else {}
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
self.args_iter : Iterator[Any] = iter(args)
for node in self.module.graph.nodes:
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
# where the caller has pre-populated `env` with
# values for a subset of the program.
continue
self.env[node] = self.run_node(node)
if self.garbage_collect_values:
for to_delete in self.user_to_last_uses.get(node, []):
del self.env[to_delete]
if node.op == 'output':
output_val = self.env[node]
return output_val
def run_node(self, n : Node) -> Any:
"""
Run a specific node ``n`` and return the result.
Calls into placeholder, get_attr, call_function,
call_method, call_module, or output depending
on ``node.op``
Args:
n (Node): The Node to execute
Returns:
Any: The result of executing ``n``
"""
args, kwargs = self.fetch_args_kwargs_from_env(n)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
return getattr(self, n.op)(n.target, args, kwargs)
# Main Node running APIs
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
"""
assert isinstance(target, str)
if target.startswith('*'):
# For a starred parameter e.g. `*args`, retrieve all
# remaining values from the args list.
return list(self.args_iter)
else:
return next(self.args_iter)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
        Returns:
Any: The value of the attribute that was retrieved
"""
assert isinstance(target, str)
return self.fetch_attr(target)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
        Returns:
Any: The value returned by the function invocation
"""
assert not isinstance(target, str)
# Execute the function and return the result
return target(*args, **kwargs)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
        Returns:
Any: The value returned by the method invocation
"""
# args[0] is the `self` object for this method call
self_obj, *args_tail = args
# Execute the method and return the result
assert isinstance(target, str)
return getattr(self_obj, target)(*args_tail, **kwargs)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
        Returns:
Any: The value returned by the module invocation
"""
# Retrieve executed args and kwargs values from the environment
# Execute the method and return the result
assert isinstance(target, str)
submod = self.fetch_attr(target)
return submod(*args, **kwargs)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
        Returns:
Any: The return value referenced by the output node
"""
return args[0]
# Helper methods
def fetch_attr(self, target : str):
"""
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
            target (str): The fully-qualified name of the attribute to fetch
        Returns:
Any: The value of the attribute.
"""
target_atoms = target.split('.')
attr_itr = self.module
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
                raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i + 1])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
"""
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
        Returns:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
"""
args = self.map_nodes_to_values(n.args, n)
assert isinstance(args, tuple)
kwargs = self.map_nodes_to_values(n.kwargs, n)
assert isinstance(kwargs, dict)
return args, kwargs
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
"""
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
"""
def load_arg(n_arg : Node) -> Any:
if n_arg not in self.env:
raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
f'to diagnose such issues')
return self.env[n_arg]
return map_arg(args, load_arg)
class Transformer(Interpreter):
"""
``Transformer`` is a special type of interpreter that produces a
new ``Module``. It exposes a ``transform()`` method that returns
the transformed ``Module``. ``Transformer`` does not require
arguments to run, as ``Interpreter`` does. ``Transformer`` works
entirely symbolically.
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass ``Transformer`` like so::
class NegSigmSwapXformer(Transformer):
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
torch.testing.assert_allclose(transformed(input), torch.neg(input).sigmoid())
Args:
module (GraphModule): The ``Module`` to be transformed.
"""
def __init__(self, module):
super().__init__(module)
self.new_graph = Graph()
class TransformerTracer(Tracer):
def __init__(self, graph: Graph):
super().__init__()
self.graph = graph
def is_leaf_module(self, _, __) -> bool:
return True
self.tracer = TransformerTracer(self.new_graph)
self.tracer.root = module
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``placeholder`` node. In ``Transformer``, this is
overridden to insert a new ``placeholder`` into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.placeholder(target), self.tracer)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``get_attr`` node. In ``Transformer``, this is
overridden to insert a new ``get_attr`` node into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.get_attr(target), self.tracer)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
# Override so that the leaf module policy from `self.tracer` is respected.
assert isinstance(target, str)
submod = self.fetch_attr(target)
return self.tracer.call_module(submod, submod.forward, args, kwargs)
def transform(self) -> GraphModule:
"""
Transform ``self.module`` and return the transformed
``GraphModule``.
"""
result = super().run()
if result is not None:
def strip_proxy(a : Union[Argument, Proxy]) -> Any:
return a.node if isinstance(a, Proxy) else a
self.new_graph.output(map_aggregate(result, strip_proxy))
return GraphModule(self.module, self.new_graph)
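# A minimal sketch (an illustration, not part of this module's API) of partial
# evaluation with `Interpreter.run(initial_env=...)`: any node pre-populated in
# `initial_env` is skipped rather than recomputed. The module `M` and the node
# lookup below are hypothetical; run with `python -m` so the relative imports
# above resolve.
if __name__ == "__main__":
    import torch
    class M(torch.nn.Module):
        def forward(self, x):
            return x.relu() + 1.0
    gm = torch.fx.symbolic_trace(M())
    relu_node = next(n for n in gm.graph.nodes if n.op == 'call_method')
    # Pretend the relu result is already known and supply it up front.
    out = Interpreter(gm).run(torch.randn(2), initial_env={relu_node: torch.zeros(2)})
    assert torch.equal(out, torch.ones(2))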
|
import pickle
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tqdm
from flax import linen as nn
from jax.experimental import host_callback as hcb
from jax.tree_util import tree_flatten, tree_unflatten
from jaxopt import OptaxSolver, implicit_diff, linear_solve
import cvgutils.Image as cvgim
# import cvgutils.Viz as cvgviz  # only needed by the commented-out logger calls
gn_iters = 30
nhierarchies = 1
scale = 2**nhierarchies
dw = 3
key4 = jax.random.PRNGKey(45)
gt_image = cvgim.imread('~/Projects/cvgutils/tests/testimages/wood_texture.jpg')[:32,:64,:] *2
# gt_image = cvgim.resize(gt_image,scale=0.10) * 2
noise = jax.random.normal(key4,gt_image.shape) * 0.3
noisy_image = jnp.clip(gt_image + noise,0,1)
# noisy_image = jnp.zeros_like(gt_image)
# noisy_image = noisy_image.at[100,100,:].set(1)
init_inpt = jnp.zeros_like(gt_image)
# init_inpt = init_inpt.at[100,100,:].set(1)
im_gt = jnp.array(gt_image)
h,w = gt_image.shape[0],gt_image.shape[1]
data = [dw,h,w,noisy_image, im_gt]
class Conv3features(nn.Module):
def setup(self):
self.straight1 = nn.Conv(3,(3,3),strides=(1,1),use_bias=True)
self.straight2 = nn.Conv(3,(3,3),strides=(1,1),use_bias=True)
def __call__(self,x):
# return self.straight1(x)
# return nn.softplus(self.straight1(x))
l1 = nn.softplus(self.straight1(x))
return nn.softplus(self.straight2(l1))
@jax.jit
def stencil_residual(pp_image, hp_nn, data):
    """Residual vector; the objective is the sum of its squared entries."""
    _, _, _, inpt, _ = data
    avg_weight = (1. / 2.) ** 0.5 * (1. / pp_image.reshape(-1).shape[0] ** 0.5)
    r1 = pp_image - inpt
    # Learned regularizer: a small CNN applied to the current image estimate.
    unet_out = Conv3features().apply({'params': hp_nn}, pp_image)
    out = jnp.concatenate((r1.reshape(-1), unet_out.reshape(-1)), axis=0)
return avg_weight * out
@jax.jit
def screen_poisson_objective(pp_image, hp_nn, data):
"""Objective function."""
return (stencil_residual(pp_image, hp_nn, data) ** 2).sum()
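# The solvers below take Gauss-Newton steps: with residual r(x) and Jacobian
# J = dr/dx, each step d solves the normal equations (J^T J) d = -J^T r.
# Inside the loops, `Ax` applies J^T J as one jvp followed by one vjp and
# `jtf` forms J^T r, so conjugate gradients never materializes J.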
# @implicit_diff.custom_root(jax.grad(screen_poisson_objective))
def screen_poisson_solver_unrolled(init_image,hp_nn, data):
    # Coarse-to-fine: downsample the initial image, run a full Gauss-Newton
    # loop at each level, then upsample the estimate to the next level.
x = init_image
x = jax.image.resize(x,(x.shape[0]//2**(nhierarchies-1),x.shape[1]//2**(nhierarchies-1),x.shape[2]),"trilinear")
for k in range(nhierarchies-1,-1,-1):
_, _, _, inpt,_ = data
inpt = jax.image.resize(inpt,(inpt.shape[0]//2**k,inpt.shape[1]//2**k,inpt.shape[2]),"trilinear")
f = lambda pp_image:stencil_residual(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
for _ in range(gn_iters):
def Ax(pp_image):
jtd = jax.jvp(f,(x,),(pp_image,))[1]
return jax.vjp(f,x)[1](jtd)[0]
def jtf(x):
return jax.vjp(f,x)[1](f(x))[0]
d = linear_solve.solve_cg(matvec=Ax,
b=-jtf(x),
init=x,
maxiter=100)
# hcb.id_print(((Ax(d) + jtf(x)) ** 2).mean(),name='cg optimality unrolled ')
x += d
if(k >0):
x = jax.image.resize(x,(x.shape[0]*2,x.shape[1]*2,x.shape[2]),"trilinear")
loss = lambda pp_image:screen_poisson_objective(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
optim_cond = jax.grad(loss)
print('optimality cond unrolled ', (optim_cond(x) ** 2).mean())
return x
@implicit_diff.custom_root(jax.grad(screen_poisson_objective))
def screen_poisson_solver_id(init_image,hp_nn,data):
    # Coarse-to-fine: downsample the initial image, run a full Gauss-Newton
    # loop at each level, then upsample the estimate to the next level.
x = init_image
x = jax.image.resize(x,(x.shape[0]//2**(nhierarchies-1),x.shape[1]//2**(nhierarchies-1),x.shape[2]),"trilinear")
for k in range(nhierarchies-1,-1,-1):
_, _, _, inpt,_ = data
inpt = jax.image.resize(inpt,(inpt.shape[0]//2**k,inpt.shape[1]//2**k,inpt.shape[2]),"trilinear")
f = lambda pp_image:stencil_residual(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
loss = lambda pp_image:screen_poisson_objective(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
optim_cond = jax.grad(loss)
# for _ in range(gn_iters):
while((optim_cond(x) ** 2).mean() >= 1e-18):
def Ax(pp_image):
jtd = jax.jvp(f,(x,),(pp_image,))[1]
return jax.vjp(f,x)[1](jtd)[0]
def jtf(x):
return jax.vjp(f,x)[1](f(x))[0]
d = linear_solve.solve_cg(matvec=Ax,
b=-jtf(x),
init=x,
maxiter=100)
# hcb.id_print(((Ax(d) + jtf(x)) ** 2).mean(),name='cg optimality id ')
x += d
if(k >0):
x = jax.image.resize(x,(x.shape[0]*2,x.shape[1]*2,x.shape[2]),"trilinear")
print('optimality cond jax id ', (optim_cond(x) ** 2).mean())
return x
@implicit_diff.custom_root(jax.grad(screen_poisson_objective))
def screen_poisson_hierarchical_solver_id(init_image,hp_nn,data):
    # Coarse-to-fine: downsample the initial image, run a full Gauss-Newton
    # loop at each level, then upsample the estimate to the next level.
x = init_image
x = jax.image.resize(x,(x.shape[0]//2**(nhierarchies-1),x.shape[1]//2**(nhierarchies-1),x.shape[2]),"trilinear")
for k in range(nhierarchies-1,-1,-1):
_, _, _, inpt,_ = data
inpt = jax.image.resize(inpt,(inpt.shape[0]//2**k,inpt.shape[1]//2**k,inpt.shape[2]),"trilinear")
f = lambda pp_image:stencil_residual(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
loss = lambda pp_image:screen_poisson_objective(pp_image,hp_nn,[*data[:-2],inpt,data[-1]])
optim_cond = jax.grad(loss)
for _ in range(gn_iters):
def Ax(pp_image):
jtd = jax.jvp(f,(x,),(pp_image,))[1]
return jax.vjp(f,x)[1](jtd)[0]
def jtf(x):
return jax.vjp(f,x)[1](f(x))[0]
d = linear_solve.solve_cg(matvec=Ax,
b=-jtf(x),
init=x,
maxiter=100)
# hcb.id_print(((Ax(d) + jtf(x)) ** 2).mean(),name='cg optimality id ')
x += d
if(k >0):
x = jax.image.resize(x,(x.shape[0]*2,x.shape[1]*2,x.shape[2]),"trilinear")
print('optimality cond ', (optim_cond(x) ** 2).mean())
return x
# @jax.jit
def outer_objective_id(hp_nn, init_inner,data):
"""Validation loss."""
gt = data[-1]
f = lambda hp_nn: screen_poisson_solver_id(init_inner, hp_nn,data)
loss = lambda pp_image:screen_poisson_objective(pp_image,hp_nn,data)
# optim_cond = jax.grad(loss)
x = f(hp_nn)
# hcb.id_print((optim_cond(x) ** 2).mean(),name='df_t/d_x id ')
f_v = ((x - gt) ** 2).mean()
return f_v
# return x.sum()
#
# @jax.jit
def outer_objective_unrolled(hp_nn, init_inner,data):
"""Validation loss."""
# gt = data[-1]
f = lambda hp_nn: screen_poisson_solver_unrolled(init_inner, hp_nn,data)
loss = lambda pp_image:screen_poisson_objective(pp_image,hp_nn,data)
optim_cond = jax.grad(loss)
x = f(hp_nn)
hcb.id_print((optim_cond(x) ** 2).mean(),name='df_t/d_x unrolled ')
# f_v = ((x - gt) ** 2).mean()
return x.sum()
def implicit_diff_manual(params):
    # Hand-rolled implicit differentiation (distinct from jaxopt's
    # `implicit_diff` module imported above, which it would otherwise shadow).
hyper_param, prime_param = params
F = jax.grad(screen_poisson_objective)
sol = screen_poisson_solver_unrolled(prime_param,hyper_param,data)
F_v = lambda u: F(sol,u,data)
F_x = lambda u: F(u,hyper_param,data)
    # Shapes: x has n entries, F(x, l) has n entries, the hyperparameters l
    # have m entries; dF/dl and dx/dl are n x m, and dF/dx is n x n.
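    # Implicit function theorem: at a solution x*(l) of F(x, l) = 0,
    #   (dF/dx) (dx*/dl) = -dF/dl,
    # which is exactly the linear system handed to CG below (vmap_df_dx
    # applies dF/dx; jf_dlambda builds -dF/dl).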
def vmap_df_dx(u):
def df_dx(u):
#u: nxm
#jvp: nxn
dims = list(range(len(sol.shape)))
dimsp1 = list(range(1,1+len(sol.shape)))
batched = u.reshape(*sol.shape,-1).transpose(dims[-1]+1,*dims)
g_x = lambda x: jax.vjp(F_x,sol)[1](x)
return jax.vmap(g_x)(batched)[0].transpose(*dimsp1,0).reshape(*u.shape)
# return jvpfun
return jax.tree_map(lambda x: df_dx(x), u)
def jf_dlambda():
a = jax.jacfwd(F_v)(hyper_param)
return jax.tree_multimap(lambda x: -x, a)
a = jf_dlambda()
vmap_df_dx(a)
zero_map = jax.tree_multimap(lambda x: x*0, jf_dlambda())
d = linear_solve.solve_cg(matvec=vmap_df_dx,
b=jf_dlambda(),
init=zero_map,
maxiter=100)
return d
def fd(hyper_params, init_inner, data,delta):
f_unrolled = lambda hp_nn:screen_poisson_solver_unrolled (init_inner,hp_nn,data)
grad_flat, grad_tree = tree_flatten(hyper_params)
for i in tqdm.trange(len(grad_flat)):
value_flat, value_tree = tree_flatten(hyper_params)
shape = value_flat[i].shape
for j in tqdm.trange(value_flat[i].reshape(-1).shape[0]):
            # NOTE: this central difference assumes f_unrolled returns a scalar.
            vff = value_flat.copy()
            vfb = value_flat.copy()
            # JAX arrays are immutable: perturb one entry with .at[].set().
            vff[i] = vff[i].reshape(-1).at[j].set(vff[i].reshape(-1)[j] + delta / 2)
            vfb[i] = vfb[i].reshape(-1).at[j].set(vfb[i].reshape(-1)[j] - delta / 2)
            vff[i] = vff[i].reshape(*shape)
            vfb[i] = vfb[i].reshape(*shape)
            vff_tree = tree_unflatten(value_tree, vff)
            vfb_tree = tree_unflatten(value_tree, vfb)
            ff = f_unrolled(vff_tree)
            fb = f_unrolled(vfb_tree)
            # Central difference: df/dw_j ~= (f(w + d/2) - f(w - d/2)) / d.
            grad_flat[i] = grad_flat[i].reshape(-1).at[j].set((ff - fb) / delta)
grad_flat[i] = grad_flat[i].reshape(*shape)
grad_tree = tree_unflatten(grad_tree, grad_flat)
return grad_tree
# @jax.jit
def check_with_unrolled(params,data):
f_unrolled = lambda hp_nn:screen_poisson_solver_unrolled ( *params[1:],hp_nn,data)
f_id = lambda hp_nn:screen_poisson_hierarchical_solver_id( *params[1:],hp_nn,data)
    # implicit_diff_val = implicit_diff_manual(params)
# # fd_grad = fd(params[0], *params[1:], data,0.01)
grad_id = jax.jacobian(f_id)(params[0])
# grad_unrolled = jax.jacobian(f_unrolled)(params[0])
# # squared_diff_fd = jax.tree_multimap(lambda x, y: (x-y)**2, fd_grad,grad_unrolled)
# squared_diff_jaxid_unrolled = jax.tree_multimap(lambda x, y: (x-y)**2, grad_id,grad_unrolled)
# squared_diff_myid_unrolled = jax.tree_multimap(lambda x, y: (x-y)**2, implicit_diff_val,grad_unrolled)
# # fd_sum = [i.sum() for i in jax.tree_flatten(squared_diff_fd)[0]]
# jaxid_unrolled_sum = [i.sum() for i in jax.tree_flatten(squared_diff_jaxid_unrolled)[0]]
# myid_unrolled_sum = [i.sum() for i in jax.tree_flatten(squared_diff_myid_unrolled)[0]]
# # hcb.id_print(jnp.array(fd_sum).mean(),name='fd_diff')
# hcb.id_print(jnp.array(jaxid_unrolled_sum).mean(),name='jax_diff')
# hcb.id_print(jnp.array(myid_unrolled_sum).mean(),name='my_diff')
# # return jax_diff
def hyper_optimization():
# import pickle
# # # # pickle.dump( params , open( 'weights_1x1.pkl' , 'wb' ) )
# params = pickle.load( open( 'weights.pkl' , 'rb' ))
# check_with_unrolled([params, init_inpt],data)
delta = 0.001
cnn = Conv3features()
rng = jax.random.PRNGKey(1)
testim = jax.random.uniform(rng,[1, h, w, 3])
rng, init_rng = jax.random.split(rng)
params = cnn.init(init_rng, testim)['params']
rng = jax.random.PRNGKey(0)
import time
start_time = time.time()
f = lambda hp_nn:outer_objective_id(hp_nn, init_inpt,data)
end_time = time.time()
# logger.addScalar(end_time - start_time,'compile_time')
lr = 0.01
solver = OptaxSolver(fun=outer_objective_id, opt=optax.adam(lr), implicit_diff=True)
# optimality_func = lambda :jax.grad(screen_poisson_objective)
# solver.optimality_fun =
state = solver.init_state(params)
# result, _ = solver.run(init_params = state)
# f_t = screen_poisson_solver
for i in tqdm.trange(10000):
# if (i%100==0):
# pickle.dump( params , open( 'weights.pkl' , 'wb' ) )
# weights = pickle.load( open( 'weights.pkl' , 'rb' ))
# check_with_unrolled([params, init_inpt],data)
# start_time = time.time()
params, state = solver.update(params, state,init_inner=init_inpt,data=data)
# solver.optimality_fun
# optimality_err = solver.l2_optimality_error(params)
# end_time = time.time()
# logger.addScalar(end_time - start_time,'update_time')
# params = optax.apply_updates(params, updates)
loss = f(params)
print('loss ',loss)
# logger.addScalar(loss,'loss_GD')
# if(i%10 == 0):
# output = f_t(init_inpt, params)
# imshow = jnp.concatenate((output,noisy_image,im_gt),axis=1)
# imshow = jnp.clip(imshow,0,1)
# logger.addImage(np.array(imshow).transpose(2,0,1),'Image')
# logger.takeStep()
# print('loss ', loss)
# params = jax.tree_multimap(lambda x,dfx: x - lr * dfx, params, grad)
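# Bilevel structure: the inner solver returns x*(theta) = argmin_x f(x, theta)
# for CNN weights theta; the outer Adam loop differentiates the loss at
# x*(theta) through the solver via the @implicit_diff.custom_root decorator
# rather than by unrolling the Gauss-Newton iterations.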
hyper_optimization()
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock_plugins.cloudify_agent import PluginInstaller
class ConsumerBackedPluginInstaller(PluginInstaller):
def install(self, plugin):
pass
|
from math import prod
import networkx as nx
def parse_data():
with open('2021/09/input.txt') as f:
data = f.read()
G = nx.grid_graph((100, 100))
for y, line in enumerate(data.splitlines()):
for x, height in enumerate(line):
G.add_node((x, y), height=int(height))
return G
def part_one(G):
return sum(
G.nodes[node]["height"] + 1
for node in G
if all(G.nodes[node]["height"] < G.nodes[neighbor]["height"] for neighbor in G[node])
)
def part_two(G):
G.remove_nodes_from([node for node in G if G.nodes[node]["height"] == 9])
return prod(sorted(len(basin) for basin in nx.connected_components(G))[-3:])
def main():
data = parse_data()
print(f'Day 09 Part 01: {part_one(data)}')
print(f'Day 09 Part 02: {part_two(data)}')
if __name__ == '__main__':
    main()
|
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(max_iter=5000,
                                solver='lbfgs',
                                multi_class='auto')
# The digits dataset
digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Split into train and test subsets (50% each)
X_train, X_test, y_train, y_test = train_test_split(
data, digits.target, test_size=0.5, shuffle=False)
# Learn the digits on the first half of the digits
classifier.fit(X_train, y_train)
# Test on second half of data
n = np.random.randint(int(n_samples/2),n_samples)
print('Predicted: ' + str(classifier.predict(digits.data[n:n+1])[0]))
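# Aggregate accuracy on the held-out half (an illustrative addition using the
# standard `score` API; the original script only checks one random sample).
print('Test accuracy: %.3f' % classifier.score(X_test, y_test))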
# Show number
plt.imshow(digits.images[n], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
from Cython.Build import cythonize
__version__ = '0.33'
setuptools.setup(
name="nexus-data-access",
version=__version__,
url="https://github.jpl.nasa.gov/thuang/nexus",
author="Team Nexus",
description="NEXUS API.",
long_description=open('README.md').read(),
packages=['nexustiles', 'nexustiles.model', 'nexustiles.dao'],
package_data={'nexustiles': ['config/datastores.ini']},
platforms='any',
setup_requires=['cython'],
install_requires=[
'cassandra-driver==3.5.0',
'pysolr==3.7.0',
'requests',
'nexusproto==1.0.0',
'shapely'
],
classifiers=[
'Development Status :: 1 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
ext_modules=cythonize(["**/*.pyx"]),
zip_safe=False
)
|
# -*- coding: utf-8 -*-
from odoo import fields, models
class MrpRoutingWorkcenter(models.Model):
_inherit = 'mrp.routing.workcenter'
active = fields.Boolean('Active', default=True)
workcenter_id = fields.Many2one('mrp.workcenter', string='Work Center', required=False, check_company=True, ondelete='set null')
|
import csv
import logging
from django import forms
from django.conf import settings
import requests
from pyisemail import is_email
try:
import phonenumbers
except ImportError:
phonenumbers = None
logger = logging.getLogger(__name__)
class PublicBodyValidator:
def __init__(self, pbs):
self.pbs = pbs
def get_validation_results(self):
self.is_valid = True
for res in validate_publicbodies(self.pbs):
self.is_valid = False
yield res
def write_csv(self, stream):
writer = None
for result in self.get_validation_results():
if writer is None:
fieldnames = list(result.keys())
writer = csv.DictWriter(stream, fieldnames)
writer.writeheader()
writer.writerow(result)
def validate_publicbodies(queryset):
validators = [
(validate_email, "email"),
(validate_url, "url"),
]
if phonenumbers is not None:
validators.append(
(
validate_fax,
"fax",
)
)
for pb in queryset.iterator():
result = run_validators(validators, pb)
if not result["error"]:
continue
result["id"] = pb.id
result["name"] = pb.name
yield result
def run_validators(validators, pb):
results = {"error": False}
for validator, attr in validators:
val = getattr(pb, attr)
results[attr] = None
results[attr + "_error"] = False
if not val:
continue
try:
logger.debug("Validating %s of %s (%s)", attr, pb.name, pb.id)
ret = validator(val)
except forms.ValidationError as e:
results[attr] = "\n".join(e)
results[attr + "_error"] = True
results["error"] = True
else:
results[attr] = ret
return results
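# Example of one validation result (illustrative values, not actual output):
#   {"error": True, "email": "<validator message>", "email_error": True,
#    "url": None, "url_error": False, "fax": None, "fax_error": False}
# validate_publicbodies adds "id" and "name" before yielding, and write_csv
# turns each such dict into one CSV row.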
def validate_email(email):
diagnosis = is_email(email, diagnose=True, check_dns=True)
if diagnosis.diagnosis_type != "VALID":
raise forms.ValidationError(diagnosis.diagnosis_type, code=diagnosis.code)
def validate_url(url):
try:
response = requests.get(url, timeout=10)
except Exception as e:
raise forms.ValidationError(str(e))
if response.history:
return response.url
def validate_fax(fax):
try:
number = phonenumbers.parse(fax, settings.LANGUAGE_CODE.upper())
except phonenumbers.phonenumberutil.NumberParseException:
raise forms.ValidationError("Fax number cannot be parsed")
if not phonenumbers.is_possible_number(number):
raise forms.ValidationError("Impossible fax number")
if not phonenumbers.is_valid_number(number):
raise forms.ValidationError("Invalid fax number")
|
from pyramid.scaffolds import PyramidTemplate
class RestJsonTemplate(PyramidTemplate):
_template_dir = 'restjson_scaffold'
summary = 'Template to do REST/JSON services'
def pre(self, command, output_dir, vars):
vars["project_cc"] = self._to_camel_case(vars["project"])
return PyramidTemplate.pre(self, command, output_dir, vars)
def _to_camel_case(self, name):
pieces = name.split('-')
return ''.join([piece.capitalize() for piece in pieces])
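# e.g. _to_camel_case('rest-json-service') -> 'RestJsonService'; the scaffold
# templates can then refer to the project's class-style name via `project_cc`.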
|
from typing import List
from pydantic import BaseModel
class Migration(BaseModel):
"""
Migration
"""
issueNumber: str
status: str
class MigrationOutList(BaseModel):
migrations: List[Migration]
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Treemap(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "treemap"
_valid_props = {
"branchvalues",
"count",
"customdata",
"customdatasrc",
"domain",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"labels",
"labelssrc",
"legendrank",
"level",
"marker",
"maxdepth",
"meta",
"metasrc",
"name",
"opacity",
"outsidetextfont",
"parents",
"parentssrc",
"pathbar",
"root",
"sort",
"stream",
"text",
"textfont",
"textinfo",
"textposition",
"textsrc",
"texttemplate",
"texttemplatesrc",
"tiling",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
# branchvalues
# ------------
@property
def branchvalues(self):
"""
Determines how the items in `values` are summed. When set to
"total", items in `values` are taken to be value of all its
descendants. When set to "remainder", items in `values`
corresponding to the root and the branches sectors are taken to
be the extra part not part of the sum of the values at their
leaves.
The 'branchvalues' property is an enumeration that may be specified as:
- One of the following enumeration values:
['remainder', 'total']
Returns
-------
Any
"""
return self["branchvalues"]
@branchvalues.setter
def branchvalues(self, val):
self["branchvalues"] = val
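    # Illustrative example (an assumption, not generated code): with
    # labels=['A', 'B', 'C'], parents=['', 'A', 'A'], values=[100, 60, 40],
    # branchvalues='total' reads A's 100 as already including B and C,
    # while 'remainder' treats it as extra, sizing A's subtree at 100 + 60 + 40.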
# count
# -----
@property
def count(self):
"""
Determines default for `values` when it is not provided, by
inferring a 1 for each of the "leaves" and/or "branches",
otherwise 0.
The 'count' property is a flaglist and may be specified
as a string containing:
- Any combination of ['branches', 'leaves'] joined with '+' characters
(e.g. 'branches+leaves')
Returns
-------
Any
"""
return self["count"]
@count.setter
def count(self, val):
self["count"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
        Sets the source reference on Chart Studio Cloud for customdata.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this treemap trace
.
row
If there is a layout grid, use the domain for
this row in the grid for this treemap trace .
x
Sets the horizontal domain of this treemap
trace (in plot fraction).
y
Sets the vertical domain of this treemap trace
(in plot fraction).
Returns
-------
plotly.graph_objs.treemap.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'name', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
        Sets the source reference on Chart Studio Cloud for hoverinfo.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
                content within the hover label box. Has an effect
                only if the hover label text spans two or more
                lines.
alignsrc
Sets the source reference on Chart Studio Cloud
                for align.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
                for bgcolor.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
                for bordercolor.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
                for namelength.
Returns
-------
plotly.graph_objs.treemap.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
        as well as %{xother}, %{_xother}, %{_xother_}, %{xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
        Additionally, every attribute that can be specified per-point
        (the ones that are `arrayOk: true`) is available. Finally, the
        template string has access to the variables `currentPath`,
        `root`, `entry`, `percentRoot`, `percentEntry` and
        `percentParent`. Anything contained in the tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
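    # Illustrative use (an assumption, not generated code):
    #   trace.hovertemplate = "%{label}: %{value} (%{percentParent:.1%})<extra></extra>"
    # shows each sector's label, value and share of its parent while hiding
    # the secondary hover box.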
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
        hovertemplate.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each sector. If a
single string, the same string appears for all data points. If
an array of string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo` must contain a
"text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for hovertext.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for ids.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# insidetextfont
# --------------
@property
def insidetextfont(self):
"""
Sets the font used for `textinfo` lying inside the sector.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
                for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
                for family.
size
sizesrc
Sets the source reference on Chart Studio Cloud
                for size.
Returns
-------
plotly.graph_objs.treemap.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
# labels
# ------
@property
def labels(self):
"""
Sets the labels of each of the sectors.
The 'labels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
# labelssrc
# ---------
@property
def labelssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for labels.
The 'labelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
        *reversed* `legend.traceorder` they are on the bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# level
# -----
@property
def level(self):
"""
Sets the level from which this trace hierarchy is rendered. Set
`level` to `''` to start from the root node in the hierarchy.
Must be an "id" if `ids` is filled in, otherwise plotly
attempts to find a matching item in `labels`.
The 'level' property accepts values of any type
Returns
-------
Any
"""
return self["level"]
@level.setter
def level(self, val):
self["level"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
                effect only if colors is set to a numerical
array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette
will be chosen according to whether numbers in
the `color` array are all positive, all
negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
colors) or the bounds set in `marker.cmin` and
                `marker.cmax`. Has an effect only if colors is
set to a numerical array. Defaults to `false`
when `marker.cmin` and `marker.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has
                an effect only if colors is set to a numerical
array. Value should have the same units as
colors and if set, `marker.cmin` must be set as
well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
                only if colors is set to a numerical array.
Value should have the same units as colors. Has
no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
                an effect only if colors is set to a numerical
array. Value should have the same units as
colors and if set, `marker.cmax` must be set as
well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.treemap.marker.Col
orBar` instance or dict with compatible
properties
colors
Sets the color of each sector of this trace. If
not specified, the default trace color set is
used to pick the sector colors.
colorscale
Sets the colorscale. Has an effect only if
                colors is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorssrc
Sets the source reference on Chart Studio Cloud
for colors .
depthfade
Determines if the sector colors are faded
towards the background from the leaves up to
the headers. This option is unavailable when a
`colorscale` is present, defaults to false when
`marker.colors` is set, but otherwise defaults
to true. When set to "reversed", the fading
direction is inverted, that is the top elements
within hierarchy are drawn with fully saturated
colors while the leaves are faded towards the
background color.
line
:class:`plotly.graph_objects.treemap.marker.Lin
e` instance or dict with compatible properties
pad
:class:`plotly.graph_objects.treemap.marker.Pad
` instance or dict with compatible properties
reversescale
Reverses the color mapping if true. Has an
                effect only if colors is set to a numerical
array. If true, `marker.cmin` will correspond
to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
                colors is set to a numerical array.
Returns
-------
plotly.graph_objs.treemap.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# maxdepth
# --------
@property
def maxdepth(self):
"""
Sets the number of rendered sectors from any given `level`. Set
`maxdepth` to "-1" to render all the levels in the hierarchy.
        The 'maxdepth' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["maxdepth"]
@maxdepth.setter
def maxdepth(self, val):
self["maxdepth"] = val
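    # Illustrative use (an assumption, not generated code): together with
    # `level`, this controls drill-down, e.g.
    #   go.Treemap(labels=labels, parents=parents, level='B', maxdepth=2)
    # renders sector 'B' as the entry point and only its immediate children.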
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
        Sets the source reference on Chart Studio Cloud for meta.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# outsidetextfont
# ---------------
@property
def outsidetextfont(self):
"""
Sets the font used for `textinfo` lying outside the sector.
This option refers to the root of the hierarchy presented on
top left corner of a treemap graph. Please note that if a
hierarchy has multiple root nodes, this option won't have any
effect and `insidetextfont` would be used.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
                for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
                for family.
size
sizesrc
Sets the source reference on Chart Studio Cloud
                for size.
Returns
-------
plotly.graph_objs.treemap.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
# parents
# -------
@property
def parents(self):
"""
Sets the parent sectors for each of the sectors. Empty string
items '' are understood to reference the root node in the
hierarchy. If `ids` is filled, `parents` items are understood
to be "ids" themselves. When `ids` is not set, plotly attempts
to find matching items in `labels`, but beware they must be
unique.
The 'parents' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["parents"]
@parents.setter
def parents(self, val):
self["parents"] = val
# parentssrc
# ----------
@property
def parentssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for parents.
The 'parentssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["parentssrc"]
@parentssrc.setter
def parentssrc(self, val):
self["parentssrc"] = val
# pathbar
# -------
@property
def pathbar(self):
"""
The 'pathbar' property is an instance of Pathbar
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Pathbar`
- A dict of string/value properties that will be passed
to the Pathbar constructor
Supported dict properties:
edgeshape
Determines which shape is used for edges
between `barpath` labels.
side
                Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
                Sets the thickness of `pathbar` (in px). If not
                specified, the `pathbar.textfont.size` is used
                with 3 pixels of extra padding on each side.
visible
Determines if the path bar is drawn i.e.
outside the trace `domain` and with one pixel
gap.
Returns
-------
plotly.graph_objs.treemap.Pathbar
"""
return self["pathbar"]
@pathbar.setter
def pathbar(self, val):
self["pathbar"] = val
# root
# ----
@property
def root(self):
"""
The 'root' property is an instance of Root
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Root`
- A dict of string/value properties that will be passed
to the Root constructor
Supported dict properties:
color
                Sets the color of the root node for a
                sunburst/treemap/icicle trace. This has no
                effect when a colorscale is used to set the
markers.
Returns
-------
plotly.graph_objs.treemap.Root
"""
return self["root"]
@root.setter
def root(self, val):
self["root"] = val
# sort
# ----
@property
def sort(self):
"""
Determines whether or not the sectors are reordered from
largest to smallest.
The 'sort' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["sort"]
@sort.setter
def sort(self, val):
self["sort"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.treemap.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each sector. If trace
`textinfo` contains a "text" flag, these elements will be seen
on the chart. If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in the
hover labels.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used for `textinfo`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
                for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
                for family.
size
sizesrc
Sets the source reference on Chart Studio Cloud
                for size.
Returns
-------
plotly.graph_objs.treemap.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textinfo
# --------
@property
def textinfo(self):
"""
Determines which trace information appear on the graph.
The 'textinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'current path', 'percent root', 'percent entry', 'percent parent'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
# textposition
# ------------
@property
def textposition(self):
"""
Sets the positions of the `text` elements.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
Returns
-------
Any
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
        appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
        Every attribute that can be specified per-point (the ones that
        are `arrayOk: true`) is available. Finally, the variables
        `currentPath`, `root`, `entry`, `percentRoot`, `percentEntry`,
        `percentParent`, `label` and `value` are available.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
texttemplate .
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# tiling
# ------
@property
def tiling(self):
"""
The 'tiling' property is an instance of Tiling
that may be specified as:
- An instance of :class:`plotly.graph_objs.treemap.Tiling`
- A dict of string/value properties that will be passed
to the Tiling constructor
Supported dict properties:
flip
                Determines if the positions obtained from the
                solver are flipped on each axis.
packing
Determines d3 treemap solver. For more info
please refer to
https://github.com/d3/d3-hierarchy#treemap-
tiling
pad
Sets the inner padding (in px).
squarifyratio
When using "squarify" `packing` algorithm,
according to https://github.com/d3/d3-hierarchy
/blob/master/README.md#squarify_ratio this
option specifies the desired aspect ratio of
the generated rectangles. The ratio must be
specified as a number greater than or equal to
one. Note that the orientation of the generated
rectangles (tall or wide) is not implied by the
ratio; for example, a ratio of two will attempt
to produce a mixture of rectangles whose
width:height ratio is either 2:1 or 1:2. When
using "squarify", unlike d3 which uses the
Golden Ratio i.e. 1.618034, Plotly applies 1 to
increase squares in treemap layouts.
Returns
-------
plotly.graph_objs.treemap.Tiling
"""
return self["tiling"]
@tiling.setter
def tiling(self, val):
self["tiling"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# values
# ------
@property
def values(self):
"""
Sets the values associated with each of the sectors. Use with
`branchvalues` to determine how the values are summed.
The 'values' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
# valuessrc
# ---------
@property
def valuessrc(self):
"""
Sets the source reference on Chart Studio Cloud for values .
The 'valuessrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.treemap.Domain` instance
or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.treemap.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `currentPath`, `root`,
            `entry`, `percentRoot`, `percentEntry` and
            `percentParent`. Anything contained in the tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with `reversed` `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.treemap.Marker` instance
or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented on top left corner of a treemap graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
parents .
pathbar
:class:`plotly.graph_objects.treemap.Pathbar` instance
or dict with compatible properties
root
:class:`plotly.graph_objects.treemap.Root` instance or
dict with compatible properties
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.treemap.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available, as are the variables
            `currentPath`, `root`, `entry`, `percentRoot`,
            `percentEntry`, `percentParent`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
tiling
:class:`plotly.graph_objects.treemap.Tiling` instance
or dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
branchvalues=None,
count=None,
customdata=None,
customdatasrc=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
labels=None,
labelssrc=None,
legendrank=None,
level=None,
marker=None,
maxdepth=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
parents=None,
parentssrc=None,
pathbar=None,
root=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
tiling=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs
):
"""
Construct a new Treemap object
        Visualize hierarchical data from leaves (and/or outer branches)
towards root with rectangles. The treemap sectors are
determined by the entries in "labels" or "ids" and in
"parents".
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Treemap`
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.treemap.Domain` instance
or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.treemap.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `currentPath`, `root`,
            `entry`, `percentRoot`, `percentEntry` and
            `percentParent`. Anything contained in the tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with `reversed` `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.treemap.Marker` instance
or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented on top left corner of a treemap graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
parents .
pathbar
:class:`plotly.graph_objects.treemap.Pathbar` instance
or dict with compatible properties
root
:class:`plotly.graph_objects.treemap.Root` instance or
dict with compatible properties
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.treemap.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available, as are the variables
            `currentPath`, `root`, `entry`, `percentRoot`,
            `percentEntry`, `percentParent`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
tiling
:class:`plotly.graph_objects.treemap.Tiling` instance
or dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values associated with each of the sectors.
Use with `branchvalues` to determine how the values are
summed.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Treemap
"""
super(Treemap, self).__init__("treemap")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Treemap
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Treemap`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("branchvalues", None)
_v = branchvalues if branchvalues is not None else _v
if _v is not None:
self["branchvalues"] = _v
_v = arg.pop("count", None)
_v = count if count is not None else _v
if _v is not None:
self["count"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("level", None)
_v = level if level is not None else _v
if _v is not None:
self["level"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("maxdepth", None)
_v = maxdepth if maxdepth is not None else _v
if _v is not None:
self["maxdepth"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("parents", None)
_v = parents if parents is not None else _v
if _v is not None:
self["parents"] = _v
_v = arg.pop("parentssrc", None)
_v = parentssrc if parentssrc is not None else _v
if _v is not None:
self["parentssrc"] = _v
_v = arg.pop("pathbar", None)
_v = pathbar if pathbar is not None else _v
if _v is not None:
self["pathbar"] = _v
_v = arg.pop("root", None)
_v = root if root is not None else _v
if _v is not None:
self["root"] = _v
_v = arg.pop("sort", None)
_v = sort if sort is not None else _v
if _v is not None:
self["sort"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("tiling", None)
_v = tiling if tiling is not None else _v
if _v is not None:
self["tiling"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "treemap"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
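
# A minimal usage sketch: building a small Treemap trace from
# labels/parents/values. With branchvalues="total" each parent's value must
# equal the sum of its children's values; the data here are illustrative.
#
#     import plotly.graph_objects as go
#
#     fig = go.Figure(
#         go.Treemap(
#             labels=["root", "A", "B", "A1", "A2"],
#             parents=["", "root", "root", "A", "A"],
#             values=[10, 6, 4, 4, 2],
#             branchvalues="total",
#             textinfo="label+value+percent parent",
#         )
#     )
#     fig.show()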
|
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V12
from bingads.v12.internal.bulk.string_table import _StringTable
from bingads.v12.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v12.internal.bulk.mappings import _SimpleBulkMapping, _DynamicColumnNameMapping
from bingads.v12.internal.extensions import bulk_str
class _BulkNegativeKeyword(_SingleRecordBulkEntity):
""" The base class for all bulk negative keywords.
Either assigned individually to a campaign or ad group entity, or shared in a negative keyword list.
*See also:*
* :class:`.BulkAdGroupNegativeKeyword`
* :class:`.BulkCampaignNegativeKeyword`
* :class:`.BulkSharedNegativeKeyword`
"""
def __init__(self, status=None, negative_keyword=None, parent_id=None):
super(_BulkNegativeKeyword, self).__init__()
self._negative_keyword = negative_keyword
self._status = status
self._parent_id = parent_id
@property
def status(self):
""" The status of the negative keyword association.
The value is 'Active' if the negative keyword is assigned to the parent entity.
The value is 'Deleted' if the negative keyword is removed from the parent entity,
or should be removed in a subsequent upload operation.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
self._status = status
@property
def negative_keyword(self):
""" Defines a negative keyword with match type. """
return self._negative_keyword
@negative_keyword.setter
def negative_keyword(self, negative_keyword):
self._negative_keyword = negative_keyword
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.negative_keyword.Id),
csv_to_field=lambda c, v: setattr(c.negative_keyword, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.status),
csv_to_field=lambda c, v: setattr(c, '_status', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c._parent_id),
csv_to_field=lambda c, v: setattr(c, '_parent_id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Keyword,
field_to_csv=lambda c: c.negative_keyword.Text,
csv_to_field=lambda c, v: setattr(c.negative_keyword, 'Text', v)
),
_SimpleBulkMapping(
header=_StringTable.MatchType,
field_to_csv=lambda c: bulk_str(c.negative_keyword.MatchType),
csv_to_field=lambda c, v: setattr(c.negative_keyword, 'MatchType', v)
)
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self._negative_keyword, 'negative_keyword')
self.convert_to_values(row_values, _BulkNegativeKeyword._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self._negative_keyword = _CAMPAIGN_OBJECT_FACTORY_V12.create('NegativeKeyword')
self._negative_keyword.Type = 'NegativeKeyword'
row_values.convert_to_entity(self, _BulkNegativeKeyword._MAPPINGS)
def read_additional_data(self, stream_reader):
super(_BulkNegativeKeyword, self).read_additional_data(stream_reader)
class _BulkEntityNegativeKeyword(_BulkNegativeKeyword):
""" This base class for all bulk negative keywords that are assigned individually to a campaign or ad group entity.
*See also:*
* :class:`.BulkAdGroupNegativeKeyword`
* :class:`.BulkCampaignNegativeKeyword`
"""
def __init__(self,
status=None,
negative_keyword=None,
parent_id=None,
entity_name=None):
super(_BulkEntityNegativeKeyword, self).__init__(
status,
negative_keyword,
parent_id,
)
self._entity_name = entity_name
@property
def _entity_column_name(self):
raise NotImplementedError()
_MAPPINGS = [
_DynamicColumnNameMapping(
header_func=lambda c: c._entity_column_name,
field_to_csv=lambda c: c._entity_name,
csv_to_field=lambda c, v: setattr(c, '_entity_name', v))
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
super(_BulkEntityNegativeKeyword, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, _BulkEntityNegativeKeyword._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
super(_BulkEntityNegativeKeyword, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, _BulkEntityNegativeKeyword._MAPPINGS)
def read_additional_data(self, stream_reader):
super(_BulkEntityNegativeKeyword, self).read_additional_data(stream_reader)
class BulkAdGroupNegativeKeyword(_BulkEntityNegativeKeyword):
""" Represents a negative keyword that is assigned to a ad group. Each negative keyword can be read or written in a bulk file.
This class exposes the :attr:`.BulkNegativeKeyword.negative_keyword` property that can be read and written as
fields of the Ad Group Negative Keyword record in a bulk file.
For more information, see Ad Group Negative Keyword at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
status=None,
negative_keyword=None,
ad_group_id=None,
ad_group_name=None,
campaign_name=None):
super(BulkAdGroupNegativeKeyword, self).__init__(
status,
negative_keyword,
ad_group_id,
ad_group_name,
)
self._campaign_name = campaign_name
@property
def campaign_name(self):
""" The name of the campaign that the negative keyword is assigned.
Corresponds to the 'Campaign' field in the bulk file.
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
self._campaign_name = campaign_name
@property
def ad_group_id(self):
""" Corresponds to the 'Parent Id' field in the bulk file.
        :return: The identifier of the ad group that the negative keyword is assigned to.
:rtype: int
"""
return self._parent_id
@ad_group_id.setter
def ad_group_id(self, value):
self._parent_id = value
@property
def ad_group_name(self):
""" Corresponds to the 'Ad Group' field in the bulk file.
        :return: The name of the ad group that the negative keyword is assigned to.
:rtype: str
"""
return self._entity_name
@ad_group_name.setter
def ad_group_name(self, value):
self._entity_name = value
@property
def _entity_column_name(self):
return _StringTable.AdGroup
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Campaign,
field_to_csv=lambda c: c.campaign_name,
csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
)
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
super(BulkAdGroupNegativeKeyword, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkAdGroupNegativeKeyword._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
super(BulkAdGroupNegativeKeyword, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkAdGroupNegativeKeyword._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkAdGroupNegativeKeyword, self).read_additional_data(stream_reader)
class BulkCampaignNegativeKeyword(_BulkEntityNegativeKeyword):
""" Represents a negative keyword that is assigned to a campaign. Each negative keyword can be read or written in a bulk file.
This class exposes the :attr:`BulkNegativeKeyword.negative_keyword` property that can be read and written as
fields of the Campaign Negative Keyword record in a bulk file.
For more information, see Campaign Negative Keyword at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
status=None,
negative_keyword=None,
campaign_id=None,
campaign_name=None):
super(BulkCampaignNegativeKeyword, self).__init__(
status,
negative_keyword,
campaign_id,
campaign_name,
)
@property
def campaign_id(self):
""" The identifier of the campaign that the negative keyword is assigned.
Corresponds to the 'Parent Id' field in the bulk file.
:rtype: int
"""
return self._parent_id
@campaign_id.setter
def campaign_id(self, value):
self._parent_id = value
@property
def campaign_name(self):
""" The name of the campaign that the negative keyword is assigned.
Corresponds to the 'Campaign' field in the bulk file.
:rtype: str
"""
return self._entity_name
@campaign_name.setter
def campaign_name(self, value):
self._entity_name = value
@property
def _entity_column_name(self):
return _StringTable.Campaign
class BulkCampaignNegativeKeywordList(_SingleRecordBulkEntity):
""" Represents a negative keyword list that is assigned to a campaign. Each negative keyword list can be read or written in a bulk file.
    This class exposes the :attr:`BulkCampaignNegativeKeywordList.shared_entity_association` property that can be read
and written as fields of the Campaign Negative Keyword List Association record in a bulk file.
For more information, see Campaign Negative Keyword List Association at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, status=None, shared_entity_association=None):
super(BulkCampaignNegativeKeywordList, self).__init__()
self._shared_entity_association = shared_entity_association
self._status = status
@property
def status(self):
""" The status of the negative keyword list association.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
self._status = status
@property
def shared_entity_association(self):
""" The campaign and negative keyword list identifiers.
see Campaign Negative Keyword List Association at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._shared_entity_association
@shared_entity_association.setter
def shared_entity_association(self, shared_entity_association):
self._shared_entity_association = shared_entity_association
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Status,
field_to_csv=lambda c: c.status,
csv_to_field=lambda c, v: setattr(c, 'status', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.shared_entity_association.SharedEntityId),
csv_to_field=lambda c, v: setattr(c.shared_entity_association, 'SharedEntityId', int(v))
),
_SimpleBulkMapping(
header=_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.shared_entity_association.EntityId),
csv_to_field=lambda c, v: setattr(c.shared_entity_association, 'EntityId', int(v))
),
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self._shared_entity_association, 'shared_entity_association')
self.convert_to_values(row_values, BulkCampaignNegativeKeywordList._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self._shared_entity_association = _CAMPAIGN_OBJECT_FACTORY_V12.create('SharedEntityAssociation')
self._shared_entity_association.EntityType = 'Campaign'
self._shared_entity_association.SharedEntityType = 'NegativeKeywordList'
row_values.convert_to_entity(self, BulkCampaignNegativeKeywordList._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkCampaignNegativeKeywordList, self).read_additional_data(stream_reader)
class BulkNegativeKeywordList(_SingleRecordBulkEntity):
""" Represents a negative keyword list that can be read or written in a bulk file.
This class exposes the :attr:`.BulkNegativeKeywordList.negative_keyword_list` property that can be read and
written as fields of the Negative Keyword List record in a bulk file.
For more information, see Negative Keyword List at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, status=None, negative_keyword_list=None):
super(BulkNegativeKeywordList, self).__init__()
self._status = status
self._negative_keyword_list = negative_keyword_list
@property
def negative_keyword_list(self):
""" The negative keyword list.
see Negative Keyword List at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._negative_keyword_list
@negative_keyword_list.setter
def negative_keyword_list(self, negative_keyword_list):
self._negative_keyword_list = negative_keyword_list
@property
def status(self):
""" The status of the negative keyword list.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
self._status = status
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.negative_keyword_list.Id),
csv_to_field=lambda c, v: setattr(c.negative_keyword_list, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.status),
csv_to_field=lambda c, v: setattr(c, 'status', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Name,
field_to_csv=lambda c: c.negative_keyword_list.Name,
csv_to_field=lambda c, v: setattr(c.negative_keyword_list, 'Name', v)
)
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self._negative_keyword_list, 'negative_keyword_list')
self.convert_to_values(row_values, BulkNegativeKeywordList._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self._negative_keyword_list = _CAMPAIGN_OBJECT_FACTORY_V12.create('NegativeKeywordList')
self._negative_keyword_list.Type = 'NegativeKeywordList'
row_values.convert_to_entity(self, BulkNegativeKeywordList._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkNegativeKeywordList, self).read_additional_data(stream_reader)
class BulkSharedNegativeKeyword(_BulkNegativeKeyword):
""" Represents a negative keyword that is shared in a negative keyword list.
Each shared negative keyword can be read or written in a bulk file.
    This class exposes the :attr:`.BulkNegativeKeyword.negative_keyword` property that
can be read and written as fields of the Shared Negative Keyword record in a bulk file.
For more information, see Shared Negative Keyword at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, status=None, negative_keyword=None, negative_keyword_list_id=None):
super(BulkSharedNegativeKeyword, self).__init__(status, negative_keyword, negative_keyword_list_id)
@property
def negative_keyword_list_id(self):
return self._parent_id
@negative_keyword_list_id.setter
def negative_keyword_list_id(self, value):
self._parent_id = value
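
# Usage sketch: assembling a campaign-level negative keyword for a bulk
# upload. The campaign id, keyword text, and output path are illustrative,
# and BulkFileWriter is assumed to expose the write_entity()/close()
# interface referenced in the docstrings above.
#
#     from bingads.v12.bulk import BulkFileWriter
#
#     negative_keyword = _CAMPAIGN_OBJECT_FACTORY_V12.create('NegativeKeyword')
#     negative_keyword.Text = 'free'
#     negative_keyword.MatchType = 'Phrase'
#
#     entity = BulkCampaignNegativeKeyword(
#         status='Active',
#         negative_keyword=negative_keyword,
#         campaign_id=1234567890,  # hypothetical campaign id
#     )
#
#     writer = BulkFileWriter('upload.csv')
#     writer.write_entity(entity)
#     writer.close()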
|
from pyvisa import VisaIOError, ResourceManager
import numpy as np
from pylabnet.utils.logging.logger import LogHandler
class Driver:
def __init__(self, gpib_address=None, logger=None):
"""Instantiate driver class.
        :param gpib_address: (str) GPIB address of the power meter,
            e.g. 'GPIB0::12::INSTR'. Available addresses can be listed with
                rm = pyvisa.ResourceManager()
                rm.list_resources()
        :param logger: An instance of a LogClient.
"""
# Instantiate log.
self.log = LogHandler(logger=logger)
self.rm = ResourceManager()
try:
self.device = self.rm.open_resource(gpib_address)
device_id = self.device.query('*IDN?')
self.log.info(f"Successfully connected to {device_id}.")
            # Optionally set a more forgiving timeout of 10 s (default: 2 s):
# self.device.timeout = 10000
except VisaIOError:
self.log.error(f"Connection to {gpib_address} failed.")
def get_power(self, channel):
""" Returns the current power in watts on a desired channel
:param channel: (int) channel to read power of (either 1 or 2)
:return: (float) power in watts
"""
power = self.device.query(f':POW{channel}:VAL?')
return float(power)
def get_wavelength(self, channel):
""" Returns the current wavelength in nm for the desired channel
:param channel: (int) channel to read wavelength of
:return: (int) wavelength
"""
wavelength = self.device.query(f':WAVEL{channel}:VAL?')
return int(float(wavelength))
def get_range(self, channel):
""" Returns the current power range for the channel
:param channel: (int) channel to read range of
:return: (str) range
"""
pr = self.device.query(f':PRANGE{channel}?')
return pr
def set_wavelength(self, channel, wavelength):
""" Sets the wavelength
:param channel: (int) channel to set wavelength of
"""
self.device.write(f':WAVEL{channel}:VAL {wavelength}')
def set_range(self, channel, p_range):
""" Sets the range
:param channel: (int) channel to set range of
:param p_range: (str) range string identifier, can be anything in
'AUTO', 'R1NW', 'R10NW', 'R100NW', 'R1UW', 'R10UW', 'R100UW', 'R1MW',
'R10MW', 'R100MW', 'R1W', 'R10W', 'R100W', 'R1KW'
"""
self.device.write(f':PRANGE{channel} {p_range}')
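
if __name__ == '__main__':
    # Minimal smoke test. The GPIB address below is an illustrative
    # placeholder, and passing logger=None assumes LogHandler tolerates
    # running without a connected log client.
    pm = Driver(gpib_address='GPIB0::12::INSTR', logger=None)
    print(f'Channel 1 power: {pm.get_power(1)} W')
    print(f'Channel 1 wavelength: {pm.get_wavelength(1)} nm')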
|
#! /usr/bin/python3
import sys, os, time
from typing import Tuple
Input = str
def part1(puzzle_input: Input) -> int:
return 1
def part2(puzzle_input: Input) -> int:
return 2
def solve(puzzle_input: Input) -> Tuple[int,int]:
return (part1(puzzle_input), part2(puzzle_input))
def get_input(file_path: str) -> Input:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path) as file:
return file.read().strip()
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main()
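
# Usage sketch (the script and input file names are illustrative):
#     ./solve.py input.txt
# prints the two placeholder answers and the elapsed time; part1/part2 are
# stubs meant to be filled in per puzzle.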
|
import fresh_tomatoes
import media
# 1st movie
Wings_of_Liberty = media.Movie("StarCraft II: Wings of Liberty",
"The storyline of StarCraft II takes place"
"four years after StarCraft: Brood War.",
"http://wallpaperswide.com/download/starcraft_ii__wings_of_liberty-wallpaper-640x480.jpg", # NOQA
"https://www.youtube.com/watch?v=XZd9n373vf4")
# 2nd movie
Heart_of_the_Swarm = media.Movie("StarCraft II: Heart of the Swarm",
"Heart of the Swarm is a sequel"
"to Wings of Liberty.",
"https://d3tg06jbotvai2.cloudfront.net/game_tetiere/img/heart-of-the-swarm-img4.jpg", # NOQA
"https://www.youtube.com/watch?v=MVbeoSPqRs4")
# 3rd movie
Legacy_of_the_Void = media.Movie("StarCraft II: Legacy of the Void",
"Legacy of the Void is a sequel"
"to Heart of the Swarm.",
"https://cdn.wccftech.com/wp-content/uploads/2015/03/StarCraft-II-Legacy-Of-The-Void.jpg", # NOQA
"https://www.youtube.com/watch?v=M_XwzBMTJaM")
# 4th movie
Hearthstone = media.Movie("Hearthstone",
"Hearthstone is a free-to-play "
"online collectible card video game.",
"https://wallpapercave.com/wp/wp1840268.jpg",
"https://www.youtube.com/watch?v=vPguoeYTvMI")
# 5th movie
World_of_Warcraft = media.Movie("World of Warcraft",
"World of Warcraft (WoW) is a massively "
"multiplayer online role-playing game.",
"https://images5.alphacoders.com/879/879575.jpg", # NOQA
"https://www.youtube.com/watch?v=jSJr3dXZfcg")
# 6th movie
Diablo_III = media.Movie("Diablo III",
"Diablo III is a hack and slash "
"action role-playing game (ARPG).",
"https://desktopwalls.net/wp-content/uploads/2015/08/Diablo%203%20Reaper%20of%20Souls%20Desktop%20Wallpaper.jpg", # NOQA
"https://www.youtube.com/watch?v=Cb7QJwQ58T0&has_verified=1") # NOQA
# movie list
movies = [Wings_of_Liberty, Heart_of_the_Swarm,
Legacy_of_the_Void, Hearthstone,
World_of_Warcraft, Diablo_III]
# call function
fresh_tomatoes.open_movies_page(movies)
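# open_movies_page is assumed (per the course-style fresh_tomatoes helper,
# which is not defined in this file) to render the list to an HTML page and
# open it in the default browser.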
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import rospy, rostest
import rosparam
import optparse
import sys
import threading
from bondpy import bondpy
from diagnostic_msgs.srv import AddDiagnostics
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
PKG = 'diagnostic_aggregator'
class TestAddAnalyzer(unittest.TestCase):
def __init__(self, *args):
super(TestAddAnalyzer, self).__init__(*args)
rospy.init_node('test_add_analyzer')
self.namespace = rospy.get_name()
paramlist = rosparam.load_file(rospy.myargv()[1])
# expect to receive these paths in the added analyzers
        self.expected = [paramlist[0][1] + analyzer['path'] for name, analyzer in paramlist[0][0]['analyzers'].items()]
self._mutex = threading.Lock()
self.agg_msgs = {}
# put parameters in the node namespace so they can be read by the aggregator
for params, ns in paramlist:
rosparam.upload_params(rospy.get_name() + '/' + ns, params)
rospy.Subscriber('/diagnostics_agg', DiagnosticArray, self.agg_cb)
self.pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1)
def agg_cb(self, msg):
with self._mutex:
for stat in msg.status:
self.agg_msgs[stat.name] = stat
def add_analyzer(self):
"""Start a bond to the aggregator
"""
namespace = rospy.resolve_name(rospy.get_name())
self.bond = bondpy.Bond("/diagnostics_agg/bond" + namespace, namespace)
self.bond.start()
rospy.wait_for_service('/diagnostics_agg/add_diagnostics', timeout=10)
add_diagnostics = rospy.ServiceProxy('/diagnostics_agg/add_diagnostics', AddDiagnostics)
print(self.namespace)
resp = add_diagnostics(load_namespace=self.namespace)
self.assert_(resp.success, 'Service call was unsuccessful: {0}'.format(resp.message))
def wait_for_agg(self):
self.agg_msgs = {}
while not self.agg_msgs and not rospy.is_shutdown():
rospy.sleep(rospy.Duration(3))
def test_add_agg(self):
self.wait_for_agg()
# confirm that the things we're going to add aren't there already
with self._mutex:
            agg_paths = [msg.name for name, msg in self.agg_msgs.items()]
self.assert_(not any(expected in agg_paths for expected in self.expected))
# add the new groups
self.add_analyzer()
arr = DiagnosticArray()
arr.header.stamp = rospy.get_rostime()
arr.status = [
DiagnosticStatus(name='primary', message='hello-primary'),
DiagnosticStatus(name='secondary', message='hello-secondary')
]
self.pub.publish(arr)
self.wait_for_agg()
# the new aggregator data should contain the extra paths. At this point
# the paths are probably still in the 'Other' group because the bond
# hasn't been fully formed
with self._mutex:
            agg_paths = [msg.name for name, msg in self.agg_msgs.items()]
self.assert_(all(expected in agg_paths for expected in self.expected))
rospy.sleep(rospy.Duration(5)) # wait a bit for the new items to move to the right group
arr.header.stamp = rospy.get_rostime()
self.pub.publish(arr) # publish again to get the correct groups to show OK
self.wait_for_agg()
        for name, msg in self.agg_msgs.items():
if name in self.expected: # should have just received messages on the analyzer
self.assert_(msg.message == 'OK')
        agg_paths = [msg.name for name, msg in self.agg_msgs.items()]
self.assert_(all(expected in agg_paths for expected in self.expected))
self.bond.shutdown()
rospy.sleep(rospy.Duration(5)) # wait a bit for the analyzers to unload
self.wait_for_agg()
# the aggregator data should no longer contain the paths once the bond is shut down
with self._mutex:
            agg_paths = [msg.name for name, msg in self.agg_msgs.items()]
self.assert_(not any(expected in agg_paths for expected in self.expected))
if __name__ == '__main__':
    print('SYS ARGS:', sys.argv)
rostest.run(PKG, sys.argv[0], TestAddAnalyzer, sys.argv)
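
# Run sketch: this node is normally started through rostest with a launch
# file that brings up the aggregator and passes the analyzer-parameter YAML
# as the node's first argument (the file name below is hypothetical):
#     rostest diagnostic_aggregator add_analyzer.test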
|
import docassemble.base.config
docassemble.base.config.load()
import docassemble.webapp.db_object
db = docassemble.webapp.db_object.init_sqlalchemy()
from docassemble.webapp.files import SavedFile
from docassemble.webapp.file_number import get_new_file_number
from docassemble.webapp.core.models import Shortener, Email, EmailAttachment
from docassemble.webapp.users.models import UserModel
import docassemble.webapp.worker
import sys
import email
import json
import re
from email.utils import parseaddr, parsedate, getaddresses
from time import mktime
import datetime
import mimetypes
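
# Behavior sketch of the stdlib helpers used below (values illustrative):
#     parseaddr('Jane Doe <jane@example.com>')
#         -> ('Jane Doe', 'jane@example.com')
#     getaddresses(['a@x.org, B <b@y.org>'])
#         -> [('', 'a@x.org'), ('B', 'b@y.org')]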
def main():
fp = open("/tmp/mail.log", "a")
#fp.write("The file is " + sys.argv[1] + "\n")
try:
        with open(sys.argv[1], 'r') as email_fp:
msg = email.message_from_file(email_fp)
except Exception as errMess:
fp.write("Failed to read e-mail message: " + str(errMess) + "\n")
sys.exit("Failed to read e-mail message")
raw_date = msg.get('Date', msg.get('Resent-Date', None))
addr_return_path = msg.get('Return-path', None)
addr_reply_to = msg.get('Reply-to', None)
addr_to = msg.get('Envelope-to', None)
addr_from = msg.get('From', msg.get('Sender', None))
subject = msg.get('Subject', None)
fp.write("Message to " + str(addr_to) + "\n")
#fp.write("From was " + str(addr_from) + "\n")
#fp.write("Subject was " + str(subject) + "\n")
to_recipients = list()
for recipient in getaddresses(msg.get_all('to', []) + msg.get_all('resent-to', [])):
to_recipients.append(dict(name=recipient[0], address=recipient[1]))
cc_recipients = list()
for recipient in getaddresses(msg.get_all('cc', []) + msg.get_all('resent-cc', [])):
cc_recipients.append(dict(name=recipient[0], address=recipient[1]))
recipients = list()
for recipient in getaddresses(msg.get_all('to', []) + msg.get_all('cc', []) + msg.get_all('resent-to', []) + msg.get_all('resent-cc', [])):
recipients.append(dict(name=recipient[0], address=recipient[1]))
if addr_to is None and len(recipients):
addr_to = recipients[0]['address']
#fp.write("recipients are " + str(recipients) + "\n")
if addr_to is not None:
#fp.write("parsed envelope-to: " + str(parseaddr(addr_to)) + "\n")
short_code = re.sub(r'@.*', '', parseaddr(addr_to)[1])
else:
short_code = None
#fp.write("short code is " + str(short_code) + "\n")
record = db.session.query(Shortener).filter_by(short=short_code).first()
if record is None:
fp.write("short code not found\n")
sys.exit("short code not found")
#fp.write("short code found\n")
#file_number = get_new_file_number(record.uid, 'email', yaml_file_name=record.filename)
##fp.write("file number is " + str(file_number) + "\n")
#saved_file_email = SavedFile(file_number, fix=True)
if addr_from is not None:
#fp.write("parsed from: " + str(parseaddr(addr_from)[1]) + "\n")
addr_from = dict(name=parseaddr(addr_from)[0], address=parseaddr(addr_from)[1])
else:
addr_from = dict(empty=True)
if addr_return_path is not None:
#fp.write("parsed return_path: " + str(parseaddr(addr_return_path)[1]) + "\n")
addr_return_path = dict(name=parseaddr(addr_return_path)[0], address=parseaddr(addr_return_path)[1])
else:
addr_return_path = dict(empty=True)
#fp.write("return_path is " + str(addr_return_path) + "\n")
if addr_reply_to is not None:
#fp.write("parsed reply-to: " + str(parseaddr(addr_reply_to)[1]) + "\n")
addr_reply_to = dict(name=parseaddr(addr_reply_to)[0], address=parseaddr(addr_reply_to)[1])
#fp.write("reply-to is " + str(addr_reply_to) + "\n")
else:
addr_reply_to = dict(empty=True)
#fp.write("reply-to is " + str(addr_reply_to) + "\n")
msg_current_time = datetime.datetime.now()
if raw_date is not None:
msg_date = datetime.datetime.fromtimestamp(mktime(parsedate(raw_date)))
#fp.write("msg_date is " + str(msg_date) + "\n")
else:
msg_date = msg_current_time
#fp.write("msg_date set to current time\n")
headers = list()
for item in msg.items():
headers.append([item[0], item[1]])
#fp.write("headers:\n" + json.dumps(headers) + "\n")
email_record = Email(short=short_code, to_addr=json.dumps(to_recipients), cc_addr=json.dumps(cc_recipients), from_addr=json.dumps(addr_from), reply_to_addr=json.dumps(addr_reply_to), return_path_addr=json.dumps(addr_return_path), subject=subject, datetime_message=msg_date, datetime_received=msg_current_time)
db.session.add(email_record)
db.session.commit()
save_attachment(record.uid, record.filename, 'headers.json', email_record.id, 0, 'application/json', 'json', json.dumps(headers))
counter = 1
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
filename = part.get_filename()
if part.get_content_type() == 'text/plain':
ext = '.txt'
else:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
ext = '.bin'
if filename:
filename = '%03d-%s' % (counter, secure_filename(filename))
else:
filename = '%03d-attachment%s' % (counter, ext)
#fp.write("Filename is " + str(filename) + "\n")
#fp.write("Content type is " + str(part.get_content_type()) + "\n")
    real_filename = re.sub(r'^[0-9][0-9][0-9]-', r'', filename)
real_ext = re.sub(r'^\.', r'', ext)
save_attachment(record.uid, record.filename, real_filename, email_record.id, counter, part.get_content_type(), real_ext, part.get_payload(decode=True))
counter += 1
fp.close()
user = None
if record.user_id is not None:
user = db.session.query(UserModel).options(db.joinedload('roles')).filter_by(id=record.user_id).first()
if user is None:
user_info = dict(email=None, the_user_id='t' + str(record.temp_user_id), theid=record.temp_user_id, roles=list())
else:
role_list = [role.name for role in user.roles]
if len(role_list) == 0:
role_list = ['user']
user_info = dict(email=user.email, roles=role_list, the_user_id=user.id, theid=user.id, firstname=user.first_name, lastname=user.last_name, nickname=user.nickname, country=user.country, subdivisionfirst=user.subdivisionfirst, subdivisionsecond=user.subdivisionsecond, subdivisionthird=user.subdivisionthird, organization=user.organization)
result = docassemble.webapp.worker.background_action.delay(record.filename, user_info, record.uid, None, None, None, dict(action='incoming_email', arguments=dict(id=email_record.id)), extra=None)
def save_attachment(uid, yaml_filename, filename, email_id, index, content_type, extension, content):
att_file_number = get_new_file_number(uid, filename, yaml_file_name=yaml_filename)
    attachment_record = EmailAttachment(email_id=email_id, index=index, content_type=content_type, extension=extension, upload=att_file_number)
db.session.add(attachment_record)
db.session.commit()
saved_file_attachment = SavedFile(att_file_number, extension=extension)
saved_file_attachment.write_content(content)
saved_file_attachment.finalize()
def secure_filename(filename):
filename = re.sub(r'[^A-Za-z0-9\_\-\. ]+', r'_', filename)
return filename.strip('_')
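def example_short_code():
    """A small sketch (not called by main) of the short-code extraction used
    above: the local part of the Envelope-to address is the Shortener key.
    The sample address is illustrative.
    """
    sample = 'Docassemble <kgjeir@help.example.com>'
    return re.sub(r'@.*', '', parseaddr(sample)[1])  # 'kgjeir'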
main()
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and code used in the core part of AlphaFold.
The structure generation code is in 'folding.py'.
"""
import functools
from alphafold.common import residue_constants
from alphafold.model import all_atom
from alphafold.model import common_modules
from alphafold.model import folding
from alphafold.model import layer_stack
from alphafold.model import lddt
from alphafold.model import mapping
from alphafold.model import prng
from alphafold.model import quat_affine
from alphafold.model import utils
import haiku as hk
import jax
import jax.numpy as jnp
def softmax_cross_entropy(logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels."""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return jnp.asarray(loss)
def sigmoid_cross_entropy(logits, labels):
"""Computes sigmoid cross entropy given logits and multiple class labels."""
log_p = jax.nn.log_sigmoid(logits)
# log(1 - sigmoid(x)) = log_sigmoid(-x), the latter is more numerically stable
log_not_p = jax.nn.log_sigmoid(-logits)
loss = -labels * log_p - (1. - labels) * log_not_p
return jnp.asarray(loss)
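def _log_not_p_stability_check(x=30.0):
  """A minimal sketch (not part of AlphaFold) of the stability note above:
  for large x, the naive log(1 - sigmoid(x)) underflows to log(0) = -inf,
  while log_sigmoid(-x) stays finite (approximately -x)."""
  naive = jnp.log(1. - jax.nn.sigmoid(x))
  stable = jax.nn.log_sigmoid(-x)
  return naive, stable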
def apply_dropout(*, tensor, safe_key, rate, is_training, broadcast_dim=None):
"""Applies dropout to a tensor."""
if is_training and rate != 0.0:
shape = list(tensor.shape)
if broadcast_dim is not None:
shape[broadcast_dim] = 1
keep_rate = 1.0 - rate
keep = jax.random.bernoulli(safe_key.get(), keep_rate, shape=shape)
return keep * tensor / keep_rate
else:
return tensor
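def _shared_dropout_sketch():
  """A hedged sketch (not part of AlphaFold) of broadcast_dim in
  apply_dropout above: setting shape[broadcast_dim] = 1 samples one
  keep/drop decision along that axis and broadcasts it, so broadcast_dim=0
  shares the same dropout mask across all rows."""
  key = jax.random.PRNGKey(0)
  tensor = jnp.ones((4, 3))
  keep_rate = 0.9
  shape = list(tensor.shape)
  shape[0] = 1
  keep = jax.random.bernoulli(key, keep_rate, shape=shape)  # shape [1, 3]
  return keep * tensor / keep_rate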
def dropout_wrapper(module,
input_act,
mask,
safe_key,
global_config,
output_act=None,
is_training=True,
**kwargs):
"""Applies module + dropout + residual update."""
if output_act is None:
output_act = input_act
gc = global_config
residual = module(input_act, mask, is_training=is_training, **kwargs)
dropout_rate = 0.0 if gc.deterministic else module.config.dropout_rate
if module.config.shared_dropout:
if module.config.orientation == 'per_row':
broadcast_dim = 0
else:
broadcast_dim = 1
else:
broadcast_dim = None
residual = apply_dropout(tensor=residual,
safe_key=safe_key,
rate=dropout_rate,
is_training=is_training,
broadcast_dim=broadcast_dim)
new_act = output_act + residual
return new_act
def create_extra_msa_feature(batch):
"""Expand extra_msa into 1hot and concat with other extra msa features.
We do this as late as possible as the one_hot extra msa can be very large.
Arguments:
batch: a dictionary with the following keys:
* 'extra_msa': [N_extra_seq, N_res] MSA that wasn't selected as a cluster
centre. Note, that this is not one-hot encoded.
* 'extra_has_deletion': [N_extra_seq, N_res] Whether there is a deletion to
the left of each position in the extra MSA.
* 'extra_deletion_value': [N_extra_seq, N_res] The number of deletions to
the left of each position in the extra MSA.
Returns:
Concatenated tensor of extra MSA features.
"""
# 23 = 20 amino acids + 'X' for unknown + gap + bert mask
msa_1hot = jax.nn.one_hot(batch['extra_msa'], 23)
msa_feat = [msa_1hot,
jnp.expand_dims(batch['extra_has_deletion'], axis=-1),
jnp.expand_dims(batch['extra_deletion_value'], axis=-1)]
return jnp.concatenate(msa_feat, axis=-1)
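def _extra_msa_feature_shape_check(num_extra_seq=5, num_res=7):
  """An illustrative shape check (not part of AlphaFold) for
  create_extra_msa_feature: 23 one-hot channels plus the two deletion
  features give 25 channels."""
  batch = {
      'extra_msa': jnp.zeros((num_extra_seq, num_res), dtype=jnp.int32),
      'extra_has_deletion': jnp.zeros((num_extra_seq, num_res)),
      'extra_deletion_value': jnp.zeros((num_extra_seq, num_res)),
  }
  feat = create_extra_msa_feature(batch)
  assert feat.shape == (num_extra_seq, num_res, 25)
  return feat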
class AlphaFoldIteration(hk.Module):
"""A single recycling iteration of AlphaFold architecture.
Computes ensembled (averaged) representations from the provided features.
These representations are then passed to the various heads
that have been requested by the configuration file. Each head also returns a
loss which is combined as a weighted sum to produce the total loss.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 3-22
"""
def __init__(self, config, global_config, name='alphafold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
ensembled_batch,
non_ensembled_batch,
is_training,
compute_loss=False,
ensemble_representations=False,
return_representations=False):
num_ensemble = jnp.asarray(ensembled_batch['seq_length'].shape[0])
if not ensemble_representations:
assert ensembled_batch['seq_length'].shape[0] == 1
def slice_batch(i):
b = {k: v[i] for k, v in ensembled_batch.items()}
b.update(non_ensembled_batch)
return b
# Compute representations for each batch element and average.
evoformer_module = EmbeddingsAndEvoformer(
self.config.embeddings_and_evoformer, self.global_config)
batch0 = slice_batch(0)
representations = evoformer_module(batch0, is_training)
    # MSA representations are not ensembled, so we don't pass the MSA
    # tensor into the ensembling loop.
msa_representation = representations['msa']
del representations['msa']
# Average the representations (except MSA) over the batch dimension.
if ensemble_representations:
def body(x):
"""Add one element to the representations ensemble."""
i, current_representations = x
feats = slice_batch(i)
representations_update = evoformer_module(
feats, is_training)
new_representations = {}
for k in current_representations:
new_representations[k] = (
current_representations[k] + representations_update[k])
return i+1, new_representations
if hk.running_init():
# When initializing the Haiku module, run one iteration of the
# while_loop to initialize the Haiku modules used in `body`.
_, representations = body((1, representations))
else:
_, representations = hk.while_loop(
lambda x: x[0] < num_ensemble,
body,
(1, representations))
for k in representations:
if k != 'msa':
representations[k] /= num_ensemble.astype(representations[k].dtype)
representations['msa'] = msa_representation
batch = batch0 # We are not ensembled from here on.
heads = {}
for head_name, head_config in sorted(self.config.heads.items()):
if not head_config.weight:
continue # Do not instantiate zero-weight heads.
head_factory = {
'masked_msa': MaskedMsaHead,
'distogram': DistogramHead,
'structure_module': functools.partial(
folding.StructureModule, compute_loss=compute_loss),
'predicted_lddt': PredictedLDDTHead,
'predicted_aligned_error': PredictedAlignedErrorHead,
'experimentally_resolved': ExperimentallyResolvedHead,
}[head_name]
heads[head_name] = (head_config,
head_factory(head_config, self.global_config))
total_loss = 0.
ret = {}
ret['representations'] = representations
def loss(module, head_config, ret, name, filter_ret=True):
if filter_ret:
value = ret[name]
else:
value = ret
loss_output = module.loss(value, batch)
ret[name].update(loss_output)
loss = head_config.weight * ret[name]['loss']
return loss
for name, (head_config, module) in heads.items():
# Skip PredictedLDDTHead and PredictedAlignedErrorHead until
# StructureModule is executed.
if name in ('predicted_lddt', 'predicted_aligned_error'):
continue
else:
ret[name] = module(representations, batch, is_training)
if 'representations' in ret[name]:
# Extra representations from the head. Used by the structure module
# to provide activations for the PredictedLDDTHead.
representations.update(ret[name].pop('representations'))
if compute_loss:
total_loss += loss(module, head_config, ret, name)
if self.config.heads.get('predicted_lddt.weight', 0.0):
# Add PredictedLDDTHead after StructureModule executes.
name = 'predicted_lddt'
# Feed all previous results to give access to structure_module result.
head_config, module = heads[name]
ret[name] = module(representations, batch, is_training)
if compute_loss:
total_loss += loss(module, head_config, ret, name, filter_ret=False)
if ('predicted_aligned_error' in self.config.heads
and self.config.heads.get('predicted_aligned_error.weight', 0.0)):
# Add PredictedAlignedErrorHead after StructureModule executes.
name = 'predicted_aligned_error'
# Feed all previous results to give access to structure_module result.
head_config, module = heads[name]
ret[name] = module(representations, batch, is_training)
if compute_loss:
total_loss += loss(module, head_config, ret, name, filter_ret=False)
if compute_loss:
return ret, total_loss
else:
return ret
class AlphaFold(hk.Module):
"""AlphaFold model with recycling.
Jumper et al. (2021) Suppl. Alg. 2 "Inference"
"""
def __init__(self, config, name='alphafold'):
super().__init__(name=name)
self.config = config
self.global_config = config.global_config
def __call__(
self,
batch,
is_training,
compute_loss=False,
ensemble_representations=False,
return_representations=False):
"""Run the AlphaFold model.
Arguments:
batch: Dictionary with inputs to the AlphaFold model.
is_training: Whether the system is in training or inference mode.
compute_loss: Whether to compute losses (requires extra features
to be present in the batch and knowing the true structure).
ensemble_representations: Whether to use ensembling of representations.
return_representations: Whether to also return the intermediate
representations.
Returns:
When compute_loss is True:
a tuple of loss and output of AlphaFoldIteration.
When compute_loss is False:
just output of AlphaFoldIteration.
The output of AlphaFoldIteration is a nested dictionary containing
predictions from the various heads.
"""
impl = AlphaFoldIteration(self.config, self.global_config)
batch_size, num_residues = batch['aatype'].shape
def get_prev(ret):
new_prev = {
'prev_pos':
ret['structure_module']['final_atom_positions'],
'prev_msa_first_row': ret['representations']['msa_first_row'],
'prev_pair': ret['representations']['pair'],
}
return jax.tree_map(jax.lax.stop_gradient, new_prev)
def do_call(prev,
recycle_idx,
compute_loss=compute_loss):
if self.config.resample_msa_in_recycling:
num_ensemble = batch_size // (self.config.num_recycle + 1)
def slice_recycle_idx(x):
start = recycle_idx * num_ensemble
size = num_ensemble
return jax.lax.dynamic_slice_in_dim(x, start, size, axis=0)
ensembled_batch = jax.tree_map(slice_recycle_idx, batch)
else:
num_ensemble = batch_size
ensembled_batch = batch
non_ensembled_batch = jax.tree_map(lambda x: x, prev)
return impl(
ensembled_batch=ensembled_batch,
non_ensembled_batch=non_ensembled_batch,
is_training=is_training,
compute_loss=compute_loss,
ensemble_representations=ensemble_representations)
if self.config.num_recycle:
emb_config = self.config.embeddings_and_evoformer
prev = {
'prev_pos': jnp.zeros(
[num_residues, residue_constants.atom_type_num, 3]),
'prev_msa_first_row': jnp.zeros(
[num_residues, emb_config.msa_channel]),
'prev_pair': jnp.zeros(
[num_residues, num_residues, emb_config.pair_channel]),
}
if 'num_iter_recycling' in batch:
# Training time: num_iter_recycling is in batch.
# The value for each ensemble batch is the same, so arbitrarily taking
# 0-th.
num_iter = batch['num_iter_recycling'][0]
# Add insurance that we will not run more
# recyclings than the model is configured to run.
num_iter = jnp.minimum(num_iter, self.config.num_recycle)
else:
# Eval mode or tests: use the maximum number of iterations.
num_iter = self.config.num_recycle
body = lambda x: (x[0] + 1, # pylint: disable=g-long-lambda
get_prev(do_call(x[1], recycle_idx=x[0],
compute_loss=False)))
if hk.running_init():
# When initializing the Haiku module, run one iteration of the
# while_loop to initialize the Haiku modules used in `body`.
_, prev = body((0, prev))
else:
_, prev = hk.while_loop(
lambda x: x[0] < num_iter,
body,
(0, prev))
else:
prev = {}
num_iter = 0
ret = do_call(prev=prev, recycle_idx=num_iter)
if compute_loss:
ret = ret[0], [ret[1]]
if not return_representations:
del (ret[0] if compute_loss else ret)['representations'] # pytype: disable=unsupported-operands
return ret
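def _recycling_loop_sketch(num_iter=3):
  """A stripped-down sketch (illustrative, not AlphaFold's API) of the
  recycling pattern above: iterate a counted while_loop, blocking gradients
  between iterations, then make one final differentiable call."""
  def do_call(prev):
    return {'x': prev['x'] + 1.}
  def body(state):
    i, prev = state
    return i + 1, jax.tree_map(jax.lax.stop_gradient, do_call(prev))
  _, prev = jax.lax.while_loop(
      lambda state: state[0] < num_iter, body,
      (jnp.asarray(0), {'x': jnp.zeros(())}))
  return do_call(prev)  # only the final call carries gradients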
class TemplatePairStack(hk.Module):
"""Pair stack for the templates.
Jumper et al. (2021) Suppl. Alg. 16 "TemplatePairStack"
"""
def __init__(self, config, global_config, name='template_pair_stack'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, pair_act, pair_mask, is_training, safe_key=None):
"""Builds TemplatePairStack module.
Arguments:
pair_act: Pair activations for single template, shape [N_res, N_res, c_t].
pair_mask: Pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
safe_key: Safe key object encapsulating the random number generation key.
Returns:
Updated pair_act, shape [N_res, N_res, c_t].
"""
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
gc = self.global_config
c = self.config
if not c.num_block:
return pair_act
def block(x):
"""One block of the template pair stack."""
pair_act, safe_key = x
dropout_wrapper_fn = functools.partial(
dropout_wrapper, is_training=is_training, global_config=gc)
safe_key, *sub_keys = safe_key.split(6)
sub_keys = iter(sub_keys)
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
pair_act,
pair_mask,
next(sub_keys))
pair_act = dropout_wrapper_fn(
Transition(c.pair_transition, gc, name='pair_transition'),
pair_act,
pair_mask,
next(sub_keys))
return pair_act, safe_key
if gc.use_remat:
block = hk.remat(block)
res_stack = layer_stack.layer_stack(c.num_block)(block)
pair_act, safe_key = res_stack((pair_act, safe_key))
return pair_act
class Transition(hk.Module):
"""Transition layer.
Jumper et al. (2021) Suppl. Alg. 9 "MSATransition"
Jumper et al. (2021) Suppl. Alg. 15 "PairTransition"
"""
def __init__(self, config, global_config, name='transition_block'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, mask, is_training=True):
"""Builds Transition module.
Arguments:
act: A tensor of queries of size [batch_size, N_res, N_channel].
mask: A tensor denoting the mask of size [batch_size, N_res].
is_training: Whether the module is in training mode.
Returns:
A float32 tensor of size [batch_size, N_res, N_channel].
"""
_, _, nc = act.shape
num_intermediate = int(nc * self.config.num_intermediate_factor)
mask = jnp.expand_dims(mask, axis=-1)
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='input_layer_norm')(
act)
transition_module = hk.Sequential([
common_modules.Linear(
num_intermediate,
initializer='relu',
name='transition1'), jax.nn.relu,
common_modules.Linear(
nc,
initializer=utils.final_init(self.global_config),
name='transition2')
])
act = mapping.inference_subbatch(
transition_module,
self.global_config.subbatch_size,
batched_args=[act],
nonbatched_args=[],
low_memory=not is_training)
return act
def glorot_uniform():
return hk.initializers.VarianceScaling(scale=1.0,
mode='fan_avg',
distribution='uniform')
class Attention(hk.Module):
"""Multihead attention."""
def __init__(self, config, global_config, output_dim, name='attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.output_dim = output_dim
def __call__(self, q_data, m_data, bias, nonbatched_bias=None):
"""Builds Attention module.
Arguments:
q_data: A tensor of queries, shape [batch_size, N_queries, q_channels].
m_data: A tensor of memories from which the keys and values are
projected, shape [batch_size, N_keys, m_channels].
bias: A bias for the attention, shape [batch_size, N_queries, N_keys].
nonbatched_bias: Shared bias, shape [N_queries, N_keys].
Returns:
A float32 tensor of shape [batch_size, N_queries, output_dim].
"""
# Sensible default for when the config keys are missing
key_dim = self.config.get('key_dim', int(q_data.shape[-1]))
value_dim = self.config.get('value_dim', int(m_data.shape[-1]))
num_head = self.config.num_head
assert key_dim % num_head == 0
assert value_dim % num_head == 0
key_dim = key_dim // num_head
value_dim = value_dim // num_head
q_weights = hk.get_parameter(
'query_w', shape=(q_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
k_weights = hk.get_parameter(
'key_w', shape=(m_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
v_weights = hk.get_parameter(
'value_w', shape=(m_data.shape[-1], num_head, value_dim),
init=glorot_uniform())
q = jnp.einsum('bqa,ahc->bqhc', q_data, q_weights) * key_dim**(-0.5)
k = jnp.einsum('bka,ahc->bkhc', m_data, k_weights)
v = jnp.einsum('bka,ahc->bkhc', m_data, v_weights)
logits = jnp.einsum('bqhc,bkhc->bhqk', q, k) + bias
if nonbatched_bias is not None:
logits += jnp.expand_dims(nonbatched_bias, axis=0)
weights = jax.nn.softmax(logits)
weighted_avg = jnp.einsum('bhqk,bkhc->bqhc', weights, v)
if self.global_config.zero_init:
init = hk.initializers.Constant(0.0)
else:
init = glorot_uniform()
if self.config.gating:
gating_weights = hk.get_parameter(
'gating_w',
shape=(q_data.shape[-1], num_head, value_dim),
init=hk.initializers.Constant(0.0))
gating_bias = hk.get_parameter(
'gating_b',
shape=(num_head, value_dim),
init=hk.initializers.Constant(1.0))
gate_values = jnp.einsum('bqc, chv->bqhv', q_data,
gating_weights) + gating_bias
gate_values = jax.nn.sigmoid(gate_values)
weighted_avg *= gate_values
o_weights = hk.get_parameter(
'output_w', shape=(num_head, value_dim, self.output_dim),
init=init)
o_bias = hk.get_parameter('output_b', shape=(self.output_dim,),
init=hk.initializers.Constant(0.0))
output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias
return output
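def _attention_einsum_shapes(b=2, q_len=4, k_len=6, h=8, c=16):
  """A hedged shape sketch (not part of AlphaFold) of the einsum plumbing
  in Attention above: b = batch, q/k = query/key length, h = heads,
  c = per-head channels."""
  q = jnp.ones((b, q_len, h, c)) * c ** (-0.5)
  k = jnp.ones((b, k_len, h, c))
  v = jnp.ones((b, k_len, h, c))
  logits = jnp.einsum('bqhc,bkhc->bhqk', q, k)     # [b, h, q, k]
  weights = jax.nn.softmax(logits)
  out = jnp.einsum('bhqk,bkhc->bqhc', weights, v)  # [b, q, h, c]
  assert out.shape == (b, q_len, h, c)
  return out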
class GlobalAttention(hk.Module):
"""Global attention.
Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" lines 2-7
"""
def __init__(self, config, global_config, output_dim, name='attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.output_dim = output_dim
def __call__(self, q_data, m_data, q_mask, bias):
"""Builds GlobalAttention module.
Arguments:
q_data: A tensor of queries with size [batch_size, N_queries,
q_channels]
      m_data: A tensor of memories from which the keys and values are
        projected. Size [batch_size, N_keys, m_channels]
q_mask: A binary mask for q_data with zeros in the padded sequence
elements and ones otherwise. Size [batch_size, N_queries, q_channels]
(or broadcastable to this shape).
bias: A bias for the attention.
Returns:
A float32 tensor of size [batch_size, N_queries, output_dim].
"""
# Sensible default for when the config keys are missing
key_dim = self.config.get('key_dim', int(q_data.shape[-1]))
value_dim = self.config.get('value_dim', int(m_data.shape[-1]))
num_head = self.config.num_head
assert key_dim % num_head == 0
assert value_dim % num_head == 0
key_dim = key_dim // num_head
value_dim = value_dim // num_head
q_weights = hk.get_parameter(
'query_w', shape=(q_data.shape[-1], num_head, key_dim),
init=glorot_uniform())
k_weights = hk.get_parameter(
'key_w', shape=(m_data.shape[-1], key_dim),
init=glorot_uniform())
v_weights = hk.get_parameter(
'value_w', shape=(m_data.shape[-1], value_dim),
init=glorot_uniform())
v = jnp.einsum('bka,ac->bkc', m_data, v_weights)
q_avg = utils.mask_mean(q_mask, q_data, axis=1)
q = jnp.einsum('ba,ahc->bhc', q_avg, q_weights) * key_dim**(-0.5)
k = jnp.einsum('bka,ac->bkc', m_data, k_weights)
bias = (1e9 * (q_mask[:, None, :, 0] - 1.))
logits = jnp.einsum('bhc,bkc->bhk', q, k) + bias
weights = jax.nn.softmax(logits)
weighted_avg = jnp.einsum('bhk,bkc->bhc', weights, v)
if self.global_config.zero_init:
init = hk.initializers.Constant(0.0)
else:
init = glorot_uniform()
o_weights = hk.get_parameter(
'output_w', shape=(num_head, value_dim, self.output_dim),
init=init)
o_bias = hk.get_parameter('output_b', shape=(self.output_dim,),
init=hk.initializers.Constant(0.0))
if self.config.gating:
gating_weights = hk.get_parameter(
'gating_w',
shape=(q_data.shape[-1], num_head, value_dim),
init=hk.initializers.Constant(0.0))
gating_bias = hk.get_parameter(
'gating_b',
shape=(num_head, value_dim),
init=hk.initializers.Constant(1.0))
gate_values = jnp.einsum('bqc, chv->bqhv', q_data, gating_weights)
gate_values = jax.nn.sigmoid(gate_values + gating_bias)
weighted_avg = weighted_avg[:, None] * gate_values
output = jnp.einsum('bqhc,hco->bqo', weighted_avg, o_weights) + o_bias
else:
output = jnp.einsum('bhc,hco->bo', weighted_avg, o_weights) + o_bias
output = output[:, None]
return output
class MSARowAttentionWithPairBias(hk.Module):
"""MSA per-row attention biased by the pair representation.
Jumper et al. (2021) Suppl. Alg. 7 "MSARowAttentionWithPairBias"
"""
def __init__(self, config, global_config,
name='msa_row_attention_with_pair_bias'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
pair_act,
is_training=False):
"""Builds MSARowAttentionWithPairBias module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
pair_act: [N_res, N_res, c_z] pair representation.
is_training: Whether the module is in training mode.
Returns:
Update to msa_act, shape [N_seq, N_res, c_m].
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_row'
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
pair_act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='feat_2d_norm')(
pair_act)
init_factor = 1. / jnp.sqrt(int(pair_act.shape[-1]))
weights = hk.get_parameter(
'feat_2d_weights',
shape=(pair_act.shape[-1], c.num_head),
init=hk.initializers.RandomNormal(stddev=init_factor))
nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights)
attn_mod = Attention(
c, self.global_config, msa_act.shape[-1])
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, bias],
nonbatched_args=[nonbatched_bias],
low_memory=not is_training)
return msa_act
class MSAColumnAttention(hk.Module):
"""MSA per-column attention.
Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention"
"""
def __init__(self, config, global_config, name='msa_column_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
is_training=False):
"""Builds MSAColumnAttention module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
is_training: Whether the module is in training mode.
Returns:
Update to msa_act, shape [N_seq, N_res, c_m]
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_column'
msa_act = jnp.swapaxes(msa_act, -2, -3)
msa_mask = jnp.swapaxes(msa_mask, -1, -2)
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
attn_mod = Attention(
c, self.global_config, msa_act.shape[-1])
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, bias],
nonbatched_args=[],
low_memory=not is_training)
msa_act = jnp.swapaxes(msa_act, -2, -3)
return msa_act
class MSAColumnGlobalAttention(hk.Module):
"""MSA per-column global attention.
Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention"
"""
def __init__(self, config, global_config, name='msa_column_global_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
msa_act,
msa_mask,
is_training=False):
"""Builds MSAColumnGlobalAttention module.
Arguments:
msa_act: [N_seq, N_res, c_m] MSA representation.
msa_mask: [N_seq, N_res] mask of non-padded regions.
is_training: Whether the module is in training mode.
Returns:
Update to msa_act, shape [N_seq, N_res, c_m].
"""
c = self.config
assert len(msa_act.shape) == 3
assert len(msa_mask.shape) == 2
assert c.orientation == 'per_column'
msa_act = jnp.swapaxes(msa_act, -2, -3)
msa_mask = jnp.swapaxes(msa_mask, -1, -2)
bias = (1e9 * (msa_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
msa_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
msa_act)
attn_mod = GlobalAttention(
c, self.global_config, msa_act.shape[-1],
name='attention')
# [N_seq, N_res, 1]
msa_mask = jnp.expand_dims(msa_mask, axis=-1)
msa_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[msa_act, msa_act, msa_mask, bias],
nonbatched_args=[],
low_memory=not is_training)
msa_act = jnp.swapaxes(msa_act, -2, -3)
return msa_act
class TriangleAttention(hk.Module):
"""Triangle Attention.
Jumper et al. (2021) Suppl. Alg. 13 "TriangleAttentionStartingNode"
Jumper et al. (2021) Suppl. Alg. 14 "TriangleAttentionEndingNode"
"""
def __init__(self, config, global_config, name='triangle_attention'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, pair_act, pair_mask, is_training=False):
"""Builds TriangleAttention module.
Arguments:
pair_act: [N_res, N_res, c_z] pair activations tensor
pair_mask: [N_res, N_res] mask of non-padded regions in the tensor.
is_training: Whether the module is in training mode.
Returns:
Update to pair_act, shape [N_res, N_res, c_z].
"""
c = self.config
assert len(pair_act.shape) == 3
assert len(pair_mask.shape) == 2
assert c.orientation in ['per_row', 'per_column']
if c.orientation == 'per_column':
pair_act = jnp.swapaxes(pair_act, -2, -3)
pair_mask = jnp.swapaxes(pair_mask, -1, -2)
bias = (1e9 * (pair_mask - 1.))[:, None, None, :]
assert len(bias.shape) == 4
pair_act = hk.LayerNorm(
axis=[-1], create_scale=True, create_offset=True, name='query_norm')(
pair_act)
init_factor = 1. / jnp.sqrt(int(pair_act.shape[-1]))
weights = hk.get_parameter(
'feat_2d_weights',
shape=(pair_act.shape[-1], c.num_head),
init=hk.initializers.RandomNormal(stddev=init_factor))
nonbatched_bias = jnp.einsum('qkc,ch->hqk', pair_act, weights)
attn_mod = Attention(
c, self.global_config, pair_act.shape[-1])
pair_act = mapping.inference_subbatch(
attn_mod,
self.global_config.subbatch_size,
batched_args=[pair_act, pair_act, bias],
nonbatched_args=[nonbatched_bias],
low_memory=not is_training)
if c.orientation == 'per_column':
pair_act = jnp.swapaxes(pair_act, -2, -3)
return pair_act
class MaskedMsaHead(hk.Module):
"""Head to predict MSA at the masked locations.
The MaskedMsaHead employs a BERT-style objective to reconstruct a masked
version of the full MSA, based on a linear projection of
the MSA representation.
Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
"""
def __init__(self, config, global_config, name='masked_msa_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
if global_config.multimer_mode:
self.num_output = len(residue_constants.restypes_with_x_and_gap)
else:
self.num_output = config.num_output
def __call__(self, representations, batch, is_training):
"""Builds MaskedMsaHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'msa': MSA representation, shape [N_seq, N_res, c_m].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* 'logits': logits of shape [N_seq, N_res, N_aatype] with
          (unnormalized) log probabilities of predicted aatype at position.
"""
del batch
logits = common_modules.Linear(
self.num_output,
initializer=utils.final_init(self.global_config),
name='logits')(
representations['msa'])
return dict(logits=logits)
def loss(self, value, batch):
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(batch['true_msa'], num_classes=self.num_output),
logits=value['logits'])
loss = (jnp.sum(errors * batch['bert_mask'], axis=(-2, -1)) /
(1e-8 + jnp.sum(batch['bert_mask'], axis=(-2, -1))))
return {'loss': loss}
class PredictedLDDTHead(hk.Module):
"""Head to predict the per-residue LDDT to be used as a confidence measure.
Jumper et al. (2021) Suppl. Sec. 1.9.6 "Model confidence prediction (pLDDT)"
Jumper et al. (2021) Suppl. Alg. 29 "predictPerResidueLDDT_Ca"
"""
def __init__(self, config, global_config, name='predicted_lddt_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds PredictedLDDTHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'structure_module': Single representation from the structure module,
shape [N_res, c_s].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
      Dictionary containing:
        * 'logits': logits of shape [N_res, N_bins] with
          (unnormalized) log probabilities of binned predicted lDDT.
"""
act = representations['structure_module']
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='input_layer_norm')(
act)
act = common_modules.Linear(
self.config.num_channels,
initializer='relu',
name='act_0')(
act)
act = jax.nn.relu(act)
act = common_modules.Linear(
self.config.num_channels,
initializer='relu',
name='act_1')(
act)
act = jax.nn.relu(act)
logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='logits')(
act)
# Shape (batch_size, num_res, num_bins)
return dict(logits=logits)
def loss(self, value, batch):
# Shape (num_res, 37, 3)
pred_all_atom_pos = value['structure_module']['final_atom_positions']
# Shape (num_res, 37, 3)
true_all_atom_pos = batch['all_atom_positions']
# Shape (num_res, 37)
all_atom_mask = batch['all_atom_mask']
# Shape (num_res,)
lddt_ca = lddt.lddt(
# Shape (batch_size, num_res, 3)
predicted_points=pred_all_atom_pos[None, :, 1, :],
# Shape (batch_size, num_res, 3)
true_points=true_all_atom_pos[None, :, 1, :],
# Shape (batch_size, num_res, 1)
true_points_mask=all_atom_mask[None, :, 1:2].astype(jnp.float32),
cutoff=15.,
per_residue=True)
lddt_ca = jax.lax.stop_gradient(lddt_ca)
num_bins = self.config.num_bins
bin_index = jnp.floor(lddt_ca * num_bins).astype(jnp.int32)
# protect against out of range for lddt_ca == 1
bin_index = jnp.minimum(bin_index, num_bins - 1)
lddt_ca_one_hot = jax.nn.one_hot(bin_index, num_classes=num_bins)
# Shape (num_res, num_channel)
logits = value['predicted_lddt']['logits']
errors = softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits)
# Shape (num_res,)
mask_ca = all_atom_mask[:, residue_constants.atom_order['CA']]
mask_ca = mask_ca.astype(jnp.float32)
loss = jnp.sum(errors * mask_ca) / (jnp.sum(mask_ca) + 1e-8)
if self.config.filter_by_resolution:
# NMR & distillation have resolution = 0
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class PredictedAlignedErrorHead(hk.Module):
"""Head to predict the distance errors in the backbone alignment frames.
Can be used to compute predicted TM-Score.
Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
"""
def __init__(self, config, global_config,
name='predicted_aligned_error_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds PredictedAlignedErrorHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'pair': pair representation, shape [N_res, N_res, c_z].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* logits: logits for aligned error, shape [N_res, N_res, N_bins].
        * breaks: array containing bin breaks, shape [N_bins - 1].
"""
act = representations['pair']
# Shape (num_res, num_res, num_bins)
logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='logits')(act)
# Shape (num_bins,)
breaks = jnp.linspace(
0., self.config.max_error_bin, self.config.num_bins - 1)
return dict(logits=logits, breaks=breaks)
def loss(self, value, batch):
# Shape (num_res, 7)
predicted_affine = quat_affine.QuatAffine.from_tensor(
value['structure_module']['final_affines'])
# Shape (num_res, 7)
true_affine = quat_affine.QuatAffine.from_tensor(
batch['backbone_affine_tensor'])
# Shape (num_res)
mask = batch['backbone_affine_mask']
# Shape (num_res, num_res)
square_mask = mask[:, None] * mask[None, :]
num_bins = self.config.num_bins
# (1, num_bins - 1)
breaks = value['predicted_aligned_error']['breaks']
# (1, num_bins)
logits = value['predicted_aligned_error']['logits']
# Compute the squared error for each alignment.
def _local_frame_points(affine):
points = [jnp.expand_dims(x, axis=-2) for x in affine.translation]
return affine.invert_point(points, extra_dims=1)
error_dist2_xyz = [
jnp.square(a - b)
for a, b in zip(_local_frame_points(predicted_affine),
_local_frame_points(true_affine))]
error_dist2 = sum(error_dist2_xyz)
# Shape (num_res, num_res)
# First num_res are alignment frames, second num_res are the residues.
error_dist2 = jax.lax.stop_gradient(error_dist2)
sq_breaks = jnp.square(breaks)
true_bins = jnp.sum((
error_dist2[..., None] > sq_breaks).astype(jnp.int32), axis=-1)
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(true_bins, num_bins, axis=-1), logits=logits)
loss = (jnp.sum(errors * square_mask, axis=(-2, -1)) /
(1e-8 + jnp.sum(square_mask, axis=(-2, -1))))
if self.config.filter_by_resolution:
# NMR & distillation have resolution = 0
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class ExperimentallyResolvedHead(hk.Module):
"""Predicts if an atom is experimentally resolved in a high-res structure.
Only trained on high-resolution X-ray crystals & cryo-EM.
Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
"""
def __init__(self, config, global_config,
name='experimentally_resolved_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds ExperimentallyResolvedHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'single': Single representation, shape [N_res, c_s].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* 'logits': logits of shape [N_res, 37],
log probability that an atom is resolved in atom37 representation,
can be converted to probability by applying sigmoid.
"""
logits = common_modules.Linear(
37, # atom_exists.shape[-1]
initializer=utils.final_init(self.global_config),
name='logits')(representations['single'])
return dict(logits=logits)
def loss(self, value, batch):
logits = value['logits']
assert len(logits.shape) == 2
# Does the atom appear in the amino acid?
atom_exists = batch['atom37_atom_exists']
# Is the atom resolved in the experiment? Subset of atom_exists,
# *except for OXT*
all_atom_mask = batch['all_atom_mask'].astype(jnp.float32)
xent = sigmoid_cross_entropy(labels=all_atom_mask, logits=logits)
loss = jnp.sum(xent * atom_exists) / (1e-8 + jnp.sum(atom_exists))
if self.config.filter_by_resolution:
# NMR & distillation examples have resolution = 0.
loss *= ((batch['resolution'] >= self.config.min_resolution)
& (batch['resolution'] <= self.config.max_resolution)).astype(
jnp.float32)
output = {'loss': loss}
return output
class TriangleMultiplication(hk.Module):
"""Triangle multiplication layer ("outgoing" or "incoming").
Jumper et al. (2021) Suppl. Alg. 11 "TriangleMultiplicationOutgoing"
Jumper et al. (2021) Suppl. Alg. 12 "TriangleMultiplicationIncoming"
"""
def __init__(self, config, global_config, name='triangle_multiplication'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, mask, is_training=True):
"""Builds TriangleMultiplication module.
Arguments:
act: Pair activations, shape [N_res, N_res, c_z]
mask: Pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
Returns:
Outputs, same shape/type as act.
"""
del is_training
c = self.config
gc = self.global_config
mask = mask[..., None]
act = hk.LayerNorm(axis=[-1], create_scale=True, create_offset=True,
name='layer_norm_input')(act)
input_act = act
left_projection = common_modules.Linear(
c.num_intermediate_channel,
name='left_projection')
left_proj_act = mask * left_projection(act)
right_projection = common_modules.Linear(
c.num_intermediate_channel,
name='right_projection')
right_proj_act = mask * right_projection(act)
left_gate_values = jax.nn.sigmoid(common_modules.Linear(
c.num_intermediate_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='left_gate')(act))
right_gate_values = jax.nn.sigmoid(common_modules.Linear(
c.num_intermediate_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='right_gate')(act))
left_proj_act *= left_gate_values
right_proj_act *= right_gate_values
# "Outgoing" edges equation: 'ikc,jkc->ijc'
# "Incoming" edges equation: 'kjc,kic->ijc'
# Note on the Suppl. Alg. 11 & 12 notation:
# For the "outgoing" edges, a = left_proj_act and b = right_proj_act
# For the "incoming" edges, it's swapped:
# b = left_proj_act and a = right_proj_act
act = jnp.einsum(c.equation, left_proj_act, right_proj_act)
act = hk.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='center_layer_norm')(
act)
output_channel = int(input_act.shape[-1])
act = common_modules.Linear(
output_channel,
initializer=utils.final_init(gc),
name='output_projection')(act)
gate_values = jax.nn.sigmoid(common_modules.Linear(
output_channel,
bias_init=1.,
initializer=utils.final_init(gc),
name='gating_linear')(input_act))
act *= gate_values
return act
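def _triangle_multiplication_equations(n_res=4, n_channel=2):
  """A hedged sketch (not part of AlphaFold) of the two einsum equations
  referenced in TriangleMultiplication above; c.equation is one of these."""
  a = jnp.ones((n_res, n_res, n_channel))
  b = jnp.ones((n_res, n_res, n_channel))
  outgoing = jnp.einsum('ikc,jkc->ijc', a, b)  # "outgoing" edges
  incoming = jnp.einsum('kjc,kic->ijc', a, b)  # "incoming" edges
  assert outgoing.shape == incoming.shape == (n_res, n_res, n_channel)
  return outgoing, incoming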
class DistogramHead(hk.Module):
"""Head to predict a distogram.
Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
"""
def __init__(self, config, global_config, name='distogram_head'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, representations, batch, is_training):
"""Builds DistogramHead module.
Arguments:
representations: Dictionary of representations, must contain:
* 'pair': pair representation, shape [N_res, N_res, c_z].
batch: Batch, unused.
is_training: Whether the module is in training mode.
Returns:
Dictionary containing:
* logits: logits for distogram, shape [N_res, N_res, N_bins].
        * bin_edges: array containing bin edges, shape [N_bins - 1].
"""
half_logits = common_modules.Linear(
self.config.num_bins,
initializer=utils.final_init(self.global_config),
name='half_logits')(
representations['pair'])
logits = half_logits + jnp.swapaxes(half_logits, -2, -3)
breaks = jnp.linspace(self.config.first_break, self.config.last_break,
self.config.num_bins - 1)
return dict(logits=logits, bin_edges=breaks)
def loss(self, value, batch):
return _distogram_log_loss(value['logits'], value['bin_edges'],
batch, self.config.num_bins)
def _distogram_log_loss(logits, bin_edges, batch, num_bins):
"""Log loss of a distogram."""
assert len(logits.shape) == 3
positions = batch['pseudo_beta']
mask = batch['pseudo_beta_mask']
assert positions.shape[-1] == 3
sq_breaks = jnp.square(bin_edges)
dist2 = jnp.sum(
jnp.square(
jnp.expand_dims(positions, axis=-2) -
jnp.expand_dims(positions, axis=-3)),
axis=-1,
keepdims=True)
true_bins = jnp.sum(dist2 > sq_breaks, axis=-1)
errors = softmax_cross_entropy(
labels=jax.nn.one_hot(true_bins, num_bins), logits=logits)
square_mask = jnp.expand_dims(mask, axis=-2) * jnp.expand_dims(mask, axis=-1)
avg_error = (
jnp.sum(errors * square_mask, axis=(-2, -1)) /
(1e-6 + jnp.sum(square_mask, axis=(-2, -1))))
dist2 = dist2[..., 0]
return dict(loss=avg_error, true_dist=jnp.sqrt(1e-6 + dist2))
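def _distogram_binning_example():
  """A worked example (illustrative values) of the binning rule in
  _distogram_log_loss above: a squared distance is assigned the number of
  squared bin edges it exceeds, so everything beyond the last edge lands in
  the final bin. Returns [0, 9, 63] for the distances below."""
  bin_edges = jnp.linspace(2.3125, 21.6875, 63)  # 64 bins -> 63 edges
  dist = jnp.array([1., 5., 30.])
  true_bins = jnp.sum(
      jnp.square(dist)[..., None] > jnp.square(bin_edges), axis=-1)
  return true_bins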
class OuterProductMean(hk.Module):
"""Computes mean outer product.
Jumper et al. (2021) Suppl. Alg. 10 "OuterProductMean"
"""
def __init__(self,
config,
global_config,
num_output_channel,
name='outer_product_mean'):
super().__init__(name=name)
self.global_config = global_config
self.config = config
self.num_output_channel = num_output_channel
def __call__(self, act, mask, is_training=True):
"""Builds OuterProductMean module.
Arguments:
act: MSA representation, shape [N_seq, N_res, c_m].
mask: MSA mask, shape [N_seq, N_res].
is_training: Whether the module is in training mode.
Returns:
Update to pair representation, shape [N_res, N_res, c_z].
"""
gc = self.global_config
c = self.config
mask = mask[..., None]
act = hk.LayerNorm([-1], True, True, name='layer_norm_input')(act)
left_act = mask * common_modules.Linear(
c.num_outer_channel,
initializer='linear',
name='left_projection')(
act)
right_act = mask * common_modules.Linear(
c.num_outer_channel,
initializer='linear',
name='right_projection')(
act)
if gc.zero_init:
init_w = hk.initializers.Constant(0.0)
else:
init_w = hk.initializers.VarianceScaling(scale=2., mode='fan_in')
output_w = hk.get_parameter(
'output_w',
shape=(c.num_outer_channel, c.num_outer_channel,
self.num_output_channel),
init=init_w)
output_b = hk.get_parameter(
'output_b', shape=(self.num_output_channel,),
init=hk.initializers.Constant(0.0))
def compute_chunk(left_act):
# This is equivalent to
#
# act = jnp.einsum('abc,ade->dceb', left_act, right_act)
# act = jnp.einsum('dceb,cef->bdf', act, output_w) + output_b
#
# but faster.
left_act = jnp.transpose(left_act, [0, 2, 1])
act = jnp.einsum('acb,ade->dceb', left_act, right_act)
act = jnp.einsum('dceb,cef->dbf', act, output_w) + output_b
return jnp.transpose(act, [1, 0, 2])
act = mapping.inference_subbatch(
compute_chunk,
c.chunk_size,
batched_args=[left_act],
nonbatched_args=[],
low_memory=True,
input_subbatch_dim=1,
output_subbatch_dim=0)
epsilon = 1e-3
norm = jnp.einsum('abc,adc->bdc', mask, mask)
act /= epsilon + norm
return act
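def _outer_product_equivalence_check():
  """A hedged numerical check (not part of AlphaFold) of the equivalence
  claimed in compute_chunk above: the transposed einsum pair matches the
  straightforward 'abc,ade->dceb' / 'dceb,cef->bdf' contraction."""
  left = jnp.arange(30.).reshape(3, 5, 2)  # [N_seq, N_res, num_outer_channel]
  right = left + 1.
  output_w = jnp.ones((2, 2, 4))           # [c, c, num_output_channel]
  ref = jnp.einsum('abc,ade->dceb', left, right)
  ref = jnp.einsum('dceb,cef->bdf', ref, output_w)
  fast = jnp.transpose(left, [0, 2, 1])
  fast = jnp.einsum('acb,ade->dceb', fast, right)
  fast = jnp.einsum('dceb,cef->dbf', fast, output_w)
  fast = jnp.transpose(fast, [1, 0, 2])
  assert jnp.allclose(ref, fast)
  return ref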
def dgram_from_positions(positions, num_bins, min_bin, max_bin):
"""Compute distogram from amino acid positions.
Arguments:
positions: [N_res, 3] Position coordinates.
num_bins: The number of bins in the distogram.
min_bin: The left edge of the first bin.
max_bin: The left edge of the final bin. The final bin catches
everything larger than `max_bin`.
Returns:
Distogram with the specified number of bins.
"""
def squared_difference(x, y):
return jnp.square(x - y)
lower_breaks = jnp.linspace(min_bin, max_bin, num_bins)
lower_breaks = jnp.square(lower_breaks)
upper_breaks = jnp.concatenate([lower_breaks[1:],
jnp.array([1e8], dtype=jnp.float32)], axis=-1)
dist2 = jnp.sum(
squared_difference(
jnp.expand_dims(positions, axis=-2),
jnp.expand_dims(positions, axis=-3)),
axis=-1, keepdims=True)
dgram = ((dist2 > lower_breaks).astype(jnp.float32) *
(dist2 < upper_breaks).astype(jnp.float32))
return dgram
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
"""Create pseudo beta features."""
is_gly = jnp.equal(aatype, residue_constants.restype_order['G'])
ca_idx = residue_constants.atom_order['CA']
cb_idx = residue_constants.atom_order['CB']
pseudo_beta = jnp.where(
jnp.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]),
all_atom_positions[..., ca_idx, :],
all_atom_positions[..., cb_idx, :])
if all_atom_masks is not None:
pseudo_beta_mask = jnp.where(
is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
pseudo_beta_mask = pseudo_beta_mask.astype(jnp.float32)
return pseudo_beta, pseudo_beta_mask
else:
return pseudo_beta
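def _pseudo_beta_glycine_check():
  """A toy check (illustrative, not part of AlphaFold) that pseudo_beta_fn
  falls back to CA for glycine, which has no CB atom."""
  aatype = jnp.array([residue_constants.restype_order['G'],
                      residue_constants.restype_order['A']])
  ca = residue_constants.atom_order['CA']
  cb = residue_constants.atom_order['CB']
  pos = jnp.zeros((2, residue_constants.atom_type_num, 3))
  pos = pos.at[:, ca].set(1.).at[:, cb].set(2.)
  pseudo_beta = pseudo_beta_fn(aatype, pos, None)
  # Row 0 (glycine) picks the CA coordinates; row 1 (alanine) picks CB.
  assert jnp.allclose(pseudo_beta, jnp.array([[1., 1., 1.], [2., 2., 2.]]))
  return pseudo_beta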
class EvoformerIteration(hk.Module):
"""Single iteration (block) of Evoformer stack.
Jumper et al. (2021) Suppl. Alg. 6 "EvoformerStack" lines 2-10
"""
def __init__(self, config, global_config, is_extra_msa,
name='evoformer_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
self.is_extra_msa = is_extra_msa
def __call__(self, activations, masks, is_training=True, safe_key=None):
"""Builds EvoformerIteration module.
Arguments:
activations: Dictionary containing activations:
* 'msa': MSA activations, shape [N_seq, N_res, c_m].
* 'pair': pair activations, shape [N_res, N_res, c_z].
masks: Dictionary of masks:
* 'msa': MSA mask, shape [N_seq, N_res].
* 'pair': pair mask, shape [N_res, N_res].
is_training: Whether the module is in training mode.
safe_key: prng.SafeKey encapsulating rng key.
Returns:
Outputs, same shape/type as act.
"""
c = self.config
gc = self.global_config
msa_act, pair_act = activations['msa'], activations['pair']
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
msa_mask, pair_mask = masks['msa'], masks['pair']
dropout_wrapper_fn = functools.partial(
dropout_wrapper,
is_training=is_training,
global_config=gc)
safe_key, *sub_keys = safe_key.split(10)
sub_keys = iter(sub_keys)
outer_module = OuterProductMean(
config=c.outer_product_mean,
global_config=self.global_config,
num_output_channel=int(pair_act.shape[-1]),
name='outer_product_mean')
if c.outer_product_mean.first:
pair_act = dropout_wrapper_fn(
outer_module,
msa_act,
msa_mask,
safe_key=next(sub_keys),
output_act=pair_act)
msa_act = dropout_wrapper_fn(
MSARowAttentionWithPairBias(
c.msa_row_attention_with_pair_bias, gc,
name='msa_row_attention_with_pair_bias'),
msa_act,
msa_mask,
safe_key=next(sub_keys),
pair_act=pair_act)
if not self.is_extra_msa:
attn_mod = MSAColumnAttention(
c.msa_column_attention, gc, name='msa_column_attention')
else:
attn_mod = MSAColumnGlobalAttention(
c.msa_column_attention, gc, name='msa_column_global_attention')
msa_act = dropout_wrapper_fn(
attn_mod,
msa_act,
msa_mask,
safe_key=next(sub_keys))
msa_act = dropout_wrapper_fn(
Transition(c.msa_transition, gc, name='msa_transition'),
msa_act,
msa_mask,
safe_key=next(sub_keys))
if not c.outer_product_mean.first:
pair_act = dropout_wrapper_fn(
outer_module,
msa_act,
msa_mask,
safe_key=next(sub_keys),
output_act=pair_act)
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
pair_act = dropout_wrapper_fn(
Transition(c.pair_transition, gc, name='pair_transition'),
pair_act,
pair_mask,
safe_key=next(sub_keys))
return {'msa': msa_act, 'pair': pair_act}
class EmbeddingsAndEvoformer(hk.Module):
"""Embeds the input data and runs Evoformer.
Produces the MSA, single and pair representations.
Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5-18
"""
def __init__(self, config, global_config, name='evoformer'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, batch, is_training, safe_key=None):
c = self.config
gc = self.global_config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
# Embed clustered MSA.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
# Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
preprocess_1d = common_modules.Linear(
c.msa_channel, name='preprocess_1d')(
batch['target_feat'])
preprocess_msa = common_modules.Linear(
c.msa_channel, name='preprocess_msa')(
batch['msa_feat'])
msa_activations = jnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
left_single = common_modules.Linear(
c.pair_channel, name='left_single')(
batch['target_feat'])
right_single = common_modules.Linear(
c.pair_channel, name='right_single')(
batch['target_feat'])
pair_activations = left_single[:, None] + right_single[None]
mask_2d = batch['seq_mask'][:, None] * batch['seq_mask'][None, :]
# Inject previous outputs for recycling.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
# Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
if c.recycle_pos and 'prev_pos' in batch:
prev_pseudo_beta = pseudo_beta_fn(
batch['aatype'], batch['prev_pos'], None)
dgram = dgram_from_positions(prev_pseudo_beta, **self.config.prev_pos)
pair_activations += common_modules.Linear(
c.pair_channel, name='prev_pos_linear')(
dgram)
if c.recycle_features:
if 'prev_msa_first_row' in batch:
prev_msa_first_row = hk.LayerNorm([-1],
True,
True,
name='prev_msa_first_row_norm')(
batch['prev_msa_first_row'])
msa_activations = msa_activations.at[0].add(prev_msa_first_row)
if 'prev_pair' in batch:
pair_activations += hk.LayerNorm([-1],
True,
True,
name='prev_pair_norm')(
batch['prev_pair'])
# Relative position encoding.
# Jumper et al. (2021) Suppl. Alg. 4 "relpos"
# Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
if c.max_relative_feature:
# Add one-hot-encoded clipped residue distances to the pair activations.
pos = batch['residue_index']
offset = pos[:, None] - pos[None, :]
rel_pos = jax.nn.one_hot(
jnp.clip(
offset + c.max_relative_feature,
a_min=0,
a_max=2 * c.max_relative_feature),
2 * c.max_relative_feature + 1)
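      # NOTE: 'pair_activiations' (sic) below is kept misspelled so the
      # parameter name matches the released AlphaFold checkpoints.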
pair_activations += common_modules.Linear(
c.pair_channel, name='pair_activiations')(
rel_pos)
# Embed templates into the pair activations.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
if c.template.enabled:
template_batch = {k: batch[k] for k in batch if k.startswith('template_')}
template_pair_representation = TemplateEmbedding(c.template, gc)(
pair_activations,
template_batch,
mask_2d,
is_training=is_training)
pair_activations += template_pair_representation
# Embed extra MSA features.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
extra_msa_feat = create_extra_msa_feature(batch)
extra_msa_activations = common_modules.Linear(
c.extra_msa_channel,
name='extra_msa_activations')(
extra_msa_feat)
# Extra MSA Stack.
# Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
extra_msa_stack_input = {
'msa': extra_msa_activations,
'pair': pair_activations,
}
extra_msa_stack_iteration = EvoformerIteration(
c.evoformer, gc, is_extra_msa=True, name='extra_msa_stack')
def extra_msa_stack_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
extra_evoformer_output = extra_msa_stack_iteration(
activations=act,
masks={
'msa': batch['extra_msa_mask'],
'pair': mask_2d
},
is_training=is_training,
safe_key=safe_subkey)
return (extra_evoformer_output, safe_key)
if gc.use_remat:
extra_msa_stack_fn = hk.remat(extra_msa_stack_fn)
extra_msa_stack = layer_stack.layer_stack(
c.extra_msa_stack_num_block)(
extra_msa_stack_fn)
extra_msa_output, safe_key = extra_msa_stack(
(extra_msa_stack_input, safe_key))
pair_activations = extra_msa_output['pair']
evoformer_input = {
'msa': msa_activations,
'pair': pair_activations,
}
evoformer_masks = {'msa': batch['msa_mask'], 'pair': mask_2d}
# Append num_templ rows to msa_activations with template embeddings.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 7-8
if c.template.enabled and c.template.embed_torsion_angles:
num_templ, num_res = batch['template_aatype'].shape
# Embed the templates aatypes.
aatype_one_hot = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1)
# Embed the templates aatype, torsion angles and masks.
# Shape (templates, residues, msa_channels)
ret = all_atom.atom37_to_torsion_angles(
aatype=batch['template_aatype'],
all_atom_pos=batch['template_all_atom_positions'],
all_atom_mask=batch['template_all_atom_masks'],
# Ensure consistent behaviour during testing:
placeholder_for_undefined=not gc.zero_init)
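      # Per-residue template channels: 22 (aatype one-hot) + 14 (7 torsion
      # angles as sin/cos) + 14 (alternative angles) + 7 (angle masks) = 57.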
template_features = jnp.concatenate([
aatype_one_hot,
jnp.reshape(
ret['torsion_angles_sin_cos'], [num_templ, num_res, 14]),
jnp.reshape(
ret['alt_torsion_angles_sin_cos'], [num_templ, num_res, 14]),
ret['torsion_angles_mask']], axis=-1)
template_activations = common_modules.Linear(
c.msa_channel,
initializer='relu',
name='template_single_embedding')(
template_features)
template_activations = jax.nn.relu(template_activations)
template_activations = common_modules.Linear(
c.msa_channel,
initializer='relu',
name='template_projection')(
template_activations)
# Concatenate the templates to the msa.
evoformer_input['msa'] = jnp.concatenate(
[evoformer_input['msa'], template_activations], axis=0)
# Concatenate templates masks to the msa masks.
# Use mask from the psi angle, as it only depends on the backbone atoms
# from a single residue.
torsion_angle_mask = ret['torsion_angles_mask'][:, :, 2]
torsion_angle_mask = torsion_angle_mask.astype(
evoformer_masks['msa'].dtype)
evoformer_masks['msa'] = jnp.concatenate(
[evoformer_masks['msa'], torsion_angle_mask], axis=0)
# Main trunk of the network
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
evoformer_iteration = EvoformerIteration(
c.evoformer, gc, is_extra_msa=False, name='evoformer_iteration')
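
    # The main trunk reuses the same stacking pattern as the extra-MSA stack,
    # now over c.evoformer_num_block full Evoformer blocks.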
def evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
evoformer_output = evoformer_iteration(
activations=act,
masks=evoformer_masks,
is_training=is_training,
safe_key=safe_subkey)
return (evoformer_output, safe_key)
if gc.use_remat:
evoformer_fn = hk.remat(evoformer_fn)
evoformer_stack = layer_stack.layer_stack(c.evoformer_num_block)(
evoformer_fn)
evoformer_output, safe_key = evoformer_stack(
(evoformer_input, safe_key))
msa_activations = evoformer_output['msa']
pair_activations = evoformer_output['pair']
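    # The single representation is a projection of the first MSA row, which
    # corresponds to the target sequence.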
single_activations = common_modules.Linear(
c.seq_channel, name='single_activations')(
msa_activations[0])
num_sequences = batch['msa_feat'].shape[0]
output = {
'single': single_activations,
'pair': pair_activations,
# Crop away template rows such that they are not used in MaskedMsaHead.
'msa': msa_activations[:num_sequences, :, :],
'msa_first_row': msa_activations[0],
}
    return output


class SingleTemplateEmbedding(hk.Module):
  """Embeds a single template.

  Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9+11
  """

  def __init__(self, config, global_config, name='single_template_embedding'):
    super().__init__(name=name)
    self.config = config
    self.global_config = global_config

  def __call__(self, query_embedding, batch, mask_2d, is_training):
"""Build the single template embedding.
Arguments:
query_embedding: Query pair representation, shape [N_res, N_res, c_z].
batch: A batch of template features (note the template dimension has been
stripped out as this module only runs over a single template).
mask_2d: Padding mask (Note: this doesn't care if a template exists,
unlike the template_pseudo_beta_mask).
is_training: Whether the module is in training mode.
Returns:
A template embedding [N_res, N_res, c_z].
"""
assert mask_2d.dtype == query_embedding.dtype
dtype = query_embedding.dtype
num_res = batch['template_aatype'].shape[0]
num_channels = (self.config.template_pair_stack
.triangle_attention_ending_node.value_dim)
template_mask = batch['template_pseudo_beta_mask']
template_mask_2d = template_mask[:, None] * template_mask[None, :]
template_mask_2d = template_mask_2d.astype(dtype)
template_dgram = dgram_from_positions(batch['template_pseudo_beta'],
**self.config.dgram_features)
template_dgram = template_dgram.astype(dtype)
to_concat = [template_dgram, template_mask_2d[:, :, None]]
aatype = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1, dtype=dtype)
to_concat.append(jnp.tile(aatype[None, :, :], [num_res, 1, 1]))
to_concat.append(jnp.tile(aatype[:, None, :], [1, num_res, 1]))
n, ca, c = [residue_constants.atom_order[a] for a in ('N', 'CA', 'C')]
rot, trans = quat_affine.make_transform_from_reference(
n_xyz=batch['template_all_atom_positions'][:, n],
ca_xyz=batch['template_all_atom_positions'][:, ca],
c_xyz=batch['template_all_atom_positions'][:, c])
affines = quat_affine.QuatAffine(
quaternion=quat_affine.rot_to_quat(rot, unstack_inputs=True),
translation=trans,
rotation=rot,
unstack_inputs=True)
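    # Inter-residue displacements between backbone-frame origins (CA
    # positions), expressed in each residue's local frame; rsqrt of the
    # squared norm then rescales them to unit vectors, with a small epsilon
    # for numerical stability.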
points = [jnp.expand_dims(x, axis=-2) for x in affines.translation]
affine_vec = affines.invert_point(points, extra_dims=1)
inv_distance_scalar = jax.lax.rsqrt(
1e-6 + sum([jnp.square(x) for x in affine_vec]))
# Backbone affine mask: whether the residue has C, CA, N
# (the template mask defined above only considers pseudo CB).
template_mask = (
batch['template_all_atom_masks'][..., n] *
batch['template_all_atom_masks'][..., ca] *
batch['template_all_atom_masks'][..., c])
template_mask_2d = template_mask[:, None] * template_mask[None, :]
inv_distance_scalar *= template_mask_2d.astype(inv_distance_scalar.dtype)
unit_vector = [(x * inv_distance_scalar)[..., None] for x in affine_vec]
unit_vector = [x.astype(dtype) for x in unit_vector]
template_mask_2d = template_mask_2d.astype(dtype)
if not self.config.use_template_unit_vector:
unit_vector = [jnp.zeros_like(x) for x in unit_vector]
to_concat.extend(unit_vector)
to_concat.append(template_mask_2d[..., None])
act = jnp.concatenate(to_concat, axis=-1)
# Mask out non-template regions so we don't get arbitrary values in the
# distogram for these regions.
act *= template_mask_2d[..., None]
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 9
act = common_modules.Linear(
num_channels,
initializer='relu',
name='embedding2d')(
act)
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 11
act = TemplatePairStack(
self.config.template_pair_stack, self.global_config)(
act, mask_2d, is_training)
act = hk.LayerNorm([-1], True, True, name='output_layer_norm')(act)
    return act


class TemplateEmbedding(hk.Module):
  """Embeds a set of templates.

  Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
  Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
  """

  def __init__(self, config, global_config, name='template_embedding'):
    super().__init__(name=name)
    self.config = config
    self.global_config = global_config

  def __call__(self, query_embedding, template_batch, mask_2d, is_training):
"""Build TemplateEmbedding module.
Arguments:
query_embedding: Query pair representation, shape [N_res, N_res, c_z].
template_batch: A batch of template features.
mask_2d: Padding mask (Note: this doesn't care if a template exists,
unlike the template_pseudo_beta_mask).
is_training: Whether the module is in training mode.
Returns:
A template embedding [N_res, N_res, c_z].
"""
num_templates = template_batch['template_mask'].shape[0]
num_channels = (self.config.template_pair_stack
.triangle_attention_ending_node.value_dim)
num_res = query_embedding.shape[0]
dtype = query_embedding.dtype
template_mask = template_batch['template_mask']
template_mask = template_mask.astype(dtype)
query_num_channels = query_embedding.shape[-1]
# Make sure the weights are shared across templates by constructing the
# embedder here.
# Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
template_embedder = SingleTemplateEmbedding(self.config, self.global_config)
def map_fn(batch):
return template_embedder(query_embedding, batch, mask_2d, is_training)
template_pair_representation = mapping.sharded_map(map_fn, in_axes=0)(
template_batch)
# Cross attend from the query to the templates along the residue
# dimension by flattening everything else into the batch dimension.
# Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
flat_query = jnp.reshape(query_embedding,
[num_res * num_res, 1, query_num_channels])
flat_templates = jnp.reshape(
jnp.transpose(template_pair_representation, [1, 2, 0, 3]),
[num_res * num_res, num_templates, num_channels])
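    # Missing templates get a large negative bias so the attention softmax
    # assigns them effectively zero weight.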
bias = (1e9 * (template_mask[None, None, None, :] - 1.))
template_pointwise_attention_module = Attention(
self.config.attention, self.global_config, query_num_channels)
nonbatched_args = [bias]
batched_args = [flat_query, flat_templates]
embedding = mapping.inference_subbatch(
template_pointwise_attention_module,
self.config.subbatch_size,
batched_args=batched_args,
nonbatched_args=nonbatched_args,
low_memory=not is_training)
embedding = jnp.reshape(embedding,
[num_res, num_res, query_num_channels])
# No gradients if no templates.
embedding *= (jnp.sum(template_mask) > 0.).astype(embedding.dtype)
return embedding
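

# A minimal usage sketch, not part of the original module. It assumes the
# embedder class defined above ('EmbeddingsAndEvoformer' in the upstream
# AlphaFold source), an AlphaFold-style model config, and a preprocessed
# feature `batch`; these names are assumptions here, not guaranteed APIs.
#
#   import haiku as hk
#   import jax
#
#   def forward_fn(batch):
#     embedder = EmbeddingsAndEvoformer(
#         model_config.embeddings_and_evoformer, model_config.global_config)
#     return embedder(batch, is_training=False)
#
#   forward = hk.transform(forward_fn)
#   rng = jax.random.PRNGKey(0)
#   params = forward.init(rng, batch)
#   out = forward.apply(params, rng, batch)  # 'single', 'pair', 'msa', ...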