hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b4e7b46ffcd23585b3bbbc18f55ec72d8a164224 | 7,050 | py | Python | tests/python/kaolin/io/test_materials.py | mlej8/kaolin | 19fd610fff68c4d9ad9035386b76e6fd51b0b67c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-08-28T23:26:19.000Z | 2021-08-28T23:26:19.000Z | tests/python/kaolin/io/test_materials.py | mlej8/kaolin | 19fd610fff68c4d9ad9035386b76e6fd51b0b67c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/python/kaolin/io/test_materials.py | mlej8/kaolin | 19fd610fff68c4d9ad9035386b76e6fd51b0b67c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
import pytest
from kaolin.io import materials, usd, obj
# Seed for texture sampling
torch.random.manual_seed(0)
@pytest.fixture(scope='class')
def out_dir():
    """Yield a temporary '_out' directory beside this test file; delete it afterwards."""
    target = os.path.join(os.path.dirname(os.path.realpath(__file__)), '_out')
    os.makedirs(target, exist_ok=True)
    yield target
    shutil.rmtree(target)
@pytest.fixture(scope='module')
def material_values():
    """Yield a PBRMaterial populated with constant (non-texture) values."""
    yield materials.PBRMaterial(
        diffuse_color=(0., 1., 0.),
        roughness_value=0.1,
        metallic_value=1.,
        specular_color=(1., 0., 0.),
        is_specular_workflow=True,
    )
@pytest.fixture(scope='module')
def material_textures():
    """Yield a PBRMaterial populated with random texture maps (seeded at module level)."""
    yield materials.PBRMaterial(
        diffuse_texture=torch.rand((3, 256, 256)),
        roughness_texture=torch.rand((1, 256, 256)),
        metallic_texture=torch.rand((1, 256, 256)),
        normals_texture=torch.rand((1, 256, 256)),
        specular_texture=torch.rand((3, 256, 256)),
        is_specular_workflow=True,
    )
@pytest.fixture(scope='module')
def mesh():
    """Import the sample rocket OBJ mesh, keeping normals and materials."""
    sample_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               os.pardir, os.pardir, os.pardir, 'samples/rocket.obj')
    return obj.import_mesh(sample_path, with_normals=True, with_materials=True,
                           error_handler=obj.skip_error_handler)
class TestPBRMaterial:
    """Round-trip and golden-file tests for kaolin.io.materials.PBRMaterial."""

    def test_cycle_values(self, out_dir, material_values):
        """Write a value-based material to USD, read it back, compare fields."""
        file_path = os.path.join(out_dir, 'pbr_test.usda')
        material_values.write_to_usd(file_path, '/World/Looks/pbr')

        material_in = materials.PBRMaterial().read_from_usd(file_path, '/World/Looks/pbr')

        assert material_values.diffuse_color == pytest.approx(material_in.diffuse_color, 0.1)
        assert material_values.roughness_value == pytest.approx(material_in.roughness_value, 0.1)
        assert material_values.metallic_value == pytest.approx(material_in.metallic_value, 0.1)
        assert material_values.specular_color == pytest.approx(material_in.specular_color, 0.1)
        assert material_values.is_specular_workflow == material_in.is_specular_workflow

    def test_cycle_textures(self, out_dir, material_textures):
        """Cycle test for textures. This conversion is lossy!"""
        file_path = os.path.join(out_dir, 'pbr_tex_test.usda')
        material_textures.write_to_usd(file_path, '/World/Looks/pbr')

        material_in = materials.PBRMaterial().read_from_usd(file_path, '/World/Looks/pbr')
        # Loose tolerance: textures go through 8-bit image serialization.
        assert torch.allclose(material_textures.diffuse_texture, material_in.diffuse_texture, atol=1e-2)
        assert torch.allclose(material_textures.roughness_texture, material_in.roughness_texture, atol=1e-2)
        assert torch.allclose(material_textures.metallic_texture, material_in.metallic_texture, atol=1e-2)
        assert torch.allclose(material_textures.normals_texture, material_in.normals_texture, atol=1e-2)
        assert torch.allclose(material_textures.specular_texture, material_in.specular_texture, atol=1e-2)
        assert material_textures.is_specular_workflow == material_in.is_specular_workflow

    def test_material_values(self, out_dir):
        """Export several value-based materials and compare against the golden USD file."""
        out_path = os.path.join(out_dir, 'pbr_material_values.usda')
        stage = usd.create_stage(out_path)

        tests = {
            'Default': {},
            'Diffuse': {'diffuse_color': (0., 1., 0.)},
            'Roughness': {'roughness_value': 0.1},
            'Metallic': {'metallic_value': 1.},
            'Specular': {'specular_color': (1., 0., 0.), 'is_specular_workflow': True},
        }
        for test_name, params in tests.items():
            prim = stage.DefinePrim(f'/World/{test_name}', 'Sphere')
            mat = materials.PBRMaterial(**params)
            mat.write_to_usd(out_path, f'/World/Looks/{test_name}', bound_prims=[prim], time=0)
        stage.Save()

        # Confirm exported USD matches golden file
        # TODO(jlafleche) Render the two mesh for visual comparison
        golden = os.path.join(out_dir, os.pardir, os.pardir, os.pardir,
                              os.pardir, 'samples/golden/pbr_material_values.usda')
        assert open(golden).read() == open(out_path).read()

    def test_material_textures(self, out_dir, mesh, material_textures):
        """Export several texture-based materials and compare against the golden USD file.

        Note: the ``material_textures`` fixture argument is intentionally unused
        here; the materials under test are built from checkerboards below.
        """
        def _create_checkerboard(val1, val2):
            # Build a 2x2 checkerboard of val1/val2 and upsample to 256x256.
            channels = len(val1)
            checkerboard = torch.ones((channels, 2, 2)) * torch.tensor(val1)[:, None, None]
            checkerboard[:, 0, 0] = torch.tensor(val2)
            checkerboard[:, 1, 1] = torch.tensor(val2)
            checkerboard = torch.nn.functional.interpolate(checkerboard[None, ...], scale_factor=128)[0]
            return checkerboard

        out_path = os.path.join(out_dir, 'pbr_material_textures.usda')
        stage = usd.create_stage(out_path)

        tests = {
            'Default': {},
            'Diffuse': {'diffuse_texture': _create_checkerboard((0., 1., 0.), (0., 0., 1.))},
            'Roughness': {'roughness_texture': _create_checkerboard((0.1,), (0.9,))},
            'Metallic': {'metallic_texture': _create_checkerboard((0.1,), (0.9,))},
            'Normal': {'normals_texture': _create_checkerboard((0., 0., 1.,), (0., 0.5, 0.5))},
            'Specular': {'specular_texture': _create_checkerboard((1., 0., 0.), (0., 0., 1.)),
                         'is_specular_workflow': True},
        }
        for test_name, params in tests.items():
            # BUG FIX: previously this loop variable was also named
            # ``material_textures``, shadowing the fixture parameter above.
            test_material = materials.PBRMaterial(**params)
            prim = usd.add_mesh(stage, f'/World/{test_name}', mesh.vertices, mesh.faces,
                                uvs=mesh.uvs,
                                face_uvs_idx=mesh.face_uvs_idx,
                                face_normals=mesh.vertex_normals[mesh.face_normals].view(-1, 3))
            test_material.write_to_usd(out_path, f'/World/Looks/{test_name}', bound_prims=[prim], time=0)
        stage.Save()

        # Confirm exported USD matches golden file
        # TODO(jlafleche) Render the two mesh for visual comparison
        golden = os.path.join(out_dir, os.pardir, os.pardir, os.pardir,
                              os.pardir, 'samples/golden/pbr_material_textures.usda')
        assert open(golden).read() == open(out_path).read()
| 44.620253 | 109 | 0.653901 |
c9fc627d31be3ef3ae78534099b183a9aab0452d | 3,021 | py | Python | plugins/python/container/fn/execute/main.py | proglang/servercodetest | f0acd5940a22be9a72a95494afb308bb3017ea64 | [
"MIT"
] | null | null | null | plugins/python/container/fn/execute/main.py | proglang/servercodetest | f0acd5940a22be9a72a95494afb308bb3017ea64 | [
"MIT"
] | 2 | 2020-06-05T16:24:37.000Z | 2020-11-15T09:02:48.000Z | plugins/python/container/fn/execute/main.py | proglang/servercodetest | f0acd5940a22be9a72a95494afb308bb3017ea64 | [
"MIT"
] | 1 | 2020-10-22T05:46:52.000Z | 2020-10-22T05:46:52.000Z | import util
import util.random
import os
import json
from .struct import Output, Text, Mark
import logging
class Main:
    """Runs user code inside the execution container and collects the
    run / pytest / mark stage results into a serialized Output."""

    def __init__(
        self, _dir: str, globalSettings: "GlobalSettings",
    ):
        from plugin import GlobalSettings

        self.dir = _dir
        # Random delimiter so the JSON payload can be located safely inside
        # arbitrary mixed subprocess output.
        self.delimiter = "DATA_" + util.random.string(20)
        self._global: GlobalSettings = globalSettings

    def _get_env(self):
        """Build the environment for child processes."""
        # Start from the current environment ...
        # TODO: remove non needed env vars
        env = dict(os.environ)
        # ... then apply our settings last, so a pre-existing variable with
        # the same name (e.g. DELIMITER) cannot clobber the generated value.
        # (Previously os.environ was copied *after* these, reversing priority.)
        env.update({
            "PYTHONHASHSEED": "1",
            "PYTHONDONTWRITEBYTECODE": "1",
            "DELIMITER": self.delimiter,
        })
        return env

    def _parse(self, data: str) -> tuple:
        """Split subprocess output into ``(plain_text, payload_dict)``.

        The JSON payload is wrapped in <DELIMITER>...</DELIMITER> tags.
        On any failure the full text and an empty dict are returned.
        (Return annotation fixed: this returns a tuple, not a dict.)
        """
        try:
            start_tag1 = data.rfind(f"<{self.delimiter}>")
            end_tag1 = start_tag1 + len(self.delimiter) + 2
            start_tag2 = data.find(f"</{self.delimiter}>", end_tag1)
            return (data[:start_tag1], json.loads(data[end_tag1:start_tag2]))
        except Exception as e:
            logging.error("Couldn't parse result: %s -> data: %s", str(e), str(data))
            return (data, {})

    def _popen(self, *args):
        """Run *args* as a subprocess in ``self.dir`` with the prepared env."""
        return util.Popen(*args, env=self._get_env(), cwd=self.dir)

    def run_code(self):
        """Execute the user's script directly and return its output."""
        try:
            logging.debug("run_code->start")
            popen = self._popen("python3", "./sct_user.py")
            return Text(popen.error, popen.data)
        except Exception as e:
            logging.error("Exception in run_code: %s", str(e))
            raise  # forward exception
        finally:
            logging.debug("run_code->finish")

    def run_pytest(self):
        """Run pytest over the user's script and return its output."""
        try:
            logging.debug("run_pytest->start")
            popen = self._popen("pytest", "./sct_user.py", "--color=yes")
            return Text(popen.error, popen.data)
        except Exception as e:
            logging.error("Exception in run_pytest: %s", str(e))
            raise  # forward exception
        finally:
            logging.debug("run_pytest->finish")

    def run_mark(self):
        """Run the marking script and collect regular/successful mark entries."""
        try:
            logging.debug("run_mark->start")
            popen = self._popen("python3", "./sct_exec_mark.py")
            (text, data) = self._parse(popen.data)
            ret = Mark(popen.error, text)
            for (_, value) in data.items():
                for entry in value["reg"]:
                    ret.add(*entry, False)
                for entry in value["suc"]:
                    ret.add(*entry, True)
        except Exception as e:
            logging.error("Exception in run_mark: %s", str(e))
            raise  # forward exception
        finally:
            logging.debug("run_mark->finish")
        return ret

    def run(self):
        """Run each enabled stage and return the serialized Output."""
        output = Output()
        if self._global.exec.run:
            output.run = self.run_code()
        if self._global.exec.pytest:
            output.pytest = self.run_pytest()
        if self._global.exec.mark:
            output.mark = self.run_mark()
        return output.serialize()
| 33.566667 | 85 | 0.553128 |
a6ae555dd7aef7d67207803294aec6f50f031d62 | 63 | py | Python | quickfix_doc/datadictionary/__init__.py | connamara/QuickFIX-doc | fa75e27dfada2da12148e9ea67d0ceb6a31f1d46 | [
"DOC"
] | 3 | 2018-12-25T19:49:56.000Z | 2021-07-17T01:41:08.000Z | quickfix_doc/datadictionary/__init__.py | connamara/QuickFIX-doc | fa75e27dfada2da12148e9ea67d0ceb6a31f1d46 | [
"DOC"
] | 1 | 2018-12-07T20:53:31.000Z | 2018-12-07T20:53:31.000Z | quickfix_doc/datadictionary/__init__.py | connamara/QuickFIX-doc | fa75e27dfada2da12148e9ea67d0ceb6a31f1d46 | [
"DOC"
] | 3 | 2020-05-21T03:07:19.000Z | 2021-07-18T03:07:06.000Z | from . import util
from . import fields
from . import messages
| 15.75 | 22 | 0.761905 |
cc6df68f970a3d2bf908e82bda259f22db16094d | 1,315 | py | Python | setup.py | jgstew/file_meta_data | 184a3785560c4c18b8c04094481d456466eeba51 | [
"Apache-2.0"
] | null | null | null | setup.py | jgstew/file_meta_data | 184a3785560c4c18b8c04094481d456466eeba51 | [
"Apache-2.0"
] | null | null | null | setup.py | jgstew/file_meta_data | 184a3785560c4c18b8c04094481d456466eeba51 | [
"Apache-2.0"
] | null | null | null | """
py2app/py2exe build script for MyApplication.
Will automatically ensure that all build prerequisites are available
via ez_setup
Usage (Mac OS X):
python setup.py py2app
Usage (Windows):
python setup.py py2exe
"""
# https://pythonhosted.org/py2app/examples.html#cross-platform
import os
import urllib
# http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python
if os.path.isfile("ez_setup.py"):
urllib.urlretrieve ("http://peak.telecommunity.com/dist/ez_setup.py", "ez_setup.py")
import ez_setup
ez_setup.use_setuptools()
import sys
from setuptools import setup
mainscript = 'file_meta_data.py'
if sys.platform == 'darwin':
extra_options = dict(
setup_requires=['py2app'],
app=[mainscript],
# Cross-platform applications generally expect sys.argv to
# be used for opening files.
options=dict(py2app=dict(argv_emulation=True)),
)
elif sys.platform == 'win32':
extra_options = dict(
setup_requires=['py2exe'],
app=[mainscript],
)
else:
extra_options = dict(
# Normally unix-like platforms will use "setup.py install"
# and install the main script as such
scripts=[mainscript],
)
setup(
name="file_meta_data",
**extra_options
) | 25.288462 | 90 | 0.681369 |
2d9d9c421365c1c9bbaa499e2cdd4cfbe9f1b3fc | 342 | py | Python | trakt/core/decorators.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | null | null | null | trakt/core/decorators.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | 1 | 2019-04-13T10:15:48.000Z | 2019-04-13T10:15:48.000Z | trakt/core/decorators.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | null | null | null | from typing import Any, Callable
from trakt.core.exceptions import NotAuthenticated
def auth_required(f: Callable[..., Any]) -> Callable[..., Any]:
    """Decorator: only call *f* when the client has an authenticated user.

    Raises NotAuthenticated otherwise.
    """
    def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
        if self.client.user:
            return f(self, *args, **kwargs)
        raise NotAuthenticated
    return wrapper
| 24.428571 | 63 | 0.646199 |
0f553fbf39b7fe7f088e0ea6cbd4f21167ba9da8 | 14,112 | py | Python | supervisor/supervisord.py | sfriesel/supervisor | aa7d40e2949043ece15f972a23b28055c687f6fa | [
"ZPL-2.1"
] | 2 | 2017-09-17T21:24:44.000Z | 2019-08-26T03:02:43.000Z | supervisor/supervisord.py | sfriesel/supervisor | aa7d40e2949043ece15f972a23b28055c687f6fa | [
"ZPL-2.1"
] | null | null | null | supervisor/supervisord.py | sfriesel/supervisor | aa7d40e2949043ece15f972a23b28055c687f6fa | [
"ZPL-2.1"
] | 3 | 2021-02-23T08:36:03.000Z | 2021-02-23T08:36:54.000Z | #!/usr/bin/env python
"""supervisord -- run a set of applications as daemons.
Usage: %s [options]
Options:
-c/--configuration FILENAME -- configuration file
-n/--nodaemon -- run in the foreground (same as 'nodaemon true' in config file)
-h/--help -- print this usage message and exit
-v/--version -- print supervisord version number and exit
-u/--user USER -- run supervisord as this user (or numeric uid)
-m/--umask UMASK -- use this umask for daemon subprocess (default is 022)
-d/--directory DIRECTORY -- directory to chdir to when daemonized
-l/--logfile FILENAME -- use FILENAME as logfile path
-y/--logfile_maxbytes BYTES -- use BYTES to limit the max size of logfile
-z/--logfile_backups NUM -- number of backups to keep when max bytes reached
-e/--loglevel LEVEL -- use LEVEL as log level (debug,info,warn,error,critical)
-j/--pidfile FILENAME -- write a pid file for the daemon process to FILENAME
-i/--identifier STR -- identifier used for this instance of supervisord
-q/--childlogdir DIRECTORY -- the log directory for child process logs
-k/--nocleanup -- prevent the process from performing cleanup (removal of
old automatic child log files) at startup.
-a/--minfds NUM -- the minimum number of file descriptors for start success
-t/--strip_ansi -- strip ansi escape codes from process output
--minprocs NUM -- the minimum number of processes available for start success
--profile_options OPTIONS -- run supervisord under profiler and output
results based on OPTIONS, which is a comma-sep'd
list of 'cumulative', 'calls', and/or 'callers',
e.g. 'cumulative,callers')
"""
import os
import time
import errno
import select
import signal
from supervisor.medusa import asyncore_25 as asyncore
from supervisor.options import ServerOptions
from supervisor.options import signame
from supervisor import events
from supervisor.states import SupervisorStates
from supervisor.states import getProcessStateDescription
class Supervisor:
    """Top-level supervisord object (Python 2 codebase).

    Owns the configured process groups and drives them from an
    asyncore-style select() loop: dispatching read/write events,
    reaping children, handling signals and emitting tick events.
    """

    stopping = False # set after we detect that we are handling a stop request
    lastshutdownreport = 0 # throttle for delayed process error reports at stop
    process_groups = None # map of process group name to process group object
    stop_groups = None # list used for priority ordered shutdown
    def __init__(self, options):
        # options: ServerOptions-like object (parsed config + OS helpers)
        self.options = options
        self.process_groups = {}
        # ticks: period -> last emitted timeslice (see tick())
        self.ticks = {}
    def main(self):
        """One-time startup: setuid, rlimits, logger, log cleanup, then run()."""
        if not self.options.first:
            # prevent crash on libdispatch-based systems, at least for the
            # first request
            self.options.cleanup_fds()
        info_messages = []
        critical_messages = []
        warn_messages = []
        setuid_msg = self.options.set_uid()
        if setuid_msg:
            critical_messages.append(setuid_msg)
        if self.options.first:
            rlimit_messages = self.options.set_rlimits()
            info_messages.extend(rlimit_messages)
        warn_messages.extend(self.options.parse_warnings)
        # this sets the options.logger object
        # delay logger instantiation until after setuid
        self.options.make_logger(critical_messages, warn_messages,
                                 info_messages)
        if not self.options.nocleanup:
            # clean up old automatic logs
            self.options.clear_autochildlogdir()
        self.run()
    def run(self):
        """Build process groups, open servers, daemonize if configured,
        then loop in runforever(); cleanup always runs on exit."""
        self.process_groups = {} # clear
        self.stop_groups = None # clear
        events.clear()
        try:
            for config in self.options.process_group_configs:
                self.add_process_group(config)
            self.options.process_environment()
            self.options.openhttpservers(self)
            self.options.setsignals()
            if (not self.options.nodaemon) and self.options.first:
                self.options.daemonize()
            # writing pid file needs to come *after* daemonizing or pid
            # will be wrong
            self.options.write_pidfile()
            self.runforever()
        finally:
            self.options.cleanup()
    def diff_to_active(self, new=None):
        """Compare *new* (or the configured) group configs against the active
        groups; return (added, changed, removed) config lists."""
        if not new:
            new = self.options.process_group_configs
        cur = [group.config for group in self.process_groups.values()]
        curdict = dict(zip([cfg.name for cfg in cur], cur))
        newdict = dict(zip([cfg.name for cfg in new], new))
        added = [cand for cand in new if cand.name not in curdict]
        removed = [cand for cand in cur if cand.name not in newdict]
        # a config counts as changed when a same-named active config differs
        changed = [cand for cand in new
                   if cand != curdict.get(cand.name, cand)]
        return added, changed, removed
    def add_process_group(self, config):
        """Register a group built from *config*; return True if it was new."""
        name = config.name
        if name not in self.process_groups:
            config.after_setuid()
            self.process_groups[name] = config.make_group()
            return True
        return False
    def remove_process_group(self, name):
        """Remove group *name* if all of its processes are stopped;
        return True on success, False if processes are still running."""
        if self.process_groups[name].get_unstopped_processes():
            return False
        del self.process_groups[name]
        return True
    def get_process_map(self):
        """Return a map of file descriptor -> dispatcher over all groups."""
        process_map = {}
        pgroups = self.process_groups.values()
        for group in pgroups:
            process_map.update(group.get_dispatchers())
        return process_map
    def shutdown_report(self):
        """During shutdown, log which processes are still alive (throttled);
        return the list of unstopped processes."""
        unstopped = []
        pgroups = self.process_groups.values()
        for group in pgroups:
            unstopped.extend(group.get_unstopped_processes())
        if unstopped:
            # throttle 'waiting for x to die' reports
            now = time.time()
            if now > (self.lastshutdownreport + 3): # every 3 secs
                names = [ p.config.name for p in unstopped ]
                namestr = ', '.join(names)
                self.options.logger.info('waiting for %s to die' % namestr)
                self.lastshutdownreport = now
                for proc in unstopped:
                    state = getProcessStateDescription(proc.get_state())
                    self.options.logger.blather(
                        '%s state: %s' % (proc.config.name, state))
        return unstopped
    def ordered_stop_groups_phase_1(self):
        """Begin stopping the highest-priority remaining group."""
        if self.stop_groups:
            # stop the last group (the one with the "highest" priority)
            self.stop_groups[-1].stop_all()
    def ordered_stop_groups_phase_2(self):
        """Pop the group started in phase 1 once all its processes stopped;
        otherwise requeue it for another pass."""
        # after phase 1 we've transitioned and reaped, let's see if we
        # can remove the group we stopped from the stop_groups queue.
        if self.stop_groups:
            # pop the last group (the one with the "highest" priority)
            group = self.stop_groups.pop()
            if group.get_unstopped_processes():
                # if any processes in the group aren't yet in a
                # stopped state, we're not yet done shutting this
                # group down, so push it back on to the end of the
                # stop group queue
                self.stop_groups.append(group)
    def runforever(self):
        """Main event loop: select() over sockets and child pipes, dispatch
        read/write events, transition groups, reap children, handle signals
        and emit tick events. Exits by raising asyncore.ExitNow."""
        events.notify(events.SupervisorRunningEvent())
        timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
        socket_map = self.options.get_socket_map()
        while 1:
            combined_map = {}
            combined_map.update(socket_map)
            combined_map.update(self.get_process_map())
            pgroups = self.process_groups.values()
            pgroups.sort()
            if self.options.mood < SupervisorStates.RUNNING:
                if not self.stopping:
                    # first time, set the stopping flag, do a
                    # notification and set stop_groups
                    self.stopping = True
                    self.stop_groups = pgroups[:]
                    events.notify(events.SupervisorStoppingEvent())
                self.ordered_stop_groups_phase_1()
                if not self.shutdown_report():
                    # if there are no unstopped processes (we're done
                    # killing everything), it's OK to swtop or reload
                    raise asyncore.ExitNow
            # collect fds whose dispatchers want read/write readiness
            r, w, x = [], [], []
            for fd, dispatcher in combined_map.items():
                if dispatcher.readable():
                    r.append(fd)
                if dispatcher.writable():
                    w.append(fd)
            try:
                r, w, x = self.options.select(r, w, x, timeout)
            except select.error, err:
                r = w = x = []
                if err.args[0] == errno.EINTR:
                    self.options.logger.blather('EINTR encountered in select')
                else:
                    raise
            for fd in r:
                if combined_map.has_key(fd):
                    try:
                        dispatcher = combined_map[fd]
                        self.options.logger.blather(
                            'read event caused by %(dispatcher)s',
                            dispatcher=dispatcher)
                        dispatcher.handle_read_event()
                    except asyncore.ExitNow:
                        raise
                    except:
                        combined_map[fd].handle_error()
            for fd in w:
                if combined_map.has_key(fd):
                    try:
                        dispatcher = combined_map[fd]
                        self.options.logger.blather(
                            'write event caused by %(dispatcher)s',
                            dispatcher=dispatcher)
                        dispatcher.handle_write_event()
                    except asyncore.ExitNow:
                        raise
                    except:
                        combined_map[fd].handle_error()
            [ group.transition() for group in pgroups ]
            self.reap()
            self.handle_signal()
            self.tick()
            if self.options.mood < SupervisorStates.RUNNING:
                self.ordered_stop_groups_phase_2()
            if self.options.test:
                break
    def tick(self, now=None):
        """ Send one or more 'tick' events when the timeslice related to
        the period for the event type rolls over """
        if now is None:
            # now won't be None in unit tests
            now = time.time()
        for event in events.TICK_EVENTS:
            period = event.period
            last_tick = self.ticks.get(period)
            if last_tick is None:
                # we just started up
                last_tick = self.ticks[period] = timeslice(period, now)
            this_tick = timeslice(period, now)
            if this_tick != last_tick:
                self.ticks[period] = this_tick
                events.notify(event(this_tick, self))
    def reap(self, once=False):
        """Reap one exited child (recursively all of them unless *once*),
        notifying the owning process object of its exit status."""
        pid, sts = self.options.waitpid()
        if pid:
            process = self.options.pidhistory.get(pid, None)
            if process is None:
                self.options.logger.critical('reaped unknown pid %s)' % pid)
            else:
                process.finish(pid, sts)
                del self.options.pidhistory[pid]
            if not once:
                self.reap() # keep reaping until no more kids to reap
    def handle_signal(self):
        """Translate a pending OS signal into a mood change or log action."""
        sig = self.options.get_signal()
        if sig:
            if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
                self.options.logger.warn(
                    'received %s indicating exit request' % signame(sig))
                self.options.mood = SupervisorStates.SHUTDOWN
            elif sig == signal.SIGHUP:
                self.options.logger.warn(
                    'received %s indicating restart request' % signame(sig))
                self.options.mood = SupervisorStates.RESTARTING
            elif sig == signal.SIGCHLD:
                # actual reaping happens in reap(), driven by the main loop
                self.options.logger.debug(
                    'received %s indicating a child quit' % signame(sig))
            elif sig == signal.SIGUSR2:
                self.options.logger.info(
                    'received %s indicating log reopen request' % signame(sig))
                self.options.reopenlogs()
                for group in self.process_groups.values():
                    group.reopenlogs()
            else:
                self.options.logger.blather(
                    'received %s indicating nothing' % signame(sig))
    def get_state(self):
        """Return the current supervisor mood/state."""
        return self.options.mood
def timeslice(period, when):
    """Return the start of the *period*-length slice containing *when*."""
    return int(when // period * period)
# profile entry point
def profile(cmd, globals, locals, sort_order, callers):
try:
import cProfile as profile
except ImportError:
import profile # python < 2.5
import pstats
import tempfile
fd, fn = tempfile.mkstemp()
try:
profile.runctx(cmd, globals, locals, fn)
stats = pstats.Stats(fn)
stats.strip_dirs()
# calls,time,cumulative and cumulative,calls,time are useful
stats.sort_stats(*sort_order or ('cumulative', 'calls', 'time'))
if callers:
stats.print_callers(.3)
else:
stats.print_stats(.3)
finally:
os.remove(fn)
# Main program
def main(args=None, test=False):
    """Parse options and run supervisord, restarting on SIGHUP until shutdown."""
    assert os.name == "posix", "This code makes Unix-specific assumptions"
    first = True
    while True:
        # A fresh ServerOptions per pass re-reads the configuration,
        # which is what makes SIGHUP-driven restarts pick up changes.
        options = ServerOptions()
        options.realize(args, doc=__doc__)
        options.first = first
        options.test = test
        if options.profile_options:
            sort_order, callers = options.profile_options
            profile('go(options)', globals(), locals(), sort_order, callers)
        else:
            go(options)
        if test or options.mood < SupervisorStates.RESTARTING:
            break
        options.close_httpservers()
        options.close_logger()
        first = False
def go(options):
    """Build a Supervisor from *options* and run it until it asks to exit."""
    supervisor = Supervisor(options)
    try:
        supervisor.main()
    except asyncore.ExitNow:
        # Normal termination signal from the event loop.
        pass

if __name__ == "__main__":
    main()
| 37.531915 | 79 | 0.581633 |
29fb03591fc5cf4903893860eb21b0af3b10b561 | 363 | py | Python | test/acceptance/squad_test/squad_test.py | guillaume-chevalier/ReuBERT | 86f115f651e0613047a7e319fdb0a5d9ec6f9292 | [
"MIT"
] | 48 | 2019-04-27T01:26:18.000Z | 2022-03-21T08:01:06.000Z | test/acceptance/squad_test/squad_test.py | h2k/ReuBERT | 86f115f651e0613047a7e319fdb0a5d9ec6f9292 | [
"MIT"
] | 4 | 2019-04-27T01:05:31.000Z | 2020-03-06T00:25:37.000Z | test/acceptance/squad_test/squad_test.py | h2k/ReuBERT | 86f115f651e0613047a7e319fdb0a5d9ec6f9292 | [
"MIT"
] | 12 | 2019-04-21T04:53:49.000Z | 2020-08-13T08:02:16.000Z | import json
import os
def load_json_file_test(json_name):
    """Load and parse a JSON file located next to this module."""
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), json_name)
    with open(path, encoding="utf8") as handle:
        return json.load(handle)
# Loaded once at import time; expects squad_questions_beautified.json beside this module.
SQUAD_DATA_SET = load_json_file_test('squad_questions_beautified.json')
class TestSquad():
    """Placeholder acceptance-test class for the SQuAD question data set."""
    @classmethod
    def setup_class(cls):
        # No shared setup is needed yet.
        pass
| 20.166667 | 114 | 0.732782 |
df9fb98db1faef9ced0dd7ef2d1900e6fb4f23fc | 641 | py | Python | tests/Exscript/servers/TelnetdTest.py | saveshodhan/exscript | 72718eee3e87b345d5a5255be9824e867e42927b | [
"MIT"
] | 226 | 2015-01-20T19:59:06.000Z | 2022-01-02T11:13:01.000Z | tests/Exscript/servers/TelnetdTest.py | saveshodhan/exscript | 72718eee3e87b345d5a5255be9824e867e42927b | [
"MIT"
] | 155 | 2015-01-02T07:56:27.000Z | 2022-01-09T20:56:19.000Z | tests/Exscript/servers/TelnetdTest.py | saveshodhan/exscript | 72718eee3e87b345d5a5255be9824e867e42927b | [
"MIT"
] | 114 | 2015-01-03T11:48:17.000Z | 2022-01-26T02:50:43.000Z | from __future__ import absolute_import
import sys
import unittest
import re
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from .ServerTest import ServerTest
from Exscript.servers import Telnetd
from Exscript.protocols import Telnet
class TelnetdTest(ServerTest):
CORRELATE = Telnetd
def _create_daemon(self):
self.daemon = Telnetd(self.host, self.port, self.device)
def _create_client(self):
return Telnet()
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TelnetdTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 23.740741 | 71 | 0.74103 |
9dc319f96ecbdc44d9fa5f4ecce47d159904af36 | 1,497 | py | Python | tests/test_summary_via_file.py | svven/summary | 1cd7cb9ab89782c729bfd768df0c6d1483131dc3 | [
"MIT"
] | 12 | 2015-10-15T13:27:27.000Z | 2020-02-22T05:38:33.000Z | tests/test_summary_via_file.py | svven/summary | 1cd7cb9ab89782c729bfd768df0c6d1483131dc3 | [
"MIT"
] | 10 | 2015-01-09T13:37:06.000Z | 2021-12-13T19:45:35.000Z | tests/test_summary_via_file.py | svven/summary | 1cd7cb9ab89782c729bfd768df0c6d1483131dc3 | [
"MIT"
] | 3 | 2015-12-19T07:42:21.000Z | 2020-04-03T14:44:38.000Z | import ssl
from summary import Summary
import os
def test_summary():
testdata = get_test_data()
passed = 0
failed = 0
errors = ""
print ssl.OPENSSL_VERSION
for line in testdata:
if line.startswith('url') or line.strip().startswith('#') or not line.strip():
continue
url, img, title, desc = (line.strip().split('|') + [None]*99)[:4]
print "Testing %(url)s %(img)s %(title)s %(desc)s" % locals()
summ = Summary(url)
summ.extract()
testpassed = True
if img:
if not summ.image or summ.image.url != unicode(img):
testpassed = False
errors += "%s bad image %s\n" % (url, summ.image and summ.image.url)
if title:
if summ.title != unicode(title, 'utf-8'):
testpassed = False
errors += "%s bad title %s\n" % (url, summ.title)
if desc:
if summ.description != unicode(desc, 'utf-8'):
testpassed = False
errors += "%s bad desc %s\n" % (url, summ.description)
if testpassed:
passed += 1
else:
failed +=1
print "Passed %(passed)s, Failed %(failed)s" % locals()
if errors: print errors
assert failed == 0
def get_test_data():
testfile = './data.txt'
testpath = os.path.join(os.path.dirname(__file__), testfile)
with open (testpath, 'r') as myfile:
data = myfile.readlines()
return data | 28.245283 | 86 | 0.540414 |
a4d233e12d4f006ce1fb63091523ee887f14970b | 527 | py | Python | dynet_modules/call_mlp.py | saikrishnarallabandi/clustergen_steroids | b05adb2a5570d5372c6197e045ba3d0bce1a094f | [
"Apache-2.0"
] | null | null | null | dynet_modules/call_mlp.py | saikrishnarallabandi/clustergen_steroids | b05adb2a5570d5372c6197e045ba3d0bce1a094f | [
"Apache-2.0"
] | null | null | null | dynet_modules/call_mlp.py | saikrishnarallabandi/clustergen_steroids | b05adb2a5570d5372c6197e045ba3d0bce1a094f | [
"Apache-2.0"
] | null | null | null | from MLP import OneLayerMLP
import dynet as dy

# Demo script (Python 2): build, use, save and reload a small MLP together
# with a lookup-parameter embedding table, then verify the reloaded model
# reproduces the original output exactly.
m = dy.ParameterCollection()
# create an embedding table.
E = m.add_lookup_parameters((1000,10))
# create an MLP from 10 to 4 with a hidden layer of 20.
mlp = OneLayerMLP(m, 10, 20, 4, dy.rectify)

# use them together.
output = mlp(E[3])

# now save the model:
dy.save("basename",[mlp, E])

# now load:
m2 = dy.ParameterCollection()
mlp2, E2 = dy.load("basename", m2)
output2 = mlp2(E2[3])

import numpy
# The reloaded model must give byte-identical output for the same input.
assert(numpy.array_equal(output2.npvalue(), output.npvalue()))
print "Done"
| 22.913043 | 62 | 0.711575 |
1026589e0b0904883b1244fea3be1b932f6c4886 | 40,984 | py | Python | srdense/proj_utils/local_utils.py | ypxie/SuperRes | 1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1 | [
"MIT"
] | 5 | 2019-01-04T06:50:18.000Z | 2019-02-13T12:02:24.000Z | srdense/proj_utils/local_utils.py | ypxie/SuperRes | 1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1 | [
"MIT"
] | null | null | null | srdense/proj_utils/local_utils.py | ypxie/SuperRes | 1dded37fc24d99ca32cef88e8ccc3f2f0a3738c1 | [
"MIT"
] | 1 | 2019-08-24T20:56:53.000Z | 2019-08-24T20:56:53.000Z | # -*- coding: utf-8 -*-
import numpy as np
import os, math
from PIL import Image
from sklearn.neighbors import NearestNeighbors
import scipy
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import skimage, skimage.morphology
from PIL import Image, ImageDraw
from scipy.ndimage.interpolation import rotate
from skimage import color, measure
import re
import scipy.ndimage
from numba import jit, autojit
import random, shutil
import scipy.misc as misc
def get(identifier):
    # Resolve *identifier* to an object defined in this module.
    # NOTE(review): ``get_from_module`` is neither defined nor imported in the
    # visible code -- calling this will raise NameError unless it is supplied
    # elsewhere. Confirm the intended helper (Keras-style generic_utils?).
    return get_from_module(identifier, globals(), 'local_utils')
def mkdirs(folders, erase=False):
    """Create each directory in *folders* (a single path or a list of paths).

    Existing directories are left untouched unless *erase* is True, in
    which case they are deleted and recreated empty.
    """
    folder_list = folders if type(folders) is list else [folders]
    for path in folder_list:
        if os.path.exists(path):
            if erase:
                shutil.rmtree(path)
                os.makedirs(path)
        else:
            os.makedirs(path)
class myobj(object):
    # Empty attribute container, used as a simple namespace object.
    pass
def process_sent(this_sent):
    """Surround *this_sent* with the ' <start> ' and ' <eos> ' markers."""
    return ''.join([' <start> ', this_sent, ' <eos> '])
def split_words(words):
    """Tokenize *words*: underscores become spaces, then split into
    word and punctuation tokens."""
    return re.findall(r'\w+|\S+', words.replace('_', ' '))
def auc(x, y, reorder=False):
    """Area under a sampled curve, computed with the trapezoidal rule.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates of the samples.
    y : array, shape = [n]
        y coordinates of the samples.
    reorder : bool, optional (default=False)
        Sort the points by x (ties broken by y) before integrating, as is
        appropriate for an ROC curve.

    Returns
    -------
    float
        The area.  A curve sampled with strictly decreasing x integrates
        with direction -1 so the magnitude matches ascending order.

    Raises
    ------
    ValueError
        If fewer than two points are given, or if (without *reorder*) the
        x values are neither non-decreasing nor non-increasing.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    if reorder:
        # Sort primarily on x, breaking ties with y.
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
        direction = 1
    else:
        dx = np.diff(x)
        if not np.any(dx < 0):
            direction = 1
        elif np.all(dx <= 0):
            direction = -1
        else:
            raise ValueError("Reordering is not turned on, and "
                             "the x array is not increasing: %s" % x)

    area = direction * np.trapz(y, x)
    if isinstance(area, np.memmap):
        # np.trapz on memmap input can hand back a memmap instead of a
        # scalar; collapse it to a plain scalar of the same dtype.
        area = area.dtype.type(area)
    return area
def normalize_img(X):
    """Linearly rescale *X* onto [0, 255] and return it as uint8.

    A tiny epsilon keeps the division defined for constant inputs
    (constant arrays map to all zeros).
    """
    lo = np.min(X)
    hi = np.max(X)
    scaled = (X - lo) / (hi - lo + 1e-9)
    return (scaled * 255).astype(np.uint8)
def imread(imgfile):
    """Read the image at *imgfile* into a numpy array.

    Raises AssertionError if the path does not exist.
    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 -- confirm
    the pinned SciPy version still provides it.
    """
    assert os.path.exists(imgfile), '{} does not exist!'.format(imgfile)
    rmgimg = scipy.misc.imread(imgfile)
    return rmgimg
def writeImg(array, savepath):
    # Save *array* as an image file at *savepath*.
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 -- confirm
    # the pinned SciPy version still provides it.
    scipy.misc.imsave(savepath, array)
    #cv2.imwrite(savepath, array)
def imresize(img, resizeratio=1):
    '''Take care of cv2 reshape squeeze behevaior'''
    # Scale both spatial dimensions of *img* by *resizeratio*; a ratio of 1
    # returns the input untouched.  NOTE(review): scipy.misc.imresize was
    # removed in SciPy >= 1.2 -- confirm the pinned version provides it.
    if resizeratio == 1:
        return img
    #outshape = ( int(img.shape[1] * resizeratio) , int(img.shape[0] * resizeratio))
    #temp = cv2.resize(img, outshape).astype(float)
    outshape = ( int(img.shape[0] * resizeratio) , int(img.shape[1] * resizeratio))
    temp = misc.imresize(img, size=outshape).astype(float)
    # Restore the trailing singleton channel axis that the resize squeezed.
    if len(img.shape) == 3 and img.shape[2] == 1:
        temp = np.reshape(temp, temp.shape + (1,))
    return temp
def imresize_shape(img, outshape):
    # Resize *img* to the exact (rows, cols) given by *outshape*.  A 4-D
    # input is treated as a batch and each image is resized independently.
    if len(img.shape) == 4:
        img_out = []
        for this_img in img:
            img_out.append( imresize_shape(this_img, outshape) )
        return np.stack(img_out, axis=0)
    img = img.astype(np.float32)
    outshape = ( int(outshape[0]) , int(outshape[1]) )
    # Already the requested size: skip the interpolation entirely.
    if (img.shape[0], img.shape[1]) == outshape:
        return img
    #temp = cv2.resize(img, (outshape[1], outshape[0]))
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.2.
    temp = misc.imresize(img, size=outshape, interp='bilinear').astype(float)
    return temp
def pre_process_img(img, yuv = False, mode = 'vanilla', norm = True):
    """Normalize an image for network input.

    Parameters
    ----------
    img : ndarray, (H, W) or (H, W, C)
        Input image; a 2-D input gains a trailing channel axis.
    yuv : bool
        Convert from RGB to YUV first (requires the RGB2YUV helper).
    mode : {'channel', 'whole', 'rescale', 'vanilla'}
        'channel'  -- standardize each channel independently (in place).
        'whole'    -- standardize with the global mean/std.
        'rescale'  -- min-max scale to [0, 1], then subtract the mean.
        'vanilla'  -- map [0, 255] to [-1, 1].
    norm : bool
        When False, only the dtype/axis preparation is applied.

    Raises
    ------
    Exception
        For an unrecognized *mode*.
    """
    if yuv:
        img = RGB2YUV(img)
    img = img.astype('float32')
    if len(img.shape) == 2:
        img = np.expand_dims(img, axis=-1)
    if not norm:
        return img
    if mode == 'channel':
        for ch in range(img.shape[2]):
            channel = img[:, :, ch]
            mu = np.mean(channel)
            sigma = np.std(channel)
            img[:, :, ch] = (channel - mu) / (sigma + 1e-8)
    elif mode == 'whole':
        mu, sigma = np.mean(img[:]), np.std(img[:])
        img = (img - mu) / sigma
    elif mode == 'rescale':
        lo, hi = np.min(img[:]), np.max(img[:])
        img = (img - lo) / (hi - lo + 1e-8)
        img = img - np.mean(img)
    elif mode == 'vanilla':
        img = img.astype(np.float32) / 255
        img = (img - 0.5) / 0.5
    else:
        raise Exception('Unknown mode for pre_processing')
    return img
def mysqueeze(a, axis = None):
    """Squeeze *a* along *axis*, tolerating non-singleton axes.

    Unlike np.squeeze, requesting an axis whose length is not 1 returns the
    array unchanged instead of raising.
    """
    # IDIOM FIX: compare to None with `is`, not `==`.
    if axis is None:
        return np.squeeze(a)
    if a.shape[axis] != 1:
        return a
    return np.squeeze(a, axis=axis)
def getImg_from_Grid(grid_vec, patchsize):
    # Reassemble full images from a grid of flattened patches.
    # grid_vec: (n_imgs, grid_rows, grid_cols, patchRow*patchCol*C);
    # returns (n_imgs, grid_rows*patchRow, grid_cols*patchCol[, C]) with the
    # channel axis squeezed away for single-channel data.
    patchRow, patchCol = patchsize
    indx = -1
    imgchannel = int(grid_vec.shape[-1]//(patchRow*patchCol))
    numberofImg = grid_vec.shape[0]
    gridshape = (grid_vec[0,:,:,:].shape[0],grid_vec[0,:,:,:].shape[1])
    imgs = np.zeros((grid_vec.shape[0], gridshape[0]*patchRow, gridshape[1]*patchCol, imgchannel ))
    imgs = mysqueeze(imgs, axis = -1)
    for imgidx in range(numberofImg):
        for colid in range(gridshape[1]):
            for rowid in range(gridshape[0]):
                indx = indx + 1
                this_vec = grid_vec[imgidx,rowid,colid,:]
                # Un-flatten the vector back into a (patchRow, patchCol, C) tile.
                this_patch = np.reshape(this_vec, (patchRow,patchCol,imgchannel ))
                this_patch = mysqueeze(this_patch,axis = -1)
                startRow, endRow = rowid *patchRow, (rowid+1)*patchRow
                startCol, endCol = colid *patchCol, (colid+1)*patchCol
                #print this_patch.shape
                imgs[imgidx,startRow:endRow,startCol: endCol] = this_patch
                #imshow(img)
    return imgs
def getmesh_zigzag(RowPts,ColPts):
    """RowPts means the index of row coordinates,
    ColPts means the index of col coordinates

    Columns are traversed boustrophedon-style: even-indexed columns walk
    the rows forward, odd-indexed columns walk them backward.
    """
    row_coords, col_coords = [], []
    for colidx in ColPts:
        this_col_rows = RowPts if colidx % 2 == 0 else RowPts[::-1]
        row_coords.extend(this_col_rows)
        col_coords.extend([colidx] * len(RowPts))
    return np.asarray(row_coords), np.asarray(col_coords)
def getmesh(RowPts,ColPts):
    """RowPts means the index of row coordinates,
    ColPts means the index of col coordinates

    Every column visits the rows in forward order (column-major sweep).
    """
    row_coords = [r for _ in ColPts for r in RowPts]
    col_coords = [c for c in ColPts for _ in RowPts]
    return np.asarray(row_coords), np.asarray(col_coords)
def getfileinfo(imgdir, contourextList, ImgExtList, LabelExt, test_mode = False):
    '''return a list of dictionary {'thisfile':os.path.join(imgdir,f), 'thismatfile':thismatfile}

    Pairs every image in *imgdir* (extension in ImgExtList) with an
    annotation file named <stem><contourext><LabelExt>; the first matching
    contour extension wins.  In test_mode no annotation is required and the
    'thismatfile'/'mat_name' entries are None.
    '''
    alllist = [f for f in os.listdir(imgdir)]
    alllist = sorted(alllist)
    returnList = []
    for f in alllist:
        if os.path.isfile(os.path.join(imgdir,f)) and \
           os.path.splitext(f)[1] in ImgExtList:
            if test_mode is False:
                flag = 0
                for contourext in contourextList:
                    thismatfile = os.path.join(imgdir,os.path.splitext(f)[0] + contourext + LabelExt)
                    if os.path.isfile(thismatfile):
                        this_dict = {'thisfile':os.path.join(imgdir,f), 'thismatfile':thismatfile}
                        this_dict['file_name'] = f
                        this_dict['mat_name'] = os.path.splitext(f)[0] + contourext + LabelExt
                        returnList.append(this_dict)
                        flag = 1
                        break
                if flag == 0:
                    # Image without any matching annotation: report and skip it.
                    print(("Image: {s} does not have matfile".format(s = os.path.splitext(f)[0] )))
            else:
                this_dict = {'thisfile':os.path.join(imgdir,f), 'thismatfile':None}
                this_dict['file_name'] = f
                this_dict['mat_name'] = None
                returnList.append(this_dict)
    return returnList
def yieldfileinfo(imgdir, contourextList,ImgExtList,LabelExt):
    # Same image/annotation pairing as getfileinfo, but each entry carries
    # only the 'thisfile' and 'thismatfile' keys.
    # NOTE(review): despite the name, this returns a list -- it does not yield.
    alllist = [f for f in os.listdir(imgdir)]
    alllist = sorted(alllist)
    #absfilelist = [];
    #absmatfilelist = [];
    dict_lsit = []
    returnList = []
    for f in alllist:
        if os.path.isfile(os.path.join(imgdir,f)) and \
           os.path.splitext(f)[1] in ImgExtList:
            flag = 0
            returnDict = {}
            for contourext in contourextList:
                thismatfile = os.path.join(imgdir,os.path.splitext(f)[0] + contourext + LabelExt)
                if os.path.isfile(thismatfile):
                    #absmatfilelist.append(thismatfile)
                    #absfilelist.append(os.path.join(imgdir,f))
                    returnDict['thisfile'] = os.path.join(imgdir,f)
                    returnDict['thismatfile'] = thismatfile
                    #returnList.append({'thisfile':os.path.join(imgdir,f), 'thismatfile':thismatfile})
                    dict_lsit.append(returnDict)
                    flag = 1
                    break
            if flag == 0:
                # Image without any matching annotation: report and skip it.
                print(("Image: {s} does not have matfile".format(s = os.path.splitext(f)[0] )))
    return dict_lsit
def getFromFolderList(subfolder_list, number_list = -1, contourextList = '',
                      ImgExtList = '.png',LabelExt = '.mat'):
    '''
    subfolder_list: the folder that contain the images, it is a list of folders.
    number_list: the number of images you wanna take
    '''
    # Fixed seed keeps the per-folder random sampling reproducible.
    random.seed(4)
    if type(subfolder_list) != list:
        subfolder_list = [subfolder_list]
    if type(number_list) != list:
        number_list = [number_list]
    if len(number_list) == 1:
        # Broadcast a single requested count to every folder.
        number_list = number_list * len(subfolder_list)
    returnDict_list = []
    for imgdir, num_img in zip(subfolder_list, number_list):
        alllist = [f for f in os.listdir(imgdir)]
        if len(subfolder_list) == 1 and len(alllist) < num_img:
            return None
        # because we want to terminate when the number of image is larger than this.
        total_num = len(alllist)
        list_of_file = list(range(total_num))
        random.shuffle(list_of_file)
        img_count = 0
        for file_ind in list_of_file:
            returnDict = {}
            f = alllist[file_ind]
            if os.path.isfile(os.path.join(imgdir,f)) and \
               os.path.splitext(f)[1] in ImgExtList:
                flag = 0
                for contourext in contourextList:
                    thismatfile = os.path.join(imgdir,os.path.splitext(f)[0] + \
                                   contourext + LabelExt)
                    if os.path.isfile(thismatfile):
                        returnDict['thisfile'] = os.path.join(imgdir,f)
                        returnDict['thismatfile'] = thismatfile
                        returnDict_list.append(returnDict)
                        flag = 1
                        img_count += 1
                        break
                if flag == 0:
                    # Image without any matching annotation: report and skip it.
                    print(("Image: {s} does not have matfile".format(s = os.path.splitext(f)[0] )))
            # Stop once the requested number of pairs has been collected.
            if num_img > 0 and img_count == num_img:
                break
    return returnDict_list
def getfilelist(Imagefolder, inputext, with_ext=False):
    '''inputext: ['.json'] '''
    ext_list = inputext if type(inputext) is list else [inputext]
    filelist = []
    filenames = []
    for entry in sorted(os.listdir(Imagefolder)):
        full_path = os.path.join(Imagefolder, entry)
        base, ext = os.path.splitext(entry)
        if ext not in ext_list or not os.path.isfile(full_path):
            continue
        filelist.append(full_path)
        # Record either the bare stem or the full filename, as requested.
        filenames.append(entry if with_ext is True else base)
    return filelist, filenames
def getfolderlist(Imagefolder):
    '''Return (paths, names) of the immediate sub-directories, sorted by name.'''
    folder_list = []
    folder_names = []
    for entry in sorted(os.listdir(Imagefolder)):
        candidate = os.path.join(Imagefolder, entry)
        if not os.path.isdir(candidate):
            continue
        folder_list.append(candidate)
        folder_names.append(entry)
    return folder_list, folder_names
def find(logicalMatrix):
    """Return the flat indices of the True entries of *logicalMatrix*
    (MATLAB-style find)."""
    flat_index = np.arange(0, logicalMatrix.size)
    return flat_index[logicalMatrix.flatten()]
def imshow(img, size=None):
    # Display *img* in a new matplotlib figure; *size* is an optional
    # (width, height) figure size in inches.  Blocks on plt.show().
    if size is not None:
        plt.figure(figsize = size)
    else:
        plt.figure()
    plt.imshow(img)
    plt.show()
def fast_Points2Patches(Patches,centerIndx, img, patchsize):
    # Fill the pre-allocated *Patches* buffer, shape (n, patchRow*patchCol*C),
    # with flattened patches centered on the flat pixel indices *centerIndx*.
    # The image is mirror-padded so border-centered patches stay in bounds.
    totalsub = np.unravel_index(centerIndx, [img.shape[0],img.shape[1]])
    numberofInd = len(centerIndx)
    #Patches = np.zeros(numberofInd, np.prod(patchsize)*img.shape[2])
    if len(img.shape) == 2:
        img = img[:,:,None]
    npad3 = ((patchsize[0],patchsize[0]),(patchsize[1],patchsize[1]),(0,0))
    img = np.pad(img,npad3, 'symmetric')
    # Shift the center coordinates to account for the padding added above.
    centralRow = totalsub[0][:] + patchsize[0]
    centralCol = totalsub[1][:] + patchsize[1]
    se = CentralToOrigin(centralRow, centralCol,patchsize[0],patchsize[1])
    for i in range(numberofInd):
        Patches[i,:] = img[se['RS'][i] : se['RE'][i], se['CS'][i]:se['CE'][i],:].copy().flatten()
def knnsearch(seeds, pints, K):
    """For each query point in *pints*, find its *K* nearest neighbors
    among *seeds*.

    Both inputs are (N, dim) arrays; returns (index, distance) arrays of
    shape (len(pints), K).
    """
    model = NearestNeighbors(n_neighbors=K)
    model.fit(seeds)
    distance, index = model.kneighbors(pints, return_distance=True)
    return index, distance
def Points2Patches(centerIndx, img, patchsize):
    # Extract flattened patches centered on the flat pixel indices
    # *centerIndx*, returning an (n, patchRow*patchCol*C) array.  The image
    # is mirror-padded so border-centered patches stay in bounds.
    totalsub = np.unravel_index(centerIndx, [img.shape[0],img.shape[1]])
    numberofInd = len(centerIndx)
    if len(img.shape) == 2:
        img = img[:,:,None]
    Patches = np.zeros((numberofInd, np.prod(patchsize)*img.shape[2]))
    npad3 = ((patchsize[0],patchsize[0]),(patchsize[1],patchsize[1]),(0,0))
    img = np.pad(img,npad3, 'symmetric')
    # Shift the center coordinates to account for the padding added above.
    centralRow = totalsub[0][:] + patchsize[0]
    centralCol = totalsub[1][:] + patchsize[1]
    se = CentralToOrigin(centralRow, centralCol,patchsize[0],patchsize[1])
    for i in range(numberofInd):
        Patches[i,:] = img[se['RS'][i] : se['RE'][i], se['CS'][i]:se['CE'][i],:].copy().flatten()
        #imshow(img[se['RS'][i] : se['RE'][i], se['CS'][i]:se['CE'][i],:][...,0])
        # tmp = img[:,:,0].copy() #np.zeros((img.shape[0], img.shape[1]))
        # tmp[se['RS'][i] : se['RE'][i], se['CS'][i]:se['CE'][i]] = 255
        # #tmp = scipy.ndimage.morphology.grey_dilation(tmp,(3,3) )
        # imshow(tmp)
    return Patches
def CentralToOrigin(centralRow, centralCol,Rowsize,Colsize):
    """Convert a patch center into half-open start/end slice bounds.

    Returns a dict with 'RS'/'RE' (row start/end) and 'CS'/'CE'
    (col start/end); the end bounds are exclusive, so
    bounds span exactly Rowsize rows and Colsize cols.
    """
    up = int(Rowsize/2)
    down = Rowsize - up - 1
    left = int(Colsize/2)
    right = Colsize - left - 1
    return {
        'RS': int(centralRow - up),
        'RE': int(centralRow + down + 1),  # +1: Python slices exclude the end
        'CS': int(centralCol - left),
        'CE': int(centralCol + right + 1),
    }
def OriginToCentral(OrigRow, OrigCol,Rowsize,Colsize):
    """Convert a patch's top-left corner to its center coordinates
    ('RC' row-center, 'CC' col-center)."""
    return {
        'RC': int(OrigRow + int(Rowsize/2)),
        'CC': int(OrigCol + int(Colsize/2)),
    }
def patchflow(Img,chunknum,row,col,channel,**kwargs):
    """Yield flattened (row, col, channel) patches centered on every pixel
    of *Img*, in chunks of at most *chunknum* patches.

    Each yielded array has shape (k, row*col*channel) with k <= chunknum;
    the yielded view aliases a reused buffer, so copy it if you keep it.
    """
    # BUG FIX: np.ones(Img.shape[0], Img.shape[1]) passed the column count
    # as the dtype argument; the shape must be a tuple.
    pixelind = find(np.ones((Img.shape[0], Img.shape[1])) == 1)
    Totalnum = len(pixelind)
    # BUG FIX: np.floor returns a float, which range() rejects; integer
    # ceiling division yields the chunk count directly.
    numberofchunk = (Totalnum + chunknum - 1) // chunknum
    Chunkfile = np.zeros((chunknum, row*col*channel))
    chunkstart = 0
    for chunkidx in range(numberofchunk):
        thisnum = min(chunknum, Totalnum - chunkidx*chunknum)
        thisInd = pixelind[chunkstart: chunkstart + thisnum]
        fast_Points2Patches(Chunkfile[0:thisnum,:],thisInd, Img, (row,col))
        chunkstart += thisnum
        yield Chunkfile[0:thisnum,:]
def Indexflow(Totalnum, batch_size, random=True):
    """Yield index arrays covering range(Totalnum) in chunks of *batch_size*.

    With random=True the indices come from a fresh permutation; otherwise
    they are yielded in ascending order.  The final chunk may be smaller.
    """
    numberofchunk = int(Totalnum + batch_size - 1)// int(batch_size) # the floor
    #Chunkfile = np.zeros((batch_size, row*col*channel))
    # BUG FIX: the np.int alias was removed in NumPy 1.24; use the explicit
    # int64 dtype instead.
    totalIndx = np.arange(Totalnum).astype(np.int64)
    if random is True:
        totalIndx = np.random.permutation(totalIndx)
    chunkstart = 0
    for chunkidx in range(int(numberofchunk)):
        thisnum = min(batch_size, Totalnum - chunkidx*batch_size)
        thisInd = totalIndx[chunkstart: chunkstart + thisnum]
        chunkstart += thisnum
        yield thisInd
def batchflow(batch_size, *Data):
    """Yield aligned mini-batches drawn from every array in *Data*.

    All arrays must share the same first dimension (not checked).  Each
    step yields a single batch when one array is given, otherwise a tuple
    of batches in input order.
    """
    total = Data[0].shape[0]
    for batch_idx in Indexflow(total, batch_size):
        selected = tuple(arr[batch_idx, ...] for arr in Data)
        if len(selected) == 1:
            yield selected[0]
        else:
            yield selected
@autojit
def overlayImg(img, mask,print_color =[5,119,72],linewidth= 1, alpha = 0.618,savepath = None):
    """Blend the binary *mask* over *img* in *print_color* and return the
    result as a uint8 image (optionally also saving it to *savepath*).

    The blend is done in HSV space: the mask layer's hue replaces the
    image hue and its saturation is scaled by *alpha*.
    """
    #img = img_as_float(data.camera())
    rows, cols = img.shape[0:2]
    # Construct a colour image to superimpose
    color_mask = np.zeros((rows, cols, 3))
    assert len(mask.shape) == 2,'mask should be of dimension 2'
    color_mask[mask == 1] = print_color
    color_mask[mask == 0] = img[mask == 0]
    #imshow(color_mask)
    if len(img.shape) == 2:
        img_color = np.dstack((img, img, img))
    else:
        img_color = img
    img_hsv = color.rgb2hsv(img_color)
    color_mask_hsv = color.rgb2hsv(color_mask)
    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
    img_masked = color.hsv2rgb(img_hsv)
    # Display the output
    #f, (ax0, ax1, ax2) = plt.subplots(1, 3,
    #                                  subplot_kw={'xticks': [], 'yticks': []})
    #ax0.imshow(img, cmap=plt.cm.gray)
    #ax1.imshow(color_mask)
    #ax2.imshow(img_masked)
    #plt.show()
    # Rescale to the full [0, 255] range before the uint8 cast.
    img_masked = np.asarray((img_masked/np.max(img_masked) ) * 255, dtype = np.uint8)
    if savepath is not None:
        im = Image.fromarray(img_masked)
        im.save(savepath)
    #im = Image.fromarray(img_masked)
    #im.save(savepath)
    return img_masked
@jit
def _combine_markers(label_img, coordinates):
    #first we remove all the label_img region contain coordinates
    # ...then append a centroid marker for every remaining (unseeded)
    # region, so each labeled region contributes exactly one marker.
    tmp_img = label_img.copy()
    num_obj = np.max(tmp_img)
    for ind in range(1, num_obj+1):
        for j in range(coordinates.shape[0]):
            if tmp_img[coordinates[j,0], coordinates[j,1]] == ind:
                tmp_img[tmp_img==ind] = 0
                break
    new_num = np.max(tmp_img)
    # NOTE(review): label2contour returns a (masked_img, contourlist) tuple;
    # this code iterates the return value as if it were the contour list
    # alone -- verify against the intended label2contour version.
    rest_contour = label2contour(tmp_img, returnImg =False)
    old_num = coordinates.shape[0]
    total_num = len(rest_contour) + old_num
    new_coord = np.zeros((total_num, 2))
    new_coord[0:old_num] = coordinates
    for ind, this_contour in enumerate(rest_contour):
        # Use the mean of the boundary points as the region's centroid.
        new_coord[old_num+ind] = np.asarray([np.mean(this_contour[:,0]), np.mean(this_contour[:,1]) ])
    return new_coord.astype(np.int)
@jit
def combine_markers(label_img, coordinates):
    """Merge detected marker coordinates with labeled regions.

    For every labeled region: a region containing at most one seed from
    *coordinates* contributes its centroid; a region containing several
    seeds keeps those seeds as-is.  Returns an (n, 2) int array of markers.
    """
    num_obj = np.max(label_img)
    # BUG FIX: `regionprops` is not imported by name in this module (only
    # skimage.measure is); call it through the imported namespace.
    regions = measure.regionprops(label_img)
    seedmap = np.zeros_like(label_img, dtype=bool)
    seedmap[coordinates[:,0], coordinates[:,1]] = True
    max_num = num_obj + coordinates.shape[0]
    new_coord = np.zeros((max_num,2))
    seedcount = 0
    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        thismask = label_img[minr:maxr, minc:maxc] == props.label
        this_seedmap = seedmap[minr:maxr, minc:maxc]
        # Seeds falling inside this region, in full-image coordinates.
        this_seeds = np.argwhere(np.logical_and(thismask, this_seedmap)) + np.array([[minr, minc]])
        number_seeds = this_seeds.shape[0]
        if number_seeds <= 1:
            new_coord[seedcount,:] = props.centroid
            seedcount += 1
        elif number_seeds >= 2:
            # BUG FIX: the original referenced an undefined name `rem_cord`
            # here (NameError at runtime); the seeds found in this region
            # are `this_seeds`.
            new_coord[seedcount:seedcount+number_seeds,:] = this_seeds
            seedcount += number_seeds
    # BUG FIX: the np.int alias was removed in NumPy 1.24.
    return new_coord[0:seedcount,:].astype(np.int64)
@jit
def intersect(arr_, brr_):
    """Return the rows common to *arr_* and *brr_* as an int array.

    Rows are compared as tuples; the row order of the result is
    unspecified (set semantics).
    """
    arr = set(map(tuple, arr_))
    brr = set(map(tuple, brr_))
    common = arr.intersection(brr)
    # BUG FIX: np.asarray(<set>) builds a 0-d object array, so the astype
    # below raised TypeError; materialize the set as a list of rows first.
    # (The np.int alias was also removed in NumPy 1.24 -- use builtin int.)
    return np.asarray(list(common)).astype(int)
def residual_markers(label_img, coordinates):
    """Split labeled regions into singly- and multiply-seeded groups.

    Regions holding at most one seed from *coordinates* -- or that are
    round, solid, and not oversized -- are relabeled into *class_label*;
    every remaining multi-seed region is relabeled into *new_label* and
    its seeds are collected.

    Returns (class_label, new_label, seeds) where seeds is an (n, 2) int
    array of the coordinates that fell inside multi-seed regions.
    """
    num_obj = np.max(label_img)
    # BUG FIX: `regionprops` is not imported by name in this module (only
    # skimage.measure is); call it through the imported namespace.
    regions = measure.regionprops(label_img)
    seedmap = np.zeros_like(label_img, dtype=bool)
    new_label = np.zeros_like(label_img)
    class_label = np.zeros_like(label_img)
    seedmap[coordinates[:,0], coordinates[:,1]] = True
    max_num = num_obj + coordinates.shape[0]
    new_coord = np.zeros((max_num,2))
    seedcount = 0
    regionCount = 0
    classRegionCount = 0
    all_area = [props.area for props in regions]
    mid_area = np.median(all_area)
    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        # Elongation measure: 1.0 means perfectly round.
        ratio = props.minor_axis_length/props.major_axis_length
        thismask = label_img[minr:maxr, minc:maxc] == props.label
        this_seedmap = seedmap[minr:maxr, minc:maxc]
        this_new_label = new_label[minr:maxr, minc:maxc]
        this_class_label = class_label[minr:maxr, minc:maxc]
        # Seeds falling inside this region, in full-image coordinates.
        this_seeds = np.argwhere(np.logical_and(thismask, this_seedmap)) + np.array([[minr, minc]])
        number_seeds = this_seeds.shape[0]
        if number_seeds <= 1:
            classRegionCount += 1
            this_class_label[thismask] = classRegionCount
        elif number_seeds >= 2:
            # If the cell is very round, solid, and not much larger than
            # the median area, keep it whole despite the multiple seeds.
            if ratio > 0.85 and props.solidity > 0.9 and props.area < 4*mid_area:
                classRegionCount += 1
                this_class_label[thismask] = classRegionCount
            else:
                regionCount += 1
                this_new_label[thismask] = regionCount
                new_coord[seedcount:seedcount+number_seeds,:] = this_seeds
                seedcount += number_seeds
    # BUG FIX: the np.int alias was removed in NumPy 1.24.
    return class_label, new_label, new_coord[0:seedcount,:].astype(np.int64)
@autojit
def safe_boarder(boarder_seed, row, col):
    '''
    board_seed: N*2 represent row and col for 0 and 1 axis.
    Clamp every coordinate, in place, into the valid image range
    [0, row-1] x [0, col-1], and return the array.
    '''
    row_coords = boarder_seed[:, 0]
    col_coords = boarder_seed[:, 1]
    # Boolean-mask assignment on these views writes through to the input.
    row_coords[row_coords < 0] = 0
    row_coords[row_coords >= row] = row - 1
    col_coords[col_coords < 0] = 0
    col_coords[col_coords >= col] = col - 1
    return boarder_seed
@autojit
def label2contour(label_img, org=None, print_color = [0,0,1], linewidth = 2, alpha = 1, returnImg = False):
    """Extract one contour per labeled region of *label_img*.

    Returns (masked_img, contourlist): contourlist holds one (n, 2) int
    array of boundary coordinates per region; masked_img is the overlay
    of the dilated contours on *org* when returnImg is True, else None.
    """
    #npad = ((1,1),(1,1))
    row, col = label_img.shape
    #label_img = np.pad(label_img, npad, mode='constant', constant_values=0)
    contour_img = np.zeros(label_img.shape, dtype=bool)
    #tmp_img = np.zeros_like(label_img)
    # BUG FIX: `regionprops` is not imported by name in this module (only
    # skimage.measure is); call it through the imported namespace.
    regions = measure.regionprops(label_img)
    contourlist = [np.array([-1,-1])]*len(regions) #because numba can not work with []
    for id, props in enumerate(regions):
        minr, minc, maxr, maxc = props.bbox
        # Grow the bounding box by one pixel (clamped to the image) so the
        # tracer can close contours that touch the bbox edge.
        rs, re = max(minr-1,0), min(maxr+1, row)
        cs, ce = max(minc-1,0), min(maxc+1, col)
        thispatch = label_img[rs:re, cs:ce] == props.label
        contours = measure.find_contours(thispatch, 0)
        thiscontour = (contours[0] + [rs, cs]).astype(int)
        contourlist[id] = safe_boarder(thiscontour, row, col)
        contour_img[thiscontour[:, 0], thiscontour[:, 1]] = True
    masked_img = None
    if returnImg:
        se = np.array([[ True,  True,  True],
                       [ True,  True,  True],
                       [ True,  True,  True]], dtype=bool)
        # Thicken the one-pixel contours before blending them over the image.
        contour_mask = skimage.morphology.binary_dilation(contour_img, se)
        masked_img = overlayImg(org, contour_mask , print_color = print_color, alpha = alpha)
    return masked_img, contourlist
def split_img(img, windowsize=1000, board = 0, fixed_window = False, step_size = None, tuple_slice = False):
    '''Split a (channel, row, col) image into a dict of patch batches.

    Parameters
    ----------
    img : ndarray, (channel, row, col)
    windowsize : int or (int, int) or None
        Patch size including the border; None (or an image smaller than the
        window with fixed_window False) yields the whole image as one patch.
    board : int
        Border width added around the hidden (inner) window on every side.
    fixed_window : bool
        Force every extracted patch to be exactly windowsize: patches that
        would be short near the right/bottom edge are shifted left/up, and
        the padding is widened when the image itself is too small.
    step_size : int or (int, int) or None
        Stride between patch origins; smaller than the hidden window gives
        overlapping patches.  None means non-overlapping.
    tuple_slice : unused.

    Returns
    -------
    dict
        Maps each actual patch size to a tuple
        (BatchData, org_slice_list, extract_slice_list, slice_tuple_list):
        BatchData stacks the patches of that size; org_slice_list holds the
        destination slices in original-image coordinates; extract_slice_list
        the matching source slices in patch-local coordinates;
        slice_tuple_list the patch origins (may be negative by the border).
        Pasting BatchData[i][:, extract_slice] into img[:, org_slice]
        reconstructs the image.
    '''
    IndexDict = {}
    identifier = -1
    PackList = []
    row_size, col_size = img.shape[1], img.shape[2]
    if windowsize is not None and type(windowsize) is int:
        windowsize = (windowsize, windowsize)
    if windowsize is None or (row_size <= windowsize[0] and col_size<=windowsize[1] and (not fixed_window)):
        # Trivial case: the whole image is the single patch.
        pad_img = img
        rowsize, colsize = pad_img.shape[1:]
        org_slice = (slice(0, rowsize), slice(0, colsize))
        extract_slice = org_slice
        crop_patch_slice = (slice(0, rowsize), slice(0, colsize))
        thisSize = (rowsize, colsize )
        identifier = identifier + 1
        org_slice_tuple = (0, 0)
        if thisSize in IndexDict:
            IndexDict[thisSize].append(identifier)
        else:
            IndexDict[thisSize] = []
            IndexDict[thisSize].append(identifier)
        PackList.append((crop_patch_slice, org_slice ,extract_slice, thisSize,identifier, org_slice_tuple))
    else:
        hidden_windowsize = (windowsize[0]-2*board, windowsize[1]-2*board)
        for each_size in hidden_windowsize:
            if each_size <= 0:
                raise RuntimeError('windowsize can not be smaller than board*2.')
        if type(step_size) is int:
            step_size = (step_size, step_size)
        if step_size is None:
            step_size = hidden_windowsize
        numRowblocks = int(math.ceil(float(row_size)/step_size[0]))
        numColblocks = int(math.ceil(float(col_size)/step_size[1]))
        # sanity check, make sure the image is at least of size window_size to the left-hand side if fixed_windows is true
        # which means, -----*******|-----, left to the vertical board of original image is at least window_size.
        row_addition_board, col_addition_board = 0, 0
        addition_board = 0
        if fixed_window:
            if row_size + 2 * board < windowsize[0]: # means we need to add more on board.
                row_addition_board = windowsize[0] - (row_size + 2 * board )
            if col_size + 2 * board < windowsize[1]: # means we need to add more on board.
                col_addition_board = windowsize[1] - (col_size + 2 * board)
            addition_board = row_addition_board if row_addition_board > col_addition_board else col_addition_board
        left_pad = addition_board + board
        pad4d = ((0,0),( left_pad , board), ( left_pad , board ))
        pad_img = np.pad(img, pad4d, 'symmetric').astype(img.dtype)
        thisrowstart, thiscolstart =0, 0
        thisrowend, thiscolend = 0,0
        for row_idx in range(numRowblocks):
            thisrowlen = min(hidden_windowsize[0], row_size - row_idx * step_size[0])
            row_step_len = min(step_size[0], row_size - row_idx * step_size[0])
            thisrowstart = 0 if row_idx == 0 else thisrowstart + step_size[0]
            thisrowend = thisrowstart + thisrowlen
            row_shift = 0
            if fixed_window:
                if thisrowlen < hidden_windowsize[0]:
                    row_shift = hidden_windowsize[0] - thisrowlen
            for col_idx in range(numColblocks):
                thiscollen = min(hidden_windowsize[1], col_size - col_idx * step_size[1])
                col_step_len = min(step_size[1], col_size - col_idx * step_size[1])
                thiscolstart = 0 if col_idx == 0 else thiscolstart + step_size[1]
                thiscolend = thiscolstart + thiscollen
                col_shift = 0
                if fixed_window:
                    # we need to shift the patch to left to make it at least windowsize.
                    if thiscollen < hidden_windowsize[1]:
                        col_shift = hidden_windowsize[1] - thiscollen
                #
                #----board----******************----board----
                #
                crop_r_start = thisrowstart - board - row_shift + left_pad
                crop_c_start = thiscolstart - board - col_shift + left_pad
                crop_r_end = thisrowend + board + left_pad
                crop_c_end = thiscolend + board + left_pad
                #we need to handle the tricky board condition
                # thispatch will be of size (:,:, windowsize+ 2*board)
                #thisPatch = pad_img[:,crop_r_start:crop_r_end, crop_c_start:crop_c_end].copy()
                crop_patch_slice = (slice(crop_r_start, crop_r_end), slice(crop_c_start, crop_c_end))
                org_slice_tuple = (crop_r_start-left_pad, crop_c_start -left_pad )
                thisSize = (thisrowlen + 2*board + row_shift, thiscollen + 2*board + col_shift)
                org_slice = (slice(thisrowstart, thisrowend), slice(thiscolstart, thiscolend))
                # slice on a cooridinate of the original image
                extract_slice = (slice(board + row_shift, board + thisrowlen + row_shift),
                                 slice(board + col_shift, board + col_shift + thiscollen))
                # extract on local coordinate of a patch
                identifier = identifier +1
                PackList.append((crop_patch_slice, org_slice ,extract_slice, thisSize,identifier, org_slice_tuple))
                if thisSize in IndexDict:
                    IndexDict[thisSize].append(identifier)
                else:
                    IndexDict[thisSize] = []
                    IndexDict[thisSize].append(identifier)
    # Materialize the patches: one stacked batch per distinct patch size.
    PackDict = {}
    for this_size in list(IndexDict.keys()):
        iden_list = IndexDict[this_size]
        this_len = len(iden_list)
        org_slice_list = []
        extract_slice_list = []
        slice_tuple_list = []
        BatchData = np.zeros( (this_len, img.shape[0]) + tuple(this_size) )
        for idx, iden in enumerate(iden_list):
            crop_patch_slice = PackList[iden][0]
            BatchData[idx,...] = pad_img[:,crop_patch_slice[0],crop_patch_slice[1]]
            org_slice_list.append(PackList[iden][1])
            extract_slice_list.append(PackList[iden][2])
            slice_tuple_list.append(PackList[iden][-1])
        PackDict[this_size]= (BatchData, org_slice_list, extract_slice_list, slice_tuple_list)
    return PackDict
def split_index(img, windowsize=1000, board = 0, fixed_window = False, step_size = None, tuple_slice = False):
    '''Like split_img, but return only the slice bookkeeping, not the data.

    Same parameters as split_img.  Returns (PackList, pad_img): PackList is
    a list of tuples
    (crop_patch_slice, org_slice, extract_slice, thisSize, identifier,
     org_slice_tuple) -- crop_patch_slice indexes into the padded image
    pad_img, org_slice addresses original-image coordinates, extract_slice
    addresses patch-local coordinates, and org_slice_tuple is the patch
    origin (possibly negative by the border width).  The caller extracts
    patches itself via pad_img[:, crop_patch_slice[0], crop_patch_slice[1]].
    '''
    IndexDict = {}
    identifier = -1
    PackList = []
    row_size, col_size = img.shape[1], img.shape[2]
    if windowsize is not None and type(windowsize) is int:
        windowsize = (windowsize, windowsize)
    if windowsize is None or (row_size <= windowsize[0] and col_size<=windowsize[1] and (not fixed_window)):
        # Trivial case: the whole image is the single patch.
        pad_img = img
        rowsize, colsize = pad_img.shape[1:]
        org_slice = (slice(0, rowsize), slice(0, colsize))
        extract_slice = org_slice
        crop_patch_slice = (slice(0, rowsize), slice(0, colsize))
        thisSize = (rowsize, colsize )
        identifier = identifier + 1
        org_slice_tuple = (0, 0)
        if thisSize in IndexDict:
            IndexDict[thisSize].append(identifier)
        else:
            IndexDict[thisSize] = []
            IndexDict[thisSize].append(identifier)
        PackList.append((crop_patch_slice, org_slice ,extract_slice, thisSize,identifier, org_slice_tuple))
    else:
        hidden_windowsize = (windowsize[0]-2*board, windowsize[1]-2*board)
        for each_size in hidden_windowsize:
            if each_size <= 0:
                raise RuntimeError('windowsize can not be smaller than board*2.')
        if type(step_size) is int:
            step_size = (step_size, step_size)
        if step_size is None:
            step_size = hidden_windowsize
        numRowblocks = int(math.ceil(float(row_size)/step_size[0]))
        numColblocks = int(math.ceil(float(col_size)/step_size[1]))
        # sanity check, make sure the image is at least of size window_size to the left-hand side if fixed_windows is true
        # which means, -----*******|-----, left to the vertical board of original image is at least window_size.
        row_addition_board, col_addition_board = 0, 0
        addition_board = 0
        if fixed_window:
            if row_size + 2 * board < windowsize[0]: # means we need to add more on board.
                row_addition_board = windowsize[0] - (row_size + 2 * board )
            if col_size + 2 * board < windowsize[1]: # means we need to add more on board.
                col_addition_board = windowsize[1] - (col_size + 2 * board)
            addition_board = row_addition_board if row_addition_board > col_addition_board else col_addition_board
        left_pad = addition_board + board
        pad4d = ((0,0),( left_pad , board), ( left_pad , board ))
        # forget about the 0 padding now.
        pad_img = np.pad(img, pad4d, 'symmetric').astype(img.dtype)
        thisrowstart, thiscolstart =0, 0
        thisrowend, thiscolend = 0,0
        for row_idx in range(numRowblocks):
            thisrowlen = min(hidden_windowsize[0], row_size - row_idx * step_size[0])
            row_step_len = min(step_size[0], row_size - row_idx * step_size[0])
            thisrowstart = 0 if row_idx == 0 else thisrowstart + step_size[0]
            thisrowend = thisrowstart + thisrowlen
            row_shift = 0
            if fixed_window:
                if thisrowlen < hidden_windowsize[0]:
                    row_shift = hidden_windowsize[0] - thisrowlen
            for col_idx in range(numColblocks):
                thiscollen = min(hidden_windowsize[1], col_size - col_idx * step_size[1])
                col_step_len = min(step_size[1], col_size - col_idx * step_size[1])
                thiscolstart = 0 if col_idx == 0 else thiscolstart + step_size[1]
                thiscolend = thiscolstart + thiscollen
                col_shift = 0
                if fixed_window:
                    # we need to shift the patch to left to make it at least windowsize.
                    if thiscollen < hidden_windowsize[1]:
                        col_shift = hidden_windowsize[1] - thiscollen
                #
                #----board----******************----board----
                #
                crop_r_start = thisrowstart - board - row_shift + left_pad
                crop_c_start = thiscolstart - board - col_shift + left_pad
                crop_r_end = thisrowend + board + left_pad
                crop_c_end = thiscolend + board + left_pad
                #we need to handle the tricky board condition
                # thispatch will be of size (:,:, windowsize+ 2*board)
                #thisPatch = pad_img[:,crop_r_start:crop_r_end, crop_c_start:crop_c_end].copy()
                crop_patch_slice = (slice(crop_r_start, crop_r_end), slice(crop_c_start, crop_c_end))
                org_slice_tuple = (crop_r_start-left_pad, crop_c_start -left_pad )
                thisSize = (thisrowlen + 2*board + row_shift, thiscollen + 2*board + col_shift)
                # slice on a cooridinate of the original image
                org_slice = (slice(thisrowstart, thisrowend), slice(thiscolstart, thiscolend))
                # extract on local coordinate of a patch
                extract_slice = (slice(board + row_shift, board + thisrowlen + row_shift),
                                 slice(board + col_shift, board + col_shift + thiscollen))
                identifier = identifier +1
                PackList.append((crop_patch_slice, org_slice, extract_slice, thisSize, identifier, org_slice_tuple))
                if thisSize in IndexDict:
                    IndexDict[thisSize].append(identifier)
                else:
                    IndexDict[thisSize] = []
                    IndexDict[thisSize].append(identifier)
    return PackList, pad_img
be7193418e82fd004543a486f68be4da0a2b55fe | 49,106 | py | Python | pandas/tseries/index.py | ContinuumIO/pandas | 1fb318d605c4587c68d7dcb3ab9f673a8ad7bbca | [
"BSD-2-Clause"
] | 10 | 2015-07-21T06:35:13.000Z | 2021-10-30T00:15:05.000Z | pandas/tseries/index.py | aflaxman/pandas | cbe91d73dacf46a36cadd2a7901c4d97a566c080 | [
"BSD-2-Clause"
] | null | null | null | pandas/tseries/index.py | aflaxman/pandas | cbe91d73dacf46a36cadd2a7901c4d97a566c080 | [
"BSD-2-Clause"
] | 5 | 2017-05-28T05:31:12.000Z | 2020-09-01T03:08:01.000Z | # pylint: disable=E1101
import operator
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.common import isnull
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset, get_period_alias
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
    """Return the pytz UTC timezone object, importing pytz lazily."""
    from pytz import utc
    return utc
# -------- some conversion wrapper functions
def _field_accessor(name, field):
    """Build a read-only property that extracts a date field code
    (e.g. 'Y', 'M', 'dow') from every stamp in the index."""
    def accessor(self):
        vals = self.asi8
        # Field extraction must happen in local wall time for tz-aware
        # indexes; UTC data needs no conversion.
        if self.tz is not None and self.tz is not _utc():
            vals = self._local_timestamps()
        return lib.get_date_field(vals, field)
    accessor.__name__ = name
    return property(accessor)
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
    """
    Wrap comparison operations to convert datetime-like to datetime64
    """
    def wrapper(self, other):
        if isinstance(other, datetime):
            # Scalar datetime: compare via this class's own operator
            # against a datetime64 scalar.
            op = getattr(self, opname)
            return op(_to_m8(other)).view(np.ndarray)
        if isinstance(other, list):
            other = DatetimeIndex(other)
        elif not isinstance(other, np.ndarray):
            other = _ensure_datetime64(other)
        # Array-like: delegate to the Int64Index implementation.
        op = getattr(super(DatetimeIndex, self), opname)
        return op(other).view(np.ndarray)
    return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
class TimeSeriesError(Exception):
    """Raised for time-series specific failures, e.g. partial-date
    slicing on a non-monotonic DatetimeIndex."""
    pass
_midnight = time(0, 0)
_NS_DTYPE = np.dtype('M8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
dayfirst = kwds.pop('dayfirst', None)
yearfirst = kwds.pop('yearfirst', None)
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
freq_infer = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
offset = freq
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset, dayfirst=dayfirst,
yearfirst=yearfirst)
else:
data = tools.to_datetime(data)
data.offset = offset
if isinstance(data, DatetimeIndex):
if name is not None:
data.name = name
return data
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset, dayfirst=dayfirst,
yearfirst=yearfirst)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
subarr = data.values
if offset is None:
offset = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = lib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
try:
subarr = tools.to_datetime(data)
except ValueError:
# tz aware
subarr = tools.to_datetime(data, utc=True)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tools._maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = lib.tz_localize_to_utc(ints, tz)
subarr = subarr.view(_NS_DTYPE)
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify two of start, end, or periods')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
inferred_tz = tools._infer_tzinfo(start, end)
if tz is not None and inferred_tz is not None:
assert(inferred_tz == tz)
elif inferred_tz is not None:
tz = inferred_tz
tz = tools._maybe_get_tz(tz)
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz)
if end is not None and end.tz is None:
end = end.tz_localize(tz)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = lib.tz_localize_to_utc(com._ensure_int64(index), tz)
index = index.view(_NS_DTYPE)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
def _box_values(self, values):
return lib.map_infer(values, lib.Timestamp)
def _local_timestamps(self):
utc = _utc()
if self.is_monotonic:
return lib.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
result = lib.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
if values.dtype != _NS_DTYPE:
values = com._ensure_int64(values).view(_NS_DTYPE)
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_get_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
assert(isinstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(isinstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_to_pydatetime(self.asi8)
def __repr__(self):
from pandas.core.format import _format_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
summary = str(self.__class__)
if len(self) == 1:
first = _format_datetime64(values[0], tz=self.tz)
summary += '\n[%s]' % first
elif len(self) == 2:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, %s]' % (first, last)
elif len(self) > 2:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
summary += tagline % (len(self), freq, self.tz)
return summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if isinstance(other, Index):
return self.union(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif isinstance(other, np.timedelta64):
raise NotImplementedError
elif com.is_integer(other):
return self.shift(other)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if isinstance(other, Index):
return self.diff(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif isinstance(other, np.timedelta64):
raise NotImplementedError
elif com.is_integer(other):
return self.shift(-other)
else: # pragma: no cover
raise TypeError(other)
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view(_NS_DTYPE)
else:
new_values = self.astype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def __contains__(self, key):
try:
return np.isscalar(self.get_loc(key))
except (KeyError, TypeError):
return False
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, DatetimeIndex):
try:
values = DatetimeIndex(values)
except ValueError:
return self.asobject.isin(values)
value_set = set(values.asi8)
return lib.ismember(self.asi8, value_set)
    def to_datetime(self, dayfirst=False):
        # Already datetime-typed, so just return a copy.
        # NOTE(review): ``dayfirst`` is accepted for interface symmetry
        # with string parsing but is ignored here.
        return self.copy()
    def groupby(self, f):
        # Group the boxed Timestamp objects by key function ``f``.
        # Presumably returns a mapping of key -> grouped values; the exact
        # container is defined by _algos.groupby_object -- verify there.
        objs = self.asobject
        return _algos.groupby_object(objs, f)
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def get_duplicates(self):
values = Index.get_duplicates(self)
return DatetimeIndex(values)
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
elif dtype == _INT64_DTYPE:
return self.asi8.copy()
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
def _get_time_micros(self):
utc = _utc()
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return lib.get_time_micros(values)
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
if isnull(self).any():
msg = 'DatetimeIndex with NaT cannot be converted to object'
raise ValueError(msg)
return self._get_object_index()
    def tolist(self):
        """
        See ndarray.tolist
        """
        # Materialise boxed Timestamp objects rather than raw datetime64.
        return list(self.asobject)
def _get_object_index(self):
boxfunc = lambda x: Timestamp(x, offset=self.offset, tz=self.tz)
boxed_values = lib.map_infer(self.asi8, boxfunc)
return Index(boxed_values, dtype=object)
def to_pydatetime(self):
"""
Return DatetimeIndex as object ndarray of datetime.datetime objects
Returns
-------
datetimes : ndarray
"""
return lib.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = get_period_alias(self.freqstr)
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if isinstance(freq, basestring):
freq = to_offset(freq)
result = Index.shift(self, n, freq)
result.tz = self.tz
return result
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shift with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name, tz=self.tz)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, None, self.tz)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combine
overlapping ranges with the same DateOffset, will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not isinstance(other, DatetimeIndex) and len(other) > 0:
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise Exception('Cannot join tz-naive with tz-aware DatetimeIndex')
elif other.tz is not None:
raise Exception('Cannot join tz-naive with tz-aware DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None or offset != other.offset:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = com._concat_compat((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
self.tz = getattr(obj, 'tz', None)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed):
if not self.is_monotonic:
raise TimeSeriesError('Partial indexing only valid for ordered time'
' series')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1))
t2 = Timestamp(datetime(parsed.year, 12, 31))
elif reso == 'month':
d = lib.monthrange(parsed.year, parsed.month)[1]
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, parsed.month, d))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
else:
raise KeyError
stamps = self.asi8
left = stamps.searchsorted(t1.value, side='left')
right = stamps.searchsorted(t2.value, side='right')
return slice(left, right)
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
return Index.get_value(self, series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
if isinstance(key, basestring):
stamp = Timestamp(key, tz=self.tz)
else:
stamp = Timestamp(key)
try:
return self._engine.get_value(series, stamp)
except KeyError:
raise KeyError(stamp)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
if isinstance(key, time):
return self.indexer_at_time(key)
try:
return self._engine.get_loc(Timestamp(key))
except (KeyError, ValueError):
raise KeyError(key)
def _get_string_slice(self, key):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
asdt, parsed, reso = parse_time_string(key, freq)
key = asdt
loc = self._partial_date_slice(reso, parsed)
return loc
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, basestring) or isinstance(end, basestring):
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
return Index.slice_locs(self, start, end)
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Timestamp(val, offset=self.offset, tz=self.tz)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
new_offset = None
if isinstance(key, slice):
if self.offset is not None and key.step is not None:
new_offset = key.step * self.offset
else:
new_offset = self.offset
result = arr_idx[key]
if result.ndim > 1:
return result
return self._simple_new(result, self.name, new_offset, self.tz)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
if not isinstance(result, np.ndarray):
raise TypeError
return result
except Exception:
return _algos.arrmap_object(self.asobject, f)
# alias to offset
@property
def freq(self):
return self.offset
@cache_readonly
def inferred_freq(self):
try:
return infer_freq(self)
except ValueError:
return None
@property
def freqstr(self):
return self.offset.freqstr
year = _field_accessor('year', 'Y')
month = _field_accessor('month', 'M')
day = _field_accessor('day', 'D')
hour = _field_accessor('hour', 'h')
minute = _field_accessor('minute', 'm')
second = _field_accessor('second', 's')
microsecond = _field_accessor('microsecond', 'us')
nanosecond = _field_accessor('nanosecond', 'ns')
weekofyear = _field_accessor('weekofyear', 'woy')
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow')
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy')
quarter = _field_accessor('quarter', 'q')
    def normalize(self):
        """
        Return DatetimeIndex with times to midnight. Length is unaltered

        Returns
        -------
        normalized : DatetimeIndex
        """
        # Zero out the intra-day component of every i8 stamp in one pass.
        new_values = lib.date_normalize(self.asi8)
        return DatetimeIndex(new_values, freq='infer', name=self.name)
def __iter__(self):
return iter(self._get_object_index())
def searchsorted(self, key, side='left'):
if isinstance(key, np.ndarray):
key = np.array(key, dtype=_NS_DTYPE, copy=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
def argmin(self):
# hack to workaround argmin failure
try:
return self.values.argmin()
except Exception: # pragma: no cover
return self.asi8.argmin()
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@property
def dtype(self):
return _NS_DTYPE
@property
def is_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return lib.dates_normalized(self.asi8)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self.tz is not None:
if other.tz is None:
return False
same_zone = lib.get_timezone(self.tz) == lib.get_timezone(other.tz)
else:
if other.tz is not None:
return False
same_zone = True
return same_zone and np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if type(item) == datetime:
item = _to_m8(item)
new_index = np.concatenate((self[:loc].asi8,
[item.view(np.int64)],
self[loc:].asi8))
return DatetimeIndex(new_index, freq='infer')
def _view_like(self, ndarray):
result = ndarray.view(type(self))
result.offset = self.offset
result.tz = self.tz
result.name = self.name
return result
def tz_convert(self, tz):
"""
Convert DatetimeIndex from one time zone to another (using pytz)
Returns
-------
normalized : DatetimeIndex
"""
tz = tools._maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise Exception('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._simple_new(self.values, self.name, self.offset, tz)
def tz_localize(self, tz):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz)
Returns
-------
localized : DatetimeIndex
"""
if self.tz is not None:
raise ValueError("Already tz-aware, use tz_convert to convert.")
tz = tools._maybe_get_tz(tz)
# Convert to UTC
new_dates = lib.tz_localize_to_utc(self.asi8, tz)
new_dates = new_dates.view(_NS_DTYPE)
return self._simple_new(new_dates, self.name, self.offset, tz)
def indexer_at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
tz : string or pytz.timezone
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries
Returns
-------
values_at_time : TimeSeries
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError
if isinstance(time, basestring):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of day (e.g., 9:00-9:30AM)
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
tz : string or pytz.timezone, default None
Returns
-------
values_between_time : TimeSeries
"""
from dateutil.parser import parse
if isinstance(start_time, basestring):
start_time = parse(start_time).time()
if isinstance(end_time, basestring):
end_time = parse(end_time).time()
if start_time.tzinfo or end_time.tzinfo:
raise NotImplementedError
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
def min(self, axis=None):
"""
Overridden ndarray.min to return a Timestamp
"""
if self.is_monotonic:
return self[0]
else:
min_stamp = self.asi8.min()
return Timestamp(min_stamp, tz=self.tz)
def max(self, axis=None):
"""
Overridden ndarray.max to return a Timestamp
"""
if self.is_monotonic:
return self[-1]
else:
max_stamp = self.asi8.max()
return Timestamp(max_stamp, tz=self.tz)
def _generate_regular_range(start, end, periods, offset):
    """Generate an array of datetime64 values spaced by *offset*."""
    if isinstance(offset, Tick):
        # Fixed-frequency offset: pure int64 nanosecond arithmetic.
        stride = offset.nanos
        if periods is None:
            lo = Timestamp(start).value
            hi = Timestamp(end).value
            # bump the endpoint so np.arange includes the end stamp
            hi += stride - hi % stride
        elif start is not None:
            lo = Timestamp(start).value
            hi = lo + periods * stride
        elif end is not None:
            hi = Timestamp(end).value + stride
            lo = hi - periods * stride
        else:
            raise NotImplementedError
        data = np.arange(lo, hi, stride, dtype=np.int64).view(_NS_DTYPE)
    else:
        # Calendar-aware offset: delegate to generate_range / to_datetime.
        if isinstance(start, Timestamp):
            start = start.to_pydatetime()
        if isinstance(end, Timestamp):
            end = end.to_pydatetime()
        stamps = list(generate_range(start=start, end=end,
                                     periods=periods, offset=offset))
        data = tools.to_datetime(stamps)

    return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
               normalize=False, name=None):
    """
    Return a fixed frequency datetime index, with day (calendar) as the
    default frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name of the resulting index

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # Thin convenience wrapper: the DatetimeIndex constructor does all the
    # real work of resolving start/end/periods and localizing.
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None):
    """
    Return a fixed frequency datetime index, with business day as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name)
def _to_m8(key):
    '''
    Coerce a Timestamp-like value to a numpy datetime64 scalar.
    '''
    if isinstance(key, datetime):
        stamp = key
    else:
        # the Timestamp constructor also parses strings
        stamp = Timestamp(key)
    return np.int64(lib.pydt_to_i8(stamp)).view(_NS_DTYPE)
def _str_to_dt_array(arr, offset=None, dayfirst=None, yearfirst=None):
    """
    Parse an array of date strings into a datetime64 array.

    Parameters
    ----------
    arr : array-like of str
    offset : DateOffset, optional
        Frequency hint forwarded to parse_time_string.
    dayfirst, yearfirst : bool, optional
        Ambiguous-date disambiguation flags forwarded to the parser.
    """
    def parser(x):
        # BUG FIX: yearfirst was previously hard-coded to None, silently
        # ignoring the caller-supplied flag.
        result = parse_time_string(x, offset, dayfirst=dayfirst,
                                   yearfirst=yearfirst)
        return result[0]

    arr = np.asarray(arr, dtype=object)
    data = _algos.arrmap_object(arr, parser)
    return tools.to_datetime(data)
# Naive start/end stamps must fall strictly inside this window for a
# pre-generated date range to be served from _daterange_cache
# (see _naive_in_cache_range below).
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
def _naive_in_cache_range(start, end):
    """True if both endpoints are tz-naive and inside the cache window."""
    if start is None or end is None:
        return False
    if start.tzinfo is not None or end.tzinfo is not None:
        # tz-aware stamps are never served from the cache
        return False
    return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
def _utc_naive(dt):
if dt is None:
return dt
if dt.tz is not None:
dt = dt.tz_convert('utc').replace(tzinfo=None)
return dt
| 31.397698 | 87 | 0.562884 |
79a42e8160e6fe029ad55fac737fdd52c1abded1 | 686 | py | Python | dpsniper/probability/binomial_cdf.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | 13 | 2021-03-30T15:39:35.000Z | 2022-02-21T08:30:45.000Z | dpsniper/probability/binomial_cdf.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | null | null | null | dpsniper/probability/binomial_cdf.py | barryZZJ/dp-sniper | 71a3fc06f3fc319b023bde9aad8f05b8c5a47a80 | [
"MIT"
] | 4 | 2021-06-30T08:37:45.000Z | 2022-03-05T03:21:14.000Z | from scipy.stats import beta
def lcb(n: int, k: int, alpha: float):
    """
    Computes a lower confidence bound on the probability parameter p of a
    binomial CDF.

    Returns:
        The largest p such that Pr[Binom[n,p] >= k] <= alpha
    """
    if k == 0:
        # no successes observed: the lower bound degenerates to 0
        return 0
    # Clopper-Pearson lower endpoint, expressed via the Beta quantile:
    # https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Clopper%E2%80%93Pearson_interval
    return beta.ppf(alpha, k, n - k + 1)
def ucb(n: int, k: int, alpha: float):
    """
    Computes an upper confidence bound on the probability parameter p of a
    binomial CDF.

    Returns:
        The smallest p such that Pr[Binom[n,p] <= k] <= alpha
    """
    if k != n:
        # Clopper-Pearson upper endpoint via the Beta quantile
        return beta.ppf(1 - alpha, k + 1, n - k)
    # all trials succeeded: the upper bound degenerates to 1
    return 1
| 23.655172 | 118 | 0.685131 |
273fb3e4ee46174a184464b9476d61666d7c3d9a | 1,655 | py | Python | package/spack-r-rvcheck/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-r-rvcheck/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-r-rvcheck/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRvcheck(RPackage):
    """Check latest release version of R and R package (both in 'CRAN',
    'Bioconductor' or 'Github')."""
    # CRAN landing page plus download/archive URLs used by Spack's fetcher.
    homepage = "https://cran.r-project.org/package=rvcheck"
    url = "https://cran.rstudio.com/src/contrib/rvcheck_0.0.9.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/rvcheck"
    # version(<release string>, <md5 checksum of the source tarball>)
    version('0.0.9', '7e9821de754577f94fdcbf7b02a20edc')
| 44.72973 | 78 | 0.675529 |
21c61a38dbf58850ca84e32846842f4730ad5c1b | 9,436 | py | Python | tests/test_sign_config.py | Luci2015/user-sync.py | e0d08e5170fb66d054182bded2219e30c33b9d46 | [
"MIT"
] | 82 | 2017-02-25T00:41:37.000Z | 2022-02-21T04:30:23.000Z | tests/test_sign_config.py | Luci2015/user-sync.py | e0d08e5170fb66d054182bded2219e30c33b9d46 | [
"MIT"
] | 562 | 2017-01-20T20:50:31.000Z | 2022-03-29T21:07:59.000Z | tests/test_sign_config.py | Luci2015/user-sync.py | e0d08e5170fb66d054182bded2219e30c33b9d46 | [
"MIT"
] | 81 | 2017-02-06T13:16:38.000Z | 2022-03-15T16:46:20.000Z | import logging
import pytest
from user_sync.config.sign_sync import SignConfigLoader
from user_sync.config.user_sync import DictConfig
from user_sync.engine.common import AdobeGroup
from user_sync.engine.sign import SignSyncEngine
from user_sync.error import AssertionException
@pytest.fixture
def modify_sign_config(modify_config):
    """Return a helper that patches the Sign root config file in place."""
    def _edit(key, value, merge=True):
        return modify_config('sign_root_config', key, value, merge)
    return _edit
def test_loader_attributes(default_sign_args):
    """ensure that initial load of Sign config is correct"""
    loader = SignConfigLoader(default_sign_args)
    assert isinstance(loader.logger, logging.Logger)
    assert loader.args == default_sign_args
    # both invocation options must be populated from the CLI args
    for option in ('users', 'config_filename'):
        assert option in loader.invocation_options
    assert isinstance(loader.main_config, DictConfig)
def test_config_structure(default_sign_args):
    """ensure that Sign config test fixture is structured correctly"""
    # Constructing the loader without raising is the whole assertion here.
    # NOTE: tmp_sign_connector_config and tmp_config_files are needed to
    # prevent the ConfigFileLoader from complaining that there are no
    # temporary sign connector or ldap connector files
    SignConfigLoader(default_sign_args)
def test_invocation_defaults(modify_sign_config):
    """ensure that invocation defaults are resolved correctly"""
    config_path = modify_sign_config(['invocation_defaults', 'users'], 'all')

    # with no CLI override, the config-file default applies
    loader = SignConfigLoader({'config_filename': config_path})
    assert 'users' in loader.invocation_options
    assert loader.invocation_options['users'] == ['all']

    # a CLI-supplied value wins over the file default
    loader = SignConfigLoader({'config_filename': config_path,
                               'users': ['mapped']})
    assert 'users' in loader.invocation_options
    assert loader.invocation_options['users'] == ['mapped']
def test_group_config(modify_sign_config):
    """Verify mapping of directory groups to Sign groups and admin roles."""
    def load_sign_groups(group_config):
        # write the mapping into the config file and read it back
        conf_path = modify_sign_config(['user_management'], group_config)
        loader = SignConfigLoader({'config_filename': conf_path})
        return loader.get_directory_groups()

    def check_mapping(mappings, name, priority, roles, sign_groups):
        assert name in mappings
        entry = mappings[name]
        assert entry['priority'] == priority
        assert all(role in entry['roles'] for role in roles)
        assert all(AdobeGroup.create(g) in entry['groups'] for g in sign_groups)

    # plain group mapping plus an independent group-admin rule
    group_mappings = load_sign_groups([
        {'directory_group': 'Test Group 1', 'sign_group': 'Sign Group 1'},
        {'directory_group': 'Test Group Admins 1', 'sign_group': None, 'group_admin': True, 'account_admin': False}
    ])
    check_mapping(group_mappings, 'Test Group 1', 0, [], ['Sign Group 1'])
    check_mapping(group_mappings, 'Test Group Admins 1', 1, ['GROUP_ADMIN'], [])

    # group-admin-only rule with no Sign group target
    group_mappings = load_sign_groups([
        {'directory_group': 'Test Group Admins 1', 'sign_group': None, 'group_admin': True},
    ])
    check_mapping(group_mappings, 'Test Group Admins 1', 0, ['GROUP_ADMIN'], [])

    # two rules for the same directory group merge their admin roles
    group_mappings = load_sign_groups([
        {'directory_group': 'Test Group 1', 'sign_group': 'Sign Group 1', 'group_admin': True},
        {'directory_group': 'Test Group 1', 'sign_group': 'Sign Group 1', 'account_admin': True}
    ])
    check_mapping(group_mappings, 'Test Group 1', 0, ['GROUP_ADMIN', 'ACCOUNT_ADMIN'], ['Sign Group 1'])

    # mixed group/role rules: priority follows first appearance
    group_mappings = load_sign_groups([
        {'directory_group': 'Test Group 1', 'sign_group': 'Sign Group 1'},
        {'directory_group': 'Test Group 2', 'sign_group': 'Sign Group 1'},
        {'directory_group': 'Test Group 2', 'sign_group': None, 'group_admin': True},
        {'directory_group': 'Test Group 2', 'sign_group': None, 'account_admin': True},
        {'directory_group': 'Test Group Admins 2', 'sign_group': None, 'account_admin': True}
    ])
    check_mapping(group_mappings, 'Test Group 1', 0, [], ['Sign Group 1'])
    check_mapping(group_mappings, 'Test Group 2', 1, ['GROUP_ADMIN', 'ACCOUNT_ADMIN'], ['Sign Group 1'])
    check_mapping(group_mappings, 'Test Group Admins 2', 4, ['ACCOUNT_ADMIN'], [])

    # a directory group mapped to several sign groups accumulates them
    group_mappings = load_sign_groups([
        {'directory_group': 'Test Group 1'},
        {'directory_group': 'Test Group 2', 'sign_group': 'Sign Group 1'},
        {'directory_group': 'Test Group 1', 'sign_group': 'Sign Group 2'},
        {'directory_group': 'Test Group 2', 'sign_group': 'Sign Group 2'},
    ])
    check_mapping(group_mappings, 'Test Group 1', 0, [], ['Sign Group 2'])
    check_mapping(group_mappings, 'Test Group 2', 1, [], ['Sign Group 1', 'Sign Group 2'])
def test_identity_module(test_resources, modify_sign_config):
    """ensure directory module name is correct"""
    # default fixture config uses the ldap identity source
    loader = SignConfigLoader({'config_filename': test_resources['sign_root_config']})
    assert loader.get_directory_connector_module_name() == 'ldap'

    # switching the identity source type switches the connector module
    okta_config = modify_sign_config(['identity_source', 'type'], 'okta')
    loader = SignConfigLoader({'config_filename': okta_config})
    assert loader.get_directory_connector_module_name() == 'okta'
def test_identity_connector_options(default_sign_args):
    """ensure sign connector options are retrieved from Sign config handler"""
    expected = {'username': 'ldapuser@example.com', 'password': 'password', 'host': 'ldap://host', 'base_dn': 'DC=example,DC=com', 'search_page_size': 200,
                'require_tls_cert': False, 'all_users_filter': '(&(objectClass=user)(objectCategory=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))',
                'group_filter_format': '(&(|(objectCategory=group)(objectClass=groupOfNames)(objectClass=posixGroup))(cn={group}))',
                'group_member_filter_format': '(memberOf={group_dn})', 'user_email_format': '{mail}'}
    loader = SignConfigLoader(default_sign_args)
    assert loader.get_directory_connector_options('ldap') == expected
    # asking for a connector other than the configured identity source fails
    with pytest.raises(AssertionException):
        loader.get_directory_connector_options('okta')
def test_target_config_options(default_sign_args, modify_sign_config):
    """ensure Sign target (sign_orgs) options are resolved correctly"""
    # simple case: single org under the default name
    config = SignConfigLoader(default_sign_args)
    target_options = config.get_target_options()
    assert target_options[SignConfigLoader.DEFAULT_ORG_NAME]['host'] == 'api.echosignstage.com'
    assert target_options[SignConfigLoader.DEFAULT_ORG_NAME]['integration_key'] == '[Sign API Key]'
    assert target_options[SignConfigLoader.DEFAULT_ORG_NAME]['admin_email'] == 'user@example.com'
    # complex case: multiple orgs, each keyed by its name
    sign_config_file = modify_sign_config(['sign_orgs'], {'primary': 'connector-sign.yml', 'org2': 'connector-sign.yml'}, False)
    args = {'config_filename': sign_config_file}
    config = SignConfigLoader(args)
    target_options = config.get_target_options()
    assert 'org2' in target_options
    assert target_options['org2']['host'] == 'api.echosignstage.com'
    assert target_options['org2']['integration_key'] == '[Sign API Key]'
    assert target_options['org2']['admin_email'] == 'user@example.com'
    # invalid case
    sign_config_file = modify_sign_config(['sign_orgs'], {'org1': 'connector-sign.yml'}, False)
    args = {'config_filename': sign_config_file}
    config = SignConfigLoader(args)
    # 'sign_orgs' must specify a config with the key 'primary'
    with pytest.raises(AssertionException):
        config.get_target_options()
def test_logging_config(default_sign_args):
    """Verify logging settings resolved from the Sign root config."""
    logging_config = SignConfigLoader(default_sign_args).get_logging_config()
    assert logging_config.get_bool('log_to_file') is True
    assert logging_config.get_string('file_log_directory').endswith('sign_logs')
    # remaining settings are plain strings with exact expected values
    expected_strings = {
        'file_log_name_format': '{:%Y-%m-%d}-sign.log',
        'file_log_level': 'info',
        'console_log_level': 'debug',
    }
    for key, value in expected_strings.items():
        assert logging_config.get_string(key) == value
def test_engine_options(modify_sign_config):
    """Engine options must merge defaults with invocation options exactly."""
    config_path = modify_sign_config(['user_sync'], {'sign_only_limit': 1000})
    loader = SignConfigLoader({'config_filename': config_path})
    options = loader.get_engine_options()

    default_keys = set(SignSyncEngine.default_options)
    invocation_keys = set(loader.invocation_options)
    option_keys = set(options)
    # every default and every invocation option must be represented ...
    assert default_keys <= option_keys
    assert invocation_keys <= option_keys
    # ... and get_engine_options() must not invent any other keys
    assert not (default_keys | invocation_keys) - option_keys
    assert options['user_sync']['sign_only_limit'] == 1000
def test_load_invocation_options(modify_sign_config):
    """'mapped' users default should turn on directory_group_mapped."""
    config_path = modify_sign_config(['invocation_defaults'],
                                     {'users': 'mapped', 'test_mode': False})
    loader = SignConfigLoader({'config_filename': config_path})
    assert loader.load_invocation_options()['directory_group_mapped'] is True
| 47.898477 | 158 | 0.719055 |
a9189a83593a5271523b2653606726c8c6d2789e | 2,604 | py | Python | fonts/FreeSansBold11pt7b.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | fonts/FreeSansBold11pt7b.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | fonts/FreeSansBold11pt7b.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | FreeSansBold11pt7bBitmaps = [
0x30, 0x63, 0xFF, 0xE3, 0x8D, 0x12, 0x00, 0x0E, 0x01, 0xC0, 0x38, 0x07,
0x0F, 0xFF, 0xFF, 0xC3, 0x80, 0x70, 0x0E, 0x01, 0xC0, 0xFF, 0xF3, 0x36,
0xC0, 0xFF, 0xFF, 0xC0, 0xFF, 0xF0, 0x04, 0x30, 0x82, 0x18, 0x61, 0x04,
0x30, 0x82, 0x18, 0x61, 0x04, 0x30, 0x1E, 0x1F, 0xE7, 0xFB, 0x87, 0xE1,
0xF8, 0x7E, 0x1F, 0x87, 0xE1, 0xF8, 0x7E, 0x1F, 0x87, 0xE1, 0xFC, 0xF7,
0xF8, 0xFC, 0x0C, 0x31, 0xFF, 0xFC, 0x71, 0xC7, 0x1C, 0x71, 0xC7, 0x1C,
0x71, 0xC7, 0x1E, 0x1F, 0xEF, 0xFF, 0x87, 0xE1, 0xF8, 0x70, 0x1C, 0x0F,
0x07, 0x83, 0xC3, 0xE1, 0xE0, 0x70, 0x3F, 0xFF, 0xFF, 0xFF, 0x3E, 0x1F,
0xEF, 0xFB, 0x87, 0xE1, 0xC0, 0x70, 0x38, 0x3C, 0x0F, 0x80, 0xF0, 0x1F,
0x87, 0xE1, 0xFC, 0xF7, 0xF8, 0xFC, 0x07, 0x81, 0xE0, 0xF8, 0x7E, 0x1B,
0x8C, 0xE3, 0x39, 0x8E, 0xE3, 0xB0, 0xEF, 0xFF, 0xFF, 0xFF, 0xC0, 0xE0,
0x38, 0x0E, 0x7F, 0xDF, 0xF7, 0xFD, 0x80, 0x60, 0x19, 0x0F, 0xFB, 0xFE,
0xE3, 0xC0, 0x70, 0x1C, 0x07, 0xE1, 0xF8, 0xFF, 0xF9, 0xFC, 0x1F, 0x0F,
0xE7, 0xFF, 0xC7, 0xE0, 0x38, 0x0F, 0xF3, 0xFE, 0xF3, 0xF8, 0x7E, 0x1F,
0x87, 0xE1, 0xFC, 0xF7, 0xF8, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0xE0,
0x38, 0x07, 0x01, 0xC0, 0x70, 0x0E, 0x03, 0x80, 0x70, 0x0E, 0x01, 0xC0,
0x70, 0x0E, 0x01, 0xC0, 0x0F, 0x03, 0xFC, 0x7F, 0xE7, 0x0E, 0x70, 0xE7,
0x0E, 0x39, 0xC1, 0xF8, 0x3F, 0xC7, 0x0E, 0x70, 0xEF, 0x0F, 0x70, 0xE7,
0x9E, 0x7F, 0xE1, 0xFC, 0x1E, 0x1F, 0xCF, 0xFB, 0x87, 0xE1, 0xF8, 0x7E,
0x1F, 0x87, 0xFF, 0xDF, 0xF3, 0xDC, 0x07, 0xE1, 0xF8, 0xEF, 0xF9, 0xFC,
0x08, 0x00 ]
# Glyph table for code points 0x2A..0x39 (Adafruit GFX font format).
# Each row is [bitmapOffset, width, height, xAdvance, xOffset, yOffset].
# NOTE(review): the first row's generated comment labels 0x2A as '#'
# although 0x2A is normally '*' -- presumably the glyph slots were remapped
# when this font subset was generated; verify against the font generator.
FreeSansBold11pt7bGlyphs = [
  [ 0, 7, 7, 9, 1, -15 ], # 0x2A '#'
  [ 7, 11, 10, 13, 1, -9 ], # 0x2B '+'
  [ 21, 4, 7, 6, 1, -2 ], # 0x2C ','
  [ 25, 6, 3, 7, 1, -7 ], # 0x2D '-'
  [ 28, 4, 3, 6, 1, -2 ], # 0x2E '.'
  [ 30, 6, 16, 6, 0, -15 ], # 0x2F '/'
  [ 42, 10, 16, 12, 1, -15 ], # 0x30 '0'
  [ 62, 6, 16, 12, 2, -15 ], # 0x31 '1'
  [ 74, 10, 16, 12, 1, -15 ], # 0x32 '2'
  [ 94, 10, 16, 12, 1, -15 ], # 0x33 '3'
  [ 114, 10, 16, 12, 1, -15 ], # 0x34 '4'
  [ 134, 10, 16, 12, 1, -15 ], # 0x35 '5'
  [ 154, 10, 16, 12, 1, -15 ], # 0x36 '6'
  [ 174, 11, 16, 12, 1, -15 ], # 0x37 '7'
  [ 196, 12, 16, 12, 0, -15 ], # 0x38 '8'
  [ 220, 10, 17, 12, 1, -15 ] ] # 0x39 '9'
# Aggregate font object: [bitmap data, glyph table, first code point,
# last code point, line advance in pixels].
FreeSansBold11pt7b = [
  FreeSansBold11pt7bBitmaps,
  FreeSansBold11pt7bGlyphs,
  0x2A, 0x39, 39 ]
# Approx. 361 bytes
| 54.25 | 75 | 0.515745 |
9e7240d0b6bf129de743871d1507a3f3807c2898 | 449 | py | Python | apps/Utils/logger.py | vikkyB2/UserMaintenance | 0ae9db17d694ae67ac82145524cae4a1f69d54c6 | [
"MIT"
] | null | null | null | apps/Utils/logger.py | vikkyB2/UserMaintenance | 0ae9db17d694ae67ac82145524cae4a1f69d54c6 | [
"MIT"
] | null | null | null | apps/Utils/logger.py | vikkyB2/UserMaintenance | 0ae9db17d694ae67ac82145524cae4a1f69d54c6 | [
"MIT"
] | null | null | null | import logging
# Module-level handlers shared by every logger configured via configLogger:
# a console (stream) handler and a file handler writing to "logFile.log"
# (created in the process working directory at import time).
c_handler = logging.StreamHandler()
f_handler = logging.FileHandler("logFile.log")
# The file format additionally records a timestamp per record.
c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
def configLogger(logger):
    """Attach the shared console and file handlers to *logger*; return it."""
    for shared_handler in (c_handler, f_handler):
        logger.addHandler(shared_handler)
    return logger
f220086232d46bec2d0f3b6df1b0f12793dbd1b8 | 6,687 | py | Python | wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py | edrex/wagtail | dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py | edrex/wagtail | dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py | edrex/wagtail | dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# remove duplicate renditions
db.execute("""
DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN (
SELECT image_id || '-' || filter_id FROM wagtailimages_rendition WHERE focal_point_key IS NULL GROUP BY image_id, filter_id HAVING COUNT(*) > 1
) AND focal_point_key IS NULL
""")
# Changing field 'Rendition.focal_point_key'
db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))
def backwards(self, orm):
# Changing field 'Rendition.focal_point_key'
db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wagtailimages.filter': {
'Meta': {'object_name': 'Filter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'wagtailimages.image': {
'Meta': {'object_name': 'Image'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'focal_point_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'focal_point_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'focal_point_x': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'focal_point_y': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
'wagtailimages.rendition': {
'Meta': {'unique_together': "(('image', 'filter', 'focal_point_key'),)", 'object_name': 'Rendition'},
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'filter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['wagtailimages.Filter']"}),
'focal_point_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'renditions'", 'to': "orm['wagtailimages.Image']"}),
'width': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['wagtailimages'] | 69.65625 | 193 | 0.581576 |
eaba76e7dd89e9368b352b58dc28b89fa7d247e2 | 9,845 | py | Python | pybind/nos/v7_1_0/interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import no_encrypt_auth_key_table
import auth_key_table
class authentication_key(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/ip/interface-fo-ospf-conf/ospf-interface-config/authentication-key. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__no_encrypt_auth_key_table','__auth_key_table',)
_yang_name = 'authentication-key'
_rest_name = 'authentication-key'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """
    Auto-generated pyangbind constructor.

    Resolves the path helper and extension-method registry from keyword
    arguments (falling back to the parent node when present), creates the
    child YANG containers, and optionally copies attribute values from a
    single positional prototype object.
    """
    # Path helper resolution order: explicit kwarg, then the parent's
    # helper, then disabled (False).
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Extension methods follow the same resolution order as the path helper.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Child containers defined by the YANG schema for this node.
    self.__no_encrypt_auth_key_table = YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    self.__auth_key_table = YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      # Copy each changed element from the prototype via its setter so that
      # validation and path registration still run.
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'ip', u'interface-fo-ospf-conf', u'ospf-interface-config', u'authentication-key']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'ip', u'ospf', u'authentication-key']
  def _get_no_encrypt_auth_key_table(self):
    """
    Getter method for no_encrypt_auth_key_table, mapped from YANG variable /interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/no_encrypt_auth_key_table (container)
    """
    # Return the YANGDynClass container stored in the name-mangled slot
    # (mangled against the enclosing generated class).
    return self.__no_encrypt_auth_key_table
  def _set_no_encrypt_auth_key_table(self, v, load=False):
    """
    Setter method for no_encrypt_auth_key_table, mapped from YANG variable /interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/no_encrypt_auth_key_table (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_no_encrypt_auth_key_table is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_no_encrypt_auth_key_table() directly.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the supplied value with the full pyangbind metadata for
      # this container so type checking and path registration apply.
      t = YANGDynClass(v,base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Surface a descriptive error including the generated type signature
      # so callers can see exactly what would have been constructed.
      raise ValueError({
          'error-string': """no_encrypt_auth_key_table must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })
    self.__no_encrypt_auth_key_table = t
    # Notify the change hook when the owning tree provides one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_no_encrypt_auth_key_table(self):
    # Reset the container to a fresh, default-constructed generated instance.
    self.__no_encrypt_auth_key_table = YANGDynClass(base=no_encrypt_auth_key_table.no_encrypt_auth_key_table, is_container='container', presence=False, yang_name="no-encrypt-auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
  def _get_auth_key_table(self):
    """
    Getter method for auth_key_table, mapped from YANG variable /interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/auth_key_table (container)
    """
    # Return the YANGDynClass container stored in the name-mangled slot.
    return self.__auth_key_table
  def _set_auth_key_table(self, v, load=False):
    """
    Setter method for auth_key_table, mapped from YANG variable /interface/hundredgigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/authentication_key/auth_key_table (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_key_table is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_key_table() directly.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the supplied value with the full pyangbind metadata for
      # this container so type checking and path registration apply.
      t = YANGDynClass(v,base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Surface a descriptive error including the generated type signature.
      raise ValueError({
          'error-string': """auth_key_table must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })
    self.__auth_key_table = t
    # Notify the change hook when the owning tree provides one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_auth_key_table(self):
    # Reset the container to a fresh, default-constructed generated instance.
    self.__auth_key_table = YANGDynClass(base=auth_key_table.auth_key_table, is_container='container', presence=False, yang_name="auth-key-table", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
no_encrypt_auth_key_table = __builtin__.property(_get_no_encrypt_auth_key_table, _set_no_encrypt_auth_key_table)
auth_key_table = __builtin__.property(_get_auth_key_table, _set_auth_key_table)
_pyangbind_elements = {'no_encrypt_auth_key_table': no_encrypt_auth_key_table, 'auth_key_table': auth_key_table, }
| 61.53125 | 501 | 0.74901 |
fc65b553689f2e2174a314db45a2cf75637aa60a | 84 | py | Python | threeML/utils/data_builders/__init__.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | threeML/utils/data_builders/__init__.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | threeML/utils/data_builders/__init__.py | Husky22/threeML | 2ef3401e3edf82ceffd85ad0a9ea9e8b2bba3520 | [
"BSD-3-Clause"
] | null | null | null | from .time_series_builder import TimeSeriesBuilder
# Public API of this subpackage: only TimeSeriesBuilder is exported via
# "from ... import *".
__all__ = ['TimeSeriesBuilder']
| 21 | 50 | 0.833333 |
5cbd9dcbfc4d66f47e630ee364bd7c4f91a2359a | 1,370 | py | Python | gwinc/squeeze.py | Jonjocarts/LION-Public | e6c8d7475e4f883dbb268bf6f028bbc378540ab3 | [
"Unlicense"
] | null | null | null | gwinc/squeeze.py | Jonjocarts/LION-Public | e6c8d7475e4f883dbb268bf6f028bbc378540ab3 | [
"Unlicense"
] | null | null | null | gwinc/squeeze.py | Jonjocarts/LION-Public | e6c8d7475e4f883dbb268bf6f028bbc378540ab3 | [
"Unlicense"
] | 1 | 2021-06-23T04:51:20.000Z | 2021-06-23T04:51:20.000Z | from numpy import pi, sqrt
from . import const
def sql(ifo):
    """Compute the standard quantum limit (SQL) frequency for an IFO.

    Reads the arm power, laser wavelength, mirror mass and ITM/SRM
    transmittances from the interferometer structure and returns the SQL
    crossover frequency in Hz.
    """
    light_speed = const.c
    arm_power = ifo.gwinc.parm
    carrier_omega = 2 * pi * light_speed / ifo.Laser.Wavelength
    mirror_mass = ifo.Materials.MirrorMass
    itm_trans = ifo.Optics.ITM.Transmittance
    srm_trans = ifo.Optics.SRM.Transmittance
    t_sr = sqrt(srm_trans)
    r_sr = sqrt(1 - srm_trans)
    return ((1 / (2 * pi)) * (8 / light_speed)
            * sqrt((arm_power * carrier_omega) / (mirror_mass * itm_trans))
            * (t_sr / (1 + r_sr)))
def computeFCParams(ifo, fcParams):
    """Compute ideal filter cavity Tin, detuning [Hz] and bandwidth [Hz].

    Fills in fcParams.Ti, fcParams.fdetune, fcParams.gammaFC and
    fcParams.fsrFC from the cavity length/losses in fcParams and the IFO's
    SQL frequency, and returns the updated fcParams.

    Raises RuntimeError when the round-trip plus end-mirror losses exceed
    the achievable input-mirror transmission.
    """
    # FC parameters
    c = const.c
    fsrFC = c / (2 * fcParams.L)          # free spectral range [Hz]
    lossFC = fcParams.Lrt + fcParams.Te   # round-trip loss + end transmission
    fSQL = sql(ifo)
    # detuning and cavity bandwidth (D&D paper P1400018 and/or PRD)
    eps = 4 / (2 + sqrt(2 + 2 * sqrt(1 + (4 * pi * fSQL / (fsrFC * lossFC))**4)))
    s1eps = sqrt(1 - eps)
    # cavity bandwidth [Hz]
    gammaFC = fSQL / sqrt(s1eps + s1eps**3)
    # cavity detuning [Hz]
    detuneFC = s1eps * gammaFC
    # input mirror transmission
    TinFC = 4 * pi * gammaFC / fsrFC - lossFC
    if TinFC < lossFC:
        # BUGFIX: the format argument must be parenthesized. The original
        # "... % 1e6 * gammaFC / fsrFC" applied % first (left-to-right with
        # * and /), producing a str and then raising TypeError on str*float
        # instead of the intended RuntimeError message.
        raise RuntimeError('IFC: Losses are too high! %.1f ppm max.' % (1e6 * gammaFC / fsrFC))
    # Add to fcParams structure
    fcParams.Ti = TinFC
    fcParams.fdetune = -detuneFC
    fcParams.gammaFC = gammaFC
    fcParams.fsrFC = fsrFC
    return fcParams
| 26.346154 | 93 | 0.611679 |
fd468616ea41593bae85d00a0073df5d117c439d | 100 | py | Python | src/download.py | gitter-badger/video-dl | 50270766390d4ddc57a872492c76adf0e1d64324 | [
"Apache-2.0"
] | null | null | null | src/download.py | gitter-badger/video-dl | 50270766390d4ddc57a872492c76adf0e1d64324 | [
"Apache-2.0"
] | null | null | null | src/download.py | gitter-badger/video-dl | 50270766390d4ddc57a872492c76adf0e1d64324 | [
"Apache-2.0"
] | null | null | null | """Used for test scripts."""
from video_dl.entry import main

# Allow running this module directly as a script; delegates to the
# package's entry-point function.
if __name__ == '__main__':
    main()
| 16.666667 | 31 | 0.67 |
aff74b03bb9af494930f0a46f6a1457db02719f7 | 591 | py | Python | post/views.py | Mansouroopi/nested-objects | 55c7984e1eefd9030119197feb2f52238ed212a5 | [
"MIT"
] | null | null | null | post/views.py | Mansouroopi/nested-objects | 55c7984e1eefd9030119197feb2f52238ed212a5 | [
"MIT"
] | null | null | null | post/views.py | Mansouroopi/nested-objects | 55c7984e1eefd9030119197feb2f52238ed212a5 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import generics
from post.models import BlogPost as Post, Tag
from post.serializers import PostSerializer
class PostListCreateAPIView(generics.ListCreateAPIView):
    """List all blog posts (GET) and create new posts (POST)."""
    # Full post queryset; DRF handles serialization/pagination for listing.
    queryset = Post.objects.all()
    serializer_class = PostSerializer

    def perform_create(self, serializer):
        """Save the post data when creating a new post."""
        # Stamp the authenticated requester as the post's author.
        serializer.save(author=self.request.user)
class PostDetailsAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) a single post."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
| 23.64 | 64 | 0.76819 |
dcf47b732401f1cb962212a9ee406d900cc91d83 | 3,640 | py | Python | substringMappingStructures.py | jcavalieri8619/OCRerror_correct | bd7b96139a2b90cda5957eff75f516a31b052b3a | [
"MIT"
] | 3 | 2018-03-12T07:31:12.000Z | 2018-09-12T02:14:56.000Z | substringMappingStructures.py | jcavalieri8619/OCRerror_correct | bd7b96139a2b90cda5957eff75f516a31b052b3a | [
"MIT"
] | null | null | null | substringMappingStructures.py | jcavalieri8619/OCRerror_correct | bd7b96139a2b90cda5957eff75f516a31b052b3a | [
"MIT"
] | null | null | null | __author__ = 'jcavalie'
def subgroups( my_list ):
for each_tuple in (lambda p, f = lambda n, g:
{ (x,) + y for x in range( 1, n ) for y in g( n - x, g ) } | { (n,) }:
f( p, f ))( len( my_list ) ):
yield list( my_list[ sum( each_tuple[ :index ] ):sum( each_tuple[ :index ] ) + length ] for index, length in
enumerate( each_tuple ) )
_OBS = 0
_HIDN = 1
class mappingObject( object ):
def __init__( self, charMapping, initposition ):
self._substrMapping = (charMapping[ _OBS ], charMapping[ _HIDN ])
self._positions = initposition
def getSubstrMapping( self ):
return self._substrMapping[ : ]
def getPositions( self ):
return self._positions
def __le__( self, other ):
return max( self.getPositions( ) ) <= min( other.getPositions( ) )
def __add__( self, other ):
observed = self.getSubstrMapping( )[ _OBS ] + other.getSubstrMapping( )[ _OBS ]
hidden = self.getSubstrMapping( )[ _HIDN ] + other.getSubstrMapping( )[ _HIDN ]
updatedPositions = sorted( self.getPositions( ) + other.getPositions( ) )
return mappingObject( (observed, hidden), updatedPositions )
_MAX_ERRWIN = 3  # maximum number of mappingObjects merged into one error window

class mappingContainer( object ):
    # Holds an ordered list of mappingObjects spanning a set of possible
    # positions, merging raw per-character mappings into error windows.

    def __init__( self, mappingLst, possible_positions ):
        # mappingLst is either a list of ready mappingObjects, or a list of
        # sublists of mappingObjects to be merged (summed) into windows.
        self._mappings = list( )
        self.possiblePositions = possible_positions
        self._full_length = len( possible_positions )
        if isinstance( mappingLst[ 0 ], mappingObject ):
            self._mappings = mappingLst[ : ]
        elif isinstance( mappingLst[ 0 ], list ):
            for sublist in mappingLst:
                if len( sublist ) > _MAX_ERRWIN or not len(sublist):
                    # Oversized or empty window: clear and abort construction.
                    self._mappings = list( )
                    raise ValueError( "Max ErrWindow Exceeded or Empty" )
                else:
                    # Merge the sublist via mappingObject.__add__ folding.
                    currMapObj = sum( sublist[ 1: ], sublist[ 0 ] )
                    #FIXME
                    #if accuracy drops then restore below code block to original
                    #by simply appending currMapObj without checking anything
                    currMap=currMapObj.getSubstrMapping()
                    if currMap[_OBS] == currMap[_HIDN] and len(currMap[_OBS])>1:
                        # Reject multi-char windows that map to themselves.
                        raise ValueError( "many-to-many identity mapping" )
                    else:
                        self._mappings.append( currMapObj )
        else:
            raise RuntimeError( "Error occurred in mappingContainer.__init__" )

    def getLstMapObjs( self ):
        # Shallow copy of the held mappingObjects.
        return self._mappings[ : ]

    def exportMapping( self, container, **kwargs ):
        # Append this container's substring pairs as one list to `container`.
        temp_container = [ ]
        for arg in self.getLstMapObjs( ):
            substr_mapping = arg.getSubstrMapping( )
            temp_container.append( substr_mapping )
        container.append( temp_container )

    def __add__( self, other ):
        # Concatenate two containers over the same possible positions.
        updatedMappings = self.getLstMapObjs( ) + other.getLstMapObjs( )
        return mappingContainer( updatedMappings, self.possiblePositions )

    def getCurrPositions( self ):
        # Flatten all positions currently covered by held mappings.
        currPositions = list( )
        for arg in self.getLstMapObjs( ):
            currPositions.extend( arg.getPositions( ) )
        return currPositions

    def positionsNeeded( self ):
        # Positions still uncovered (arbitrary order: set difference).
        return list( set( self.possiblePositions ) - set( self.getCurrPositions( ) ) )

    def insertMapping( self, other ):
        # Insert `other` before the first mapping it wholly precedes;
        # the for/else appends at the end when no such mapping exists.
        for index, objct in enumerate( self.getLstMapObjs( ) ):
            if other <= objct:
                self._mappings.insert( index, other )
                break
        else:
            self._mappings.append( other )
14b8358b7e45d4dca1901581f4631775f6db6370 | 4,932 | py | Python | get_positions.py | smoe/mCaller | e83333e03dea2df2844a9d9c7bfc3f475cd3c863 | [
"MIT"
] | 37 | 2017-04-25T20:29:02.000Z | 2022-02-21T11:33:04.000Z | get_positions.py | smoe/mCaller | e83333e03dea2df2844a9d9c7bfc3f475cd3c863 | [
"MIT"
] | 33 | 2017-07-31T04:16:01.000Z | 2022-02-19T16:22:58.000Z | get_positions.py | smoe/mCaller | e83333e03dea2df2844a9d9c7bfc3f475cd3c863 | [
"MIT"
] | 17 | 2017-08-23T00:28:08.000Z | 2021-09-08T13:33:05.000Z | #!/usr/bin/env python
import sys
import numpy as np
from Bio import SeqIO
from scipy import stats
from scipy.cluster.hierarchy import fcluster,linkage
import scipy.spatial.distance as ssd
from extract_contexts import revcomp
from plotlib import plot_w_labels
import os
import pandas as pd
def make_pos_set(pos_list):
    """Read a ~bed positions file and return the set of position keys.

    Every non-trivial line (more than 3 characters) contributes a tuple of
    its first four tab-separated fields: (chromosome, start, end, strand).
    """
    positions = set()
    with open(pos_list, 'r') as handle:
        for entry in handle:
            if len(entry) > 3:
                positions.add(tuple(entry.strip().split('\t')[:4]))
    return positions
def aggregate_by_pos(meth_fi,aggfi,depth_thresh,mod_thresh,pos_list,control,verbose_results,gff,ref,plot,plotdir,plotsummary):
    # Aggregate per-read mCaller methylation calls by genomic position.
    # Builds pos_dict: (chrom, pos, pos+1, context, strand) -> list of 0/1
    # labels (1 = methylated), plus values_dict of current deviations for
    # plotting when requested.
    # NOTE(review): several parameters (aggfi, depth_thresh, mod_thresh,
    # control, gff, ref, plotdir, plotsummary) are unused in the visible
    # body — the function appears truncated in this copy; confirm against
    # the upstream source before relying on it.
    pos_dict = {}
    if verbose_results:
        pos_dict_verbose = {}
    if pos_list:
        pos_set = make_pos_set(pos_list)
    values_dict = {}
    for line in open(meth_fi,'r'):
        #try:
        #print line
        try:
            # 8-column format: includes the per-read methylation probability.
            csome,read,pos,context,values,strand,label,prob = tuple(line.split('\t'))
        except: #for backwards compatibility; does not work with verbose results
            csome,read,pos,context,values,strand,label = tuple(line.split('\t'))
        nextpos = str(int(pos)+1)
        # Restrict to requested positions when a position list was given.
        if pos_list and (csome,pos,nextpos,strand) not in pos_set:
            continue
        if (csome,pos,nextpos,context,strand) not in pos_dict:
            pos_dict[(csome,pos,nextpos,context,strand)] = []
            values_dict[(csome,pos,nextpos,context,strand)] = []
            if verbose_results:
                pos_dict_verbose[(csome,pos,nextpos,context,strand)] = []
        # Keep raw current deviations only when they will be plotted or
        # summarized for listed positions (last comma field is dropped).
        if (pos_list and (csome,pos,nextpos,strand) in pos_set) or (not pos_list and plot):
            values_dict[(csome,pos,nextpos,context,strand)].append([float(v) for v in values.split(',')][:-1])
        # Labels starting with 'm' count as methylated observations.
        if label[0] == 'm':
            pos_dict[(csome,pos,nextpos,context,strand)].append(1)
        else:
            pos_dict[(csome,pos,nextpos,context,strand)].append(0)
def main():
    # Command-line driver: parse options, derive the output file name, and
    # delegate to aggregate_by_pos. NOTE(review): uses Python 2 print
    # statements — this file targets Python 2 only.
    #parse command line options
    from argparse import ArgumentParser
    parser = ArgumentParser(description='Produce bed file of methylated positions based on mCaller output')
    parser.add_argument('-d','--min_read_depth',type=int,required=False,help='minimum coverage of position to determine methylation (default = 15)',default=15)
    parser.add_argument('-t','--mod_threshold',type=float,required=False,help='minimum %% of observations at a position to include in report (default = 0.5)',default=0.5)
    parser.add_argument('-f','--mCaller_file',type=str,required=True,help='the output file from mCaller to summarize')
    parser.add_argument('-p','--positions',type=str,required=False,help='~bed file of positions for which to calculate % methylated (chromosome,start,end,strand); ignores other thresholds')
    parser.add_argument('--control',action='store_true',required=False,help='take unmethylated positions as a control for motif detection')
    parser.add_argument('--gff',action='store_true',required=False,help='output PacBio-style gff instead of bed ("identificationQv" score will be average probability of methylation)')
    parser.add_argument('--ref',type=str,required=False,help='use reference fasta to output longer contexts surrounding a base, from -20 to +20')
    parser.add_argument('--plot',action='store_true',required=False,help='plot currents deviations at the positions included (not recommended for many positions)')
    parser.add_argument('--plotsummary',action='store_true',required=False,help='plot currents deviations summarized across the positions included')
    parser.add_argument('--plotdir',type=str,required=False,default='mCaller_position_plots',help='output directory for plots, default=mCaller_position_plots')
    parser.add_argument('--vo',action='store_true',required=False,help='verbose output including probabilities for each position')
    parser.add_argument('-v','--version',action='store_true',required=False,help='print version')
    args = parser.parse_args()

    if args.version:
        print 'mCallerNP 0.3'
        sys.exit(0)

    assert os.path.isfile(args.mCaller_file), 'file not found at '+args.mCaller_file

    # Output name: base of the input file plus a suffix that records which
    # mode (positions / control / default) produced it.
    if args.positions:
        output_file = args.mCaller_file.split('.')[0]+'.methylation.positions.summary'
    elif not args.control:
        output_file = args.mCaller_file.split('.')[0]+'.methylation.summary'
    else:
        output_file = args.mCaller_file.split('.')[0]+'.methylation.control.summary'

    if args.gff:
        output_file = output_file+'.gff'
    else:
        output_file = output_file+'.bed'

    # Ensure the plot output directory exists before any plotting happens.
    if not os.path.isdir(args.plotdir):
        os.mkdir(args.plotdir)

    print args.mCaller_file
    aggregate_by_pos(args.mCaller_file,output_file,args.min_read_depth,args.mod_threshold,args.positions,args.control,args.vo,args.gff,args.ref,args.plot,args.plotdir,args.plotsummary)

if __name__ == "__main__":
    main()
| 52.468085 | 189 | 0.694039 |
a300ee9b4cd90b679d016b72390c6fdad3013950 | 462 | py | Python | src/Server/util/MulticastAddressGenerator.py | Jictyvoo/TEC502-2018.1--English-Dictionary-Game | a775e5ca7853f87466dd478b9dbc4762c3d111c4 | [
"MIT"
] | null | null | null | src/Server/util/MulticastAddressGenerator.py | Jictyvoo/TEC502-2018.1--English-Dictionary-Game | a775e5ca7853f87466dd478b9dbc4762c3d111c4 | [
"MIT"
] | null | null | null | src/Server/util/MulticastAddressGenerator.py | Jictyvoo/TEC502-2018.1--English-Dictionary-Game | a775e5ca7853f87466dd478b9dbc4762c3d111c4 | [
"MIT"
class MulticastAddressGenerator:
    """Cycles through multicast group addresses 224.0.0.0 .. 224.0.0.254.

    NOTE(review): once the last octet reaches 255 it wraps back to 0, so
    224.0.0.255 is never produced — confirm that is intentional.
    """

    def __init__(self):
        # Fixed /24 multicast prefix; only the final octet varies.
        self.__group_ip = "224.0.0."
        self.__current_last_ip = 0

    def __end_last_ip(self):
        # True once the next octet to hand out would be 255 or more.
        return self.__current_last_ip >= 255

    def get_next_group(self):
        """Return the next multicast group address, wrapping after .254."""
        if self.__end_last_ip():
            self.__current_last_ip = 0
        address = self.__group_ip + str(self.__current_last_ip)
        self.__current_last_ip += 1
        return address
| 30.8 | 67 | 0.664502 |
29be2f07b7e9e208480355ee1e57324fe5a1d1a0 | 11,080 | py | Python | examples/MoorPy_Example.py | marcellyteixeira/projeto | 1acb1a60413e4ebb5aa3a88d32eccb5ecc926480 | [
"BSD-3-Clause"
] | 6 | 2021-02-19T00:03:34.000Z | 2022-03-10T07:49:25.000Z | examples/MoorPy_Example.py | marcellyteixeira/projeto | 1acb1a60413e4ebb5aa3a88d32eccb5ecc926480 | [
"BSD-3-Clause"
] | 1 | 2021-04-29T20:00:22.000Z | 2021-04-29T20:00:22.000Z | examples/MoorPy_Example.py | marcellyteixeira/projeto | 1acb1a60413e4ebb5aa3a88d32eccb5ecc926480 | [
"BSD-3-Clause"
] | 7 | 2021-02-19T00:04:00.000Z | 2022-03-25T07:58:21.000Z | # --------------------------------------------------------------------------------------------
# MoorPy
#
# A mooring system visualizer and quasi-static modeler in Python.
# Matt Hall and Stein Housner
#
# --------------------------------------------------------------------------------------------
# 2018-08-14: playing around with making a QS shared-mooring simulation tool, to replace what's in Patrick's work
# 2020-06-17: Trying to create a new quasi-static mooring system solver based on my Catenary function adapted from FAST v7, and using MoorDyn architecture
import numpy as np
import moorpy as mp
from moorpy.MoorProps import getLineProps
# EXAMPLE 1: Load in a MoorDyn input file to create a MoorPy System
ms = mp.System('lines.txt')   # build a MoorPy System from a MoorDyn-style input file
ms.initialize(plots=1)        # connect objects (plots=1 presumably enables diagnostic output — confirm)
ms.plot()                     # draw the mooring system
'''
ms = mp.System('IEA-15-240-RWT-UMaineSemi_MoorDyn_PPIb.dat', depth=850)
ms.initialize(plots=0)
for body in ms.bodyList:
body.m = 20093000 # from VolturnUS-S spec sheet
body.v = 20206 # from VolturnUS-S spec sheet
body.rCG = [0, 0, -2.3256] # see RAFT verification doc
body.AWP = 78.54 # dWL=10 from VolturnUS-S spec sheet
body.rM = [0,0, 10.886] # KB=-13.63+20=6.37, BM=It/V = It/20206, KM = KB+BM -> rM = KM-20
# It = (np.pi/64)*d**4 + np.pi*(d/2)**2 * each xWP**2 It = 496567 -> BM = 24.516 -> KM = 30.886 -> rM = 10.886
#ms.bodyList[0].type=1
print(ms.bodyList[0].r6)
ms.bodyList[0].m += 450000
ms.solveEquilibrium3(plots=0)
print(ms.bodyList[0].r6)
#anim = ms.animateSolution()
#ms.plot(colortension=True, cbar_tension=True)
#ms.plot2d(colortension=True)
#for point in ms.pointList:
#print(point.r)
'''
'''
ms = mp.System('IEA-15-240-RWT-UMaineSemi_MoorDyn_PPI.dat', depth=850)
ms.initialize(plots=0)
#ms.bodyList[0].type=1
for body in ms.bodyList:
body.m = 20093000 # from VolturnUS-S spec sheet
body.v = 20206 # from VolturnUS-S spec sheet
body.rCG = [0, 0, -2.3256] # see RAFT verification doc
body.AWP = 78.54 # dWL=10 from VolturnUS-S spec sheet
body.rM = [0,0, 10.886] # KB=-13.63+20=6.37, BM=It/V = It/20206, KM = KB+BM -> rM = KM-20
#body.f6Ext = 2500000*np.array([1, 0, 0, 0, 150, 0])
ms.solveEquilibrium3(plots=0)
for body in ms.bodyList:
heading = 0
cosbeta = np.cos(heading*np.pi/180.0)
sinbeta = np.sin(heading*np.pi/180.0)
force = 2.5e6
height = 150
body.f6Ext = force*np.array([cosbeta, sinbeta, 0, -height*sinbeta, height*cosbeta, 0])
fig, ax = ms.plot()
ms.solveEquilibrium3(plots=0)
#ms.bodyList[0].f6Ext = Fthrust*np.array([1, 0, 0, 0, hHub, 0])
#ms.solveEquilibrium3(plots=0)
#anim = ms.animateSolution()
#ms.plot(colortension=True, cbar_tension=True)
ms.plot(ax=ax)
if heading==0:
print(ms.lineList[1].LBot)
print(ms.bodyList[0].r6[0])
elif heading==60:
print(ms.pointList[4].r[2])
print(np.linalg.norm([ms.bodyList[0].r6[0],ms.bodyList[0].r6[1]]))
'''
'''
# EXAMPLE 2: Catenary Testing Calls
mp.catenary(576.2346666666667, 514.6666666666666, 800, 4809884.623076923, -2.6132152062554828, CB=-64.33333333333337, HF0=0, VF0=0, Tol=1e-05, MaxIter=50, plots=2)
print("\nTEST 2")
mp.catenary(88.91360441490338, 44.99537159734132, 100.0, 854000000.0000001, 1707.0544275185273, CB=0.0, HF0=912082.6820817506, VF0=603513.100376363, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 3")
mp.catenary(99.81149090002897, 0.8459770263789324, 100.0, 854000000.0000001, 1707.0544275185273, CB=0.0, HF0=323638.97834178555, VF0=30602.023233123222, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 4")
mp.catenary(99.81520776134033, 0.872357398602503, 100.0, 854000000.0000001, 1707.0544275185273, CB=0.0, HF0=355255.0943810993, VF0=32555.18285808794, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 5")
mp.catenary(99.81149195956499, 0.8459747131565791, 100.0, 854000000.0000001, 1707.0544275185273, CB=0.0, HF0=323645.55876751675, VF0=30602.27072107738, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 6")
mp.catenary(88.91360650151807, 44.99537139684605, 100.0, 854000000.0000001, 1707.0544275185273, CB=0.0, HF0=912082.6820817146, VF0=603513.100376342, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 7")
mp.catenary(9.516786788834565, 2.601777402222183, 10.0, 213500000.00000003, 426.86336920488003, CB=0.0, HF0=1218627.2292202935, VF0=328435.58512892434, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 8")
mp.catenary(9.897879983411258, 0.3124565409495972, 10.0, 213500000.00000003, 426.86336920488003, CB=0.0, HF0=2191904.191415531, VF0=69957.98566771008, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 9")
mp.catenary(107.77260514238083, 7.381234307499085, 112.08021179445676, 6784339692.139625, 13559.120871401587, CB=0.0, HF0=672316.8532881762, VF0=-552499.1313868811, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 9.1")
mp.catenary(107.67265157943795, 7.381234307499085, 112.08021179445676, 6784339692.139625, 13559.120871401587, CB=0.0, HF0=3752787.759641461, VF0=-1678302.5929179655, Tol=1e-06, MaxIter=50, plots=1)
print("\nTEST 9.2")
mp.catenary(107.77260514238083, 7.381234307499085, 112.08021179445676, 6784339692.139625, 13559.120871401587, CB=0.0, Tol=1e-06, MaxIter=50, plots=2, HF0= 1.35e+05,VF0= 1.13e+02)
print("\nTEST 9.3")
mp.catenary(98.6712173965359, 8.515909042185399, 102.7903150736787, 5737939634.533289, 11467.878219531065, CB=0.0, HF0=118208621.36075467, VF0=-12806834.457078349, Tol=1e-07, MaxIter=50, plots=2)
'''
'''
# EXAMPLE 3: Create a simple MoorPy System from scratch
test = mp.System()
test.depth = 100
# Create the LineType of the line for the system
test.addLineType("main", 0.1, 100.0, 1e8)
# add points and lines
test.addPoint(1, [ 0, 0, 0])
test.addPoint(0, [ 100, 0, -50], DOFs=[2])
test.addLine(120, "main")
# attach
test.pointList[0].attachLine(1, 1)
test.pointList[1].attachLine(1, 0)
test.initialize(plots=1)
test.solveEquilibrium3()
test.plot()
'''
"""
# EXAMPLE 4: Create a diagonal or orthogonal MoorPy System and compare system stiffness matrices
ms = mp.System()
# --- diagonal scenario ---
'''
ms.depth = 600
r6 = np.zeros(6)
#r6 = np.array([1,1,0,1,1,1])
ms.addBody(0, r6)
r = np.zeros(3)
#r = np.array([-2,1,3])
ms.addPoint(1, r)
ms.bodyList[0].attachPoint(len(ms.pointList),r-r6[:3])
#ms.addPoint(1, np.array([-10,0,0]))
#ms.bodyList[0].attachPoint(len(ms.pointList),np.array([-10,0,0]))
ms.addPoint(1, np.array([-1000,-1000,-600]))
ms.addLineType('main', 0.10, 40, 1e9)
ms.addLine(1600, 'main')
ms.pointList[0].attachLine(1, 1)
ms.pointList[1].attachLine(1, 0)
'''
# --- orthogonal scenario ---
ms.depth = 100
ms.addBody(0, np.zeros(6))
ms.addPoint(1, np.zeros(3)) # fairlead point
#ms.bodyList[0].attachPoint(len(ms.pointList), [0, 10, 0]) # translations good but some rotations are different
#ms.bodyList[0].attachPoint(len(ms.pointList), [10, 0, 0]) # looks good except 6,4 and 6,6 terms (just first-order vs. nonlinear?)
ms.bodyList[0].attachPoint(len(ms.pointList), [0, 0, 10]) # looks good except 4,4 and 4,6 terms (just first-order vs. nonlinear?) but 4,4 has sign flipped!
ms.addPoint(1, [700, -380, -10]) # anchor point
ms.addLineType('main', 0.10, 10, 1e10)
ms.addLine(815, 'main')
ms.pointList[0].attachLine(1, 1)
ms.pointList[1].attachLine(1, 0)
# --- end of scenario choices ---
ms.initialize()
ms.plot()
a = np.array([0.1, 0, 0, 0, 0, 0])
Kbody = ms.bodyList[0].getStiffness()
Kpoint = ms.pointList[0].getStiffness()
KlineA = ms.lineList[0].getStiffnessMatrix()
KpointA = ms.pointList[0].getStiffnessA()
KbodyA = ms.bodyList[0].getStiffnessA()
#Ksystem = ms.getSystemStiffness()
KsystemA = ms.getSystemStiffnessA()
ms.display=3
Ksystem = ms.getSystemStiffness(dth=0.05)
print(ms.pointList[0].r)
print("line stiffness A")
mp.printMat(KlineA)
print("body stiffness A")
mp.printMat(KbodyA)
print("system stiffness A")
mp.printMat(KsystemA)
print("system stiffness nonlinear")
mp.printMat(Ksystem)
"""
"""
# EXAMPLE 5: Create a 3 line MoorPy System from scratch (Taken from ColorTension_example)
depth = 600
angle = np.arange(3)*np.pi*2/3 # line headings list
anchorR = 1600 # anchor radius/spacing
fair_depth= 21
fairR = 20
LineLength= 1800
typeName = "chain" # identifier string for line type
# --------------- set up mooring system ---------------------
# Create blank system object
ms = mp.System()
# Set the depth of the system to the depth of the input value
ms.depth = depth
# add a line type
ms.lineTypes[typeName] = getLineProps(120, name=typeName)
# Add a free, body at [0,0,0] to the system (including some properties to make it hydrostatically stiff)
ms.addBody(0, np.zeros(6), m=1e6, v=1e3, rM=100, AWP=1e3)
# Set the anchor points of the system
anchors = []
for i in range(len(angle)):
ms.addPoint(1, np.array([anchorR*np.cos(angle[i]), anchorR*np.sin(angle[i]), -ms.depth], dtype=float))
anchors.append(len(ms.pointList))
# Set the points that are attached to the body to the system
bodypts = []
for i in range(len(angle)):
ms.addPoint(1, np.array([fairR*np.cos(angle[i]), fairR*np.sin(angle[i]), -fair_depth], dtype=float))
bodypts.append(len(ms.pointList))
ms.bodyList[0].attachPoint(ms.pointList[bodypts[i]-1].number, ms.pointList[bodypts[i]-1].r - ms.bodyList[0].r6[:3])
# Add and attach lines to go from the anchor points to the body points
for i in range(len(angle)):
ms.addLine(LineLength, typeName)
line = len(ms.lineList)
ms.pointList[anchors[i]-1].attachLine(ms.lineList[line-1].number, 0)
ms.pointList[bodypts[i]-1].attachLine(ms.lineList[line-1].number, 1)
'''
ms.initialize() # make sure everything's connected
ms.solveEquilibrium3() # equilibrate
ms.unload("sample.txt") # export to MD input file
#fig, ax = ms.plot() # plot the system in original configuration
ms.solveEquilibrium3() # equilibrate
#fig, ax = ms.plot(color='red') # plot the system in displaced configuration (on the same plot, in red)
print(f"Body offset position is {ms.bodyList[0].r6}")
plt.show()
fig, ax, minten, maxten = ms.colortensions()
#fig.show()
#Read in tension data
#dataff, unitsff = ml.read_output_file("C:/code/Analyzing Outputs/Python scripts for OpenFAST outputs/FF Mooring lines/", "steadyfarm.FarmMD.MD.out")
#time = dataff['Time']
fig, ax = plt.subplots(figsize=(6, 1))
fig.subplots_adjust(bottom=0.5)
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['blue','red'])
bounds = range(int(minten),int(maxten), int((maxten-minten)/10))
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=ax, orientation='horizontal',
label="Tension [N]")
'''
# ----------------
ms.initialize()
ms.solveEquilibrium3()
#ms.unload('sample.txt')
fig, ax = ms.plot(colortension=True)
"""
| 34.625 | 197 | 0.666065 |
9f9a0fa84698c2fd4bddbfc8238fad4c9fd03ec4 | 999 | py | Python | load_plugins.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | 2 | 2021-11-10T21:39:34.000Z | 2021-11-11T13:43:40.000Z | load_plugins.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | null | null | null | load_plugins.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | null | null | null | # [loads plugins if any]
from importlib import import_module
from os import listdir
import time
from libs.extras import print
class LoadPlugin:
    """Discovers and registers bot plugins from the ./plugins directory."""

    def __init__(self, bot):
        # Bot instance that plugin cogs are attached to via add_cog().
        self.bot = bot

    def load_plugin(self):
        """Import every *_plugin.py module in ./plugins and register it.

        Returns (loaded, load_time): the list of plugin data dicts returned
        by each module's setup(), and the accumulated load time in ms.
        """
        total_ms = 0
        registered = []
        for entry in listdir("./plugins"):
            start = time.time() * 1000
            if not entry.endswith("_plugin.py"):
                continue
            try:
                module = import_module(f'plugins.{entry.replace(".py", "")}')
                data = module.setup(self.bot)
                self.bot.add_cog(data['Object'])
                total_ms += round(abs(start - time.time() * 1000))
                registered.append(data)
                print(f"Plugin \"{data['name']}\" Loaded!")
            except ImportError:
                print(f"Plugin Loading Failed!")
        print(f"Loaded All Plugins In {total_ms}ms")
        return registered, total_ms
a36623e51c4241dc0d43cc8e63b7c6e1e6f2b906 | 1,020 | py | Python | setup.py | cedadev/abcunit-backend | 2cc519c290e990b2db1e3f7666b7f429532f271b | [
"BSD-2-Clause"
] | null | null | null | setup.py | cedadev/abcunit-backend | 2cc519c290e990b2db1e3f7666b7f429532f271b | [
"BSD-2-Clause"
] | 11 | 2020-10-28T14:54:25.000Z | 2021-06-22T14:56:14.000Z | setup.py | cedadev/abcunit-backend | 2cc519c290e990b2db1e3f7666b7f429532f271b | [
"BSD-2-Clause"
] | 1 | 2020-10-29T10:11:04.000Z | 2020-10-29T10:11:04.000Z | import os
from setuptools import setup

# Directory containing this setup.py; used to resolve packaged data files.
current_dir = os.path.abspath(os.path.dirname(__file__))

# Long description comes from the README.  Context managers close every
# file promptly (the original leaked all four file handles).
with open(os.path.join(current_dir, 'README.rst')) as readme_file:
    README = readme_file.read()

REQUIRES_PYTHON = ">=3.5.0"

# The version is the first double-quoted token on the first line of the
# package __init__.py (e.g. __version__ = "1.2.3").
with open(os.path.join(current_dir, 'abcunit_backend', '__init__.py')) as init_file:
    VERSION = init_file.readline().split('"')[1]

with open('requirements.txt') as reqs_file:
    reqs = [line.strip() for line in reqs_file]
with open('requirements_dev.txt') as dev_reqs_file:
    dev_reqs = [line.strip() for line in dev_reqs_file]

docs_requirements = [
    "sphinx",
    "sphinx-rtd-theme",
    "nbsphinx",
    "pandoc",
    "ipython",
    "m2r2"
]

setup(
    name="abcunit-backend",
    version=VERSION,
    description="Backend solution for abcunit success / failure logs",
    long_description=README,
    author="Jonathan Haigh",
    author_email="jonathan.haigh@stfc.ac.uk",
    url="https://github.com/cedadev/abcunit-backend",
    python_requires=REQUIRES_PYTHON,
    license="BSD 2-Clause License",
    packages=["abcunit_backend"],
    install_requires=reqs,
    extras_require={"dev": dev_reqs, "docs": docs_requirements}
)
| 29.142857 | 100 | 0.695098 |
7c82d8dcb558f4f2d50161df866039daba883efb | 69 | py | Python | main.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | main.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | main.py | princesinghtomar/Classic-Brick-Breaker | acf339a1e242342f04ff27d5b0b508a87cf88014 | [
"MIT"
] | null | null | null | from mainrunning import *
if __name__ == "__main__":
    # Entry point: Run comes from `mainrunning` via the star import above.
    # (Trailing dataset-metadata junk removed from this line.)
    Run().Go()
4f73d62f1f21c9be5a651aab19f0775458f14a03 | 303 | py | Python | Python/pipes/client.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | Python/pipes/client.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | Python/pipes/client.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | import os, time, sys
pipe = '/var/tmp/simple_pipe'
def client():
    """Write a single formatted message into the named pipe.

    Opens the module-level `pipe` path for writing and sends one
    'Number NNN' line.
    """
    no = 1
    # The context manager guarantees the pipe handle is closed even on
    # error (the original leaked the file object).
    with open(pipe, 'w') as pipeout:
        pipeout.write('Number %03d\n' % no)
    ####### Alternate ########
    # pipeout = os.open(pipe, os.O_WRONLY)
    # os.write(pipeout, 'Number %03d\n' % no)
if __name__ == '__main__':
    # Run the FIFO client once when executed as a script.
    client()
| 17.823529 | 43 | 0.574257 |
bb8fcbdf4b84c6b9e92bdee97ec1f1533a52ef4e | 1,621 | py | Python | profiles_api/models.py | NikhilNB24/PROFILES-REST-API | c37c3666f46b8a8d8444ee08be30aa2620a118f1 | [
"MIT"
] | null | null | null | profiles_api/models.py | NikhilNB24/PROFILES-REST-API | c37c3666f46b8a8d8444ee08be30aa2620a118f1 | [
"MIT"
] | null | null | null | profiles_api/models.py | NikhilNB24/PROFILES-REST-API | c37c3666f46b8a8d8444ee08be30aa2620a118f1 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
    """Manager for user profiles."""

    def create_user(self, email, name, password=None):
        """Create and save a new user profile.

        Raises:
            ValueError: if no email address is supplied.
        """
        if not email:
            raise ValueError('User must have an email address')

        # Lower-case the domain portion of the address.
        email = self.normalize_email(email)
        user = self.model(email=email, name=name)

        # Hash the password rather than storing it in clear text.
        user.set_password(password)
        user.save(using=self._db)

        return user

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with the given details."""
        # BUG FIX: this method previously called itself
        # (self.create_superuser), causing infinite recursion; it must
        # delegate to create_user.
        user = self.create_user(email, name, password)

        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)

        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system.

    BUG FIX: the class previously inherited from the exception class
    PendingDeprecationWarning; the imported PermissionsMixin was clearly
    intended (it supplies is_superuser and the permission API).
    """
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    # All object-level queries go through the custom manager above.
    objects = UserProfileManager()

    # Authenticate with the email address; `name` is prompted for when
    # creating superusers via the CLI.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']

    def get_full_name(self):
        """Retrieve full name of user"""
        return self.name

    def get_short_name(self):
        """Retrieve short name of user"""
        return self.name

    def __str__(self):
        """Return string representation of user"""
        return self.email
| 29.472727 | 63 | 0.676743 |
b5c96ee11ad69d35ed92de6d7006205d96804a78 | 3,325 | py | Python | pyfr/solvers/euler/elements.py | nathan-blanc/PyFR | 8b77eec80707760b529fac2a081abfb5ab8bc916 | [
"BSD-3-Clause"
] | 1 | 2020-02-28T11:40:22.000Z | 2020-02-28T11:40:22.000Z | pyfr/solvers/euler/elements.py | nathan-blanc/PyFR | 8b77eec80707760b529fac2a081abfb5ab8bc916 | [
"BSD-3-Clause"
] | 1 | 2022-02-16T20:44:34.000Z | 2022-02-16T20:44:34.000Z | pyfr/solvers/euler/elements.py | nathan-blanc/PyFR | 8b77eec80707760b529fac2a081abfb5ab8bc916 | [
"BSD-3-Clause"
] | 1 | 2020-04-15T12:38:41.000Z | 2020-04-15T12:38:41.000Z | # -*- coding: utf-8 -*-
from pyfr.solvers.baseadvec import BaseAdvectionElements
class BaseFluidElements(object):
    """Shared variable maps and conversions for compressible-flow elements."""

    formulations = ['std', 'dual']

    # Primitive and conservative variable names, keyed by dimensionality.
    privarmap = {2: ['rho', 'u', 'v', 'p'],
                 3: ['rho', 'u', 'v', 'w', 'p']}
    convarmap = {2: ['rho', 'rhou', 'rhov', 'E'],
                 3: ['rho', 'rhou', 'rhov', 'rhow', 'E']}
    dualcoeffs = convarmap

    # Visualisation fields and the variables backing each of them.
    visvarmap = {
        2: [('density', ['rho']),
            ('velocity', ['u', 'v']),
            ('pressure', ['p'])],
        3: [('density', ['rho']),
            ('velocity', ['u', 'v', 'w']),
            ('pressure', ['p'])]
    }

    @staticmethod
    def pri_to_con(pris, cfg):
        """Convert primitive variables [rho, vel..., p] to conservative form."""
        rho, p = pris[0], pris[-1]
        vels = pris[1:-1]

        # Momentum components
        momenta = [rho*v for v in vels]

        # Total energy: internal plus kinetic
        gamma = cfg.getfloat('constants', 'gamma')
        kinetic = 0.5*rho*sum(v*v for v in vels)

        return [rho, *momenta, p/(gamma - 1) + kinetic]

    @staticmethod
    def con_to_pri(cons, cfg):
        """Convert conservative variables [rho, mom..., E] to primitive form."""
        rho, E = cons[0], cons[-1]

        # Recover velocities from the momenta
        vels = [mom/rho for mom in cons[1:-1]]

        # Recover the pressure from the total energy
        gamma = cfg.getfloat('constants', 'gamma')
        p = (gamma - 1)*(E - 0.5*rho*sum(v*v for v in vels))

        return [rho, *vels, p]
class EulerElements(BaseFluidElements, BaseAdvectionElements):
    """Element class for the compressible Euler equations.

    Combines the fluid variable maps from BaseFluidElements with the
    generic advection machinery, and binds the inviscid flux kernels.
    """

    def set_backend(self, *args, **kwargs):
        """Register the flux kernels with the backend and bind them.

        Populates self.kernels['tdisf_curved'/'tdisf_linear'] with kernel
        factories for curved and linear mesh regions respectively.
        """
        super().set_backend(*args, **kwargs)

        # Register our flux kernels
        self._be.pointwise.register('pyfr.solvers.euler.kernels.tflux')
        self._be.pointwise.register('pyfr.solvers.euler.kernels.tfluxlin')

        # Template parameters for the flux kernels
        tplargs = {
            'ndims': self.ndims,
            'nvars': self.nvars,
            'nverts': len(self.basis.linspts),
            'c': self.cfg.items_as('constants', float),
            'jac_exprs': self.basis.jac_exprs
        }

        # Helpers
        c, l = 'curved', 'linear'
        r, s = self._mesh_regions, self._slice_mat

        # NOTE(review): when 'flux' anti-aliasing is enabled the kernels
        # appear to operate on quadrature points (nqpts) instead of
        # solution points (nupts) — confirm against the basis definitions.
        if c in r and 'flux' not in self.antialias:
            self.kernels['tdisf_curved'] = lambda uin: self._be.kernel(
                'tflux', tplargs=tplargs, dims=[self.nupts, r[c]],
                u=s(self.scal_upts[uin], c), f=s(self._vect_upts, c),
                smats=self.smat_at('upts', c)
            )
        elif c in r:
            self.kernels['tdisf_curved'] = lambda: self._be.kernel(
                'tflux', tplargs=tplargs, dims=[self.nqpts, r[c]],
                u=s(self._scal_qpts, c), f=s(self._vect_qpts, c),
                smats=self.smat_at('qpts', c)
            )

        # Linear regions pass vertex locations (verts) rather than
        # precomputed smats to the 'tfluxlin' kernel.
        if l in r and 'flux' not in self.antialias:
            self.kernels['tdisf_linear'] = lambda uin: self._be.kernel(
                'tfluxlin', tplargs=tplargs, dims=[self.nupts, r[l]],
                u=s(self.scal_upts[uin], l), f=s(self._vect_upts, l),
                verts=self.ploc_at('linspts', l), upts=self.upts
            )
        elif l in r:
            self.kernels['tdisf_linear'] = lambda: self._be.kernel(
                'tfluxlin', tplargs=tplargs, dims=[self.nqpts, r[l]],
                u=s(self._scal_qpts, l), f=s(self._vect_qpts, l),
                verts=self.ploc_at('linspts', l), upts=self.qpts
            )
| 33.585859 | 74 | 0.519398 |
72f60209fd6fc64489d2b3338679ca46a0fa9f50 | 8,290 | py | Python | test/service/test_cart_export_service.py | jessebrennan/azul | 65970a0947f38fae439a3bf8fd960d351787b7a3 | [
"Apache-2.0"
] | null | null | null | test/service/test_cart_export_service.py | jessebrennan/azul | 65970a0947f38fae439a3bf8fd960d351787b7a3 | [
"Apache-2.0"
] | null | null | null | test/service/test_cart_export_service.py | jessebrennan/azul | 65970a0947f38fae439a3bf8fd960d351787b7a3 | [
"Apache-2.0"
] | null | null | null | from unittest import (
TestCase,
skipIf,
)
from unittest.mock import (
patch,
)
import responses
from azul import (
config,
)
from azul.service.cart_export_service import (
CartExportService,
ExpiredAccessTokenError,
)
from azul.service.collection_data_access import (
CollectionDataAccess,
)
from retorts import (
ResponsesHelper,
)
@skipIf(config.dss_endpoint is None,
        'DSS endpoint is not configured')
class TestCartExportService(TestCase):
    """Unit tests for CartExportService.

    DynamoDB access is patched out in every test; HTTP calls to the
    collection endpoint are intercepted with the `responses` library.
    """

    @patch('azul.deployment.aws.dynamo')
    def test_get_content_with_no_resume_token_returning_results_without_next_resume_token(self, _dynamodb_client):
        """First page with results and no further pages yields items and a None resume token."""
        mock_entity_1 = dict(EntityId='entity1', EntityType='foo', EntityVersion='bar')
        mock_entity_2 = dict(EntityId='entity2', EntityType='foo', EntityVersion='bar')
        expected_content_item_1 = dict(type='file',
                                       uuid=mock_entity_1['EntityId'],
                                       version=mock_entity_1['EntityVersion'])

        def mock_get_paginable_cart_items(**kwargs):
            # First page: no resume token should be passed through.
            self.assertIsNone(kwargs['resume_token'])
            return dict(items=[mock_entity_1, mock_entity_2],
                        last_evaluated_key=None)

        service = CartExportService()
        with patch.object(service.cart_item_manager,
                          'get_paginable_cart_items',
                          side_effect=mock_get_paginable_cart_items):
            content = service.get_content('user1', 'cart1', 'collection1', 'ver1', None)
        content_items = content['items']
        self.assertIsNone(content['resume_token'])
        self.assertEqual(2, len(content_items))
        self.assertIn(expected_content_item_1, content_items)

    @patch('azul.deployment.aws.dynamo')
    def test_get_content_with_no_resume_token_returning_no_results_without_next_resume_token(self, _dynamodb_client):
        """An empty cart yields no items and no resume token."""
        def mock_get_paginable_cart_items(**kwargs):
            self.assertIsNone(kwargs['resume_token'])
            return dict(items=[],
                        last_evaluated_key=None)

        service = CartExportService()
        with patch.object(service.cart_item_manager,
                          'get_paginable_cart_items',
                          side_effect=mock_get_paginable_cart_items):
            content = service.get_content('user1', 'cart1', 'collection1', 'ver1', None)
        self.assertIsNone(content['resume_token'])
        self.assertEqual(0, len(content['items']))

    @patch('azul.deployment.aws.dynamo')
    def test_get_content_with_resume_token_returning_results_with_next_resume_token(self, _dynamodb_client):
        """A non-final page propagates the pagination key as a fresh resume token."""
        mock_resume_token = 'abc'
        mock_entity_1 = dict(EntityId='entity1', EntityType='foo', EntityVersion='bar')
        mock_entity_2 = dict(EntityId='entity2', EntityType='foo', EntityVersion='bar')
        expected_content_item_1 = dict(type='file',
                                       uuid=mock_entity_1['EntityId'],
                                       version=mock_entity_1['EntityVersion'])

        def mock_get_paginable_cart_items(**kwargs):
            self.assertIsNotNone(kwargs['resume_token'])
            return dict(items=[mock_entity_1, mock_entity_2],
                        last_evaluated_key={'foo': 'bar'})

        service = CartExportService()
        with patch.object(service.cart_item_manager,
                          'get_paginable_cart_items',
                          side_effect=mock_get_paginable_cart_items):
            content = service.get_content('user1', 'cart1', 'collection1', 'ver1', mock_resume_token)
        content_items = content['items']
        # The returned token must encode the new pagination state, not echo
        # the one that was passed in.
        self.assertNotEqual(mock_resume_token, content['resume_token'])
        self.assertEqual(2, len(content_items))
        self.assertIn(expected_content_item_1, content_items)

    @responses.activate
    @patch('azul.deployment.aws.dynamo')
    def test_export_create_new_collection(self, _dynamodb_client):
        """Export without a resume token PUTs (creates) a new collection."""
        expected_collection = dict(uuid='abc', version='123')
        expected_get_content_result = dict(resume_token='rt1',
                                           items=[1, 2, 3, 4])  # NOTE: This is just for the test.
        service = CartExportService()
        with patch.object(service.cart_item_manager, 'get_cart', side_effect=[dict(CartName='abc123')]):
            with patch.object(service, 'get_content', side_effect=[expected_get_content_result]):
                with ResponsesHelper() as helper:
                    helper.add(responses.Response(responses.PUT,
                                                  CollectionDataAccess.endpoint_url('collections'),
                                                  status=201,
                                                  json=expected_collection))
                    result = service.export(export_id='export1',
                                            user_id='user1',
                                            cart_id='cart1',
                                            access_token='at1',
                                            collection_uuid=expected_collection['uuid'],
                                            collection_version='ver1',
                                            resume_token=None)
        self.assertEqual(expected_collection, result['collection'])
        self.assertEqual(expected_get_content_result['resume_token'], result['resume_token'])
        self.assertEqual(len(expected_get_content_result['items']), result['exported_item_count'])

    @responses.activate
    @patch('azul.deployment.aws.dynamo')
    def test_export_append_items_to_collection_ok(self, _dynamodb_client):
        """Export with a resume token PATCHes (appends to) the existing collection."""
        expected_collection = dict(uuid='abc', version='123')
        expected_get_content_result = dict(resume_token='rt1',
                                           items=[1, 2, 3, 4])  # NOTE: This is just for the test.
        service = CartExportService()
        with patch.object(service, 'get_content', side_effect=[expected_get_content_result]):
            with ResponsesHelper() as helper:
                helper.add(responses.Response(
                    responses.PATCH,
                    CollectionDataAccess.endpoint_url('collections', expected_collection['uuid']),
                    json=expected_collection
                ))
                result = service.export(export_id='export1',
                                        user_id='user1',
                                        cart_id='cart1',
                                        access_token='at1',
                                        collection_uuid=expected_collection['uuid'],
                                        collection_version='ver1',
                                        resume_token='rt0')
        self.assertEqual(expected_collection, result['collection'])
        self.assertEqual(expected_get_content_result['resume_token'], result['resume_token'])
        self.assertEqual(len(expected_get_content_result['items']), result['exported_item_count'])

    @responses.activate
    @patch('azul.deployment.aws.dynamo')
    def test_export_append_items_to_collection_raises_expired_access_token_error(self, _dynamodb_client):
        """A 401 from the collection endpoint surfaces as ExpiredAccessTokenError."""
        expected_collection = dict(uuid='abc', version='123')
        expected_get_content_result = dict(resume_token='rt1',
                                           items=[1, 2, 3, 4])  # NOTE: This is just for the test.
        service = CartExportService()
        with self.assertRaises(ExpiredAccessTokenError):
            with patch.object(service, 'get_content', side_effect=[expected_get_content_result]):
                with ResponsesHelper() as helper:
                    url = CollectionDataAccess.endpoint_url('collections', expected_collection['uuid'])
                    helper.add(responses.Response(responses.PATCH, url, status=401, json=dict(code='abc')))
                    service.export(export_id='export1',
                                   user_id='user1',
                                   cart_id='cart1',
                                   access_token='at1',
                                   collection_uuid=expected_collection['uuid'],
                                   collection_version='ver1',
                                   resume_token='rt0')
| 51.490683 | 117 | 0.592642 |
699d21cbe7b1ca942f9c17d807afbc8c1f34115d | 436,469 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 177 | 2016-03-15T17:03:51.000Z | 2022-03-18T16:48:44.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2016-03-30T10:45:22.000Z | 2020-07-14T16:28:13.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2016-03-16T20:38:57.000Z | 2022-02-22T04:26:02.000Z | """ Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR subscriber\-infra\-tmplmgr package configuration.
This module contains definitions
for the following management objects\:
dynamic\-template\: All dynamic template configurations
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class DynamicTemplate(_Entity_):
"""
All dynamic template configurations
.. attribute:: ppps
Templates of the PPP Type
**type**\: :py:class:`Ppps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps>`
.. attribute:: ip_subscribers
The IP Subscriber Template Table
**type**\: :py:class:`IpSubscribers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers>`
.. attribute:: subscriber_services
The Service Type Template Table
**type**\: :py:class:`SubscriberServices <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate, self).__init__()
self._top_entity = None
self.yang_name = "dynamic-template"
self.yang_parent_name = "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ppps", ("ppps", DynamicTemplate.Ppps)), ("ip-subscribers", ("ip_subscribers", DynamicTemplate.IpSubscribers)), ("subscriber-services", ("subscriber_services", DynamicTemplate.SubscriberServices))])
self._leafs = OrderedDict()
self.ppps = DynamicTemplate.Ppps()
self.ppps.parent = self
self._children_name_map["ppps"] = "ppps"
self.ip_subscribers = DynamicTemplate.IpSubscribers()
self.ip_subscribers.parent = self
self._children_name_map["ip_subscribers"] = "ip-subscribers"
self.subscriber_services = DynamicTemplate.SubscriberServices()
self.subscriber_services.parent = self
self._children_name_map["subscriber_services"] = "subscriber-services"
self._segment_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate, [], name, value)
class Ppps(_Entity_):
"""
Templates of the PPP Type
.. attribute:: ppp
A Template of the PPP Type
**type**\: list of :py:class:`Ppp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.Ppps, self).__init__()
self.yang_name = "ppps"
self.yang_parent_name = "dynamic-template"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ppp", ("ppp", DynamicTemplate.Ppps.Ppp))])
self._leafs = OrderedDict()
self.ppp = YList(self)
self._segment_path = lambda: "ppps"
self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps, [], name, value)
class Ppp(_Entity_):
"""
A Template of the PPP Type
.. attribute:: template_name (key)
The name of the template
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: ipv4_network
Interface IPv4 Network configuration data
**type**\: :py:class:`Ipv4Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv4Network>`
.. attribute:: accounting
Subscriber accounting dynamic\-template commands
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Accounting>`
.. attribute:: qos
QoS dynamically applied configuration template
**type**\: :py:class:`Qos <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos>`
.. attribute:: igmp
IGMPconfiguration
**type**\: :py:class:`Igmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Igmp>`
.. attribute:: ipv6_network
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6Network>`
.. attribute:: pppoe_template
PPPoE template configuration data
**type**\: :py:class:`PppoeTemplate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppoeTemplate>`
**presence node**\: True
.. attribute:: span_monitor_sessions
Monitor Session container for this template
**type**\: :py:class:`SpanMonitorSessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.SpanMonitorSessions>`
.. attribute:: vrf
Assign the interface to a VRF
**type**\: str
**length:** 1..32
.. attribute:: ipv6_neighbor
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Neighbor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6Neighbor>`
.. attribute:: ppp_template
PPP template configuration data
**type**\: :py:class:`PppTemplate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate>`
.. attribute:: pbr
Dynamic Template PBR configuration
**type**\: :py:class:`Pbr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Pbr>`
.. attribute:: ipv4_packet_filter
IPv4 Packet Filtering configuration for the template
**type**\: :py:class:`Ipv4PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter>`
.. attribute:: ipv6_packet_filter
IPv6 Packet Filtering configuration for the interface
**type**\: :py:class:`Ipv6PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter>`
.. attribute:: dhcpv6
Interface dhcpv6 configuration data
**type**\: :py:class:`Dhcpv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Dhcpv6>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.Ppps.Ppp, self).__init__()
self.yang_name = "ppp"
self.yang_parent_name = "ppps"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['template_name']
self._child_classes = OrderedDict([("Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network", ("ipv4_network", DynamicTemplate.Ppps.Ppp.Ipv4Network)), ("Cisco-IOS-XR-subscriber-accounting-cfg:accounting", ("accounting", DynamicTemplate.Ppps.Ppp.Accounting)), ("Cisco-IOS-XR-qos-ma-bng-cfg:qos", ("qos", DynamicTemplate.Ppps.Ppp.Qos)), ("Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp", ("igmp", DynamicTemplate.Ppps.Ppp.Igmp)), ("Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network", ("ipv6_network", DynamicTemplate.Ppps.Ppp.Ipv6Network)), ("Cisco-IOS-XR-subscriber-pppoe-ma-gbl-cfg:pppoe-template", ("pppoe_template", DynamicTemplate.Ppps.Ppp.PppoeTemplate)), ("Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions", ("span_monitor_sessions", DynamicTemplate.Ppps.Ppp.SpanMonitorSessions)), ("Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor", ("ipv6_neighbor", DynamicTemplate.Ppps.Ppp.Ipv6Neighbor)), ("Cisco-IOS-XR-ppp-ma-gbl-cfg:ppp-template", ("ppp_template", DynamicTemplate.Ppps.Ppp.PppTemplate)), ("Cisco-IOS-XR-pbr-subscriber-cfg:pbr", ("pbr", DynamicTemplate.Ppps.Ppp.Pbr)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter", ("ipv4_packet_filter", DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter", ("ipv6_packet_filter", DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter)), ("Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6", ("dhcpv6", DynamicTemplate.Ppps.Ppp.Dhcpv6))])
self._leafs = OrderedDict([
('template_name', (YLeaf(YType.str, 'template-name'), ['str'])),
('vrf', (YLeaf(YType.str, 'Cisco-IOS-XR-infra-rsi-subscriber-cfg:vrf'), ['str'])),
])
self.template_name = None
self.vrf = None
self.ipv4_network = DynamicTemplate.Ppps.Ppp.Ipv4Network()
self.ipv4_network.parent = self
self._children_name_map["ipv4_network"] = "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"
self.accounting = DynamicTemplate.Ppps.Ppp.Accounting()
self.accounting.parent = self
self._children_name_map["accounting"] = "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"
self.qos = DynamicTemplate.Ppps.Ppp.Qos()
self.qos.parent = self
self._children_name_map["qos"] = "Cisco-IOS-XR-qos-ma-bng-cfg:qos"
self.igmp = DynamicTemplate.Ppps.Ppp.Igmp()
self.igmp.parent = self
self._children_name_map["igmp"] = "Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp"
self.ipv6_network = DynamicTemplate.Ppps.Ppp.Ipv6Network()
self.ipv6_network.parent = self
self._children_name_map["ipv6_network"] = "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"
self.pppoe_template = None
self._children_name_map["pppoe_template"] = "Cisco-IOS-XR-subscriber-pppoe-ma-gbl-cfg:pppoe-template"
self.span_monitor_sessions = DynamicTemplate.Ppps.Ppp.SpanMonitorSessions()
self.span_monitor_sessions.parent = self
self._children_name_map["span_monitor_sessions"] = "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"
self.ipv6_neighbor = DynamicTemplate.Ppps.Ppp.Ipv6Neighbor()
self.ipv6_neighbor.parent = self
self._children_name_map["ipv6_neighbor"] = "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"
self.ppp_template = DynamicTemplate.Ppps.Ppp.PppTemplate()
self.ppp_template.parent = self
self._children_name_map["ppp_template"] = "Cisco-IOS-XR-ppp-ma-gbl-cfg:ppp-template"
self.pbr = DynamicTemplate.Ppps.Ppp.Pbr()
self.pbr.parent = self
self._children_name_map["pbr"] = "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"
self.ipv4_packet_filter = DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter()
self.ipv4_packet_filter.parent = self
self._children_name_map["ipv4_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"
self.ipv6_packet_filter = DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter()
self.ipv6_packet_filter.parent = self
self._children_name_map["ipv6_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"
self.dhcpv6 = DynamicTemplate.Ppps.Ppp.Dhcpv6()
self.dhcpv6.parent = self
self._children_name_map["dhcpv6"] = "Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6"
self._segment_path = lambda: "ppp" + "[template-name='" + str(self.template_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/ppps/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps.Ppp, ['template_name', 'vrf'], name, value)
class Ipv4Network(_Entity_):
"""
Interface IPv4 Network configuration data
.. attribute:: unnumbered
Enable IP processing without an explicit address
**type**\: str
.. attribute:: mtu
The IP Maximum Transmission Unit
**type**\: int
**range:** 68..65535
**units**\: byte
.. attribute:: unreachables
TRUE if enabled, FALSE if disabled
**type**\: bool
**default value**\: false
.. attribute:: rpf
TRUE if enabled, FALSE if disabled
**type**\: bool
**default value**\: true
"""
_prefix = 'ipv4-ma-subscriber-cfg'
_revision = '2015-07-30'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.Ppps.Ppp.Ipv4Network, self).__init__()
self.yang_name = "ipv4-network"
self.yang_parent_name = "ppp"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('unnumbered', (YLeaf(YType.str, 'unnumbered'), ['str'])),
('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
('unreachables', (YLeaf(YType.boolean, 'unreachables'), ['bool'])),
('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
])
self.unnumbered = None
self.mtu = None
self.unreachables = None
self.rpf = None
self._segment_path = lambda: "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv4Network, ['unnumbered', 'mtu', 'unreachables', 'rpf'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv4Network']['meta_info']
class Accounting(_Entity_):
"""
Subscriber accounting dynamic\-template commands
.. attribute:: idle_timeout
Subscriber accounting idle timeout
**type**\: :py:class:`IdleTimeout <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout>`
.. attribute:: session
Subscriber accounting session accounting
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Accounting.Session>`
.. attribute:: service_accounting
Subscriber accounting service accounting
**type**\: :py:class:`ServiceAccounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting>`
.. attribute:: monitor_feature
Subscriber monitor feature
**type**\: str
.. attribute:: prepaid_feature
Subscriber accounting prepaid feature
**type**\: str
"""
_prefix = 'subscriber-accounting-cfg'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.Ppps.Ppp.Accounting, self).__init__()
self.yang_name = "accounting"
self.yang_parent_name = "ppp"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("idle-timeout", ("idle_timeout", DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout)), ("session", ("session", DynamicTemplate.Ppps.Ppp.Accounting.Session)), ("service-accounting", ("service_accounting", DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting))])
self._leafs = OrderedDict([
('monitor_feature', (YLeaf(YType.str, 'monitor-feature'), ['str'])),
('prepaid_feature', (YLeaf(YType.str, 'prepaid-feature'), ['str'])),
])
self.monitor_feature = None
self.prepaid_feature = None
self.idle_timeout = DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout()
self.idle_timeout.parent = self
self._children_name_map["idle_timeout"] = "idle-timeout"
self.session = DynamicTemplate.Ppps.Ppp.Accounting.Session()
self.session.parent = self
self._children_name_map["session"] = "session"
self.service_accounting = DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting()
self.service_accounting.parent = self
self._children_name_map["service_accounting"] = "service-accounting"
self._segment_path = lambda: "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps.Ppp.Accounting, ['monitor_feature', 'prepaid_feature'], name, value)
class IdleTimeout(_Entity_):
"""
Subscriber accounting idle timeout
.. attribute:: timeout_value
Idle timeout value in seconds
**type**\: int
**range:** 60..4320000
.. attribute:: threshold
Threshold in minute(s) per packet
**type**\: int
**range:** 1..10000
.. attribute:: direction
Idle timeout traffic direction
**type**\: str
"""
_prefix = 'subscriber-accounting-cfg'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout, self).__init__()
self.yang_name = "idle-timeout"
self.yang_parent_name = "accounting"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('timeout_value', (YLeaf(YType.uint32, 'timeout-value'), ['int'])),
('threshold', (YLeaf(YType.uint32, 'threshold'), ['int'])),
('direction', (YLeaf(YType.str, 'direction'), ['str'])),
])
self.timeout_value = None
self.threshold = None
self.direction = None
self._segment_path = lambda: "idle-timeout"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout, ['timeout_value', 'threshold', 'direction'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
return meta._meta_table['DynamicTemplate.Ppps.Ppp.Accounting.IdleTimeout']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class Session(_Entity_):
    """
    Subscriber accounting session accounting

    .. attribute:: method_list_name
        Session accounting method list name
        **type**\: str

    .. attribute:: periodic_interval
        Interim accounting interval in minutes
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: dual_stack_delay
        Dual stack wait delay in seconds
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: hold_acct_start
        Hold Accounting start based on IA\_PD
        **type**\: :py:class:`HoldAcctStart <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Accounting.Session.HoldAcctStart>`
    """

    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Accounting.Session, self).__init__()

        self.yang_name = "session"
        self.yang_parent_name = "accounting"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf registry: python attribute -> (YLeaf descriptor, accepted types);
        # hold_acct_start is an enumeration leaf resolved via its module path.
        self._leafs = OrderedDict([
            ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
            ('periodic_interval', (YLeaf(YType.uint32, 'periodic-interval'), ['int'])),
            ('dual_stack_delay', (YLeaf(YType.uint32, 'dual-stack-delay'), ['int'])),
            ('hold_acct_start', (YLeaf(YType.enumeration, 'hold-acct-start'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg', 'DynamicTemplate', 'Ppps.Ppp.Accounting.Session.HoldAcctStart')])),
        ])
        self.method_list_name = None
        self.periodic_interval = None
        self.dual_stack_delay = None
        self.hold_acct_start = None
        self._segment_path = lambda: "session"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's leaf-aware setter.
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Accounting.Session, ['method_list_name', 'periodic_interval', 'dual_stack_delay', 'hold_acct_start'], name, value)

    class HoldAcctStart(Enum):
        """
        HoldAcctStart (Enum Class)

        Hold Accounting start based on IA\_PD

        .. data:: ipv6_prefix_delegation = 3
            Based on ipv6 delegated prefix
        """

        ipv6_prefix_delegation = Enum.YLeaf(3, "ipv6-prefix-delegation")

        @staticmethod
        def _meta_info():
            """Return the enum's metadata record (enums return the raw table entry)."""
            # Deferred import -- presumably avoids an import cycle with _meta.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Accounting.Session.HoldAcctStart']

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Accounting.Session']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class ServiceAccounting(_Entity_):
    """
    Subscriber accounting service accounting

    .. attribute:: method_list_name
        Service accounting method list name
        **type**\: str

    .. attribute:: accounting_interim_interval
        Accounting interim interval in minutes
        **type**\: int
        **range:** 0..4294967295
    """

    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting, self).__init__()

        self.yang_name = "service-accounting"
        self.yang_parent_name = "accounting"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf registry: python attribute -> (YLeaf descriptor, accepted types).
        self._leafs = OrderedDict([
            ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
            ('accounting_interim_interval', (YLeaf(YType.uint32, 'accounting-interim-interval'), ['int'])),
        ])
        self.method_list_name = None
        self.accounting_interim_interval = None
        self._segment_path = lambda: "service-accounting"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's leaf-aware setter.
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting, ['method_list_name', 'accounting_interim_interval'], name, value)

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        # Deferred import -- presumably avoids an import cycle with _meta.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Accounting.ServiceAccounting']['meta_info']
@staticmethod
def _meta_info():
    """Return the Accounting container's metadata record from the _meta table."""
    # Deferred import -- presumably avoids an import cycle with _meta.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    return meta._meta_table['DynamicTemplate.Ppps.Ppp.Accounting']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class Qos(_Entity_):
    """
    QoS dynamically applied configuration template

    .. attribute:: service_policy
        Service policy to be applied in ingress/egress direction
        **type**\: :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy>`

    .. attribute:: account
        QoS L2 overhead accounting
        **type**\: :py:class:`Account <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos.Account>`

    .. attribute:: output
        QoS to be applied in egress direction
        **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos.Output>`
    """

    # This augmentation comes from a different YANG module than the template itself.
    _prefix = 'qos-ma-bng-cfg'
    _revision = '2016-04-01'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Qos, self).__init__()

        self.yang_name = "qos"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Child containers: YANG node name -> (python attribute, binding class).
        self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy)), ("account", ("account", DynamicTemplate.Ppps.Ppp.Qos.Account)), ("output", ("output", DynamicTemplate.Ppps.Ppp.Qos.Output))])
        self._leafs = OrderedDict()

        # Non-presence children are instantiated eagerly and parented here.
        self.service_policy = DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy()
        self.service_policy.parent = self
        self._children_name_map["service_policy"] = "service-policy"

        self.account = DynamicTemplate.Ppps.Ppp.Qos.Account()
        self.account.parent = self
        self._children_name_map["account"] = "account"

        self.output = DynamicTemplate.Ppps.Ppp.Qos.Output()
        self.output.parent = self
        self._children_name_map["output"] = "output"

        # Segment path carries the augmenting module's namespace prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-qos-ma-bng-cfg:qos"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No scalar leafs on this container; children are still validated.
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos, [], name, value)

    class ServicePolicy(_Entity_):
        """
        Service policy to be applied in ingress/egress
        direction

        .. attribute:: input
            Subscriber ingress policy
            **type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Input>`
            **presence node**\: True

        .. attribute:: output
            Subscriber egress policy
            **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Output>`
            **presence node**\: True
        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy, self).__init__()

            self.yang_name = "service-policy"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("input", ("input", DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Input)), ("output", ("output", DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Output))])
            self._leafs = OrderedDict()

            # Presence children start as None (absent) rather than eagerly built.
            self.input = None
            self._children_name_map["input"] = "input"

            self.output = None
            self._children_name_map["output"] = "output"
            self._segment_path = lambda: "service-policy"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy, [], name, value)

        class Input(_Entity_):
            """
            Subscriber ingress policy

            .. attribute:: policy_name
                Name of policy\-map
                **type**\: str
                **mandatory**\: True

            .. attribute:: spi_name
                Name of the SPI
                **type**\: str

            .. attribute:: merge
                TRUE for merge enabled for service\-policy applied on dynamic template
                **type**\: bool

            .. attribute:: merge_id
                Merge ID value
                **type**\: int
                **range:** 0..255

            .. attribute:: account_stats
                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'
                **type**\: bool

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Input, self).__init__()

                self.yang_name = "input"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its existence in config is itself meaningful.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "input"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Input, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Input']['meta_info']

        class Output(_Entity_):
            """
            Subscriber egress policy

            .. attribute:: policy_name
                Name of policy\-map
                **type**\: str
                **mandatory**\: True

            .. attribute:: spi_name
                Name of the SPI
                **type**\: str

            .. attribute:: merge
                TRUE for merge enabled for service\-policy applied on dynamic template
                **type**\: bool

            .. attribute:: merge_id
                Merge ID value
                **type**\: int
                **range:** 0..255

            .. attribute:: account_stats
                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'
                **type**\: bool

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Output, self).__init__()

                self.yang_name = "output"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: mirrors Input above for the egress direction.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "output"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Output, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy.Output']['meta_info']

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos.ServicePolicy']['meta_info']

    class Account(_Entity_):
        """
        QoS L2 overhead accounting

        .. attribute:: aal
            ATM adaptation layer AAL
            **type**\: :py:class:`Qosl2DataLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2DataLink>`

        .. attribute:: encapsulation
            Specify encapsulation type
            **type**\: :py:class:`Qosl2Encap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2Encap>`

        .. attribute:: atm_cell_tax
            ATM cell tax to L2 overhead
            **type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: user_defined
            Numeric L2 overhead offset
            **type**\: int
            **range:** \-63..63
        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Qos.Account, self).__init__()

            self.yang_name = "account"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            # aal/encapsulation are enum leafs defined in the qos-ma-bng-cfg module;
            # user_defined is signed (int32) since the offset may be negative.
            self._leafs = OrderedDict([
                ('aal', (YLeaf(YType.enumeration, 'aal'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2DataLink', '')])),
                ('encapsulation', (YLeaf(YType.enumeration, 'encapsulation'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2Encap', '')])),
                ('atm_cell_tax', (YLeaf(YType.empty, 'atm-cell-tax'), ['Empty'])),
                ('user_defined', (YLeaf(YType.int32, 'user-defined'), ['int'])),
            ])
            self.aal = None
            self.encapsulation = None
            self.atm_cell_tax = None
            self.user_defined = None
            self._segment_path = lambda: "account"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos.Account, ['aal', 'encapsulation', 'atm_cell_tax', 'user_defined'], name, value)

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos.Account']['meta_info']

    class Output(_Entity_):
        """
        QoS to be applied in egress direction

        .. attribute:: minimum_bandwidth
            Minimum bandwidth value for the subscriber (in kbps)
            **type**\: int
            **range:** 1..4294967295
            **units**\: kbit/s
        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Qos.Output, self).__init__()

            self.yang_name = "output"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('minimum_bandwidth', (YLeaf(YType.uint32, 'minimum-bandwidth'), ['int'])),
            ])
            self.minimum_bandwidth = None
            self._segment_path = lambda: "output"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Qos.Output, ['minimum_bandwidth'], name, value)

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos.Output']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Qos']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class Igmp(_Entity_):
    """
    IGMPconfiguration

    .. attribute:: default_vrf
        Default VRF
        **type**\: :py:class:`DefaultVrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf>`
    """

    # Augmentation from the IGMP dynamic-template YANG module.
    _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Igmp, self).__init__()

        self.yang_name = "igmp"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("default-vrf", ("default_vrf", DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf))])
        self._leafs = OrderedDict()

        # Non-presence child, built eagerly and parented here.
        self.default_vrf = DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf()
        self.default_vrf.parent = self
        self._children_name_map["default_vrf"] = "default-vrf"
        # Segment path carries the augmenting module's namespace prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Igmp, [], name, value)

    class DefaultVrf(_Entity_):
        """
        Default VRF

        .. attribute:: explicit_tracking
            IGMPv3 explicit host tracking
            **type**\: :py:class:`ExplicitTracking <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf.ExplicitTracking>`
            **presence node**\: True

        .. attribute:: max_groups
            IGMP Max Groups
            **type**\: int
            **range:** 1..40000
            **default value**\: 25000

        .. attribute:: access_group
            Access list specifying access\-list group range
            **type**\: str
            **length:** 1..64

        .. attribute:: version
            IGMP Version
            **type**\: int
            **range:** 1..3
            **default value**\: 3

        .. attribute:: query_interval
            Query interval in seconds
            **type**\: int
            **range:** 1..3600
            **units**\: second
            **default value**\: 60

        .. attribute:: query_max_response_time
            Query response value in seconds
            **type**\: int
            **range:** 1..25
            **units**\: second
            **default value**\: 10

        .. attribute:: multicast_mode
            Configure Multicast mode variable
            **type**\: :py:class:`DynTmplMulticastMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_igmp_dyn_tmpl_cfg.DynTmplMulticastMode>`
        """

        _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf, self).__init__()

            self.yang_name = "default-vrf"
            self.yang_parent_name = "igmp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("explicit-tracking", ("explicit_tracking", DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf.ExplicitTracking))])
            self._leafs = OrderedDict([
                ('max_groups', (YLeaf(YType.uint32, 'max-groups'), ['int'])),
                ('access_group', (YLeaf(YType.str, 'access-group'), ['str'])),
                ('version', (YLeaf(YType.uint32, 'version'), ['int'])),
                ('query_interval', (YLeaf(YType.uint32, 'query-interval'), ['int'])),
                ('query_max_response_time', (YLeaf(YType.uint32, 'query-max-response-time'), ['int'])),
                ('multicast_mode', (YLeaf(YType.enumeration, 'multicast-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_igmp_dyn_tmpl_cfg', 'DynTmplMulticastMode', '')])),
            ])
            self.max_groups = None
            self.access_group = None
            self.version = None
            self.query_interval = None
            self.query_max_response_time = None
            self.multicast_mode = None

            # Presence child: starts absent (None) rather than eagerly built.
            self.explicit_tracking = None
            self._children_name_map["explicit_tracking"] = "explicit-tracking"
            self._segment_path = lambda: "default-vrf"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf, ['max_groups', 'access_group', 'version', 'query_interval', 'query_max_response_time', 'multicast_mode'], name, value)

        class ExplicitTracking(_Entity_):
            """
            IGMPv3 explicit host tracking

            .. attribute:: enable
                Enable or disable, when value is TRUE or FALSE respectively
                **type**\: bool
                **mandatory**\: True

            .. attribute:: access_list_name
                Access list specifying tracking group range
                **type**\: str
                **length:** 1..64

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf.ExplicitTracking, self).__init__()

                self.yang_name = "explicit-tracking"
                self.yang_parent_name = "default-vrf"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its existence in config is itself meaningful.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('enable', (YLeaf(YType.boolean, 'enable'), ['bool'])),
                    ('access_list_name', (YLeaf(YType.str, 'access-list-name'), ['str'])),
                ])
                self.enable = None
                self.access_list_name = None
                self._segment_path = lambda: "explicit-tracking"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf.ExplicitTracking, ['enable', 'access_list_name'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf.ExplicitTracking']['meta_info']

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Igmp.DefaultVrf']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Igmp']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class Ipv6Network(_Entity_):
    """
    Interface IPv6 Network configuration data

    .. attribute:: addresses
        Set the IPv6 address of an interface
        **type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses>`

    .. attribute:: mtu
        MTU Setting of Interface
        **type**\: int
        **range:** 1280..65535
        **units**\: byte

    .. attribute:: rpf
        TRUE if enabled, FALSE if disabled
        **type**\: bool

    .. attribute:: unreachables
        Override Sending of ICMP Unreachable Messages
        **type**\: :py:class:`Empty<ydk.types.Empty>`
    """

    # Augmentation from the IPv6 MA subscriber YANG module.
    _prefix = 'ipv6-ma-subscriber-cfg'
    _revision = '2017-01-11'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Ipv6Network, self).__init__()

        self.yang_name = "ipv6-network"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("addresses", ("addresses", DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses))])
        self._leafs = OrderedDict([
            ('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
            ('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
            ('unreachables', (YLeaf(YType.empty, 'unreachables'), ['Empty'])),
        ])
        self.mtu = None
        self.rpf = None
        self.unreachables = None

        # Non-presence child, built eagerly and parented here.
        self.addresses = DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses()
        self.addresses.parent = self
        self._children_name_map["addresses"] = "addresses"
        # Segment path carries the augmenting module's namespace prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Network, ['mtu', 'rpf', 'unreachables'], name, value)

    class Addresses(_Entity_):
        """
        Set the IPv6 address of an interface

        .. attribute:: auto_configuration
            Auto IPv6 Interface Configuration
            **type**\: :py:class:`AutoConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration>`
        """

        _prefix = 'ipv6-ma-subscriber-cfg'
        _revision = '2017-01-11'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses, self).__init__()

            self.yang_name = "addresses"
            self.yang_parent_name = "ipv6-network"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("auto-configuration", ("auto_configuration", DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration))])
            self._leafs = OrderedDict()

            self.auto_configuration = DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration()
            self.auto_configuration.parent = self
            self._children_name_map["auto_configuration"] = "auto-configuration"
            self._segment_path = lambda: "addresses"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses, [], name, value)

        class AutoConfiguration(_Entity_):
            """
            Auto IPv6 Interface Configuration

            .. attribute:: enable
                The flag to enable auto ipv6 interface configuration
                **type**\: :py:class:`Empty<ydk.types.Empty>`
            """

            _prefix = 'ipv6-ma-subscriber-cfg'
            _revision = '2017-01-11'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration, self).__init__()

                self.yang_name = "auto-configuration"
                self.yang_parent_name = "addresses"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Single empty-typed leaf: presence of the leaf means "enabled".
                self._leafs = OrderedDict([
                    ('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
                ])
                self.enable = None
                self._segment_path = lambda: "auto-configuration"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration, ['enable'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses.AutoConfiguration']['meta_info']

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Network.Addresses']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Network']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class PppoeTemplate(_Entity_):
    """
    PPPoE template configuration data

    .. attribute:: port_limit
        Specify the Port limit (attr 62) to apply to the subscriber
        **type**\: int
        **range:** 1..65535
        **mandatory**\: True

    This class is a :ref:`presence class<presence-class>`
    """

    # Augmentation from the PPPoE MA global YANG module.
    _prefix = 'subscriber-pppoe-ma-gbl-cfg'
    _revision = '2019-10-07'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.PppoeTemplate, self).__init__()

        self.yang_name = "pppoe-template"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Presence container: its existence in config is itself meaningful.
        self.is_presence_container = True
        self._leafs = OrderedDict([
            ('port_limit', (YLeaf(YType.uint16, 'port-limit'), ['int'])),
        ])
        self.port_limit = None
        # Segment path carries the augmenting module's namespace prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-subscriber-pppoe-ma-gbl-cfg:pppoe-template"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppoeTemplate, ['port_limit'], name, value)

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppoeTemplate']['meta_info']
# NOTE(review): auto-generated YDK model binding (ydk-gen output); attribute
# writes are intercepted by __setattr__, so keep statement order intact.
class SpanMonitorSessions(_Entity_):
    """
    Monitor Session container for this template

    .. attribute:: span_monitor_session
        Configuration for a particular class of Monitor Session
        **type**\: list of :py:class:`SpanMonitorSession <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession>`
    """

    # Augmentation from the Ethernet SPAN subscriber YANG module.
    _prefix = 'ethernet-span-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions, self).__init__()

        self.yang_name = "span-monitor-sessions"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("span-monitor-session", ("span_monitor_session", DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession))])
        self._leafs = OrderedDict()

        # YANG list child: a keyed YList of SpanMonitorSession entries.
        self.span_monitor_session = YList(self)
        # Segment path carries the augmenting module's namespace prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"
        # Set last -- presumably arms attribute validation; do not reorder.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions, [], name, value)

    class SpanMonitorSession(_Entity_):
        """
        Configuration for a particular class of Monitor
        Session

        .. attribute:: session_class (key)
            Session Class
            **type**\: :py:class:`SpanSessionClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes.SpanSessionClass>`

        .. attribute:: mirror_first
            Mirror a specified number of bytes from start of packet
            **type**\: int
            **range:** 1..10000
            **units**\: byte

        .. attribute:: attachment
            Attach the interface to a Monitor Session
            **type**\: :py:class:`Attachment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Attachment>`
            **presence node**\: True

        .. attribute:: mirror_interval
            Specify the mirror interval
            **type**\: :py:class:`SpanMirrorInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanMirrorInterval>`

        .. attribute:: acl
            Enable ACL matching for traffic mirroring
            **type**\: :py:class:`Acl <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Acl>`
            **presence node**\: True
        """

        _prefix = 'ethernet-span-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession, self).__init__()

            self.yang_name = "span-monitor-session"
            self.yang_parent_name = "span-monitor-sessions"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # YANG list key: session-class uniquely identifies each entry.
            self.ylist_key_names = ['session_class']
            self._child_classes = OrderedDict([("attachment", ("attachment", DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Attachment)), ("acl", ("acl", DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Acl))])
            self._leafs = OrderedDict([
                ('session_class', (YLeaf(YType.enumeration, 'session-class'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes', 'SpanSessionClass', '')])),
                ('mirror_first', (YLeaf(YType.uint32, 'mirror-first'), ['int'])),
                ('mirror_interval', (YLeaf(YType.enumeration, 'mirror-interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanMirrorInterval', '')])),
            ])
            self.session_class = None
            self.mirror_first = None
            self.mirror_interval = None

            # Presence children start as None (absent) rather than eagerly built.
            self.attachment = None
            self._children_name_map["attachment"] = "attachment"

            self.acl = None
            self._children_name_map["acl"] = "acl"
            # List entries embed their key value in the segment path predicate.
            self._segment_path = lambda: "span-monitor-session" + "[session-class='" + str(self.session_class) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession, ['session_class', 'mirror_first', 'mirror_interval'], name, value)

        class Attachment(_Entity_):
            """
            Attach the interface to a Monitor Session

            .. attribute:: session_name
                Session Name
                **type**\: str
                **length:** 1..79
                **mandatory**\: True

            .. attribute:: direction
                Specify the direction of traffic to replicate (optional)
                **type**\: :py:class:`SpanTrafficDirection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanTrafficDirection>`

            .. attribute:: port_level_enable
                Enable port level traffic mirroring
                **type**\: :py:class:`Empty<ydk.types.Empty>`

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Attachment, self).__init__()

                self.yang_name = "attachment"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its existence in config is itself meaningful.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('session_name', (YLeaf(YType.str, 'session-name'), ['str'])),
                    ('direction', (YLeaf(YType.enumeration, 'direction'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanTrafficDirection', '')])),
                    ('port_level_enable', (YLeaf(YType.empty, 'port-level-enable'), ['Empty'])),
                ])
                self.session_name = None
                self.direction = None
                self.port_level_enable = None
                self._segment_path = lambda: "attachment"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Attachment, ['session_name', 'direction', 'port_level_enable'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Attachment']['meta_info']

        class Acl(_Entity_):
            """
            Enable ACL matching for traffic mirroring

            .. attribute:: acl_enable
                Enable ACL
                **type**\: :py:class:`Empty<ydk.types.Empty>`
                **mandatory**\: True

            .. attribute:: acl_name
                ACL Name
                **type**\: str
                **length:** 1..80

            This class is a :ref:`presence class<presence-class>`
            """

            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Acl, self).__init__()

                self.yang_name = "acl"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its existence in config is itself meaningful.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('acl_enable', (YLeaf(YType.empty, 'acl-enable'), ['Empty'])),
                    ('acl_name', (YLeaf(YType.str, 'acl-name'), ['str'])),
                ])
                self.acl_enable = None
                self.acl_name = None
                self._segment_path = lambda: "acl"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Acl, ['acl_enable', 'acl_name'], name, value)

            @staticmethod
            def _meta_info():
                """Return this entity's metadata record from the generated _meta table."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession.Acl']['meta_info']

        @staticmethod
        def _meta_info():
            """Return this entity's metadata record from the generated _meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.SpanMonitorSessions.SpanMonitorSession']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this entity's metadata record from the generated _meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.SpanMonitorSessions']['meta_info']
            class Ipv6Neighbor(_Entity_):
                """
                Interface IPv6 Network configuration data
                (container ``Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor``).

                Child containers:
                    ra_interval: RaInterval, presence — RA interval in seconds.
                    framed_prefix: FramedPrefix, presence — framed IPv6 prefix.
                    duplicate_address_detection: DuplicateAddressDetection.
                    ra_initial: RaInitial, presence — initial RA count/interval.

                Leaves:
                    framed_prefix_pool: str — framed IPv6 prefix pool name.
                    managed_config: Empty — host uses stateful address config.
                    other_config: Empty — host uses stateful non-address config.
                    start_ra_on_ipv6_enable: Empty — start RA on ipv6-enable.
                    nud_enable: Empty — NUD enable.
                    ra_lifetime: int, 0..9000 seconds — RA lifetime.
                    router_preference: Ipv6NdRouterPrefTemplate enum.
                    ra_suppress: Empty — suppress IPv6 RA.
                    ra_unicast: Empty — RA unicast flag.
                    ra_unspecify_hoplimit: Empty — unspecify RA hop-limit.
                    ra_suppress_mtu: Empty — RA suppress MTU flag.
                    suppress_cache_learning: Empty — suppress cache learning.
                    reachable_time: int, 0..3600000 milliseconds.
                    ns_interval: int, 1000..4294967295 milliseconds.
                """
                _prefix = 'ipv6-nd-subscriber-cfg'
                _revision = '2016-12-19'
                def __init__(self):
                    """Initialize the ipv6-neighbor container and register its children."""
                    # Python 2/3 compatible super() call into the YDK entity base.
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor, self).__init__()
                    self.yang_name = "ipv6-neighbor"
                    self.yang_parent_name = "ppp"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    # YANG child name -> (python attribute name, child class) map.
                    self._child_classes = OrderedDict([("ra-interval", ("ra_interval", DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInterval)), ("framed-prefix", ("framed_prefix", DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.FramedPrefix)), ("duplicate-address-detection", ("duplicate_address_detection", DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.DuplicateAddressDetection)), ("ra-initial", ("ra_initial", DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInitial))])
                    # Python attribute -> (YLeaf descriptor, accepted Python types).
                    self._leafs = OrderedDict([
                        ('framed_prefix_pool', (YLeaf(YType.str, 'framed-prefix-pool'), ['str'])),
                        ('managed_config', (YLeaf(YType.empty, 'managed-config'), ['Empty'])),
                        ('other_config', (YLeaf(YType.empty, 'other-config'), ['Empty'])),
                        ('start_ra_on_ipv6_enable', (YLeaf(YType.empty, 'start-ra-on-ipv6-enable'), ['Empty'])),
                        ('nud_enable', (YLeaf(YType.empty, 'nud-enable'), ['Empty'])),
                        ('ra_lifetime', (YLeaf(YType.uint32, 'ra-lifetime'), ['int'])),
                        ('router_preference', (YLeaf(YType.enumeration, 'router-preference'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_subscriber_cfg', 'Ipv6NdRouterPrefTemplate', '')])),
                        ('ra_suppress', (YLeaf(YType.empty, 'ra-suppress'), ['Empty'])),
                        ('ra_unicast', (YLeaf(YType.empty, 'ra-unicast'), ['Empty'])),
                        ('ra_unspecify_hoplimit', (YLeaf(YType.empty, 'ra-unspecify-hoplimit'), ['Empty'])),
                        ('ra_suppress_mtu', (YLeaf(YType.empty, 'ra-suppress-mtu'), ['Empty'])),
                        ('suppress_cache_learning', (YLeaf(YType.empty, 'suppress-cache-learning'), ['Empty'])),
                        ('reachable_time', (YLeaf(YType.uint32, 'reachable-time'), ['int'])),
                        ('ns_interval', (YLeaf(YType.uint32, 'ns-interval'), ['int'])),
                    ])
                    self.framed_prefix_pool = None
                    self.managed_config = None
                    self.other_config = None
                    self.start_ra_on_ipv6_enable = None
                    self.nud_enable = None
                    self.ra_lifetime = None
                    self.router_preference = None
                    self.ra_suppress = None
                    self.ra_unicast = None
                    self.ra_unspecify_hoplimit = None
                    self.ra_suppress_mtu = None
                    self.suppress_cache_learning = None
                    self.reachable_time = None
                    self.ns_interval = None
                    # Presence containers start as None; created on demand by callers.
                    self.ra_interval = None
                    self._children_name_map["ra_interval"] = "ra-interval"
                    self.framed_prefix = None
                    self._children_name_map["framed_prefix"] = "framed-prefix"
                    # Non-presence child is instantiated eagerly and parented here.
                    self.duplicate_address_detection = DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.DuplicateAddressDetection()
                    self.duplicate_address_detection.parent = self
                    self._children_name_map["duplicate_address_detection"] = "duplicate-address-detection"
                    self.ra_initial = None
                    self._children_name_map["ra_initial"] = "ra-initial"
                    self._segment_path = lambda: "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"
                    # After freezing, writes are validated by __setattr__ below.
                    self._is_frozen = True
                def __setattr__(self, name, value):
                    self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor, ['framed_prefix_pool', 'managed_config', 'other_config', 'start_ra_on_ipv6_enable', 'nud_enable', 'ra_lifetime', 'router_preference', 'ra_suppress', 'ra_unicast', 'ra_unspecify_hoplimit', 'ra_suppress_mtu', 'suppress_cache_learning', 'reachable_time', 'ns_interval'], name, value)
                class RaInterval(_Entity_):
                    """
                    Set IPv6 Router Advertisement (RA) interval in seconds.

                    YANG presence container ``ra-interval``.

                    Leaves:
                        maximum: int, 4..1800 seconds, mandatory — maximum RA interval.
                        minimum: int, 3..1800 seconds — minimum RA interval;
                            must be less than 0.75 * maximum interval.
                    """
                    _prefix = 'ipv6-nd-subscriber-cfg'
                    _revision = '2016-12-19'
                    def __init__(self):
                        """Initialize the ra-interval presence container."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInterval, self).__init__()
                        self.yang_name = "ra-interval"
                        self.yang_parent_name = "ipv6-neighbor"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self.is_presence_container = True
                        self._leafs = OrderedDict([
                            ('maximum', (YLeaf(YType.uint32, 'maximum'), ['int'])),
                            ('minimum', (YLeaf(YType.uint32, 'minimum'), ['int'])),
                        ])
                        self.maximum = None
                        self.minimum = None
                        self._segment_path = lambda: "ra-interval"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInterval, ['maximum', 'minimum'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInterval']['meta_info']
                class FramedPrefix(_Entity_):
                    """
                    Set the IPv6 framed ipv6 prefix for a subscriber interface.

                    YANG presence container ``framed-prefix``.

                    Leaves:
                        prefix_length: int, 0..128, mandatory — framed prefix length.
                        prefix: str, mandatory — framed prefix address.
                    """
                    _prefix = 'ipv6-nd-subscriber-cfg'
                    _revision = '2016-12-19'
                    def __init__(self):
                        """Initialize the framed-prefix presence container."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.FramedPrefix, self).__init__()
                        self.yang_name = "framed-prefix"
                        self.yang_parent_name = "ipv6-neighbor"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self.is_presence_container = True
                        self._leafs = OrderedDict([
                            ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
                            ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
                        ])
                        self.prefix_length = None
                        self.prefix = None
                        self._segment_path = lambda: "framed-prefix"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.FramedPrefix, ['prefix_length', 'prefix'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.FramedPrefix']['meta_info']
                class DuplicateAddressDetection(_Entity_):
                    """
                    Duplicate Address Detection (DAD).

                    Leaves:
                        attempts: int, 0..600 — duplicate address detection transmits.
                    """
                    _prefix = 'ipv6-nd-subscriber-cfg'
                    _revision = '2016-12-19'
                    def __init__(self):
                        """Initialize the duplicate-address-detection container."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.DuplicateAddressDetection, self).__init__()
                        self.yang_name = "duplicate-address-detection"
                        self.yang_parent_name = "ipv6-neighbor"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self._leafs = OrderedDict([
                            ('attempts', (YLeaf(YType.uint32, 'attempts'), ['int'])),
                        ])
                        self.attempts = None
                        self._segment_path = lambda: "duplicate-address-detection"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.DuplicateAddressDetection, ['attempts'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.DuplicateAddressDetection']['meta_info']
                class RaInitial(_Entity_):
                    """
                    IPv6 ND RA Initial.

                    YANG presence container ``ra-initial``.

                    Leaves:
                        count: int, 0..32, mandatory — initial RA count.
                        interval: int, 4..1800 seconds, mandatory — initial RA interval.
                    """
                    _prefix = 'ipv6-nd-subscriber-cfg'
                    _revision = '2016-12-19'
                    def __init__(self):
                        """Initialize the ra-initial presence container."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInitial, self).__init__()
                        self.yang_name = "ra-initial"
                        self.yang_parent_name = "ipv6-neighbor"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        self.is_presence_container = True
                        self._leafs = OrderedDict([
                            ('count', (YLeaf(YType.uint32, 'count'), ['int'])),
                            ('interval', (YLeaf(YType.uint32, 'interval'), ['int'])),
                        ])
                        self.count = None
                        self.interval = None
                        self._segment_path = lambda: "ra-initial"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInitial, ['count', 'interval'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Neighbor.RaInitial']['meta_info']
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                    return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6Neighbor']['meta_info']
class PppTemplate(_Entity_):
"""
PPP template configuration data
.. attribute:: fsm
PPP FSM global template configuration data
**type**\: :py:class:`Fsm <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm>`
.. attribute:: lcp
PPP LCP global template configuration data
**type**\: :py:class:`Lcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp>`
.. attribute:: ipv6cp
PPP IPv6CP global template configuration data
**type**\: :py:class:`Ipv6cp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp>`
.. attribute:: ipcp
PPP IPCP global template configuration data
**type**\: :py:class:`Ipcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp>`
"""
_prefix = 'ppp-ma-gbl-cfg'
_revision = '2017-11-05'
                def __init__(self):
                    """Initialize the ppp-template container and its four child containers."""
                    # Python 2/3 compatible super() call into the YDK entity base.
                    if sys.version_info > (3,):
                        super().__init__()
                    else:
                        super(DynamicTemplate.Ppps.Ppp.PppTemplate, self).__init__()
                    self.yang_name = "ppp-template"
                    self.yang_parent_name = "ppp"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = []
                    # YANG child name -> (python attribute name, child class) map.
                    self._child_classes = OrderedDict([("fsm", ("fsm", DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm)), ("lcp", ("lcp", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp)), ("ipv6cp", ("ipv6cp", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp)), ("ipcp", ("ipcp", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp))])
                    # This container has no leaves of its own.
                    self._leafs = OrderedDict()
                    # Non-presence children are instantiated eagerly and parented here.
                    self.fsm = DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm()
                    self.fsm.parent = self
                    self._children_name_map["fsm"] = "fsm"
                    self.lcp = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp()
                    self.lcp.parent = self
                    self._children_name_map["lcp"] = "lcp"
                    self.ipv6cp = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp()
                    self.ipv6cp.parent = self
                    self._children_name_map["ipv6cp"] = "ipv6cp"
                    self.ipcp = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp()
                    self.ipcp.parent = self
                    self._children_name_map["ipcp"] = "ipcp"
                    self._segment_path = lambda: "Cisco-IOS-XR-ppp-ma-gbl-cfg:ppp-template"
                    # After freezing, writes are validated by __setattr__ below.
                    self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate, [], name, value)
                class Fsm(_Entity_):
                    """
                    PPP FSM global template configuration data.

                    Leaves:
                        max_consecutive_conf_naks: int, 2..10, default 5 —
                            maximum number of consecutive Conf-Naks.
                        max_unacknowledged_conf_requests: int, 4..20, default 10 —
                            maximum number of unacknowledged Conf-Requests.
                        retry_timeout: int, 1..10, default 3 — maximum time to
                            wait for a response during PPP negotiation.
                        protocol_reject_timeout: int, 1..60, default 60 — maximum
                            time to wait before sending Protocol Reject.
                    """
                    _prefix = 'ppp-ma-gbl-cfg'
                    _revision = '2017-11-05'
                    def __init__(self):
                        """Initialize the fsm container with its YANG bookkeeping."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm, self).__init__()
                        self.yang_name = "fsm"
                        self.yang_parent_name = "ppp-template"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        # Python attribute -> (YLeaf descriptor, accepted Python types).
                        self._leafs = OrderedDict([
                            ('max_consecutive_conf_naks', (YLeaf(YType.uint32, 'max-consecutive-conf-naks'), ['int'])),
                            ('max_unacknowledged_conf_requests', (YLeaf(YType.uint32, 'max-unacknowledged-conf-requests'), ['int'])),
                            ('retry_timeout', (YLeaf(YType.uint32, 'retry-timeout'), ['int'])),
                            ('protocol_reject_timeout', (YLeaf(YType.uint32, 'protocol-reject-timeout'), ['int'])),
                        ])
                        self.max_consecutive_conf_naks = None
                        self.max_unacknowledged_conf_requests = None
                        self.retry_timeout = None
                        self.protocol_reject_timeout = None
                        self._segment_path = lambda: "fsm"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm, ['max_consecutive_conf_naks', 'max_unacknowledged_conf_requests', 'retry_timeout', 'protocol_reject_timeout'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Fsm']['meta_info']
                class Lcp(_Entity_):
                    """
                    PPP LCP global template configuration data.

                    Child containers: absolute_timeout, delay, authentication,
                    keepalive (all instantiated eagerly).

                    Leaves:
                        renegotiation: Empty — ignore attempts to renegotiate LCP.
                        service_type: int, 0..15, default 0 — the Service-Type.
                        send_term_request_on_shut_down: Empty — send LCP
                            Terminate request on shutdown.
                        mru_ignore: Empty — ignore MRU negotiated with peer
                            while setting interface BW.
                    """
                    _prefix = 'ppp-ma-gbl-cfg'
                    _revision = '2017-11-05'
                    def __init__(self):
                        """Initialize the lcp container and register its children."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp, self).__init__()
                        self.yang_name = "lcp"
                        self.yang_parent_name = "ppp-template"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        # YANG child name -> (python attribute name, child class) map.
                        self._child_classes = OrderedDict([("absolute-timeout", ("absolute_timeout", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.AbsoluteTimeout)), ("delay", ("delay", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Delay)), ("authentication", ("authentication", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication)), ("keepalive", ("keepalive", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Keepalive))])
                        self._leafs = OrderedDict([
                            ('renegotiation', (YLeaf(YType.empty, 'renegotiation'), ['Empty'])),
                            ('service_type', (YLeaf(YType.uint32, 'service-type'), ['int'])),
                            ('send_term_request_on_shut_down', (YLeaf(YType.empty, 'send-term-request-on-shut-down'), ['Empty'])),
                            ('mru_ignore', (YLeaf(YType.empty, 'mru-ignore'), ['Empty'])),
                        ])
                        self.renegotiation = None
                        self.service_type = None
                        self.send_term_request_on_shut_down = None
                        self.mru_ignore = None
                        # Non-presence children are instantiated eagerly and parented here.
                        self.absolute_timeout = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.AbsoluteTimeout()
                        self.absolute_timeout.parent = self
                        self._children_name_map["absolute_timeout"] = "absolute-timeout"
                        self.delay = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Delay()
                        self.delay.parent = self
                        self._children_name_map["delay"] = "delay"
                        self.authentication = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication()
                        self.authentication.parent = self
                        self._children_name_map["authentication"] = "authentication"
                        self.keepalive = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Keepalive()
                        self.keepalive.parent = self
                        self._children_name_map["keepalive"] = "keepalive"
                        self._segment_path = lambda: "lcp"
                        # After freezing, writes are validated by __setattr__ below.
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp, ['renegotiation', 'service_type', 'send_term_request_on_shut_down', 'mru_ignore'], name, value)
                    class AbsoluteTimeout(_Entity_):
                        """
                        This specifies the session absolute timeout value.

                        Leaves:
                            minutes: int, 0..35000000.
                            seconds: int, 0..59.
                        """
                        _prefix = 'ppp-ma-gbl-cfg'
                        _revision = '2017-11-05'
                        def __init__(self):
                            """Initialize the absolute-timeout container."""
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.AbsoluteTimeout, self).__init__()
                            self.yang_name = "absolute-timeout"
                            self.yang_parent_name = "lcp"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('minutes', (YLeaf(YType.uint32, 'minutes'), ['int'])),
                                ('seconds', (YLeaf(YType.uint32, 'seconds'), ['int'])),
                            ])
                            self.minutes = None
                            self.seconds = None
                            self._segment_path = lambda: "absolute-timeout"
                            self._is_frozen = True
                        def __setattr__(self, name, value):
                            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.AbsoluteTimeout, ['minutes', 'seconds'], name, value)
                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.AbsoluteTimeout']['meta_info']
                    class Delay(_Entity_):
                        """
                        This specifies the time to delay before starting active
                        LCP negotiations.

                        Leaves:
                            seconds: int, 0..255.
                            milliseconds: int, 0..999.
                        """
                        _prefix = 'ppp-ma-gbl-cfg'
                        _revision = '2017-11-05'
                        def __init__(self):
                            """Initialize the delay container."""
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Delay, self).__init__()
                            self.yang_name = "delay"
                            self.yang_parent_name = "lcp"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('seconds', (YLeaf(YType.uint32, 'seconds'), ['int'])),
                                ('milliseconds', (YLeaf(YType.uint32, 'milliseconds'), ['int'])),
                            ])
                            self.seconds = None
                            self.milliseconds = None
                            self._segment_path = lambda: "delay"
                            self._is_frozen = True
                        def __setattr__(self, name, value):
                            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Delay, ['seconds', 'milliseconds'], name, value)
                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Delay']['meta_info']
                    class Authentication(_Entity_):
                        """
                        PPP authentication parameters.

                        Child containers: methods — PPP link authentication methods.

                        Leaves:
                            chap_host_name: str — CHAP hostname.
                            pap: int, 0..4294967295 — <1> for accepting
                                null-password during authentication.
                            mschap_host_name: str — MS-CHAP hostname.
                            max_authentication_failures: int, 0..10 — how many
                                authentication failures to allow, if any.
                            timeout: int, 3..30, default 10 — maximum time to
                                wait for an authentication response.
                        """
                        _prefix = 'ppp-ma-gbl-cfg'
                        _revision = '2017-11-05'
                        def __init__(self):
                            """Initialize the authentication container and its methods child."""
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication, self).__init__()
                            self.yang_name = "authentication"
                            self.yang_parent_name = "lcp"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([("methods", ("methods", DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication.Methods))])
                            self._leafs = OrderedDict([
                                ('chap_host_name', (YLeaf(YType.str, 'chap-host-name'), ['str'])),
                                ('pap', (YLeaf(YType.uint32, 'pap'), ['int'])),
                                ('mschap_host_name', (YLeaf(YType.str, 'mschap-host-name'), ['str'])),
                                ('max_authentication_failures', (YLeaf(YType.uint32, 'max-authentication-failures'), ['int'])),
                                ('timeout', (YLeaf(YType.uint32, 'timeout'), ['int'])),
                            ])
                            self.chap_host_name = None
                            self.pap = None
                            self.mschap_host_name = None
                            self.max_authentication_failures = None
                            self.timeout = None
                            self.methods = DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication.Methods()
                            self.methods.parent = self
                            self._children_name_map["methods"] = "methods"
                            self._segment_path = lambda: "authentication"
                            self._is_frozen = True
                        def __setattr__(self, name, value):
                            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication, ['chap_host_name', 'pap', 'mschap_host_name', 'max_authentication_failures', 'timeout'], name, value)
                        class Methods(_Entity_):
                            """
                            This specifies the PPP link authentication method.

                            Leaves:
                                method: leaf-list of PppAuthenticationMethodGbl —
                                    between one and three authentication methods
                                    in order of preference.
                            """
                            _prefix = 'ppp-ma-gbl-cfg'
                            _revision = '2017-11-05'
                            def __init__(self):
                                """Initialize the methods container with its leaf-list."""
                                if sys.version_info > (3,):
                                    super().__init__()
                                else:
                                    super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication.Methods, self).__init__()
                                self.yang_name = "methods"
                                self.yang_parent_name = "authentication"
                                self.is_top_level_class = False
                                self.has_list_ancestor = True
                                self.ylist_key_names = []
                                self._child_classes = OrderedDict([])
                                # 'method' is a leaf-list (YLeafList), exposed as a Python list.
                                self._leafs = OrderedDict([
                                    ('method', (YLeafList(YType.enumeration, 'method'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ma_gbl_cfg', 'PppAuthenticationMethodGbl', '')])),
                                ])
                                self.method = []
                                self._segment_path = lambda: "methods"
                                self._is_frozen = True
                            def __setattr__(self, name, value):
                                self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication.Methods, ['method'], name, value)
                            @staticmethod
                            def _meta_info():
                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                                return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication.Methods']['meta_info']
                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Authentication']['meta_info']
                    class Keepalive(_Entity_):
                        """
                        This specifies the rate at which EchoReq packets are sent.

                        Leaves:
                            keepalive_disable: bool — True to disable keepalives,
                                False to specify a new keepalive interval.
                            interval: int, 10..180 — keepalive interval; leave
                                unspecified when disabling keepalives.
                            retry_count: int, 1..255 — keepalive retry count; leave
                                unspecified when disabling keepalives.
                        """
                        _prefix = 'ppp-ma-gbl-cfg'
                        _revision = '2017-11-05'
                        def __init__(self):
                            """Initialize the keepalive container."""
                            if sys.version_info > (3,):
                                super().__init__()
                            else:
                                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Keepalive, self).__init__()
                            self.yang_name = "keepalive"
                            self.yang_parent_name = "lcp"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('keepalive_disable', (YLeaf(YType.boolean, 'keepalive-disable'), ['bool'])),
                                ('interval', (YLeaf(YType.uint32, 'interval'), ['int'])),
                                ('retry_count', (YLeaf(YType.uint32, 'retry-count'), ['int'])),
                            ])
                            self.keepalive_disable = None
                            self.interval = None
                            self.retry_count = None
                            self._segment_path = lambda: "keepalive"
                            self._is_frozen = True
                        def __setattr__(self, name, value):
                            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Keepalive, ['keepalive_disable', 'interval', 'retry_count'], name, value)
                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp.Keepalive']['meta_info']
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Lcp']['meta_info']
                class Ipv6cp(_Entity_):
                    """
                    PPP IPv6CP global template configuration data.

                    Leaves:
                        passive: Empty — run IPv6CP in Passive mode.
                        renegotiation: Empty — ignore attempts to renegotiate IPv6CP.
                        peer_interface_id: str — Interface-Id to impose on the peer.
                        protocol_reject: Empty — protocol reject IPv6CP.
                    """
                    _prefix = 'ppp-ma-gbl-cfg'
                    _revision = '2017-11-05'
                    def __init__(self):
                        """Initialize the ipv6cp container with its YANG bookkeeping."""
                        if sys.version_info > (3,):
                            super().__init__()
                        else:
                            super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp, self).__init__()
                        self.yang_name = "ipv6cp"
                        self.yang_parent_name = "ppp-template"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_classes = OrderedDict([])
                        # Python attribute -> (YLeaf descriptor, accepted Python types).
                        self._leafs = OrderedDict([
                            ('passive', (YLeaf(YType.empty, 'passive'), ['Empty'])),
                            ('renegotiation', (YLeaf(YType.empty, 'renegotiation'), ['Empty'])),
                            ('peer_interface_id', (YLeaf(YType.str, 'peer-interface-id'), ['str'])),
                            ('protocol_reject', (YLeaf(YType.empty, 'protocol-reject'), ['Empty'])),
                        ])
                        self.passive = None
                        self.renegotiation = None
                        self.peer_interface_id = None
                        self.protocol_reject = None
                        self._segment_path = lambda: "ipv6cp"
                        self._is_frozen = True
                    def __setattr__(self, name, value):
                        self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp, ['passive', 'renegotiation', 'peer_interface_id', 'protocol_reject'], name, value)
                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                        return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipv6cp']['meta_info']
class Ipcp(_Entity_):
    """
    PPP IPCP global template configuration data
    .. attribute:: wins
    IPCP WINS parameters
    **type**\: :py:class:`Wins <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins>`
    .. attribute:: dns
    IPCP DNS parameters
    **type**\: :py:class:`Dns <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns>`
    .. attribute:: peer_address
    IPCP address parameters
    **type**\: :py:class:`PeerAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress>`
    .. attribute:: renegotiation
    Specify whether to ignore attempts to renegotiate IPCP
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: passive
    Specify whether to run IPCP in Passive mode
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: protocol_reject
    Specify whether to protocol reject IPCP
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: peer_netmask
    Specify the IPv4 netmask to negotiate for the peer
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    """

    # YANG module prefix and revision this binding corresponds to.
    _prefix = 'ppp-ma-gbl-cfg'
    _revision = '2017-11-05'

    def __init__(self):
        # Python 2/3 compatible super() call (both branches reach _Entity_.__init__).
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp, self).__init__()

        self.yang_name = "ipcp"
        self.yang_parent_name = "ppp-template"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute name, binding class).
        # OrderedDict preserves the declaration order of the model's children.
        self._child_classes = OrderedDict([("wins", ("wins", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins)), ("dns", ("dns", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns)), ("peer-address", ("peer_address", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress))])
        # Leaf metadata: python attribute -> (YLeaf descriptor, accepted python type names).
        self._leafs = OrderedDict([
            ('renegotiation', (YLeaf(YType.empty, 'renegotiation'), ['Empty'])),
            ('passive', (YLeaf(YType.empty, 'passive'), ['Empty'])),
            ('protocol_reject', (YLeaf(YType.empty, 'protocol-reject'), ['Empty'])),
            ('peer_netmask', (YLeaf(YType.str, 'peer-netmask'), ['str'])),
        ])
        # Leaves default to None (i.e. not configured).
        self.renegotiation = None
        self.passive = None
        self.protocol_reject = None
        self.peer_netmask = None

        # Non-presence child containers are instantiated eagerly and parented here.
        self.wins = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins()
        self.wins.parent = self
        self._children_name_map["wins"] = "wins"

        self.dns = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns()
        self.dns.parent = self
        self._children_name_map["dns"] = "dns"

        self.peer_address = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress()
        self.peer_address.parent = self
        self._children_name_map["peer_address"] = "peer-address"

        self._segment_path = lambda: "ipcp"
        # Must be assigned last: once frozen, attribute writes are validated
        # by the __setattr__ override below.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route all writes through the base-class validator, restricting leaf
        # assignment to the names declared for this class.
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp, ['renegotiation', 'passive', 'protocol_reject', 'peer_netmask'], name, value)

    class Wins(_Entity_):
        """
        IPCP WINS parameters
        .. attribute:: wins_addresses
        Specify WINS address(es) to provide
        **type**\: :py:class:`WinsAddresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses>`
        """

        _prefix = 'ppp-ma-gbl-cfg'
        _revision = '2017-11-05'

        def __init__(self):
            # Python 2/3 compatible super() call.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins, self).__init__()

            self.yang_name = "wins"
            self.yang_parent_name = "ipcp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("wins-addresses", ("wins_addresses", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses))])
            # This container has no leaves of its own.
            self._leafs = OrderedDict()

            self.wins_addresses = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses()
            self.wins_addresses.parent = self
            self._children_name_map["wins_addresses"] = "wins-addresses"
            self._segment_path = lambda: "wins"
            self._is_frozen = True  # freeze last; enables validated __setattr__

        def __setattr__(self, name, value):
            # Empty leaf list: only child containers may be (re)assigned.
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins, [], name, value)

        class WinsAddresses(_Entity_):
            """
            Specify WINS address(es) to provide
            .. attribute:: primary
            Primary WINS IP address
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            .. attribute:: secondary
            Secondary WINS IP address
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            """

            _prefix = 'ppp-ma-gbl-cfg'
            _revision = '2017-11-05'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses, self).__init__()

                self.yang_name = "wins-addresses"
                self.yang_parent_name = "wins"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('primary', (YLeaf(YType.str, 'primary'), ['str'])),
                    ('secondary', (YLeaf(YType.str, 'secondary'), ['str'])),
                ])
                self.primary = None
                self.secondary = None
                self._segment_path = lambda: "wins-addresses"
                self._is_frozen = True  # freeze last

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses, ['primary', 'secondary'], name, value)

            @staticmethod
            def _meta_info():
                # Deferred import avoids loading the large meta table at module import time.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins.WinsAddresses']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Wins']['meta_info']

    class Dns(_Entity_):
        """
        IPCP DNS parameters
        .. attribute:: dns_addresses
        Specify DNS address(es) to provide
        **type**\: :py:class:`DnsAddresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses>`
        """

        _prefix = 'ppp-ma-gbl-cfg'
        _revision = '2017-11-05'

        def __init__(self):
            # Python 2/3 compatible super() call.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns, self).__init__()

            self.yang_name = "dns"
            self.yang_parent_name = "ipcp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("dns-addresses", ("dns_addresses", DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses))])
            # This container has no leaves of its own.
            self._leafs = OrderedDict()

            self.dns_addresses = DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses()
            self.dns_addresses.parent = self
            self._children_name_map["dns_addresses"] = "dns-addresses"
            self._segment_path = lambda: "dns"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns, [], name, value)

        class DnsAddresses(_Entity_):
            """
            Specify DNS address(es) to provide
            .. attribute:: primary
            Primary DNS IP address
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            .. attribute:: secondary
            Secondary DNS IP address
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            """

            _prefix = 'ppp-ma-gbl-cfg'
            _revision = '2017-11-05'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses, self).__init__()

                self.yang_name = "dns-addresses"
                self.yang_parent_name = "dns"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('primary', (YLeaf(YType.str, 'primary'), ['str'])),
                    ('secondary', (YLeaf(YType.str, 'secondary'), ['str'])),
                ])
                self.primary = None
                self.secondary = None
                self._segment_path = lambda: "dns-addresses"
                self._is_frozen = True  # freeze last

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses, ['primary', 'secondary'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns.DnsAddresses']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.Dns']['meta_info']

    class PeerAddress(_Entity_):
        """
        IPCP address parameters
        .. attribute:: default
        Specify an IP address to assign to peers through IPCP
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        .. attribute:: pool
        Accepts an IP address from the peer if in the pool, else allocates one from the pool
        **type**\: str
        """

        _prefix = 'ppp-ma-gbl-cfg'
        _revision = '2017-11-05'

        def __init__(self):
            # Python 2/3 compatible super() call.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress, self).__init__()

            self.yang_name = "peer-address"
            self.yang_parent_name = "ipcp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('default', (YLeaf(YType.str, 'default'), ['str'])),
                ('pool', (YLeaf(YType.str, 'pool'), ['str'])),
            ])
            self.default = None
            self.pool = None
            self._segment_path = lambda: "peer-address"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress, ['default', 'pool'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp.PeerAddress']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate.Ipcp']['meta_info']
@staticmethod
def _meta_info():
    """Return the meta-information entry registered for this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta_module
    meta_table = meta_module._meta_table
    return meta_table['DynamicTemplate.Ppps.Ppp.PppTemplate']['meta_info']
class Pbr(_Entity_):
    """
    Dynamic Template PBR configuration
    .. attribute:: service_policies
    Ingress service policy
    **type**\: :py:class:`ServicePolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies>`
    .. attribute:: service_policy_in
    Class for subscriber ingress policy
    **type**\: str
    """

    # This subtree comes from a different YANG module than its parent
    # (note the prefixed _segment_path below).
    _prefix = 'pbr-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Pbr, self).__init__()

        self.yang_name = "pbr"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute name, binding class).
        self._child_classes = OrderedDict([("service-policies", ("service_policies", DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies))])
        self._leafs = OrderedDict([
            ('service_policy_in', (YLeaf(YType.str, 'service-policy-in'), ['str'])),
        ])
        self.service_policy_in = None

        self.service_policies = DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies()
        self.service_policies.parent = self
        self._children_name_map["service_policies"] = "service-policies"
        # Segment path carries the module prefix because "pbr" is augmented
        # from the Cisco-IOS-XR-pbr-subscriber-cfg module.
        self._segment_path = lambda: "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"
        # Must be assigned last: freezes the attribute set for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Pbr, ['service_policy_in'], name, value)

    class ServicePolicies(_Entity_):
        """
        Ingress service policy
        .. attribute:: service_policy
        Service policy details
        **type**\: list of :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies.ServicePolicy>`
        """

        _prefix = 'pbr-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies, self).__init__()

            self.yang_name = "service-policies"
            self.yang_parent_name = "pbr"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies.ServicePolicy))])
            self._leafs = OrderedDict()
            # YANG list: holds zero or more ServicePolicy entries.
            self.service_policy = YList(self)
            self._segment_path = lambda: "service-policies"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies, [], name, value)

        class ServicePolicy(_Entity_):
            """
            Service policy details
            .. attribute:: service_policy (key)
            Name of policy\-map
            **type**\: str
            **length:** 1..64
            """

            _prefix = 'pbr-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies.ServicePolicy, self).__init__()

                self.yang_name = "service-policy"
                self.yang_parent_name = "service-policies"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                # 'service_policy' is the YANG list key for this entry.
                self.ylist_key_names = ['service_policy']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('service_policy', (YLeaf(YType.str, 'service-policy'), ['str'])),
                ])
                self.service_policy = None
                # Keyed list entries embed their key value in the segment path.
                self._segment_path = lambda: "service-policy" + "[service-policy='" + str(self.service_policy) + "']"
                self._is_frozen = True  # freeze last

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies.ServicePolicy, ['service_policy'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies.ServicePolicy']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Pbr.ServicePolicies']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Pbr']['meta_info']
class Ipv4PacketFilter(_Entity_):
    """
    IPv4 Packet Filtering configuration for the
    template
    .. attribute:: outbound
    IPv4 Packet filter to be applied to outbound packets
    **type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound>`
    .. attribute:: inbound
    IPv4 Packet filter to be applied to inbound packets
    **type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound>`
    """

    # Augmented subtree from the ip-pfilter-subscriber-cfg YANG module.
    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter, self).__init__()

        self.yang_name = "ipv4-packet-filter"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("outbound", ("outbound", DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound)), ("inbound", ("inbound", DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound))])
        # No leaves at this level; only the two child containers.
        self._leafs = OrderedDict()

        self.outbound = DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound()
        self.outbound.parent = self
        self._children_name_map["outbound"] = "outbound"

        self.inbound = DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound()
        self.inbound.parent = self
        self._children_name_map["inbound"] = "inbound"

        # Segment path carries the augmenting module's prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"
        # Must be assigned last: freezes the attribute set for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter, [], name, value)

    class Outbound(_Entity_):
        """
        IPv4 Packet filter to be applied to outbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv4 Packet Filter Name to be applied to Outbound packets
        **type**\: str
        **length:** 1..64
        .. attribute:: hardware_count
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound, self).__init__()

            self.yang_name = "outbound"
            self.yang_parent_name = "ipv4-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.hardware_count = None
            self.interface_statistics = None
            self._segment_path = lambda: "outbound"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Outbound']['meta_info']

    class Inbound(_Entity_):
        """
        IPv4 Packet filter to be applied to inbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv4 Packet Filter Name to be applied to Inbound packets NOTE\: This parameter is mandatory if 'CommonACLName' is not specified
        **type**\: str
        **length:** 1..64
        .. attribute:: hardware_count
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound, self).__init__()

            self.yang_name = "inbound"
            self.yang_parent_name = "ipv4-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.hardware_count = None
            self.interface_statistics = None
            self._segment_path = lambda: "inbound"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter.Inbound']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv4PacketFilter']['meta_info']
class Ipv6PacketFilter(_Entity_):
    """
    IPv6 Packet Filtering configuration for the
    interface
    .. attribute:: inbound
    IPv6 Packet filter to be applied to inbound packets
    **type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound>`
    .. attribute:: outbound
    IPv6 Packet filter to be applied to outbound packets
    **type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound>`
    """

    # Augmented subtree from the ip-pfilter-subscriber-cfg YANG module.
    # Unlike the IPv4 variant, the IPv6 leaves have no hardware-count knob.
    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter, self).__init__()

        self.yang_name = "ipv6-packet-filter"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("inbound", ("inbound", DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound)), ("outbound", ("outbound", DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound))])
        # No leaves at this level; only the two child containers.
        self._leafs = OrderedDict()

        self.inbound = DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound()
        self.inbound.parent = self
        self._children_name_map["inbound"] = "inbound"

        self.outbound = DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound()
        self.outbound.parent = self
        self._children_name_map["outbound"] = "outbound"

        # Segment path carries the augmenting module's prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"
        # Must be assigned last: freezes the attribute set for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter, [], name, value)

    class Inbound(_Entity_):
        """
        IPv6 Packet filter to be applied to inbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv6 Packet Filter Name to be applied to Inbound NOTE\: This parameter is mandatory if 'CommonACLName' is not specified
        **type**\: str
        **length:** 1..64
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound, self).__init__()

            self.yang_name = "inbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "inbound"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Inbound']['meta_info']

    class Outbound(_Entity_):
        """
        IPv6 Packet filter to be applied to outbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv6 Packet Filter Name to be applied to Outbound packets
        **type**\: str
        **length:** 1..64
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound, self).__init__()

            self.yang_name = "outbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "outbound"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter.Outbound']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Ipv6PacketFilter']['meta_info']
class Dhcpv6(_Entity_):
    """
    Interface dhcpv6 configuration data
    .. attribute:: delegated_prefix
    The prefix to be used for Prefix Delegation
    **type**\: :py:class:`DelegatedPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.Ppps.Ppp.Dhcpv6.DelegatedPrefix>`
    **presence node**\: True
    .. attribute:: dns_ipv6address
    Dns IPv6 Address
    **type**\: str
    **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
    .. attribute:: mode_class
    Select proxy/server profile based on mode class name
    **type**\: str
    .. attribute:: dhcpv6_iplease
    Cisco VSA to configure any dhcpv6 ip lease per subscriber
    **type**\: str
    .. attribute:: dhcpv6_option
    Cisco VSA to configure any dhcpv6 option per subscriber
    **type**\: str
    .. attribute:: address_pool
    The pool to be used for Address assignment
    **type**\: str
    .. attribute:: delegated_prefix_pool
    The pool to be used for Prefix Delegation
    **type**\: str
    .. attribute:: class_
    The class to be used for proxy/server profile
    **type**\: str
    .. attribute:: stateful_address
    Stateful IPv6 Address
    **type**\: str
    **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
    """

    # Augmented subtree from the ipv6-new-dhcpv6d-subscriber-cfg YANG module.
    _prefix = 'ipv6-new-dhcpv6d-subscriber-cfg'
    _revision = '2017-09-30'

    def __init__(self):
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.Ppps.Ppp.Dhcpv6, self).__init__()

        self.yang_name = "dhcpv6"
        self.yang_parent_name = "ppp"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("delegated-prefix", ("delegated_prefix", DynamicTemplate.Ppps.Ppp.Dhcpv6.DelegatedPrefix))])
        # Leaf metadata: python attribute -> (YLeaf descriptor, accepted python type names).
        # Note 'class_' maps to YANG leaf 'class' ('class' is a Python keyword).
        self._leafs = OrderedDict([
            ('dns_ipv6address', (YLeaf(YType.str, 'dns-ipv6address'), ['str'])),
            ('mode_class', (YLeaf(YType.str, 'mode-class'), ['str'])),
            ('dhcpv6_iplease', (YLeaf(YType.str, 'dhcpv6-iplease'), ['str'])),
            ('dhcpv6_option', (YLeaf(YType.str, 'dhcpv6-option'), ['str'])),
            ('address_pool', (YLeaf(YType.str, 'address-pool'), ['str'])),
            ('delegated_prefix_pool', (YLeaf(YType.str, 'delegated-prefix-pool'), ['str'])),
            ('class_', (YLeaf(YType.str, 'class'), ['str'])),
            ('stateful_address', (YLeaf(YType.str, 'stateful-address'), ['str'])),
        ])
        self.dns_ipv6address = None
        self.mode_class = None
        self.dhcpv6_iplease = None
        self.dhcpv6_option = None
        self.address_pool = None
        self.delegated_prefix_pool = None
        self.class_ = None
        self.stateful_address = None

        # Presence container: NOT instantiated eagerly. It stays None until the
        # caller explicitly creates a DelegatedPrefix and assigns it.
        self.delegated_prefix = None
        self._children_name_map["delegated_prefix"] = "delegated-prefix"
        # Segment path carries the augmenting module's prefix.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6"
        # Must be assigned last: freezes the attribute set for validated writes.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.Ppps.Ppp.Dhcpv6, ['dns_ipv6address', 'mode_class', 'dhcpv6_iplease', 'dhcpv6_option', 'address_pool', 'delegated_prefix_pool', 'class_', 'stateful_address'], name, value)

    class DelegatedPrefix(_Entity_):
        """
        The prefix to be used for Prefix Delegation
        .. attribute:: prefix
        IPv6 Prefix
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
        **mandatory**\: True
        .. attribute:: prefix_length
        PD Prefix Length
        **type**\: int
        **range:** 0..128
        **mandatory**\: True
        This class is a :ref:`presence class<presence-class>`
        """

        _prefix = 'ipv6-new-dhcpv6d-subscriber-cfg'
        _revision = '2017-09-30'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.Ppps.Ppp.Dhcpv6.DelegatedPrefix, self).__init__()

            self.yang_name = "delegated-prefix"
            self.yang_parent_name = "dhcpv6"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            # Presence container: its mere existence in a config is meaningful.
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
                ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
            ])
            self.prefix = None
            self.prefix_length = None
            self._segment_path = lambda: "delegated-prefix"
            self._is_frozen = True  # freeze last

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.Ppps.Ppp.Dhcpv6.DelegatedPrefix, ['prefix', 'prefix_length'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.Ppps.Ppp.Dhcpv6.DelegatedPrefix']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.Ppps.Ppp.Dhcpv6']['meta_info']
@staticmethod
def _meta_info():
    """Return the meta-information entry registered for this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta_module
    meta_table = meta_module._meta_table
    return meta_table['DynamicTemplate.Ppps.Ppp']['meta_info']
@staticmethod
def _meta_info():
    """Return the meta-information entry registered for this class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta_module
    meta_table = meta_module._meta_table
    return meta_table['DynamicTemplate.Ppps']['meta_info']
class IpSubscribers(_Entity_):
"""
The IP Subscriber Template Table
.. attribute:: ip_subscriber
A IP Subscriber Type Template
**type**\: list of :py:class:`IpSubscriber <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(DynamicTemplate.IpSubscribers, self).__init__()
self.yang_name = "ip-subscribers"
self.yang_parent_name = "dynamic-template"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ip-subscriber", ("ip_subscriber", DynamicTemplate.IpSubscribers.IpSubscriber))])
self._leafs = OrderedDict()
self.ip_subscriber = YList(self)
self._segment_path = lambda: "ip-subscribers"
self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(DynamicTemplate.IpSubscribers, [], name, value)
class IpSubscriber(_Entity_):
"""
A IP Subscriber Type Template
.. attribute:: template_name (key)
The name of the template
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: ipv4_network
Interface IPv4 Network configuration data
**type**\: :py:class:`Ipv4Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network>`
.. attribute:: accounting
Subscriber accounting dynamic\-template commands
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Accounting>`
.. attribute:: qos
QoS dynamically applied configuration template
**type**\: :py:class:`Qos <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos>`
.. attribute:: igmp
IGMPconfiguration
**type**\: :py:class:`Igmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Igmp>`
.. attribute:: ipv6_network
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network>`
.. attribute:: span_monitor_sessions
Monitor Session container for this template
**type**\: :py:class:`SpanMonitorSessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions>`
.. attribute:: vrf
Assign the interface to a VRF
**type**\: str
**length:** 1..32
.. attribute:: ipv6_neighbor
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Neighbor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor>`
.. attribute:: pbr
Dynamic Template PBR configuration
**type**\: :py:class:`Pbr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Pbr>`
.. attribute:: ipv4_packet_filter
IPv4 Packet Filtering configuration for the template
**type**\: :py:class:`Ipv4PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter>`
.. attribute:: ipv6_packet_filter
IPv6 Packet Filtering configuration for the interface
**type**\: :py:class:`Ipv6PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter>`
.. attribute:: dhcpd
Interface dhcpv4 configuration data
**type**\: :py:class:`Dhcpd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd>`
.. attribute:: dhcpv6
Interface dhcpv6 configuration data
**type**\: :py:class:`Dhcpv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
    """Build the ip-subscriber list entry: register child containers and leafs, instantiate non-presence children, then freeze the entity."""
    # Python 2/3 compatible super() call (dispatch on sys.version_info).
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(DynamicTemplate.IpSubscribers.IpSubscriber, self).__init__()

    self.yang_name = "ip-subscriber"
    self.yang_parent_name = "ip-subscribers"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # 'template-name' is the YANG list key (see _segment_path predicate below).
    self.ylist_key_names = ['template_name']
    # Map of module-qualified YANG container names -> (python attribute name, child class).
    # The containers come from several augmenting models, hence the differing module prefixes.
    self._child_classes = OrderedDict([("Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network", ("ipv4_network", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network)), ("Cisco-IOS-XR-subscriber-accounting-cfg:accounting", ("accounting", DynamicTemplate.IpSubscribers.IpSubscriber.Accounting)), ("Cisco-IOS-XR-qos-ma-bng-cfg:qos", ("qos", DynamicTemplate.IpSubscribers.IpSubscriber.Qos)), ("Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp", ("igmp", DynamicTemplate.IpSubscribers.IpSubscriber.Igmp)), ("Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network", ("ipv6_network", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network)), ("Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions", ("span_monitor_sessions", DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions)), ("Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor", ("ipv6_neighbor", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor)), ("Cisco-IOS-XR-pbr-subscriber-cfg:pbr", ("pbr", DynamicTemplate.IpSubscribers.IpSubscriber.Pbr)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter", ("ipv4_packet_filter", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter", ("ipv6_packet_filter", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter)), ("Cisco-IOS-XR-ipv4-dhcpd-subscriber-cfg:dhcpd", ("dhcpd", DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd)), ("Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6", ("dhcpv6", DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6))])
    # Leaf registry: python attribute name -> (YLeaf descriptor, accepted python types).
    # 'vrf' is module-qualified because it is augmented in from another model.
    self._leafs = OrderedDict([
        ('template_name', (YLeaf(YType.str, 'template-name'), ['str'])),
        ('vrf', (YLeaf(YType.str, 'Cisco-IOS-XR-infra-rsi-subscriber-cfg:vrf'), ['str'])),
    ])
    self.template_name = None
    self.vrf = None
    # Non-presence child containers are instantiated eagerly and parented here.
    self.ipv4_network = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network()
    self.ipv4_network.parent = self
    self._children_name_map["ipv4_network"] = "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"
    self.accounting = DynamicTemplate.IpSubscribers.IpSubscriber.Accounting()
    self.accounting.parent = self
    self._children_name_map["accounting"] = "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"
    self.qos = DynamicTemplate.IpSubscribers.IpSubscriber.Qos()
    self.qos.parent = self
    self._children_name_map["qos"] = "Cisco-IOS-XR-qos-ma-bng-cfg:qos"
    self.igmp = DynamicTemplate.IpSubscribers.IpSubscriber.Igmp()
    self.igmp.parent = self
    self._children_name_map["igmp"] = "Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp"
    self.ipv6_network = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network()
    self.ipv6_network.parent = self
    self._children_name_map["ipv6_network"] = "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"
    self.span_monitor_sessions = DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions()
    self.span_monitor_sessions.parent = self
    self._children_name_map["span_monitor_sessions"] = "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"
    self.ipv6_neighbor = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor()
    self.ipv6_neighbor.parent = self
    self._children_name_map["ipv6_neighbor"] = "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"
    self.pbr = DynamicTemplate.IpSubscribers.IpSubscriber.Pbr()
    self.pbr.parent = self
    self._children_name_map["pbr"] = "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"
    self.ipv4_packet_filter = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter()
    self.ipv4_packet_filter.parent = self
    self._children_name_map["ipv4_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"
    self.ipv6_packet_filter = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter()
    self.ipv6_packet_filter.parent = self
    self._children_name_map["ipv6_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"
    self.dhcpd = DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd()
    self.dhcpd.parent = self
    self._children_name_map["dhcpd"] = "Cisco-IOS-XR-ipv4-dhcpd-subscriber-cfg:dhcpd"
    self.dhcpv6 = DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6()
    self.dhcpv6.parent = self
    self._children_name_map["dhcpv6"] = "Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6"
    # Segment path carries the list-key predicate; absolute path is rooted at the
    # top-level dynamic-template container.
    self._segment_path = lambda: "ip-subscriber" + "[template-name='" + str(self.template_name) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/ip-subscribers/%s" % self._segment_path()
    # Set last: construction is complete from this point on.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Route all attribute writes through the YDK entity machinery, declaring
    # which names are configurable leafs of this class.
    self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber, ['template_name', 'vrf'], name, value)
class Ipv4Network(_Entity_):
    """
    Interface IPv4 Network configuration data

    .. attribute:: unnumbered

        Enable IP processing without an explicit address

        **type**\: str

    .. attribute:: mtu

        The IP Maximum Transmission Unit

        **type**\: int

        **range:** 68..65535

        **units**\: byte

    .. attribute:: unreachables

        TRUE if enabled, FALSE if disabled

        **type**\: bool

        **default value**\: false

    .. attribute:: rpf

        TRUE if enabled, FALSE if disabled

        **type**\: bool

        **default value**\: true

    """

    # Prefix/revision of the augmenting YANG module this container belongs to.
    _prefix = 'ipv4-ma-subscriber-cfg'
    _revision = '2015-07-30'

    def __init__(self):
        """Register this leaf-only container (no child containers) and freeze it."""
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network, self).__init__()

        self.yang_name = "ipv4-network"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        # Ancestor 'ip-subscriber' is a keyed list entry.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf registry: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('unnumbered', (YLeaf(YType.str, 'unnumbered'), ['str'])),
            ('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
            ('unreachables', (YLeaf(YType.boolean, 'unreachables'), ['bool'])),
            ('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
        ])
        self.unnumbered = None
        self.mtu = None
        self.unreachables = None
        self.rpf = None
        # Module-qualified segment because this container is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK leaf validation for the declared leafs.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network, ['unnumbered', 'mtu', 'unreachables', 'rpf'], name, value)

    @staticmethod
    def _meta_info():
        # Import deferred so the (large) generated meta table is only loaded on demand.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4Network']['meta_info']
class Accounting(_Entity_):
    """
    Subscriber accounting dynamic\-template commands

    .. attribute:: service_accounting

        Subscriber accounting service accounting

        **type**\: :py:class:`ServiceAccounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting>`

    .. attribute:: session

        Subscriber accounting session accounting

        **type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session>`

    .. attribute:: idle_timeout

        Subscriber accounting idle timeout

        **type**\: :py:class:`IdleTimeout <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout>`

    .. attribute:: monitor_feature

        Subscriber monitor feature

        **type**\: str

    .. attribute:: prepaid_feature

        Subscriber accounting prepaid feature

        **type**\: str

    """

    # Prefix/revision of the augmenting YANG module this container belongs to.
    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Register the three accounting child containers and two leafs, then freeze."""
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting, self).__init__()

        self.yang_name = "accounting"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        # Ancestor 'ip-subscriber' is a keyed list entry.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG container name -> (python attribute, child class).
        self._child_classes = OrderedDict([("service-accounting", ("service_accounting", DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting)), ("session", ("session", DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session)), ("idle-timeout", ("idle_timeout", DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout))])
        self._leafs = OrderedDict([
            ('monitor_feature', (YLeaf(YType.str, 'monitor-feature'), ['str'])),
            ('prepaid_feature', (YLeaf(YType.str, 'prepaid-feature'), ['str'])),
        ])
        self.monitor_feature = None
        self.prepaid_feature = None
        # Non-presence children are instantiated eagerly and parented here.
        self.service_accounting = DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting()
        self.service_accounting.parent = self
        self._children_name_map["service_accounting"] = "service-accounting"
        self.session = DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session()
        self.session.parent = self
        self._children_name_map["session"] = "session"
        self.idle_timeout = DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout()
        self.idle_timeout.parent = self
        self._children_name_map["idle_timeout"] = "idle-timeout"
        # Module-qualified segment because this container is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route writes through YDK leaf validation for the declared leafs.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting, ['monitor_feature', 'prepaid_feature'], name, value)


    class ServiceAccounting(_Entity_):
        """
        Subscriber accounting service accounting

        .. attribute:: method_list_name

            Service accounting method list name

            **type**\: str

        .. attribute:: accounting_interim_interval

            Accounting interim interval in minutes

            **type**\: int

            **range:** 0..4294967295

            **units**\: minute

        """

        _prefix = 'subscriber-accounting-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Register the two service-accounting leafs and freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting, self).__init__()

            self.yang_name = "service-accounting"
            self.yang_parent_name = "accounting"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
                ('accounting_interim_interval', (YLeaf(YType.uint32, 'accounting-interim-interval'), ['int'])),
            ])
            self.method_list_name = None
            self.accounting_interim_interval = None
            self._segment_path = lambda: "service-accounting"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting, ['method_list_name', 'accounting_interim_interval'], name, value)

        @staticmethod
        def _meta_info():
            # Deferred import: meta table loaded only on demand.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.ServiceAccounting']['meta_info']


    class Session(_Entity_):
        """
        Subscriber accounting session accounting

        .. attribute:: method_list_name

            Session accounting method list name

            **type**\: str

        .. attribute:: periodic_interval

            Interim accounting interval in minutes

            **type**\: int

            **range:** 0..4294967295

            **units**\: minute

        .. attribute:: dual_stack_delay

            Dual stack wait delay in seconds

            **type**\: int

            **range:** 0..4294967295

            **units**\: second

        .. attribute:: hold_acct_start

            Hold Accounting start based on IA\_PD

            **type**\: :py:class:`HoldAcctStart <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session.HoldAcctStart>`

        """

        _prefix = 'subscriber-accounting-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Register the four session-accounting leafs and freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session, self).__init__()

            self.yang_name = "session"
            self.yang_parent_name = "accounting"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
                ('periodic_interval', (YLeaf(YType.uint32, 'periodic-interval'), ['int'])),
                ('dual_stack_delay', (YLeaf(YType.uint32, 'dual-stack-delay'), ['int'])),
                # Enumeration leaf: type resolved from the nested HoldAcctStart enum below.
                ('hold_acct_start', (YLeaf(YType.enumeration, 'hold-acct-start'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg', 'DynamicTemplate', 'IpSubscribers.IpSubscriber.Accounting.Session.HoldAcctStart')])),
            ])
            self.method_list_name = None
            self.periodic_interval = None
            self.dual_stack_delay = None
            self.hold_acct_start = None
            self._segment_path = lambda: "session"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session, ['method_list_name', 'periodic_interval', 'dual_stack_delay', 'hold_acct_start'], name, value)

        class HoldAcctStart(Enum):
            """
            HoldAcctStart (Enum Class)

            Hold Accounting start based on IA\_PD

            .. data:: ipv6_prefix_delegation = 3

                Based on ipv6 delegated prefix

            """

            ipv6_prefix_delegation = Enum.YLeaf(3, "ipv6-prefix-delegation")

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session.HoldAcctStart']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.Session']['meta_info']


    class IdleTimeout(_Entity_):
        """
        Subscriber accounting idle timeout

        .. attribute:: timeout_value

            Idle timeout value in seconds

            **type**\: int

            **range:** 60..4320000

            **units**\: second

        .. attribute:: threshold

            Threshold in minute(s) per packet

            **type**\: int

            **range:** 1..10000

        .. attribute:: direction

            Idle timeout traffic direction

            **type**\: str

        """

        _prefix = 'subscriber-accounting-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Register the three idle-timeout leafs and freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout, self).__init__()

            self.yang_name = "idle-timeout"
            self.yang_parent_name = "accounting"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('timeout_value', (YLeaf(YType.uint32, 'timeout-value'), ['int'])),
                ('threshold', (YLeaf(YType.uint32, 'threshold'), ['int'])),
                ('direction', (YLeaf(YType.str, 'direction'), ['str'])),
            ])
            self.timeout_value = None
            self.threshold = None
            self.direction = None
            self._segment_path = lambda: "idle-timeout"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout, ['timeout_value', 'threshold', 'direction'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Accounting.IdleTimeout']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Accounting']['meta_info']
class Qos(_Entity_):
    """
    QoS dynamically applied configuration template

    .. attribute:: service_policy

        Service policy to be applied in ingress/egress direction

        **type**\: :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy>`

    .. attribute:: account

        QoS L2 overhead accounting

        **type**\: :py:class:`Account <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account>`

    .. attribute:: output

        QoS to be applied in egress direction

        **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output>`

    """

    # Prefix/revision of the augmenting YANG module this container belongs to.
    _prefix = 'qos-ma-bng-cfg'
    _revision = '2016-04-01'

    def __init__(self):
        """Register the three QoS child containers (no leafs here) and freeze."""
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos, self).__init__()

        self.yang_name = "qos"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        # Ancestor 'ip-subscriber' is a keyed list entry.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy)), ("account", ("account", DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account)), ("output", ("output", DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output))])
        # No leafs directly under 'qos'.
        self._leafs = OrderedDict()
        self.service_policy = DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy()
        self.service_policy.parent = self
        self._children_name_map["service_policy"] = "service-policy"
        self.account = DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account()
        self.account.parent = self
        self._children_name_map["account"] = "account"
        self.output = DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output()
        self.output.parent = self
        self._children_name_map["output"] = "output"
        # Module-qualified segment because this container is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-qos-ma-bng-cfg:qos"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No leafs declared; writes still funnel through the entity machinery.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos, [], name, value)


    class ServicePolicy(_Entity_):
        """
        Service policy to be applied in ingress/egress
        direction

        .. attribute:: input

            Subscriber ingress policy

            **type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Input>`

            **presence node**\: True

        .. attribute:: output

            Subscriber egress policy

            **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Output>`

            **presence node**\: True

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            """Register the two presence children; they stay None until the user creates them."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy, self).__init__()

            self.yang_name = "service-policy"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("input", ("input", DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Input)), ("output", ("output", DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Output))])
            self._leafs = OrderedDict()
            # Presence containers are NOT instantiated eagerly (unlike non-presence
            # children elsewhere); None means "not configured".
            self.input = None
            self._children_name_map["input"] = "input"
            self.output = None
            self._children_name_map["output"] = "output"
            self._segment_path = lambda: "service-policy"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy, [], name, value)


        class Input(_Entity_):
            """
            Subscriber ingress policy

            .. attribute:: policy_name

                Name of policy\-map

                **type**\: str

                **mandatory**\: True

            .. attribute:: spi_name

                Name of the SPI

                **type**\: str

            .. attribute:: merge

                TRUE for merge enabled for service\-policy applied on dynamic template

                **type**\: bool

            .. attribute:: merge_id

                Merge ID value

                **type**\: int

                **range:** 0..255

            .. attribute:: account_stats

                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'

                **type**\: bool

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                """Register the five ingress-policy leafs; marked as a presence container."""
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Input, self).__init__()

                self.yang_name = "input"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Existence of this node itself carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "input"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Input, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                # Deferred import: meta table loaded only on demand.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Input']['meta_info']


        class Output(_Entity_):
            """
            Subscriber egress policy

            .. attribute:: policy_name

                Name of policy\-map

                **type**\: str

                **mandatory**\: True

            .. attribute:: spi_name

                Name of the SPI

                **type**\: str

            .. attribute:: merge

                TRUE for merge enabled for service\-policy applied on dynamic template

                **type**\: bool

            .. attribute:: merge_id

                Merge ID value

                **type**\: int

                **range:** 0..255

            .. attribute:: account_stats

                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'

                **type**\: bool

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                """Register the five egress-policy leafs; marked as a presence container."""
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Output, self).__init__()

                self.yang_name = "output"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Existence of this node itself carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "output"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Output, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy.Output']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos.ServicePolicy']['meta_info']


    class Account(_Entity_):
        """
        QoS L2 overhead accounting

        .. attribute:: aal

            ATM adaptation layer AAL

            **type**\: :py:class:`Qosl2DataLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2DataLink>`

        .. attribute:: encapsulation

            Specify encapsulation type

            **type**\: :py:class:`Qosl2Encap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2Encap>`

        .. attribute:: atm_cell_tax

            ATM cell tax to L2 overhead

            **type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: user_defined

            Numeric L2 overhead offset

            **type**\: int

            **range:** \-63..63

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            """Register the four L2-overhead leafs and freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account, self).__init__()

            self.yang_name = "account"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                # Enumeration leafs resolve against types in the qos-ma-bng-cfg model.
                ('aal', (YLeaf(YType.enumeration, 'aal'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2DataLink', '')])),
                ('encapsulation', (YLeaf(YType.enumeration, 'encapsulation'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2Encap', '')])),
                ('atm_cell_tax', (YLeaf(YType.empty, 'atm-cell-tax'), ['Empty'])),
                # int32 (signed): the YANG range is -63..63.
                ('user_defined', (YLeaf(YType.int32, 'user-defined'), ['int'])),
            ])
            self.aal = None
            self.encapsulation = None
            self.atm_cell_tax = None
            self.user_defined = None
            self._segment_path = lambda: "account"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account, ['aal', 'encapsulation', 'atm_cell_tax', 'user_defined'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Account']['meta_info']


    class Output(_Entity_):
        """
        QoS to be applied in egress direction

        .. attribute:: minimum_bandwidth

            Minimum bandwidth value for the subscriber (in kbps)

            **type**\: int

            **range:** 1..4294967295

            **units**\: kbit/s

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            """Register the single minimum-bandwidth leaf and freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output, self).__init__()

            self.yang_name = "output"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('minimum_bandwidth', (YLeaf(YType.uint32, 'minimum-bandwidth'), ['int'])),
            ])
            self.minimum_bandwidth = None
            self._segment_path = lambda: "output"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output, ['minimum_bandwidth'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos.Output']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Qos']['meta_info']
class Igmp(_Entity_):
    """
    IGMPconfiguration

    .. attribute:: default_vrf

        Default VRF

        **type**\: :py:class:`DefaultVrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf>`

    """

    # Prefix/revision of the augmenting YANG module this container belongs to.
    _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        """Register the single default-vrf child container (no leafs) and freeze."""
        # Python 2/3 compatible super() call.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp, self).__init__()

        self.yang_name = "igmp"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        # Ancestor 'ip-subscriber' is a keyed list entry.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("default-vrf", ("default_vrf", DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf))])
        self._leafs = OrderedDict()
        self.default_vrf = DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf()
        self.default_vrf.parent = self
        self._children_name_map["default_vrf"] = "default-vrf"
        # Module-qualified segment because this container is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv4-igmp-dyn-tmpl-cfg:igmp"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No leafs declared; writes still funnel through the entity machinery.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp, [], name, value)


    class DefaultVrf(_Entity_):
        """
        Default VRF

        .. attribute:: explicit_tracking

            IGMPv3 explicit host tracking

            **type**\: :py:class:`ExplicitTracking <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf.ExplicitTracking>`

            **presence node**\: True

        .. attribute:: max_groups

            IGMP Max Groups

            **type**\: int

            **range:** 1..40000

            **default value**\: 25000

        .. attribute:: access_group

            Access list specifying access\-list group range

            **type**\: str

            **length:** 1..64

        .. attribute:: version

            IGMP Version

            **type**\: int

            **range:** 1..3

            **default value**\: 3

        .. attribute:: query_interval

            Query interval in seconds

            **type**\: int

            **range:** 1..3600

            **units**\: second

            **default value**\: 60

        .. attribute:: query_max_response_time

            Query response value in seconds

            **type**\: int

            **range:** 1..25

            **units**\: second

            **default value**\: 10

        .. attribute:: multicast_mode

            Configure Multicast mode variable

            **type**\: :py:class:`DynTmplMulticastMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_igmp_dyn_tmpl_cfg.DynTmplMulticastMode>`

        """

        _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            """Register the IGMP default-VRF leafs and its presence child, then freeze."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf, self).__init__()

            self.yang_name = "default-vrf"
            self.yang_parent_name = "igmp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("explicit-tracking", ("explicit_tracking", DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf.ExplicitTracking))])
            self._leafs = OrderedDict([
                ('max_groups', (YLeaf(YType.uint32, 'max-groups'), ['int'])),
                ('access_group', (YLeaf(YType.str, 'access-group'), ['str'])),
                ('version', (YLeaf(YType.uint32, 'version'), ['int'])),
                ('query_interval', (YLeaf(YType.uint32, 'query-interval'), ['int'])),
                ('query_max_response_time', (YLeaf(YType.uint32, 'query-max-response-time'), ['int'])),
                # Enumeration leaf resolved from the ipv4-igmp-dyn-tmpl-cfg model.
                ('multicast_mode', (YLeaf(YType.enumeration, 'multicast-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_igmp_dyn_tmpl_cfg', 'DynTmplMulticastMode', '')])),
            ])
            self.max_groups = None
            self.access_group = None
            self.version = None
            self.query_interval = None
            self.query_max_response_time = None
            self.multicast_mode = None
            # Presence child: left as None (not instantiated) until configured.
            self.explicit_tracking = None
            self._children_name_map["explicit_tracking"] = "explicit-tracking"
            self._segment_path = lambda: "default-vrf"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf, ['max_groups', 'access_group', 'version', 'query_interval', 'query_max_response_time', 'multicast_mode'], name, value)


        class ExplicitTracking(_Entity_):
            """
            IGMPv3 explicit host tracking

            .. attribute:: enable

                Enable or disable, when value is TRUE or FALSE respectively

                **type**\: bool

                **mandatory**\: True

            .. attribute:: access_list_name

                Access list specifying tracking group range

                **type**\: str

                **length:** 1..64

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'ipv4-igmp-dyn-tmpl-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                """Register the two explicit-tracking leafs; marked as a presence container."""
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf.ExplicitTracking, self).__init__()

                self.yang_name = "explicit-tracking"
                self.yang_parent_name = "default-vrf"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Existence of this node itself carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('enable', (YLeaf(YType.boolean, 'enable'), ['bool'])),
                    ('access_list_name', (YLeaf(YType.str, 'access-list-name'), ['str'])),
                ])
                self.enable = None
                self.access_list_name = None
                self._segment_path = lambda: "explicit-tracking"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf.ExplicitTracking, ['enable', 'access_list_name'], name, value)

            @staticmethod
            def _meta_info():
                # Deferred import: meta table loaded only on demand.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf.ExplicitTracking']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Igmp.DefaultVrf']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Igmp']['meta_info']
class Ipv6Network(_Entity_):
    """
    Interface IPv6 Network configuration data
    .. attribute:: addresses
    Set the IPv6 address of an interface
    **type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses>`
    .. attribute:: mtu
    MTU Setting of Interface
    **type**\: int
    **range:** 1280..65535
    **units**\: byte
    .. attribute:: rpf
    TRUE if enabled, FALSE if disabled
    **type**\: bool
    .. attribute:: unreachables
    Override Sending of ICMP Unreachable Messages
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """
    # YANG module prefix/revision this binding was generated from.
    _prefix = 'ipv6-ma-subscriber-cfg'
    _revision = '2017-01-11'

    def __init__(self):
        # Python 2/3 compatible super() call into the ydk _Entity_ base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network, self).__init__()
        self.yang_name = "ipv6-network"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute name, binding class).
        self._child_classes = OrderedDict([("addresses", ("addresses", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses))])
        # Leaf metadata: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
            ('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
            ('unreachables', (YLeaf(YType.empty, 'unreachables'), ['Empty'])),
        ])
        self.mtu = None
        self.rpf = None
        self.unreachables = None
        # Non-presence child container is always instantiated and parent-linked.
        self.addresses = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses()
        self.addresses.parent = self
        self._children_name_map["addresses"] = "addresses"
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"
        # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through ydk's validating setter.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network, ['mtu', 'rpf', 'unreachables'], name, value)

    class Addresses(_Entity_):
        """
        Set the IPv6 address of an interface
        .. attribute:: auto_configuration
        Auto IPv6 Interface Configuration
        **type**\: :py:class:`AutoConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration>`
        """
        _prefix = 'ipv6-ma-subscriber-cfg'
        _revision = '2017-01-11'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses, self).__init__()
            self.yang_name = "addresses"
            self.yang_parent_name = "ipv6-network"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("auto-configuration", ("auto_configuration", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration))])
            # No leaves of its own — this container only holds a child container.
            self._leafs = OrderedDict()
            self.auto_configuration = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration()
            self.auto_configuration.parent = self
            self._children_name_map["auto_configuration"] = "auto-configuration"
            self._segment_path = lambda: "addresses"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses, [], name, value)

        class AutoConfiguration(_Entity_):
            """
            Auto IPv6 Interface Configuration
            .. attribute:: enable
            The flag to enable auto ipv6 interface configuration
            **type**\: :py:class:`Empty<ydk.types.Empty>`
            """
            _prefix = 'ipv6-ma-subscriber-cfg'
            _revision = '2017-01-11'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration, self).__init__()
                self.yang_name = "auto-configuration"
                self.yang_parent_name = "addresses"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
                ])
                self.enable = None
                self._segment_path = lambda: "auto-configuration"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration, ['enable'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses.AutoConfiguration']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network.Addresses']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Network']['meta_info']
class SpanMonitorSessions(_Entity_):
    """
    Monitor Session container for this template
    .. attribute:: span_monitor_session
    Configuration for a particular class of Monitor Session
    **type**\: list of :py:class:`SpanMonitorSession <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession>`
    """
    # YANG module prefix/revision this binding was generated from.
    _prefix = 'ethernet-span-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call into the ydk _Entity_ base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions, self).__init__()
        self.yang_name = "span-monitor-sessions"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child list name -> (python attribute name, binding class).
        self._child_classes = OrderedDict([("span-monitor-session", ("span_monitor_session", DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession))])
        self._leafs = OrderedDict()
        # YANG list: keyed YList of SpanMonitorSession entries.
        self.span_monitor_session = YList(self)
        self._segment_path = lambda: "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"
        # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through ydk's validating setter.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions, [], name, value)

    class SpanMonitorSession(_Entity_):
        """
        Configuration for a particular class of Monitor
        Session
        .. attribute:: session_class (key)
        Session Class
        **type**\: :py:class:`SpanSessionClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes.SpanSessionClass>`
        .. attribute:: mirror_first
        Mirror a specified number of bytes from start of packet
        **type**\: int
        **range:** 1..10000
        **units**\: byte
        .. attribute:: attachment
        Attach the interface to a Monitor Session
        **type**\: :py:class:`Attachment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Attachment>`
        **presence node**\: True
        .. attribute:: mirror_interval
        Specify the mirror interval
        **type**\: :py:class:`SpanMirrorInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanMirrorInterval>`
        .. attribute:: acl
        Enable ACL matching for traffic mirroring
        **type**\: :py:class:`Acl <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Acl>`
        **presence node**\: True
        """
        _prefix = 'ethernet-span-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession, self).__init__()
            self.yang_name = "span-monitor-session"
            self.yang_parent_name = "span-monitor-sessions"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # YANG list key: session-class identifies an entry in the list.
            self.ylist_key_names = ['session_class']
            self._child_classes = OrderedDict([("attachment", ("attachment", DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Attachment)), ("acl", ("acl", DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Acl))])
            self._leafs = OrderedDict([
                ('session_class', (YLeaf(YType.enumeration, 'session-class'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes', 'SpanSessionClass', '')])),
                ('mirror_first', (YLeaf(YType.uint32, 'mirror-first'), ['int'])),
                ('mirror_interval', (YLeaf(YType.enumeration, 'mirror-interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanMirrorInterval', '')])),
            ])
            self.session_class = None
            self.mirror_first = None
            self.mirror_interval = None
            # Presence containers start as None; they are created only when configured.
            self.attachment = None
            self._children_name_map["attachment"] = "attachment"
            self.acl = None
            self._children_name_map["acl"] = "acl"
            # Path includes the list-key predicate so each entry is uniquely addressable.
            self._segment_path = lambda: "span-monitor-session" + "[session-class='" + str(self.session_class) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession, ['session_class', 'mirror_first', 'mirror_interval'], name, value)

        class Attachment(_Entity_):
            """
            Attach the interface to a Monitor Session
            .. attribute:: session_name
            Session Name
            **type**\: str
            **length:** 1..79
            **mandatory**\: True
            .. attribute:: direction
            Specify the direction of traffic to replicate (optional)
            **type**\: :py:class:`SpanTrafficDirection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanTrafficDirection>`
            .. attribute:: port_level_enable
            Enable port level traffic mirroring
            **type**\: :py:class:`Empty<ydk.types.Empty>`
            This class is a :ref:`presence class<presence-class>`
            """
            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Attachment, self).__init__()
                self.yang_name = "attachment"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('session_name', (YLeaf(YType.str, 'session-name'), ['str'])),
                    ('direction', (YLeaf(YType.enumeration, 'direction'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanTrafficDirection', '')])),
                    ('port_level_enable', (YLeaf(YType.empty, 'port-level-enable'), ['Empty'])),
                ])
                self.session_name = None
                self.direction = None
                self.port_level_enable = None
                self._segment_path = lambda: "attachment"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Attachment, ['session_name', 'direction', 'port_level_enable'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Attachment']['meta_info']

        class Acl(_Entity_):
            """
            Enable ACL matching for traffic mirroring
            .. attribute:: acl_enable
            Enable ACL
            **type**\: :py:class:`Empty<ydk.types.Empty>`
            **mandatory**\: True
            .. attribute:: acl_name
            ACL Name
            **type**\: str
            **length:** 1..80
            This class is a :ref:`presence class<presence-class>`
            """
            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Acl, self).__init__()
                self.yang_name = "acl"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('acl_enable', (YLeaf(YType.empty, 'acl-enable'), ['Empty'])),
                    ('acl_name', (YLeaf(YType.str, 'acl-name'), ['str'])),
                ])
                self.acl_enable = None
                self.acl_name = None
                self._segment_path = lambda: "acl"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Acl, ['acl_enable', 'acl_name'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession.Acl']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions.SpanMonitorSession']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.SpanMonitorSessions']['meta_info']
class Ipv6Neighbor(_Entity_):
    """
    Interface IPv6 Network configuration data
    .. attribute:: ra_interval
    Set IPv6 Router Advertisement (RA) interval in seconds
    **type**\: :py:class:`RaInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInterval>`
    **presence node**\: True
    .. attribute:: framed_prefix
    Set the IPv6 framed ipv6 prefix for a subscriber interface
    **type**\: :py:class:`FramedPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.FramedPrefix>`
    **presence node**\: True
    .. attribute:: duplicate_address_detection
    Duplicate Address Detection (DAD)
    **type**\: :py:class:`DuplicateAddressDetection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection>`
    .. attribute:: ra_initial
    IPv6 ND RA Initial
    **type**\: :py:class:`RaInitial <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInitial>`
    **presence node**\: True
    .. attribute:: framed_prefix_pool
    Set the IPv6 framed ipv6 prefix pool for a subscriber interface
    **type**\: str
    .. attribute:: managed_config
    Host to use stateful protocol for address configuration
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: other_config
    Host to use stateful protocol for non\-address configuration
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: start_ra_on_ipv6_enable
    Start RA on ipv6\-enable config
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: nud_enable
    NUD enable
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_lifetime
    Set IPv6 Router Advertisement (RA) lifetime in seconds
    **type**\: int
    **range:** 0..9000
    **units**\: second
    .. attribute:: router_preference
    RA Router Preference
    **type**\: :py:class:`Ipv6NdRouterPrefTemplate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_subscriber_cfg.Ipv6NdRouterPrefTemplate>`
    .. attribute:: ra_suppress
    Enable suppress IPv6 router advertisement
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_unicast
    Enable RA unicast Flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_unspecify_hoplimit
    Unspecify IPv6 Router Advertisement (RA) hop\-limit
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_suppress_mtu
    RA suppress MTU flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: suppress_cache_learning
    Suppress cache learning flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: reachable_time
    Set advertised reachability time in milliseconds
    **type**\: int
    **range:** 0..3600000
    **units**\: millisecond
    .. attribute:: ns_interval
    Set advertised NS retransmission interval in milliseconds
    **type**\: int
    **range:** 1000..4294967295
    **units**\: millisecond
    """
    # YANG module prefix/revision this binding was generated from.
    _prefix = 'ipv6-nd-subscriber-cfg'
    _revision = '2016-12-19'

    def __init__(self):
        # Python 2/3 compatible super() call into the ydk _Entity_ base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor, self).__init__()
        self.yang_name = "ipv6-neighbor"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute name, binding class).
        self._child_classes = OrderedDict([("ra-interval", ("ra_interval", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInterval)), ("framed-prefix", ("framed_prefix", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.FramedPrefix)), ("duplicate-address-detection", ("duplicate_address_detection", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection)), ("ra-initial", ("ra_initial", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInitial))])
        # Leaf metadata: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('framed_prefix_pool', (YLeaf(YType.str, 'framed-prefix-pool'), ['str'])),
            ('managed_config', (YLeaf(YType.empty, 'managed-config'), ['Empty'])),
            ('other_config', (YLeaf(YType.empty, 'other-config'), ['Empty'])),
            ('start_ra_on_ipv6_enable', (YLeaf(YType.empty, 'start-ra-on-ipv6-enable'), ['Empty'])),
            ('nud_enable', (YLeaf(YType.empty, 'nud-enable'), ['Empty'])),
            ('ra_lifetime', (YLeaf(YType.uint32, 'ra-lifetime'), ['int'])),
            ('router_preference', (YLeaf(YType.enumeration, 'router-preference'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_subscriber_cfg', 'Ipv6NdRouterPrefTemplate', '')])),
            ('ra_suppress', (YLeaf(YType.empty, 'ra-suppress'), ['Empty'])),
            ('ra_unicast', (YLeaf(YType.empty, 'ra-unicast'), ['Empty'])),
            ('ra_unspecify_hoplimit', (YLeaf(YType.empty, 'ra-unspecify-hoplimit'), ['Empty'])),
            ('ra_suppress_mtu', (YLeaf(YType.empty, 'ra-suppress-mtu'), ['Empty'])),
            ('suppress_cache_learning', (YLeaf(YType.empty, 'suppress-cache-learning'), ['Empty'])),
            ('reachable_time', (YLeaf(YType.uint32, 'reachable-time'), ['int'])),
            ('ns_interval', (YLeaf(YType.uint32, 'ns-interval'), ['int'])),
        ])
        self.framed_prefix_pool = None
        self.managed_config = None
        self.other_config = None
        self.start_ra_on_ipv6_enable = None
        self.nud_enable = None
        self.ra_lifetime = None
        self.router_preference = None
        self.ra_suppress = None
        self.ra_unicast = None
        self.ra_unspecify_hoplimit = None
        self.ra_suppress_mtu = None
        self.suppress_cache_learning = None
        self.reachable_time = None
        self.ns_interval = None
        # Presence containers (ra_interval, framed_prefix, ra_initial) start as None;
        # duplicate_address_detection is a plain container and is always instantiated.
        self.ra_interval = None
        self._children_name_map["ra_interval"] = "ra-interval"
        self.framed_prefix = None
        self._children_name_map["framed_prefix"] = "framed-prefix"
        self.duplicate_address_detection = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection()
        self.duplicate_address_detection.parent = self
        self._children_name_map["duplicate_address_detection"] = "duplicate-address-detection"
        self.ra_initial = None
        self._children_name_map["ra_initial"] = "ra-initial"
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"
        # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through ydk's validating setter.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor, ['framed_prefix_pool', 'managed_config', 'other_config', 'start_ra_on_ipv6_enable', 'nud_enable', 'ra_lifetime', 'router_preference', 'ra_suppress', 'ra_unicast', 'ra_unspecify_hoplimit', 'ra_suppress_mtu', 'suppress_cache_learning', 'reachable_time', 'ns_interval'], name, value)

    class RaInterval(_Entity_):
        """
        Set IPv6 Router Advertisement (RA) interval in
        seconds
        .. attribute:: maximum
        Maximum RA interval in seconds
        **type**\: int
        **range:** 4..1800
        **mandatory**\: True
        **units**\: second
        .. attribute:: minimum
        Minimum RA interval in seconds. Must be less than 0.75 \* maximum interval
        **type**\: int
        **range:** 3..1800
        **units**\: second
        This class is a :ref:`presence class<presence-class>`
        """
        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInterval, self).__init__()
            self.yang_name = "ra-interval"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('maximum', (YLeaf(YType.uint32, 'maximum'), ['int'])),
                ('minimum', (YLeaf(YType.uint32, 'minimum'), ['int'])),
            ])
            self.maximum = None
            self.minimum = None
            self._segment_path = lambda: "ra-interval"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInterval, ['maximum', 'minimum'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInterval']['meta_info']

    class FramedPrefix(_Entity_):
        """
        Set the IPv6 framed ipv6 prefix for a
        subscriber interface
        .. attribute:: prefix_length
        IPv6 framed prefix length
        **type**\: int
        **range:** 0..128
        **mandatory**\: True
        .. attribute:: prefix
        IPV6 framed prefix address
        **type**\: str
        **mandatory**\: True
        This class is a :ref:`presence class<presence-class>`
        """
        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.FramedPrefix, self).__init__()
            self.yang_name = "framed-prefix"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
                ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
            ])
            self.prefix_length = None
            self.prefix = None
            self._segment_path = lambda: "framed-prefix"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.FramedPrefix, ['prefix_length', 'prefix'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.FramedPrefix']['meta_info']

    class DuplicateAddressDetection(_Entity_):
        """
        Duplicate Address Detection (DAD)
        .. attribute:: attempts
        Set IPv6 duplicate address detection transmits
        **type**\: int
        **range:** 0..600
        """
        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection, self).__init__()
            self.yang_name = "duplicate-address-detection"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('attempts', (YLeaf(YType.uint32, 'attempts'), ['int'])),
            ])
            self.attempts = None
            self._segment_path = lambda: "duplicate-address-detection"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection, ['attempts'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.DuplicateAddressDetection']['meta_info']

    class RaInitial(_Entity_):
        """
        IPv6 ND RA Initial
        .. attribute:: count
        Initial RA count
        **type**\: int
        **range:** 0..32
        **mandatory**\: True
        .. attribute:: interval
        Initial RA interval in seconds
        **type**\: int
        **range:** 4..1800
        **mandatory**\: True
        **units**\: second
        This class is a :ref:`presence class<presence-class>`
        """
        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInitial, self).__init__()
            self.yang_name = "ra-initial"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('count', (YLeaf(YType.uint32, 'count'), ['int'])),
                ('interval', (YLeaf(YType.uint32, 'interval'), ['int'])),
            ])
            self.count = None
            self.interval = None
            self._segment_path = lambda: "ra-initial"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInitial, ['count', 'interval'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor.RaInitial']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6Neighbor']['meta_info']
class Pbr(_Entity_):
    """
    Dynamic Template PBR configuration
    .. attribute:: service_policies
    Ingress service policy
    **type**\: :py:class:`ServicePolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies>`
    .. attribute:: service_policy_in
    Class for subscriber ingress policy
    **type**\: str
    """
    # YANG module prefix/revision this binding was generated from.
    _prefix = 'pbr-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call into the ydk _Entity_ base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr, self).__init__()
        self.yang_name = "pbr"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("service-policies", ("service_policies", DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies))])
        self._leafs = OrderedDict([
            ('service_policy_in', (YLeaf(YType.str, 'service-policy-in'), ['str'])),
        ])
        self.service_policy_in = None
        # Non-presence child container is always instantiated and parent-linked.
        self.service_policies = DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies()
        self.service_policies.parent = self
        self._children_name_map["service_policies"] = "service-policies"
        self._segment_path = lambda: "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"
        # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through ydk's validating setter.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr, ['service_policy_in'], name, value)

    class ServicePolicies(_Entity_):
        """
        Ingress service policy
        .. attribute:: service_policy
        Service policy details
        **type**\: list of :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies.ServicePolicy>`
        """
        _prefix = 'pbr-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies, self).__init__()
            self.yang_name = "service-policies"
            self.yang_parent_name = "pbr"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies.ServicePolicy))])
            self._leafs = OrderedDict()
            # YANG list: keyed YList of ServicePolicy entries.
            self.service_policy = YList(self)
            self._segment_path = lambda: "service-policies"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies, [], name, value)

        class ServicePolicy(_Entity_):
            """
            Service policy details
            .. attribute:: service_policy (key)
            Name of policy\-map
            **type**\: str
            **length:** 1..64
            """
            _prefix = 'pbr-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies.ServicePolicy, self).__init__()
                self.yang_name = "service-policy"
                self.yang_parent_name = "service-policies"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                # YANG list key: service-policy name identifies an entry.
                self.ylist_key_names = ['service_policy']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('service_policy', (YLeaf(YType.str, 'service-policy'), ['str'])),
                ])
                self.service_policy = None
                # Path includes the list-key predicate so each entry is uniquely addressable.
                self._segment_path = lambda: "service-policy" + "[service-policy='" + str(self.service_policy) + "']"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies.ServicePolicy, ['service_policy'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies.ServicePolicy']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Pbr.ServicePolicies']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Pbr']['meta_info']
class Ipv4PacketFilter(_Entity_):
"""
IPv4 Packet Filtering configuration for the
template
.. attribute:: outbound
IPv4 Packet filter to be applied to outbound packets
**type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound>`
.. attribute:: inbound
IPv4 Packet filter to be applied to inbound packets
**type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound>`
"""
_prefix = 'ip-pfilter-subscriber-cfg'
_revision = '2015-11-09'
def __init__(self):
    # Python 2/3 compatible super() call into the ydk _Entity_ base class.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter, self).__init__()
    self.yang_name = "ipv4-packet-filter"
    self.yang_parent_name = "ip-subscriber"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG child container name -> (python attribute name, binding class).
    self._child_classes = OrderedDict([("outbound", ("outbound", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound)), ("inbound", ("inbound", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound))])
    self._leafs = OrderedDict()
    # Non-presence child containers are always instantiated and parent-linked.
    self.outbound = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound()
    self.outbound.parent = self
    self._children_name_map["outbound"] = "outbound"
    self.inbound = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound()
    self.inbound.parent = self
    self._children_name_map["inbound"] = "inbound"
    self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"
    # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
    self._is_frozen = True

def __setattr__(self, name, value):
    # Route every attribute write through ydk's validating setter.
    self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter, [], name, value)
class Outbound(_Entity_):
    """
    IPv4 Packet filter to be applied to outbound
    packets
    .. attribute:: common_acl_name
    Not supported (Leave unspecified)
    **type**\: str
    **length:** 1..64
    .. attribute:: name
    IPv4 Packet Filter Name to be applied to Outbound packets
    **type**\: str
    **length:** 1..64
    .. attribute:: hardware_count
    Not supported (Leave unspecified)
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: interface_statistics
    Not supported (Leave unspecified)
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """
    # YANG module prefix/revision this binding was generated from.
    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Python 2/3 compatible super() call into the ydk _Entity_ base class.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound, self).__init__()
        self.yang_name = "outbound"
        self.yang_parent_name = "ipv4-packet-filter"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf metadata: python attribute -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
            ('name', (YLeaf(YType.str, 'name'), ['str'])),
            ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
            ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
        ])
        self.common_acl_name = None
        self.name = None
        self.hardware_count = None
        self.interface_statistics = None
        self._segment_path = lambda: "outbound"
        # Set last — presumably _perform_setattr enforces the frozen state; keep this ordering.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through ydk's validating setter.
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import keeps the heavy generated meta module out of normal import paths.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Outbound']['meta_info']
class Inbound(_Entity_):
    """
    IPv4 Packet filter to be applied to inbound
    packets

    .. attribute:: common_acl_name

        Not supported (Leave unspecified)

        **type**\: str

        **length:** 1..64

    .. attribute:: name

        IPv4 Packet Filter Name to be applied to Inbound packets NOTE\: This parameter is mandatory if 'CommonACLName' is not specified

        **type**\: str

        **length:** 1..64

    .. attribute:: hardware_count

        Not supported (Leave unspecified)

        **type**\: :py:class:`Empty<ydk.types.Empty>`

    .. attribute:: interface_statistics

        Not supported (Leave unspecified)

        **type**\: :py:class:`Empty<ydk.types.Empty>`

    """

    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        """Initialize the inbound leaf-only container (no child containers)."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound, self).__init__()

        self.yang_name = "inbound"
        self.yang_parent_name = "ipv4-packet-filter"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf registry: python name -> (YLeaf descriptor, accepted python types).
        self._leafs = OrderedDict([
            ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
            ('name', (YLeaf(YType.str, 'name'), ['str'])),
            ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
            ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
        ])
        self.common_acl_name = None
        self.name = None
        self.hardware_count = None
        self.interface_statistics = None
        self._segment_path = lambda: "inbound"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Validate leaf writes via the generated setter machinery."""
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter.Inbound']['meta_info']
@staticmethod
def _meta_info():
    """Look up the generated meta-table record for Ipv4PacketFilter."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    table = meta._meta_table
    return table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv4PacketFilter']['meta_info']
class Ipv6PacketFilter(_Entity_):
    """
    IPv6 Packet Filtering configuration for the
    interface

    .. attribute:: inbound

        IPv6 Packet filter to be applied to inbound packets

        **type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound>`

    .. attribute:: outbound

        IPv6 Packet filter to be applied to outbound packets

        **type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound>`

    """

    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        """Initialize the ipv6-packet-filter container and eagerly build its children."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter, self).__init__()

        self.yang_name = "ipv6-packet-filter"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Maps YANG child names to (python attribute name, binding class).
        self._child_classes = OrderedDict([("inbound", ("inbound", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound)), ("outbound", ("outbound", DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound))])
        self._leafs = OrderedDict()  # no leafs directly under this container

        self.inbound = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound()
        self.inbound.parent = self
        self._children_name_map["inbound"] = "inbound"

        self.outbound = DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound()
        self.outbound.parent = self
        self._children_name_map["outbound"] = "outbound"

        self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Route attribute writes through the validating setter (no leafs here)."""
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter, [], name, value)

    class Inbound(_Entity_):
        """
        IPv6 Packet filter to be applied to inbound
        packets

        .. attribute:: common_acl_name

            Not supported (Leave unspecified)

            **type**\: str

            **length:** 1..64

        .. attribute:: name

            IPv6 Packet Filter Name to be applied to Inbound NOTE\: This parameter is mandatory if 'CommonACLName' is not specified

            **type**\: str

            **length:** 1..64

        .. attribute:: interface_statistics

            Not supported (Leave unspecified)

            **type**\: :py:class:`Empty<ydk.types.Empty>`

        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            """Initialize the inbound leaf-only container (no hardware-count leaf for IPv6)."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound, self).__init__()

            self.yang_name = "inbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "inbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            """Validate leaf writes via the generated setter machinery."""
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            """Return this class's entry from the generated meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Inbound']['meta_info']

    class Outbound(_Entity_):
        """
        IPv6 Packet filter to be applied to outbound
        packets

        .. attribute:: common_acl_name

            Not supported (Leave unspecified)

            **type**\: str

            **length:** 1..64

        .. attribute:: name

            IPv6 Packet Filter Name to be applied to Outbound packets

            **type**\: str

            **length:** 1..64

        .. attribute:: interface_statistics

            Not supported (Leave unspecified)

            **type**\: :py:class:`Empty<ydk.types.Empty>`

        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            """Initialize the outbound leaf-only container (no hardware-count leaf for IPv6)."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound, self).__init__()

            self.yang_name = "outbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "outbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            """Validate leaf writes via the generated setter machinery."""
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            """Return this class's entry from the generated meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter.Outbound']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Ipv6PacketFilter']['meta_info']
class Dhcpd(_Entity_):
    """
    Interface dhcpv4 configuration data

    .. attribute:: dhcpv4_iplease

        Cisco VSA to configure any dhcp4 ip lease per subscriber

        **type**\: str

    .. attribute:: class_

        The class to be used for proxy/server profile

        **type**\: str

    .. attribute:: mode_class

        Select proxy/server profile based on mode class name

        **type**\: str

    .. attribute:: default_gateway

        The Default Gateway IP address

        **type**\: str

        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

    .. attribute:: session_limit

        The pool to be used for Prefix Delegation

        **type**\: int

        **range:** 0..4294967295

    .. attribute:: dhcpv4_option

        Cisco VSA to configure any dhcp4 option per subscriber

        **type**\: str

    """

    _prefix = 'ipv4-dhcpd-subscriber-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Initialize the dhcpd leaf-only container (DHCPv4 subscriber settings)."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd, self).__init__()

        self.yang_name = "dhcpd"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf registry: python name -> (YLeaf descriptor, accepted python types).
        # Note 'class' is a Python keyword, hence the 'class_' attribute name.
        self._leafs = OrderedDict([
            ('dhcpv4_iplease', (YLeaf(YType.str, 'dhcpv4-iplease'), ['str'])),
            ('class_', (YLeaf(YType.str, 'class'), ['str'])),
            ('mode_class', (YLeaf(YType.str, 'mode-class'), ['str'])),
            ('default_gateway', (YLeaf(YType.str, 'default-gateway'), ['str'])),
            ('session_limit', (YLeaf(YType.uint32, 'session-limit'), ['int'])),
            ('dhcpv4_option', (YLeaf(YType.str, 'dhcpv4-option'), ['str'])),
        ])
        self.dhcpv4_iplease = None
        self.class_ = None
        self.mode_class = None
        self.default_gateway = None
        self.session_limit = None
        self.dhcpv4_option = None
        self._segment_path = lambda: "Cisco-IOS-XR-ipv4-dhcpd-subscriber-cfg:dhcpd"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Validate leaf writes via the generated setter machinery."""
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd, ['dhcpv4_iplease', 'class_', 'mode_class', 'default_gateway', 'session_limit', 'dhcpv4_option'], name, value)

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpd']['meta_info']
class Dhcpv6(_Entity_):
    """
    Interface dhcpv6 configuration data

    .. attribute:: delegated_prefix

        The prefix to be used for Prefix Delegation

        **type**\: :py:class:`DelegatedPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6.DelegatedPrefix>`

        **presence node**\: True

    .. attribute:: dns_ipv6address

        Dns IPv6 Address

        **type**\: str

        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

    .. attribute:: mode_class

        Select proxy/server profile based on mode class name

        **type**\: str

    .. attribute:: dhcpv6_iplease

        Cisco VSA to configure any dhcpv6 ip lease per subscriber

        **type**\: str

    .. attribute:: dhcpv6_option

        Cisco VSA to configure any dhcpv6 option per subscriber

        **type**\: str

    .. attribute:: address_pool

        The pool to be used for Address assignment

        **type**\: str

    .. attribute:: delegated_prefix_pool

        The pool to be used for Prefix Delegation

        **type**\: str

    .. attribute:: class_

        The class to be used for proxy/server profile

        **type**\: str

    .. attribute:: stateful_address

        Stateful IPv6 Address

        **type**\: str

        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

    """

    _prefix = 'ipv6-new-dhcpv6d-subscriber-cfg'
    _revision = '2017-09-30'

    def __init__(self):
        """Initialize the dhcpv6 container; delegated-prefix is a presence node and starts as None."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6, self).__init__()

        self.yang_name = "dhcpv6"
        self.yang_parent_name = "ip-subscriber"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("delegated-prefix", ("delegated_prefix", DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6.DelegatedPrefix))])
        # Leaf registry; 'class' is a Python keyword, hence the 'class_' attribute name.
        self._leafs = OrderedDict([
            ('dns_ipv6address', (YLeaf(YType.str, 'dns-ipv6address'), ['str'])),
            ('mode_class', (YLeaf(YType.str, 'mode-class'), ['str'])),
            ('dhcpv6_iplease', (YLeaf(YType.str, 'dhcpv6-iplease'), ['str'])),
            ('dhcpv6_option', (YLeaf(YType.str, 'dhcpv6-option'), ['str'])),
            ('address_pool', (YLeaf(YType.str, 'address-pool'), ['str'])),
            ('delegated_prefix_pool', (YLeaf(YType.str, 'delegated-prefix-pool'), ['str'])),
            ('class_', (YLeaf(YType.str, 'class'), ['str'])),
            ('stateful_address', (YLeaf(YType.str, 'stateful-address'), ['str'])),
        ])
        self.dns_ipv6address = None
        self.mode_class = None
        self.dhcpv6_iplease = None
        self.dhcpv6_option = None
        self.address_pool = None
        self.delegated_prefix_pool = None
        self.class_ = None
        self.stateful_address = None

        # Presence container: NOT instantiated eagerly, unlike the other children.
        self.delegated_prefix = None
        self._children_name_map["delegated_prefix"] = "delegated-prefix"
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-new-dhcpv6d-subscriber-cfg:dhcpv6"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Validate leaf writes via the generated setter machinery."""
        self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6, ['dns_ipv6address', 'mode_class', 'dhcpv6_iplease', 'dhcpv6_option', 'address_pool', 'delegated_prefix_pool', 'class_', 'stateful_address'], name, value)

    class DelegatedPrefix(_Entity_):
        """
        The prefix to be used for Prefix Delegation

        .. attribute:: prefix

            IPv6 Prefix

            **type**\: str

            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

            **mandatory**\: True

        .. attribute:: prefix_length

            PD Prefix Length

            **type**\: int

            **range:** 0..128

            **mandatory**\: True

        This class is a :ref:`presence class<presence-class>`

        """

        _prefix = 'ipv6-new-dhcpv6d-subscriber-cfg'
        _revision = '2017-09-30'

        def __init__(self):
            """Initialize the delegated-prefix presence container."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6.DelegatedPrefix, self).__init__()

            self.yang_name = "delegated-prefix"
            self.yang_parent_name = "dhcpv6"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True  # existence itself carries meaning in YANG
            self._leafs = OrderedDict([
                ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
                ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
            ])
            self.prefix = None
            self.prefix_length = None
            self._segment_path = lambda: "delegated-prefix"
            self._is_frozen = True

        def __setattr__(self, name, value):
            """Validate leaf writes via the generated setter machinery."""
            self._perform_setattr(DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6.DelegatedPrefix, ['prefix', 'prefix_length'], name, value)

        @staticmethod
        def _meta_info():
            """Return this class's entry from the generated meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6.DelegatedPrefix']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.IpSubscribers.IpSubscriber.Dhcpv6']['meta_info']
@staticmethod
def _meta_info():
    """Look up the generated meta-table record for IpSubscriber."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    table = meta._meta_table
    return table['DynamicTemplate.IpSubscribers.IpSubscriber']['meta_info']
@staticmethod
def _meta_info():
    """Look up the generated meta-table record for IpSubscribers."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    table = meta._meta_table
    return table['DynamicTemplate.IpSubscribers']['meta_info']
class SubscriberServices(_Entity_):
"""
The Service Type Template Table
.. attribute:: subscriber_service
A Service Type Template
**type**\: list of :py:class:`SubscriberService <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
    """Initialize the subscriber-services list container (holds SubscriberService entries)."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(DynamicTemplate.SubscriberServices, self).__init__()

    self.yang_name = "subscriber-services"
    self.yang_parent_name = "dynamic-template"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_classes = OrderedDict([("subscriber-service", ("subscriber_service", DynamicTemplate.SubscriberServices.SubscriberService))])
    self._leafs = OrderedDict()  # container holds only the keyed list, no leafs
    # YList: ordered, keyed collection of SubscriberService entries.
    self.subscriber_service = YList(self)
    self._segment_path = lambda: "subscriber-services"
    self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/%s" % self._segment_path()
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter (no leafs here)."""
    leaf_names = []
    self._perform_setattr(
        DynamicTemplate.SubscriberServices,
        leaf_names,
        name,
        value,
    )
class SubscriberService(_Entity_):
"""
A Service Type Template
.. attribute:: template_name (key)
The name of the template
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: ipv4_network
Interface IPv4 Network configuration data
**type**\: :py:class:`Ipv4Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network>`
.. attribute:: subscriber_attribute
Subscriber attribute configuration data
**type**\: :py:class:`SubscriberAttribute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute>`
.. attribute:: accounting
Subscriber accounting dynamic\-template commands
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Accounting>`
.. attribute:: qos
QoS dynamically applied configuration template
**type**\: :py:class:`Qos <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos>`
.. attribute:: ipv6_network
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network>`
.. attribute:: span_monitor_sessions
Monitor Session container for this template
**type**\: :py:class:`SpanMonitorSessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions>`
.. attribute:: vrf
Assign the interface to a VRF
**type**\: str
**length:** 1..32
.. attribute:: ipv6_neighbor
Interface IPv6 Network configuration data
**type**\: :py:class:`Ipv6Neighbor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor>`
.. attribute:: pbr
Dynamic Template PBR configuration
**type**\: :py:class:`Pbr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Pbr>`
.. attribute:: ipv4_packet_filter
IPv4 Packet Filtering configuration for the template
**type**\: :py:class:`Ipv4PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter>`
.. attribute:: ipv6_packet_filter
IPv6 Packet Filtering configuration for the interface
**type**\: :py:class:`Ipv6PacketFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter>`
"""
_prefix = 'subscriber-infra-tmplmgr-cfg'
_revision = '2015-01-07'
def __init__(self):
    """Initialize one subscriber-service list entry, keyed by template_name.

    Child containers come from several augmenting YANG modules, so their map
    keys carry explicit module prefixes (e.g. 'Cisco-IOS-XR-qos-ma-bng-cfg:qos').
    """
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(DynamicTemplate.SubscriberServices.SubscriberService, self).__init__()

    self.yang_name = "subscriber-service"
    self.yang_parent_name = "subscriber-services"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = ['template_name']  # YANG list key
    self._child_classes = OrderedDict([("Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network", ("ipv4_network", DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network)), ("Cisco-IOS-XR-opendns-deviceid-cfg:subscriber-attribute", ("subscriber_attribute", DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute)), ("Cisco-IOS-XR-subscriber-accounting-cfg:accounting", ("accounting", DynamicTemplate.SubscriberServices.SubscriberService.Accounting)), ("Cisco-IOS-XR-qos-ma-bng-cfg:qos", ("qos", DynamicTemplate.SubscriberServices.SubscriberService.Qos)), ("Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network", ("ipv6_network", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network)), ("Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions", ("span_monitor_sessions", DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions)), ("Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor", ("ipv6_neighbor", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor)), ("Cisco-IOS-XR-pbr-subscriber-cfg:pbr", ("pbr", DynamicTemplate.SubscriberServices.SubscriberService.Pbr)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter", ("ipv4_packet_filter", DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter)), ("Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter", ("ipv6_packet_filter", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter))])
    self._leafs = OrderedDict([
        ('template_name', (YLeaf(YType.str, 'template-name'), ['str'])),
        ('vrf', (YLeaf(YType.str, 'Cisco-IOS-XR-infra-rsi-subscriber-cfg:vrf'), ['str'])),
    ])
    self.template_name = None
    self.vrf = None

    # Eagerly instantiate each augmenting child container and link it back.
    self.ipv4_network = DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network()
    self.ipv4_network.parent = self
    self._children_name_map["ipv4_network"] = "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"

    self.subscriber_attribute = DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute()
    self.subscriber_attribute.parent = self
    self._children_name_map["subscriber_attribute"] = "Cisco-IOS-XR-opendns-deviceid-cfg:subscriber-attribute"

    self.accounting = DynamicTemplate.SubscriberServices.SubscriberService.Accounting()
    self.accounting.parent = self
    self._children_name_map["accounting"] = "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"

    self.qos = DynamicTemplate.SubscriberServices.SubscriberService.Qos()
    self.qos.parent = self
    self._children_name_map["qos"] = "Cisco-IOS-XR-qos-ma-bng-cfg:qos"

    self.ipv6_network = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network()
    self.ipv6_network.parent = self
    self._children_name_map["ipv6_network"] = "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"

    self.span_monitor_sessions = DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions()
    self.span_monitor_sessions.parent = self
    self._children_name_map["span_monitor_sessions"] = "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"

    self.ipv6_neighbor = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor()
    self.ipv6_neighbor.parent = self
    self._children_name_map["ipv6_neighbor"] = "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"

    self.pbr = DynamicTemplate.SubscriberServices.SubscriberService.Pbr()
    self.pbr.parent = self
    self._children_name_map["pbr"] = "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"

    self.ipv4_packet_filter = DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter()
    self.ipv4_packet_filter.parent = self
    self._children_name_map["ipv4_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"

    self.ipv6_packet_filter = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter()
    self.ipv6_packet_filter.parent = self
    self._children_name_map["ipv6_packet_filter"] = "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"

    # List entries embed their key predicate in the XPath segment.
    self._segment_path = lambda: "subscriber-service" + "[template-name='" + str(self.template_name) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-infra-tmplmgr-cfg:dynamic-template/subscriber-services/%s" % self._segment_path()
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter for the two leafs."""
    leaf_names = ['template_name', 'vrf']
    self._perform_setattr(
        DynamicTemplate.SubscriberServices.SubscriberService,
        leaf_names,
        name,
        value,
    )
class Ipv4Network(_Entity_):
    """
    Interface IPv4 Network configuration data

    .. attribute:: unnumbered

        Enable IP processing without an explicit address

        **type**\: str

    .. attribute:: mtu

        The IP Maximum Transmission Unit

        **type**\: int

        **range:** 68..65535

        **units**\: byte

    .. attribute:: unreachables

        TRUE if enabled, FALSE if disabled

        **type**\: bool

        **default value**\: false

    .. attribute:: rpf

        TRUE if enabled, FALSE if disabled

        **type**\: bool

        **default value**\: true

    """

    _prefix = 'ipv4-ma-subscriber-cfg'
    _revision = '2015-07-30'

    def __init__(self):
        """Initialize the ipv4-network leaf-only container."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network, self).__init__()

        self.yang_name = "ipv4-network"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('unnumbered', (YLeaf(YType.str, 'unnumbered'), ['str'])),
            ('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
            ('unreachables', (YLeaf(YType.boolean, 'unreachables'), ['bool'])),
            ('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
        ])
        self.unnumbered = None
        self.mtu = None
        self.unreachables = None
        self.rpf = None
        self._segment_path = lambda: "Cisco-IOS-XR-ipv4-ma-subscriber-cfg:ipv4-network"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Validate leaf writes via the generated setter machinery."""
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network, ['unnumbered', 'mtu', 'unreachables', 'rpf'], name, value)

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv4Network']['meta_info']
class SubscriberAttribute(_Entity_):
    """
    Subscriber attribute configuration data

    .. attribute:: open_dns

        OpenDNS configuration data

        **type**\: :py:class:`OpenDns <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns>`

    """

    _prefix = 'opendns-deviceid-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        """Initialize the subscriber-attribute container with its open-dns child."""
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute, self).__init__()

        self.yang_name = "subscriber-attribute"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("open-dns", ("open_dns", DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns))])
        self._leafs = OrderedDict()  # no leafs directly under this container

        self.open_dns = DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns()
        self.open_dns.parent = self
        self._children_name_map["open_dns"] = "open-dns"

        self._segment_path = lambda: "Cisco-IOS-XR-opendns-deviceid-cfg:subscriber-attribute"
        self._is_frozen = True

    def __setattr__(self, name, value):
        """Route attribute writes through the validating setter (no leafs here)."""
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute, [], name, value)

    class OpenDns(_Entity_):
        """
        OpenDNS configuration data

        .. attribute:: device_id

            Specify deviceID to be used for applying OpenDNS policies

            **type**\: str

        """

        _prefix = 'opendns-deviceid-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            """Initialize the open-dns leaf-only container."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns, self).__init__()

            self.yang_name = "open-dns"
            self.yang_parent_name = "subscriber-attribute"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('device_id', (YLeaf(YType.str, 'device-id'), ['str'])),
            ])
            self.device_id = None
            self._segment_path = lambda: "open-dns"
            self._is_frozen = True

        def __setattr__(self, name, value):
            """Validate leaf writes via the generated setter machinery."""
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns, ['device_id'], name, value)

        @staticmethod
        def _meta_info():
            """Return this class's entry from the generated meta table."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute.OpenDns']['meta_info']

    @staticmethod
    def _meta_info():
        """Return this class's entry from the generated meta table."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SubscriberAttribute']['meta_info']
class Accounting(_Entity_):
"""
Subscriber accounting dynamic\-template commands
.. attribute:: service_accounting
Subscriber accounting service accounting
**type**\: :py:class:`ServiceAccounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting>`
.. attribute:: session
Subscriber accounting session accounting
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session>`
.. attribute:: idle_timeout
Subscriber accounting idle timeout
**type**\: :py:class:`IdleTimeout <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout>`
.. attribute:: monitor_feature
Subscriber monitor feature
**type**\: str
.. attribute:: prepaid_feature
Subscriber accounting prepaid feature
**type**\: str
"""
_prefix = 'subscriber-accounting-cfg'
_revision = '2017-09-07'
def __init__(self):
    """Initialize the accounting container: two leafs plus three eager child containers."""
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(DynamicTemplate.SubscriberServices.SubscriberService.Accounting, self).__init__()

    self.yang_name = "accounting"
    self.yang_parent_name = "subscriber-service"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_classes = OrderedDict([("service-accounting", ("service_accounting", DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting)), ("session", ("session", DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session)), ("idle-timeout", ("idle_timeout", DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout))])
    self._leafs = OrderedDict([
        ('monitor_feature', (YLeaf(YType.str, 'monitor-feature'), ['str'])),
        ('prepaid_feature', (YLeaf(YType.str, 'prepaid-feature'), ['str'])),
    ])
    self.monitor_feature = None
    self.prepaid_feature = None

    # Instantiate the three child containers and link them back to this node.
    self.service_accounting = DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting()
    self.service_accounting.parent = self
    self._children_name_map["service_accounting"] = "service-accounting"

    self.session = DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session()
    self.session.parent = self
    self._children_name_map["session"] = "session"

    self.idle_timeout = DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout()
    self.idle_timeout.parent = self
    self._children_name_map["idle_timeout"] = "idle-timeout"

    self._segment_path = lambda: "Cisco-IOS-XR-subscriber-accounting-cfg:accounting"
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate to the YDK base helper, which validates assignments against the
    # declared leaf names once the entity is frozen.
    self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Accounting, ['monitor_feature', 'prepaid_feature'], name, value)
class ServiceAccounting(_Entity_):
    """
    Subscriber accounting service accounting.

    Auto-generated YDK binding for the ``service-accounting`` YANG container.

    .. attribute:: method_list_name

        Service accounting method list name

        **type**\: str

    .. attribute:: accounting_interim_interval

        Accounting interim interval in minutes

        **type**\: int

        **range:** 0..4294967295

        **units**\: minute

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting, self).__init__()

        self.yang_name = "service-accounting"
        self.yang_parent_name = "accounting"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container: no children
        self._leafs = OrderedDict([
            ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
            ('accounting_interim_interval', (YLeaf(YType.uint32, 'accounting-interim-interval'), ['int'])),
        ])
        self.method_list_name = None
        self.accounting_interim_interval = None
        self._segment_path = lambda: "service-accounting"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Restrict assignment to the declared leafs once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting, ['method_list_name', 'accounting_interim_interval'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Accounting.ServiceAccounting']['meta_info']
class Session(_Entity_):
    """
    Subscriber accounting session accounting.

    Auto-generated YDK binding for the ``session`` YANG container.

    .. attribute:: method_list_name

        Session accounting method list name

        **type**\: str

    .. attribute:: periodic_interval

        Interim accounting interval in minutes

        **type**\: int

        **range:** 0..4294967295

        **units**\: minute

    .. attribute:: dual_stack_delay

        Dual stack wait delay in seconds

        **type**\: int

        **range:** 0..4294967295

        **units**\: second

    .. attribute:: hold_acct_start

        Hold Accounting start based on IA\_PD

        **type**\: :py:class:`HoldAcctStart <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session.HoldAcctStart>`

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session, self).__init__()

        self.yang_name = "session"
        self.yang_parent_name = "accounting"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container: no children
        self._leafs = OrderedDict([
            ('method_list_name', (YLeaf(YType.str, 'method-list-name'), ['str'])),
            ('periodic_interval', (YLeaf(YType.uint32, 'periodic-interval'), ['int'])),
            ('dual_stack_delay', (YLeaf(YType.uint32, 'dual-stack-delay'), ['int'])),
            ('hold_acct_start', (YLeaf(YType.enumeration, 'hold-acct-start'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg', 'DynamicTemplate', 'SubscriberServices.SubscriberService.Accounting.Session.HoldAcctStart')])),
        ])
        self.method_list_name = None
        self.periodic_interval = None
        self.dual_stack_delay = None
        self.hold_acct_start = None
        self._segment_path = lambda: "session"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Restrict assignment to the declared leafs once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session, ['method_list_name', 'periodic_interval', 'dual_stack_delay', 'hold_acct_start'], name, value)

    class HoldAcctStart(Enum):
        """
        HoldAcctStart (Enum Class)

        Hold Accounting start based on IA\_PD

        .. data:: ipv6_prefix_delegation = 3

            Based on ipv6 delegated prefix

        """

        ipv6_prefix_delegation = Enum.YLeaf(3, "ipv6-prefix-delegation")

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session.HoldAcctStart']

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Accounting.Session']['meta_info']
class IdleTimeout(_Entity_):
    """
    Subscriber accounting idle timeout.

    Auto-generated YDK binding for the ``idle-timeout`` YANG container.

    .. attribute:: timeout_value

        Idle timeout value in seconds

        **type**\: int

        **range:** 60..4320000

        **units**\: second

    .. attribute:: threshold

        Threshold in minute(s) per packet

        **type**\: int

        **range:** 1..10000

    .. attribute:: direction

        Idle timeout traffic direction

        **type**\: str

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'subscriber-accounting-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout, self).__init__()

        self.yang_name = "idle-timeout"
        self.yang_parent_name = "accounting"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container: no children
        self._leafs = OrderedDict([
            ('timeout_value', (YLeaf(YType.uint32, 'timeout-value'), ['int'])),
            ('threshold', (YLeaf(YType.uint32, 'threshold'), ['int'])),
            ('direction', (YLeaf(YType.str, 'direction'), ['str'])),
        ])
        self.timeout_value = None
        self.threshold = None
        self.direction = None
        self._segment_path = lambda: "idle-timeout"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Restrict assignment to the declared leafs once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout, ['timeout_value', 'threshold', 'direction'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Accounting.IdleTimeout']['meta_info']
@staticmethod
def _meta_info():
    # Lazy import avoids loading the (large) meta module unless requested.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Accounting']['meta_info']
class Qos(_Entity_):
    """
    QoS dynamically applied configuration template.

    Auto-generated YDK binding for the ``qos`` YANG container
    (module ``Cisco-IOS-XR-qos-ma-bng-cfg``).

    .. attribute:: service_policy

        Service policy to be applied in ingress/egress direction

        **type**\: :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy>`

    .. attribute:: account

        QoS L2 overhead accounting

        **type**\: :py:class:`Account <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account>`

    .. attribute:: output

        QoS to be applied in egress direction

        **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output>`

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'qos-ma-bng-cfg'
    _revision = '2016-04-01'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Qos, self).__init__()

        self.yang_name = "qos"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Maps YANG child-container names -> (python attribute name, binding class).
        self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy)), ("account", ("account", DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account)), ("output", ("output", DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output))])
        self._leafs = OrderedDict()  # container has no direct leafs

        # Child container instances are created eagerly and linked back to self.
        self.service_policy = DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy()
        self.service_policy.parent = self
        self._children_name_map["service_policy"] = "service-policy"

        self.account = DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account()
        self.account.parent = self
        self._children_name_map["account"] = "account"

        self.output = DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output()
        self.output.parent = self
        self._children_name_map["output"] = "output"

        self._segment_path = lambda: "Cisco-IOS-XR-qos-ma-bng-cfg:qos"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No leafs: only the declared children may be (re)assigned once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos, [], name, value)

    class ServicePolicy(_Entity_):
        """
        Service policy to be applied in ingress/egress direction.

        .. attribute:: input

            Subscriber ingress policy

            **type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Input>`

            **presence node**\: True

        .. attribute:: output

            Subscriber egress policy

            **type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Output>`

            **presence node**\: True

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy, self).__init__()

            self.yang_name = "service-policy"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("input", ("input", DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Input)), ("output", ("output", DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Output))])
            self._leafs = OrderedDict()
            # Presence containers start as None (absent) and are only
            # instantiated when the user assigns them.
            self.input = None
            self._children_name_map["input"] = "input"
            self.output = None
            self._children_name_map["output"] = "output"
            self._segment_path = lambda: "service-policy"
            self._is_frozen = True  # must be last: enables attribute freezing

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy, [], name, value)

        class Input(_Entity_):
            """
            Subscriber ingress policy.

            .. attribute:: policy_name

                Name of policy\-map

                **type**\: str

                **mandatory**\: True

            .. attribute:: spi_name

                Name of the SPI

                **type**\: str

            .. attribute:: merge

                TRUE for merge enabled for service\-policy applied on dynamic template

                **type**\: bool

            .. attribute:: merge_id

                Merge ID value

                **type**\: int

                **range:** 0..255

            .. attribute:: account_stats

                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'

                **type**\: bool

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Input, self).__init__()

                self.yang_name = "input"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its mere existence carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "input"
                self._is_frozen = True  # must be last: enables attribute freezing

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Input, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the (large) meta module unless requested.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Input']['meta_info']

        class Output(_Entity_):
            """
            Subscriber egress policy.

            .. attribute:: policy_name

                Name of policy\-map

                **type**\: str

                **mandatory**\: True

            .. attribute:: spi_name

                Name of the SPI

                **type**\: str

            .. attribute:: merge

                TRUE for merge enabled for service\-policy applied on dynamic template

                **type**\: bool

            .. attribute:: merge_id

                Merge ID value

                **type**\: int

                **range:** 0..255

            .. attribute:: account_stats

                TRUE for account stats enabled for service\-policy applied on dynamic template. Note\: account stats not supported for subscriber type 'ppp' and 'ipsubscriber'

                **type**\: bool

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'qos-ma-bng-cfg'
            _revision = '2016-04-01'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Output, self).__init__()

                self.yang_name = "output"
                self.yang_parent_name = "service-policy"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its mere existence carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('policy_name', (YLeaf(YType.str, 'policy-name'), ['str'])),
                    ('spi_name', (YLeaf(YType.str, 'spi-name'), ['str'])),
                    ('merge', (YLeaf(YType.boolean, 'merge'), ['bool'])),
                    ('merge_id', (YLeaf(YType.uint32, 'merge-id'), ['int'])),
                    ('account_stats', (YLeaf(YType.boolean, 'account-stats'), ['bool'])),
                ])
                self.policy_name = None
                self.spi_name = None
                self.merge = None
                self.merge_id = None
                self.account_stats = None
                self._segment_path = lambda: "output"
                self._is_frozen = True  # must be last: enables attribute freezing

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Output, ['policy_name', 'spi_name', 'merge', 'merge_id', 'account_stats'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the (large) meta module unless requested.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy.Output']['meta_info']

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos.ServicePolicy']['meta_info']

    class Account(_Entity_):
        """
        QoS L2 overhead accounting.

        .. attribute:: aal

            ATM adaptation layer AAL

            **type**\: :py:class:`Qosl2DataLink <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2DataLink>`

        .. attribute:: encapsulation

            Specify encapsulation type

            **type**\: :py:class:`Qosl2Encap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg.Qosl2Encap>`

        .. attribute:: atm_cell_tax

            ATM cell tax to L2 overhead

            **type**\: :py:class:`Empty<ydk.types.Empty>`

        .. attribute:: user_defined

            Numeric L2 overhead offset

            **type**\: int

            **range:** \-63..63

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account, self).__init__()

            self.yang_name = "account"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('aal', (YLeaf(YType.enumeration, 'aal'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2DataLink', '')])),
                ('encapsulation', (YLeaf(YType.enumeration, 'encapsulation'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_qos_ma_bng_cfg', 'Qosl2Encap', '')])),
                ('atm_cell_tax', (YLeaf(YType.empty, 'atm-cell-tax'), ['Empty'])),
                ('user_defined', (YLeaf(YType.int32, 'user-defined'), ['int'])),
            ])
            self.aal = None
            self.encapsulation = None
            self.atm_cell_tax = None
            self.user_defined = None
            self._segment_path = lambda: "account"
            self._is_frozen = True  # must be last: enables attribute freezing

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account, ['aal', 'encapsulation', 'atm_cell_tax', 'user_defined'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos.Account']['meta_info']

    class Output(_Entity_):
        """
        QoS to be applied in egress direction.

        .. attribute:: minimum_bandwidth

            Minimum bandwidth value for the subscriber (in kbps)

            **type**\: int

            **range:** 1..4294967295

            **units**\: kbit/s

        """

        _prefix = 'qos-ma-bng-cfg'
        _revision = '2016-04-01'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output, self).__init__()

            self.yang_name = "output"
            self.yang_parent_name = "qos"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('minimum_bandwidth', (YLeaf(YType.uint32, 'minimum-bandwidth'), ['int'])),
            ])
            self.minimum_bandwidth = None
            self._segment_path = lambda: "output"
            self._is_frozen = True  # must be last: enables attribute freezing

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output, ['minimum_bandwidth'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos.Output']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Qos']['meta_info']
class Ipv6Network(_Entity_):
    """
    Interface IPv6 Network configuration data.

    Auto-generated YDK binding for the ``ipv6-network`` YANG container
    (module ``Cisco-IOS-XR-ipv6-ma-subscriber-cfg``).

    .. attribute:: addresses

        Set the IPv6 address of an interface

        **type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses>`

    .. attribute:: mtu

        MTU Setting of Interface

        **type**\: int

        **range:** 1280..65535

        **units**\: byte

    .. attribute:: rpf

        TRUE if enabled, FALSE if disabled

        **type**\: bool

    .. attribute:: unreachables

        Override Sending of ICMP Unreachable Messages

        **type**\: :py:class:`Empty<ydk.types.Empty>`

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ipv6-ma-subscriber-cfg'
    _revision = '2017-01-11'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network, self).__init__()

        self.yang_name = "ipv6-network"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("addresses", ("addresses", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses))])
        self._leafs = OrderedDict([
            ('mtu', (YLeaf(YType.uint32, 'mtu'), ['int'])),
            ('rpf', (YLeaf(YType.boolean, 'rpf'), ['bool'])),
            ('unreachables', (YLeaf(YType.empty, 'unreachables'), ['Empty'])),
        ])
        self.mtu = None
        self.rpf = None
        self.unreachables = None

        # Child container instance, created eagerly and linked back to self.
        self.addresses = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses()
        self.addresses.parent = self
        self._children_name_map["addresses"] = "addresses"

        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-ma-subscriber-cfg:ipv6-network"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Restrict assignment to the declared leafs once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network, ['mtu', 'rpf', 'unreachables'], name, value)

    class Addresses(_Entity_):
        """
        Set the IPv6 address of an interface.

        .. attribute:: auto_configuration

            Auto IPv6 Interface Configuration

            **type**\: :py:class:`AutoConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration>`

        """

        _prefix = 'ipv6-ma-subscriber-cfg'
        _revision = '2017-01-11'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses, self).__init__()

            self.yang_name = "addresses"
            self.yang_parent_name = "ipv6-network"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("auto-configuration", ("auto_configuration", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration))])
            self._leafs = OrderedDict()

            self.auto_configuration = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration()
            self.auto_configuration.parent = self
            self._children_name_map["auto_configuration"] = "auto-configuration"

            self._segment_path = lambda: "addresses"
            self._is_frozen = True  # must be last: enables attribute freezing

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses, [], name, value)

        class AutoConfiguration(_Entity_):
            """
            Auto IPv6 Interface Configuration.

            .. attribute:: enable

                The flag to enable auto ipv6 interface configuration

                **type**\: :py:class:`Empty<ydk.types.Empty>`

            """

            _prefix = 'ipv6-ma-subscriber-cfg'
            _revision = '2017-01-11'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration, self).__init__()

                self.yang_name = "auto-configuration"
                self.yang_parent_name = "addresses"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
                ])
                self.enable = None
                self._segment_path = lambda: "auto-configuration"
                self._is_frozen = True  # must be last: enables attribute freezing

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration, ['enable'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the (large) meta module unless requested.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses.AutoConfiguration']['meta_info']

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network.Addresses']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Network']['meta_info']
class SpanMonitorSessions(_Entity_):
    """
    Monitor Session container for this template.

    Auto-generated YDK binding for the ``span-monitor-sessions`` YANG
    container (module ``Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg``).

    .. attribute:: span_monitor_session

        Configuration for a particular class of Monitor Session

        **type**\: list of :py:class:`SpanMonitorSession <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession>`

    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-span-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # Py2/Py3-compatible super() call emitted by the YDK generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions, self).__init__()

        self.yang_name = "span-monitor-sessions"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("span-monitor-session", ("span_monitor_session", DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession))])
        self._leafs = OrderedDict()
        # YANG list node: modelled as a YList of SpanMonitorSession entries.
        self.span_monitor_session = YList(self)
        self._segment_path = lambda: "Cisco-IOS-XR-Ethernet-SPAN-subscriber-cfg:span-monitor-sessions"
        # Must be last: freezes the attribute set enforced by __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Restrict assignment to the declared leafs once frozen.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions, [], name, value)

    class SpanMonitorSession(_Entity_):
        """
        Configuration for a particular class of Monitor Session.

        .. attribute:: session_class (key)

            Session Class

            **type**\: :py:class:`SpanSessionClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes.SpanSessionClass>`

        .. attribute:: mirror_first

            Mirror a specified number of bytes from start of packet

            **type**\: int

            **range:** 1..10000

            **units**\: byte

        .. attribute:: attachment

            Attach the interface to a Monitor Session

            **type**\: :py:class:`Attachment <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Attachment>`

            **presence node**\: True

        .. attribute:: mirror_interval

            Specify the mirror interval

            **type**\: :py:class:`SpanMirrorInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanMirrorInterval>`

        .. attribute:: acl

            Enable ACL matching for traffic mirroring

            **type**\: :py:class:`Acl <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Acl>`

            **presence node**\: True

        """

        _prefix = 'ethernet-span-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession, self).__init__()

            self.yang_name = "span-monitor-session"
            self.yang_parent_name = "span-monitor-sessions"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # 'session-class' is the YANG list key for this entry.
            self.ylist_key_names = ['session_class']
            self._child_classes = OrderedDict([("attachment", ("attachment", DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Attachment)), ("acl", ("acl", DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Acl))])
            self._leafs = OrderedDict([
                ('session_class', (YLeaf(YType.enumeration, 'session-class'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes', 'SpanSessionClass', '')])),
                ('mirror_first', (YLeaf(YType.uint32, 'mirror-first'), ['int'])),
                ('mirror_interval', (YLeaf(YType.enumeration, 'mirror-interval'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanMirrorInterval', '')])),
            ])
            self.session_class = None
            self.mirror_first = None
            self.mirror_interval = None
            # Presence containers start as None (absent) until assigned.
            self.attachment = None
            self._children_name_map["attachment"] = "attachment"
            self.acl = None
            self._children_name_map["acl"] = "acl"
            # Segment path embeds the list key so each entry is addressable.
            self._segment_path = lambda: "span-monitor-session" + "[session-class='" + str(self.session_class) + "']"
            self._is_frozen = True  # must be last: enables attribute freezing

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession, ['session_class', 'mirror_first', 'mirror_interval'], name, value)

        class Attachment(_Entity_):
            """
            Attach the interface to a Monitor Session.

            .. attribute:: session_name

                Session Name

                **type**\: str

                **length:** 1..79

                **mandatory**\: True

            .. attribute:: direction

                Specify the direction of traffic to replicate (optional)

                **type**\: :py:class:`SpanTrafficDirection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg.SpanTrafficDirection>`

            .. attribute:: port_level_enable

                Enable port level traffic mirroring

                **type**\: :py:class:`Empty<ydk.types.Empty>`

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Attachment, self).__init__()

                self.yang_name = "attachment"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its mere existence carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('session_name', (YLeaf(YType.str, 'session-name'), ['str'])),
                    ('direction', (YLeaf(YType.enumeration, 'direction'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_subscriber_cfg', 'SpanTrafficDirection', '')])),
                    ('port_level_enable', (YLeaf(YType.empty, 'port-level-enable'), ['Empty'])),
                ])
                self.session_name = None
                self.direction = None
                self.port_level_enable = None
                self._segment_path = lambda: "attachment"
                self._is_frozen = True  # must be last: enables attribute freezing

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Attachment, ['session_name', 'direction', 'port_level_enable'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the (large) meta module unless requested.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Attachment']['meta_info']

        class Acl(_Entity_):
            """
            Enable ACL matching for traffic mirroring.

            .. attribute:: acl_enable

                Enable ACL

                **type**\: :py:class:`Empty<ydk.types.Empty>`

                **mandatory**\: True

            .. attribute:: acl_name

                ACL Name

                **type**\: str

                **length:** 1..80

            This class is a :ref:`presence class<presence-class>`

            """

            _prefix = 'ethernet-span-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Acl, self).__init__()

                self.yang_name = "acl"
                self.yang_parent_name = "span-monitor-session"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                # Presence container: its mere existence carries configuration meaning.
                self.is_presence_container = True
                self._leafs = OrderedDict([
                    ('acl_enable', (YLeaf(YType.empty, 'acl-enable'), ['Empty'])),
                    ('acl_name', (YLeaf(YType.str, 'acl-name'), ['str'])),
                ])
                self.acl_enable = None
                self.acl_name = None
                self._segment_path = lambda: "acl"
                self._is_frozen = True  # must be last: enables attribute freezing

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Acl, ['acl_enable', 'acl_name'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the (large) meta module unless requested.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession.Acl']['meta_info']

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the (large) meta module unless requested.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions.SpanMonitorSession']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the (large) meta module unless requested.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.SpanMonitorSessions']['meta_info']
# YDK binding for "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor": IPv6
# neighbor-discovery (ND/RA) configuration applied by this dynamic subscriber
# service template.  Auto-generated -- __init__ statement order is
# significant (child wiring before the final _is_frozen assignment).
class Ipv6Neighbor(_Entity_):
    """
    Interface IPv6 Network configuration data
    .. attribute:: ra_interval
    Set IPv6 Router Advertisement (RA) interval in seconds
    **type**\: :py:class:`RaInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInterval>`
    **presence node**\: True
    .. attribute:: framed_prefix
    Set the IPv6 framed ipv6 prefix for a subscriber interface
    **type**\: :py:class:`FramedPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.FramedPrefix>`
    **presence node**\: True
    .. attribute:: duplicate_address_detection
    Duplicate Address Detection (DAD)
    **type**\: :py:class:`DuplicateAddressDetection <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection>`
    .. attribute:: ra_initial
    IPv6 ND RA Initial
    **type**\: :py:class:`RaInitial <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInitial>`
    **presence node**\: True
    .. attribute:: framed_prefix_pool
    Set the IPv6 framed ipv6 prefix pool for a subscriber interface
    **type**\: str
    .. attribute:: managed_config
    Host to use stateful protocol for address configuration
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: other_config
    Host to use stateful protocol for non\-address configuration
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: start_ra_on_ipv6_enable
    Start RA on ipv6\-enable config
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: nud_enable
    NUD enable
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_lifetime
    Set IPv6 Router Advertisement (RA) lifetime in seconds
    **type**\: int
    **range:** 0..9000
    **units**\: second
    .. attribute:: router_preference
    RA Router Preference
    **type**\: :py:class:`Ipv6NdRouterPrefTemplate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_subscriber_cfg.Ipv6NdRouterPrefTemplate>`
    .. attribute:: ra_suppress
    Enable suppress IPv6 router advertisement
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_unicast
    Enable RA unicast Flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_unspecify_hoplimit
    Unspecify IPv6 Router Advertisement (RA) hop\-limit
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: ra_suppress_mtu
    RA suppress MTU flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: suppress_cache_learning
    Suppress cache learning flag
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    .. attribute:: reachable_time
    Set advertised reachability time in milliseconds
    **type**\: int
    **range:** 0..3600000
    **units**\: millisecond
    .. attribute:: ns_interval
    Set advertised NS retransmission interval in milliseconds
    **type**\: int
    **range:** 1000..4294967295
    **units**\: millisecond
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ipv6-nd-subscriber-cfg'
    _revision = '2016-12-19'

    def __init__(self):
        # _Entity_ base initializer, spelled both ways for Python 2/3 support.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor, self).__init__()

        # Static YANG metadata describing this node's place in the data tree.
        self.yang_name = "ipv6-neighbor"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute, binding class).
        self._child_classes = OrderedDict([("ra-interval", ("ra_interval", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInterval)), ("framed-prefix", ("framed_prefix", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.FramedPrefix)), ("duplicate-address-detection", ("duplicate_address_detection", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection)), ("ra-initial", ("ra_initial", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInitial))])
        # Leaf descriptors: python attribute -> (YLeaf wrapper, accepted types).
        self._leafs = OrderedDict([
            ('framed_prefix_pool', (YLeaf(YType.str, 'framed-prefix-pool'), ['str'])),
            ('managed_config', (YLeaf(YType.empty, 'managed-config'), ['Empty'])),
            ('other_config', (YLeaf(YType.empty, 'other-config'), ['Empty'])),
            ('start_ra_on_ipv6_enable', (YLeaf(YType.empty, 'start-ra-on-ipv6-enable'), ['Empty'])),
            ('nud_enable', (YLeaf(YType.empty, 'nud-enable'), ['Empty'])),
            ('ra_lifetime', (YLeaf(YType.uint32, 'ra-lifetime'), ['int'])),
            ('router_preference', (YLeaf(YType.enumeration, 'router-preference'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_nd_subscriber_cfg', 'Ipv6NdRouterPrefTemplate', '')])),
            ('ra_suppress', (YLeaf(YType.empty, 'ra-suppress'), ['Empty'])),
            ('ra_unicast', (YLeaf(YType.empty, 'ra-unicast'), ['Empty'])),
            ('ra_unspecify_hoplimit', (YLeaf(YType.empty, 'ra-unspecify-hoplimit'), ['Empty'])),
            ('ra_suppress_mtu', (YLeaf(YType.empty, 'ra-suppress-mtu'), ['Empty'])),
            ('suppress_cache_learning', (YLeaf(YType.empty, 'suppress-cache-learning'), ['Empty'])),
            ('reachable_time', (YLeaf(YType.uint32, 'reachable-time'), ['int'])),
            ('ns_interval', (YLeaf(YType.uint32, 'ns-interval'), ['int'])),
        ])
        self.framed_prefix_pool = None
        self.managed_config = None
        self.other_config = None
        self.start_ra_on_ipv6_enable = None
        self.nud_enable = None
        self.ra_lifetime = None
        self.router_preference = None
        self.ra_suppress = None
        self.ra_unicast = None
        self.ra_unspecify_hoplimit = None
        self.ra_suppress_mtu = None
        self.suppress_cache_learning = None
        self.reachable_time = None
        self.ns_interval = None

        # Presence-node children start as None; they are created on demand.
        self.ra_interval = None
        self._children_name_map["ra_interval"] = "ra-interval"
        self.framed_prefix = None
        self._children_name_map["framed_prefix"] = "framed-prefix"
        # Non-presence child: instantiated eagerly and parented to self.
        self.duplicate_address_detection = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection()
        self.duplicate_address_detection.parent = self
        self._children_name_map["duplicate_address_detection"] = "duplicate-address-detection"
        self.ra_initial = None
        self._children_name_map["ra_initial"] = "ra-initial"
        # Segment path carries the module name: this subtree is augmented in
        # from a different YANG module than its parent.
        self._segment_path = lambda: "Cisco-IOS-XR-ipv6-nd-subscriber-cfg:ipv6-neighbor"
        # Freeze last: from here on __setattr__ only accepts declared names.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route all writes through _Entity_ validation for the declared leafs.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor, ['framed_prefix_pool', 'managed_config', 'other_config', 'start_ra_on_ipv6_enable', 'nud_enable', 'ra_lifetime', 'router_preference', 'ra_suppress', 'ra_unicast', 'ra_unspecify_hoplimit', 'ra_suppress_mtu', 'suppress_cache_learning', 'reachable_time', 'ns_interval'], name, value)

    # Presence container "ra-interval": maximum (mandatory) and minimum RA
    # interval in seconds.
    class RaInterval(_Entity_):
        """
        Set IPv6 Router Advertisement (RA) interval in
        seconds
        .. attribute:: maximum
        Maximum RA interval in seconds
        **type**\: int
        **range:** 4..1800
        **mandatory**\: True
        **units**\: second
        .. attribute:: minimum
        Minimum RA interval in seconds. Must be less than 0.75 \* maximum interval
        **type**\: int
        **range:** 3..1800
        **units**\: second
        This class is a :ref:`presence class<presence-class>`
        """

        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInterval, self).__init__()

            self.yang_name = "ra-interval"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('maximum', (YLeaf(YType.uint32, 'maximum'), ['int'])),
                ('minimum', (YLeaf(YType.uint32, 'minimum'), ['int'])),
            ])
            self.maximum = None
            self.minimum = None
            self._segment_path = lambda: "ra-interval"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInterval, ['maximum', 'minimum'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInterval']['meta_info']

    # Presence container "framed-prefix": framed IPv6 prefix (address plus
    # prefix length, both mandatory) for the subscriber interface.
    class FramedPrefix(_Entity_):
        """
        Set the IPv6 framed ipv6 prefix for a
        subscriber interface
        .. attribute:: prefix_length
        IPv6 framed prefix length
        **type**\: int
        **range:** 0..128
        **mandatory**\: True
        .. attribute:: prefix
        IPV6 framed prefix address
        **type**\: str
        **mandatory**\: True
        This class is a :ref:`presence class<presence-class>`
        """

        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.FramedPrefix, self).__init__()

            self.yang_name = "framed-prefix"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
                ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
            ])
            self.prefix_length = None
            self.prefix = None
            self._segment_path = lambda: "framed-prefix"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.FramedPrefix, ['prefix_length', 'prefix'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.FramedPrefix']['meta_info']

    # Ordinary (non-presence) container "duplicate-address-detection": number
    # of DAD transmits.
    class DuplicateAddressDetection(_Entity_):
        """
        Duplicate Address Detection (DAD)
        .. attribute:: attempts
        Set IPv6 duplicate address detection transmits
        **type**\: int
        **range:** 0..600
        """

        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection, self).__init__()

            self.yang_name = "duplicate-address-detection"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('attempts', (YLeaf(YType.uint32, 'attempts'), ['int'])),
            ])
            self.attempts = None
            self._segment_path = lambda: "duplicate-address-detection"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection, ['attempts'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.DuplicateAddressDetection']['meta_info']

    # Presence container "ra-initial": initial RA burst count and interval
    # (both mandatory).
    class RaInitial(_Entity_):
        """
        IPv6 ND RA Initial
        .. attribute:: count
        Initial RA count
        **type**\: int
        **range:** 0..32
        **mandatory**\: True
        .. attribute:: interval
        Initial RA interval in seconds
        **type**\: int
        **range:** 4..1800
        **mandatory**\: True
        **units**\: second
        This class is a :ref:`presence class<presence-class>`
        """

        _prefix = 'ipv6-nd-subscriber-cfg'
        _revision = '2016-12-19'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInitial, self).__init__()

            self.yang_name = "ra-initial"
            self.yang_parent_name = "ipv6-neighbor"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self.is_presence_container = True
            self._leafs = OrderedDict([
                ('count', (YLeaf(YType.uint32, 'count'), ['int'])),
                ('interval', (YLeaf(YType.uint32, 'interval'), ['int'])),
            ])
            self.count = None
            self.interval = None
            self._segment_path = lambda: "ra-initial"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInitial, ['count', 'interval'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor.RaInitial']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6Neighbor']['meta_info']
# YDK binding for "Cisco-IOS-XR-pbr-subscriber-cfg:pbr": policy-based routing
# configuration for the dynamic subscriber-service template.  Auto-generated;
# __init__ statement order is significant (_is_frozen must be set last).
class Pbr(_Entity_):
    """
    Dynamic Template PBR configuration
    .. attribute:: service_policies
    Ingress service policy
    **type**\: :py:class:`ServicePolicies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies>`
    .. attribute:: service_policy_in
    Class for subscriber ingress policy
    **type**\: str
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'pbr-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # _Entity_ base initializer (Python 2/3 compatible).
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Pbr, self).__init__()

        self.yang_name = "pbr"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute, binding class).
        self._child_classes = OrderedDict([("service-policies", ("service_policies", DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies))])
        self._leafs = OrderedDict([
            ('service_policy_in', (YLeaf(YType.str, 'service-policy-in'), ['str'])),
        ])
        self.service_policy_in = None

        # Non-presence child container, instantiated eagerly.
        self.service_policies = DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies()
        self.service_policies.parent = self
        self._children_name_map["service_policies"] = "service-policies"
        # Module-qualified segment path: this subtree is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-pbr-subscriber-cfg:pbr"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route all writes through _Entity_ validation for the declared leafs.
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Pbr, ['service_policy_in'], name, value)

    # Container "service-policies": wraps the keyed list of ingress service
    # policies.
    class ServicePolicies(_Entity_):
        """
        Ingress service policy
        .. attribute:: service_policy
        Service policy details
        **type**\: list of :py:class:`ServicePolicy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies.ServicePolicy>`
        """

        _prefix = 'pbr-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies, self).__init__()

            self.yang_name = "service-policies"
            self.yang_parent_name = "pbr"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("service-policy", ("service_policy", DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies.ServicePolicy))])
            self._leafs = OrderedDict()
            # YANG list: YList keeps entries ordered and validated.
            self.service_policy = YList(self)
            self._segment_path = lambda: "service-policies"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies, [], name, value)

        # List entry "service-policy", keyed by the policy-map name.
        class ServicePolicy(_Entity_):
            """
            Service policy details
            .. attribute:: service_policy (key)
            Name of policy\-map
            **type**\: str
            **length:** 1..64
            """

            _prefix = 'pbr-subscriber-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                # _Entity_ base initializer (Python 2/3 compatible).
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies.ServicePolicy, self).__init__()

                self.yang_name = "service-policy"
                self.yang_parent_name = "service-policies"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                # The list key: identifies an entry within the YANG list.
                self.ylist_key_names = ['service_policy']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('service_policy', (YLeaf(YType.str, 'service-policy'), ['str'])),
                ])
                self.service_policy = None
                # Segment path embeds the key predicate, evaluated lazily so
                # it reflects the current key value.
                self._segment_path = lambda: "service-policy" + "[service-policy='" + str(self.service_policy) + "']"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies.ServicePolicy, ['service_policy'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import of the large generated meta module.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
                return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies.ServicePolicy']['meta_info']

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Pbr.ServicePolicies']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Pbr']['meta_info']
# YDK binding for "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter":
# IPv4 packet-filter (ACL) attachment for the dynamic template, split into
# outbound and inbound directions.  Auto-generated; __init__ statement order
# is significant (_is_frozen must be set last).
class Ipv4PacketFilter(_Entity_):
    """
    IPv4 Packet Filtering configuration for the
    template
    .. attribute:: outbound
    IPv4 Packet filter to be applied to outbound packets
    **type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound>`
    .. attribute:: inbound
    IPv4 Packet filter to be applied to inbound packets
    **type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound>`
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # _Entity_ base initializer (Python 2/3 compatible).
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter, self).__init__()

        self.yang_name = "ipv4-packet-filter"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute, binding class).
        self._child_classes = OrderedDict([("outbound", ("outbound", DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound)), ("inbound", ("inbound", DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound))])
        self._leafs = OrderedDict()

        # Non-presence child containers, instantiated eagerly.
        self.outbound = DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound()
        self.outbound.parent = self
        self._children_name_map["outbound"] = "outbound"
        self.inbound = DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound()
        self.inbound.parent = self
        self._children_name_map["inbound"] = "inbound"
        # Module-qualified segment path: this subtree is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv4-packet-filter"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter, [], name, value)

    # Container "outbound": the IPv4 ACL applied to traffic sent toward the
    # subscriber.
    class Outbound(_Entity_):
        """
        IPv4 Packet filter to be applied to outbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv4 Packet Filter Name to be applied to Outbound packets
        **type**\: str
        **length:** 1..64
        .. attribute:: hardware_count
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound, self).__init__()

            self.yang_name = "outbound"
            self.yang_parent_name = "ipv4-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.hardware_count = None
            self.interface_statistics = None
            self._segment_path = lambda: "outbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Outbound']['meta_info']

    # Container "inbound": the IPv4 ACL applied to traffic received from the
    # subscriber.
    class Inbound(_Entity_):
        """
        IPv4 Packet filter to be applied to inbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv4 Packet Filter Name to be applied to Inbound packets NOTE\: This parameter is mandatory if 'CommonACLName' is not specified
        **type**\: str
        **length:** 1..64
        .. attribute:: hardware_count
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound, self).__init__()

            self.yang_name = "inbound"
            self.yang_parent_name = "ipv4-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('hardware_count', (YLeaf(YType.empty, 'hardware-count'), ['Empty'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.hardware_count = None
            self.interface_statistics = None
            self._segment_path = lambda: "inbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound, ['common_acl_name', 'name', 'hardware_count', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter.Inbound']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv4PacketFilter']['meta_info']
# YDK binding for "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter":
# IPv6 packet-filter (ACL) attachment for the dynamic template, split into
# inbound and outbound directions.  Auto-generated; __init__ statement order
# is significant (_is_frozen must be set last).
class Ipv6PacketFilter(_Entity_):
    """
    IPv6 Packet Filtering configuration for the
    interface
    .. attribute:: inbound
    IPv6 Packet filter to be applied to inbound packets
    **type**\: :py:class:`Inbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound>`
    .. attribute:: outbound
    IPv6 Packet filter to be applied to outbound packets
    **type**\: :py:class:`Outbound <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg.DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound>`
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ip-pfilter-subscriber-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # _Entity_ base initializer (Python 2/3 compatible).
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter, self).__init__()

        self.yang_name = "ipv6-packet-filter"
        self.yang_parent_name = "subscriber-service"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute, binding class).
        self._child_classes = OrderedDict([("inbound", ("inbound", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound)), ("outbound", ("outbound", DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound))])
        self._leafs = OrderedDict()

        # Non-presence child containers, instantiated eagerly.
        self.inbound = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound()
        self.inbound.parent = self
        self._children_name_map["inbound"] = "inbound"
        self.outbound = DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound()
        self.outbound.parent = self
        self._children_name_map["outbound"] = "outbound"
        # Module-qualified segment path: this subtree is an augmentation.
        self._segment_path = lambda: "Cisco-IOS-XR-ip-pfilter-subscriber-cfg:ipv6-packet-filter"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter, [], name, value)

    # Container "inbound": the IPv6 ACL applied to traffic received from the
    # subscriber.
    class Inbound(_Entity_):
        """
        IPv6 Packet filter to be applied to inbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv6 Packet Filter Name to be applied to Inbound NOTE\: This parameter is mandatory if 'CommonACLName' is not specified
        **type**\: str
        **length:** 1..64
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound, self).__init__()

            self.yang_name = "inbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "inbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Inbound']['meta_info']

    # Container "outbound": the IPv6 ACL applied to traffic sent toward the
    # subscriber.
    class Outbound(_Entity_):
        """
        IPv6 Packet filter to be applied to outbound
        packets
        .. attribute:: common_acl_name
        Not supported (Leave unspecified)
        **type**\: str
        **length:** 1..64
        .. attribute:: name
        IPv6 Packet Filter Name to be applied to Outbound packets
        **type**\: str
        **length:** 1..64
        .. attribute:: interface_statistics
        Not supported (Leave unspecified)
        **type**\: :py:class:`Empty<ydk.types.Empty>`
        """

        _prefix = 'ip-pfilter-subscriber-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            # _Entity_ base initializer (Python 2/3 compatible).
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound, self).__init__()

            self.yang_name = "outbound"
            self.yang_parent_name = "ipv6-packet-filter"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('common_acl_name', (YLeaf(YType.str, 'common-acl-name'), ['str'])),
                ('name', (YLeaf(YType.str, 'name'), ['str'])),
                ('interface_statistics', (YLeaf(YType.empty, 'interface-statistics'), ['Empty'])),
            ])
            self.common_acl_name = None
            self.name = None
            self.interface_statistics = None
            self._segment_path = lambda: "outbound"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound, ['common_acl_name', 'name', 'interface_statistics'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import of the large generated meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
            return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter.Outbound']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import of the large generated meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
        return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService.Ipv6PacketFilter']['meta_info']
@staticmethod
def _meta_info():
    # Metadata accessor for the SubscriberService list entry; lazy import of
    # the large generated meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    return meta._meta_table['DynamicTemplate.SubscriberServices.SubscriberService']['meta_info']
@staticmethod
def _meta_info():
    # Metadata accessor for the SubscriberServices container; lazy import of
    # the large generated meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    return meta._meta_table['DynamicTemplate.SubscriberServices']['meta_info']
def clone_ptr(self):
    # Replace the cached top-level entity with a fresh DynamicTemplate and
    # return it; used by the YDK runtime when it needs a clean copy of the
    # top-level object to decode a response into.
    self._top_entity = DynamicTemplate()
    return self._top_entity
@staticmethod
def _meta_info():
    # Metadata accessor for the DynamicTemplate top-level container; lazy
    # import of the large generated meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_infra_tmplmgr_cfg as meta
    return meta._meta_table['DynamicTemplate']['meta_info']
| 48.555902 | 1,548 | 0.482114 |
09b376c0cfab7c53bd66e0801340dd810e6ddf84 | 8,163 | py | Python | Server/src/virtualenv/Lib/site-packages/pip/baseparser.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/site-packages/pip/baseparser.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/site-packages/pip/baseparser.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | """Base option parser setup"""
import sys
import optparse
import os
import textwrap
from distutils.util import strtobool
from pip.backwardcompat import ConfigParser, string_types
from pip.locations import default_config_file
from pip.util import get_terminal_size, get_prog
from pip._vendor import pkg_resources
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have inital newlines, some don't
description = description.lstrip('\n')
#some doc strings have final newlines and spaces, some don't
description = description.rstrip()
#dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
    """Custom help formatter for use in ConfigOptionParser that refreshes
    the parser's defaults (from config files / environment) just before
    optparse expands them, so ``%default`` placeholders show the values
    actually in effect."""

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            parser.update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
    """OptionParser with positional group insertion and a flattened
    view of all options."""

    def insert_option_group(self, idx, *args, **kwargs):
        """Insert an OptionGroup at position *idx* instead of appending it."""
        group = self.add_option_group(*args, **kwargs)
        # add_option_group appended it; move it to the requested slot.
        del self.option_groups[-1]
        self.option_groups.insert(idx, group)
        return group

    @property
    def option_list_all(self):
        """Get a list of all options, including those in option groups."""
        options = list(self.option_list)
        for group in self.option_groups:
            options.extend(group.option_list)
        return options
class ConfigOptionParser(CustomOptionParser):
    """Custom option parser which updates its defaults by checking the
    configuration files and environmental variables"""

    def __init__(self, *args, **kwargs):
        # RawConfigParser: values are read verbatim, no %-interpolation.
        self.config = ConfigParser.RawConfigParser()
        # 'name' is mandatory; it selects the command-specific config section.
        self.name = kwargs.pop('name')
        self.files = self.get_config_files()
        if self.files:
            self.config.read(self.files)
        assert self.name
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        # PIP_CONFIG_FILE overrides the default location; pointing it at
        # os.devnull disables configuration files entirely.
        config_file = os.environ.get('PIP_CONFIG_FILE', False)
        if config_file == os.devnull:
            return []
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [default_config_file]

    def check_default(self, option, key, val):
        # Validate a configured default with the option's own checker;
        # an invalid value aborts the whole program with exit status 3.
        try:
            return option.check_value(key, val)
        except optparse.OptionValueError:
            # sys.exc_info()[1]: Python 2/3-compatible exception access.
            e = sys.exc_info()[1]
            print("An error occurred during configuration: %s" % e)
            sys.exit(3)

    def update_defaults(self, defaults):
        """Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists)."""
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files ('global' first, then the command section, so the
        #    command-specific section wins on key collisions)
        for section in ('global', self.name):
            config.update(self.normalize_keys(self.get_config_section(section)))
        # 2. environmental variables (highest priority: applied last)
        config.update(self.normalize_keys(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                if option.action in ('store_true', 'store_false', 'count'):
                    val = strtobool(val)
                if option.action == 'append':
                    # list-valued options are whitespace-separated in config
                    val = val.split()
                    val = [self.check_default(option, key, v) for v in val]
                else:
                    val = self.check_default(option, key, val)
                defaults[option.dest] = val
        return defaults

    def normalize_keys(self, items):
        """Return a config dictionary with normalized keys regardless of
        whether the keys were specified in environment variables or in config
        files"""
        normalized = {}
        for key, val in items:
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            normalized[key] = val
        return normalized

    def get_config_section(self, name):
        """Get a section of a configuration"""
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self, prefix='PIP_'):
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if key.startswith(prefix):
                # strip the prefix and lowercase, e.g. PIP_TIMEOUT -> timeout
                yield (key.replace(prefix, '').lower(), val)

    def get_default_values(self):
        """Overridding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)
        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, string_types):
                # string defaults still need the option's value check applied
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)

    def error(self, msg):
        # Print usage to stderr and exit with status 2 (optparse convention).
        self.print_usage(sys.stderr)
        self.exit(2, "%s\n" % msg)
| 36.28 | 80 | 0.613377 |
05d0ec892eb7d88735b0b0a53269d21b0b13d08f | 451 | py | Python | Program_Python_code/19.py | skyhigh8591/Learning_Test_Program | 5f3c0f11874618919002126863772e0dd06a1072 | [
"MIT"
] | null | null | null | Program_Python_code/19.py | skyhigh8591/Learning_Test_Program | 5f3c0f11874618919002126863772e0dd06a1072 | [
"MIT"
] | null | null | null | Program_Python_code/19.py | skyhigh8591/Learning_Test_Program | 5f3c0f11874618919002126863772e0dd06a1072 | [
"MIT"
] | null | null | null | #! /usr/bin/python
#coding=utf-8
for letter in "python":
print "current letter",letter
fruits = ["banana","apple","mango"]
for fruit in fruits:
print "current letter",fruit
for index in range(len(fruits)):
print index
print "current letter",fruit[index]
#########################
for num in range(1,5):
print num
print "1~10,num=num+2"
for num in range(1,11,2):
print num
print "1~10,num=num-2"
for num in range(1,11,-2):
print num | 15.033333 | 36 | 0.640798 |
ab6785b57f8bd8c38eac5bd9e3cadde88ca87dec | 4,163 | py | Python | functions/tf_utils/ConvLSTMCell.py | hsokooti/RegNet | 28a8b6132677bb58e9fc811c0dd15d78913c7e86 | [
"Apache-2.0"
] | 187 | 2018-01-08T11:37:12.000Z | 2022-03-03T06:26:43.000Z | functions/tf_utils/ConvLSTMCell.py | hsokooti/RegNet | 28a8b6132677bb58e9fc811c0dd15d78913c7e86 | [
"Apache-2.0"
] | 15 | 2018-01-27T07:12:41.000Z | 2021-08-22T12:37:30.000Z | functions/tf_utils/ConvLSTMCell.py | hsokooti/RegNet | 28a8b6132677bb58e9fc811c0dd15d78913c7e86 | [
"Apache-2.0"
] | 60 | 2018-01-09T13:00:32.000Z | 2021-06-15T03:02:36.000Z | import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops import init_ops
from tensorflow.python.util import nest
import collections
class ConvLSTMCell(object):
    """ Convolutional LSTM network cell (ConvLSTMCell).
    The implementation is based on http://arxiv.org/abs/1506.04214.
    and `BasicLSTMCell` in TensorFlow.
    2D version: https://github.com/iwyoo/ConvLSTMCell-tensorflow
    """

    def __init__(self, hidden_num, filter_size=None,
                 forget_bias=1.0, activation=tanh, name="ConvLSTMCell"):
        # hidden_num: number of hidden/state channels.
        # filter_size: spatial extent of the 3D convolution kernel (default 3x3x3).
        if filter_size is None:
            filter_size = [3, 3, 3]
        self.hidden_num = hidden_num
        self.filter_size = filter_size
        self.forget_bias = forget_bias
        self.activation = activation
        self.name = name

    def zero_state(self, batch_size, height, width, depth):
        # Zero initial state: cell state c and hidden state h are stored
        # concatenated along the channel axis, hence hidden_num * 2 channels.
        # (commented-out alternatives below are earlier experiments)
        # return tf.zeros_like(tf.placeholder(tf.float32, shape=[batch_size, height, width, depth, self.hidden_num * 2]))
        # A = tf.tile(tf.expand_dims(input_tensor, axis=5), [1, 1, 1, 1, 1, self.hidden_num * 2])
        # return tf.zeros_like(tf.placeholder(tf.float32, shape=[batch_size, height, width, depth, self.hidden_num * 2]))
        # return tf.keras.backend.zeros(shape=[batch_size, height, width, depth, self.hidden_num * 2])
        return tf.zeros([batch_size, height, width, depth, self.hidden_num * 2])

    def __call__(self, inputs, state, scope=None):
        """Convolutional Long short-term memory cell (ConvLSTM)."""
        with vs.variable_scope(scope or self.name):  # "ConvLSTMCell"
            # c, h = array_ops.split(3, 2, state) original one
            c, h = array_ops.split(state, 2, axis=4)  # Hessam
            # batch_size * height * width * channel
            concat = _conv([inputs, h], 4 * self.hidden_num, self.filter_size)
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(concat, 4, axis=4)
            # Standard LSTM gate equations, applied per voxel.
            new_c = (c * sigmoid(f + self.forget_bias) + sigmoid(i) *
                     self.activation(j))
            new_h = self.activation(new_c) * sigmoid(o)
            new_state = array_ops.concat([new_c, new_h], axis=4)
        return new_h, new_state
def _conv(args, output_size, filter_size, stddev=0.001, bias=True, bias_start=0.0, scope=None):
    """3D convolution over one or more 5D tensors concatenated on the channel axis.

    Args:
        args: a 5D tensor or list of 5D tensors, each shaped
            (batch, height, width, depth, channels); spatial sizes must match.
        output_size: number of output channels.
        filter_size: kernel size as [k_h, k_w, k_d].
        stddev: stddev of the truncated-normal kernel initializer.
        bias: whether to add a bias term.
        bias_start: constant initial value for the bias.
        scope: variable scope name (defaults to "Conv").

    Returns:
        A 5D tensor of shape (batch, height, width, depth, output_size).

    Raises:
        ValueError: if `args` is empty, a tensor is not 5D, its channel
            dimension is unknown, or spatial dimensions disagree.
    """
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    # Total channel count across all inputs (they get concatenated on axis 4).
    total_arg_size = 0
    shapes = [a.get_shape().as_list() for a in args]
    height = shapes[0][1]
    width = shapes[0][2]
    depth = shapes[0][3]
    for shape in shapes:
        if len(shape) != 5:
            # BUG FIX: message said "4D" (copied from the 2D version) while
            # the check requires 5D (batch, height, width, depth, channels).
            raise ValueError("Conv is expecting 5D arguments: %s" % str(shapes))
        if not shape[4]:
            raise ValueError("Conv expects shape[4] of arguments: %s" % str(shapes))
        if shape[1] == height and shape[2] == width and shape[3] == depth:
            total_arg_size += shape[4]
        else:
            raise ValueError("Inconsistent height and width size in arguments: %s" % str(shapes))
    with vs.variable_scope(scope or "Conv"):
        kernel = vs.get_variable(
            "Kernel",
            [filter_size[0], filter_size[1], filter_size[2], total_arg_size, output_size],
            initializer=init_ops.truncated_normal_initializer(stddev=stddev), trainable=True)
        if len(args) == 1:
            res = tf.nn.conv3d(args[0], kernel, [1, 1, 1, 1, 1], padding='SAME')
        else:
            res = tf.nn.conv3d(array_ops.concat(args, 4), kernel, [1, 1, 1, 1, 1], padding='SAME')
        if not bias:
            return res
        bias_term = vs.get_variable("Bias", [output_size],
                                    initializer=init_ops.constant_initializer(bias_start))
        return res + bias_term
0e5b0c452a781a8a628b4658f26c599909825f52 | 16,282 | py | Python | tests/python/kaolin/ops/spc/test_conv.py | priyasundaresan/kaolin | ddae34ba5f09bffc4368c29bc50491c5ece797d4 | [
"ECL-2.0",
"Apache-2.0"
] | 3,747 | 2019-11-13T02:18:16.000Z | 2022-03-31T21:12:31.000Z | tests/python/kaolin/ops/spc/test_conv.py | priyasundaresan/kaolin | ddae34ba5f09bffc4368c29bc50491c5ece797d4 | [
"ECL-2.0",
"Apache-2.0"
] | 371 | 2019-11-13T14:50:59.000Z | 2022-03-22T19:40:06.000Z | tests/python/kaolin/ops/spc/test_conv.py | priyasundaresan/kaolin | ddae34ba5f09bffc4368c29bc50491c5ece797d4 | [
"ECL-2.0",
"Apache-2.0"
] | 482 | 2019-11-13T05:04:38.000Z | 2022-03-31T10:20:26.000Z | # Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import os
from itertools import product
import torch
from kaolin.ops.spc.uint8 import bits_to_uint8, uint8_bits_sum, uint8_to_bits
from kaolin.ops.random import random_spc_octrees
from kaolin.rep import Spc
from kaolin.ops import spc
from kaolin.utils.testing import FLOAT_TYPES, with_seed, check_tensor
# Disable TensorFloat-32 on Ampere+ GPUs so CUDA conv/matmul results stay
# bit-comparable with the float32 reference computations in the tests below.
os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
@pytest.mark.parametrize('batch_size', [1, 3])
@pytest.mark.parametrize('height,width,depth,threshold',
                         [(27, 37, 37, 0.7), (64, 64, 64, 0.)])
@pytest.mark.parametrize('in_channels', [1, 5])
@pytest.mark.parametrize('out_channels', [1, 7])
@pytest.mark.parametrize('kernel_size,kernel_offset', [(1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (5, 0), (5, 2)])
@pytest.mark.parametrize('with_bias', [False, True])
class TestConv3D:
    """Checks kaolin's sparse SPC conv3d / conv_transpose3d (functional and
    module forms) against dense torch.nn.functional references on randomly
    sparsified feature grids."""

    @pytest.fixture(autouse=True)
    def sparsity_masks(self, batch_size, height, width, depth, threshold):
        # Random boolean occupancy grid; `threshold` controls sparsity.
        return torch.rand(batch_size, height, width, depth,
                          device='cuda') > threshold

    @pytest.fixture(autouse=True)
    def feature_grids(self, sparsity_masks, batch_size, in_channels, height, width, depth):
        # Dense features, zeroed outside the occupancy mask.
        return torch.rand(batch_size, in_channels, height, width, depth,
                          device='cuda') * sparsity_masks.unsqueeze(1)

    @pytest.fixture(autouse=True)
    def kernel_vectors(self, kernel_size, kernel_offset):
        # All integer offsets of the kernel support, shifted by kernel_offset.
        return torch.tensor(
            list(product(range(-kernel_offset, kernel_size - kernel_offset), repeat=3)),
            dtype=torch.int16, device='cuda')

    @pytest.fixture(autouse=True)
    def dense_weight(self, in_channels, out_channels, kernel_size):
        return torch.rand(out_channels, in_channels,
                          kernel_size, kernel_size, kernel_size,
                          device='cuda')

    @pytest.fixture(autouse=True)
    def spc_weight(self, dense_weight, in_channels, out_channels):
        # SPC weight layout: (kernel_volume, in_channels, out_channels).
        return dense_weight.reshape(out_channels, in_channels, -1).permute(2, 1, 0)

    @pytest.fixture(autouse=True)
    def bias(self, with_bias, out_channels):
        if with_bias:
            return torch.rand(out_channels, device='cuda')
        else:
            return None

    @pytest.fixture(autouse=True)
    def octrees_lengths_features(self, feature_grids, sparsity_masks):
        # (octrees, lengths, coalescent features) triple for the sparse grid.
        return spc.feature_grids_to_spc(feature_grids, sparsity_masks)

    @pytest.fixture(autouse=True)
    def octrees(self, octrees_lengths_features):
        return octrees_lengths_features[0]

    @pytest.fixture(autouse=True)
    def lengths(self, octrees_lengths_features):
        return octrees_lengths_features[1]

    @pytest.fixture(autouse=True)
    def coalescent_features(self, octrees_lengths_features):
        return octrees_lengths_features[2]

    @pytest.fixture(autouse=True)
    def max_level_pyramids_exsum(self, octrees, lengths):
        return spc.scan_octrees(octrees, lengths)

    @pytest.fixture(autouse=True)
    def max_level(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[0]

    @pytest.fixture(autouse=True)
    def pyramids(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[1]

    @pytest.fixture(autouse=True)
    def exsum(self, max_level_pyramids_exsum):
        return max_level_pyramids_exsum[2]

    @pytest.fixture(autouse=True)
    def point_hierarchies(self, octrees, pyramids, exsum):
        return spc.generate_points(octrees, pyramids, exsum)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_conv3d(self, height, width, depth, in_channels, out_channels, kernel_size,
                    feature_grids, sparsity_masks, dense_weight, bias,
                    octrees, lengths, coalescent_features, max_level,
                    pyramids, exsum, point_hierarchies,
                    kernel_vectors, kernel_offset, spc_weight, jump, with_spc_to_dict):
        """Forward and backward parity of spc.conv3d with a padded dense conv3d."""
        stride = 2 ** jump
        coalescent_features = coalescent_features.detach()
        coalescent_features.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output_features, output_level = spc.conv3d(
                **input_spc.to_dict(), level=input_spc.max_level, input=coalescent_features,
                weight=spc_weight, kernel_vectors=kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(**input_spc.to_dict(), input=output_features,
                                  level=output_level)
            output_sparsity_masks = spc.to_dense(
                **input_spc.to_dict(),
                input=torch.ones_like(output_features, requires_grad=False),
                level=output_level)
        else:
            output_features, output_level = spc.conv3d(
                octrees, point_hierarchies, max_level, pyramids, exsum, coalescent_features,
                spc_weight, kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features, output_level)
            output_sparsity_masks = spc.to_dense(
                point_hierarchies, pyramids, torch.ones_like(output_features, requires_grad=False),
                output_level)
        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True
        # Dense reference: pad so the dense conv sees the same kernel support.
        padded_input = torch.nn.functional.pad(feature_grids,
                                               (kernel_offset, kernel_size - 1 - kernel_offset,
                                                kernel_offset, kernel_size - 1 - kernel_offset,
                                                kernel_offset, kernel_size - 1 - kernel_offset))
        expected_output = torch.nn.functional.conv3d(padded_input, dense_weight, stride=stride, bias=bias)
        expected_height, expected_width, expected_depth = expected_output.shape[2:]
        # Compare only where the SPC output is defined (occupied cells).
        expected_output *= output_sparsity_masks[:, :, :expected_height, :expected_width, :expected_depth]
        assert torch.allclose(output[:, :, :expected_height, :expected_width, :expected_depth],
                              expected_output)
        grad_output = torch.rand_like(output)
        output.backward(grad_output)
        expected_output.backward(grad_output[:, :, :expected_height, :expected_width, :expected_depth])
        _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad, sparsity_masks)
        assert torch.allclose(coalescent_features.grad, sparsified_grad)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(out_channels, in_channels, -1).permute(2, 1, 0),
                              rtol=1e-3, atol=1e-3)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_conv_transpose3d(self, height, width, depth, in_channels, out_channels,
                              sparsity_masks, dense_weight, bias,
                              octrees, lengths, max_level, pyramids, exsum, point_hierarchies,
                              kernel_vectors, kernel_size, kernel_offset, spc_weight, jump,
                              with_spc_to_dict):
        """Forward and backward parity of spc.conv_transpose3d with dense
        conv_transpose3d, starting from a coarser level (max_level - jump)."""
        stride = 2 ** jump
        if stride > kernel_size:
            pytest.skip('stride higher than kernel_size is not tested')
        out_sparsity_masks = sparsity_masks
        in_level = max_level - jump
        in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
        coalescent_features = torch.rand((in_num_nodes, in_channels), device='cuda',
                                         requires_grad=True)
        dense_weight = dense_weight.detach()
        dense_weight.requires_grad = True
        spc_weight = spc_weight.detach()
        spc_weight.requires_grad = True
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            feature_grids = spc.to_dense(**input_spc.to_dict(), input=coalescent_features,
                                         level=in_level)
        else:
            feature_grids = spc.to_dense(point_hierarchies, pyramids, coalescent_features, in_level)
        feature_grids = feature_grids[:, :, :math.ceil(height / stride),
                                      :math.ceil(width / stride), :math.ceil(depth / stride)]
        feature_grids = feature_grids.detach()
        feature_grids.requires_grad = True
        if with_spc_to_dict:
            sparsity_masks = spc.to_dense(
                **input_spc.to_dict(), input=torch.ones_like(coalescent_features),
                level=in_level).bool()
        else:
            sparsity_masks = spc.to_dense(point_hierarchies, pyramids,
                                          torch.ones_like(coalescent_features),
                                          in_level).bool()
        sparsity_masks = sparsity_masks[:, 0, :math.ceil(height / stride),
                                        :math.ceil(width / stride), :math.ceil(depth / stride)]
        # test forward
        if with_spc_to_dict:
            output_features, output_level = spc.conv_transpose3d(
                **input_spc.to_dict(), level=in_level, input=coalescent_features,
                weight=spc_weight, kernel_vectors=kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(**input_spc.to_dict(), input=output_features, level=output_level)
        else:
            output_features, output_level = spc.conv_transpose3d(
                octrees, point_hierarchies, in_level, pyramids, exsum,
                coalescent_features,
                spc_weight, kernel_vectors, jump=jump, bias=bias)
            output = spc.to_dense(point_hierarchies, pyramids, output_features, output_level)
        output = output[:, :, :height, :width, :depth]
        expected_output = torch.nn.functional.conv_transpose3d(
            feature_grids, dense_weight.permute(1, 0, 2, 3, 4),
            stride=stride, bias=bias,
            output_padding=stride - 1)[:, :,
                                       kernel_offset:height + kernel_offset,
                                       kernel_offset:width + kernel_offset,
                                       kernel_offset:depth + kernel_offset]
        expected_output *= out_sparsity_masks.unsqueeze(1)
        assert output_level == max_level
        assert torch.allclose(output, expected_output)
        # test backward
        grad_out = torch.rand_like(expected_output)
        expected_output.backward(grad_out)
        output.backward(grad_out)
        _, _, sparsified_grad = spc.feature_grids_to_spc(feature_grids.grad, sparsity_masks)
        assert torch.allclose(coalescent_features.grad, sparsified_grad)
        assert torch.allclose(spc_weight.grad,
                              dense_weight.grad.reshape(out_channels, in_channels, -1).permute(2, 1, 0),
                              rtol=1e-3, atol=1e-3)

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_module_conv3d(self, height, width, depth, in_channels, out_channels, with_bias,
                           octrees, lengths, coalescent_features, max_level, pyramids, exsum,
                           point_hierarchies, kernel_vectors, jump, with_spc_to_dict):
        """spc.Conv3d module: parameter/buffer layout, repr, and equivalence
        with the functional spc.conv3d."""
        conv = spc.Conv3d(in_channels, out_channels, kernel_vectors,
                          jump, bias=with_bias).cuda()
        params = dict(conv.named_parameters())
        weight = params['weight']
        check_tensor(weight, shape=(kernel_vectors.shape[0],
                                    in_channels, out_channels),
                     dtype=torch.float, device='cuda')
        if with_bias:
            assert len(params) == 2
            bias = params['bias']
            check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                         device='cuda')
        else:
            assert len(params) == 1
            bias = None
        buffers = dict(conv.named_buffers())
        assert len(buffers) == 1
        assert torch.equal(buffers['kernel_vectors'], kernel_vectors)
        assert repr(conv) == f'Conv3d(in={in_channels}, out={out_channels}, ' \
                             f'kernel_vector_size={kernel_vectors.shape[0]})'
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output, output_level = conv(**input_spc.to_dict(), level=max_level,
                                        input=coalescent_features)
        else:
            output, output_level = conv(
                octrees, point_hierarchies, max_level, pyramids, exsum,
                coalescent_features)
        expected_output, expected_output_level = spc.conv3d(
            octrees, point_hierarchies, max_level, pyramids, exsum, coalescent_features,
            weight, kernel_vectors, jump=jump, bias=bias)
        assert torch.equal(output, expected_output)
        assert output_level == expected_output_level

    @pytest.mark.parametrize('with_spc_to_dict', [False, True])
    @pytest.mark.parametrize('jump', [0, 1, 2])
    def test_module_conv_transpose3d(self, height, width, depth, in_channels, out_channels, with_bias,
                                     octrees, lengths, max_level, pyramids, exsum, point_hierarchies,
                                     kernel_size, kernel_vectors, jump, with_spc_to_dict):
        """spc.ConvTranspose3d module: parameter/buffer layout, repr, and
        equivalence with the functional spc.conv_transpose3d."""
        stride = 2 ** jump
        if stride > kernel_size:
            pytest.skip('stride higher than kernel_size is not tested')
        in_level = max_level - jump
        in_num_nodes = torch.sum(pyramids[:, 0, -(2 + jump)])
        coalescent_features = torch.rand((in_num_nodes, in_channels), device='cuda',
                                         requires_grad=True)
        conv = spc.ConvTranspose3d(in_channels, out_channels, kernel_vectors,
                                   jump, bias=with_bias).cuda()
        params = dict(conv.named_parameters())
        weight = params['weight']
        check_tensor(weight, shape=(kernel_vectors.shape[0],
                                    in_channels, out_channels),
                     dtype=torch.float, device='cuda')
        if with_bias:
            assert len(params) == 2
            bias = params['bias']
            check_tensor(bias, shape=(out_channels,), dtype=torch.float,
                         device='cuda')
        else:
            assert len(params) == 1
            bias = None
        buffers = dict(conv.named_buffers())
        assert len(buffers) == 1
        assert torch.equal(buffers['kernel_vectors'], kernel_vectors)
        assert repr(conv) == f'ConvTranspose3d(in={in_channels}, ' \
                             f'out={out_channels}, ' \
                             f'kernel_vector_size={kernel_vectors.shape[0]})'
        if with_spc_to_dict:
            input_spc = Spc(octrees, lengths)
            output, output_level = conv(**input_spc.to_dict(), level=in_level,
                                        input=coalescent_features)
        else:
            output, output_level = conv(
                octrees, point_hierarchies, in_level, pyramids, exsum,
                coalescent_features)
        expected_output, expected_output_level = spc.conv_transpose3d(
            octrees, point_hierarchies, in_level, pyramids, exsum, coalescent_features,
            weight, kernel_vectors, jump=jump, bias=bias)
        assert torch.equal(output, expected_output)
        assert output_level == expected_output_level
| 47.608187 | 111 | 0.622774 |
56e204768555680b36be83c2e67205a958fcfff9 | 21,930 | py | Python | electrum_ltc/tests/test_blockchain.py | Alcofribas4/electrum-ltc-satochip | e78eba47654d96b3d445c1ecad04de6a74bce153 | [
"MIT"
] | null | null | null | electrum_ltc/tests/test_blockchain.py | Alcofribas4/electrum-ltc-satochip | e78eba47654d96b3d445c1ecad04de6a74bce153 | [
"MIT"
] | 1 | 2021-06-02T00:17:09.000Z | 2021-06-02T00:17:09.000Z | electrum_ltc/tests/test_blockchain.py | geekfil/electrum-ltc | 054347e6d6c03d823d361469772726a68ff459be | [
"MIT"
] | null | null | null | import shutil
import tempfile
import os
from electrum_ltc import constants, blockchain
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.blockchain import Blockchain, deserialize_header, hash_header
from electrum_ltc.util import bh2u, bfh, make_dir
from . import SequentialTestCase
class TestBlockchain(SequentialTestCase):
HEADERS = {
'A': deserialize_header(bfh("010000000000000000000000000000000000000000000000000000000000000000000000d9ced4ed1130f7b7faad9be25323ffafa33232a17c3edf6cfd97bee6bafbdd97dae5494dffff7f2000000000"), 0),
'B': deserialize_header(bfh("00000020f916c456fc51df627885d7d674ed02dc88a225adb3f02ad13eb4938ff3270853186c8dfd970a4545f79916bc1d75c9d00432f57c89209bf3bb115b7612848f509c25f45bffff7f2005000000"), 1),
'C': deserialize_header(bfh("00000020e9078937b6b92a74120d9a8876475a6e97227e59b54cf05f87e24eb8b0a7199bbf2cbf153013a1c54abaf70e95198fcef2f3059cc6b4d0f7e876808e7d24d11cc825f45bffff7f2000000000"), 2),
'D': deserialize_header(bfh("0000002081e2f5ea4e64d6370c6334a78dc8c128bbc3388ae5be3ec434b61d19b2b26903e71019d7feecd9b8596eca9a67032c5f4641b23b5d731dc393e37de7f9c2f299e725f45bffff7f2001000000"), 3),
'E': deserialize_header(bfh("00000020c7c8ca692fade08a253136051e07c62bb0d76af97aa47945bd28335360e91338a3586da94c71753f27c075f57f44faf913c31177a0957bbda42e7699e3a2141aed25f45bffff7f2001000000"), 4),
'F': deserialize_header(bfh("00000020c8e83c4c4dc2a38820e8c330eda47aa84eb82722ce1e3a649b8b202501db40bc7aee1d692d1615c3bdf52c291032144ce9e3b258a473c17c745047f3431ff8e2ee25f45bffff7f2000000000"), 5),
'O': deserialize_header(bfh("000000209acbe22912d4a4e67a39d7779f04549c724be5f8e081955cce786290081a79903a141ce635cbb1cd2b3a4fcdd0a3380517845ba41736c82a79cab535d31128066526f45bffff7f2001000000"), 6),
'P': deserialize_header(bfh("0000002018cca0f1541812329cec7f75e7c13922a5b9976801a320b0d8174846a6285aa09690c2fe7c1a4450c74dc908fe94dd96c3b0637d51475e9e06a78e944a0c7fe28126f45bffff7f2002000000"), 7),
'Q': deserialize_header(bfh("000000202fb59385b4e743696bffaa4cf2338202822e446db933ae456b924660d6f69b78148be228a4c3f2061bafe7efdfc4a8d5a94759464b9b5c619994d45dfcaf49e1a126f45bffff7f2002000000"), 8),
'R': deserialize_header(bfh("00000020778597da18ab4664f4543c8b27d601aec685073ffeccfb2d7950088602a1f17a15681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a462555822552221a626f45bffff7f2001000000"), 9),
'S': deserialize_header(bfh("00000020f69aceedf7013f73fe9d508d1e4df9d89700e18b07a2ea1fa8fd19367a07d2af9dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548fab26f45bffff7f2000000000"), 10),
'T': deserialize_header(bfh("0000002042a4bf62d587d353871034d5128c7ef12479012586bd535d159e1d0b5d3e387f03b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe064b026f45bffff7f2000000000"), 11),
'U': deserialize_header(bfh("0000002034f706a01b82ea66aa869a887bf25bbed0dfc0f0f3840994446f1e4fd8f58f7dd67cb902a7d807cee7676cb543feec3e053aa824d5dfb528d5b94f9760313d9db726f45bffff7f2001000000"), 12),
'G': deserialize_header(bfh("000000209acbe22912d4a4e67a39d7779f04549c724be5f8e081955cce786290081a79903a141ce635cbb1cd2b3a4fcdd0a3380517845ba41736c82a79cab535d31128066928f45bffff7f2001000000"), 6),
'H': deserialize_header(bfh("000000205b976fbe6fccb4c67de1a081747bb888a0cb486b06d0203f76b9b3916cf46d839690c2fe7c1a4450c74dc908fe94dd96c3b0637d51475e9e06a78e944a0c7fe26a28f45bffff7f2000000000"), 7),
'I': deserialize_header(bfh("000000206c767e525915ac216be783dbc4554ac569a121ccc4c5dac8abe521dae7eac670148be228a4c3f2061bafe7efdfc4a8d5a94759464b9b5c619994d45dfcaf49e16a28f45bffff7f2000000000"), 8),
'J': deserialize_header(bfh("00000020bfa64ff6b96eb438d24c32f2ca27a96d8e20b23671577dce2b37b3a815e9739615681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a462555822552221c928f45bffff7f2000000000"), 9),
'K': deserialize_header(bfh("00000020b9e0539dedc1177c8f0cb6c90b6afa6953a67e92932cb9852529bd211a9ec4599dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548fca28f45bffff7f2000000000"), 10),
'L': deserialize_header(bfh("000000206ac59045b5e3b8ec016cb5a56780c0346fb79454b62e95a63c426fb16bb01dc503b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe064ca28f45bffff7f2000000000"), 11),
'M': deserialize_header(bfh("00000020bfa64ff6b96eb438d24c32f2ca27a96d8e20b23671577dce2b37b3a815e9739615681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a4625558225522214229f45bffff7f2000000000"), 9),
'N': deserialize_header(bfh("000000208a469366884904d3f6b51dc44098335404dbe7092f1dc824bcd8608c122b8e299dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548f4329f45bffff7f2001000000"), 10),
'X': deserialize_header(bfh("00000020b381f50227543a4feea529064fbb654fd3ce9f251c978ee4168cd3c9f41068cb03b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe0649b29f45bffff7f2001000000"), 11),
'Y': deserialize_header(bfh("00000020b2c2c09de3206a17c4fd5ec3f7e1e4b4c339f1df94e1498be161ca15df0b6ca4d67cb902a7d807cee7676cb543feec3e053aa824d5dfb528d5b94f9760313d9d9b29f45bffff7f2004000000"), 12),
'Z': deserialize_header(bfh("000000202c5fda8478f58b64cdd57b405929b423158c4913374ae1645c56093aad15febb0f2596c29203f8a0f71ae94193092dc8f113be3dbee4579f1e649fa3d6dcc38c622ef45bffff7f2000000000"), 13),
}
# tree of headers:
# - M <- N <- X <- Y <- Z
# /
# - G <- H <- I <- J <- K <- L
# /
# A <- B <- C <- D <- E <- F <- O <- P <- Q <- R <- S <- T <- U
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_regtest()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
def setUp(self):
super().setUp()
self.data_dir = tempfile.mkdtemp()
make_dir(os.path.join(self.data_dir, 'forks'))
self.config = SimpleConfig({'electrum_path': self.data_dir})
blockchain.blockchains = {}
def tearDown(self):
super().tearDown()
shutil.rmtree(self.data_dir)
def _append_header(self, chain: Blockchain, header: dict):
self.assertTrue(chain.can_connect(header))
chain.save_header(header)
def test_get_height_of_last_common_block_with_chain(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
self._append_header(chain_l, self.HEADERS['K'])
self._append_header(chain_l, self.HEADERS['L'])
self.assertEqual({chain_u: 8, chain_l: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11}, chain_l.get_parent_heights())
chain_z = chain_l.fork(self.HEADERS['M'])
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self._append_header(chain_z, self.HEADERS['Y'])
self._append_header(chain_z, self.HEADERS['Z'])
self.assertEqual({chain_u: 8, chain_z: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11, chain_z: 8}, chain_l.get_parent_heights())
self.assertEqual({chain_z: 13}, chain_z.get_parent_heights())
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_l))
self.assertEqual(5, chain_l.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(5, chain_z.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(8, chain_l.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(8, chain_z.get_height_of_last_common_block_with_chain(chain_l))
self._append_header(chain_u, self.HEADERS['R'])
self._append_header(chain_u, self.HEADERS['S'])
self._append_header(chain_u, self.HEADERS['T'])
self._append_header(chain_u, self.HEADERS['U'])
self.assertEqual({chain_u: 12, chain_z: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11, chain_z: 8}, chain_l.get_parent_heights())
self.assertEqual({chain_z: 13}, chain_z.get_parent_heights())
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_l))
self.assertEqual(5, chain_l.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(5, chain_z.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(8, chain_l.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(8, chain_z.get_height_of_last_common_block_with_chain(chain_l))
def test_parents_after_forking(self):
    """Check that each chain's ``parent`` pointer is re-targeted when a
    longer fork is swapped in as the best chain.

    Header letters refer to the fixture headers in ``self.HEADERS``;
    chain_u/chain_l/chain_z are three competing forks of the same history.
    """
    # build the initial best chain from genesis: A..F then O, P, Q
    blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
        config=self.config, forkpoint=0, parent=None,
        forkpoint_hash=constants.net.GENESIS, prev_hash=None)
    # create an empty headers file on disk for the root chain
    open(chain_u.path(), 'w+').close()
    self._append_header(chain_u, self.HEADERS['A'])
    self._append_header(chain_u, self.HEADERS['B'])
    self._append_header(chain_u, self.HEADERS['C'])
    self._append_header(chain_u, self.HEADERS['D'])
    self._append_header(chain_u, self.HEADERS['E'])
    self._append_header(chain_u, self.HEADERS['F'])
    self._append_header(chain_u, self.HEADERS['O'])
    self._append_header(chain_u, self.HEADERS['P'])
    self._append_header(chain_u, self.HEADERS['Q'])
    # root chain has no parent
    self.assertEqual(None, chain_u.parent)
    # fork chain_l at G and extend it past chain_u's tip; chain_l becomes
    # the best chain, so the parent relationship is inverted
    chain_l = chain_u.fork(self.HEADERS['G'])
    self._append_header(chain_l, self.HEADERS['H'])
    self._append_header(chain_l, self.HEADERS['I'])
    self._append_header(chain_l, self.HEADERS['J'])
    self._append_header(chain_l, self.HEADERS['K'])
    self._append_header(chain_l, self.HEADERS['L'])
    self.assertEqual(None, chain_l.parent)
    self.assertEqual(chain_l, chain_u.parent)
    # fork chain_z at M and make it the longest; both other chains must
    # now point at chain_z as their parent
    chain_z = chain_l.fork(self.HEADERS['M'])
    self._append_header(chain_z, self.HEADERS['N'])
    self._append_header(chain_z, self.HEADERS['X'])
    self._append_header(chain_z, self.HEADERS['Y'])
    self._append_header(chain_z, self.HEADERS['Z'])
    self.assertEqual(chain_z, chain_u.parent)
    self.assertEqual(chain_z, chain_l.parent)
    self.assertEqual(None, chain_z.parent)
    # extending chain_u again (R..U) must not disturb the parent layout
    # while chain_z remains best
    self._append_header(chain_u, self.HEADERS['R'])
    self._append_header(chain_u, self.HEADERS['S'])
    self._append_header(chain_u, self.HEADERS['T'])
    self._append_header(chain_u, self.HEADERS['U'])
    self.assertEqual(chain_z, chain_u.parent)
    self.assertEqual(chain_z, chain_l.parent)
    self.assertEqual(None, chain_z.parent)
def test_forking_and_swapping(self):
    """Exercise fork creation and the on-disk swap of fork files against
    the main ``blockchain_headers`` file.

    Verifies forkpoints, parent pointers, fork-file paths/sizes (80 bytes
    per header) and ``get_hash`` across three competing chains.
    """
    # initial best chain from genesis: A..F, O, P, Q, R (10 headers)
    blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
        config=self.config, forkpoint=0, parent=None,
        forkpoint_hash=constants.net.GENESIS, prev_hash=None)
    # create an empty headers file on disk for the root chain
    open(chain_u.path(), 'w+').close()
    self._append_header(chain_u, self.HEADERS['A'])
    self._append_header(chain_u, self.HEADERS['B'])
    self._append_header(chain_u, self.HEADERS['C'])
    self._append_header(chain_u, self.HEADERS['D'])
    self._append_header(chain_u, self.HEADERS['E'])
    self._append_header(chain_u, self.HEADERS['F'])
    self._append_header(chain_u, self.HEADERS['O'])
    self._append_header(chain_u, self.HEADERS['P'])
    self._append_header(chain_u, self.HEADERS['Q'])
    self._append_header(chain_u, self.HEADERS['R'])
    # fork at height 6 with G..J; chain_u is still the longer chain
    chain_l = chain_u.fork(self.HEADERS['G'])
    self._append_header(chain_l, self.HEADERS['H'])
    self._append_header(chain_l, self.HEADERS['I'])
    self._append_header(chain_l, self.HEADERS['J'])
    # do checks
    self.assertEqual(2, len(blockchain.blockchains))
    self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    self.assertEqual(0, chain_u.forkpoint)
    self.assertEqual(None, chain_u.parent)
    self.assertEqual(constants.net.GENESIS, chain_u._forkpoint_hash)
    self.assertEqual(None, chain_u._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_u.path())
    self.assertEqual(10 * 80, os.stat(chain_u.path()).st_size)
    self.assertEqual(6, chain_l.forkpoint)
    self.assertEqual(chain_u, chain_l.parent)
    self.assertEqual(hash_header(self.HEADERS['G']), chain_l._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['F']), chain_l._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_90791a08906278ce5c9581e0f8e54b729c54049f77d7397ae6a4d41229e2cb9a_836df46c91b3b9763f20d0066b48cba088b87b7481a0e17dc6b4cc6fbe6f975b"), chain_l.path())
    self.assertEqual(4 * 80, os.stat(chain_l.path()).st_size)
    # appending K makes chain_l longer than chain_u, triggering a swap:
    # chain_l takes over the main headers file and chain_u moves to a fork file
    self._append_header(chain_l, self.HEADERS['K'])
    # chains were swapped, do checks
    self.assertEqual(2, len(blockchain.blockchains))
    self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    self.assertEqual(6, chain_u.forkpoint)
    self.assertEqual(chain_l, chain_u.parent)
    self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_90791a08906278ce5c9581e0f8e54b729c54049f77d7397ae6a4d41229e2cb9a_a05a28a6464817d8b020a3016897b9a52239c1e7757fec9c32121854f1a0cc18"), chain_u.path())
    self.assertEqual(4 * 80, os.stat(chain_u.path()).st_size)
    self.assertEqual(0, chain_l.forkpoint)
    self.assertEqual(None, chain_l.parent)
    self.assertEqual(constants.net.GENESIS, chain_l._forkpoint_hash)
    self.assertEqual(None, chain_l._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_l.path())
    self.assertEqual(11 * 80, os.stat(chain_l.path()).st_size)
    # every stored header must still connect to its predecessor
    for b in (chain_u, chain_l):
        self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
    self._append_header(chain_u, self.HEADERS['S'])
    self._append_header(chain_u, self.HEADERS['T'])
    self._append_header(chain_u, self.HEADERS['U'])
    self._append_header(chain_l, self.HEADERS['L'])
    # third fork chain_z at M, extended until it out-grows both others
    chain_z = chain_l.fork(self.HEADERS['M'])
    self._append_header(chain_z, self.HEADERS['N'])
    self._append_header(chain_z, self.HEADERS['X'])
    self._append_header(chain_z, self.HEADERS['Y'])
    self._append_header(chain_z, self.HEADERS['Z'])
    # chain_z became best chain, do checks
    self.assertEqual(3, len(blockchain.blockchains))
    self.assertEqual(2, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    self.assertEqual(0, chain_z.forkpoint)
    self.assertEqual(None, chain_z.parent)
    self.assertEqual(constants.net.GENESIS, chain_z._forkpoint_hash)
    self.assertEqual(None, chain_z._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_z.path())
    self.assertEqual(14 * 80, os.stat(chain_z.path()).st_size)
    self.assertEqual(9, chain_l.forkpoint)
    self.assertEqual(chain_z, chain_l.parent)
    self.assertEqual(hash_header(self.HEADERS['J']), chain_l._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['I']), chain_l._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_9_9673e915a8b3372bce7d577136b2208e6da927caf2324cd238b46eb9f64fa6bf_59c49e1a21bd292585b92c93927ea65369fa6a0bc9b60c8f7c17c1ed9d53e0b9"), chain_l.path())
    self.assertEqual(3 * 80, os.stat(chain_l.path()).st_size)
    self.assertEqual(6, chain_u.forkpoint)
    self.assertEqual(chain_z, chain_u.parent)
    self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_90791a08906278ce5c9581e0f8e54b729c54049f77d7397ae6a4d41229e2cb9a_a05a28a6464817d8b020a3016897b9a52239c1e7757fec9c32121854f1a0cc18"), chain_u.path())
    self.assertEqual(7 * 80, os.stat(chain_u.path()).st_size)
    for b in (chain_u, chain_l, chain_z):
        self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
    # get_hash on the best chain must resolve heights across fork boundaries
    self.assertEqual(constants.net.GENESIS, chain_z.get_hash(0))
    self.assertEqual(hash_header(self.HEADERS['F']), chain_z.get_hash(5))
    self.assertEqual(hash_header(self.HEADERS['G']), chain_z.get_hash(6))
    self.assertEqual(hash_header(self.HEADERS['I']), chain_z.get_hash(8))
    self.assertEqual(hash_header(self.HEADERS['M']), chain_z.get_hash(9))
    self.assertEqual(hash_header(self.HEADERS['Z']), chain_z.get_hash(13))
def test_doing_multiple_swaps_after_single_new_header(self):
    """A single appended header can cascade through several chain swaps.

    chain_u and chain_l are first tied in length; appending one more
    header to the new fork chain_z makes it the best chain, which must
    demote both of the others to fork files in one step.
    """
    # initial best chain from genesis: A..F, O, P, Q, R, S (11 headers)
    blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
        config=self.config, forkpoint=0, parent=None,
        forkpoint_hash=constants.net.GENESIS, prev_hash=None)
    # create an empty headers file on disk for the root chain
    open(chain_u.path(), 'w+').close()
    self._append_header(chain_u, self.HEADERS['A'])
    self._append_header(chain_u, self.HEADERS['B'])
    self._append_header(chain_u, self.HEADERS['C'])
    self._append_header(chain_u, self.HEADERS['D'])
    self._append_header(chain_u, self.HEADERS['E'])
    self._append_header(chain_u, self.HEADERS['F'])
    self._append_header(chain_u, self.HEADERS['O'])
    self._append_header(chain_u, self.HEADERS['P'])
    self._append_header(chain_u, self.HEADERS['Q'])
    self._append_header(chain_u, self.HEADERS['R'])
    self._append_header(chain_u, self.HEADERS['S'])
    self.assertEqual(1, len(blockchain.blockchains))
    self.assertEqual(0, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    chain_l = chain_u.fork(self.HEADERS['G'])
    self._append_header(chain_l, self.HEADERS['H'])
    self._append_header(chain_l, self.HEADERS['I'])
    self._append_header(chain_l, self.HEADERS['J'])
    self._append_header(chain_l, self.HEADERS['K'])
    # now chain_u is best chain, but it's tied with chain_l
    self.assertEqual(2, len(blockchain.blockchains))
    self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    # forking chain_z off chain_l and appending N, X makes it the longest
    chain_z = chain_l.fork(self.HEADERS['M'])
    self._append_header(chain_z, self.HEADERS['N'])
    self._append_header(chain_z, self.HEADERS['X'])
    self.assertEqual(3, len(blockchain.blockchains))
    self.assertEqual(2, len(os.listdir(os.path.join(self.data_dir, "forks"))))
    # chain_z became best chain, do checks
    self.assertEqual(0, chain_z.forkpoint)
    self.assertEqual(None, chain_z.parent)
    self.assertEqual(constants.net.GENESIS, chain_z._forkpoint_hash)
    self.assertEqual(None, chain_z._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_z.path())
    self.assertEqual(12 * 80, os.stat(chain_z.path()).st_size)
    self.assertEqual(9, chain_l.forkpoint)
    self.assertEqual(chain_z, chain_l.parent)
    self.assertEqual(hash_header(self.HEADERS['J']), chain_l._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['I']), chain_l._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_9_9673e915a8b3372bce7d577136b2208e6da927caf2324cd238b46eb9f64fa6bf_59c49e1a21bd292585b92c93927ea65369fa6a0bc9b60c8f7c17c1ed9d53e0b9"), chain_l.path())
    self.assertEqual(2 * 80, os.stat(chain_l.path()).st_size)
    self.assertEqual(6, chain_u.forkpoint)
    self.assertEqual(chain_z, chain_u.parent)
    self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
    self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
    self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_90791a08906278ce5c9581e0f8e54b729c54049f77d7397ae6a4d41229e2cb9a_a05a28a6464817d8b020a3016897b9a52239c1e7757fec9c32121854f1a0cc18"), chain_u.path())
    self.assertEqual(5 * 80, os.stat(chain_u.path()).st_size)
    # height resolution on the new best chain spans both old forkpoints
    self.assertEqual(constants.net.GENESIS, chain_z.get_hash(0))
    self.assertEqual(hash_header(self.HEADERS['F']), chain_z.get_hash(5))
    self.assertEqual(hash_header(self.HEADERS['G']), chain_z.get_hash(6))
    self.assertEqual(hash_header(self.HEADERS['I']), chain_z.get_hash(8))
    self.assertEqual(hash_header(self.HEADERS['M']), chain_z.get_hash(9))
    self.assertEqual(hash_header(self.HEADERS['X']), chain_z.get_hash(11))
    for b in (chain_u, chain_l, chain_z):
        self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
| 64.122807 | 219 | 0.732558 |
7b6479e3edfa67adc171566d59eaba83b33216ed | 7,355 | py | Python | tern/__main__.py | aditi137/tern | 2c216e52bb6fc45842fb664881ae3a049be1fd1a | [
"BSD-2-Clause"
] | null | null | null | tern/__main__.py | aditi137/tern | 2c216e52bb6fc45842fb664881ae3a049be1fd1a | [
"BSD-2-Clause"
] | null | null | null | tern/__main__.py | aditi137/tern | 2c216e52bb6fc45842fb664881ae3a049be1fd1a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Tern executable
"""
import argparse
import logging
import os
import sys
from tern.analyze.docker import run
from tern.utils import cache
from tern.utils import constants
from tern.utils import general
from tern.utils import rootfs
from tern.report import errors
# global logger
from tern.utils.general import check_image_string
logger = logging.getLogger(constants.logger_name)
logger.setLevel(logging.DEBUG)
# console stream handler
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(module)s - %(message)s')
log_handler = logging.FileHandler(constants.logfile, mode='w')
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(formatter)
console.setFormatter(formatter)
logger.addHandler(log_handler)
def check_file_existence(path):
    """argparse type-callback: return *path* unchanged if it is an
    existing regular file, otherwise raise ArgumentTypeError so argparse
    reports a usage error."""
    if os.path.isfile(path):
        return path
    raise argparse.ArgumentTypeError("{}: does not exist".format(path))
def get_version():
    """Return the version string for the --version command line option.

    Uses the package version when installed from a package, otherwise
    the current git commit.
    """
    ver_type, commit_or_ver = general.get_git_rev_or_version()
    if ver_type == "package":
        return "Tern version {}".format(commit_or_ver)
    return "Tern at commit {}".format(commit_or_ver)
def create_top_dir(working_dir=None):
    """Create the top level working directory.

    :param working_dir: optional base directory; resolved to the actual
        top-level path by general.get_top_dir().

    Uses makedirs(exist_ok=True) instead of an isdir() pre-check to avoid
    the check-then-create race when several processes start concurrently.
    """
    top_dir = general.get_top_dir(working_dir)
    os.makedirs(top_dir, exist_ok=True)
def do_main(args):
    '''Execute according to subcommands

    :param args: argparse.Namespace produced by main(); top-level flags
        plus, for the 'report' subcommand, the image/report options.
    '''
    # set bind mount location if working in a container
    rootfs.set_mount_dir(args.bind_mount, args.working_dir)
    # create working directory
    create_top_dir(args.working_dir)
    if args.log_stream:
        # set up console logs
        global logger
        global console
        logger.addHandler(console)
    logger.debug('Starting...')
    if args.clear_cache:
        logger.debug('Clearing cache...')
        cache.clear()
    # 'name' is only set when a subcommand was chosen on the command line
    if hasattr(args, 'name') and args.name == 'report':
        if args.dockerfile:
            run.execute_dockerfile(args)
        if args.docker_image:
            # Check if the image is of image:tag
            # or image@digest_type:digest format
            if not check_image_string(args.docker_image):
                sys.stderr.write('Error running Tern\n'
                                 'Please provide docker image '
                                 'string in image:tag or '
                                 'image@digest_type:digest format\n')
                sys.exit(1)
            # a tar archive must be analyzed via --raw-image, not -i
            if general.check_tar(args.docker_image):
                logger.error("%s", errors.incorrect_raw_option)
            else:
                run.execute_docker_image(args)
                logger.debug('Report completed.')
        if args.raw_image:
            # --raw-image only accepts a tar archive of a container image
            if not general.check_tar(args.raw_image):
                logger.error("%s", errors.invalid_raw_image.format(
                    image=args.raw_image))
            else:
                run.execute_docker_image(args)
                logger.debug('Report completed.')
    logger.debug('Finished')
def main():
    """Build the Tern command-line interface, parse arguments and dispatch.

    Defines the top-level options (logging, cache, working directory, ...)
    and the 'report' subcommand, then hands the parsed namespace to
    do_main(). Refuses to run under Python 2.

    Fix: the -wd/--working-dir help text was built from two adjacent
    string literals with no separating space, rendering as
    "specifiedabsolute path."; a space has been added.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        prog='Tern',
        description='''
Tern is a container image component curation tool. Tern retrieves
information about packages that are installed in a container image.
Learn more at https://github.com/vmware/tern''')
    parser.add_argument('-l', '--log-stream', action='store_true',
                        help="Stream logs to the console; "
                        "Useful when running in a shell")
    parser.add_argument('-c', '--clear-cache', action='store_true',
                        help="Clear the cache before running")
    parser.add_argument('-k', '--keep-wd', action='store_true',
                        help="Keep the working directory after execution."
                        " Useful when debugging container images")
    parser.add_argument('-b', '--bind-mount', metavar='BIND_DIR',
                        help="Absolute path to bind mount target. Needed"
                        " when running from within a container.")
    parser.add_argument('-r', '--redo', action='store_true',
                        help="Repopulate the cache for found layers")
    parser.add_argument('-wd', '--working-dir', metavar='PATH',
                        help="Change default working directory to specified "
                        "absolute path.")
    # sys.version gives more information than we care to print
    py_ver = sys.version.replace('\n', '').split('[')[0]
    parser.add_argument('-v', '--version', action='version',
                        version="{ver_str}\n   python version = {py_v}".format(
                            ver_str=get_version(), py_v=py_ver))
    subparsers = parser.add_subparsers(help='Subcommands')
    # subparser for report
    parser_report = subparsers.add_parser('report',
                                          help="Create a BoM report."
                                          " Run 'tern report -h' for"
                                          " report format options.")
    parser_report.add_argument('-d', '--dockerfile', type=check_file_existence,
                               help="Dockerfile used to build the Docker"
                               " image")
    parser_report.add_argument('-i', '--docker-image',
                               help="Docker image that exists locally -"
                               " image:tag"
                               " The option can be used to pull docker"
                               " images by digest as well -"
                               " <repo>@<digest-type>:<digest>")
    parser_report.add_argument('-w', '--raw-image', metavar='FILE',
                               help="Raw container image that exists locally "
                               "in the form of a tar archive.")
    parser_report.add_argument('-x', '--extend', metavar='EXTENSION',
                               help="Use an extension to analyze a container "
                               "image. Available extensions: cve-bin-tool")
    parser_report.add_argument('-f', '--report-format',
                               metavar='REPORT_FORMAT',
                               help="Format the report using one of the "
                               "available formats: "
                               "spdxtagvalue, json, yaml")
    parser_report.add_argument('-o', '--output-file', default=None,
                               metavar='FILE',
                               help="Write the report to a file. "
                               "If no file is given the default file in "
                               "utils/constants.py will be used")
    # marker used by do_main() to detect that 'report' was selected
    parser_report.set_defaults(name='report')
    args = parser.parse_args()
    # execute
    if sys.version_info < (3, 0):
        sys.stderr.write("Error running Tern. Please check that python3 "
                         "is configured as default.\n")
    else:
        do_main(args)
| 39.331551 | 79 | 0.579062 |
9989b2fd1e776780d8659980cea098db18295dbb | 12,967 | py | Python | insights/parsers/tests/test_modinfo.py | zerodayz/insights-core | fedecee56762282404415d94d492e3c4ef0bc731 | [
"Apache-2.0"
] | null | null | null | insights/parsers/tests/test_modinfo.py | zerodayz/insights-core | fedecee56762282404415d94d492e3c4ef0bc731 | [
"Apache-2.0"
] | null | null | null | insights/parsers/tests/test_modinfo.py | zerodayz/insights-core | fedecee56762282404415d94d492e3c4ef0bc731 | [
"Apache-2.0"
] | null | null | null | import doctest
import pytest
from insights.parsers import modinfo, ParseException, SkipException
from insights.parsers.modinfo import ModInfoI40e, ModInfoVmxnet3, ModInfoIgb, ModInfoIxgbe, ModInfoVeth
from insights.tests import context_wrap
# --- Test fixtures: captured `modinfo <driver>` output, one constant per
# --- driver. Each is a verbatim sample that the parsers under test consume.

# i40e NIC driver: two firmware lines, two aliases, two parm lines
MODINFO_I40E = """
filename: /lib/modules/3.10.0-993.el7.x86_64/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.xz
firmware: i40e/i40e-e2-7.13.1.0.fw
firmware: i40e/i40e-e1h-7.13.1.0.fw
version: 2.3.2-k
license: GPL
description: Intel(R) Ethernet Connection XL710 Network Driver
author: Intel Corporation, <e1000-devel@lists.sourceforge.net>
retpoline: Y
rhelversion: 7.7
srcversion: DC5C250666ADD8603966656
alias: pci:v00008086d0000158Bsv*sd*bc*sc*i*
alias: pci:v00008086d0000158Asv*sd*bc*sc*i*
depends: ptp
intree: Y
vermagic: 3.10.0-993.el7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: 81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59
sig_hashalgo: sha256
parm: debug:Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX) (uint)
parm: int_mode: Force interrupt mode other than MSI-X (1 INT#x; 2 MSI) (int)
""".strip()

# aesni-intel crypto module: five aliases, no parm/firmware lines
MODINFO_INTEL = """
filename: /lib/modules/3.10.0-993.el7.x86_64/kernel/arch/x86/crypto/aesni-intel.ko.xz
alias: crypto-aes
alias: aes
license: GPL
description: Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized
alias: crypto-fpu
alias: fpu
retpoline: Y
rhelversion: 7.7
srcversion: 975EC794FC6B4D7306E0879
alias: x86cpu:vendor:*:family:*:model:*:feature:*0099*
depends: glue_helper,lrw,cryptd,ablk_helper
intree: Y
vermagic: 3.10.0-993.el7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: 81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59
sig_hashalgo: sha256
""".strip()

# bnx2x NIC driver: three firmware lines, 24 aliases, six parm lines
MODINFO_BNX2X = """
filename: /lib/modules/3.10.0-514.el7.x86_64/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko
firmware: bnx2x/bnx2x-e2-7.13.1.0.fw
firmware: bnx2x/bnx2x-e1h-7.13.1.0.fw
firmware: bnx2x/bnx2x-e1-7.13.1.0.fw
version: 1.712.30-0
license: GPL
description: QLogic BCM57710/57711/57711E/57712/57712_MF/57800/57800_MF/57810/57810_MF/57840/57840_MF Driver
author: Eliezer Tamir
rhelversion: 7.3
srcversion: E631435423FC99CEF769288
alias: pci:v000014E4d0000163Fsv*sd*bc*sc*i*
alias: pci:v000014E4d0000163Esv*sd*bc*sc*i*
alias: pci:v000014E4d0000163Dsv*sd*bc*sc*i*
alias: pci:v00001077d000016ADsv*sd*bc*sc*i*
alias: pci:v000014E4d000016ADsv*sd*bc*sc*i*
alias: pci:v00001077d000016A4sv*sd*bc*sc*i*
alias: pci:v000014E4d000016A4sv*sd*bc*sc*i*
alias: pci:v000014E4d000016ABsv*sd*bc*sc*i*
alias: pci:v000014E4d000016AFsv*sd*bc*sc*i*
alias: pci:v000014E4d000016A2sv*sd*bc*sc*i*
alias: pci:v00001077d000016A1sv*sd*bc*sc*i*
alias: pci:v000014E4d000016A1sv*sd*bc*sc*i*
alias: pci:v000014E4d0000168Dsv*sd*bc*sc*i*
alias: pci:v000014E4d000016AEsv*sd*bc*sc*i*
alias: pci:v000014E4d0000168Esv*sd*bc*sc*i*
alias: pci:v000014E4d000016A9sv*sd*bc*sc*i*
alias: pci:v000014E4d000016A5sv*sd*bc*sc*i*
alias: pci:v000014E4d0000168Asv*sd*bc*sc*i*
alias: pci:v000014E4d0000166Fsv*sd*bc*sc*i*
alias: pci:v000014E4d00001663sv*sd*bc*sc*i*
alias: pci:v000014E4d00001662sv*sd*bc*sc*i*
alias: pci:v000014E4d00001650sv*sd*bc*sc*i*
alias: pci:v000014E4d0000164Fsv*sd*bc*sc*i*
alias: pci:v000014E4d0000164Esv*sd*bc*sc*i*
depends: mdio,libcrc32c,ptp
intree: Y
vermagic: 3.10.0-514.el7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: 75:FE:A1:DF:24:5A:CC:D9:7A:17:FE:3A:36:72:61:E6:5F:8A:1E:60
sig_hashalgo: sha256
parm: num_queues: Set number of queues (default is as a number of CPUs) (int)
parm: disable_tpa: Disable the TPA (LRO) feature (int)
parm: int_mode: Force interrupt mode other than MSI-X (1 INT#x; 2 MSI) (int)
parm: dropless_fc: Pause on exhausted host ring (int)
parm: mrrs: Force Max Read Req Size (0..3) (for debug) (int)
parm: debug: Default debug msglevel (int)
""".strip()

# vmxnet3 virtual NIC driver: single alias, empty depends, no parm/firmware
MODINFO_VMXNET3 = """
filename: /lib/modules/3.10.0-957.10.1.el7.x86_64/kernel/drivers/net/vmxnet3/vmxnet3.ko.xz
version: 1.4.14.0-k
license: GPL v2
description: VMware vmxnet3 virtual NIC driver
author: VMware, Inc.
retpoline: Y
rhelversion: 7.6
srcversion: 7E672688ACACBDD2E363B63
alias: pci:v000015ADd000007B0sv*sd*bc*sc*i*
depends:
intree: Y
vermagic: 3.10.0-957.10.1.el7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: A5:70:18:DF:B6:C9:D6:1F:CF:CE:0A:3D:02:8B:B3:69:BD:76:CA:ED
sig_hashalgo: sha256
""".strip()

# igb NIC driver: single alias, two parm lines
MODINFO_IGB = """
filename: /lib/modules/3.10.0-327.10.1.el7.jump7.x86_64/kernel/drivers/net/ethernet/intel/igb/igb.ko
version: 5.2.15-k
license: GPL
description: Intel(R) Gigabit Ethernet Network Driver
author: Intel Corporation, <e1000-devel@lists.sourceforge.net>
rhelversion: 7.2
srcversion: 9CF4D446FA2E882F6BA0A17
alias: pci:v00008086d000010D6sv*sd*bc*sc*i*
depends: i2c-core,ptp,dca,i2c-algo-bit
intree: Y
vermagic: 3.10.0-327.10.1.el7.jump7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: C9:10:C7:BB:C3:C7:10:A1:68:A6:F3:6D:45:22:90:B7:5A:D4:B0:7A
sig_hashalgo: sha256
parm: max_vfs:Maximum number of virtual functions to allocate per physical function (uint)
parm: debug:Debug level (0=none,...,16=all) (int)
""".strip()

# ixgbe 10G NIC driver: single alias, two parm lines
MODINFO_IXGBE = """
filename: /lib/modules/3.10.0-514.6.1.el7.jump3.x86_64/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.ko
version: 4.4.0-k-rh7.3
license: GPL
description: Intel(R) 10 Gigabit PCI Express Network Driver
author: Intel Corporation, <linux.nics@intel.com>
rhelversion: 7.3
srcversion: 24F0195E8A357701DE1B32E
alias: pci:v00008086d000015CEsv*sd*bc*sc*i*
depends: i2c-core,ptp,dca,i2c-algo-bit
intree: Y
vermagic: 3.10.0-514.6.1.el7.jump3.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: 69:10:6E:D5:83:0D:2C:66:97:41:91:7B:0F:57:D4:1D:95:A2:8A:EB
sig_hashalgo: sha256
parm: max_vfs:Maximum number of virtual functions to allocate per physical function (uint)
parm: debug:Debug level (0=none,...,16=all) (int)
""".strip()

# empty input: the parser should raise SkipException
MODINFO_NO = """
""".strip()

# error output for a missing module: the parser should raise ParseException
MODINFO_NO_1 = """
modinfo ERROR Module i40e not found.
""".strip()

# veth virtual device driver: rtnl-link alias, empty depends
MODINFO_VETH = """
filename: /lib/modules/3.10.0-327.el7.x86_64/kernel/drivers/net/veth.ko
alias: rtnl-link-veth
license: GPL v2
description: Virtual Ethernet Tunnel
rhelversion: 7.2
srcversion: 25C6BF3D2F35CAF3A252F12
depends:
intree: Y
vermagic: 3.10.0-327.el7.x86_64 SMP mod_unload modversions
signer: Red Hat Enterprise Linux kernel signing key
sig_key: BC:73:C3:CE:E8:9E:5E:AE:99:4A:E5:0A:0D:B1:F0:FE:E3:FC:09:13
sig_hashalgo: sha256
""".strip()
def test_modinfo():
    """Exercise every ModInfo* parser against its fixture, plus the
    empty-input (SkipException) and error-output (ParseException) paths."""
    # i40e: fields, list-valued keys (alias/parm), membership and path
    modinfo_obj = ModInfoI40e(context_wrap(MODINFO_I40E))
    assert modinfo_obj.module_name == 'i40e'
    assert modinfo_obj.module_version == '2.3.2-k'
    assert modinfo_obj.module_deps == ['ptp']
    assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key'
    assert len(modinfo_obj.data['alias']) == 2
    assert modinfo_obj.data['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59'
    assert modinfo_obj.data['vermagic'] == '3.10.0-993.el7.x86_64 SMP mod_unload modversions'
    assert sorted(modinfo_obj.data['parm']) == sorted(['debug:Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX) (uint)',
                                                      'int_mode: Force interrupt mode other than MSI-X (1 INT#x; 2 MSI) (int)'])
    assert modinfo_obj.data['description'] == 'Intel(R) Ethernet Connection XL710 Network Driver'
    assert ('signer' in modinfo_obj) is True
    assert modinfo_obj.module_path == "/lib/modules/3.10.0-993.el7.x86_64/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.xz"

    # aesni-intel parsed with the i40e parser: alias collection, missing 'parm'
    modinfo_obj = ModInfoI40e(context_wrap(MODINFO_INTEL))
    assert len(modinfo_obj.data['alias']) == 5
    assert sorted(modinfo_obj.data['alias']) == sorted(['aes', 'crypto-aes', 'crypto-fpu', 'fpu', 'x86cpu:vendor:*:family:*:model:*:feature:*0099*'])
    assert ('parm' in modinfo_obj) is False
    assert modinfo_obj.module_name == 'aesni-intel'
    assert modinfo_obj.data['description'] == 'Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized'
    assert modinfo_obj.data['rhelversion'] == '7.7'
    assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key'
    assert modinfo_obj.module_deps == ['glue_helper', 'lrw', 'cryptd', 'ablk_helper']
    assert modinfo_obj.data['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59'

    # bnx2x: large alias/parm/firmware collections
    modinfo_obj = ModInfoI40e(context_wrap(MODINFO_BNX2X))
    assert len(modinfo_obj.data['alias']) == 24
    assert len(modinfo_obj.data['parm']) == 6
    assert len(modinfo_obj.data['firmware']) == 3
    assert sorted(modinfo_obj.data['firmware']) == sorted(['bnx2x/bnx2x-e2-7.13.1.0.fw', 'bnx2x/bnx2x-e1h-7.13.1.0.fw', 'bnx2x/bnx2x-e1-7.13.1.0.fw'])
    assert modinfo_obj.module_name == 'bnx2x'
    assert modinfo_obj.module_path == '/lib/modules/3.10.0-514.el7.x86_64/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko'
    assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key'
    assert sorted(modinfo_obj.module_deps) == sorted(['mdio', 'libcrc32c', 'ptp'])

    # igb parser: single-valued alias stays a string, not a list
    modinfo_igb = ModInfoIgb(context_wrap(MODINFO_IGB))
    assert modinfo_igb.data.get('alias') == 'pci:v00008086d000010D6sv*sd*bc*sc*i*'
    assert modinfo_igb.module_name == 'igb'
    assert modinfo_igb.module_path == '/lib/modules/3.10.0-327.10.1.el7.jump7.x86_64/kernel/drivers/net/ethernet/intel/igb/igb.ko'

    # ixgbe parser
    modinfo_ixgbe = ModInfoIxgbe(context_wrap(MODINFO_IXGBE))
    assert modinfo_ixgbe.data.get('alias') == 'pci:v00008086d000015CEsv*sd*bc*sc*i*'
    assert modinfo_ixgbe.module_name == 'ixgbe'
    assert modinfo_ixgbe.module_path == '/lib/modules/3.10.0-514.6.1.el7.jump3.x86_64/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.ko'

    # vmxnet3 parser: absent parm/firmware expose empty collections
    modinfo_drv = ModInfoVmxnet3(context_wrap(MODINFO_VMXNET3))
    assert modinfo_drv.data.get('alias') == 'pci:v000015ADd000007B0sv*sd*bc*sc*i*'
    assert len(modinfo_drv.module_parm) == 0
    assert len(modinfo_drv.module_firmware) == 0
    assert modinfo_drv.module_name == 'vmxnet3'
    assert modinfo_drv.module_path == '/lib/modules/3.10.0-957.10.1.el7.x86_64/kernel/drivers/net/vmxnet3/vmxnet3.ko.xz'
    assert sorted(modinfo_obj.data['firmware']) == sorted(['bnx2x/bnx2x-e2-7.13.1.0.fw', 'bnx2x/bnx2x-e1h-7.13.1.0.fw', 'bnx2x/bnx2x-e1-7.13.1.0.fw'])

    # veth parser
    modinfo_drv = ModInfoVeth(context_wrap(MODINFO_VETH))
    assert modinfo_drv.module_name == 'veth'
    assert modinfo_drv.module_path == '/lib/modules/3.10.0-327.el7.x86_64/kernel/drivers/net/veth.ko'
    assert modinfo_drv.module_signer == 'Red Hat Enterprise Linux kernel signing key'

    # failure modes: empty content -> SkipException, error output -> ParseException
    with pytest.raises(SkipException) as exc:
        modinfo_obj = ModInfoI40e(context_wrap(MODINFO_NO))
    assert 'No Contents' in str(exc)
    with pytest.raises(ParseException) as exc:
        modinfo_obj = ModInfoI40e(context_wrap(MODINFO_NO_1))
    assert 'No Parsed Contents' in str(exc)
    with pytest.raises(SkipException) as exc:
        modinfo_drv = ModInfoVmxnet3(context_wrap(MODINFO_NO))
    assert 'No Contents' in str(exc)
    with pytest.raises(ParseException) as exc:
        modinfo_drv = ModInfoVmxnet3(context_wrap(MODINFO_NO_1))
    assert 'No Parsed Contents' in str(exc)
    with pytest.raises(SkipException) as exc:
        modinfo_drv = ModInfoVeth(context_wrap(MODINFO_NO))
    assert 'No Contents' in str(exc)
    with pytest.raises(ParseException) as exc:
        modinfo_drv = ModInfoVeth(context_wrap(MODINFO_NO_1))
    assert 'No Parsed Contents' in str(exc)
def test_modinfo_doc_examples():
    """Run the usage examples in the modinfo module docstrings as doctests,
    supplying a pre-built parser object for each driver fixture."""
    env = {
        'modinfo_obj': ModInfoI40e(context_wrap(MODINFO_I40E)),
        'modinfo_drv': ModInfoVmxnet3(context_wrap(MODINFO_VMXNET3)),
        'modinfo_igb': ModInfoIgb(context_wrap(MODINFO_IGB)),
        'modinfo_veth': ModInfoVeth(context_wrap(MODINFO_VETH)),
        'modinfo_ixgbe': ModInfoIxgbe(context_wrap(MODINFO_IXGBE)),
    }
    failures, _ = doctest.testmod(modinfo, globs=env)
    assert failures == 0
| 47.672794 | 150 | 0.689905 |
40d45c54d29486baf750658213d59481b1331d77 | 3,023 | py | Python | projects/plots/line_plot_2D.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/plots/line_plot_2D.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/plots/line_plot_2D.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | 1 | 2020-01-22T15:35:29.000Z | 2020-01-22T15:35:29.000Z | #!/usr/bin/env python3
"""
This command-line executable program accepts a
data-stream name as an input, and generates a
plot for each stream in the data-stream.
It can be used for testing whether a meta-stream
has been correctly used to generate a compliant
data-stream, and also to visualize how different
streams arrive at different frequencies.
"""
import sys
from typing import List
import numpy as np
import pylsl
# Usage string printed when the command-line arguments are invalid.
SYNTAX = "stream_plot_2D [stream_name]"
def plot_data(_streams: List[pylsl.StreamInlet]):
    """Animate a grid of 2-D scatter plots, one subplot per stream inlet.

    Each inlet is expected to deliver 2-channel samples (x, y); only the
    most recent W samples are shown, with older points drawn smaller and
    more transparent. Blocks in plt.show() until the window is closed.

    Fixes: with squeeze=False, plt.subplots always returns a 2-D axes
    array, so the old `axs if S == 1` special case returned the whole
    ndarray and crashed on attribute access; the unused local `_cc` was
    also removed.
    """
    from matplotlib import pyplot as plt, animation
    S = len(_streams)  # number of streams
    W = 100  # buffer size (only W data points are visualized per plot)
    # per-stream rolling buffer of the last W (x, y) samples, zero-filled
    D = [np.full((W, stream.channel_count), 0.0) for stream in _streams]
    SD = []  # scatter artists, one per stream
    COLS = 2
    ROWS = 1 + S // COLS

    # subplot for each stream inlet; squeeze=False -> axs is always 2-D
    fig, axs = plt.subplots(ROWS, COLS, squeeze=False)

    def ax(_i):
        # uniform 2-D indexing is valid for every S because of squeeze=False
        return axs[_i // COLS, _i % COLS]

    # initialize subplots: fixed [0, 1] range, fading blue trail of W points
    for i in range(S):
        a = ax(i)
        a.xaxis.set_visible(False)
        a.title.set_text(_streams[i].info().type())
        a.set_ylim(0, 1)
        a.set_xlim(0, 1)
        _x = D[i][:, 0]
        _y = D[i][:, 1]
        # RGBA per point: blue with alpha ramping from 0.1 (oldest) to 1.0
        _c = np.stack([np.zeros(W), np.zeros(W), np.ones(W), np.linspace(0.1, 1.0, W)], axis=-1)
        # marker size grows toward the newest sample
        _s = 2 ** np.linspace(1, 6, W)
        scatters = a.scatter(x=_x, y=_y, s=_s, c=_c, animated=True)
        SD.append(scatters)
    # remove excess subplots in the grid
    for i in range(S, ROWS * COLS):
        ax(i).set_axis_off()
    plt.tight_layout()

    def init():
        # reset every trail to the origin before the animation starts
        for _scatters in SD:
            _scatters.set_offsets(np.full((W, 2), 0.0))
        return SD

    def animate(f):
        nonlocal D
        for _i in range(S):
            _si = _streams[_i]
            samples, _ = _si.pull_chunk(max_samples=1024)
            _n = len(samples)
            if _n == 0:
                continue
            # convert samples to numpy array, and move nan values outside visible plot range
            samples = np.nan_to_num(np.array(samples), nan=-1.0)
            # roll the buffer and append the new chunk (or keep only the
            # last W samples when the chunk is larger than the buffer)
            if _n < W:
                d_i = np.roll(D[_i], -_n, axis=0)
                d_i[-_n:] = samples
                D[_i] = d_i.copy()
            else:
                D[_i] = np.array(samples[-W:]).copy()
            # update plot
            SD[_i].set_offsets(D[_i])
        return SD

    # _ = animation.FuncAnimation(fig, animate, init_func=init, blit=True, interval=100)
    _ = animation.FuncAnimation(fig, animate, init_func=init, blit=True, save_count=0, cache_frame_data=False)
    plt.show()
def main():
    """Entry point: resolve the named LSL stream and plot its 2-channel
    inlets. Raises AssertionError (caught by the __main__ guard) on bad
    command-line arguments."""
    # select the Qt backend before any pyplot work happens
    import matplotlib
    matplotlib.use("Qt5Agg")
    # validate command-line args
    assert len(sys.argv) == 2, f"Invalid Syntax.\nExpected: {SYNTAX}"
    stream_name = sys.argv[1]
    # keep only 2-channel streams and wrap each one in an inlet
    candidates = pylsl.resolve_stream('name', stream_name)
    inlets = [pylsl.StreamInlet(s) for s in candidates if s.channel_count() == 2]
    plot_data(inlets)
# Script entry point: report argument errors as a clean one-line message
# instead of a full traceback.
if __name__ == '__main__':
    try:
        main()
    except AssertionError as e:
        print(f'Error: {e}', file=sys.stderr)
| 27.234234 | 108 | 0.648363 |
130434229e96a96dfacf1fc8b90dc98ef80d9bec | 2,038 | py | Python | Lib/secrets.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | Lib/secrets.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | Lib/secrets.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 1,933 | 2018-01-15T13:08:40.000Z | 2022-03-31T11:28:59.000Z | """Generate cryptographically strong pseudo-random numbers suitable for
managing secrets such as account authentication, tokens, and similar.
See PEP 506 for more information.
https://www.python.org/dev/peps/pep-0506/
"""
__all__ = ['choice', 'randbelow', 'randbits', 'SystemRandom',
'token_bytes', 'token_hex', 'token_urlsafe',
'compare_digest',
]
import base64
import binascii
import os
from hmac import compare_digest
from random import SystemRandom
# Module-private source of cryptographically strong randomness; the public
# helpers below all delegate to it.
_sysrand = SystemRandom()

randbits = _sysrand.getrandbits
choice = _sysrand.choice
def randbelow(exclusive_upper_bound):
    """Return a random int in the range [0, n)."""
    if exclusive_upper_bound > 0:
        return _sysrand._randbelow(exclusive_upper_bound)
    raise ValueError("Upper bound must be positive.")
DEFAULT_ENTROPY = 32 # number of bytes to return by default
def token_bytes(nbytes=None):
    """Return a random byte string containing *nbytes* bytes.

    If *nbytes* is ``None`` or not supplied, a reasonable
    default is used.

    >>> token_bytes(16)  #doctest:+SKIP
    b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b'
    """
    size = DEFAULT_ENTROPY if nbytes is None else nbytes
    return os.urandom(size)
def token_hex(nbytes=None):
    """Return a random text string, in hexadecimal.

    The string has *nbytes* random bytes, each byte converted to two
    hex digits.  If *nbytes* is ``None`` or not supplied, a reasonable
    default is used.

    >>> token_hex(16)  #doctest:+SKIP
    'f9bf78b9a18ce6d46a0cd2b0b86df9da'
    """
    # bytes.hex() produces exactly hexlify(...).decode('ascii')
    return token_bytes(nbytes).hex()
def token_urlsafe(nbytes=None):
    """Return a random URL-safe text string, in Base64 encoding.

    The string has *nbytes* random bytes.  If *nbytes* is ``None``
    or not supplied, a reasonable default is used.

    >>> token_urlsafe(16)  #doctest:+SKIP
    'Drmhze6EPcv0fN_81Bj-nA'
    """
    encoded = base64.urlsafe_b64encode(token_bytes(nbytes))
    # drop the '=' padding: it carries no entropy and is not URL-friendly
    return encoded.decode('ascii').rstrip('=')
| 27.540541 | 71 | 0.693327 |
b0ca5fdeca78599e75642e122657edfb78cebfe2 | 4,137 | py | Python | agents/n_step_q.py | bluepc2013/deep-rl-tensorflow-master | f25f2529839b0ae0d76439f01131654329bfafd8 | [
"MIT"
] | 29 | 2017-03-30T08:17:01.000Z | 2021-03-15T17:11:51.000Z | agents/n_step_q.py | binderwang/deep-rl-tensorflow | b646c65de0e5e76036c03ea7a4cf71f5b714c34c | [
"MIT"
] | null | null | null | agents/n_step_q.py | binderwang/deep-rl-tensorflow | b646c65de0e5e76036c03ea7a4cf71f5b714c34c | [
"MIT"
] | 11 | 2016-12-17T11:50:12.000Z | 2021-08-23T18:09:30.000Z | import os
import time
import numpy as np
import tensorflow as tf
from logging import getLogger
from .agent import Agent
from .history import History
from .experience import Experience
logger = getLogger(__name__)
class NStepQ(Agent):
  """n-step Q-learning agent.

  Builds the TD-error optimizer at construction time and accumulates
  gradients over several environment steps before applying them.
  """

  def __init__(self, sess, pred_network, env, stat, conf, target_network=None):
    """Create the agent and build its optimizer graph.

    :param sess: tensorflow session
    :param pred_network: online Q-network
    :param env: environment wrapper (provides ``action_size``)
    :param stat: statistics/bookkeeping object (provides ``t_op``)
    :param conf: configuration; hyper-parameters are read via ``self.*``
        attributes set by the :class:`Agent` base class
    :param target_network: optional frozen target Q-network
    """
    # BUGFIX: the original called ``super(DeepQ, self).__init__`` but no
    # ``DeepQ`` is defined or imported in this module — that line raised
    # NameError the moment the class was instantiated.
    super(NStepQ, self).__init__(sess, pred_network, target_network, env, stat, conf)

    # Optimizer
    with tf.variable_scope('optimizer'):
      self.targets = tf.placeholder('float32', [None], name='target_q_t')
      self.actions = tf.placeholder('int64', [None], name='action')

      actions_one_hot = tf.one_hot(self.actions, self.env.action_size, 1.0, 0.0, name='action_one_hot')
      pred_q = tf.reduce_sum(self.pred_network.outputs * actions_one_hot, reduction_indices=1, name='q_acted')

      # TD error, optionally clipped to [min_delta, max_delta]
      self.delta = self.targets - pred_q
      if self.max_delta and self.min_delta:
        self.delta = tf.clip_by_value(self.delta, self.min_delta, self.max_delta, name='clipped_delta')

      self.loss = tf.reduce_mean(tf.square(self.delta), name='loss')

      # exponentially decayed learning rate with a lower bound
      self.learning_rate_op = tf.maximum(self.learning_rate_minimum,
          tf.train.exponential_decay(
              self.learning_rate,
              self.stat.t_op,
              self.learning_rate_decay_step,
              self.learning_rate_decay,
              staircase=True))
      optimizer = tf.train.RMSPropOptimizer(
        self.learning_rate_op, momentum=0.95, epsilon=0.01)

      # clip gradients by norm before applying them
      grads_and_vars = optimizer.compute_gradients(self.loss)
      for idx, (grad, var) in enumerate(grads_and_vars):
        if grad is not None:
          grads_and_vars[idx] = (tf.clip_by_norm(grad, self.max_grad_norm), var)
      self.optim = optimizer.apply_gradients(grads_and_vars)

  # Add accumulated gradients for n-step Q-learning
  def make_accumulated_gradients(self):
    """Create per-variable gradient accumulators plus the ops that add to
    and reset them (asynchronous n-step Q-learning style).

    NOTE(review): ``global_var``, ``self.networks`` and ``self.global_optim``
    are referenced but not defined in this module — this looks copied from an
    A3C implementation (see the ``A3C_\\d+`` regex); verify they are provided
    by the runtime environment before using this method.
    """
    import re  # BUGFIX: `re` is used below but never imported at module level

    reset_accum_grads = []
    new_grads_and_vars = []

    # 1. Prepare accum_grads
    self.accum_grads = {}
    self.add_accum_grads = {}

    for step, network in enumerate(self.networks):
      grads_and_vars = self.global_optim.compute_gradients(network.total_loss, network.w.values())
      _add_accum_grads = []

      for grad, var in tuple(grads_and_vars):
        if grad is not None:
          shape = grad.get_shape().as_list()
          name = 'accum/%s' % "/".join(var.name.split(':')[0].split('/')[-3:])
          if step == 0:
            self.accum_grads[name] = tf.Variable(
                tf.zeros(shape), trainable=False, name=name)

            global_v = global_var[re.sub(r'.*\/A3C_\d+\/', '', var.name)]
            new_grads_and_vars.append((tf.clip_by_norm(self.accum_grads[name].ref(), self.max_grad_norm), global_v))

            reset_accum_grads.append(self.accum_grads[name].assign(tf.zeros(shape)))

          _add_accum_grads.append(tf.assign_add(self.accum_grads[name], grad))

      # 2. Add gradient to accum_grads
      self.add_accum_grads[step] = tf.group(*_add_accum_grads)

  def observe(self, observation, reward, action, terminal):
    """Record one transition and, past warm-up, run periodic training and
    target-network updates.

    :return: tuple ``(q_values, loss, is_update)``
    """
    reward = max(self.min_r, min(self.max_r, reward))

    self.history.add(observation)
    self.experience.add(observation, reward, action, terminal)

    # q, loss, is_update
    result = [], 0, False
    if self.t > self.t_learn_start:
      if self.t % self.t_train_freq == 0:
        result = self.q_learning_minibatch()
      if self.t % self.t_target_q_update_freq == self.t_target_q_update_freq - 1:
        self.update_target_q_network()
    return result

  def q_learning_minibatch(self):
    """Sample a minibatch from replay memory and take one SGD step.

    :return: ``(q_values, loss, True)`` on success, or ``([], 0, False)``
        when the replay buffer does not yet hold a full history window.
    """
    if self.experience.count < self.history_length:
      return [], 0, False
    else:
      s_t, action, reward, s_t_plus_1, terminal = self.experience.sample()

    terminal = np.array(terminal) + 0.  # bool -> float mask

    # Deep Q-learning target: r + gamma * max_a' Q_target(s', a') for non-terminal s'
    max_q_t_plus_1 = self.target_network.calc_max_outputs(s_t_plus_1)
    target_q_t = (1. - terminal) * self.discount_r * max_q_t_plus_1 + reward

    _, q_t, loss = self.sess.run([self.optim, self.pred_network.outputs, self.loss], {
      self.targets: target_q_t,
      self.actions: action,
      self.pred_network.inputs: s_t,
    })

    return q_t, loss, True
| 35.358974 | 116 | 0.673193 |
71b98f59428322523fe15276f1dd95e05126903b | 1,330 | py | Python | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | null | null | null | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | 3 | 2020-03-24T16:26:22.000Z | 2021-02-02T21:55:45.000Z | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | null | null | null | from social_core.backends.oauth import BaseOAuth2
class RagtagOAuth2(BaseOAuth2):
    """Ragtag ID OAuth authentication backend"""

    name = "ragtag"
    AUTHORIZATION_URL = "https://id.ragtag.org/oauth/authorize/"
    ACCESS_TOKEN_URL = "https://id.ragtag.org/oauth/token/"
    ACCESS_TOKEN_METHOD = "POST"
    REVOKE_TOKEN_URL = "https://id.ragtag.org/oauth/revoke_token/"
    SCOPE_SEPARATOR = " "
    ID_KEY = "id"

    def get_user_details(self, response):
        """Return user details from Ragtag ID account"""
        wanted = ("username", "email", "first_name", "last_name")
        return {field: response.get(field) for field in wanted}

    def user_data(self, access_token, *args, **kwargs):
        """Fetches user data from id.ragtag.org"""
        auth_header = {"Authorization": "Bearer {}".format(access_token)}
        return self.get_json("https://id.ragtag.org/api/me/", headers=auth_header)

    def auth_params(self, state=None):
        """Extend the default auth params with an optional approval prompt."""
        params = super(RagtagOAuth2, self).auth_params(state=state)
        # forward a non-default APPROVAL_PROMPT setting to the provider
        if self.setting("APPROVAL_PROMPT", "auto") != "auto":
            params["approval_prompt"] = self.setting("APPROVAL_PROMPT", "")
        return params
| 35.945946 | 75 | 0.627068 |
8e85c1b93291aa581c6b6f90d4fc551c1f713881 | 4,279 | py | Python | trajectory/BadaAircraftPerformance/BadaAeroDynamicsFile.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null | trajectory/BadaAircraftPerformance/BadaAeroDynamicsFile.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null | trajectory/BadaAircraftPerformance/BadaAeroDynamicsFile.py | RobertPastor/flight-profile | bdc3bb9defeb347db26f96f7accd4d06cad1e33b | [
"MIT"
] | null | null | null | '''
Created on 6 mars 2015
@author: PASTOR Robert
Written By:
Robert PASTOR
Email: < robert [--DOT--] pastor0691 (--AT--) orange [--DOT--] fr >
http://trajectoire-predict.monsite-orange.fr/
Copyright 2015 Robert PASTOR
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
this class is responsible for managing the AeroDynamics data provided for each aircraft by BADA
'''
from trajectory.BadaAircraftPerformance.BadaAircraftPerformanceFile import AircraftPerformance
from trajectory.Environment.Atmosphere import Atmosphere
from trajectory.Environment.Earth import Earth
class AeroDynamics(object):
    """Aerodynamic data for one BADA aircraft: wing area, per-phase stall
    speeds and drag coefficients, read from an AircraftPerformance record.

    Per-configuration data follow the BADA layout, e.g.::

        CC n Phase Name  Vstall(KCAS)  CD0         CD2         unused
        CD 1 CR    Clean .13900E+03    .25954E-01  .25882E-01  .00000E+00
        CD 2 IC    1     .11300E+03    .28410E-01  .37646E-01  .00000E+00
        CD 3 TO    1+F   .10400E+03    .44520E-01  .32811E-01  .00000E+00
        CD 4 AP    2     .10000E+03    .46986E-01  .35779E-01  .00000E+00
        CD 5 LD    FULL  .94000E+02    .97256E-01  .36689E-01  .00000E+00
    """
    className = ''
    AeroDynamicsLine = 3
    WingAreaSurfaceSquareMeters = 0.0
    VstallKcas = {}
    DragCoeff = {}
    LandingGearDragCoeff = 0.0
    atmosphere = None
    earth = None

    def __init__(self, aircraftPerformance, atmosphere, earth):
        """Build the aerodynamics model.

        The atmosphere is needed to compute the stall speed from the air
        density at airport altitude.
        """
        self.className = self.__class__.__name__

        assert (isinstance(aircraftPerformance, AircraftPerformance))
        self.WingAreaSurfaceSquareMeters = aircraftPerformance.getWingAreaSurfaceSquareMeters()

        assert (isinstance(atmosphere, Atmosphere) and not(atmosphere is None))
        self.atmosphere = atmosphere
        assert (isinstance(earth, Earth) and not(earth is None))
        self.earth = earth

        # per-phase ('CR', 'IC', 'TO', 'AP', 'LD') stall speeds and drag data
        self.VstallKcas = aircraftPerformance.getVstallKcasKnots()
        self.DragCoeff = aircraftPerformance.getDragCoeff()
        self.LandingGearDragCoeff = aircraftPerformance.getLandingGearDragCoeff()
        # emits the same two summary lines the original constructor printed
        self.dump()

    def getVstallKcas(self, phase):
        ''' calibrated air speed in Knots '''
        assert (phase in ['CR', 'IC', 'TO', 'AP', 'LD'])
        return self.VstallKcas[phase]

    def getDragCoeff(self, phase):
        """Return the (CD0, CD2) drag coefficients for the given phase."""
        assert (phase in ['CR', 'IC', 'TO', 'AP', 'LD'])
        return self.DragCoeff['CD0'][phase], self.DragCoeff['CD2'][phase]

    def getWingAreaSurfaceSquareMeters(self):
        """Wing reference area in square meters."""
        return self.WingAreaSurfaceSquareMeters

    def __str__(self):
        return (self.className
                + ': WingAreaSurface Square-Meters= ' + str(self.WingAreaSurfaceSquareMeters)
                + ': stall speeds in knots= ' + str(self.VstallKcas))

    def dump(self):
        print ( self.className + ': Wing Area Surface= {0} Square-Meters'.format(self.WingAreaSurfaceSquareMeters) )
        print ( self.className + ': stall speed= {0} knots'.format(self.VstallKcas) )
| 39.62037 | 116 | 0.629353 |
0057978c97842ccfd25f2f3dba877544a56eeb29 | 1,023 | py | Python | tests/io/test_read_write.py | awaelchli/torch-optical-flow | 1f48d95b8f3412052f7c35eb2ec1fa7cb739efe1 | [
"MIT"
] | null | null | null | tests/io/test_read_write.py | awaelchli/torch-optical-flow | 1f48d95b8f3412052f7c35eb2ec1fa7cb739efe1 | [
"MIT"
] | null | null | null | tests/io/test_read_write.py | awaelchli/torch-optical-flow | 1f48d95b8f3412052f7c35eb2ec1fa7cb739efe1 | [
"MIT"
] | 1 | 2021-11-14T09:13:03.000Z | 2021-11-14T09:13:03.000Z | from pathlib import Path
import pytest
import torch
from optical_flow.io.read_write import FORMATS, read, write
@pytest.mark.parametrize("fmt", FORMATS)
@pytest.mark.parametrize(
    "device",
    [
        pytest.param(torch.device("cpu")),
        pytest.param(
            torch.device("cuda", 0),
            # GPU case skips itself automatically on CUDA-less machines
            marks=pytest.mark.skipif(
                not torch.cuda.is_available(), reason="requires GPU"
            ),
        ),
    ],
)
def test_read_write(tmpdir, fmt, device):
    """Round-trip a random flow field through write()/read() for each format."""
    # scale up so small numeric differences after the round-trip are measurable
    flow = torch.rand(2, 5, 6, device=device) * 100
    filename = Path(tmpdir) / "test"
    if fmt == "kitti":
        # the kitti writer emits a PNG file, so the extension is required
        filename = filename.with_suffix(".png")
    write(filename, flow, fmt=fmt)
    loaded_flow = read(filename, fmt=fmt)
    assert isinstance(loaded_flow, torch.Tensor)
    assert loaded_flow.dtype == torch.float32
    assert loaded_flow.shape == flow.shape
    # readers return CPU tensors regardless of the device the flow came from
    assert loaded_flow.device == torch.device("cpu")
    # kitti round-trip is lossy (presumably quantized PNG encoding — the
    # looser tolerance below accounts for it); other formats are exact
    atol = 1e-1 if fmt == "kitti" else 1e-8
    assert torch.allclose(flow.cpu(), loaded_flow, atol=atol)
| 29.228571 | 68 | 0.638319 |
4a9a5485458f40114490dea9660c0f91644822c5 | 90 | py | Python | dist/book/codes/109.py | EManualResource/book-python-basic | a6f9e985b8765f9e8dbc7a0bea82243545d3fa06 | [
"Apache-2.0"
] | null | null | null | dist/book/codes/109.py | EManualResource/book-python-basic | a6f9e985b8765f9e8dbc7a0bea82243545d3fa06 | [
"Apache-2.0"
] | null | null | null | dist/book/codes/109.py | EManualResource/book-python-basic | a6f9e985b8765f9e8dbc7a0bea82243545d3fa06 | [
"Apache-2.0"
] | null | null | null | #coding:utf-8
print "please write your name:"
name=raw_input()
print "Hello,%s"%name
| 9 | 31 | 0.688889 |
f413689eeb720cf93ef02961641a5ffee7dfb333 | 2,216 | py | Python | corehq/apps/users/signals.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/users/signals.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/users/signals.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.contrib.auth.signals import user_logged_in
from corehq.elastic import send_to_elasticsearch
commcare_user_post_save = Signal(providing_args=["couch_user"])
couch_user_post_save = Signal(providing_args=["couch_user"])
@receiver(user_logged_in)
def set_language(sender, **kwargs):
    """
    Whenever a user logs in, attempt to set their browser session
    to the right language.

    HT: http://mirobetm.blogspot.com/2012/02/django-language-set-in-database-field.html
    """
    from corehq.apps.users.models import CouchUser

    couch_user = CouchUser.from_django_user(kwargs['user'])
    if couch_user and couch_user.language:
        kwargs['request'].session['django_language'] = couch_user.language
# Signal that syncs django_user => couch_user
def django_user_post_save_signal(sender, instance, created, raw=False, **kwargs):
    """Propagate a Django ``User`` save to the matching ``CouchUser``.

    Fixture loads (``raw=True``) are ignored.
    """
    from corehq.apps.users.models import CouchUser
    if not raw:
        return CouchUser.django_user_post_save_signal(sender, instance, created)
def update_user_in_es(sender, couch_user, **kwargs):
    """
    Automatically sync the user to elastic directly on save or delete
    """
    from corehq.pillows.user import transform_user_for_elasticsearch

    user_doc = transform_user_for_elasticsearch(couch_user.to_json())
    send_to_elasticsearch("users", user_doc, delete=couch_user.to_be_deleted())
def sync_user_phone_numbers(sender, couch_user, **kwargs):
    """Queue an async task to sync the user's phone numbers for SMS."""
    from corehq.apps.sms.tasks import sync_user_phone_numbers as sms_sync_user_phone_numbers
    task = sms_sync_user_phone_numbers
    task.delay(couch_user.get_id)
# This gets called by UsersAppConfig when the module is set up
def connect_user_signals():
    """Wire up all user-related signal handlers."""
    from django.contrib.auth.models import User

    post_save.connect(django_user_post_save_signal, User,
                      dispatch_uid="django_user_post_save_signal")
    for handler, uid in ((update_user_in_es, "update_user_in_es"),
                         (sync_user_phone_numbers, "sync_user_phone_numbers")):
        couch_user_post_save.connect(handler, dispatch_uid=uid)
| 39.571429 | 97 | 0.774368 |
ab6716921591df6a3c818f7972d4a4de504a6f4d | 8,435 | py | Python | ugali/pipeline/run_05.0_followup.py | SidneyMau/ugali | 2c9d7a8d0aec48f867b868d2663492e08b428f4e | [
"MIT"
] | null | null | null | ugali/pipeline/run_05.0_followup.py | SidneyMau/ugali | 2c9d7a8d0aec48f867b868d2663492e08b428f4e | [
"MIT"
] | null | null | null | ugali/pipeline/run_05.0_followup.py | SidneyMau/ugali | 2c9d7a8d0aec48f867b868d2663492e08b428f4e | [
"MIT"
] | 1 | 2019-07-18T16:42:27.000Z | 2019-07-18T16:42:27.000Z | #!/usr/bin/env python
"""Perform MCMC follow-up fitting."""
import os
from os.path import join,exists,basename,splitext
import shutil
from collections import OrderedDict as odict
from multiprocessing import Pool
import matplotlib
try: os.environ['DISPLAY']
except KeyError: matplotlib.use('Agg')
import numpy
import numpy as np
import yaml
import fitsio
from ugali.analysis.pipeline import Pipeline
from ugali.analysis.scan import Scan
import ugali.analysis.source
import ugali.analysis.loglike
import ugali.analysis.results
import ugali.utils.config
from ugali.utils.logger import logger
from ugali.utils.shell import mkdir
# NOTE(review): the second assignment overrides the first, so 'collect' and
# 'scan' are excluded from the default component list — looks like a dev
# toggle left in place; confirm which list is intended.
components = ['mcmc','membership','results','plot','collect','scan']
components = ['mcmc','membership','results','plot']
def make_filenames(config, label):
    """Derive the MCMC output file paths for a candidate label.

    :return: dict with keys ``outfile``/``samfile`` (sample ``.npy``),
        ``srcfile`` (``.yaml`` source model) and ``memfile`` (``.fits``
        membership table).
    """
    cfg = ugali.utils.config.Config(config)
    outdir = cfg['output']['mcmcdir']
    samfile = join(outdir, cfg['output']['mcmcfile'] % label)
    return dict(
        outfile=samfile,
        samfile=samfile,
        srcfile=samfile.replace('.npy', '.yaml'),
        memfile=samfile.replace('.npy', '.fits'),
    )
def do_results(args):
    """ Write the results output file """
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samples = filenames['samfile']

    # bail out politely if a previous stage did not produce its outputs
    for path in (srcfile, samples):
        if not exists(path):
            logger.warning("Couldn't find %s; skipping..." % path)
            return

    logger.info("Writing %s..." % srcfile)
    from ugali.analysis.results import write_results
    write_results(srcfile, config, srcfile, samples)
def do_membership(args):
    """ Write the membership output file """
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    memfile = filenames['memfile']

    # Consistency fix: do_results/do_plot skip missing inputs with a warning;
    # previously this function crashed the whole multiprocessing pool when
    # the source model file was absent.
    if not exists(srcfile):
        logger.warning("Couldn't find %s; skipping..." % srcfile)
        return

    logger.info("Writing %s..." % memfile)
    from ugali.analysis.loglike import write_membership
    write_membership(memfile, config, srcfile, section='source')
def do_plot(args):
    """ Create plots of mcmc output.

    Produces, next to the sample file: a posterior triangle plot, and (when
    the membership table exists) membership and 6-panel plots, plus a
    best-effort 4-panel summary.

    :param args: tuple of (config path, target name, label, (glon, glat, radius))
    """
    # imported lazily so pool workers configure their own matplotlib state
    import ugali.utils.plotting
    import pylab as plt

    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samfile = filenames['samfile']
    memfile = filenames['memfile']

    # skip candidates whose upstream outputs are missing
    if not exists(srcfile):
        logger.warning("Couldn't find %s; skipping..." % srcfile)
        return
    if not exists(samfile):
        logger.warning("Couldn't find %s; skipping..." % samfile)
        return

    config = ugali.utils.config.Config(config)
    # discard the first nburn steps of every walker
    burn = config['mcmc']['nburn'] * config['mcmc']['nwalkers']

    source = ugali.analysis.source.Source()
    source.load(srcfile, section='source')

    # posterior triangle plot from the MCMC samples
    outfile = samfile.replace('.npy', '.png')
    ugali.utils.plotting.plotTriangle(srcfile, samfile, burn=burn)
    logger.info(" Writing %s..." % outfile)
    plt.savefig(outfile, bbox_inches='tight', dpi=60)
    plt.close()

    plotter = ugali.utils.plotting.SourcePlotter(source, config, radius=0.5)

    # membership-based plots only when the .fits membership table exists
    data = fitsio.read(memfile, trim_strings=True) if exists(memfile) else None
    if data is not None:
        plt.figure()
        kernel, isochrone = source.kernel, source.isochrone
        ugali.utils.plotting.plotMembership(config, data, kernel, isochrone)
        outfile = samfile.replace('.npy', '_mem.png')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()

        # 6-panel diagnostic figure, saved as both PNG and PDF
        plotter.plot6(data)
        outfile = samfile.replace('.npy', '_6panel.png')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        outfile = samfile.replace('.npy', '_6panel.pdf')
        logger.info(" Writing %s..." % outfile)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()

    # best-effort 4-panel plot; NOTE(review): bare except hides the real error
    try:
        title = name
        plotter.plot4()
        outfile = samfile.replace('.npy', '_4panel.png')
        logger.info(" Writing %s..." % outfile)
        plt.suptitle(title)
        plt.savefig(outfile, bbox_inches='tight', dpi=60)
        plt.close()
    except:
        logger.warning(" Failed to create plotter.plot4()")
def run(self):
    """Execute the requested pipeline components for every target candidate.

    Components: 'mcmc' submits sampling jobs to the batch system;
    'results', 'membership' and 'plot' post-process each candidate (fanned
    out over a process pool when there is more than one); 'collect' gathers
    the per-candidate YAML into summary files; 'scan' submits likelihood
    scan jobs.
    """
    # targets come either from the CLI (--coords) or the candidate file
    if self.opts.coords is not None:
        coords = self.opts.coords
        names = vars(self.opts).get('names', len(coords) * [''])
    else:
        names, coords = self.parser.parse_targets(self.config.candfile)
    labels = [n.lower().replace(' ', '_').replace('(', '').replace(')', '') for n in names]

    self.outdir = mkdir(self.config['output']['mcmcdir'])
    self.logdir = mkdir(join(self.outdir, 'log'))

    args = list(zip(len(names) * [self.opts.config], names, labels, coords))

    if 'mcmc' in self.opts.run:
        logger.info("Running 'mcmc'...")
        try:
            shutil.copy(self.opts.config, self.outdir)
        except Exception as e:
            # BUGFIX: `e.message` does not exist on Python 3 exceptions
            logger.warning(str(e))
        for config, name, label, coord in args:
            glon, glat, radius = coord
            outfile = make_filenames(self.config, label)['samfile']
            base = splitext(basename(outfile))[0]
            logfile = join(self.logdir, base + '.log')
            jobname = base
            script = self.config['mcmc']['script']
            nthreads = self.config['mcmc']['nthreads']
            srcmdl = self.config['mcmc'].get('srcmdl')
            if srcmdl is not None:
                try:
                    shutil.copy(srcmdl, self.outdir)
                except Exception as e:
                    logger.warning(str(e))
                logger.info('%s (%s)' % (name, srcmdl))
                cmd = '%s %s --name %s --srcmdl %s %s' % (
                    script, self.opts.config, name, srcmdl, outfile)
            else:
                logger.info('%s (%.4f,%.4f)' % (name, glon, glat))
                cmd = '%s %s --name %s --gal %.4f %.4f --grid %s' % (
                    script, self.opts.config, name, glon, glat, outfile)
            logger.info(cmd)
            self.batch.submit(cmd, jobname, logfile, n=nthreads, a='mpirun')

    # the three per-candidate post-processing stages share the same fan-out
    # logic; previously the identical block was copy-pasted three times
    for component, func in (('results', do_results),
                            ('membership', do_membership),
                            ('plot', do_plot)):
        if component in self.opts.run:
            logger.info("Running '%s'..." % component)
            if len(args) > 1:
                # maxtasksperchild=1: fresh worker per task (guards against
                # leaky per-task state, e.g. pylab figures); the context
                # manager terminates the pool, which was previously leaked
                with Pool(maxtasksperchild=1) as pool:
                    pool.map(func, args)
            else:
                func(*args)

    if 'collect' in self.opts.run:
        logger.info("Running 'collect'...")
        results = odict()
        srcmdl = odict()
        params = odict()
        for config, name, label, coord in args:
            srcfile = make_filenames(self.config, name)['srcfile']
            # BUGFIX: read the file once with a context manager; the original
            # opened it three times and never closed the handles.  The Loader
            # is passed explicitly (required by modern PyYAML); yaml.Loader
            # preserves the original full-loader semantics.
            with open(srcfile) as f:
                data = yaml.load(f, Loader=yaml.Loader)
            results[name] = data['results']
            srcmdl[name] = data['source']
            params[name] = data['params']

        for base, output in [('results.yaml', results),
                             ('srcmdl.yaml', srcmdl),
                             ('params.yaml', params)]:
            outfile = join(self.outdir, base)
            with open(outfile, 'w') as out:
                out.write(yaml.dump(output))

    if 'scan' in self.opts.run:
        logger.info("Running 'scan'...")
        for config, name, label, coord in args:
            logdir = mkdir('plots/log')
            # BUGFIX: the '%s' placeholder was never filled, so every job
            # logged to a file literally named '%s_lnlscan.log'
            logfile = join(logdir, '%s_lnlscan.log' % label)
            for xpar, ypar in (('age', 'metallicity'),
                               ('metallicity', 'distance_modulus'),
                               ('age', 'distance_modulus')):
                cmd = ('python lnlscan.py %s --name %s --xpar %s --xbins 45 '
                       '--ypar %s --ybins 45' % (self.opts.config, name, xpar, ypar))
                self.batch.submit(cmd, logfile=logfile)
# Monkey-patch the generic Pipeline with this module's run() implementation,
# then build it from the module docstring/components and execute it with the
# parsed command-line arguments.
Pipeline.run = run
pipeline = Pipeline(__doc__,components)
pipeline.parser.add_coords(radius=True,targets=True)
pipeline.parser.add_ncores()
pipeline.parse_args()
pipeline.execute()
| 34.855372 | 150 | 0.620865 |
abad3b5d2c624201bd1f358d40fc91312bf8eb24 | 18,903 | py | Python | jina/peapods/runtimes/zmq/zed.py | soumik2012/jina | 990a51d6a69a40146e6705eb0b8e6caf841a6fcc | [
"Apache-2.0"
] | 2 | 2021-11-01T10:16:45.000Z | 2021-11-08T08:35:44.000Z | jina/peapods/runtimes/zmq/zed.py | soumik2012/jina | 990a51d6a69a40146e6705eb0b8e6caf841a6fcc | [
"Apache-2.0"
] | null | null | null | jina/peapods/runtimes/zmq/zed.py | soumik2012/jina | 990a51d6a69a40146e6705eb0b8e6caf841a6fcc | [
"Apache-2.0"
] | null | null | null | import argparse
import re
import time
from collections import defaultdict
from typing import Dict, List
import zmq
from .base import ZMQRuntime
from ...zmq import ZmqStreamlet
from .... import __default_endpoint__
from ....enums import OnErrorStrategy, SocketType
from ....excepts import (
NoExplicitMessage,
ExecutorFailToLoad,
MemoryOverHighWatermark,
ChainedPodException,
BadConfigSource,
RuntimeTerminated,
UnknownControlCommand,
)
from ....executors import BaseExecutor
from ....helper import random_identity, typename
from ....logging.profile import used_memory
from ....proto import jina_pb2
from ....types.arrays.document import DocumentArray
from ....types.message import Message
from ....types.request import Request
from ....types.routing.table import RoutingTable
class ZEDRuntime(ZMQRuntime):
"""Runtime procedure leveraging :class:`ZmqStreamlet` for Executor."""
    def __init__(self, args: 'argparse.Namespace', ctrl_addr: str, **kwargs):
        """Initialize private parameters and execute private loading functions.

        :param args: args from CLI
        :param ctrl_addr: control port address
        :param kwargs: extra keyword arguments
        """
        super().__init__(args, ctrl_addr, **kwargs)
        self._id = random_identity()
        self._last_active_time = time.perf_counter()

        # request/message currently being processed (set in _pre_hook)
        self._request = None
        self._message = None

        # all pending messages collected so far, key is the request id
        self._pending_msgs = defaultdict(list)  # type: Dict[str, List['Message']]
        # parts of a multi-part request gathered so far (populated by
        # _pre_hook once messages for a request id start arriving)
        self._partial_requests = None
        self._partial_messages = None

        # idle_dealer_ids only becomes non-None when it receives IDLE ControlRequest
        self._idle_dealer_ids = set()

        # plugins are loaded before the executor so user `py_modules` are
        # importable when the executor config is parsed
        self._load_zmqstreamlet()
        self._load_plugins()
        self._load_executor()
    def run_forever(self):
        """Start the `ZmqStreamlet`, dispatching every received message to
        :meth:`_msg_callback`."""
        self._zmqstreamlet.start(self._msg_callback)
    def teardown(self):
        """Close the `ZmqStreamlet` and `Executor`, then tear down the base
        runtime."""
        self._zmqstreamlet.close()
        self._executor.close()
        super().teardown()
#: Private methods required by :meth:`setup`
    def _load_zmqstreamlet(self):
        """Load ZMQStreamlet to this runtime."""
        # important: pass this runtime's ctrl address so it replaces the one
        # generated in the main process/thread
        self._zmqstreamlet = ZmqStreamlet(
            args=self.args,
            logger=self.logger,
            ctrl_addr=self.ctrl_addr,
            ready_event=self.is_ready_event,
        )
    def _load_executor(self):
        """Load the executor to this runtime, specified by ``uses`` CLI argument.

        Every failure mode is re-raised as :class:`ExecutorFailToLoad` so the
        caller sees a uniform startup error.
        """
        try:
            self._executor = BaseExecutor.load_config(
                self.args.uses,
                override_with=self.args.uses_with,
                override_metas=self.args.uses_metas,
                override_requests=self.args.uses_requests,
                runtime_args=vars(self.args),  # expose the runtime's CLI args to the executor
            )
        except BadConfigSource as ex:
            # malformed/unsupported `uses` value (e.g. a bare docker image name)
            self.logger.error(
                f'fail to load config from {self.args.uses}, if you are using docker image for --uses, '
                f'please use "docker://YOUR_IMAGE_NAME"'
            )
            raise ExecutorFailToLoad from ex
        except FileNotFoundError as ex:
            # a file referenced by the config could not be found
            self.logger.error(f'fail to load file dependency')
            raise ExecutorFailToLoad from ex
        except Exception as ex:
            # anything else raised while constructing the executor
            self.logger.critical(f'can not load the executor from {self.args.uses}')
            raise ExecutorFailToLoad from ex
def _load_plugins(self):
"""Load the plugins if needed necessary to load executors."""
if self.args.py_modules:
from ....importer import PathImporter
PathImporter.add_modules(*self.args.py_modules)
#: Private methods required by :meth:`teardown`
def _check_memory_watermark(self):
"""Check the memory watermark."""
if used_memory() > self.args.memory_hwm > 0:
raise MemoryOverHighWatermark
#: Private methods required by run_forever
    def _pre_hook(self, msg: 'Message') -> 'ZEDRuntime':
        """
        Pre-hook function, what to do after first receiving the message.

        Records the message on the runtime, gathers the parts of multi-part
        requests, and raises control-flow exceptions the caller uses to
        short-circuit handling.

        :param msg: received message
        :return: `ZEDRuntime`
        :raises NoExplicitMessage: when more parts of a multi-part request
            are still expected
        :raises ChainedPodException: when an upstream Pod already failed and
            the error strategy says to skip handling
        """
        msg.add_route(self.name, self._id)
        self._request = msg.request
        self._message = msg

        if self.expect_parts > 1:
            # buffer this part under its request id until all parts arrive
            req_id = msg.envelope.request_id
            self._pending_msgs[req_id].append(msg)
            self._partial_messages = self._pending_msgs[req_id]
            self._partial_requests = [v.request for v in self._partial_messages]

        if self.logger.debug_enabled:
            self._log_info_msg(
                msg,
                f'({len(self.partial_requests)}/{self.expect_parts} parts)'
                if self.expect_parts > 1
                else '',
            )

        # still waiting for more parts: signal the caller to do nothing yet
        if self.expect_parts > 1 and self.expect_parts > len(self.partial_requests):
            # NOTE: reduce priority is higher than chain exception
            # otherwise a reducer will lose its function when earlier pods raise exception
            raise NoExplicitMessage

        if self.request_type == 'ControlRequest':
            self._handle_control_req()

        # propagate an upstream error instead of handling the message
        if (
            msg.envelope.status.code == jina_pb2.StatusProto.ERROR
            and self.args.on_error_strategy >= OnErrorStrategy.SKIP_HANDLE
        ):
            raise ChainedPodException

        return self
    def _log_info_msg(self, msg, part_str):
        """Log a one-line debug summary of the received message.

        :param msg: the incoming message
        :param part_str: pre-formatted ``'(i/n parts)'`` suffix, or ``''``
            for single-part requests
        """
        info_msg = f'recv {msg.envelope.request_type} '
        if self.request_type == 'DataRequest':
            info_msg += f'({self.envelope.header.exec_endpoint}) - ({self.envelope.request_id}) '
        elif self.request_type == 'ControlRequest':
            info_msg += f'({self.request.command}) '
        info_msg += f'{part_str} from {msg.colored_route}'
        self.logger.debug(info_msg)
    def _post_hook(self, msg: 'Message') -> 'ZEDRuntime':
        """
        Post-hook function, what to do before handing out the message.

        Updates activity bookkeeping, enforces the memory watermark, and for
        multi-part requests merges the buffered parts into this message.

        :param msg: received message
        :return: `ZEDRuntime`
        """
        # do NOT access `msg.request.*` in the _pre_hook, as it will trigger the deserialization
        # all meta information should be stored and accessed via `msg.envelope`
        self._last_active_time = time.perf_counter()
        self._check_memory_watermark()

        if self.expect_parts > 1:
            # drop the pending buffer for this request and fold the parts'
            # envelopes into the outgoing message
            msgs = self._pending_msgs.pop(msg.envelope.request_id)
            msg.merge_envelope_from(msgs)

        msg.update_timestamp()
        return self
@staticmethod
def _parse_params(parameters: Dict, executor_name: str):
parsed_params = parameters
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
    def _handle(self) -> 'ZEDRuntime':
        """Register the current message to this pea, so that all message-related properties are up-to-date, including
        :attr:`request`, :attr:`prev_requests`, :attr:`message`, :attr:`prev_messages`. And then call the executor to handle
        this message if its envelope's status is not ERROR, else skip handling of message.
        .. note::
            Handle does not handle explicitly message because it may wait for different messages when different parts are expected
        :return: ZEDRuntime procedure.
        """
        # skip executor for non-DataRequest
        if self.request_type != 'DataRequest':
            self.logger.debug(f'skip executor: not data request')
            return self
        # migrated from the previously RouteDriver logic
        # set dealer id
        # pop one idle dealer and address the envelope to it; once the pool is
        # drained, upstream polling is paused until an IDLE control msg refills it
        if self._idle_dealer_ids:
            dealer_id = self._idle_dealer_ids.pop()
            self.envelope.receiver_id = dealer_id
            # when no available dealer, pause the pollin from upstream
            if not self._idle_dealer_ids:
                self._zmqstreamlet.pause_pollin()
            self.logger.debug(
                f'using route, set receiver_id: {self.envelope.receiver_id}'
            )
        # skip executor if target_peapod mismatch
        # NOTE(review): re.match anchors only at the start of `self.name`, so a
        # target_peapod that is a prefix pattern also matches — confirm intended
        if not re.match(self.envelope.header.target_peapod, self.name):
            self.logger.debug(
                f'skip executor: mismatch target, target: {self.envelope.header.target_peapod}, name: {self.name}'
            )
            return self
        # skip executor if endpoints mismatch
        if (
            self.envelope.header.exec_endpoint not in self._executor.requests
            and __default_endpoint__ not in self._executor.requests
        ):
            self.logger.debug(
                f'skip executor: mismatch request, exec_endpoint: {self.envelope.header.exec_endpoint}, requests: {self._executor.requests}'
            )
            return self
        params = self._parse_params(self.request.parameters, self._executor.metas.name)
        # executor logic
        r_docs = self._executor(
            req_endpoint=self.envelope.header.exec_endpoint,
            docs=self.docs,
            parameters=params,
            docs_matrix=self.docs_matrix,
            groundtruths=self.groundtruths,
            groundtruths_matrix=self.groundtruths_matrix,
        )
        # assigning result back to request
        # 1. Return none: do nothing
        # 2. Return nonempty and non-DocumentArray: raise error
        # 3. Return DocArray, but the memory pointer says it is the same as self.docs: do nothing
        # 4. Return DocArray and its not a shallow copy of self.docs: assign self.request.docs
        if r_docs is not None:
            if not isinstance(r_docs, DocumentArray):
                raise TypeError(
                    f'return type must be {DocumentArray!r} or None, but getting {typename(r_docs)}'
                )
            elif r_docs != self.request.docs:
                # this means the returned DocArray is a completely new one
                self.request.docs.clear()
                self.request.docs.extend(r_docs)
        return self
def _handle_control_req(self):
# migrated from previous ControlDriver logic
if self.request.command == 'TERMINATE':
self.envelope.status.code = jina_pb2.StatusProto.SUCCESS
raise RuntimeTerminated
elif self.request.command == 'STATUS':
self.envelope.status.code = jina_pb2.StatusProto.READY
self.request.parameters = vars(self.args)
elif self.request.command == 'IDLE':
self._idle_dealer_ids.add(self.envelope.receiver_id)
self._zmqstreamlet.resume_pollin()
self.logger.debug(
f'{self.envelope.receiver_id} is idle, now I know these idle peas {self._idle_dealer_ids}'
)
elif self.request.command == 'CANCEL':
if self.envelope.receiver_id in self._idle_dealer_ids:
self._idle_dealer_ids.remove(self.envelope.receiver_id)
elif self.request.command == 'ACTIVATE':
self._zmqstreamlet._send_idle_to_router()
elif self.request.command == 'DEACTIVATE':
self._zmqstreamlet._send_cancel_to_router()
else:
raise UnknownControlCommand(
f'don\'t know how to handle {self.request.command}'
)
def _callback(self, msg: 'Message'):
self.is_post_hook_done = False #: if the post_hook is called
self._pre_hook(msg)._handle()._post_hook(msg)
self.is_post_hook_done = True
return msg
    def _msg_callback(self, msg: 'Message') -> None:
        """
        Callback function after receiving the message
        When nothing is returned then nothing is send out via :attr:`zmqlet.sock_out`.
        :param msg: received message
        """
        # NOTE: the except-clause order below is load-bearing — e.g.
        # NoExplicitMessage must be caught before the generic catch-all.
        try:
            # notice how executor related exceptions are handled here
            # generally unless executor throws an OSError, the exception are caught and solved inplace
            processed_msg = self._callback(msg)
            # dont sent responses for CANCEL and IDLE control requests
            if msg.is_data_request or msg.request.command not in ['CANCEL', 'IDLE']:
                self._zmqstreamlet.send_message(processed_msg)
        except RuntimeTerminated:
            # this is the proper way to end when a terminate signal is sent
            self._zmqstreamlet.send_message(msg)
            self._zmqstreamlet.close()
        except KeyboardInterrupt as kbex:
            # save executor
            self.logger.debug(f'{kbex!r} causes the breaking from the event loop')
            self._zmqstreamlet.send_message(msg)
            self._zmqstreamlet.close(flush=False)
        except (SystemError, zmq.error.ZMQError) as ex:
            # save executor
            self.logger.debug(f'{ex!r} causes the breaking from the event loop')
            self._zmqstreamlet.send_message(msg)
            self._zmqstreamlet.close()
        except MemoryOverHighWatermark:
            # only logs; the message is intentionally dropped, not forwarded
            self.logger.critical(
                f'memory usage {used_memory()} GB is above the high-watermark: {self.args.memory_hwm} GB'
            )
        except NoExplicitMessage:
            # silent and do not propagate message anymore
            # 1. wait partial message to be finished
            # 2. dealer send a control message and no need to go on
            pass
        except (RuntimeError, Exception, ChainedPodException) as ex:
            # general runtime error and nothing serious, we simply mark the message to error and pass on
            if not self.is_post_hook_done:
                self._post_hook(msg)
            if self.args.on_error_strategy == OnErrorStrategy.THROW_EARLY:
                raise
            if isinstance(ex, ChainedPodException):
                # the error is print from previous pod, no need to show it again
                # hence just add exception and propagate further
                # please do NOT add logger.error here!
                msg.add_exception()
            else:
                msg.add_exception(ex, executor=getattr(self, '_executor'))
                # NOTE(review): because `+` binds tighter than the conditional,
                # the whole concatenation is dropped (empty message logged)
                # when --quiet-error is set — confirm this is intended
                self.logger.error(
                    f'{ex!r}'
                    + f'\n add "--quiet-error" to suppress the exception details'
                    if not self.args.quiet_error
                    else '',
                    exc_info=not self.args.quiet_error,
                )
            self._zmqstreamlet.send_message(msg)
#: Some class-specific properties
@property
def is_idle(self) -> bool:
"""
Return ``True`` when current time is ``max_idle_time`` seconds late than the last active time
:return: True if idle else false.
"""
return (time.perf_counter() - self._last_active_time) > self.args.max_idle_time
    @property
    def request(self) -> 'Request':
        """
        Get the current request body inside the protobuf message

        :return: :class:`ZEDRuntime` request
        """
        # set by the pre-hook when the message arrives
        return self._request
    @property
    def message(self) -> 'Message':
        """
        Get the current protobuf message to be processed

        :return: :class:`ZEDRuntime` message
        """
        # set by the pre-hook when the message arrives
        return self._message
    @property
    def request_type(self) -> str:
        """
        Get the type of message being processed (e.g. 'DataRequest' or
        'ControlRequest'), read from the envelope without deserializing
        the request body.

        :return: request type
        """
        return self._message.envelope.request_type
@property
def expect_parts(self) -> int:
"""
The expected number of partial messages before trigger :meth:`handle`
:return: expected number of partial messages
"""
if self.message.is_data_request:
if self.args.socket_in == SocketType.ROUTER_BIND:
graph = RoutingTable(self._message.envelope.routing_table)
return graph.active_target_pod.expected_parts
else:
return self.args.num_part
else:
return 1
    @property
    def partial_requests(self) -> List['Request']:
        """
        The collected partial requests under the current ``request_id``

        :return: collected partial requests
        """
        # populated by the pre-hook while buffering multi-part messages
        return self._partial_requests
    @property
    def partial_messages(self) -> List['Message']:
        """
        The collected partial messages under the current ``request_id``

        :return: collected partial messages
        """
        # populated by the pre-hook while buffering multi-part messages
        return self._partial_messages
def _get_docs(self, field: str) -> 'DocumentArray':
if self.expect_parts > 1:
result = DocumentArray(
[d for r in reversed(self.partial_requests) for d in getattr(r, field)]
)
else:
result = getattr(self.request, field)
# to unify all length=0 DocumentArray (or any other results) will simply considered as None
# otherwise the executor has to handle DocArray(0)
if len(result):
return result
def _get_docs_matrix(self, field) -> List['DocumentArray']:
"""DocumentArray from (multiple) requests
:param field: either `docs` or `groundtruths`
.. # noqa: DAR201"""
if self.expect_parts > 1:
result = [getattr(r, field) for r in reversed(self.partial_requests)]
else:
result = [getattr(self.request, field)]
# to unify all length=0 DocumentArray (or any other results) will simply considered as None
# otherwise, the executor has to handle [None, None, None] or [DocArray(0), DocArray(0), DocArray(0)]
len_r = sum(len(r) for r in result)
if len_r:
return result
    @property
    def docs(self) -> 'DocumentArray':
        """Return a DocumentArray by concatenate (multiple) ``requests.docs``
        .. # noqa: DAR201"""
        # delegates to _get_docs, which returns None when the result is empty
        return self._get_docs('docs')
    @property
    def groundtruths(self) -> 'DocumentArray':
        """Return a DocumentArray by concatenate (multiple) ``requests.groundtruths``
        .. # noqa: DAR201"""
        # delegates to _get_docs, which returns None when the result is empty
        return self._get_docs('groundtruths')
    @property
    def docs_matrix(self) -> List['DocumentArray']:
        """Return a list of DocumentArray from multiple requests
        .. # noqa: DAR201"""
        # delegates to _get_docs_matrix, which returns None when all are empty
        return self._get_docs_matrix('docs')
    @property
    def groundtruths_matrix(self) -> List['DocumentArray']:
        """A flattened DocumentArray from (multiple) requests
        .. # noqa: DAR201"""
        # delegates to _get_docs_matrix, which returns None when all are empty
        return self._get_docs_matrix('groundtruths')
    @property
    def envelope(self) -> 'jina_pb2.EnvelopeProto':
        """Get the current message envelope

        .. # noqa: DAR201
        """
        # envelope access never triggers request-body deserialization
        return self._message.envelope
| 37.655378 | 140 | 0.621965 |
d3662a53ca9685860f63799a3b38785ac480cf40 | 36,976 | py | Python | tensorflow/python/ops/ragged/row_partition_test.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 78 | 2020-08-04T12:36:25.000Z | 2022-03-25T04:23:40.000Z | tensorflow/python/ops/ragged/row_partition_test.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 203 | 2019-06-14T23:53:10.000Z | 2022-02-10T02:27:23.000Z | tensorflow/python/ops/ragged/row_partition_test.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 28 | 2017-03-25T13:48:09.000Z | 2021-10-14T00:10:50.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.ragged.RowPartition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import row_partition
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.ops.ragged.row_partition import RowPartitionSpec
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Unit tests covering RowPartition construction, factory-op validation,
  string representation, and precomputed-encoding merge behavior."""
  #=============================================================================
  # RaggedTensor class docstring examples
  #=============================================================================
  def testClassDocStringExamples(self):
    # From section: "Component Tensors"
    rp = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
    self.assertAllEqual(rp.row_splits(), [0, 4, 4, 7, 8, 8])
    del rp
    # From section: "Alternative Row-Partitioning Schemes"
    rt1 = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
    rt2 = RowPartition.from_row_lengths(row_lengths=[4, 0, 3, 1, 0])
    rt3 = RowPartition.from_value_rowids(
        value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
    rt4 = RowPartition.from_row_starts(row_starts=[0, 4, 4, 7, 8], nvals=8)
    rt5 = RowPartition.from_row_limits(row_limits=[4, 4, 7, 8, 8])
    for rp in (rt1, rt2, rt3, rt4, rt5):
      self.assertAllEqual(rp.row_splits(), [0, 4, 4, 7, 8, 8])
    del rt1, rt2, rt3, rt4, rt5
    # From section: "Multiple Ragged Dimensions"
    inner_rt = RowPartition.from_row_splits(row_splits=[0, 4, 4, 7, 8, 8])
    outer_rt = RowPartition.from_row_splits(row_splits=[0, 3, 3, 5])
    del inner_rt, outer_rt
  #=============================================================================
  # RaggedTensor Constructor (private)
  #=============================================================================
  def testRaggedTensorConstruction(self):
    row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    rp = RowPartition(
        row_splits=row_splits,
        internal=row_partition._row_partition_factory_key)
    self.assertAllEqual(rp.row_splits(), [0, 2, 2, 5, 6, 7])
  def testRaggedTensorConstructionErrors(self):
    row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    with self.assertRaisesRegexp(ValueError,
                                 'RaggedTensor constructor is private'):
      RowPartition(row_splits=row_splits)
    with self.assertRaisesRegexp(TypeError,
                                 'Row-partitioning argument must be a Tensor'):
      RowPartition(
          row_splits=[0, 2, 2, 5, 6, 7],
          internal=row_partition._row_partition_factory_key)
    with self.assertRaisesRegexp(ValueError,
                                 r'Shape \(6, 1\) must have rank 1'):
      RowPartition(
          row_splits=array_ops.expand_dims(row_splits, 1),
          internal=row_partition._row_partition_factory_key)
    with self.assertRaisesRegexp(TypeError,
                                 'Cached value must be a Tensor or None.'):
      RowPartition(
          row_splits=row_splits,
          row_lengths=[2, 3, 4],
          internal=row_partition._row_partition_factory_key)
  #=============================================================================
  # RaggedTensor Factory Ops
  #=============================================================================
  def testFromValueRowIdsWithDerivedNRows(self):
    # nrows is known at graph creation time.
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    # TODO(martinz): add nrows
    rp = RowPartition.from_value_rowids(value_rowids, validate=False)
    self.assertEqual(rp.dtype, dtypes.int64)
    rp_row_splits = rp.row_splits()
    rp_value_rowids = rp.value_rowids()
    rp_nrows = rp.nrows()
    self.assertIs(rp_value_rowids, value_rowids)  # value_rowids
    self.assertAllEqual(rp_value_rowids, value_rowids)
    self.assertAllEqual(rp_nrows, 5)
    self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
  def testFromValueRowIdsWithDerivedNRowsDynamic(self):
    # nrows is not known at graph creation time.
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    value_rowids = array_ops.placeholder_with_default(value_rowids, shape=None)
    rp = RowPartition.from_value_rowids(value_rowids, validate=False)
    rp_value_rowids = rp.value_rowids()
    rp_nrows = rp.nrows()
    self.assertIs(rp_value_rowids, value_rowids)  # value_rowids
    self.assertAllEqual(rp_value_rowids, value_rowids)
    self.assertAllEqual(rp_nrows, 5)
  def testFromValueRowIdsWithExplicitNRows(self):
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    nrows = constant_op.constant(7, dtypes.int64)
    rp = RowPartition.from_value_rowids(value_rowids, nrows, validate=False)
    rp_value_rowids = rp.value_rowids()
    rp_nrows = rp.nrows()
    rp_row_splits = rp.row_splits()
    self.assertIs(rp_value_rowids, value_rowids)  # value_rowids
    self.assertIs(rp_nrows, nrows)  # nrows
    self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7, 7, 7])
  def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self):
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    nrows = constant_op.constant(5, dtypes.int64)
    rp = RowPartition.from_value_rowids(value_rowids, nrows, validate=False)
    rp_value_rowids = rp.value_rowids()
    rp_nrows = rp.nrows()
    rp_row_splits = rp.row_splits()
    self.assertIs(rp_value_rowids, value_rowids)  # value_rowids
    self.assertIs(rp_nrows, nrows)  # nrows
    self.assertAllEqual(rp_value_rowids, value_rowids)
    self.assertAllEqual(rp_nrows, nrows)
    self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
  def testFromValueRowIdsWithEmptyValues(self):
    rp = RowPartition.from_value_rowids([])
    rp_nrows = rp.nrows()
    self.assertEqual(rp.dtype, dtypes.int64)
    self.assertEqual(rp.value_rowids().shape.as_list(), [0])
    self.assertAllEqual(rp_nrows, 0)
  def testFromRowSplits(self):
    row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    rp = RowPartition.from_row_splits(row_splits, validate=False)
    self.assertEqual(rp.dtype, dtypes.int64)
    rp_row_splits = rp.row_splits()
    rp_nrows = rp.nrows()
    self.assertIs(rp_row_splits, row_splits)
    self.assertAllEqual(rp_nrows, 5)
  def testFromRowSplitsWithDifferentSplitTypes(self):
    splits1 = [0, 2, 2, 5, 6, 7]
    splits2 = np.array([0, 2, 2, 5, 6, 7], np.int64)
    splits3 = np.array([0, 2, 2, 5, 6, 7], np.int32)
    splits4 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    splits5 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int32)
    rt1 = RowPartition.from_row_splits(splits1)
    rt2 = RowPartition.from_row_splits(splits2)
    rt3 = RowPartition.from_row_splits(splits3)
    rt4 = RowPartition.from_row_splits(splits4)
    rt5 = RowPartition.from_row_splits(splits5)
    self.assertEqual(rt1.row_splits().dtype, dtypes.int64)
    self.assertEqual(rt2.row_splits().dtype, dtypes.int64)
    self.assertEqual(rt3.row_splits().dtype, dtypes.int32)
    self.assertEqual(rt4.row_splits().dtype, dtypes.int64)
    self.assertEqual(rt5.row_splits().dtype, dtypes.int32)
  def testFromRowSplitsWithEmptySplits(self):
    err_msg = 'row_splits tensor may not be empty'
    with self.assertRaisesRegexp(ValueError, err_msg):
      RowPartition.from_row_splits([])
  def testFromRowStarts(self):
    nvals = constant_op.constant(7)
    row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64)
    rp = RowPartition.from_row_starts(row_starts, nvals, validate=False)
    self.assertEqual(rp.dtype, dtypes.int64)
    rp_row_starts = rp.row_starts()
    rp_row_splits = rp.row_splits()
    rp_nrows = rp.nrows()
    self.assertAllEqual(rp_nrows, 5)
    self.assertAllEqual(rp_row_starts, row_starts)
    self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
  def testFromRowLimits(self):
    row_limits = constant_op.constant([2, 2, 5, 6, 7], dtypes.int64)
    rp = RowPartition.from_row_limits(row_limits, validate=False)
    self.assertEqual(rp.dtype, dtypes.int64)
    rp_row_limits = rp.row_limits()
    rp_row_splits = rp.row_splits()
    rp_nrows = rp.nrows()
    self.assertAllEqual(rp_nrows, 5)
    self.assertAllEqual(rp_row_limits, row_limits)
    self.assertAllEqual(rp_row_splits, [0, 2, 2, 5, 6, 7])
  def testFromRowLengths(self):
    row_lengths = constant_op.constant([2, 0, 3, 1, 1], dtypes.int64)
    rp = RowPartition.from_row_lengths(row_lengths, validate=False)
    self.assertEqual(rp.dtype, dtypes.int64)
    rp_row_lengths = rp.row_lengths()
    rp_nrows = rp.nrows()
    self.assertIs(rp_row_lengths, row_lengths)  # nrows
    self.assertAllEqual(rp_nrows, 5)
    self.assertAllEqual(rp_row_lengths, row_lengths)
  def testFromUniformRowLength(self):
    nvals = 16
    a1 = RowPartition.from_uniform_row_length(
        nvals=nvals, uniform_row_length=2)
    self.assertAllEqual(a1.uniform_row_length(), 2)
    self.assertAllEqual(a1.nrows(), 8)
  def testFromUniformRowLengthWithEmptyValues(self):
    a = RowPartition.from_uniform_row_length(
        nvals=0, uniform_row_length=0, nrows=10)
    self.assertEqual(self.evaluate(a.nvals()), 0)
    self.assertEqual(self.evaluate(a.nrows()), 10)
  def testFromUniformRowLengthWithPlaceholders1(self):
    nvals = array_ops.placeholder_with_default(
        constant_op.constant(6, dtype=dtypes.int64), None)
    rt1 = RowPartition.from_uniform_row_length(
        nvals=nvals, uniform_row_length=3)
    const_nvals1 = self.evaluate(rt1.nvals())
    self.assertEqual(const_nvals1, 6)
  def testFromUniformRowLengthWithPlaceholders2(self):
    nvals = array_ops.placeholder_with_default(6, None)
    ph_rowlen = array_ops.placeholder_with_default(3, None)
    rt2 = RowPartition.from_uniform_row_length(
        nvals=nvals, uniform_row_length=ph_rowlen)
    const_nvals2 = self.evaluate(rt2.nvals())
    self.assertEqual(const_nvals2, 6)
  def testFromValueRowIdsWithBadNRows(self):
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    nrows = constant_op.constant(5, dtypes.int64)
    with self.assertRaisesRegexp(ValueError, r'Expected nrows >= 0; got -2'):
      RowPartition.from_value_rowids(
          value_rowids=array_ops.placeholder_with_default(value_rowids, None),
          nrows=-2)
    with self.assertRaisesRegexp(
        ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, '
        r'value_rowids\[-1\]=4'):
      RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=2)
    with self.assertRaisesRegexp(
        ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, '
        r'value_rowids\[-1\]=4'):
      RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=4)
    with self.assertRaisesRegexp(ValueError,
                                 r'Shape \(7, 1\) must have rank 1'):
      RowPartition.from_value_rowids(
          value_rowids=array_ops.expand_dims(value_rowids, 1), nrows=nrows)
    with self.assertRaisesRegexp(ValueError, r'Shape \(1,\) must have rank 0'):
      RowPartition.from_value_rowids(
          value_rowids=value_rowids, nrows=array_ops.expand_dims(nrows, 0))
  #=============================================================================
  # RowPartition.__str__
  #=============================================================================
  def testRowPartitionStr(self):
    row_splits = [0, 2, 5, 6, 6, 7]
    rp = RowPartition.from_row_splits(row_splits, validate=False)
    splits_type = 'int64'
    if context.executing_eagerly():
      expected_repr = ('tf.RowPartition(row_splits=tf.Tensor([0 2 5 6 6 7], '
                       'shape=(6,), dtype=int64))')
    else:
      expected_repr = ('tf.RowPartition(row_splits='
                       'Tensor("RowPartitionFromRowSplits/row_splits:0", '
                       'shape=(6,), dtype={}))').format(splits_type)
    self.assertEqual(repr(rp), expected_repr)
    self.assertEqual(str(rp), expected_repr)
  @parameterized.parameters([
      # from_value_rowids
      {
          'descr': 'bad rank for value_rowids',
          'factory': RowPartition.from_value_rowids,
          'value_rowids': [[1, 2], [3, 4]],
          'nrows': 10
      },
      {
          'descr': 'bad rank for nrows',
          'factory': RowPartition.from_value_rowids,
          'value_rowids': [1, 2, 3, 4],
          'nrows': [10]
      },
      {
          'descr': 'negative value_rowid',
          'factory': RowPartition.from_value_rowids,
          'value_rowids': [-5, 2, 3, 4],
          'nrows': 10
      },
      {
          'descr': 'non-monotonic-increasing value_rowid',
          'factory': RowPartition.from_value_rowids,
          'value_rowids': [4, 3, 2, 1],
          'nrows': 10
      },
      {
          'descr': 'value_rowid > nrows',
          'factory': RowPartition.from_value_rowids,
          'value_rowids': [1, 2, 3, 4],
          'nrows': 2
      },
      # from_row_splits
      {
          'descr': 'bad rank for row_splits',
          'factory': RowPartition.from_row_splits,
          'row_splits': [[1, 2], [3, 4]]
      },
      {
          'descr': 'row_splits[0] != 0',
          'factory': RowPartition.from_row_splits,
          'row_splits': [2, 3, 4]
      },
      {
          'descr': 'non-monotonic-increasing row_splits',
          'factory': RowPartition.from_row_splits,
          'row_splits': [0, 3, 2, 4]
      },
      # from_row_lengths
      {
          'descr': 'bad rank for row_lengths',
          'factory': RowPartition.from_row_lengths,
          'row_lengths': [[1, 2], [1, 0]]
      },
      {
          'descr': 'negatve row_lengths',
          'factory': RowPartition.from_row_lengths,
          'row_lengths': [3, -1, 2]
      },
      # from_row_starts
      {
          'descr': 'bad rank for row_starts',
          'factory': RowPartition.from_row_starts,
          'nvals': 2,
          'row_starts': [[1, 2], [3, 4]]
      },
      {
          'descr': 'row_starts[0] != 0',
          'factory': RowPartition.from_row_starts,
          'nvals': 5,
          'row_starts': [2, 3, 4]
      },
      {
          'descr': 'non-monotonic-increasing row_starts',
          'factory': RowPartition.from_row_starts,
          'nvals': 4,
          'row_starts': [0, 3, 2, 4]
      },
      {
          'descr': 'row_starts[0] > nvals',
          'factory': RowPartition.from_row_starts,
          'nvals': 4,
          'row_starts': [0, 2, 3, 5]
      },
      # from_row_limits
      {
          'descr': 'bad rank for row_limits',
          'factory': RowPartition.from_row_limits,
          'row_limits': [[1, 2], [3, 4]]
      },
      {
          'descr': 'row_limits[0] < 0',
          'factory': RowPartition.from_row_limits,
          'row_limits': [-1, 3, 4]
      },
      {
          'descr': 'non-monotonic-increasing row_limits',
          'factory': RowPartition.from_row_limits,
          'row_limits': [0, 3, 2, 4]
      },
      # from_uniform_row_length
      {
          'descr': 'rowlen * nrows != nvals (1)',
          'factory': RowPartition.from_uniform_row_length,
          'nvals': 5,
          'uniform_row_length': 3
      },
      {
          'descr': 'rowlen * nrows != nvals (2)',
          'factory': RowPartition.from_uniform_row_length,
          'nvals': 5,
          'uniform_row_length': 6
      },
      {
          'descr': 'rowlen * nrows != nvals (3)',
          'factory': RowPartition.from_uniform_row_length,
          'nvals': 6,
          'uniform_row_length': 3,
          'nrows': 3
      },
      {
          'descr': 'rowlen must be a scalar',
          'factory': RowPartition.from_uniform_row_length,
          'nvals': 4,
          'uniform_row_length': [2]
      },
      {
          'descr': 'rowlen must be nonnegative',
          'factory': RowPartition.from_uniform_row_length,
          'nvals': 4,
          'uniform_row_length': -1
      },
  ])
  def testFactoryValidation(self, descr, factory, **kwargs):
    # When input tensors have shape information, some of these errors will be
    # detected statically.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      partition = factory(**kwargs)
      self.evaluate(partition.row_splits())
    # Remove shape information (by wrapping tensors in placeholders), and check
    # that we detect the errors when the graph is run.
    if not context.executing_eagerly():
      def wrap_arg(v):
        return array_ops.placeholder_with_default(
            constant_op.constant(v, dtype=dtypes.int64),
            tensor_shape.TensorShape(None))
      kwargs = dict((k, wrap_arg(v)) for (k, v) in kwargs.items())
      with self.assertRaises(errors.InvalidArgumentError):
        partition = factory(**kwargs)
        self.evaluate(partition.row_splits())
  @parameterized.named_parameters([
      ('FromRowSplits', lambda: RowPartition.from_row_splits([0, 2, 8]),
       ['row_splits']),
      ('FromRowLengths', lambda: RowPartition.from_row_lengths([3, 0, 8]),
       ['row_splits', 'row_lengths']),
      ('FromValueRowIds',
       lambda: RowPartition.from_value_rowids([0, 0, 3, 4, 4, 4]),
       ['row_splits', 'value_rowids', 'row_lengths', 'nrows']),
      ('FromRowStarts',
       lambda: RowPartition.from_row_starts([0, 3, 7], nvals=10),
       ['row_splits']),
      ('FromRowLimits', lambda: RowPartition.from_row_limits([3, 7, 10]),
       ['row_splits']),
  ])
  def testPrecomputedSplits(self, rp_factory, expected_encodings):
    rp = rp_factory()
    self.assertEqual(rp.has_precomputed_row_splits(),
                     'row_splits' in expected_encodings)
    self.assertEqual(rp.has_precomputed_row_lengths(),
                     'row_lengths' in expected_encodings)
    self.assertEqual(rp.has_precomputed_value_rowids(),
                     'value_rowids' in expected_encodings)
    self.assertEqual(rp.has_precomputed_nrows(), 'nrows' in expected_encodings)
  def testWithPrecomputedSplits(self):
    rp = RowPartition.from_row_splits([0, 2, 8])
    rp_with_row_splits = rp.with_precomputed_row_splits()
    self.assertTrue(rp_with_row_splits.has_precomputed_row_splits())
    self.assertFalse(rp.has_precomputed_row_lengths())
    rp_with_row_lengths = rp.with_precomputed_row_lengths()
    self.assertTrue(rp_with_row_lengths.has_precomputed_row_lengths())
    self.assertFalse(rp.has_precomputed_value_rowids())
    rp_with_value_rowids = rp.with_precomputed_value_rowids()
    self.assertTrue(rp_with_value_rowids.has_precomputed_value_rowids())
    self.assertFalse(rp.has_precomputed_nrows())
    rp_with_nrows = rp.with_precomputed_nrows()
    self.assertTrue(rp_with_nrows.has_precomputed_nrows())
  @parameterized.named_parameters([
      dict(
          testcase_name='FromRowSplitsAndRowSplits',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_row_splits([0, 3, 8]),
          expected_encodings=['row_splits']),
      dict(
          testcase_name='FromRowSplitsAndUniformRowLength',
          x=lambda: RowPartition.from_row_splits([0, 3, 6]),
          y=lambda: RowPartition.from_uniform_row_length(3, nvals=6),
          expected_encodings=['row_splits', 'uniform_row_length', 'nrows']),
      dict(
          testcase_name='FromRowSplitsAndRowLengths',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_row_lengths([3, 5]),
          expected_encodings=['row_splits', 'row_lengths']),
      dict(
          testcase_name='FromRowSplitsAndValueRowIds',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_value_rowids([0, 0, 0, 1, 1, 1, 1, 1]),
          expected_encodings=[
              'row_splits', 'row_lengths', 'value_rowids', 'nrows'
          ]),
      dict(
          testcase_name='FromRowSplitsAndRowSplitsPlusNRows',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_row_splits([0, 3, 8]).
          with_precomputed_nrows(),
          expected_encodings=['row_splits', 'nrows']),
  ])
  def testMergePrecomputedEncodings(self, x, y, expected_encodings):
    x = x()
    y = y()
    for validate in (True, False):
      result = x.merge_precomputed_encodings(y, validate)
      self.assertEqual(result.has_precomputed_row_splits(),
                       'row_splits' in expected_encodings)
      self.assertEqual(result.has_precomputed_row_lengths(),
                       'row_lengths' in expected_encodings)
      self.assertEqual(result.has_precomputed_value_rowids(),
                       'value_rowids' in expected_encodings)
      self.assertEqual(result.has_precomputed_nrows(),
                       'nrows' in expected_encodings)
      self.assertEqual(result.uniform_row_length() is not None,
                       'uniform_row_length' in expected_encodings)
      for r in (x, y):
        if (r.has_precomputed_row_splits() and
            result.has_precomputed_row_splits()):
          self.assertAllEqual(r.row_splits(), result.row_splits())
        if (r.has_precomputed_row_lengths() and
            result.has_precomputed_row_lengths()):
          self.assertAllEqual(r.row_lengths(), result.row_lengths())
        if (r.has_precomputed_value_rowids() and
            result.has_precomputed_value_rowids()):
          self.assertAllEqual(r.value_rowids(), result.value_rowids())
        if r.has_precomputed_nrows() and result.has_precomputed_nrows():
          self.assertAllEqual(r.nrows(), result.nrows())
        if (r.uniform_row_length() is not None and
            result.uniform_row_length() is not None):
          self.assertAllEqual(r.uniform_row_length(),
                              result.uniform_row_length())
  def testMergePrecomputedEncodingsFastPaths(self):
    # Same object: x gets returned as-is.
    x = RowPartition.from_row_splits([0, 3, 8, 8])
    self.assertIs(x.merge_precomputed_encodings(x), x)
    # Same encoding tensor objects: x gets returned as-is.
    y = RowPartition.from_row_splits(x.row_splits(), validate=False)
    self.assertIs(x.merge_precomputed_encodings(y), x)
  def testMergePrecomputedEncodingsWithMatchingTensors(self):
    # The encoding tensors for `a` are a superset of the encoding tensors
    # for `b`, and where they overlap, they the same tensor objects.
    a = RowPartition.from_value_rowids([0, 0, 3, 4, 4, 4])
    b = RowPartition.from_row_splits(a.row_splits(), validate=False)
    self.assertIs(a.merge_precomputed_encodings(b), a)
    self.assertIs(b.merge_precomputed_encodings(a), a)
    self.assertIsNot(a, b)
  @parameterized.named_parameters([
      dict(
          testcase_name='RowSplitMismatch',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_row_splits([0, 3, 8, 9]),
          message='incompatible row_splits'),
      dict(
          testcase_name='RowLengthMismatch',
          x=lambda: RowPartition.from_row_lengths([2, 0, 2]),
          y=lambda: RowPartition.from_row_lengths([2, 0, 2, 1]),
          message='incompatible row_splits'),  # row_splits is checked first
      dict(
          testcase_name='ValueRowIdMismatch',
          x=lambda: RowPartition.from_value_rowids([0, 3, 3, 4]),
          y=lambda: RowPartition.from_value_rowids([0, 3, 4]),
          message='incompatible value_rowids'),
  ])
  def testMergePrecomputedEncodingStaticErrors(self, x, y, message):
    if context.executing_eagerly():
      return
    # Errors that are caught by static shape checks.
    x = x()
    y = y()
    with self.assertRaisesRegexp(ValueError, message):
      x.merge_precomputed_encodings(y).row_splits()
    with self.assertRaisesRegexp(ValueError, message):
      y.merge_precomputed_encodings(x).row_splits()
  @parameterized.named_parameters([
      dict(
          testcase_name='NRowsMismatch',
          x=lambda: RowPartition.from_uniform_row_length(5, nvals=20),
          y=lambda: RowPartition.from_uniform_row_length(5, nvals=15),
          message='incompatible nrows'),
      dict(
          testcase_name='UniformRowLengthMismatch',
          x=lambda: RowPartition.from_uniform_row_length(5, nvals=20),
          y=lambda: RowPartition.from_uniform_row_length(2, nvals=8),
          message='incompatible uniform_row_length'),
      dict(
          testcase_name='RowSplitMismatch',
          x=lambda: RowPartition.from_row_splits([0, 3, 8]),
          y=lambda: RowPartition.from_row_splits([0, 5, 8]),
          message='incompatible row_splits'),
      dict(
          testcase_name='RowLengthMismatch',
          x=lambda: RowPartition.from_row_lengths([2, 0, 2]),
          y=lambda: RowPartition.from_row_lengths([0, 0, 2]),
          message='incompatible row_splits'),  # row_splits is checked first
      dict(
          testcase_name='ValueRowIdMismatch',
          x=lambda: RowPartition.from_value_rowids([0, 3, 3]),
          y=lambda: RowPartition.from_value_rowids([0, 0, 3]),
          message='incompatible row_splits'),  # row_splits is checked first
  ])
  def testMergePrecomputedEncodingRuntimeErrors(self, x, y, message):
    # Errors that are caught by runtime value checks.
    x = x()
    y = y()
    with self.assertRaisesRegexp(errors.InvalidArgumentError, message):
      self.evaluate(x.merge_precomputed_encodings(y).row_splits())
    with self.assertRaisesRegexp(errors.InvalidArgumentError, message):
      self.evaluate(y.merge_precomputed_encodings(x).row_splits())
@test_util.run_all_in_graph_and_eager_modes
class RowPartitionSpecTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Tests for RowPartitionSpec: construction, serialization, compatibility."""

  def testDefaultConstruction(self):
    """A default spec has unknown nrows/nvals/row length and int64 dtype."""
    spec = RowPartitionSpec()
    self.assertEqual(spec.nrows, None)
    self.assertEqual(spec.nvals, None)
    self.assertEqual(spec.uniform_row_length, None)
    self.assertEqual(spec.dtype, dtypes.int64)

  @parameterized.parameters([
      (None, None, None, dtypes.int64, None, None, None, dtypes.int64),
      (5, None, None, dtypes.int32, 5, None, None, dtypes.int32),
      (None, 20, None, dtypes.int64, None, 20, None, dtypes.int64),
      (None, None, 8, dtypes.int64, None, None, 8, dtypes.int64),
      (5, None, 8, dtypes.int64, 5, 40, 8, dtypes.int64),  # nvals inferred
      (None, 20, 4, dtypes.int32, 5, 20, 4, dtypes.int32),  # nrows inferred
      (0, None, None, dtypes.int32, 0, 0, None, dtypes.int32),  # nvals inferred
      (None, None, 0, dtypes.int32, None, 0, 0, dtypes.int32),  # nvals inferred
  ])  # pyformat: disable
  def testConstruction(self, nrows, nvals, uniform_row_length, dtype,
                       expected_nrows, expected_nvals,
                       expected_uniform_row_length, expected_dtype):
    """Explicit construction, including inference of nrows/nvals."""
    spec = RowPartitionSpec(nrows, nvals, uniform_row_length, dtype)
    self.assertEqual(spec.nrows, expected_nrows)
    self.assertEqual(spec.nvals, expected_nvals)
    self.assertEqual(spec.uniform_row_length, expected_uniform_row_length)
    self.assertEqual(spec.dtype, expected_dtype)

  @parameterized.parameters([
      dict(dtype=dtypes.float32, error='dtype must be tf.int32 or tf.int64'),
      dict(nrows=0, nvals=5, error='.* not compatible .*'),
      dict(uniform_row_length=0, nvals=5, error='.* not compatible .*'),
      dict(nvals=11, uniform_row_length=5, error='.* not compatible .*'),
      dict(
          nrows=8, nvals=10, uniform_row_length=5,
          error='.* not compatible .*'),
  ])
  def testConstructionError(self,
                            nrows=None,
                            nvals=None,
                            uniform_row_length=None,
                            dtype=dtypes.int64,
                            error=None):
    """Inconsistent construction arguments raise ValueError."""
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12).
    with self.assertRaisesRegex(ValueError, error):
      RowPartitionSpec(nrows, nvals, uniform_row_length, dtype)

  def testValueType(self):
    spec = RowPartitionSpec()
    self.assertEqual(spec.value_type, RowPartition)

  @parameterized.parameters([
      dict(
          spec=RowPartitionSpec(),
          expected=(tensor_shape.TensorShape([None]),
                    tensor_shape.TensorShape([None]),
                    tensor_shape.TensorShape([None]), dtypes.int64)),
      dict(
          spec=RowPartitionSpec(dtype=dtypes.int32),
          expected=(tensor_shape.TensorShape([None]),
                    tensor_shape.TensorShape([None]),
                    tensor_shape.TensorShape([None]), dtypes.int32)),
      dict(
          spec=RowPartitionSpec(nrows=8, nvals=13),
          expected=(tensor_shape.TensorShape([8]),
                    tensor_shape.TensorShape([13]),
                    tensor_shape.TensorShape([None]), dtypes.int64)),
      dict(
          spec=RowPartitionSpec(nrows=8, uniform_row_length=2),
          expected=(
              tensor_shape.TensorShape([8]),
              tensor_shape.TensorShape([16]),  # inferred
              tensor_shape.TensorShape([2]),
              dtypes.int64)),
  ])
  def testSerialize(self, spec, expected):
    serialization = spec._serialize()
    # TensorShape has an unconventional definition of equality, so we can't use
    # assertEqual directly here.  But repr() is deterministic and lossless for
    # the expected values, so we can use that instead.
    self.assertEqual(repr(serialization), repr(expected))

  @parameterized.parameters([
      dict(
          spec=RowPartitionSpec(),
          expected=tensor_spec.TensorSpec([None], dtypes.int64)),
      dict(
          spec=RowPartitionSpec(dtype=dtypes.int32),
          expected=tensor_spec.TensorSpec([None], dtypes.int32)),
      dict(
          spec=RowPartitionSpec(nrows=17, dtype=dtypes.int32),
          expected=tensor_spec.TensorSpec([18], dtypes.int32)),
      dict(
          spec=RowPartitionSpec(nvals=10, uniform_row_length=2),
          expected=tensor_spec.TensorSpec([6], dtypes.int64)),  # inferred nrow
  ])
  def testComponentSpecs(self, spec, expected):
    """The component spec is the row_splits tensor spec (length nrows+1)."""
    self.assertEqual(spec._component_specs, expected)

  @parameterized.parameters([
      dict(
          rp_factory=lambda: RowPartition.from_row_splits([0, 3, 7]),
          components=[0, 3, 7]),
  ])
  def testToFromComponents(self, rp_factory, components):
    """Round-trip through _to_components/_from_components preserves equality."""
    rp = rp_factory()
    spec = rp._type_spec
    actual_components = spec._to_components(rp)
    self.assertAllEqual(actual_components, components)
    rp_reconstructed = spec._from_components(actual_components)
    _assert_row_partition_equal(self, rp, rp_reconstructed)

  @parameterized.parameters([
      (RowPartitionSpec(), RowPartitionSpec()),
      (RowPartitionSpec(nrows=8), RowPartitionSpec(nrows=8)),
      (RowPartitionSpec(nrows=8), RowPartitionSpec(nrows=None)),
      (RowPartitionSpec(nvals=8), RowPartitionSpec(nvals=8)),
      (RowPartitionSpec(nvals=8), RowPartitionSpec(nvals=None)),
      (RowPartitionSpec(uniform_row_length=8),
       RowPartitionSpec(uniform_row_length=8)),
      (RowPartitionSpec(uniform_row_length=8),
       RowPartitionSpec(uniform_row_length=None)),
      (RowPartitionSpec(nvals=12), RowPartitionSpec(uniform_row_length=3)),
      (RowPartitionSpec(nrows=12), RowPartitionSpec(uniform_row_length=72)),
      (RowPartitionSpec(nrows=5), RowPartitionSpec(nvals=15)),
      (RowPartitionSpec(nvals=0), RowPartitionSpec(nrows=0)),
      (RowPartitionSpec(nvals=0), RowPartitionSpec(uniform_row_length=0)),
  ])
  def testIsCompatibleWith(self, spec1, spec2):
    self.assertTrue(spec1.is_compatible_with(spec2))

  @parameterized.parameters([
      (RowPartitionSpec(), RowPartitionSpec(dtype=dtypes.int32)),
      (RowPartitionSpec(nvals=5), RowPartitionSpec(uniform_row_length=3)),
      (RowPartitionSpec(nrows=7,
                        nvals=12), RowPartitionSpec(uniform_row_length=3)),
      (RowPartitionSpec(nvals=5), RowPartitionSpec(nrows=0)),
      (RowPartitionSpec(nvals=5), RowPartitionSpec(uniform_row_length=0)),
  ])
  def testIsNotCompatibleWith(self, spec1, spec2):
    self.assertFalse(spec1.is_compatible_with(spec2))

  @parameterized.parameters([
      dict(
          spec1=RowPartitionSpec(nrows=8, nvals=3, dtype=dtypes.int32),
          spec2=RowPartitionSpec(nrows=8, nvals=3, dtype=dtypes.int32),
          expected=RowPartitionSpec(nrows=8, nvals=3, dtype=dtypes.int32)),
      dict(
          spec1=RowPartitionSpec(nrows=8, nvals=None),
          spec2=RowPartitionSpec(nrows=None, nvals=8),
          expected=RowPartitionSpec(nrows=None, nvals=None)),
      dict(
          spec1=RowPartitionSpec(nrows=8, nvals=33),
          spec2=RowPartitionSpec(nrows=3, nvals=13),
          expected=RowPartitionSpec(nrows=None, nvals=None)),
      dict(
          spec1=RowPartitionSpec(nrows=12, uniform_row_length=3),
          spec2=RowPartitionSpec(nrows=3, uniform_row_length=3),
          expected=RowPartitionSpec(nrows=None, uniform_row_length=3)),
      dict(
          spec1=RowPartitionSpec(5, 35, 7),
          spec2=RowPartitionSpec(8, 80, 10),
          expected=RowPartitionSpec(None, None, None)),
  ])
  def testMostSpecificCompatibleType(self, spec1, spec2, expected):
    actual = spec1.most_specific_compatible_type(spec2)
    self.assertEqual(actual, expected)

  @parameterized.parameters([
      (RowPartitionSpec(), RowPartitionSpec(dtype=dtypes.int32)),
  ])
  def testMostSpecificCompatibleTypeError(self, spec1, spec2):
    """Specs with different dtypes have no common compatible type."""
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12).
    with self.assertRaisesRegex(ValueError, 'not compatible'):
      spec1.most_specific_compatible_type(spec2)

  def testFromValue(self):
    self.assertEqual(
        RowPartitionSpec.from_value(RowPartition.from_row_splits([0, 2, 8, 8])),
        RowPartitionSpec(nrows=3))
    self.assertEqual(
        RowPartitionSpec.from_value(
            RowPartition.from_row_lengths([5, 3, 0, 2])),
        RowPartitionSpec(nrows=4))
    self.assertEqual(
        RowPartitionSpec.from_value(
            RowPartition.from_value_rowids([0, 2, 2, 8])),
        RowPartitionSpec(nrows=9, nvals=4))
    self.assertEqual(
        RowPartitionSpec.from_value(
            RowPartition.from_uniform_row_length(
                nvals=12, uniform_row_length=3)),
        RowPartitionSpec(nvals=12, uniform_row_length=3))
def _assert_row_partition_equal(test_class, actual, expected):
  """Assert that two RowPartitions carry the same precomputed encodings and
  that every encoding present on `expected` has equal values on `actual`."""
  assert isinstance(test_class, test_util.TensorFlowTestCase)
  assert isinstance(actual, RowPartition)
  assert isinstance(expected, RowPartition)

  # The same set of encodings must be precomputed on both partitions.
  for flag_name in ('has_precomputed_row_splits',
                    'has_precomputed_row_lengths',
                    'has_precomputed_value_rowids',
                    'has_precomputed_nrows'):
    test_class.assertEqual(getattr(actual, flag_name)(),
                           getattr(expected, flag_name)())
  test_class.assertEqual(actual.uniform_row_length() is None,
                         expected.uniform_row_length() is None)

  # For each encoding that exists, the values must match element-wise.
  if expected.has_precomputed_row_splits():
    test_class.assertAllEqual(actual.row_splits(), expected.row_splits())
  if expected.has_precomputed_row_lengths():
    test_class.assertAllEqual(actual.row_lengths(), expected.row_lengths())
  if expected.has_precomputed_value_rowids():
    test_class.assertAllEqual(actual.value_rowids(), expected.value_rowids())
  if expected.has_precomputed_nrows():
    test_class.assertAllEqual(actual.nrows(), expected.nrows())
  if expected.uniform_row_length() is not None:
    test_class.assertAllEqual(actual.uniform_row_length(),
                              expected.uniform_row_length())
# Run all tests in this module when executed directly.
if __name__ == '__main__':
  googletest.main()
| 41.221851 | 80 | 0.653262 |
b1474f8c6853107470c7a1ad601a11241665af0d | 1,369 | py | Python | openprocurement/tender/openua/adapters.py | raccoongang/openprocurement.tender.openua | dd2dbddf317bdb4bb38443240faf126765ef48c4 | [
"Apache-2.0"
] | 8 | 2016-01-28T11:37:09.000Z | 2019-03-17T07:18:09.000Z | openprocurement/tender/openua/adapters.py | raccoongang/openprocurement.tender.openua | dd2dbddf317bdb4bb38443240faf126765ef48c4 | [
"Apache-2.0"
] | 70 | 2016-02-11T16:46:22.000Z | 2018-03-19T15:42:16.000Z | openprocurement/tender/openua/adapters.py | raccoongang/openprocurement.tender.openua | dd2dbddf317bdb4bb38443240faf126765ef48c4 | [
"Apache-2.0"
] | 30 | 2016-01-27T10:51:00.000Z | 2019-03-31T15:56:52.000Z | # -*- coding: utf-8 -*-
from openprocurement.tender.core.adapters import TenderConfigurator
from openprocurement.tender.openua.models import Tender
from openprocurement.tender.openua.constants import (
TENDER_PERIOD, TENDERING_EXTRA_PERIOD, STATUS4ROLE,
CLAIM_SUBMIT_TIME, COMPLAINT_SUBMIT_TIME
)
class TenderAboveThresholdUAConfigurator(TenderConfigurator):
    """ AboveThresholdUA Tender configuration adapter """

    name = "AboveThresholdUA Tender configurator"
    model = Tender

    # duration of tendering period. timedelta object.
    tendering_period_duration = TENDER_PERIOD

    # duration of tender period extension. timedelta object
    tendering_period_extra = TENDERING_EXTRA_PERIOD

    # complaint statuses (taken from the model) that block operations
    block_tender_complaint_status = model.block_tender_complaint_status
    block_complaint_status = model.block_complaint_status

    # Dictionary with allowed complaint statuses for operations for each role
    allowed_statuses_for_complaint_operations_for_roles = STATUS4ROLE

    # Tender claims should be submitted not later than "tender_claim_submit_time" days before tendering period end. Timedelta object
    tender_claim_submit_time = CLAIM_SUBMIT_TIME

    # Tender complaints should be submitted not later than "tender_complaint_submit_time" days before tendering period end. Timedelta object
    tender_complaint_submit_time = COMPLAINT_SUBMIT_TIME
7ed92e6c5eaba0cc403cf2ed11fc2f01ca926024 | 1,841 | py | Python | 0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py | yangreal1991/my_leetcode_solutions | 18c223d43e204a156f865d813957c5c1b3f2193a | [
"MIT"
] | 1 | 2020-05-24T02:21:46.000Z | 2020-05-24T02:21:46.000Z | 0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py | yangreal1991/my_leetcode_solutions | 18c223d43e204a156f865d813957c5c1b3f2193a | [
"MIT"
] | null | null | null | 0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py | yangreal1991/my_leetcode_solutions | 18c223d43e204a156f865d813957c5c1b3f2193a | [
"MIT"
] | null | null | null | class Solution:
def __init__(self):
pass
def maxProfit_BF(self, prices):
"""Say you have an array for which the ith element is the price of a given stock on day i.
Find the maximum profit under the constraint that only one transaction is permitted.
Args:
prices: List[int]
Returns:
result: int
"""
if len(prices) < 2:
return 0
result = 0
for i in range(0, len(prices) - 1):
for j in range(i + 1, len(prices)):
result = max(result, prices[j] - prices[i])
return result
def maxProfit(self, prices):
"""Say you have an array for which the ith element is the price of a given stock on day i.
Find the maximum profit under the constraint that only one transaction is permitted.
Args:
prices: List[int]
Returns:
result: int
"""
if len(prices) < 2:
return 0
result = 0
min_price_before = prices[0]
for i in range(1, len(prices)):
result = max(result, prices[i] - min_price_before)
min_price_before = min(min_price_before, prices[i])
return result
import random
import sys

if __name__ == '__main__':
    s = Solution()
    prices = [7, 1, 5, 3, 6, 4]
    print(prices)
    print(s.maxProfit(prices))
    print(s.maxProfit_BF(prices))

    # Randomized differential test: the O(n) solution must agree with the
    # brute-force reference on every generated input.
    n_runs = 1000
    for i in range(n_runs):
        if i % 100 == 0:
            print("Iteration: ", i)
        prices = [random.randrange(1, 10000, 1) for _ in range(i + 1)]
        if s.maxProfit(prices) != s.maxProfit_BF(prices):
            print(prices)
            print(s.maxProfit(prices))
            print(s.maxProfit_BF(prices))
            # Bug fix: exit non-zero on a mismatch so callers/CI see failure
            # (the original called sys.exit(0), signalling success).
            sys.exit(1)
    print("All tests passed!!")
71ae23daba79b717a6113918458a7d08ce0a5222 | 628 | py | Python | manage.py | MakingL/CuberBackendApp | 3faebbe2a96fdcc625b5d7f2363ffd8f405f9fd8 | [
"BSD-2-Clause"
] | null | null | null | manage.py | MakingL/CuberBackendApp | 3faebbe2a96fdcc625b5d7f2363ffd8f405f9fd8 | [
"BSD-2-Clause"
] | null | null | null | manage.py | MakingL/CuberBackendApp | 3faebbe2a96fdcc625b5d7f2363ffd8f405f9fd8 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line administrative tasks for the CuberApp project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CuberApp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.545455 | 73 | 0.683121 |
5d4fd0a1b30b390dd788963e84f5a6e05326e59a | 37,248 | py | Python | src/reportlab/graphics/renderPS.py | Gsllchb/reportlab | d1e65947d8b00104004886958ae460a0777e9313 | [
"BSD-3-Clause"
] | null | null | null | src/reportlab/graphics/renderPS.py | Gsllchb/reportlab | d1e65947d8b00104004886958ae460a0777e9313 | [
"BSD-3-Clause"
] | null | null | null | src/reportlab/graphics/renderPS.py | Gsllchb/reportlab | d1e65947d8b00104004886958ae460a0777e9313 | [
"BSD-3-Clause"
] | null | null | null | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://bitbucket.org/rptlab/reportlab/history-node/tip/src/reportlab/graphics/renderPS.py
__version__='3.3.0'
__doc__="""Render drawing objects in Postscript"""
from reportlab.pdfbase.pdfmetrics import getFont, stringWidth, unicode2T1 # for font info
from reportlab.lib.utils import getBytesIO, getStringIO, asBytes, char2int, rawBytes, asNative, isUnicode
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.graphics.renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS
import math
from operator import getitem
from reportlab import rl_config, xrange, ascii
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
# Build a 256-entry table mapping a byte value to its PostScript-string
# escape: octal escapes for non-printables, backslash escapes for the
# PostScript string delimiters, and the character itself otherwise.
_ESCAPEDICT = {}
for _code in xrange(256):
    if _code < 32 or _code >= 127:
        _escaped = '\\%03o' % _code          # octal escape for non-printables
    elif chr(_code) in '\\()':
        _escaped = '\\' + chr(_code)         # escape PS string delimiters
    else:
        _escaped = chr(_code)
    _ESCAPEDICT[_code] = _escaped
del _code, _escaped
def _escape_and_limit(s):
s = asBytes(s)
R = []
aR = R.append
n = 0
for c in s:
c = _ESCAPEDICT[char2int(c)]
aR(c)
n += len(c)
if n>=200:
n = 0
aR('\\\n')
return ''.join(R)
# we need to create encoding vectors for each font we use, or they will
# come out in Adobe's old StandardEncoding, which NOBODY uses.
PS_WinAnsiEncoding="""
/RE { %def
findfont begin
currentdict dup length dict begin
{ %forall
1 index /FID ne { def } { pop pop } ifelse
} forall
/FontName exch def dup length 0 ne { %if
/Encoding Encoding 256 array copy def
0 exch { %forall
dup type /nametype eq { %ifelse
Encoding 2 index 2 index put
pop 1 add
}{ %else
exch pop
} ifelse
} forall
} if pop
currentdict dup end end
/FontName get exch definefont pop
} bind def
/WinAnsiEncoding [
39/quotesingle 96/grave 128/euro 130/quotesinglbase/florin/quotedblbase
/ellipsis/dagger/daggerdbl/circumflex/perthousand
/Scaron/guilsinglleft/OE 145/quoteleft/quoteright
/quotedblleft/quotedblright/bullet/endash/emdash
/tilde/trademark/scaron/guilsinglright/oe/dotlessi
159/Ydieresis 164/currency 166/brokenbar 168/dieresis/copyright
/ordfeminine 172/logicalnot 174/registered/macron/ring
177/plusminus/twosuperior/threesuperior/acute/mu
183/periodcentered/cedilla/onesuperior/ordmasculine
188/onequarter/onehalf/threequarters 192/Agrave/Aacute
/Acircumflex/Atilde/Adieresis/Aring/AE/Ccedilla
/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute
/Icircumflex/Idieresis/Eth/Ntilde/Ograve/Oacute
/Ocircumflex/Otilde/Odieresis/multiply/Oslash
/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
/germandbls/agrave/aacute/acircumflex/atilde/adieresis
/aring/ae/ccedilla/egrave/eacute/ecircumflex
/edieresis/igrave/iacute/icircumflex/idieresis
/eth/ntilde/ograve/oacute/ocircumflex/otilde
/odieresis/divide/oslash/ugrave/uacute/ucircumflex
/udieresis/yacute/thorn/ydieresis
] def
"""
class PSCanvas:
    def __init__(self,size=(300,300), PostScriptLevel=2):
        """Create an empty canvas of the given (width, height) in points.

        PostScriptLevel (1 or 2) selects the image-drawing strategy used by
        drawImage.  PostScript fragments are accumulated in self.code and
        emitted by save().
        """
        self.width, self.height = size
        xtraState = []
        self._xtraState_push = xtraState.append
        self._xtraState_pop = xtraState.pop
        self.comments = 0
        self.code = []
        self.code_append = self.code.append
        self._sep = '\n'
        # Graphics-state caches: setters emit PostScript only when the cached
        # value actually changes.
        self._strokeColor = self._fillColor = self._lineWidth = \
            self._font = self._fontSize = self._lineCap = \
            self._lineJoin = self._color = None
        self._fontsUsed = [] # track them as we go
        self.setFont(STATE_DEFAULTS['fontName'],STATE_DEFAULTS['fontSize'])
        self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
        self.setLineCap(2)
        self.setLineJoin(0)
        self.setLineWidth(1)
        self.PostScriptLevel=PostScriptLevel
        self._fillMode = FILL_EVEN_ODD
    def comment(self,msg):
        # Emit msg as a PostScript comment, only when self.comments is enabled.
        if self.comments: self.code_append('%'+msg)
def drawImage(self, image, x1,y1, x2=None,y2=None): # Postscript Level2 version
# select between postscript level 1 or level 2
if self.PostScriptLevel==1:
self._drawImageLevel1(image, x1,y1, x2=None,y2=None)
elif self.PostScriptLevel==2:
self._drawImageLevel2(image, x1,y1, x2=None,y2=None)
else :
raise ValueError('Unsupported Postscript Level %s' % self.PostScriptLevel)
    def clear(self):
        # NOTE(review): this emits 'showpage' rather than erasing the page --
        # see the author's own comment; save() relies on the last entry being
        # 'showpage'.
        self.code_append('showpage') # ugh, this makes no sense oh well.
    def _t1_re_encode(self):
        """Insert WinAnsi re-encoding commands for each Type 1 font used,
        so text is not rendered in Adobe's old StandardEncoding."""
        if not self._fontsUsed: return
        # for each font used, reencode the vectors
        C = []
        for fontName in self._fontsUsed:
            fontObj = getFont(fontName)
            if not fontObj._dynamicFont and fontObj.encName=='WinAnsiEncoding':
                C.append('WinAnsiEncoding /%s /%s RE' % (fontName, fontName))
        if C:
            # The /RE prolog must precede the re-encoding commands; insert at
            # position 1, i.e. just after the EPS header added by save().
            C.insert(0,PS_WinAnsiEncoding)
            self.code.insert(1, self._sep.join(C))
    def save(self,f=None):
        """Write the accumulated PostScript to f (a filename or a file-like
        object with a write method) as an EPSF document."""
        if not hasattr(f,'write'):
            _f = open(f,'wb')
        else:
            _f = f
        # Ensure the page is terminated before emitting.
        if self.code[-1]!='showpage': self.clear()
        self.code.insert(0,'''\
%%!PS-Adobe-3.0 EPSF-3.0
%%%%BoundingBox: 0 0 %d %d
%%%% Initialization:
/m {moveto} bind def
/l {lineto} bind def
/c {curveto} bind def

''' % (self.width,self.height))
        self._t1_re_encode()
        _f.write(rawBytes(self._sep.join(self.code)))
        # Only close (and tag, on platforms that support it) files we opened.
        if _f is not f:
            _f.close()
            from reportlab.lib.utils import markfilename
            markfilename(f,creatorcode='XPR3',filetype='EPSF')
    def saveState(self):
        # Mirror PostScript gsave: also stash Python-side state (the font
        # placeholder location) so restoreState can put it back.
        self._xtraState_push((self._fontCodeLoc,))
        self.code_append('gsave')

    def restoreState(self):
        # Mirror PostScript grestore and restore the stashed Python-side state.
        self.code_append('grestore')
        self._fontCodeLoc, = self._xtraState_pop()
def stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font)."""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
    def setLineCap(self,v):
        # Emit setlinecap only when the cap style changes (state cache).
        if self._lineCap!=v:
            self._lineCap = v
            self.code_append('%d setlinecap'%v)

    def setLineJoin(self,v):
        # Emit setlinejoin only when the join style changes (state cache).
        if self._lineJoin!=v:
            self._lineJoin = v
            self.code_append('%d setlinejoin'%v)

    def setDash(self, array=[], phase=0):
        """Two notations. pass two numbers, or an array and phase"""
        # NOTE: the mutable default [] is never mutated here, so it is safe.
        # copied and modified from reportlab.canvas
        psoperation = "setdash"
        if isinstance(array,(float,int)):
            # Two-number form: array is the on-length, phase the off-length.
            self.code_append('[%s %s] 0 %s' % (array, phase, psoperation))
        elif isinstance(array,(tuple,list)):
            assert phase >= 0, "phase is a length in user space"
            textarray = ' '.join(map(str, array))
            self.code_append('[%s] %s %s' % (textarray, phase, psoperation))
    def setStrokeColor(self, color):
        # Remember the stroke colour and make it the current colour.
        self._strokeColor = color
        self.setColor(color)

    def setColor(self, color):
        # Emit a colour command only on change; colours with a 'cyan'
        # attribute are treated as CMYK, everything else as RGB.
        if self._color!=color:
            self._color = color
            if color:
                if hasattr(color, "cyan"):
                    self.code_append('%s setcmykcolor' % fp_str(color.cyan, color.magenta, color.yellow, color.black))
                else:
                    self.code_append('%s setrgbcolor' % fp_str(color.red, color.green, color.blue))

    def setFillColor(self, color):
        # Remember the fill colour and make it the current colour.
        self._fillColor = color
        self.setColor(color)

    def setFillMode(self, v):
        # FILL_EVEN_ODD or FILL_NON_ZERO; consumed by _fillAndStroke.
        self._fillMode = v

    def setLineWidth(self, width):
        # Emit setlinewidth only when the width changes (state cache).
        if width != self._lineWidth:
            self._lineWidth = width
            self.code_append('%s setlinewidth' % width)
    def setFont(self,font,fontSize,leading=None):
        """Select the current font.  Emission is deferred: an empty
        placeholder slot is reserved in the code stream and filled in by
        drawString the first time text is actually drawn.  `leading` is
        accepted for interface compatibility but unused here.
        """
        if self._font!=font or self._fontSize!=fontSize:
            self._fontCodeLoc = len(self.code)
            self._font = font
            self._fontSize = fontSize
            self.code_append('')

    def line(self, x1, y1, x2, y2):
        # Stroke a single segment in the current stroke colour; no-op when
        # there is no stroke colour.
        if self._strokeColor != None:
            self.setColor(self._strokeColor)
            self.code_append('%s m %s l stroke' % (fp_str(x1, y1), fp_str(x2, y2)))
    def _escape(self, s):
        '''
        return a copy of string s with special characters in postscript strings
        escaped with backslashes.
        '''
        try:
            return _escape_and_limit(s)
        except:
            raise ValueError("cannot escape %s" % ascii(s))

    def _issueT1String(self,fontObj,x,y,s):
        """Emit `s` at (x, y) using Type 1 fonts, splitting the text into runs
        per font (substitution fonts cover glyphs missing from fontObj) and
        switching fonts in the PostScript stream as needed."""
        fc = fontObj
        code_append = self.code_append
        fontSize = self._fontSize
        fontsUsed = self._fontsUsed
        escape = self._escape
        if not isUnicode(s):
            try:
                s = s.decode('utf8')
            except UnicodeDecodeError as e:
                # Re-raise with context showing the bytes around the failure.
                i,j = e.args[2:4]
                raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],s[i-10:i],s[i:j],s[j:j+10]),)))
        for f, t in unicode2T1(s,[fontObj]+fontObj.substitutionFonts):
            if f!=fc:
                # Font changed for this run: select it and record the usage so
                # _t1_re_encode can re-encode it at save time.
                psName = asNative(f.face.name)
                code_append('(%s) findfont %s scalefont setfont' % (psName,fp_str(fontSize)))
                if psName not in fontsUsed:
                    fontsUsed.append(psName)
                fc = f
            code_append('%s m (%s) show ' % (fp_str(x,y),escape(t)))
            # Advance the pen by the width of the run just drawn.
            x += f.stringWidth(t.decode(f.encName),fontSize)
        if fontObj!=fc:
            # Restore the originally requested font for subsequent text.
            self._font = None
            self.setFont(fontObj.face.name,fontSize)
    def drawString(self, x, y, s, angle=0):
        """Draw string s with its baseline starting at (x, y), optionally
        rotated by `angle` degrees; no-op when there is no fill colour."""
        if self._fillColor != None:
            fontObj = getFont(self._font)
            # Fill in the font placeholder reserved by setFont, if still empty.
            if not self.code[self._fontCodeLoc]:
                psName = asNative(fontObj.face.name)
                self.code[self._fontCodeLoc]='(%s) findfont %s scalefont setfont' % (psName,fp_str(self._fontSize))
                if psName not in self._fontsUsed:
                    self._fontsUsed.append(psName)
            self.setColor(self._fillColor)
            if angle!=0:
                # Rotate about (x, y), then draw at the local origin.
                self.code_append('gsave %s translate %s rotate' % (fp_str(x,y),fp_str(angle)))
                x = y = 0
            if fontObj._dynamicFont:
                s = self._escape(s)
                self.code_append('%s m (%s) show ' % (fp_str(x,y),s))
            else:
                # Type 1 fonts may need per-run font substitution.
                self._issueT1String(fontObj,x,y,s)
            if angle!=0:
                self.code_append('grestore')
    def drawCentredString(self, x, y, text, text_anchor='middle'):
        """Draw text anchored at x according to text_anchor
        ('start' behaviour by default offsetting: 'middle' centres the text,
        'end' right-aligns it, 'numeric' aligns on the decimal point);
        no-op when there is no fill colour."""
        if self._fillColor is not None:
            textLen = stringWidth(text, self._font,self._fontSize)
            if text_anchor=='end':
                x -= textLen
            elif text_anchor=='middle':
                x -= textLen/2.
            elif text_anchor=='numeric':
                # NOTE(review): numericXShift is not among this module's
                # visible imports -- confirm it is importable at runtime.
                x -= numericXShift(text_anchor,text,textLen,self._font,self._fontSize)
            self.drawString(x,y,text)
def drawRightString(self, text, x, y):
self.drawCentredString(text,x,y,text_anchor='end')
    def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
        """Draw a cubic Bezier from (x1,y1) to (x4,y4) with control points
        (x2,y2) and (x3,y3); filled (even-odd) and/or stroked according to
        the current colours."""
        codeline = '%s m %s curveto'
        data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
        if self._fillColor != None:
            self.setColor(self._fillColor)
            self.code_append((codeline % data) + ' eofill')
        if self._strokeColor != None:
            self.setColor(self._strokeColor)
            self.code_append((codeline % data)
                            + ((closed and ' closepath') or '')
                            + ' stroke')
########################################################################################
    def rect(self, x1,y1, x2,y2, stroke=1, fill=1):
        "Draw a rectangle between x1,y1, and x2,y2"
        # Path is drawn in counter-clockwise direction"
        x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
        y1, y2 = min(y1,y2), max(y1, y2)
        self.polygon(((x1,y1),(x2,y1),(x2,y2),(x1,y2)), closed=1, stroke=stroke, fill = fill)

    def roundRect(self, x1,y1, x2,y2, rx=8, ry=8):
        """Draw a rounded rectangle between x1,y1, and x2,y2,
        with corners inset as ellipses with x radius rx and y radius ry.
        These should have x1<x2, y1<y2, rx>0, and ry>0."""
        # Path is drawn in counter-clockwise direction
        x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
        y1, y2 = min(y1,y2), max(y1, y2)

        # Note: arcto command draws a line from current point to beginning of arc
        # save current matrix, translate to center of ellipse, scale by rx ry, and draw
        # a circle of unit radius in counterclockwise dir, return to original matrix
        # arguments are (cx, cy, rx, ry, startAngle, endAngle)
        ellipsePath = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s arc setmatrix'

        # choice between newpath and moveTo beginning of arc
        # go with newpath for precision, does this violate any assumptions in code???
        rr = ['newpath'] # Round Rect code path
        a = rr.append
        # upper left corner ellipse is first
        a(ellipsePath % (x1+rx, y1+ry, rx, -ry, 90, 180))
        a(ellipsePath % (x1+rx, y2-ry, rx, -ry, 180, 270))
        a(ellipsePath % (x2-rx, y2-ry, rx, -ry, 270, 360))
        a(ellipsePath % (x2-rx, y1+ry, rx, -ry, 0, 90) )
        a('closepath')

        self._fillAndStroke(rr)

    def ellipse(self, x1,y1, x2,y2):
        """Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
        These should have x1<x2 and y1<y2."""
        #Just invoke drawArc to actually draw the ellipse
        self.drawArc(x1,y1, x2,y2)

    def circle(self, xc, yc, r):
        # A circle is an ellipse inscribed in the square around (xc, yc).
        self.ellipse(xc-r,yc-r, xc+r,yc+r)
    def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
        """Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
        starting at startAng degrees and covering extent degrees.   Angles
        start with 0 to the right (+x) and increase counter-clockwise.
        These should have x1<x2 and y1<y2.

        When fromcenter is true the stroked outline is a pie slice (lines to
        and from the centre); the fill is always a pie slice.
        """
        #calculate centre of ellipse
        #print "x1,y1,x2,y2,startAng,extent,fromcenter", x1,y1,x2,y2,startAng,extent,fromcenter
        cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
        rx, ry = (x2-x1)/2.0, (y2-y1)/2.0

        codeline = self._genArcCode(x1, y1, x2, y2, startAng, extent)

        startAngleRadians = math.pi*startAng/180.0
        extentRadians = math.pi*extent/180.0
        endAngleRadians = startAngleRadians + extentRadians

        codelineAppended = 0

        # fill portion
        if self._fillColor != None:
            self.setColor(self._fillColor)
            self.code_append(codeline)
            codelineAppended = 1
            # Protect the arc path for the subsequent stroke pass.
            if self._strokeColor!=None: self.code_append('gsave')
            self.lineTo(cx,cy)
            self.code_append('eofill')
            if self._strokeColor!=None: self.code_append('grestore')

        # stroke portion
        if self._strokeColor != None:
            # this is a bit hacked up.  There is certainly a better way...
            self.setColor(self._strokeColor)
            (startx, starty) = (cx+rx*math.cos(startAngleRadians), cy+ry*math.sin(startAngleRadians))
            if not codelineAppended:
                self.code_append(codeline)
            if fromcenter:
                # move to center
                self.lineTo(cx,cy)
                self.lineTo(startx, starty)
                self.code_append('closepath')
            self.code_append('stroke')

    def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
        "Calculate the path for an arc inscribed in rectangle defined by (x1,y1),(x2,y2)"
        #calculate semi-minor and semi-major axes of ellipse
        xScale = abs((x2-x1)/2.0)
        yScale = abs((y2-y1)/2.0)
        #calculate centre of ellipse
        x, y = (x1+x2)/2.0, (y1+y2)/2.0

        codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'

        # Negative extent means the arc is drawn clockwise (arcn).
        if extent >= 0:
            arc='arc'
        else:
            arc='arcn'
        data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
        return codeline % data
def polygon(self, p, closed=0, stroke=1, fill=1):
assert len(p) >= 2, 'Polygon must have 2 or more points'
start = p[0]
p = p[1:]
poly = []
a = poly.append
a("%s m" % fp_str(start))
for point in p:
a("%s l" % fp_str(point))
if closed:
a("closepath")
self._fillAndStroke(poly,stroke=stroke,fill=fill)
def lines(self, lineList, color=None, width=None):
if self._strokeColor != None:
self._setColor(self._strokeColor)
codeline = '%s m %s l stroke'
for line in lineList:
self.code_append(codeline % (fp_str(line[0]),fp_str(line[1])))
    def moveTo(self,x,y):
        # Emit a bare moveto; relies on the /m abbreviation from save()'s prolog.
        self.code_append('%s m' % fp_str(x, y))

    def lineTo(self,x,y):
        # Emit a bare lineto (/l abbreviation).
        self.code_append('%s l' % fp_str(x, y))

    def curveTo(self,x1,y1,x2,y2,x3,y3):
        # Emit a bare curveto (/c abbreviation).
        self.code_append('%s c' % fp_str(x1,y1,x2,y2,x3,y3))

    def closePath(self):
        self.code_append('closepath')

    def polyLine(self, p):
        """Stroke an open polyline through the points in p (no fill)."""
        assert len(p) >= 1, 'Polyline must have 1 or more points'
        if self._strokeColor != None:
            self.setColor(self._strokeColor)
            self.moveTo(p[0][0], p[0][1])
            for t in p[1:]:
                self.lineTo(t[0], t[1])
            self.code_append('stroke')
    def drawFigure(self, partList, closed=0):
        """Draw a compound figure from a list of (operator, *args) parts.

        NOTE(review): figureLine/figureArc/figureCurve are not among this
        module's visible imports -- confirm they are in scope at runtime.
        """
        figureCode = []
        a = figureCode.append
        first = 1

        for part in partList:
            op = part[0]
            args = list(part[1:])

            if op == figureLine:
                # Only the very first part issues a moveto; later parts
                # connect with a lineto to keep the path continuous.
                if first:
                    first = 0
                    a("%s m" % fp_str(args[:2]))
                else:
                    a("%s l" % fp_str(args[:2]))
                a("%s l" % fp_str(args[2:]))

            elif op == figureArc:
                first = 0
                x1,y1,x2,y2,startAngle,extent = args[:6]
                a(self._genArcCode(x1,y1,x2,y2,startAngle,extent))

            elif op == figureCurve:
                if first:
                    first = 0
                    a("%s m" % fp_str(args[:2]))
                else:
                    a("%s l" % fp_str(args[:2]))
                a("%s curveto" % fp_str(args[2:]))
            else:
                raise TypeError("unknown figure operator: "+op)

        if closed:
            a("closepath")
        self._fillAndStroke(figureCode)
    def _fillAndStroke(self,code,clip=0,fill=1,stroke=1,fillMode=None):
        """Append the path in `code` and fill, stroke and/or clip it.

        Filling/stroking happen only when the corresponding colour is set AND
        the corresponding flag is true.  gsave/grestore pairs preserve the
        path across the fill so it can be reused for the stroke/clip.
        """
        fill = self._fillColor and fill
        stroke = self._strokeColor and stroke
        if fill or stroke or clip:
            self.code.extend(code)
            if fill:
                if fillMode is None:
                    fillMode = self._fillMode
                if stroke or clip: self.code_append("gsave")
                self.setColor(self._fillColor)
                # Select the fill rule: even-odd vs non-zero winding.
                self.code_append("eofill" if fillMode==FILL_EVEN_ODD else "fill")
                if stroke or clip: self.code_append("grestore")
            if stroke:
                if clip: self.code_append("gsave")
                self.setColor(self._strokeColor)
                self.code_append("stroke")
                if clip: self.code_append("grestore")
            if clip:
                self.code_append("clip")
                self.code_append("newpath")
    def translate(self,x,y):
        # Translate user space by (x, y).
        self.code_append('%s translate' % fp_str(x,y))

    def scale(self,x,y):
        # Scale user space by (x, y).
        self.code_append('%s scale' % fp_str(x,y))

    def transform(self,a,b,c,d,e,f):
        # Concatenate an arbitrary affine matrix [a b c d e f] onto the CTM.
        self.code_append('[%s] concat' % fp_str(a,b,c,d,e,f))

    def _drawTimeResize(self,w,h):
        '''if this is used we're probably in the wrong world'''
        self.width, self.height = w, h
############################################################################################
# drawImage(self. image, x1, y1, x2=None, y2=None) is now defined by either _drawImageLevel1
# ._drawImageLevel2, the choice is made in .__init__ depending on option
    def _drawImageLevel1(self, image, x1, y1, x2=None,y2=None):
        # Postscript Level1 version available for fallback mode when Level2 doesn't work
        """drawImage(self,image,x1,y1,x2=None,y2=None) : If x2 and y2 are ommitted, they are
        calculated from image size. (x1,y1) is upper left of image, (x2,y2) is lower right of
        image in piddle coordinates.

        The image is emitted inline as hex-encoded 24-bit RGB via the Level 1
        `colorimage` operator.
        """
        # For now let's start with 24 bit RGB images (following piddlePDF again)
        component_depth = 8
        myimage = image.convert('RGB')
        imgwidth, imgheight = myimage.size
        if not x2:
            x2 = imgwidth + x1
        if not y2:
            y2 = y1 + imgheight
        drawwidth = x2 - x1
        drawheight = y2 - y1
        #print 'Image size (%d, %d); Draw size (%d, %d)' % (imgwidth, imgheight, drawwidth, drawheight)
        # now I need to tell postscript how big image is

        # "image operators assume that they receive sample data from
        # their data source in x-axis major index order.  The coordinate
        # of the lower-left corner of the first sample is (0,0), of the
        # second (1,0) and so on" -PS2 ref manual p. 215
        #
        # The ImageMatrix maps unit squre of user space to boundary of the source image
        #

        # The CurrentTransformationMatrix (CTM) maps the unit square of
        # user space to the rect...on the page that is to receive the
        # image. A common ImageMatrix is [width 0 0 -height 0 height]
        # (for a left to right, top to bottom image )

        # first let's map the user coordinates start at offset x1,y1 on page

        self.code.extend([
            'gsave',
            '%s %s translate' % (x1,-y1 - drawheight), # need to start are lower left of image
            '%s %s scale' % (drawwidth,drawheight),
            '/scanline %d 3 mul string def' % imgwidth  # scanline by multiples of image width
            ])

        # now push the dimensions and depth info onto the stack
        # and push the ImageMatrix to map the source to the target rectangle (see above)
        # finally specify source (PS2 pp. 225 ) and by exmample
        self.code.extend([
            '%s %s %s' % (imgwidth, imgheight, component_depth),
            '[%s %s %s %s %s %s]' % (imgwidth, 0, 0, -imgheight, 0, imgheight),
            '{ currentfile scanline readhexstring pop } false 3',
            'colorimage '
            ])

        # data source output--now we just need to deliver a hex encode
        # series of lines of the right overall size can follow
        # piddlePDF again

        rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
        hex_encoded = self._AsciiHexEncode(rawimage)

        # write in blocks of 78 chars per line
        outstream = getStringIO(hex_encoded)

        dataline = outstream.read(78)
        while dataline != "":
            self.code_append(dataline)
            dataline= outstream.read(78)
        self.code_append('% end of image data') # for clarity
        self.code_append('grestore') # return coordinates to normal
def _AsciiHexEncode(self, input): # also based on piddlePDF
    """Hex-encode *input* (two lowercase hex digits per byte); helper
    used by the image-drawing methods."""
    pairs = ['%02x' % char2int(ch) for ch in asBytes(input)]
    return ''.join(pairs)
def _drawImageLevel2(self, image, x1,y1, x2=None,y2=None): # Postscript Level2 version
    '''Draw a PIL image with the PostScript Level 2 `image` operator.

    If x2/y2 are omitted they are derived from the image size; (x1, y1)
    is the upper left and (x2, y2) the lower right corner in piddle
    coordinates. At present we're handling only PIL.
    '''
    ### what sort of image are we to draw
    if image.mode=='L' :
        imBitsPerComponent = 8
        imNumComponents = 1
        myimage = image
    elif image.mode == '1':
        # BUGFIX: keep the 8-bit greyscale conversion (the old code
        # immediately overwrote it with the original 1-bit image) and set
        # the component depth, which was previously left unassigned in
        # this branch and raised a NameError further down.
        myimage = image.convert('L')
        imNumComponents = 1
        imBitsPerComponent = 8
    else :
        myimage = image.convert('RGB')
        imNumComponents = 3
        imBitsPerComponent = 8
    imwidth, imheight = myimage.size
    if not x2:
        x2 = imwidth + x1
    if not y2:
        y2 = y1 + imheight
    drawwidth = x2 - x1
    drawheight = y2 - y1
    self.code.extend([
        'gsave',
        '%s %s translate' % (x1,-y1 - drawheight), # need to start at lower left of image
        '%s %s scale' % (drawwidth,drawheight)])
    # pick the colorspace matching the number of components
    if imNumComponents == 3 :
        self.code_append('/DeviceRGB setcolorspace')
    elif imNumComponents == 1 :
        self.code_append('/DeviceGray setcolorspace')
    # create the image dictionary
    self.code_append("""
<<
/ImageType 1
/Width %d /Height %d %% dimensions of source image
/BitsPerComponent %d""" % (imwidth, imheight, imBitsPerComponent) )
    if imNumComponents == 1:
        self.code_append('/Decode [0 1]')
    if imNumComponents == 3:
        self.code_append('/Decode [0 1 0 1 0 1] %% decode color values normally')
    self.code.extend([ '/ImageMatrix [%s 0 0 %s 0 %s]' % (imwidth, -imheight, imheight),
                       '/DataSource currentfile /ASCIIHexDecode filter',
                       '>> % End image dictionary',
                       'image'])
    # after image operator just need to dump image data to file as hexstring
    rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
    hex_encoded = self._AsciiHexEncode(rawimage)
    # write in blocks of 78 chars per line
    outstream = getStringIO(hex_encoded)
    dataline = outstream.read(78)
    while dataline != "":
        self.code_append(dataline)
        dataline= outstream.read(78)
    self.code_append('> % end of image data') # > is EOD for hex encoded filter, for clarity
    self.code_append('grestore') # return coordinates to normal
# renderpdf - draws them onto a canvas
"""Usage:
from reportlab.graphics import renderPS
renderPS.draw(drawing, canvas, x, y)
Execute the script to see some test drawings."""
from reportlab.graphics.shapes import *
# hack so we only get warnings once each
#warnOnce = WarnOnce()
# the main entry point for users...
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
    """Render *drawing* onto *canvas* with its origin at (x, y)."""
    renderer = _PSRenderer()
    renderer.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
def _pointsFromList(L):
'''
given a list of coordinates [x0, y0, x1, y1....]
produce a list of points [(x0,y0), (y1,y0),....]
'''
P=[]
a = P.append
for i in range(0,len(L),2):
a((L[i],L[i+1]))
return P
class _PSRenderer(Renderer):
    """This draws onto a EPS document. It needs to be a class
    rather than a function, as some EPS-specific state tracking is
    needed outside of the state info in the SVG model."""

    def drawNode(self, node):
        """This is the recursive method called for each node
        in the tree"""
        self._canvas.comment('begin node %r'%node)
        color = self._canvas._color
        # clip paths are not bracketed by save/restore, so the clip they
        # install stays in effect for the following siblings
        if not (isinstance(node, Path) and node.isClipPath):
            self._canvas.saveState()
        #apply state changes
        deltas = getStateDelta(node)
        self._tracker.push(deltas)
        self.applyStateChanges(deltas, {})
        #draw the object, or recurse
        self.drawNodeDispatcher(node)
        rDeltas = self._tracker.pop()
        if not (isinstance(node, Path) and node.isClipPath):
            self._canvas.restoreState()
        self._canvas.comment('end node %r'%node)
        self._canvas._color = color
        #restore things we might have lost (without actually doing anything).
        for k, v in rDeltas.items():
            if k in self._restores:
                setattr(self._canvas,self._restores[k],v)

##    _restores = {'stroke':'_stroke','stroke_width': '_lineWidth','stroke_linecap':'_lineCap',
##                'stroke_linejoin':'_lineJoin','fill':'_fill','font_family':'_font',
##                'font_size':'_fontSize'}
    # maps tracker state keys to the PSCanvas attributes they shadow
    _restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
                 'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
                 'fontSize':'_fontSize'}

    def drawRect(self, rect):
        if rect.rx == rect.ry == 0:
            #plain old rectangle
            self._canvas.rect(
                rect.x, rect.y,
                rect.x+rect.width, rect.y+rect.height)
        else:
            #cheat and assume ry = rx; better to generalize
            #pdfgen roundRect function. TODO
            self._canvas.roundRect(
                rect.x, rect.y,
                rect.x+rect.width, rect.y+rect.height, rect.rx, rect.ry
                )

    def drawLine(self, line):
        # lines only make sense when a stroke color is set
        if self._canvas._strokeColor:
            self._canvas.line(line.x1, line.y1, line.x2, line.y2)

    def drawCircle(self, circle):
        self._canvas.circle( circle.cx, circle.cy, circle.r)

    def drawWedge(self, wedge):
        # simple (non-annular) wedges map onto drawArc; anything with an
        # inner radius falls back to a polygon/path approximation
        yradius, radius1, yradius1 = wedge._xtraRadii()
        if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
            startangledegrees = wedge.startangledegrees
            endangledegrees = wedge.endangledegrees
            centerx= wedge.centerx
            centery = wedge.centery
            radius = wedge.radius
            extent = endangledegrees - startangledegrees
            self._canvas.drawArc(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
                                 startangledegrees, extent, fromcenter=1)
        else:
            P = wedge.asPolygon()
            if isinstance(P,Path):
                self.drawPath(P)
            else:
                self.drawPolygon(P)

    def drawPolyLine(self, p):
        if self._canvas._strokeColor:
            self._canvas.polyLine(_pointsFromList(p.points))

    def drawEllipse(self, ellipse):
        #need to convert to pdfgen's bounding box representation
        x1 = ellipse.cx - ellipse.rx
        x2 = ellipse.cx + ellipse.rx
        y1 = ellipse.cy - ellipse.ry
        y2 = ellipse.cy + ellipse.ry
        self._canvas.ellipse(x1,y1,x2,y2)

    def drawPolygon(self, p):
        self._canvas.polygon(_pointsFromList(p.points), closed=1)

    def drawString(self, stringObj):
        if self._canvas._fillColor:
            S = self._tracker.getState()
            text_anchor, x, y, text = S['textAnchor'], stringObj.x,stringObj.y,stringObj.text
            # non-default anchors shift x by (a fraction of) the text width
            if not text_anchor in ['start','inherited']:
                font, fontSize = S['fontName'], S['fontSize']
                textLen = stringWidth(text, font,fontSize)
                if text_anchor=='end':
                    x -= textLen
                elif text_anchor=='middle':
                    x -= textLen/2
                elif text_anchor=='numeric':
                    x -= numericXShift(text_anchor,text,textLen,font,fontSize,encoding='winansi')
                else:
                    raise ValueError('bad value for text_anchor '+str(text_anchor))
            self._canvas.drawString(x,y,text)

    def drawPath(self, path, fillMode=None):
        from reportlab.graphics.shapes import _renderPath
        c = self._canvas
        drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
        autoclose = getattr(path,'autoclose','')
        def rP(**kwds):
            # replay the path's segments onto the canvas
            return _renderPath(path, drawFuncs, **kwds)
        if fillMode is None:
            fillMode = getattr(path,'fillMode',c._fillMode)
        fill = c._fillColor is not None
        stroke = c._strokeColor is not None
        clip = path.isClipPath
        fas = lambda **kwds: c._fillAndStroke([], fillMode=fillMode, **kwds)
        # the next two helpers are kept for API parity; not used below
        pathFill = lambda : c._fillAndStroke([], stroke=0, fillMode=fillMode)
        pathStroke = lambda : c._fillAndStroke([], fill=0)
        if autoclose=='svg':
            rP()
            fas(stroke=stroke,fill=fill,clip=clip)
        elif autoclose=='pdf':
            if fill:
                rP(forceClose=True)
                fas(stroke=stroke,fill=fill,clip=clip)
            elif stroke or clip:
                rP()
                fas(stroke=stroke,fill=0,clip=clip)
        else:
            if fill and rP(countOnly=True):
                rP()
            elif stroke or clip:
                fas(stroke=stroke,fill=0,clip=clip)

    def applyStateChanges(self, delta, newState):
        """This takes a set of states, and outputs the operators
        needed to set those properties"""
        for key, value in delta.items():
            if key == 'transform':
                self._canvas.transform(value[0], value[1], value[2],
                                       value[3], value[4], value[5])
            elif key == 'strokeColor':
                #this has different semantics in PDF to SVG;
                #we always have a color, and either do or do
                #not apply it; in SVG one can have a 'None' color
                self._canvas.setStrokeColor(value)
            elif key == 'strokeWidth':
                self._canvas.setLineWidth(value)
            elif key == 'strokeLineCap': #0,1,2
                self._canvas.setLineCap(value)
            elif key == 'strokeLineJoin':
                self._canvas.setLineJoin(value)
            elif key == 'strokeDashArray':
                if value:
                    # value may be (phase, dash-list) or a plain dash-list
                    if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
                        phase = value[0]
                        value = value[1]
                    else:
                        phase = 0
                    self._canvas.setDash(value,phase)
                else:
                    self._canvas.setDash()
##            elif key == 'stroke_opacity':
##                warnOnce('Stroke Opacity not supported yet')
            elif key == 'fillColor':
                #this has different semantics in PDF to SVG;
                #we always have a color, and either do or do
                #not apply it; in SVG one can have a 'None' color
                self._canvas.setFillColor(value)
##            elif key == 'fill_rule':
##                warnOnce('Fill rules not done yet')
##            elif key == 'fill_opacity':
##                warnOnce('Fill opacity not done yet')
            elif key in ['fontSize', 'fontName']:
                # both need setting together in PDF
                # one or both might be in the deltas,
                # so need to get whichever is missing
                fontname = delta.get('fontName', self._canvas._font)
                fontsize = delta.get('fontSize', self._canvas._fontSize)
                self._canvas.setFont(fontname, fontsize)

    def drawImage(self, image):
        from reportlab.lib.utils import ImageReader
        im = ImageReader(image.path)
        x0 = image.x
        y0 = image.y
        # width/height of None means "use the image's own size"
        x1 = image.width
        if x1 is not None: x1 += x0
        y1 = image.height
        if y1 is not None: y1 += y0
        self._canvas.drawImage(im._image,x0,y0,x1,y1)
def drawToFile(d,fn, showBoundary=rl_config.showBoundary,**kwd):
    """Render drawing *d* into a new PostScript file (or file object) *fn*."""
    scaled = renderScaledDrawing(d)
    canvas = PSCanvas((scaled.width, scaled.height))
    draw(scaled, canvas, 0, 0, showBoundary=showBoundary)
    canvas.save(fn)
def drawToString(d, showBoundary=rl_config.showBoundary):
    "Returns a PS as a string in memory, without touching the disk"
    buf = getBytesIO()
    drawToFile(d, buf, showBoundary=showBoundary)
    return buf.getvalue()
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test(outDir='epsout',shout=False):
    """Render every getDrawing* fixture from reportlab.graphics.testshapes
    into *outDir* as renderPS_<n>.eps. With *shout* (or high verbosity)
    print each saved filename."""
    from reportlab.graphics import testshapes
    from reportlab.rl_config import verbose
    # temporarily force a standard font set, restored afterwards
    OLDFONTS = testshapes._FONTS[:]
    testshapes._FONTS[:] = ['Times-Roman','Times-Bold','Times-Italic', 'Times-BoldItalic','Courier']
    try:
        import os
        # save all drawings and their doc strings from the test file
        if not os.path.isdir(outDir):
            os.mkdir(outDir)
        #grab all drawings from the test module
        drawings = []
        for funcname in dir(testshapes):
            if funcname.startswith('getDrawing'):
                # look the factory up directly instead of building and
                # eval()-ing source strings
                func = getattr(testshapes, funcname)
                drawings.append((func(), func.__doc__))
        for i, (d, docstring) in enumerate(drawings):
            filename = outDir + os.sep + 'renderPS_%d.eps'%i
            drawToFile(d,filename)
            if shout or verbose>2: print('renderPS test saved %s' % ascii(filename))
    finally:
        testshapes._FONTS[:] = OLDFONTS
if __name__=='__main__':
    import sys
    # an optional first CLI argument selects the output directory
    outdir = sys.argv[1] if len(sys.argv) > 1 else 'epsout'
    test(outdir, shout=True)
| 38.92163 | 118 | 0.581159 |
5de6cdcd03211bc02b6a6bc9dc03a85fad99918e | 801 | py | Python | tests/test_cli_opts.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 6 | 2021-02-13T05:03:32.000Z | 2021-09-19T14:57:58.000Z | tests/test_cli_opts.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 21 | 2019-04-12T17:54:51.000Z | 2021-11-04T18:47:45.000Z | tests/test_cli_opts.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 2 | 2020-05-26T15:03:20.000Z | 2021-07-29T09:09:05.000Z | from mythril.interfaces.cli import main
import pytest
import json
import sys
def test_version_opt(capsys):
    """'myth --version' should print a version string; with '-o json' it
    should print a JSON object carrying 'version_str'."""
    def run_cli(argv):
        # invoke the CLI with the given argv and return captured stdout;
        # the CLI always exits via SystemExit
        sys.argv = argv
        with pytest.raises(SystemExit) as exc_info:
            main()
        assert exc_info.type == SystemExit
        return capsys.readouterr().out

    # Check that "myth --version" returns a string with the word
    # "version" in it
    plain_out = run_cli(["mythril", "--version"])
    assert plain_out.find(" version ") >= 1

    # Check that "myth --version -o json" returns a JSON object
    json_out = run_cli(["mythril", "--version", "-o", "json"])
    parsed = json.loads(json_out)
    assert isinstance(parsed, dict)
    assert parsed["version_str"]
| 29.666667 | 64 | 0.675406 |
dfc5c8a448b306d051d1988df75316b4dda21069 | 1,164 | py | Python | ding/utils/tests/test_log_writer_helper.py | puyuan1996/DI-engine | da99c01e06589033b7412829dc2f2c339083f219 | [
"Apache-2.0"
] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | ding/utils/tests/test_log_writer_helper.py | puyuan1996/DI-engine | da99c01e06589033b7412829dc2f2c339083f219 | [
"Apache-2.0"
] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | ding/utils/tests/test_log_writer_helper.py | puyuan1996/DI-engine | da99c01e06589033b7412829dc2f2c339083f219 | [
"Apache-2.0"
] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z | import pytest
import time
import tempfile
import shutil
import os
from os import path
from ding.framework import Parallel
from ding.framework.task import Task
from ding.utils import DistributedWriter
def main_distributed_writer(tempdir):
    # Entry point executed on every parallel worker node. Each node runs
    # its own Task; only the node labelled "node.0" is registered as the
    # actual writer, so both nodes' scalars should end up under a single
    # writer -- presumably routed via the plugin; verify against
    # DistributedWriter's plugin semantics.
    with Task() as task:
        time.sleep(task.router.node_id * 1)  # Sleep 0 and 1, write to different files
        tblogger = DistributedWriter(tempdir).plugin(task, is_writer=("node.0" in task.labels))

        def _add_scalar(ctx):
            # emit 10 scalars per step, tagged with this node's id
            n = 10
            for i in range(n):
                tblogger.add_scalar(str(task.router.node_id), task.router.node_id, ctx.total_step * n + i)

        task.use(_add_scalar)
        task.use(lambda _: time.sleep(0.2))
        task.run(max_step=10)
        # node 0 sleeps longer after the run than node 1 -- presumably to
        # flush records routed from its peer before exiting; TODO confirm
        time.sleep(0.3 + (1 - task.router.node_id) * 2)
@pytest.mark.unittest
def test_distributed_writer():
    """Run the two-worker writer scenario and check that exactly one entry
    is created in the shared log directory, cleaning up afterwards."""
    log_dir = path.join(tempfile.gettempdir(), "tblogger")
    try:
        Parallel.runner(n_parallel_workers=2)(main_distributed_writer, log_dir)
        assert path.exists(log_dir)
        assert len(os.listdir(log_dir)) == 1
    finally:
        if path.exists(log_dir):
            shutil.rmtree(log_dir)
| 29.1 | 106 | 0.670103 |
208ecc61049627b8a82e64ba58dd80711cff1cc5 | 1,195 | py | Python | deepchem/trans/__init__.py | kshen3778/deepchem | 43a79ec9d8ad867211cbf2d5ad079238e4324208 | [
"MIT"
] | null | null | null | deepchem/trans/__init__.py | kshen3778/deepchem | 43a79ec9d8ad867211cbf2d5ad079238e4324208 | [
"MIT"
] | null | null | null | deepchem/trans/__init__.py | kshen3778/deepchem | 43a79ec9d8ad867211cbf2d5ad079238e4324208 | [
"MIT"
] | null | null | null | """
Gathers all transformers in one place for convenient imports
"""
# flake8: noqa
from deepchem.trans.transformers import undo_transforms
from deepchem.trans.transformers import undo_grad_transforms
from deepchem.trans.transformers import Transformer
from deepchem.trans.transformers import LogTransformer
from deepchem.trans.transformers import ClippingTransformer
from deepchem.trans.transformers import NormalizationTransformer
from deepchem.trans.transformers import BalancingTransformer
from deepchem.trans.transformers import CDFTransformer
from deepchem.trans.transformers import PowerTransformer
from deepchem.trans.transformers import CoulombFitTransformer
from deepchem.trans.transformers import IRVTransformer
from deepchem.trans.transformers import DAGTransformer
from deepchem.trans.transformers import MinMaxTransformer
from deepchem.trans.transformers import FeaturizationTransformer
from deepchem.trans.transformers import ImageTransformer
from deepchem.trans.transformers import DataTransforms
from deepchem.trans.transformers import Transformer
from deepchem.trans.transformers import FlatteningTransformer
from deepchem.trans.duplicate import DuplicateBalancingTransformer
| 47.8 | 66 | 0.885356 |
96ecf3b884eb14913dbccf107b3ce240e278ec9c | 2,379 | py | Python | src/ai_harness/inspector.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | src/ai_harness/inspector.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | src/ai_harness/inspector.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | class NoneAttr:
pass
class Inspector:
    """Reflection helpers for reading and writing attributes addressed by
    plain names or dotted paths ('a.b.c') on arbitrary objects."""

    @staticmethod
    def parse_attribute(instance, name, with_type=False, parse=False):
        """Resolve *name* against *instance*.

        With parse=True, *name* may be a dotted path; the intermediate
        attributes are walked and the direct parent of the last segment
        is returned. Returns (parent, attr_name, declared_type), or
        (None, None, None) when an intermediate attribute is missing.
        declared_type is only looked up (from the parent type's
        '__configclass_fields__' mapping) when with_type is true.
        """
        parent = instance
        if parse:
            attributes = name.split('.')
            name = attributes[-1]
            for attr in attributes[:-1]:
                # BUGFIX: walk from the current parent; the old code looked
                # every segment up on the root instance, so paths deeper
                # than two levels could never resolve.
                if not hasattr(parent, attr):
                    return None, None, None
                parent = getattr(parent, attr)
        if not with_type:
            return parent, name, None
        # Robustness: plain (non-configclass) parents used to crash here
        # with AttributeError because .get() was called on None.
        fields = type(parent).__dict__.get('__configclass_fields__')
        field = fields.get(name) if fields else None
        if field is None:
            return parent, name, None
        return parent, name, field.type

    @staticmethod
    def field_type(instance, name: str, parse=False):
        """Return the declared type of attribute *name*, or None."""
        _, _, type = Inspector.parse_attribute(instance, name, True, parse)
        return type

    @staticmethod
    def get_attr(instance, name, parse=False):
        """Return the value of attribute *name* (dotted when parse=True),
        or None when any part of the path is missing."""
        parent, name, _ = Inspector.parse_attribute(instance, name, False, parse)
        if parent is None:
            return None
        return getattr(parent, name, None)

    @staticmethod
    def get_attr_with_type(instance, name, parse=False):
        """Return (value, declared_type) for *name*, or (None, None)."""
        parent, name, type = Inspector.parse_attribute(instance, name, True, parse)
        if parent is None:
            return None, None
        return getattr(parent, name, None), type

    @staticmethod
    def set_attr_from(fromO, toO, attr, parseFrom=True, parseTo=True):
        """Copy attribute *attr* from *fromO* onto *toO*, converting through
        the declared type when one is known, and return *toO*.

        No-op when any argument is None, when the target attribute does not
        exist, or when the source value is None.
        """
        if fromO is None or toO is None or attr is None:
            return toO
        parent = toO
        name = attr
        t = None
        if parseTo:
            parent, name, t = Inspector.parse_attribute(parent, attr, True, parseTo)
        if parent is None or not hasattr(parent, name):
            return toO
        v = Inspector.get_attr(fromO, attr, parseFrom)
        if v is None:
            return toO
        setattr(parent, name, v if t is None else t(v))
        return toO

    @staticmethod
    def dict2obj(dict: dict, obj):
        """Copy every key/value pair of *dict* onto *obj* as attributes and
        return *obj*."""
        for k, v in dict.items():
            setattr(obj, k, v)
        return obj

    @staticmethod
    def initialize_attr(instance, attr, parse=False):
        """Default-construct attribute *attr* from its declared type when
        its current value is None; return *instance*."""
        if instance is None or attr is None:
            return instance
        if getattr(instance, attr) is not None:
            return instance
        t = Inspector.field_type(instance, attr, parse)
        # BUGFIX: the old code re-tested 'attr is None' here, so a missing
        # declared type fell through and called None().
        if t is None:
            return instance
        setattr(instance, attr, t())
        # BUGFIX: return the instance on the success path too (it used to
        # implicitly return None).
        return instance
| 32.148649 | 84 | 0.617486 |
fb05819e7412475f029021ebd6612d999aeaf5c3 | 4,729 | py | Python | python/pyspark/__init__.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2020-01-17T06:23:43.000Z | 2022-02-05T18:01:45.000Z | python/pyspark/__init__.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2020-10-21T13:44:10.000Z | 2022-03-31T05:04:46.000Z | python/pyspark/__init__.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-10-30T17:13:49.000Z | 2022-03-13T22:26:29.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark is the Python API for Spark.
Public classes:
- :class:`SparkContext`:
Main entry point for Spark functionality.
- :class:`RDD`:
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
- :class:`Broadcast`:
A broadcast variable that gets reused across tasks.
- :class:`Accumulator`:
An "add-only" shared variable that tasks can only add values to.
- :class:`SparkConf`:
For configuring Spark.
- :class:`SparkFiles`:
Access files shipped with jobs.
- :class:`StorageLevel`:
Finer-grained cache persistence levels.
- :class:`TaskContext`:
Information about the current running task, available on the workers and experimental.
- :class:`RDDBarrier`:
Wraps an RDD under a barrier stage for barrier execution.
- :class:`BarrierTaskContext`:
A :class:`TaskContext` that provides extra info and tooling for barrier execution.
- :class:`BarrierTaskInfo`:
Information about a barrier task.
- :class:`InheritableThread`:
A inheritable thread to use in Spark when the pinned thread mode is on.
"""
from functools import wraps
import types
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD, RDDBarrier
from pyspark.files import SparkFiles
from pyspark.status import StatusTracker, SparkJobInfo, SparkStageInfo
from pyspark.util import InheritableThread
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
from pyspark.taskcontext import TaskContext, BarrierTaskContext, BarrierTaskInfo
from pyspark.profiler import Profiler, BasicProfiler
from pyspark.version import __version__ # noqa: F401
from pyspark._globals import _NoValue # noqa: F401
def since(version):
    """
    A decorator that annotates a function to append the version of Spark the function was added.
    """
    import re
    indent_pattern = re.compile(r'\n( +)')

    def deco(f):
        # match the docstring's smallest existing indentation so the
        # appended directive lines up with the body text
        matches = indent_pattern.findall(f.__doc__)
        pad = ' ' * min(len(m) for m in matches) if matches else ''
        f.__doc__ = "%s\n\n%s.. versionadded:: %s" % (f.__doc__.rstrip(), pad, version)
        return f
    return deco
def copy_func(f, name=None, sinceversion=None, doc=None):
    """
    Returns a function with same code, globals, defaults, closure, and
    name (or provide a new name).
    """
    # See
    # http://stackoverflow.com/questions/6527633/how-can-i-make-a-deepcopy-of-a-function-in-python
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,
        name or f.__name__,
        f.__defaults__,
        f.__closure__,
    )
    # in case f was given attrs (note this dict is a shallow copy):
    clone.__dict__.update(f.__dict__)
    if doc is not None:
        clone.__doc__ = doc
    if sinceversion is not None:
        clone = since(sinceversion)(clone)
    return clone
def keyword_only(func):
    """
    A decorator that forces keyword arguments in the wrapped method
    and saves actual input keyword arguments in `_input_kwargs`.

    .. note:: Should only be used to wrap a method where first arg is `self`
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # any positional argument beyond self is an error
        if args:
            raise TypeError("Method %s forces keyword arguments." % func.__name__)
        self._input_kwargs = kwargs
        return func(self, **kwargs)
    return wrapper
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row # noqa: F401
# Public names exported by `from pyspark import *`.
__all__ = [
    "SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
    "Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
    "StatusTracker", "SparkJobInfo", "SparkStageInfo", "Profiler", "BasicProfiler", "TaskContext",
    "RDDBarrier", "BarrierTaskContext", "BarrierTaskInfo", "InheritableThread",
]
| 37.531746 | 98 | 0.716008 |
56a7dcbd6683ac2fdffa04221d062212c043d6c7 | 468 | py | Python | src/storfox_framework/grpc.py | storfox/storfox-framework | ffe8a89cb9abb203d696c6b24467f97834bd92a0 | [
"Apache-2.0"
] | null | null | null | src/storfox_framework/grpc.py | storfox/storfox-framework | ffe8a89cb9abb203d696c6b24467f97834bd92a0 | [
"Apache-2.0"
] | null | null | null | src/storfox_framework/grpc.py | storfox/storfox-framework | ffe8a89cb9abb203d696c6b24467f97834bd92a0 | [
"Apache-2.0"
] | null | null | null | import typing
import asyncio
from grpclib.utils import graceful_exit
from grpclib.server import Server
def run_forever(handlers: typing.List[typing.Any], host, port):
    """Serve the given grpclib handlers on host:port, blocking the calling
    thread until the server is closed."""
    async def start():
        server = Server(handlers)
        # graceful_exit arranges for the server to be closed on shutdown,
        # letting wait_closed() return instead of killing the process
        with graceful_exit([server]):
            await server.start(host, port)
            print(f"Serving on {host}:{port}")
            await server.wait_closed()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(start())
| 26 | 63 | 0.673077 |
0f7ba84ffc147b75a5dbc29988263e3ff31b2d4c | 5,543 | py | Python | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | TochkaAI/Paddle | 481ee79fc92304f33165f7ed0679f16c36862cea | [
"Apache-2.0"
] | 3 | 2021-06-08T14:24:36.000Z | 2021-06-08T14:24:38.000Z | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | chenyanlei1/Paddle | f249a5f05f0f5832279244d88c8cb4eaaad1fbd4 | [
"Apache-2.0"
] | 1 | 2021-03-17T07:53:43.000Z | 2021-03-17T07:53:43.000Z | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | chenyanlei1/Paddle | f249a5f05f0f5832279244d88c8cb4eaaad1fbd4 | [
"Apache-2.0"
] | 2 | 2021-02-19T06:42:29.000Z | 2021-02-26T12:16:05.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import paddle
import numpy as np
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args, IS_WINDOWS, IS_MAC
from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
file = '{}\\custom_relu_module_jit\\custom_relu_module_jit.pyd'.format(
    get_build_directory())
if os.name == 'nt' and os.path.isfile(file):
    # delete the cached .pyd so the op is recompiled fresh on Windows
    cmd = 'del {}'.format(file)
    run_cmd(cmd, True)

# Compile and load custom op Just-In-Time.
# custom_relu_op_dup.cc is only used for multi ops test,
# not a new op, if you want to test only one op, remove this
# source file
sources = ['custom_relu_op.cc', 'custom_relu_op_dup.cc']
if not IS_MAC:
    # the .cu source is only added on non-macOS platforms
    sources.append('custom_relu_op.cu')

custom_module = load(
    name='custom_relu_module_jit',
    sources=sources,
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_cxx_cflags=extra_cc_args,  # test for cc flags
    extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
    verbose=True)
class TestJITLoad(unittest.TestCase):
    """Checks the JIT-compiled custom relu ops against paddle's reference
    path in both static and dynamic modes, plus dtype error reporting."""

    def setUp(self):
        # exercise both registrations of the same kernel
        self.custom_ops = [
            custom_module.custom_relu, custom_module.custom_relu_dup
        ]
        self.dtypes = ['float32', 'float64']
        if paddle.is_compiled_with_cuda():
            # float16 is only exercised on CUDA builds
            self.dtypes.append('float16')
        self.devices = ['cpu']
        if paddle.is_compiled_with_cuda():
            self.devices.append('gpu')

    def test_static(self):
        # Compare the custom op with the non-custom path (trailing False
        # presumably selects the reference implementation -- confirm in
        # test_custom_relu_op_setup) over every device/dtype combination.
        for device in self.devices:
            for dtype in self.dtypes:
                if device == 'cpu' and dtype == 'float16':
                    continue
                x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
                for custom_op in self.custom_ops:
                    out = custom_relu_static(custom_op, device, dtype, x)
                    pd_out = custom_relu_static(custom_op, device, dtype, x,
                                                False)
                    self.assertTrue(
                        np.array_equal(out, pd_out),
                        "custom op out: {},\n paddle api out: {}".format(
                            out, pd_out))

    def test_dynamic(self):
        # Same comparison in dynamic mode; also checks the input gradient.
        for device in self.devices:
            for dtype in self.dtypes:
                if device == 'cpu' and dtype == 'float16':
                    continue
                x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
                for custom_op in self.custom_ops:
                    out, x_grad = custom_relu_dynamic(custom_op, device, dtype,
                                                      x)
                    pd_out, pd_x_grad = custom_relu_dynamic(custom_op, device,
                                                            dtype, x, False)
                    self.assertTrue(
                        np.array_equal(out, pd_out),
                        "custom op out: {},\n paddle api out: {}".format(
                            out, pd_out))
                    self.assertTrue(
                        np.array_equal(x_grad, pd_x_grad),
                        "custom op x grad: {},\n paddle api x grad: {}".format(
                            x_grad, pd_x_grad))

    def test_exception(self):
        # Calling the op with an unregistered dtype (int32) must raise an
        # OSError whose message names the missing kernel and source file.
        caught_exception = False
        try:
            x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
            custom_relu_dynamic(custom_module.custom_relu, 'cpu', 'int32', x)
        except OSError as e:
            caught_exception = True
            self.assertTrue(
                "function \"relu_cpu_forward\" is not implemented for data type `int32_t`"
                in str(e))
            # the reported path uses the platform's separator style
            if IS_WINDOWS:
                self.assertTrue(
                    r"python\paddle\fluid\tests\custom_op\custom_relu_op.cc" in
                    str(e))
            else:
                self.assertTrue(
                    "python/paddle/fluid/tests/custom_op/custom_relu_op.cc" in
                    str(e))
        self.assertTrue(caught_exception)

        caught_exception = False
        # MAC-CI don't support GPU
        if IS_MAC:
            return
        try:
            x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
            custom_relu_dynamic(custom_module.custom_relu, 'gpu', 'int32', x)
        except OSError as e:
            caught_exception = True
            self.assertTrue(
                "function \"relu_cuda_forward_kernel\" is not implemented for data type `int32_t`"
                in str(e))
            self.assertTrue(
                "python/paddle/fluid/tests/custom_op/custom_relu_op.cu" in
                str(e))
        self.assertTrue(caught_exception)
if __name__ == '__main__':
unittest.main()
| 40.757353 | 98 | 0.587588 |
4a854e4abccacb3f58018fb682c621c44f6ead44 | 1,708 | py | Python | lrp_saturation_test.py | yidinghao/whitebox-lstm | dcdeaf11c8c374d200801791b59c007094f70baf | [
"MIT"
] | 2 | 2020-11-17T21:57:24.000Z | 2021-01-23T13:16:24.000Z | lrp_saturation_test.py | yidinghao/whitebox-lstm | dcdeaf11c8c374d200801791b59c007094f70baf | [
"MIT"
] | null | null | null | lrp_saturation_test.py | yidinghao/whitebox-lstm | dcdeaf11c8c374d200801791b59c007094f70baf | [
"MIT"
] | null | null | null | """
This is the script for running the LRP saturation test. It tries to
determine whether prefixes of counter task inputs with equal numbers of
as and bs receive LRP scores of 0. This test is run with varying levels
of model saturation.
"""
import csv
import numpy as np
from attribution import LRPAttribution
from models.whitebox import CounterRNN
def _zero_point(x: str) -> int:
ctr = 0
for ind, letter in enumerate(x):
if letter == "a":
ctr += 1
else:
ctr -= 1
if ctr == 0:
return ind
return -1
def lrp_saturation_test(model: CounterRNN) -> None:
    """Run the LRP saturation test for one model and print summary stats.

    Reads ``datasets/lrp_saturation_test_data.csv`` (each row: input string,
    expected label), then for every row: checks model accuracy, counts how
    often the LRP attribution of the first character is (near) zero, and
    records the cell-state value at the input's zero-balance point.
    Results are printed, not returned.

    NOTE(review): the printed denominators ("out of 1000" / "out of 100")
    are hard-coded — confirm they match the actual dataset size.
    """
    lrp = LRPAttribution(model)
    with open("datasets/lrp_saturation_test_data.csv", "r") as f:
        reader = csv.reader(f)
        n_correct = 0
        n_zeros = 0
        zero_values = []
        for line in reader:
            # line[1] is the expected label; map it to its output index.
            y_ind = model.y_stoi[line[1]]
            # Test accuracy
            x_batch = model.x_field.process([line[0]])
            y_hat = model(*x_batch).squeeze()
            if y_ind == y_hat.argmax():
                n_correct += 1
            # Count percentage of zeros
            # 1e-5 tolerance treats tiny attributions as exactly zero;
            # target=3 — presumably a fixed output class. TODO confirm.
            if abs(lrp(line[0], target=3)[0]) < 1e-5:
                n_zeros += 1
            # Record average value of zero counter
            zero_ind = _zero_point(line[0])
            # First trace entry's second element, first unit — assumes this
            # is the LSTM cell state c_t; verify against CounterRNN.
            c_t = model.lstm.traces[0][1][:, 0]
            zero_values.append(c_t[zero_ind])
        print("m =", model.m)
        print("Zeros: {} out of 1000".format(n_zeros))
        print("Correct: {} out of 100".format(n_correct))
        print("Avg. Zero Counter:", np.mean(zero_values), end="\n\n")
if __name__ == "__main__":
for m in range(8, 25, 2):
lrp_saturation_test(CounterRNN(m=m / 2))
| 27.548387 | 71 | 0.584309 |
c50a3790e2cc6ed9e1a3fbfb59ca29ea60cf12a7 | 560 | py | Python | test_case_prioritazation/utils/test_data.py | mandriv/test-case-prioritazation | 29bdbe92482e14247042ffc2f619254955fc2bec | [
"MIT"
] | null | null | null | test_case_prioritazation/utils/test_data.py | mandriv/test-case-prioritazation | 29bdbe92482e14247042ffc2f619254955fc2bec | [
"MIT"
] | null | null | null | test_case_prioritazation/utils/test_data.py | mandriv/test-case-prioritazation | 29bdbe92482e14247042ffc2f619254955fc2bec | [
"MIT"
] | null | null | null | def read(filename):
file = open(filename, 'r')
lines = file.readlines()
data = []
for line in lines:
# csv
bits = line.split(',')
# first thing in csv is an identifier
id = bits.pop(0)
# rest are tests
for i in range(len(bits)):
# format each bit so it is a int
bits[i] = int(bits[i][0])
# create dict that holds identifier and faults detected
piece = {
'id': id,
'faults': bits
}
data.append(piece)
return data
| 26.666667 | 63 | 0.5 |
246cf214b8ae7322289bbfa0cbd083beb7817511 | 69,740 | py | Python | test/orm/inheritance/test_relationship.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | 1 | 2018-04-02T18:41:52.000Z | 2018-04-02T18:41:52.000Z | test/orm/inheritance/test_relationship.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | null | null | null | test/orm/inheritance/test_relationship.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | 3 | 2017-09-26T13:59:24.000Z | 2020-12-04T17:51:54.000Z | from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
Session, aliased, with_polymorphic, joinedload_all, backref
from sqlalchemy import Integer, String, ForeignKey, select, func
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, fixtures
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import assert_raises, eq_, is_
class Company(fixtures.ComparableEntity):
pass
class Person(fixtures.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Machine(fixtures.ComparableEntity):
pass
class Paperwork(fixtures.ComparableEntity):
pass
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('people.person_id')))
@classmethod
def setup_mappers(cls):
engineers, people = cls.tables.engineers, cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
inherit_condition=engineers.c.person_id == people.c.person_id,
polymorphic_identity='engineer',
properties={
'reports_to': relationship(
Person,
primaryjoin=(
people.c.person_id == engineers.c.reports_to_id))})
def test_has(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Person.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_oftype_aliases_in_exists(self):
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++', reports_to=e1)
sess = create_session()
sess.add_all([e1, e2])
sess.flush()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to
.of_type(Engineer)
.has(Engineer.name == 'dilbert'))
.first(),
e2)
def test_join(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Person.name == 'dogbert').first(),
Engineer(name='dilbert'))
class SelfReferentialJ2JTest(fixtures.MappedTest):
    """Engineer.reports_to targets the Manager joined subclass.

    Joined-inheritance to joined-inheritance relationship; tests has(),
    join(), aliasing during joins, and comparing the relationship to an
    instance / None.
    """

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('primary_language', String(50)),
                          Column('reports_to_id', Integer,
                                 ForeignKey('managers.person_id')))

        managers = Table('managers', metadata,
                         Column('person_id', Integer,
                                ForeignKey('people.person_id'),
                                primary_key=True),)

    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        managers = cls.tables.managers
        people = cls.tables.people

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        mapper(Manager, managers,
               inherits=Person,
               polymorphic_identity='manager')

        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer',
               properties={
                   'reports_to': relationship(
                       Manager,
                       primaryjoin=(
                           managers.c.person_id == engineers.c.reports_to_id),
                       backref='engineers')})

    def test_has(self):
        m1 = Manager(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        sess = create_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()

        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to.has(Manager.name == 'dogbert'))
            .first(),
            Engineer(name='dilbert'))

    def test_join(self):
        m1 = Manager(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        sess = create_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()

        eq_(sess.query(Engineer)
            .join('reports_to', aliased=True)
            .filter(Manager.name == 'dogbert').first(),
            Engineer(name='dilbert'))

    def test_filter_aliasing(self):
        m1 = Manager(name='dogbert')
        m2 = Manager(name='foo')
        e1 = Engineer(name='wally', primary_language='java', reports_to=m1)
        e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2)
        e3 = Engineer(name='etc', primary_language='c++')

        sess = create_session()
        sess.add_all([m1, m2, e1, e2, e3])
        sess.flush()
        sess.expunge_all()

        # filter aliasing applied to Engineer doesn't whack Manager
        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Manager.name == 'dogbert').all(),
            [m1])

        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.name == 'dilbert').all(),
            [m2])

        eq_(sess.query(Manager, Engineer)
            .join(Manager.engineers)
            .order_by(Manager.name.desc()).all(),
            [(m2, e2), (m1, e1)])

    def test_relationship_compare(self):
        m1 = Manager(name='dogbert')
        m2 = Manager(name='foo')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        e2 = Engineer(name='wally', primary_language='c++', reports_to=m2)
        e3 = Engineer(name='etc', primary_language='c++')

        sess = create_session()
        sess.add(m1)
        sess.add(m2)
        sess.add(e1)
        sess.add(e2)
        sess.add(e3)
        sess.flush()
        sess.expunge_all()

        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.reports_to == None).all(),  # noqa
            [])

        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.reports_to == m1).all(),
            [m1])
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
    """Engineer.reports_to is self-referential on the joined sub-table.

    A joined-inheritance subclass relating to itself; exercises has(),
    explicit alias joins, aliased-flag joins, and relationship comparison
    operators.
    """

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('reports_to_id', Integer,
                                 ForeignKey('engineers.person_id')))

    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        people = cls.tables.people

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        # remote_side disambiguates the direction of the self-referential
        # join on the engineers table.
        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer',
               properties={
                   'reports_to': relationship(
                       Engineer,
                       primaryjoin=(
                           engineers.c.person_id == engineers.c.reports_to_id),
                       backref='engineers',
                       remote_side=engineers.c.person_id)})

    def _two_obj_fixture(self):
        e1 = Engineer(name='wally')
        e2 = Engineer(name='dilbert', reports_to=e1)
        sess = Session()
        sess.add_all([e1, e2])
        sess.commit()
        return sess

    def _five_obj_fixture(self):
        sess = Session()
        e1, e2, e3, e4, e5 = [
            Engineer(name='e%d' % (i + 1)) for i in range(5)
        ]
        e3.reports_to = e1
        e4.reports_to = e2
        sess.add_all([e1, e2, e3, e4, e5])
        sess.commit()
        return sess

    def test_has(self):
        sess = self._two_obj_fixture()
        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to.has(Engineer.name == 'wally'))
            .first(),
            Engineer(name='dilbert'))

    def test_join_explicit_alias(self):
        sess = self._five_obj_fixture()
        ea = aliased(Engineer)
        eq_(sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(Engineer.name == 'e1').all(),
            [Engineer(name='e1')])

    def test_join_aliased_flag_one(self):
        sess = self._two_obj_fixture()
        eq_(sess.query(Engineer)
            .join('reports_to', aliased=True)
            .filter(Engineer.name == 'wally').first(),
            Engineer(name='dilbert'))

    def test_join_aliased_flag_two(self):
        sess = self._five_obj_fixture()
        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.name == 'e4').all(),
            [Engineer(name='e2')])

    def test_relationship_compare(self):
        sess = self._five_obj_fixture()
        e1 = sess.query(Engineer).filter_by(name='e1').one()
        e2 = sess.query(Engineer).filter_by(name='e2').one()

        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.reports_to == None).all(),  # noqa
            [])

        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.reports_to == e1).all(),
            [e1])

        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.reports_to != None).all(),  # noqa
            [e1, e2])
class M2MFilterTest(fixtures.MappedTest):
    """Many-to-many from Organization to the Engineer joined subclass.

    Tests any()/contains() filtering across the secondary table, with and
    without of_type().
    """

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        organizations = Table('organizations', metadata,
                              Column('id', Integer,
                                     primary_key=True,
                                     test_needs_autoincrement=True),
                              Column('name', String(50)))

        engineers_to_org = Table('engineers_to_org', metadata,
                                 Column('org_id', Integer,
                                        ForeignKey('organizations.id')),
                                 Column('engineer_id', Integer,
                                        ForeignKey('engineers.person_id')))

        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('primary_language', String(50)))

    @classmethod
    def setup_mappers(cls):
        organizations = cls.tables.organizations
        people = cls.tables.people
        engineers = cls.tables.engineers
        engineers_to_org = cls.tables.engineers_to_org

        class Organization(cls.Comparable):
            pass

        mapper(Organization, organizations,
               properties={
                   'engineers': relationship(
                       Engineer,
                       secondary=engineers_to_org,
                       backref='organizations')})

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer')

    @classmethod
    def insert_data(cls):
        Organization = cls.classes.Organization
        e1 = Engineer(name='e1')
        e2 = Engineer(name='e2')
        e3 = Engineer(name='e3')
        e4 = Engineer(name='e4')
        org1 = Organization(name='org1', engineers=[e1, e2])
        org2 = Organization(name='org2', engineers=[e3, e4])
        sess = create_session()
        sess.add(org1)
        sess.add(org2)
        sess.flush()

    def test_not_contains(self):
        Organization = self.classes.Organization

        sess = create_session()
        e1 = sess.query(Person).filter(Engineer.name == 'e1').one()

        eq_(sess.query(Organization)
            .filter(~Organization.engineers
                    .of_type(Engineer)
                    .contains(e1))
            .all(),
            [Organization(name='org2')])

        # this had a bug
        eq_(sess.query(Organization)
            .filter(~Organization.engineers
                    .contains(e1))
            .all(),
            [Organization(name='org2')])

    def test_any(self):
        sess = create_session()
        Organization = self.classes.Organization

        eq_(sess.query(Organization)
            .filter(Organization.engineers
                    .of_type(Engineer)
                    .any(Engineer.name == 'e1'))
            .all(),
            [Organization(name='org1')])

        eq_(sess.query(Organization)
            .filter(Organization.engineers
                    .any(Engineer.name == 'e1'))
            .all(),
            [Organization(name='org1')])
class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Self-referential many-to-many between two subclasses of Parent.

    Child1.left_child2 relates Child1 to Child2 through the ``secondary``
    table; tests query criteria, eager joins, and subquery loading, with
    exact compiled-SQL assertions.
    """

    __dialect__ = "default"

    @classmethod
    def define_tables(cls, metadata):
        Table('secondary', metadata,
              Column('left_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False),
              Column('right_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False))

        Table('parent', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('cls', String(50)))

        Table('child1', metadata,
              Column('id', Integer,
                     ForeignKey('parent.id'),
                     primary_key=True))

        Table('child2', metadata,
              Column('id', Integer,
                     ForeignKey('parent.id'),
                     primary_key=True))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            pass

        class Child1(Parent):
            pass

        class Child2(Parent):
            pass

    @classmethod
    def setup_mappers(cls):
        child1 = cls.tables.child1
        child2 = cls.tables.child2
        Parent = cls.classes.Parent
        parent = cls.tables.parent
        Child1 = cls.classes.Child1
        Child2 = cls.classes.Child2
        secondary = cls.tables.secondary

        mapper(Parent, parent,
               polymorphic_on=parent.c.cls)

        # Note the reversed primaryjoin/secondaryjoin: left_child2 reads
        # through secondary.right_id -> secondary.left_id.
        mapper(Child1, child1,
               inherits=Parent,
               polymorphic_identity='child1',
               properties={
                   'left_child2': relationship(
                       Child2,
                       secondary=secondary,
                       primaryjoin=parent.c.id == secondary.c.right_id,
                       secondaryjoin=parent.c.id == secondary.c.left_id,
                       uselist=False,
                       backref="right_children")})

        mapper(Child2, child2,
               inherits=Parent,
               polymorphic_identity='child2')

    def test_query_crit(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c11, c12, c13 = Child1(), Child1(), Child1()
        c21, c22, c23 = Child2(), Child2(), Child2()
        c11.left_child2 = c22
        c12.left_child2 = c22
        c13.left_child2 = c23
        sess.add_all([c11, c12, c13, c21, c22, c23])
        sess.flush()

        # test that the join to Child2 doesn't alias Child1 in the select
        eq_(set(sess.query(Child1).join(Child1.left_child2)),
            set([c11, c12, c13]))

        eq_(set(sess.query(Child1, Child2).join(Child1.left_child2)),
            set([(c11, c22), (c12, c22), (c13, c23)]))

        # test __eq__() on property is annotating correctly
        eq_(set(sess.query(Child2)
                .join(Child2.right_children)
                .filter(Child1.left_child2 == c22)),
            set([c22]))

        # test the same again
        self.assert_compile(
            sess.query(Child2).join(Child2.right_children).
            filter(Child1.left_child2 == c22).with_labels().statement,
            "SELECT child2.id AS child2_id, parent.id AS parent_id, "
            "parent.cls AS parent_cls FROM secondary AS secondary_1, "
            "parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
            "secondary_2 ON parent.id = secondary_2.left_id JOIN "
            "(parent AS parent_1 JOIN child1 AS child1_1 "
            "ON parent_1.id = child1_1.id) "
            "ON parent_1.id = secondary_2.right_id WHERE "
            "parent_1.id = secondary_1.right_id AND :param_1 = "
            "secondary_1.left_id")

    def test_eager_join(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()

        # test that the splicing of the join works here, doesn't break in
        # the middle of "parent join child1"
        q = sess.query(Child1).options(joinedload('left_child2'))
        self.assert_compile(
            q.limit(1).with_labels().statement,
            "SELECT child1.id AS child1_id, parent.id AS parent_id, "
            "parent.cls AS parent_cls, child2_1.id AS child2_1_id, "
            "parent_1.id AS parent_1_id, parent_1.cls AS parent_1_cls "
            "FROM parent JOIN child1 ON parent.id = child1.id "
            "LEFT OUTER JOIN (secondary AS secondary_1 JOIN "
            "(parent AS parent_1 JOIN child2 AS child2_1 "
            "ON parent_1.id = child2_1.id) "
            "ON parent_1.id = secondary_1.left_id) "
            "ON parent.id = secondary_1.right_id "
            "LIMIT :param_1",
            checkparams={'param_1': 1}
        )

        # another way to check
        eq_(
            select([func.count('*')]).select_from(
                q.limit(1).with_labels().subquery()
            ).scalar(),
            1
        )
        assert q.first() is c1

    def test_subquery_load(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()
        sess.expunge_all()

        query_ = sess.query(Child1).options(subqueryload('left_child2'))
        for row in query_.all():
            assert row.left_child2
class EagerToSubclassTest(fixtures.MappedTest):
    """Test eager loads to subclass mappers"""

    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('parent', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(10)))

        Table('base', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)),
              Column('related_id', Integer,
                     ForeignKey('related.id')))

        Table('sub', metadata,
              Column('id', Integer,
                     ForeignKey('base.id'),
                     primary_key=True),
              Column('data', String(10)),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False))

        Table('related', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(10)))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass

        class Base(cls.Comparable):
            pass

        class Sub(Base):
            pass

        class Related(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        related = cls.tables.related
        Related = cls.classes.Related

        # Parent.children targets the Sub subclass directly.
        mapper(Parent, parent,
               properties={'children': relationship(Sub, order_by=sub.c.data)})

        mapper(Base, base,
               polymorphic_on=base.c.type,
               polymorphic_identity='b',
               properties={'related': relationship(Related)})

        mapper(Sub, sub,
               inherits=Base,
               polymorphic_identity='s')

        mapper(Related, related)

    @classmethod
    def insert_data(cls):
        # p1/p2 are module-level globals shared by the test methods below.
        global p1, p2
        Parent = cls.classes.Parent
        Sub = cls.classes.Sub
        Related = cls.classes.Related
        sess = Session()
        r1, r2 = Related(data='r1'), Related(data='r2')
        s1 = Sub(data='s1', related=r1)
        s2 = Sub(data='s2', related=r2)
        s3 = Sub(data='s3')
        s4 = Sub(data='s4', related=r2)
        s5 = Sub(data='s5')
        p1 = Parent(data='p1', children=[s1, s2, s3])
        p2 = Parent(data='p2', children=[s4, s5])
        sess.add(p1)
        sess.add(p2)
        sess.commit()

    def test_joinedload(self):
        Parent = self.classes.Parent
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .options(joinedload(Parent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        Parent = self.classes.Parent
        Sub = self.classes.Sub
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .join(Parent.children)
                .options(contains_eager(Parent.children))
                .order_by(Parent.data, Sub.data).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_subq_through_related(self):
        Parent = self.classes.Parent
        Base = self.classes.Base
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .options(subqueryload_all(Parent.children, Base.related))
                .order_by(Parent.data).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 3)

    def test_subq_through_related_aliased(self):
        Parent = self.classes.Parent
        Base = self.classes.Base
        pa = aliased(Parent)
        sess = Session()

        def go():
            eq_(sess.query(pa)
                .options(subqueryload_all(pa.children, Base.related))
                .order_by(pa.data).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 3)
class SubClassEagerToSubClassTest(fixtures.MappedTest):
    """Test joinedloads from subclass to subclass mappers"""

    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('parent', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)))

        Table('subparent', metadata,
              Column('id', Integer,
                     ForeignKey('parent.id'),
                     primary_key=True),
              Column('data', String(10)))

        Table('base', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)))

        Table('sub', metadata,
              Column('id', Integer,
                     ForeignKey('base.id'),
                     primary_key=True),
              Column('data', String(10)),
              Column('subparent_id', Integer,
                     ForeignKey('subparent.id'),
                     nullable=False))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass

        class Subparent(Parent):
            pass

        class Base(cls.Comparable):
            pass

        class Sub(Base):
            pass

    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        subparent = cls.tables.subparent
        Subparent = cls.classes.Subparent

        mapper(Parent, parent,
               polymorphic_on=parent.c.type,
               polymorphic_identity='b')

        # The relationship originates on the Subparent subclass and
        # targets the Sub subclass.
        mapper(Subparent, subparent,
               inherits=Parent,
               polymorphic_identity='s',
               properties={
                   'children': relationship(Sub, order_by=base.c.id)})

        mapper(Base, base,
               polymorphic_on=base.c.type,
               polymorphic_identity='b')

        mapper(Sub, sub,
               inherits=Base,
               polymorphic_identity='s')

    @classmethod
    def insert_data(cls):
        # p1/p2 are module-level globals shared by the test methods below.
        global p1, p2
        Sub, Subparent = cls.classes.Sub, cls.classes.Subparent
        sess = create_session()
        p1 = Subparent(
            data='p1',
            children=[Sub(data='s1'), Sub(data='s2'), Sub(data='s3')])
        p2 = Subparent(
            data='p2',
            children=[Sub(data='s4'), Sub(data='s5')])
        sess.add(p1)
        sess.add(p2)
        sess.flush()

    def test_joinedload(self):
        Subparent = self.classes.Subparent

        sess = create_session()

        def go():
            eq_(sess.query(Subparent)
                .options(joinedload(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            eq_(sess.query(Subparent)
                .options(joinedload("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        Subparent = self.classes.Subparent

        sess = create_session()

        def go():
            eq_(sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            eq_(sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_subqueryload(self):
        Subparent = self.classes.Subparent

        sess = create_session()

        def go():
            eq_(sess.query(Subparent)
                .options(subqueryload(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 2)

        sess.expunge_all()

        def go():
            eq_(sess.query(Subparent)
                .options(subqueryload("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 2)
class SameNamedPropTwoPolymorphicSubClassesTest(fixtures.MappedTest):
    """test pathing when two subclasses contain a different property
    for the same name, and polymorphic loading is used.

    #2614

    """
    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)))
        Table('b', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True))
        Table('btod', metadata,
              Column('bid', Integer, ForeignKey('b.id'), nullable=False),
              Column('did', Integer, ForeignKey('d.id'), nullable=False)
              )
        Table('c', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True))
        Table('ctod', metadata,
              Column('cid', Integer, ForeignKey('c.id'), nullable=False),
              Column('did', Integer, ForeignKey('d.id'), nullable=False))
        Table('d', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True))

    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass

        class B(A):
            pass

        class C(A):
            pass

        class D(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        A = cls.classes.A
        B = cls.classes.B
        C = cls.classes.C
        D = cls.classes.D

        mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type)
        # B and C each define their own 'related' relationship to D via
        # distinct secondary tables — same name, different property.
        mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
               properties={
                   'related': relationship(D, secondary=cls.tables.btod)
               })
        mapper(C, cls.tables.c, inherits=A, polymorphic_identity='c',
               properties={
                   'related': relationship(D, secondary=cls.tables.ctod)
               })
        mapper(D, cls.tables.d)

    @classmethod
    def insert_data(cls):
        B = cls.classes.B
        C = cls.classes.C
        D = cls.classes.D

        session = Session()

        d = D()
        session.add_all([
            B(related=[d]),
            C(related=[d])
        ])
        session.commit()

    def test_free_w_poly_subquery(self):
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D

        session = Session()
        d = session.query(D).one()
        a_poly = with_polymorphic(A, [B, C])

        def go():
            for a in session.query(a_poly).\
                    options(
                        subqueryload(a_poly.B.related),
                        subqueryload(a_poly.C.related)):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 3)

    def test_fixed_w_poly_subquery(self):
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D

        session = Session()
        d = session.query(D).one()

        def go():
            for a in session.query(A).with_polymorphic([B, C]).\
                    options(subqueryload(B.related), subqueryload(C.related)):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 3)

    def test_free_w_poly_joined(self):
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D

        session = Session()
        d = session.query(D).one()
        a_poly = with_polymorphic(A, [B, C])

        def go():
            for a in session.query(a_poly).\
                    options(
                        joinedload(a_poly.B.related),
                        joinedload(a_poly.C.related)):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 1)

    def test_fixed_w_poly_joined(self):
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D

        session = Session()
        d = session.query(D).one()

        def go():
            for a in session.query(A).with_polymorphic([B, C]).\
                    options(joinedload(B.related), joinedload(C.related)):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 1)
class SubClassToSubClassFromParentTest(fixtures.MappedTest):
    """test #2617

    A with_polymorphic='*' parent (A) whose B subclass eagerly loads a
    relationship to the sibling D subclass; loading A must not fail.
    """
    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('z', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True))
        Table('a', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)),
              Column('z_id', Integer, ForeignKey('z.id')))
        Table('b', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True))
        Table('d', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True),
              Column('b_id', Integer, ForeignKey('b.id')))

    @classmethod
    def setup_classes(cls):
        class Z(cls.Comparable):
            pass

        class A(cls.Comparable):
            pass

        class B(A):
            pass

        class D(A):
            pass

    @classmethod
    def setup_mappers(cls):
        Z = cls.classes.Z
        A = cls.classes.A
        B = cls.classes.B
        D = cls.classes.D

        mapper(Z, cls.tables.z)
        mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type,
               with_polymorphic='*',
               properties={
                   'zs': relationship(Z, lazy="subquery")
               })
        mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
               properties={
                   'related': relationship(D, lazy="subquery",
                                           primaryjoin=cls.tables.d.c.b_id ==
                                           cls.tables.b.c.id)
               })
        mapper(D, cls.tables.d, inherits=A, polymorphic_identity='d')

    @classmethod
    def insert_data(cls):
        B = cls.classes.B

        session = Session()
        session.add(B())
        session.commit()

    def test_2617(self):
        A = self.classes.A
        session = Session()

        def go():
            a1 = session.query(A).first()
            eq_(a1.related, [])
        self.assert_sql_count(testing.db, go, 3)
class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest):
"""
Two different joined-inh subclasses, led by a
parent, with two distinct endpoints:
parent -> subcl1 -> subcl2 -> (ep1, ep2)
the join to ep2 indicates we need to join
from the middle of the joinpoint, skipping ep1
"""
run_create_tables = None
run_deletes = None
__dialect__ = 'default'
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30)))
Table('base1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30)))
Table('sub1', metadata,
Column('id', Integer, ForeignKey('base1.id'), primary_key=True),
Column('parent_id', ForeignKey('parent.id')),
Column('subdata', String(30)))
Table('base2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base1_id', ForeignKey('base1.id')),
Column('data', String(30)))
Table('sub2', metadata,
Column('id', Integer, ForeignKey('base2.id'), primary_key=True),
Column('subdata', String(30)))
Table('ep1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30)))
Table('ep2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30)))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base1(cls.Comparable):
pass
class Sub1(Base1):
pass
class Base2(cls.Comparable):
pass
class Sub2(Base2):
pass
class EP1(cls.Comparable):
pass
class EP2(cls.Comparable):
pass
@classmethod
def _classes(cls):
return cls.classes.Parent, cls.classes.Base1,\
cls.classes.Base2, cls.classes.Sub1,\
cls.classes.Sub2, cls.classes.EP1,\
cls.classes.EP2
@classmethod
def setup_mappers(cls):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = cls._classes()
mapper(Parent, cls.tables.parent, properties={
'sub1': relationship(Sub1)
})
mapper(Base1, cls.tables.base1, properties={
'sub2': relationship(Sub2)
})
mapper(Sub1, cls.tables.sub1, inherits=Base1)
mapper(Base2, cls.tables.base2, properties={
'ep1': relationship(EP1),
'ep2': relationship(EP2)
})
mapper(Sub2, cls.tables.sub2, inherits=Base2)
mapper(EP1, cls.tables.ep1)
mapper(EP2, cls.tables.ep2)
def test_one(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1, Sub1.sub2).
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 "
"ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_two(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s2a = aliased(Sub2, flat=True)
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1).
join(s2a, Sub1.sub2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 AS base2_1 JOIN sub2 AS sub2_1 "
"ON base2_1.id = sub2_1.id) "
"ON base1.id = base2_1.base1_id"
)
def test_three(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Base1).join(Base1.sub2).
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT base1.id AS base1_id, base1.data AS base1_data "
"FROM base1 JOIN (base2 JOIN sub2 "
"ON base2.id = sub2.id) ON base1.id = "
"base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_four(self):
    """Querying Sub2 with an explicit ON-clause join to Base1: the Sub2
    entity itself renders as base2 JOIN sub2 in the FROM list."""
    Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
    s = Session()
    self.assert_compile(
        s.query(Sub2).join(Base1, Base1.id == Sub2.base1_id).
        join(Sub2.ep1).
        join(Sub2.ep2),
        "SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
        "base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
        "sub2.subdata AS sub2_subdata "
        "FROM base2 JOIN sub2 ON base2.id = sub2.id "
        "JOIN base1 ON base1.id = base2.base1_id "
        "JOIN ep1 ON base2.id = ep1.base2_id "
        "JOIN ep2 ON base2.id = ep2.base2_id"
    )
def test_five(self):
    """Like test_four but joining to Sub1 (another joined-inh subclass):
    the join target becomes a nested (base1 JOIN sub1)."""
    Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
    s = Session()
    self.assert_compile(
        s.query(Sub2).join(Sub1, Sub1.id == Sub2.base1_id).
        join(Sub2.ep1).
        join(Sub2.ep2),
        "SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
        "base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
        "sub2.subdata AS sub2_subdata "
        "FROM base2 JOIN sub2 ON base2.id = sub2.id "
        "JOIN "
        "(base1 JOIN sub1 ON base1.id = sub1.id) "
        "ON sub1.id = base2.base1_id "
        "JOIN ep1 ON base2.id = ep1.base2_id "
        "JOIN ep2 ON base2.id = ep2.base2_id"
    )
def test_six(self):
    """from_self() wraps the Sub2 query in an anonymous subquery; the
    subsequent ep1/ep2 joins target the subquery's exported columns."""
    Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
    s = Session()
    self.assert_compile(
        s.query(Sub2).from_self().
        join(Sub2.ep1).
        join(Sub2.ep2),
        "SELECT anon_1.sub2_id AS anon_1_sub2_id, "
        "anon_1.base2_id AS anon_1_base2_id, "
        "anon_1.base2_base1_id AS anon_1_base2_base1_id, "
        "anon_1.base2_data AS anon_1_base2_data, "
        "anon_1.sub2_subdata AS anon_1_sub2_subdata "
        "FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
        "base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
        "sub2.subdata AS sub2_subdata "
        "FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 "
        "JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
        "JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
    )
def test_seven(self):
    """from_self() over a two-entity (Parent, Sub2) query, then joins to
    Sub2's collections against the wrapping subquery."""
    Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
    s = Session()
    self.assert_compile(
        # adding Sub2 to the entities list helps it,
        # otherwise the joins for Sub2.ep1/ep2 don't have columns
        # to latch onto.  Can't really make it better than this
        s.query(Parent, Sub2).join(Parent.sub1).\
        join(Sub1.sub2).from_self().\
        join(Sub2.ep1).
        join(Sub2.ep2),
        "SELECT anon_1.parent_id AS anon_1_parent_id, "
        "anon_1.parent_data AS anon_1_parent_data, "
        "anon_1.sub2_id AS anon_1_sub2_id, "
        "anon_1.base2_id AS anon_1_base2_id, "
        "anon_1.base2_base1_id AS anon_1_base2_base1_id, "
        "anon_1.base2_data AS anon_1_base2_data, "
        "anon_1.sub2_subdata AS anon_1_sub2_subdata "
        "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
        "sub2.id AS sub2_id, "
        "base2.id AS base2_id, "
        "base2.base1_id AS base2_base1_id, "
        "base2.data AS base2_data, "
        "sub2.subdata AS sub2_subdata "
        "FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
        "ON parent.id = sub1.parent_id JOIN "
        "(base2 JOIN sub2 ON base2.id = sub2.id) "
        "ON base1.id = base2.base1_id) AS anon_1 "
        "JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
        "JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
    )
class JoinedloadSinglePolysubSingle(
        fixtures.DeclarativeMappedTest,
        testing.AssertsCompiledSQL):
    """exercise issue #3611, using the test from dupe issue 3614

    Thing eager-loads Admin (a joined-inh subclass of the polymorphic
    UserRole), which in turn eager-loads User; the test pins the exact
    nested LEFT OUTER JOIN SQL this chain of lazy=False produces.
    """

    run_define_tables = None
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class User(Base):
            __tablename__ = 'users'
            id = Column(Integer, primary_key=True)

        class UserRole(Base):
            # polymorphic base discriminated on row_type
            __tablename__ = 'user_roles'
            id = Column(Integer, primary_key=True)
            row_type = Column(String(50), nullable=False)
            __mapper_args__ = {'polymorphic_on': row_type}
            user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
            user = relationship('User', lazy=False)

        class Admin(UserRole):
            # joined-table subclass of UserRole
            __tablename__ = 'admins'
            __mapper_args__ = {'polymorphic_identity': 'admin'}
            id = Column(Integer, ForeignKey('user_roles.id'), primary_key=True)

        class Thing(Base):
            __tablename__ = 'things'
            id = Column(Integer, primary_key=True)
            admin_id = Column(Integer, ForeignKey('admins.id'))
            admin = relationship('Admin', lazy=False)

    def test_query(self):
        """A plain query(Thing) pulls in Admin (as a nested aliased join)
        and then User via the chained eager loads."""
        Thing = self.classes.Thing
        sess = Session()
        self.assert_compile(
            sess.query(Thing),
            "SELECT things.id AS things_id, "
            "things.admin_id AS things_admin_id, "
            "users_1.id AS users_1_id, admins_1.id AS admins_1_id, "
            "user_roles_1.id AS user_roles_1_id, "
            "user_roles_1.row_type AS user_roles_1_row_type, "
            "user_roles_1.user_id AS user_roles_1_user_id FROM things "
            "LEFT OUTER JOIN (user_roles AS user_roles_1 JOIN admins "
            "AS admins_1 ON user_roles_1.id = admins_1.id) ON "
            "admins_1.id = things.admin_id "
            "LEFT OUTER JOIN users AS "
            "users_1 ON users_1.id = user_roles_1.user_id"
        )
class JoinedloadOverWPolyAliased(
        fixtures.DeclarativeMappedTest,
        testing.AssertsCompiledSQL):
    """exercise issues in #3593 and #3611

    Both Owner and Parent are single-table polymorphic bases configured
    with ``with_polymorphic: ('*', None)``; the fixtures attach
    relationships either at the base or at the single-inh subclass and
    the tests verify the joinedload-generated SQL in each arrangement.
    """

    # relationships are attached to the classes per-test, so rebuild
    # mappers/classes/tables for each test method
    run_setup_mappers = 'each'
    run_setup_classes = 'each'
    run_define_tables = 'each'
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Owner(Base):
            __tablename__ = 'owner'
            id = Column(Integer, primary_key=True)
            type = Column(String(20))
            __mapper_args__ = {
                'polymorphic_on': type,
                'with_polymorphic': ('*', None),
            }

        class SubOwner(Owner):
            # single-table inheritance subclass of Owner
            __mapper_args__ = {'polymorphic_identity': 'so'}

        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)
            type = Column(String(20))
            __mapper_args__ = {
                'polymorphic_on': type,
                'with_polymorphic': ('*', None),
            }

        class Sub1(Parent):
            # single-table inheritance subclass of Parent
            __mapper_args__ = {'polymorphic_identity': 's1'}

        class Link(Base):
            # self-referential association table over parent
            __tablename__ = 'link'
            parent_id = Column(
                Integer, ForeignKey('parent.id'), primary_key=True)
            child_id = Column(
                Integer, ForeignKey('parent.id'), primary_key=True)

    def _fixture_from_base(self):
        """Attach links/child relationships at the Parent base."""
        Parent = self.classes.Parent
        Link = self.classes.Link
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id)
        Parent.links = relationship(
            Link,
            primaryjoin=Parent.id == Link.parent_id,
        )
        return Parent

    def _fixture_from_subclass(self):
        """Attach the links relationship at the Sub1 subclass."""
        Sub1 = self.classes.Sub1
        Link = self.classes.Link
        Parent = self.classes.Parent
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id)
        Sub1.links = relationship(
            Link,
            primaryjoin=Sub1.id == Link.parent_id,
        )
        return Sub1

    def _fixture_to_subclass_to_base(self):
        """Link -> Sub1 (subclass) -> Owner chain."""
        Owner = self.classes.Owner
        Parent = self.classes.Parent
        Sub1 = self.classes.Sub1
        Link = self.classes.Link

        # Link -> Sub1 -> Owner
        Link.child = relationship(
            Sub1, primaryjoin=Link.child_id == Sub1.id)
        Parent.owner_id = Column(ForeignKey('owner.id'))
        Parent.owner = relationship(Owner)
        return Parent

    def _fixture_to_base_to_base(self):
        """Link -> Parent (base) -> Owner chain."""
        Owner = self.classes.Owner
        Parent = self.classes.Parent
        Link = self.classes.Link

        # Link -> Parent -> Owner
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id)
        Parent.owner_id = Column(ForeignKey('owner.id'))
        Parent.owner = relationship(Owner)
        return Parent

    def test_from_base(self):
        self._test_poly_single_poly(self._fixture_from_base)

    def test_from_sub(self):
        self._test_poly_single_poly(self._fixture_from_subclass)

    def test_to_sub_to_base(self):
        self._test_single_poly_poly(self._fixture_to_subclass_to_base)

    def test_to_base_to_base(self):
        self._test_single_poly_poly(self._fixture_to_base_to_base)

    def _test_poly_single_poly(self, fn):
        """Shared assertion for links->child->links joinedload chains;
        starting from Sub1 adds a single-inh discriminator WHERE clause."""
        cls = fn()
        Link = self.classes.Link
        session = Session()
        q = session.query(cls).options(
            joinedload_all(
                cls.links,
                Link.child,
                cls.links
            )
        )
        if cls is self.classes.Sub1:
            extra = " WHERE parent.type IN (:type_1)"
        else:
            extra = ""
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id "
            "FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent "
            "AS parent_1 ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 "
            "ON parent_1.id = link_2.parent_id" + extra
        )

    def _test_single_poly_poly(self, fn):
        """Shared assertion for Link->child->owner joinedload chains;
        targeting Sub1 adds the discriminator to the aliased join's ON."""
        parent_cls = fn()
        Link = self.classes.Link
        session = Session()
        q = session.query(Link).options(
            joinedload_all(
                Link.child,
                parent_cls.owner
            )
        )
        if Link.child.property.mapper.class_ is self.classes.Sub1:
            extra = "AND parent_1.type IN (:type_1) "
        else:
            extra = ""
        self.assert_compile(
            q,
            "SELECT link.parent_id AS link_parent_id, "
            "link.child_id AS link_child_id, parent_1.id AS parent_1_id, "
            "parent_1.type AS parent_1_type, "
            "parent_1.owner_id AS parent_1_owner_id, "
            "owner_1.id AS owner_1_id, owner_1.type AS owner_1_type "
            "FROM link LEFT OUTER JOIN parent AS parent_1 "
            "ON link.child_id = parent_1.id " + extra +
            "LEFT OUTER JOIN owner AS owner_1 "
            "ON owner_1.id = parent_1.owner_id"
        )

    def test_local_wpoly(self):
        """joinedload chain addressed through a with_polymorphic alias."""
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        poly = with_polymorphic(Parent, [Sub1])
        session = Session()
        q = session.query(poly).options(
            joinedload(poly.Sub1.links).
            joinedload(Link.child.of_type(Sub1)).
            joinedload(poly.Sub1.links)
        )
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent AS parent_1 "
            "ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 ON parent_1.id = link_2.parent_id"
        )

    def test_local_wpoly_innerjoins(self):
        # test for issue #3988: innerjoin=True must be downgraded to
        # LEFT OUTER JOIN so plain Parent rows aren't filtered out
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        poly = with_polymorphic(Parent, [Sub1])
        session = Session()
        q = session.query(poly).options(
            joinedload(poly.Sub1.links, innerjoin=True).
            joinedload(Link.child.of_type(Sub1), innerjoin=True).
            joinedload(poly.Sub1.links, innerjoin=True)
        )
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent AS parent_1 "
            "ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 ON parent_1.id = link_2.parent_id"
        )

    def test_local_wpoly_innerjoins_roundtrip(self):
        # test for issue #3988: round-trip version of the previous test
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        session = Session()
        session.add_all([
            Parent(),
            Parent()
        ])

        # represents "Parent" and "Sub1" rows
        poly = with_polymorphic(Parent, [Sub1])

        # innerjoin for Sub1 only, but this needs
        # to be cancelled because the Parent rows
        # would be omitted
        q = session.query(poly).options(
            joinedload(poly.Sub1.links, innerjoin=True).
            joinedload(Link.child.of_type(Sub1), innerjoin=True)
        )
        eq_(len(q.all()), 2)
class JoinAcrossJoinedInhMultiPath(fixtures.DeclarativeMappedTest,
                                   testing.AssertsCompiledSQL):
    """test long join paths with a joined-inh in the middle, where we go multiple
    times across the same joined-inh to the same target but with other classes
    in the middle.  E.g. test [ticket:2908]
    """

    run_setup_mappers = 'once'
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Root(Base):
            __tablename__ = 'root'
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey('sub1.id'))

            intermediate = relationship("Intermediate")
            sub1 = relationship("Sub1")

        class Intermediate(Base):
            __tablename__ = 'intermediate'
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey('sub1.id'))
            root_id = Column(Integer, ForeignKey('root.id'))
            sub1 = relationship("Sub1")

        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)

        class Sub1(Parent):
            # joined-table inheritance subclass in the middle of the path
            __tablename__ = 'sub1'
            id = Column(Integer, ForeignKey('parent.id'),
                        primary_key=True)

            target = relationship("Target")

        class Target(Base):
            __tablename__ = 'target'
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey('sub1.id'))

    def test_join(self):
        """Two distinct aliased(Sub1) hops each render as an anonymous
        (parent JOIN sub1) subquery."""
        Root, Intermediate, Sub1, Target = \
            self.classes.Root, self.classes.Intermediate, \
            self.classes.Sub1, self.classes.Target
        s1_alias = aliased(Sub1)
        s2_alias = aliased(Sub1)
        t1_alias = aliased(Target)
        t2_alias = aliased(Target)

        sess = Session()
        q = sess.query(Root).\
            join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
            join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
            join(t2_alias, s2_alias.target)
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
            "FROM root "
            "JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
            "FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_1 "
            "ON anon_1.sub1_id = root.sub1_id "
            "JOIN target AS target_1 ON anon_1.sub1_id = target_1.sub1_id "
            "JOIN intermediate ON root.id = intermediate.root_id "
            "JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
            "FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_2 "
            "ON anon_2.sub1_id = intermediate.sub1_id "
            "JOIN target AS target_2 ON anon_2.sub1_id = target_2.sub1_id")

    def test_join_flat(self):
        """With flat=True the aliases render as table-level joins
        (parent_N JOIN sub1_N) instead of subqueries."""
        Root, Intermediate, Sub1, Target = \
            self.classes.Root, self.classes.Intermediate, \
            self.classes.Sub1, self.classes.Target
        s1_alias = aliased(Sub1, flat=True)
        s2_alias = aliased(Sub1, flat=True)
        t1_alias = aliased(Target)
        t2_alias = aliased(Target)

        sess = Session()
        q = sess.query(Root).\
            join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
            join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
            join(t2_alias, s2_alias.target)
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
            "FROM root "
            "JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
            "ON parent_1.id = sub1_1.id) "
            "ON sub1_1.id = root.sub1_id "
            "JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
            "JOIN intermediate ON root.id = intermediate.root_id "
            "JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
            "ON parent_2.id = sub1_2.id) "
            "ON sub1_2.id = intermediate.sub1_id "
            "JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id")

    def test_joinedload(self):
        """Eager loading across both paths keeps the two Sub1 traversals
        as independently aliased flat joins."""
        Root, Intermediate, Sub1, Target = \
            self.classes.Root, self.classes.Intermediate, \
            self.classes.Sub1, self.classes.Target
        sess = Session()
        q = sess.query(Root).\
            options(
                joinedload(Root.sub1).joinedload(Sub1.target),
                joinedload(Root.intermediate).joinedload(Intermediate.sub1).
                joinedload(Sub1.target))
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id, "
            "target_1.id AS target_1_id, "
            "target_1.sub1_id AS target_1_sub1_id, "
            "sub1_1.id AS sub1_1_id, parent_1.id AS parent_1_id, "
            "intermediate_1.id AS intermediate_1_id, "
            "intermediate_1.sub1_id AS intermediate_1_sub1_id, "
            "intermediate_1.root_id AS intermediate_1_root_id, "
            "target_2.id AS target_2_id, "
            "target_2.sub1_id AS target_2_sub1_id, "
            "sub1_2.id AS sub1_2_id, parent_2.id AS parent_2_id "
            "FROM root "
            "LEFT OUTER JOIN intermediate AS intermediate_1 "
            "ON root.id = intermediate_1.root_id "
            "LEFT OUTER JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
            "ON parent_1.id = sub1_1.id) "
            "ON sub1_1.id = intermediate_1.sub1_id "
            "LEFT OUTER JOIN target AS target_1 "
            "ON sub1_1.id = target_1.sub1_id "
            "LEFT OUTER JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
            "ON parent_2.id = sub1_2.id) ON sub1_2.id = root.sub1_id "
            "LEFT OUTER JOIN target AS target_2 "
            "ON sub1_2.id = target_2.sub1_id")
class MultipleAdaptUsesEntityOverTableTest(
        AssertsCompiledSQL, fixtures.MappedTest):
    """Three joined-inh siblings (B, C, D) of base A joined in one query:
    each sibling's `a` table must be adapted to a distinct alias, and the
    selected columns must track those adapted aliases."""

    __dialect__ = 'default'
    run_create_tables = None
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String))
        Table('b', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True))
        Table('c', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True),
              Column('bid', Integer, ForeignKey('b.id')))
        Table('d', metadata,
              Column('id', Integer, ForeignKey('a.id'), primary_key=True),
              Column('cid', Integer, ForeignKey('c.id')))

    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass

        class B(A):
            pass

        class C(A):
            pass

        class D(A):
            pass

    @classmethod
    def setup_mappers(cls):
        A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
        a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d
        mapper(A, a)
        mapper(B, b, inherits=A)
        mapper(C, c, inherits=A)
        mapper(D, d, inherits=A)

    def _two_join_fixture(self):
        """Query selecting the inherited `name` column from all three
        subclasses, joined B -> C -> D."""
        A, B, C, D = (self.classes.A, self.classes.B, self.classes.C,
                      self.classes.D)
        s = Session()
        return s.query(B.name, C.name, D.name).select_from(B).\
            join(C, C.bid == B.id).\
            join(D, D.cid == C.id)

    def test_two_joins_adaption(self):
        """Inspect the query's internal join structure: the C and D joins
        wrap adapted copies of table `a`, and each selected `name` column
        adapts to the matching alias."""
        a, b, c, d = self.tables.a, self.tables.b, self.tables.c, self.tables.d
        q = self._two_join_fixture()

        btoc = q._from_obj[0].left

        ac_adapted = btoc.right.element.left
        c_adapted = btoc.right.element.right

        is_(ac_adapted.element, a)
        is_(c_adapted.element, c)

        ctod = q._from_obj[0].right
        ad_adapted = ctod.left
        d_adapted = ctod.right
        is_(ad_adapted.element, a)
        is_(d_adapted.element, d)

        bname, cname, dname = q._entities

        b_name_adapted = q._adapt_clause(bname.column, False, True)
        c_name_adapted = q._adapt_clause(cname.column, False, True)
        d_name_adapted = q._adapt_clause(dname.column, False, True)

        assert bool(b_name_adapted == a.c.name)
        assert bool(c_name_adapted == ac_adapted.c.name)
        assert bool(d_name_adapted == ad_adapted.c.name)

    def test_two_joins_sql(self):
        q = self._two_join_fixture()
        self.assert_compile(
            q,
            "SELECT a.name AS a_name, a_1.name AS a_1_name, "
            "a_2.name AS a_2_name "
            "FROM a JOIN b ON a.id = b.id JOIN "
            "(a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id) ON c_1.bid = b.id "
            "JOIN (a AS a_2 JOIN d AS d_1 ON a_2.id = d_1.id) "
            "ON d_1.cid = c_1.id")
class SameNameOnJoined(fixtures.MappedTest):
    """Base A and joined-inh subclass ASub each define their own `bs`
    relationship (same attribute name); the round trip verifies the
    subclass override persists and cascades correctly."""

    run_setup_mappers = 'once'
    run_inserts = None
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'a', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('t', String(5))
        )
        Table(
            'a_sub', metadata,
            Column('id', Integer, ForeignKey('a.id'), primary_key=True)
        )
        Table(
            'b', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('a_id', Integer, ForeignKey('a.id'))
        )

    @classmethod
    def setup_mappers(cls):
        class A(cls.Comparable):
            pass

        class ASub(A):
            pass

        class B(cls.Comparable):
            pass

        mapper(
            A, cls.tables.a, polymorphic_on=cls.tables.a.c.t,
            polymorphic_identity='a',
            properties={
                'bs': relationship(B, cascade="all, delete-orphan")
            }
        )

        mapper(
            ASub, cls.tables.a_sub, inherits=A,
            polymorphic_identity='asub', properties={
                # same-named relationship re-declared on the subclass
                'bs': relationship(B, cascade="all, delete-orphan")
            }
        )

        mapper(B, cls.tables.b)

    def test_persist(self):
        """Insert A and ASub each with three Bs, then delete both parents;
        delete-orphan cascade must remove all six Bs."""
        A, ASub, B = self.classes('A', 'ASub', 'B')

        s = Session(testing.db)
        s.add_all([
            A(bs=[B(), B(), B()]),
            ASub(bs=[B(), B(), B()])
        ])
        s.commit()

        eq_(s.query(B).count(), 6)

        for a in s.query(A):
            eq_(len(a.bs), 3)
            s.delete(a)

        s.commit()

        eq_(s.query(B).count(), 0)
class BetweenSubclassJoinWExtraJoinedLoad(
        fixtures.DeclarativeMappedTest,
        testing.AssertsCompiledSQL):
    """test for [ticket:3884]

    Joining Engineer->Manager (both joined-inh subclasses of Person)
    while the LastSeen backref eager-loads against both entities; the
    Manager side is internally aliased.
    """

    run_define_tables = None
    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Person(Base):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Manager(Person):
            __tablename__ = 'managers'
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            id = Column(Integer, ForeignKey('people.id'), primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column(Integer, ForeignKey('people.id'), primary_key=True)
            primary_language = Column(String(50))
            manager_id = Column(Integer, ForeignKey('managers.id'))
            manager = relationship(
                Manager, primaryjoin=(Manager.id == manager_id))

        class LastSeen(Base):
            __tablename__ = 'seen'
            id = Column(Integer, ForeignKey('people.id'), primary_key=True)
            timestamp = Column(Integer)
            # lazy=False backref makes every Person query eager-load LastSeen
            taggable = relationship(
                Person, primaryjoin=(Person.id == id),
                backref=backref("last_seen", lazy=False))

    def test_query(self):
        Engineer, Manager = self.classes("Engineer", "Manager")
        sess = Session()

        # eager join is both from Enginer->LastSeen as well as
        # Manager->LastSeen.  In the case of Manager->LastSeen,
        # Manager is internally aliased, and comes to JoinedEagerLoader
        # with no "parent" entity but an adapter.
        q = sess.query(Engineer, Manager).join(Engineer.manager)
        self.assert_compile(
            q,
            "SELECT people.type AS people_type, engineers.id AS engineers_id, "
            "people.id AS people_id, "
            "engineers.primary_language AS engineers_primary_language, "
            "engineers.manager_id AS engineers_manager_id, "
            "people_1.type AS people_1_type, managers_1.id AS managers_1_id, "
            "people_1.id AS people_1_id, seen_1.id AS seen_1_id, "
            "seen_1.timestamp AS seen_1_timestamp, seen_2.id AS seen_2_id, "
            "seen_2.timestamp AS seen_2_timestamp "
            "FROM people JOIN engineers ON people.id = engineers.id "
            "JOIN (people AS people_1 JOIN managers AS managers_1 "
            "ON people_1.id = managers_1.id) "
            "ON managers_1.id = engineers.manager_id LEFT OUTER JOIN "
            "seen AS seen_1 ON people.id = seen_1.id LEFT OUTER JOIN "
            "seen AS seen_2 ON people_1.id = seen_2.id"
        )
| 33.30468 | 81 | 0.550545 |
606f15f45f5209b5298440efecef97d0e3aec73c | 2,000 | py | Python | source/callback/infer_display_style_transfer.py | douglasresende/lambda-deep-learning-demo | ebbbd63c0abf87a1a4155b17cef145039b7a1ef7 | [
"Apache-2.0"
] | 80 | 2018-10-27T15:18:03.000Z | 2022-01-29T16:49:36.000Z | source/callback/infer_display_style_transfer.py | douglasresende/lambda-deep-learning-demo | ebbbd63c0abf87a1a4155b17cef145039b7a1ef7 | [
"Apache-2.0"
] | 4 | 2019-02-11T20:28:35.000Z | 2019-10-03T22:58:10.000Z | source/callback/infer_display_style_transfer.py | douglasresende/lambda-deep-learning-demo | ebbbd63c0abf87a1a4155b17cef145039b7a1ef7 | [
"Apache-2.0"
] | 25 | 2018-11-15T23:34:31.000Z | 2020-09-22T16:21:20.000Z | """
Copyright 2018 Lambda Labs. All Rights Reserved.
Licensed under
==========================================================================
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from .callback import Callback
class InferDisplayStyleTransfer(Callback):
    """Inference-time callback that displays style-transfer results.

    After every inference step, the (de-mean-normalized) input image and the
    stylized network output are shown side by side with matplotlib.
    """

    def __init__(self, config):
        super(InferDisplayStyleTransfer, self).__init__(config)

    def render_label(self, label, num_classes, label_colors):
        """Colorize an integer label map into an RGB image.

        Args:
            label: 2-D array of per-pixel class ids (cast to int).
            num_classes: number of valid class ids; pixels whose id falls
                outside [0, num_classes) stay black, matching the behavior
                of the original per-class masking loop.
            label_colors: array-like of shape (num_classes, >=3) holding
                the RGB value of each class id in columns 0..2.

        Returns:
            uint8 array of shape (H, W, 3).
        """
        label = label.astype(int)
        rgb = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
        # Vectorized replacement for the original per-class masking loop:
        # a single fancy-indexing lookup instead of num_classes full-image
        # scans (O(H*W) instead of O(num_classes*H*W)).
        colors = np.asarray(label_colors)
        valid = (label >= 0) & (label < num_classes)
        rgb[valid] = colors[label[valid], :3]
        return rgb

    def before_run(self, sess):
        """Cache the default graph and the per-channel RGB mean that was
        subtracted from the inputs at preprocessing time."""
        self.graph = tf.get_default_graph()
        self.RGB_MEAN = [123.68, 116.78, 103.94]

    def after_step(self, sess, outputs_dict, feed_dict=None):
        """Show each (input, stylized output) pair of the batch.

        `outputs_dict` must carry "input" (mean-subtracted images) and
        "output" (raw stylized images); both are clipped to [0, 255] and
        cropped to their common size before being concatenated.
        """
        for input_image, output_image in zip(
                outputs_dict["input"], outputs_dict["output"]):
            # Undo the mean subtraction applied at input time.
            input_image = input_image + self.RGB_MEAN
            input_image = np.clip(input_image, 0, 255).astype(np.uint8)

            transformed_image = np.clip(output_image, 0, 255).astype(np.uint8)

            # Crop both to the shared region so they can be concatenated.
            w = min(input_image.shape[1], transformed_image.shape[1])
            h = min(input_image.shape[0], transformed_image.shape[0])
            display_image = np.concatenate(
                (input_image[0:h, 0:w, :], transformed_image[0:h, 0:w, :]), axis=1)

            plt.figure()
            plt.axis('off')
            plt.imshow(display_image)
            plt.show()
def build(config):
    """Factory returning an `InferDisplayStyleTransfer` for `config`."""
    return InferDisplayStyleTransfer(config)
| 30.30303 | 77 | 0.633 |
d803890d100279e567be001d6e6d6ea40c28ac30 | 11,979 | py | Python | fastai/basic_data.py | falschparker82/fastai | 371fdbf1f2cb6ffa276237735c205d66c078503a | [
"Apache-2.0"
] | null | null | null | fastai/basic_data.py | falschparker82/fastai | 371fdbf1f2cb6ffa276237735c205d66c078503a | [
"Apache-2.0"
] | null | null | null | fastai/basic_data.py | falschparker82/fastai | 371fdbf1f2cb6ffa276237735c205d66c078503a | [
"Apache-2.0"
] | null | null | null | "`fastai.data` loads and manages datasets with `DataBunch`"
from .torch_core import *
from torch.utils.data.dataloader import default_collate
# Enum selecting which of the bundled dataloaders an operation targets.
DatasetType = Enum('DatasetType', 'Train Valid Test Single Fix')

__all__ = ['DataBunch', 'DeviceDataLoader', 'DatasetType']

# Monkey-patch torch's DataLoader so every instance records its constructor
# arguments in `init_kwargs`; DeviceDataLoader.new() and the batch_size
# setter rely on this to rebuild an equivalent DataLoader with overrides.
old_dl_init = torch.utils.data.DataLoader.__init__

def intercept_args(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                   num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
                   timeout=0, worker_init_fn=None):
    "Replacement `DataLoader.__init__` that stores its kwargs before delegating."
    self.init_kwargs = {'batch_size':batch_size, 'shuffle':shuffle, 'sampler':sampler, 'batch_sampler':batch_sampler,
                        'num_workers':num_workers, 'collate_fn':collate_fn, 'pin_memory':pin_memory,
                        'drop_last': drop_last, 'timeout':timeout, 'worker_init_fn':worker_init_fn}
    old_dl_init(self, dataset, **self.init_kwargs)

torch.utils.data.DataLoader.__init__ = intercept_args

# Delegate unknown attribute lookups on a DataLoader to its dataset, and
# restore pickled state directly into __dict__ (bypassing the patched init).
def DataLoader___getattr__(dl, k:str)->Any: return getattr(dl.dataset, k)
DataLoader.__getattr__ = DataLoader___getattr__
def DataLoader___setstate__(dl, data:Any): dl.__dict__.update(data)
DataLoader.__setstate__ = DataLoader___setstate__
@dataclass
class DeviceDataLoader():
    "Bind a `DataLoader` to a `torch.device`."
    dl: DataLoader                    # wrapped torch DataLoader
    device: torch.device              # device batches are moved to
    tfms: List[Callable]=None         # batch-level transforms applied in __iter__
    collate_fn: Callable=data_collate

    def __post_init__(self):
        # Force our collate function onto the wrapped loader.
        self.dl.collate_fn=self.collate_fn
        self.tfms = listify(self.tfms)

    def __len__(self)->int: return len(self.dl)
    def __getattr__(self,k:str)->Any: return getattr(self.dl, k)
    def __setstate__(self,data:Any): self.__dict__.update(data)

    @property
    def batch_size(self): return self.dl.batch_size
    @batch_size.setter
    def batch_size(self,v):
        # Rebuild the inner DataLoader from its recorded init_kwargs
        # (captured by the module-level DataLoader monkey-patch).
        new_kwargs = {**self.dl.init_kwargs, 'batch_size':v, 'collate_fn':self.collate_fn}
        self.dl = self.dl.__class__(self.dl.dataset, **new_kwargs)
        if hasattr(self.dl.dataset, 'bs'): self.dl.dataset.bs = v

    @property
    def num_workers(self): return self.dl.num_workers
    @num_workers.setter
    def num_workers(self,v): self.dl.num_workers = v

    def add_tfm(self,tfm:Callable)->None:
        "Add `tfm` to `self.tfms`."
        self.tfms.append(tfm)
    def remove_tfm(self,tfm:Callable)->None:
        "Remove `tfm` from `self.tfms`."
        if tfm in self.tfms: self.tfms.remove(tfm)

    def new(self, **kwargs):
        "Create a new copy of `self` with `kwargs` replacing current values."
        new_kwargs = {**self.dl.init_kwargs, **kwargs}
        return DeviceDataLoader(self.dl.__class__(self.dl.dataset, **new_kwargs), self.device, self.tfms,
                                self.collate_fn)

    def proc_batch(self,b:Tensor)->Tensor:
        "Process batch `b` of `TensorImage`."
        # Move to device first, then apply the batch transforms in order.
        b = to_device(b, self.device)
        for f in listify(self.tfms): b = f(b)
        return b

    def __iter__(self):
        "Process and returns items from `DataLoader`."
        for b in self.dl: yield self.proc_batch(b)

    @classmethod
    def create(cls, dataset:Dataset, bs:int=64, shuffle:bool=False, device:torch.device=defaults.device,
               tfms:Collection[Callable]=tfms, num_workers:int=defaults.cpus, collate_fn:Callable=data_collate, **kwargs:Any):
        # NOTE(review): the `tfms=tfms` default resolves, at class-body
        # definition time, to the class attribute `tfms` above (i.e. None).
        "Create DeviceDataLoader from `dataset` with `bs` and `shuffle`: process using `num_workers`."
        return cls(DataLoader(dataset, batch_size=bs, shuffle=shuffle, num_workers=num_workers, **kwargs),
                   device=device, tfms=tfms, collate_fn=collate_fn)
class DataBunch():
"Bind `train_dl`,`valid_dl` and `test_dl` in a a data object."
def __init__(self, train_dl:DataLoader, valid_dl:DataLoader, fix_dl:DataLoader=None, test_dl:Optional[DataLoader]=None,
             device:torch.device=None, dl_tfms:Optional[Collection[Callable]]=None, path:PathOrStr='.',
             collate_fn:Callable=data_collate, no_check:bool=False):
    "Wrap each raw `DataLoader` in a `DeviceDataLoader` bound to `device`."
    self.dl_tfms = listify(dl_tfms)
    self.device = defaults.device if device is None else device
    # Callers must pass raw DataLoaders; wrapping happens here.
    assert not isinstance(train_dl,DeviceDataLoader)
    def _create_dl(dl, **kwargs):
        # Wrap a raw DataLoader; None (e.g. no test set) passes through.
        if dl is None: return None
        return DeviceDataLoader(dl, self.device, self.dl_tfms, collate_fn, **kwargs)
    self.train_dl,self.valid_dl,self.fix_dl,self.test_dl = map(_create_dl, [train_dl,valid_dl,fix_dl,test_dl])
    # fix_dl defaults to the training data without shuffling/dropping.
    if fix_dl is None: self.fix_dl = self.train_dl.new(shuffle=False, drop_last=False)
    # single_dl serves one validation item at a time (batch_size=1).
    self.single_dl = _create_dl(DataLoader(valid_dl.dataset, batch_size=1, num_workers=0))
    self.path = Path(path)
    if not no_check: self.sanity_check()
def __repr__(self)->str:
    # Summarize the class name and the three underlying datasets.
    return f'{self.__class__.__name__};\n\nTrain: {self.train_ds};\n\nValid: {self.valid_ds};\n\nTest: {self.test_ds}'
@staticmethod
def _init_ds(train_ds:Dataset, valid_ds:Dataset, test_ds:Optional[Dataset]=None):
    "Return the (train, valid, fix[, test]) dataset list, dropping Nones."
    # train_ds, but without training tfms
    fix_ds = valid_ds.new(train_ds.x, train_ds.y) if hasattr(valid_ds,'new') else train_ds
    return [o for o in (train_ds,valid_ds,fix_ds,test_ds) if o is not None]
@classmethod
def create(cls, train_ds:Dataset, valid_ds:Dataset, test_ds:Optional[Dataset]=None, path:PathOrStr='.', bs:int=64,
           val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
           device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->'DataBunch':
    "Create a `DataBunch` from `train_ds`, `valid_ds` and maybe `test_ds` with a batch size of `bs`."
    datasets = cls._init_ds(train_ds, valid_ds, test_ds)
    val_bs = ifnone(val_bs, bs)
    # Only the training loader shuffles and drops the last partial batch.
    dls = [DataLoader(d, b, shuffle=s, drop_last=s, num_workers=num_workers) for d,b,s in
           zip(datasets, (bs,val_bs,val_bs,val_bs), (True,False,False,False)) if d is not None]
    return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
# Unknown attributes delegate to the training loader; pickled state is
# restored directly into __dict__.
def __getattr__(self,k:int)->Any: return getattr(self.train_dl, k)
def __setstate__(self,data:Any): self.__dict__.update(data)
def dl(self, ds_type:DatasetType=DatasetType.Valid)->DeviceDataLoader:
    "Returns appropriate `Dataset` for validation, training, or test (`ds_type`)."
    #TODO: refactor
    # DatasetType.Fix falls through to the final branch.
    return (self.train_dl if ds_type == DatasetType.Train else
            self.test_dl if ds_type == DatasetType.Test else
            self.valid_dl if ds_type == DatasetType.Valid else
            self.single_dl if ds_type == DatasetType.Single else
            self.fix_dl)
@property
def dls(self):
    "All managed `DeviceDataLoader`s, including test only if present."
    res = [self.train_dl, self.valid_dl, self.fix_dl, self.single_dl]
    return res if not self.test_dl else res + [self.test_dl]
def add_tfm(self,tfm:Callable)->None:
    "Add batch-level transform `tfm` to every managed dataloader."
    for dl in self.dls: dl.add_tfm(tfm)
def one_batch(self, ds_type:DatasetType=DatasetType.Train, detach:bool=True, denorm:bool=True, cpu:bool=True)->Collection[Tensor]:
    "Get one batch from the data loader of `ds_type`. Optionally `detach` and `denorm`."
    dl = self.dl(ds_type)
    w = self.num_workers
    # Temporarily force single-process loading so the batch is fetched in
    # this process, then restore the original worker count.
    self.num_workers = 0
    try:     x,y = next(iter(dl))
    finally: self.num_workers = w
    if detach: x,y = to_detach(x,cpu=cpu),to_detach(y,cpu=cpu)
    # Only denormalize if this databunch was normalized (`self.norm` set).
    norm = getattr(self,'norm',False)
    if denorm and norm:
        x = self.denorm(x)
        if norm.keywords.get('do_y',False): y = self.denorm(y, do_x=True)
    return x,y
def one_item(self, item, detach:bool=False, denorm:bool=False, cpu:bool=False):
    "Get `item` into a batch. Optionally `detach` and `denorm`."
    ds = self.single_ds
    # set_item temporarily makes the single dataset serve only `item`.
    with ds.set_item(item):
        return self.one_batch(ds_type=DatasetType.Single, detach=detach, denorm=denorm, cpu=cpu)
def show_batch(self, rows:int=5, ds_type:DatasetType=DatasetType.Train, **kwargs)->None:
    "Show a batch of data in `ds_type` on a few `rows`."
    x,y = self.one_batch(ds_type, True, True)
    # Square-show item types display a rows x rows grid; otherwise `rows` items.
    n_items = rows **2 if self.train_ds.x._square_show else rows
    if self.dl(ds_type).batch_size < n_items: n_items = self.dl(ds_type).batch_size
    xs = [self.train_ds.x.reconstruct(grab_idx(x, i)) for i in range(n_items)]
    #TODO: get rid of has_arg if possible
    # Some y reconstructors need the matching x to rebuild the target.
    if has_arg(self.train_ds.y.reconstruct, 'x'):
        ys = [self.train_ds.y.reconstruct(grab_idx(y, i), x=x) for i,x in enumerate(xs)]
    else : ys = [self.train_ds.y.reconstruct(grab_idx(y, i)) for i in range(n_items)]
    self.train_ds.x.show_xys(xs, ys, **kwargs)
    def export(self, fname:str='export.pkl'):
        "Export the minimal state of `self` for inference in `self.path/fname`."
        # Persist normalization stats (when present) so inference can apply
        # the same normalize/denormalize transforms.
        xtra = dict(normalize=self.norm.keywords) if getattr(self, 'norm', False) else {}
        self.valid_ds.export(self.path/fname, **xtra)
def _grab_dataset(self, dl:DataLoader):
ds = dl.dl.dataset
while hasattr(ds, 'dataset'): ds = ds.dataset
return ds
    @property
    def train_ds(self)->Dataset:
        "Innermost dataset of the training dataloader."
        return self._grab_dataset(self.train_dl)
    @property
    def valid_ds(self)->Dataset:
        "Innermost dataset of the validation dataloader."
        return self._grab_dataset(self.valid_dl)
    @property
    def single_ds(self)->Dataset:
        "Innermost dataset of the single-item dataloader."
        return self._grab_dataset(self.single_dl)
    @property
    def loss_func(self)->Dataset:
        # NOTE(review): the return annotation says `Dataset` but this returns
        # a loss callable (defaulting to `F.nll_loss`) — annotation looks
        # wrong; left unchanged to avoid touching the interface.
        return getattr(self.train_ds, 'loss_func', F.nll_loss)
    @property
    def test_ds(self)->Dataset:
        "Innermost dataset of the test dataloader, or None when no test set exists."
        return self._grab_dataset(self.test_dl) if self.test_dl is not None else None
@property
def empty_val(self)->bool:
if not hasattr(self, 'valid_dl') or self.valid_dl is None: return True
if hasattr(self.valid_ds, 'items') and len(self.valid_ds.items) == 0: return True
return (len(self.valid_ds) == 0)
    @property
    def batch_size(self):
        "Batch size of the training dataloader."
        return self.train_dl.batch_size
    @batch_size.setter
    def batch_size(self,v):
        # Keep train/valid (and test, when present) batch sizes in sync.
        self.train_dl.batch_size,self.valid_dl.batch_size = v,v
        if self.test_dl is not None: self.test_dl.batch_size = v
@property
def classes(self): return self.train_ds.y.classes
    def sanity_check(self):
        "Check the underlying data in the training set can be properly loaded."
        final_message = "You can deactivate this warning by passing `no_check=True`."
        # Nothing to check for item-less datasets or non-batch dataloaders.
        if not hasattr(self.train_ds, 'items') or len(self.train_ds.items) == 0 or not hasattr(self.train_dl, 'batch_sampler'): return
        if len(self.train_dl) == 0:
            warn(f"""Your training dataloader is empty, you have only {len(self.train_dl.dataset)} items in your training set.
                 Your batch size is {self.train_dl.batch_size}, you should lower it.""")
            print(final_message)
            return
        # Load every item of the first batch individually, recording indices
        # that fail to load.
        idx = next(iter(self.train_dl.batch_sampler))
        samples,fails = [],[]
        for i in idx:
            try: samples.append(self.train_dl.dataset[i])
            # NOTE(review): bare except deliberately treats any error as a
            # bad item (best-effort diagnostic, not control flow).
            except: fails.append(i)
        if len(fails) > 0:
            if len(fails) == len(idx):
                warn_msg = "There seems to be something wrong with your dataset, can't access any element of self.train_ds.\n"
                warn_msg += f"Tried: {show_some(idx)}"
            else:
                warn_msg = "There seems to be something wrong with your dataset, can't access these elements "
                warn_msg += f"in self.train_ds: {show_some(fails)}"
            warn(warn_msg)
            print(final_message)
            return
        # Items load individually; now verify they can be collated to a batch.
        try: batch = self.collate_fn(samples)
        except:
            message = "It's not possible to collate samples of your dataset together in a batch."
            # Best-effort: include input/target shapes when available.
            try:
                shapes = [[o[i].data.shape for o in samples] for i in range(2)]
                message += f'\nShapes of the inputs/targets:\n{shapes}'
            except: pass
            warn(message)
            print(final_message)
| 48.695122 | 134 | 0.66074 |
348bb2c90274179b057b379fe4dc229eb7c46a9f | 112,938 | py | Python | salt/modules/yumpkg.py | StackKorora/salt | 7bff39ed0ef4bded310b535286cf87a3492f23a2 | [
"Apache-2.0"
] | null | null | null | salt/modules/yumpkg.py | StackKorora/salt | 7bff39ed0ef4bded310b535286cf87a3492f23a2 | [
"Apache-2.0"
] | null | null | null | salt/modules/yumpkg.py | StackKorora/salt | 7bff39ed0ef4bded310b535286cf87a3492f23a2 | [
"Apache-2.0"
] | null | null | null | """
Support for YUM/DNF
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
.. note::
DNF is fully supported as of version 2015.5.10 and 2015.8.4 (partial
support for DNF was initially added in 2015.8.0), and DNF is used
automatically in place of YUM in Fedora 22 and newer.
.. versionadded:: 3003
Support for ``tdnf`` on Photon OS.
"""
import configparser
import contextlib
import datetime
import fnmatch
import itertools
import logging
import os
import re
import string
import salt.utils.args
import salt.utils.data
import salt.utils.decorators.path
import salt.utils.environment
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.lazy
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.rpm
import salt.utils.systemd
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
try:
import yum
HAS_YUM = True
except ImportError:
HAS_YUM = False
log = logging.getLogger(__name__)
__HOLD_PATTERN = r"[\w+]+(?:[.-][^-]+)*"
PKG_ARCH_SEPARATOR = "."
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
    """
    Confine this module to yum based systems
    """
    if __opts__.get("yum_provider") == "yumpkg_api":
        return (False, "Module yumpkg: yumpkg_api provider not available")
    try:
        os_grain = __grains__["os"].lower()
        os_family = __grains__["os_family"].lower()
    # Broad except: grains may be missing/odd on unsupported platforms.
    except Exception:  # pylint: disable=broad-except
        return (False, "Module yumpkg: no yum based system detected")
    # Distros outside the RedHat family that nevertheless use yum.
    enabled = ("amazon", "xcp", "xenserver", "virtuozzolinux", "virtuozzo")
    if os_family == "redhat" or os_grain in enabled:
        if _yum() is None:
            return (False, "DNF nor YUM found")
        return __virtualname__
    return (False, "Module yumpkg: no yum based system detected")
def _strip_headers(output, *args):
    """
    Remove section-header lines (e.g. "Installed Packages") from yum/dnf
    output.

    If ``args`` is given, those strings (matched case-insensitively) are the
    headers to strip; otherwise a default set of known yum/dnf headers is
    used. Returns the remaining lines, each followed by a newline.
    """
    if not args:
        args_lc = (
            "installed packages",
            "available packages",
            "available upgrades",
            "updated packages",
            "upgraded packages",
        )
    else:
        args_lc = [x.lower() for x in args]
    # str.join avoids the quadratic cost of repeated string concatenation
    # while producing byte-identical output to the original += loop.
    return "".join(
        line + "\n"
        for line in salt.utils.itertools.split(output, "\n")
        if line.lower() not in args_lc
    )
def _get_copr_repo(copr):
copr = copr.split(":", 1)[1]
copr = copr.split("/", 1)
return "copr:copr.fedorainfracloud.org:{}:{}".format(copr[0], copr[1])
def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
    """
    Resolve a package name from a line containing the hold expression. If the
    regex is not matched, None is returned.
    yum ==> 2:vim-enhanced-7.4.629-5.el6.*
    dnf ==> vim-enhanced-2:7.4.827-1.fc22.*

    pattern
        Regex fragment matching the package-name portion of the lock entry.
    full
        When True, return the whole matched lock expression; when False,
        strip arch, release and version, returning only the package name.
    """
    # dnf's versionlock entries carry no leading epoch, so the same regex
    # applies regardless of ``full`` — the original duplicated this branch
    # in both arms of the ``full`` check.
    if _yum() == "dnf":
        lock_re = r"({}-\S+)".format(pattern)
    elif full:
        lock_re = r"(\d+:{}-\S+)".format(pattern)
    else:
        lock_re = r"\d+:({}-\S+)".format(pattern)
    match = re.search(lock_re, line)
    if not match:
        return None
    if full:
        return match.group(1)
    # Strip ".<arch/glob>", then "-<release>", then "-<version>" to recover
    # just the package name.
    woarch = match.group(1).rsplit(".", 1)[0]
    worel = woarch.rsplit("-", 1)[0]
    return worel.rsplit("-", 1)[0]
def _yum():
    """
    Determine package manager name (yum or dnf),
    depending on the executable existence in $PATH.

    The result is cached in ``__context__`` under ``yum_bin``; returns None
    when none of dnf/yum/tdnf is found.
    """
    # Imported locally because this function is cloned into the
    # kernelpkg_linux_yum module, which must not rely on this module's
    # top-level imports.
    import os

    def _check(file):
        # True when `file` is an existing, executable, non-directory path.
        return (
            os.path.exists(file)
            and os.access(file, os.F_OK | os.X_OK)
            and not os.path.isdir(file)
        )
    # allow calling function outside execution module
    try:
        context = __context__
    except NameError:
        context = {}
    contextkey = "yum_bin"
    if contextkey not in context:
        # First directory hit wins; dnf is preferred over yum, yum over tdnf.
        for dir in os.environ.get("PATH", os.defpath).split(os.pathsep):
            if _check(os.path.join(dir, "dnf")):
                context[contextkey] = "dnf"
                break
            elif _check(os.path.join(dir, "yum")):
                context[contextkey] = "yum"
                break
            elif _check(os.path.join(dir, "tdnf")):
                context[contextkey] = "tdnf"
                break
    return context.get(contextkey)
def _call_yum(args, **kwargs):
    """
    Call yum/dnf.

    args
        List of CLI arguments for the detected package manager binary.
    kwargs
        Extra keyword arguments forwarded to ``cmd.run_all``; callers may
        override the defaults set below (e.g. ``ignore_retcode=True``).
    """
    params = {
        "output_loglevel": "trace",
        "python_shell": False,
        "env": salt.utils.environment.get_module_environment(globals()),
    }
    params.update(kwargs)
    cmd = []
    # When systemd scopes are available and enabled via config, wrap the
    # command in `systemd-run --scope` so it runs in its own scope unit.
    if salt.utils.systemd.has_scope(__context__) and __salt__["config.get"](
        "systemd.scope", True
    ):
        cmd.extend(["systemd-run", "--scope"])
    cmd.append(_yum())
    cmd.extend(args)
    return __salt__["cmd.run_all"](cmd, **params)
def _yum_pkginfo(output):
    """
    Parse yum/dnf output (which could contain irregular line breaks if package
    names are long) retrieving the name, version, etc., and return a list of
    pkginfo namedtuples.
    """
    cur = {}
    # `yum list` output is whitespace-separated triplets of
    # name.arch / version / repoid, possibly wrapped across lines; cycling
    # the keys keeps field assignment in sync regardless of wrapping.
    keys = itertools.cycle(("name", "version", "repoid"))
    values = salt.utils.itertools.split(_strip_headers(output))
    osarch = __grains__["osarch"]
    for (key, value) in zip(keys, values):
        if key == "name":
            try:
                cur["name"], cur["arch"] = value.rsplit(".", 1)
            except ValueError:
                # No arch suffix on the name; assume the OS arch.
                cur["name"] = value
                cur["arch"] = osarch
            cur["name"] = salt.utils.pkg.rpm.resolve_name(
                cur["name"], cur["arch"], osarch
            )
        else:
            if key == "version":
                # Suppport packages with no 'Release' parameter
                value = value.rstrip("-")
            elif key == "repoid":
                # Installed packages show a '@' at the beginning
                value = value.lstrip("@")
            cur[key] = value
            if key == "repoid":
                # We're done with this package, create the pkginfo namedtuple
                pkginfo = salt.utils.pkg.rpm.pkginfo(**cur)
                # Clear the dict for the next package
                cur = {}
                # Yield the namedtuple
                if pkginfo is not None:
                    yield pkginfo
def _versionlock_pkg(grains=None):
    """
    Determine versionlock plugin package name

    grains
        Optional grains dict, defaulting to the minion's ``__grains__``.

    Raises SaltInvocationError on tdnf (Photon OS), which has no versionlock
    plugin.
    """
    if grains is None:
        grains = __grains__
    if _yum() == "dnf":
        if grains["os"].lower() == "fedora":
            # Fedora >= 26 ships the plugin under a different package name.
            return (
                "python3-dnf-plugin-versionlock"
                if int(grains.get("osrelease")) >= 26
                else "python3-dnf-plugins-extras-versionlock"
            )
        # RHEL/CentOS 8+ use the python3 plugin, earlier dnf-based releases
        # the python2 one.
        if int(grains.get("osmajorrelease")) >= 8:
            return "python3-dnf-plugin-versionlock"
        return "python2-dnf-plugin-versionlock"
    elif _yum() == "tdnf":
        raise SaltInvocationError("Cannot proceed, no versionlock for tdnf")
    else:
        # Classic yum: RHEL/CentOS 5 used a different package name than 6+.
        return (
            "yum-versionlock"
            if int(grains.get("osmajorrelease")) == 5
            else "yum-plugin-versionlock"
        )
def _check_versionlock():
    """
    Ensure that the appropriate versionlock plugin is present
    """
    vl_plugin = _versionlock_pkg()
    installed = list_pkgs()
    if vl_plugin not in installed:
        raise SaltInvocationError(
            "Cannot proceed, {} is not installed.".format(vl_plugin)
        )
def _get_options(**kwargs):
    """
    Returns a list of options to be used in the yum/dnf command, based on the
    kwargs passed.

    Recognized kwargs: fromrepo/repo, disablerepo, enablerepo,
    disableexcludes, branch, setopt, get_extra_options. Remaining kwargs are
    only consumed when ``get_extra_options`` is True.
    """
    # Get repo options from the kwargs
    fromrepo = kwargs.pop("fromrepo", "")
    repo = kwargs.pop("repo", "")
    disablerepo = kwargs.pop("disablerepo", "")
    enablerepo = kwargs.pop("enablerepo", "")
    disableexcludes = kwargs.pop("disableexcludes", "")
    branch = kwargs.pop("branch", "")
    setopt = kwargs.pop("setopt", None)
    if setopt is None:
        setopt = []
    else:
        setopt = salt.utils.args.split_input(setopt)
    get_extra_options = kwargs.pop("get_extra_options", False)
    # Support old 'repo' argument
    if repo and not fromrepo:
        fromrepo = repo
    ret = []
    if fromrepo:
        # fromrepo is exclusive: disable all repos, then enable only it.
        log.info("Restricting to repo '%s'", fromrepo)
        ret.extend(["--disablerepo=*", "--enablerepo={}".format(fromrepo)])
    else:
        if disablerepo:
            targets = (
                [disablerepo] if not isinstance(disablerepo, list) else disablerepo
            )
            log.info("Disabling repo(s): %s", ", ".join(targets))
            ret.extend(["--disablerepo={}".format(x) for x in targets])
        if enablerepo:
            targets = [enablerepo] if not isinstance(enablerepo, list) else enablerepo
            log.info("Enabling repo(s): %s", ", ".join(targets))
            ret.extend(["--enablerepo={}".format(x) for x in targets])
    if disableexcludes:
        log.info("Disabling excludes for '%s'", disableexcludes)
        ret.append("--disableexcludes={}".format(disableexcludes))
    if branch:
        log.info("Adding branch '%s'", branch)
        ret.append("--branch={}".format(branch))
    for item in setopt:
        ret.extend(["--setopt", str(item)])
    if get_extra_options:
        # sorting here to make order uniform, makes unit testing more reliable
        for key in sorted(kwargs):
            # Skip Salt-internal kwargs (e.g. __pub_* keys).
            if key.startswith("__"):
                continue
            value = kwargs[key]
            if isinstance(value, str):
                log.info("Found extra option --%s=%s", key, value)
                ret.append("--{}={}".format(key, value))
            elif value is True:
                # Boolean flags become bare --<key> switches.
                log.info("Found extra option --%s", key)
                ret.append("--{}".format(key))
        if ret:
            log.info("Adding extra options: %s", ret)
    return ret
def _get_yum_config():
    """
    Returns a dict representing the yum config options and values.
    We try to pull all of the yum config options into a standard dict object.
    This is currently only used to get the reposdir settings, but could be used
    for other things if needed.
    If the yum python library is available, use that, which will give us all of
    the options, including all of the defaults not specified in the yum config.
    Additionally, they will all be of the correct object type.
    If the yum library is not available, we try to read the yum.conf
    directly ourselves with a minimal set of "defaults".
    """
    # in case of any non-fatal failures, these defaults will be used
    conf = {
        "reposdir": ["/etc/yum/repos.d", "/etc/yum.repos.d"],
    }
    if HAS_YUM:
        try:
            yb = yum.YumBase()
            # Plugins are skipped; only raw config values are wanted here.
            yb.preconf.init_plugins = False
            for name, value in yb.conf.items():
                conf[name] = value
        except (AttributeError, yum.Errors.ConfigError) as exc:
            raise CommandExecutionError("Could not query yum config: {}".format(exc))
        except yum.Errors.YumBaseError as yum_base_error:
            raise CommandExecutionError(
                "Error accessing yum or rpmdb: {}".format(yum_base_error)
            )
    else:
        # fall back to parsing the config ourselves
        # Look for the config the same order yum does
        fn = None
        paths = (
            "/etc/yum/yum.conf",
            "/etc/yum.conf",
            "/etc/dnf/dnf.conf",
            "/etc/tdnf/tdnf.conf",
        )
        for path in paths:
            if os.path.exists(path):
                fn = path
                break
        if not fn:
            raise CommandExecutionError(
                "No suitable yum config file found in: {}".format(paths)
            )
        cp = configparser.ConfigParser()
        try:
            cp.read(fn)
        except OSError as exc:
            raise CommandExecutionError("Unable to read from {}: {}".format(fn, exc))
        if cp.has_section("main"):
            for opt in cp.options("main"):
                if opt in ("reposdir", "commands", "excludes"):
                    # these options are expected to be lists
                    conf[opt] = [x.strip() for x in cp.get("main", opt).split(",")]
                else:
                    conf[opt] = cp.get("main", opt)
        else:
            log.warning(
                "Could not find [main] section in %s, using internal defaults", fn
            )
    return conf
def _get_yum_config_value(name):
    """
    Look for a specific config variable and return its value, or None when it
    is not set.
    """
    # dict.get does a single lookup and already returns None for missing
    # keys (the original used the `name in conf.keys()` anti-pattern plus a
    # second lookup).
    return _get_yum_config().get(name)
def _normalize_basedir(basedir=None):
"""
Takes a basedir argument as a string or a list. If the string or list is
empty, then look up the default from the 'reposdir' option in the yum
config.
Returns a list of directories.
"""
# if we are passed a string (for backward compatibility), convert to a list
if isinstance(basedir, str):
basedir = [x.strip() for x in basedir.split(",")]
if basedir is None:
basedir = []
# nothing specified, so use the reposdir option as the default
if not basedir:
basedir = _get_yum_config_value("reposdir")
if not isinstance(basedir, list) or not basedir:
raise SaltInvocationError("Could not determine any repo directories")
return basedir
def normalize_name(name):
    """
    Strips the architecture from the specified package name, if necessary.
    Circumstances where this would be done include:
    * If the arch is 32 bit and the package name ends in a 32-bit arch.
    * If the arch matches the OS arch, or is ``noarch``.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.normalize_name zsh.x86_64
    """
    try:
        arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)[-1]
        if arch not in salt.utils.pkg.rpm.ARCHES + ("noarch",):
            # Trailing component is not a known arch; nothing to strip.
            return name
    # NOTE(review): rsplit with maxsplit followed by [-1] cannot raise
    # ValueError, so this handler looks like dead code; kept for safety.
    except ValueError:
        return name
    if arch in (__grains__["osarch"], "noarch") or salt.utils.pkg.rpm.check_32(
        arch, osarch=__grains__["osarch"]
    ):
        # Strip ".<arch>" — the +1 accounts for the separator character.
        return name[: -(len(arch) + 1)]
    return name
def parse_arch(name):
    """
    Parse name and architecture from the specified package name.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.parse_arch zsh.x86_64
    """
    pkg_name = name
    pkg_arch = None
    parts = name.rsplit(PKG_ARCH_SEPARATOR, 1)
    # Only treat the suffix as an arch when it is a recognized value.
    if len(parts) == 2 and parts[1] in salt.utils.pkg.rpm.ARCHES + ("noarch",):
        pkg_name, pkg_arch = parts
    return {"name": pkg_name, "arch": pkg_arch}
def latest_version(*names, **kwargs):
    """
    Return the latest version of the named package available for upgrade or
    installation. If more than one package name is specified, a dict of
    name/version pairs is returned.
    If the latest version of a given package is already installed, an empty
    string will be returned for that package.
    A specific repo can be requested using the ``fromrepo`` keyword argument,
    and the ``disableexcludes`` option is also supported.
    .. versionadded:: 2014.7.0
        Support for the ``disableexcludes`` option
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package name> fromrepo=epel-testing
        salt '*' pkg.latest_version <package name> disableexcludes=main
        salt '*' pkg.latest_version <package1> <package2> <package3> ...
    """
    refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
    if len(names) == 0:
        return ""
    options = _get_options(**kwargs)
    # Refresh before looking for the latest version available
    if refresh:
        refresh_db(**kwargs)
    cur_pkgs = list_pkgs(versions_as_list=True)
    # Get available versions for specified package(s)
    cmd = ["--quiet"]
    cmd.extend(options)
    cmd.extend(["list", "available"])
    cmd.extend(names)
    out = _call_yum(cmd, ignore_retcode=True)
    if out["retcode"] != 0:
        if out["stderr"]:
            # Check first if this is just a matter of the packages being
            # up-to-date.
            if not all([x in cur_pkgs for x in names]):
                log.error(
                    "Problem encountered getting latest version for the "
                    "following package(s): %s. Stderr follows: \n%s",
                    ", ".join(names),
                    out["stderr"],
                )
        updates = []
    else:
        # Sort by version number (highest to lowest) for loop below
        updates = sorted(
            _yum_pkginfo(out["stdout"]),
            key=lambda pkginfo: _LooseVersion(pkginfo.version),
            reverse=True,
        )

    def _check_cur(pkg):
        # True when `pkg` is newer than every installed version (i.e. a
        # genuine upgrade), or when the package is not installed at all.
        if pkg.name in cur_pkgs:
            for installed_version in cur_pkgs[pkg.name]:
                # If any installed version is greater than (or equal to) the
                # one found by yum/dnf list available, then it is not an
                # upgrade.
                if salt.utils.versions.compare(
                    ver1=installed_version,
                    oper=">=",
                    ver2=pkg.version,
                    cmp_func=version_cmp,
                ):
                    return False
            # pkg.version is greater than all installed versions
            return True
        else:
            # Package is not installed
            return True

    ret = {}
    for name in names:
        # Derive desired pkg arch (for arch-specific packages) based on the
        # package name(s) passed to the function. On a 64-bit OS, "pkgame"
        # would be assumed to match the osarch, while "pkgname.i686" would
        # have an arch of "i686". This desired arch is then compared against
        # the updates derived from _yum_pkginfo() above, so that we can
        # distinguish an update for a 32-bit version of a package from its
        # 64-bit counterpart.
        try:
            arch = name.rsplit(".", 1)[-1]
            if arch not in salt.utils.pkg.rpm.ARCHES:
                arch = __grains__["osarch"]
        except ValueError:
            arch = __grains__["osarch"]
        # This loop will iterate over the updates derived by _yum_pkginfo()
        # above, which have been sorted descendingly by version number,
        # ensuring that the latest available version for the named package is
        # examined first. The call to _check_cur() will ensure that a package
        # seen by yum as "available" will only be detected as an upgrade if it
        # has a version higher than all currently-installed versions of the
        # package.
        for pkg in (x for x in updates if x.name == name):
            # This if/or statement makes sure that we account for noarch
            # packages as well as arch-specific packages.
            if (
                pkg.arch == "noarch"
                or pkg.arch == arch
                or salt.utils.pkg.rpm.check_32(pkg.arch)
            ):
                if _check_cur(pkg):
                    ret[name] = pkg.version
                    # no need to check another match, if there was one
                    break
        else:
            # No candidate found: package is up to date (or unknown).
            ret[name] = ""
    # Return a string if only one package name passed
    if len(names) == 1:
        return ret[names[0]]
    return ret
# available_version is being deprecated; it is kept as an alias of
# latest_version for backward compatibility.
available_version = salt.utils.functools.alias_function(
    latest_version, "available_version"
)
def upgrade_available(name, **kwargs):
    """
    Check whether or not an upgrade is available for a given package
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade_available <package name>
    """
    # latest_version returns '' when the package is already up to date.
    available = latest_version(name, **kwargs)
    return available != ""
def version(*names, **kwargs):
    """
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3> ...
    """
    # Delegates entirely to the shared pkg_resource.version helper.
    return __salt__["pkg_resource.version"](*names, **kwargs)
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
    """
    .. versionadded:: 2015.5.4
    Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
    pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
    making the comparison.
    ignore_epoch : False
        Set to ``True`` to ignore the epoch when comparing versions
    .. versionadded:: 2015.8.10,2016.3.2
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
    """
    # Delegates to the rpm-based comparison in the lowpkg module.
    return __salt__["lowpkg.version_cmp"](pkg1, pkg2, ignore_epoch=ignore_epoch)
def _list_pkgs_from_context(versions_as_list, contextkey, attr):
    """
    Use pkg list from __context__

    versions_as_list
        Whether versions are returned as lists rather than joined strings.
    contextkey
        Key under which the cached package dict is stored in ``__context__``.
    attr
        Optional list of package attributes to include in the result.
    """
    return __salt__["pkg_resource.format_pkg_list"](
        __context__[contextkey], versions_as_list, attr
    )
def list_pkgs(versions_as_list=False, **kwargs):
    """
    List the packages currently installed as a dict. By default, the dict
    contains versions as a comma separated string::
        {'<package_name>': '<version>[,<version>...]'}
    versions_as_list:
        If set to true, the versions are provided as a list
        {'<package_name>': ['<version>', '<version>']}
    attr:
        If a list of package attributes is specified, returned value will
        contain them in addition to version, eg.::
        {'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
        Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
        ``install_date``, ``install_date_time_t``.
        If ``all`` is specified, all valid attributes will be returned.
        .. versionadded:: 2018.3.0
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs attr=version,arch
        salt '*' pkg.list_pkgs attr='["version", "arch"]'
    """
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # not yet implemented or not applicable
    if any(
        [salt.utils.data.is_true(kwargs.get(x)) for x in ("removed", "purge_desired")]
    ):
        return {}
    attr = kwargs.get("attr")
    if attr is not None:
        attr = salt.utils.args.split_input(attr)
    contextkey = "pkg.list_pkgs"
    # Serve from the per-run cache unless the caller opted out.
    if contextkey in __context__ and kwargs.get("use_context", True):
        return _list_pkgs_from_context(versions_as_list, contextkey, attr)
    ret = {}
    cmd = [
        "rpm",
        "-qa",
        "--queryformat",
        salt.utils.pkg.rpm.QUERYFORMAT.replace("%{REPOID}", "(none)") + "\n",
    ]
    output = __salt__["cmd.run"](cmd, python_shell=False, output_loglevel="trace")
    for line in output.splitlines():
        pkginfo = salt.utils.pkg.rpm.parse_pkginfo(line, osarch=__grains__["osarch"])
        if pkginfo is not None:
            # see rpm version string rules available at https://goo.gl/UGKPNd
            pkgver = pkginfo.version
            epoch = None
            release = None
            if ":" in pkgver:
                epoch, pkgver = pkgver.split(":", 1)
            if "-" in pkgver:
                pkgver, release = pkgver.split("-", 1)
            all_attr = {
                "epoch": epoch,
                "version": pkgver,
                "release": release,
                "arch": pkginfo.arch,
                "install_date": pkginfo.install_date,
                "install_date_time_t": pkginfo.install_date_time_t,
            }
            __salt__["pkg_resource.add_pkg"](ret, pkginfo.name, all_attr)
    for pkgname in ret:
        ret[pkgname] = sorted(ret[pkgname], key=lambda d: d["version"])
    # Cache the full result for subsequent calls within this run.
    __context__[contextkey] = ret
    return __salt__["pkg_resource.format_pkg_list"](
        __context__[contextkey], versions_as_list, attr
    )
def list_repo_pkgs(*args, **kwargs):
    """
    .. versionadded:: 2014.1.0
    .. versionchanged:: 2014.7.0
        All available versions of each package are now returned. This required
        a slight modification to the structure of the return dict. The return
        data shown below reflects the updated return dict structure. Note that
        packages which are version-locked using :py:mod:`pkg.hold
        <salt.modules.yumpkg.hold>` will only show the currently-installed
        version, as locking a package will make other versions appear
        unavailable to yum/dnf.
    .. versionchanged:: 2017.7.0
        By default, the versions for each package are no longer organized by
        repository. To get results organized by repository, use
        ``byrepo=True``.
    Returns all available packages. Optionally, package names (and name globs)
    can be passed and the results will be filtered to packages matching those
    names. This is recommended as it speeds up the function considerably.
    .. warning::
        Running this function on RHEL/CentOS 6 and earlier will be more
        resource-intensive, as the version of yum that ships with older
        RHEL/CentOS has no yum subcommand for listing packages from a
        repository. Thus, a ``yum list installed`` and ``yum list available``
        are run, which generates a lot of output, which must then be analyzed
        to determine which package information to include in the return data.
    This function can be helpful in discovering the version or repo to specify
    in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
    The return data will be a dictionary mapping package names to a list of
    version numbers, ordered from newest to oldest. If ``byrepo`` is set to
    ``True``, then the return dictionary will contain repository names at the
    top level, and each repository will map packages to lists of version
    numbers. For example:
    .. code-block:: python
        # With byrepo=False (default)
        {
            'bash': ['4.1.2-15.el6_5.2',
                     '4.1.2-15.el6_5.1',
                     '4.1.2-15.el6_4'],
            'kernel': ['2.6.32-431.29.2.el6',
                       '2.6.32-431.23.3.el6',
                       '2.6.32-431.20.5.el6',
                       '2.6.32-431.20.3.el6',
                       '2.6.32-431.17.1.el6',
                       '2.6.32-431.11.2.el6',
                       '2.6.32-431.5.1.el6',
                       '2.6.32-431.3.1.el6',
                       '2.6.32-431.1.2.0.1.el6',
                       '2.6.32-431.el6']
        }
        # With byrepo=True
        {
            'base': {
                'bash': ['4.1.2-15.el6_4'],
                'kernel': ['2.6.32-431.el6']
            },
            'updates': {
                'bash': ['4.1.2-15.el6_5.2', '4.1.2-15.el6_5.1'],
                'kernel': ['2.6.32-431.29.2.el6',
                           '2.6.32-431.23.3.el6',
                           '2.6.32-431.20.5.el6',
                           '2.6.32-431.20.3.el6',
                           '2.6.32-431.17.1.el6',
                           '2.6.32-431.11.2.el6',
                           '2.6.32-431.5.1.el6',
                           '2.6.32-431.3.1.el6',
                           '2.6.32-431.1.2.0.1.el6']
            }
        }
    fromrepo : None
        Only include results from the specified repo(s). Multiple repos can be
        specified, comma-separated.
    enablerepo (ignored if ``fromrepo`` is specified)
        Specify a disabled package repository (or repositories) to enable.
        (e.g., ``yum --enablerepo='somerepo'``)
        .. versionadded:: 2017.7.0
    disablerepo (ignored if ``fromrepo`` is specified)
        Specify an enabled package repository (or repositories) to disable.
        (e.g., ``yum --disablerepo='somerepo'``)
        .. versionadded:: 2017.7.0
    byrepo : False
        When ``True``, the return data for each package will be organized by
        repository.
        .. versionadded:: 2017.7.0
    cacheonly : False
        When ``True``, the repo information will be retrieved from the cached
        repo metadata. This is equivalent to passing the ``-C`` option to
        yum/dnf.
        .. versionadded:: 2017.7.0
    setopt
        A comma-separated or Python list of key=value options. This list will
        be expanded and ``--setopt`` prepended to each in the yum/dnf command
        that is run.
        .. versionadded:: 2019.2.0
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.list_repo_pkgs
        salt '*' pkg.list_repo_pkgs foo bar baz
        salt '*' pkg.list_repo_pkgs 'samba4*' fromrepo=base,updates
        salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True
    """
    byrepo = kwargs.pop("byrepo", False)
    cacheonly = kwargs.pop("cacheonly", False)
    fromrepo = kwargs.pop("fromrepo", "") or ""
    disablerepo = kwargs.pop("disablerepo", "") or ""
    enablerepo = kwargs.pop("enablerepo", "") or ""
    # NOTE(review): repo_arg is computed but never used below — possibly kept
    # only for kwargs validation/logging side effects; verify upstream.
    repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
    if fromrepo and not isinstance(fromrepo, list):
        try:
            fromrepo = [x.strip() for x in fromrepo.split(",")]
        except AttributeError:
            fromrepo = [x.strip() for x in str(fromrepo).split(",")]
    if disablerepo and not isinstance(disablerepo, list):
        try:
            disablerepo = [x.strip() for x in disablerepo.split(",") if x != "*"]
        except AttributeError:
            disablerepo = [x.strip() for x in str(disablerepo).split(",") if x != "*"]
    if enablerepo and not isinstance(enablerepo, list):
        try:
            enablerepo = [x.strip() for x in enablerepo.split(",") if x != "*"]
        except AttributeError:
            enablerepo = [x.strip() for x in str(enablerepo).split(",") if x != "*"]
    if fromrepo:
        repos = fromrepo
    else:
        # All configured repos that are enabled (or explicitly enablerepo'd)
        # and not explicitly disabled.
        repos = [
            repo_name
            for repo_name, repo_info in list_repos().items()
            if repo_name in enablerepo
            or (
                repo_name not in disablerepo
                and str(repo_info.get("enabled", "1")) == "1"
            )
        ]
    ret = {}

    def _check_args(args, name):
        """
        Do glob matching on args and return True if a match was found.
        Otherwise, return False
        """
        for arg in args:
            if fnmatch.fnmatch(name, arg):
                return True
        return False

    def _parse_output(output, strict=False):
        # Accumulate {repoid: {pkgname: set(versions)}} into `ret`; with
        # strict=True, skip packages from unexpected repos or names that do
        # not match the requested globs.
        for pkg in _yum_pkginfo(output):
            if strict and (pkg.repoid not in repos or not _check_args(args, pkg.name)):
                continue
            repo_dict = ret.setdefault(pkg.repoid, {})
            version_list = repo_dict.setdefault(pkg.name, set())
            version_list.add(pkg.version)

    yum_version = (
        None
        if _yum() != "yum"
        else _LooseVersion(
            __salt__["cmd.run"](["yum", "--version"], python_shell=False)
            .splitlines()[0]
            .strip()
        )
    )
    # Really old version of yum; does not even have --showduplicates option
    if yum_version and yum_version < _LooseVersion("3.2.13"):
        cmd_prefix = ["--quiet"]
        if cacheonly:
            cmd_prefix.append("-C")
        cmd_prefix.append("list")
        for pkg_src in ("installed", "available"):
            # Check installed packages first
            out = _call_yum(cmd_prefix + [pkg_src], ignore_retcode=True)
            if out["retcode"] == 0:
                _parse_output(out["stdout"], strict=True)
    # The --showduplicates option is added in 3.2.13, but the
    # repository-packages subcommand is only in 3.4.3 and newer
    elif yum_version and yum_version < _LooseVersion("3.4.3"):
        cmd_prefix = ["--quiet", "--showduplicates"]
        if cacheonly:
            cmd_prefix.append("-C")
        cmd_prefix.append("list")
        for pkg_src in ("installed", "available"):
            # Check installed packages first
            out = _call_yum(cmd_prefix + [pkg_src], ignore_retcode=True)
            if out["retcode"] == 0:
                _parse_output(out["stdout"], strict=True)
    else:
        for repo in repos:
            cmd = ["--quiet", "--showduplicates", "repository-packages", repo, "list"]
            if cacheonly:
                cmd.append("-C")
            # Can't concatenate because args is a tuple, using list.extend()
            cmd.extend(args)
            out = _call_yum(cmd, ignore_retcode=True)
            if out["retcode"] != 0 and "Error:" in out["stdout"]:
                continue
            _parse_output(out["stdout"])
    if byrepo:
        for reponame in ret:
            # Sort versions newest to oldest
            for pkgname in ret[reponame]:
                sorted_versions = sorted(
                    [_LooseVersion(x) for x in ret[reponame][pkgname]], reverse=True
                )
                ret[reponame][pkgname] = [x.vstring for x in sorted_versions]
        return ret
    else:
        # Flatten the per-repo results into {pkgname: [versions...]}.
        byrepo_ret = {}
        for reponame in ret:
            for pkgname in ret[reponame]:
                byrepo_ret.setdefault(pkgname, []).extend(ret[reponame][pkgname])
        for pkgname in byrepo_ret:
            sorted_versions = sorted(
                [_LooseVersion(x) for x in byrepo_ret[pkgname]], reverse=True
            )
            byrepo_ret[pkgname] = [x.vstring for x in sorted_versions]
        return byrepo_ret
def list_upgrades(refresh=True, **kwargs):
    """
    Check whether or not an upgrade is available for all packages
    The ``fromrepo``, ``enablerepo``, and ``disablerepo`` arguments are
    supported, as used in pkg states, and the ``disableexcludes`` option is
    also supported.
    .. versionadded:: 2014.7.0
        Support for the ``disableexcludes`` option
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_upgrades
    """
    options = _get_options(**kwargs)
    if salt.utils.data.is_true(refresh):
        refresh_db(check_update=False, **kwargs)
    cmd = ["--quiet"]
    cmd.extend(options)
    # dnf renamed the subcommand from "updates" to "upgrades".
    cmd.extend(["list", "upgrades" if _yum() == "dnf" else "updates"])
    out = _call_yum(cmd, ignore_retcode=True)
    # BUG FIX: `out` is the dict returned by cmd.run_all, so the original
    # check `"Error:" in out` tested the dict *keys* and could never be
    # true. Test the command's stdout instead, matching the equivalent
    # check in list_repo_pkgs.
    if out["retcode"] != 0 and "Error:" in out["stdout"]:
        return {}
    return {x.name: x.version for x in _yum_pkginfo(out["stdout"])}
# Preserve expected CLI usage (yum list updates): expose list_upgrades under
# the legacy name as well.
list_updates = salt.utils.functools.alias_function(list_upgrades, "list_updates")
def list_downloaded(**kwargs):
    """
    .. versionadded:: 2017.7.0
    List prefetched packages downloaded by Yum in the local disk.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_downloaded
    """
    # The package cache lives under /var/cache/<yum|dnf|tdnf>.
    CACHE_DIR = os.path.join("/var/cache/", _yum())
    ret = {}
    for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
        for filename in fnmatch.filter(filenames, "*.rpm"):
            package_path = os.path.join(root, filename)
            pkg_info = __salt__["lowpkg.bin_pkg_info"](package_path)
            # ctime of the cached rpm is used as its creation timestamp.
            pkg_timestamp = int(os.path.getctime(package_path))
            ret.setdefault(pkg_info["name"], {})[pkg_info["version"]] = {
                "path": package_path,
                "size": os.path.getsize(package_path),
                "creation_date_time_t": pkg_timestamp,
                "creation_date_time": datetime.datetime.fromtimestamp(
                    pkg_timestamp
                ).isoformat(),
            }
    return ret
def info_installed(*names, **kwargs):
    """
    .. versionadded:: 2015.8.1
    Return the information of the named package(s), installed on the system.
    :param all_versions:
        Include information for all versions of the packages installed on the minion.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.info_installed <package1>
        salt '*' pkg.info_installed <package1> <package2> <package3> ...
        salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
    """
    all_versions = kwargs.get("all_versions", False)
    ret = {}
    for pkg_name, pkgs_nfo in __salt__["lowpkg.info"](*names, **kwargs).items():
        # lowpkg.info returns a list per package when all_versions is set,
        # otherwise a single dict; normalize to a list for uniform handling.
        nfo_list = pkgs_nfo if all_versions else [pkgs_nfo]
        for nfo in nfo_list:
            # Translate the rpm-specific "source_rpm" key to the common
            # "source" key; everything else passes through unchanged.
            translated = {
                ("source" if key == "source_rpm" else key): value
                for key, value in nfo.items()
            }
            if all_versions:
                ret.setdefault(pkg_name, []).append(translated)
            else:
                ret[pkg_name] = translated
    return ret
def refresh_db(**kwargs):
    """
    Check the yum repos for updated packages
    Returns:
    - ``True``: Updates are available
    - ``False``: An error occurred
    - ``None``: No updates are available
    repo
        Refresh just the specified repo
    disablerepo
        Do not refresh the specified repo
    enablerepo
        Refresh a disabled repo using this option
    branch
        Add the specified branch when refreshing
    disableexcludes
        Disable the excludes defined in your config files. Takes one of three
        options:
        - ``all`` - disable all excludes
        - ``main`` - disable excludes defined in [main] in yum.conf
        - ``repoid`` - disable excludes defined for that repo
    setopt
        A comma-separated or Python list of key=value options. This list will
        be expanded and ``--setopt`` prepended to each in the yum/dnf command
        that is run.
        .. versionadded:: 2019.2.0
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.refresh_db
    """
    # Remove rtag file to keep multiple refreshes from happening in pkg states
    salt.utils.pkg.clear_rtag(__opts__)
    # yum/dnf check-update exit codes: 100 = updates available, 0 = none,
    # 1 = error; anything else is treated as an error as well.
    retcode_map = {
        100: True,
        0: None,
        1: False,
    }
    # Pop check_update before building options so it isn't misparsed.
    check_update_ = kwargs.pop("check_update", True)
    options = _get_options(**kwargs)
    clean_cmd = ["--quiet", "--assumeyes", "clean", "expire-cache"]
    clean_cmd.extend(options)
    _call_yum(clean_cmd, ignore_retcode=True)
    if not check_update_:
        # Caller only wanted the cache expired; report success.
        return True
    update_cmd = ["--quiet", "--assumeyes", "check-update"]
    if (
        __grains__.get("os_family") == "RedHat"
        and __grains__.get("osmajorrelease") == 7
    ):
        # This feature is disabled because it is not used by Salt and adds a
        # lot of extra time to the command with large repos like EPEL
        update_cmd.append("--setopt=autocheck_running_kernel=false")
    update_cmd.extend(options)
    return retcode_map.get(
        _call_yum(update_cmd, ignore_retcode=True)["retcode"], False
    )
def clean_metadata(**kwargs):
    """
    .. versionadded:: 2014.1.0
    Cleans local yum metadata. Functionally identical to :mod:`refresh_db()
    <salt.modules.yumpkg.refresh_db>`.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.clean_metadata
    """
    # Thin wrapper: refresh_db() already runs "clean expire-cache" (and,
    # unless check_update=False is passed, "check-update"), which is what
    # cleaning the metadata amounts to.
    return refresh_db(**kwargs)
class AvailablePackages(salt.utils.lazy.LazyDict):
    """
    Lazily-populated mapping of available package versions.
    The expensive ``list_repo_pkgs()`` query is deferred until the first key
    lookup, so constructing this object is free when no wildcard/comparison
    version specifiers actually need resolving.
    """
    def __init__(self, *args, **kwargs):
        # Stash the query arguments for the deferred list_repo_pkgs() call.
        super().__init__()
        self._args = args
        self._kwargs = kwargs
    def _load(self, key):
        # LazyDict hook: loading any single key populates the entire dict.
        self._load_all()
        return True
    def _load_all(self):
        # Populate the backing dict with the repo query results and mark the
        # LazyDict as fully loaded so _load() is not called again.
        self._dict = list_repo_pkgs(*self._args, **self._kwargs)
        self.loaded = True
def install(
    name=None,
    refresh=False,
    skip_verify=False,
    pkgs=None,
    sources=None,
    downloadonly=False,
    reinstall=False,
    normalize=True,
    update_holds=False,
    saltenv="base",
    ignore_epoch=False,
    **kwargs
):
    """
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any yum/dnf commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
    Install the passed package(s), add refresh=True to clean the yum database
    before package is installed.
    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.
        32-bit packages can be installed on 64-bit systems by appending the
        architecture designation (``.i686``, ``.i586``, etc.) to the end of the
        package name.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <package name>
    refresh
        Whether or not to update the yum database before executing.
    reinstall
        Specifying reinstall=True will use ``yum reinstall`` rather than
        ``yum install`` for requested packages that are already installed.
        If a version is specified with the requested package, then
        ``yum reinstall`` will only be used if the installed version
        matches the requested version.
        Works with ``sources`` when the package header of the source can be
        matched to the name and version of an installed package.
        .. versionadded:: 2014.7.0
    skip_verify
        Skip the GPG verification check (e.g., ``--nogpgcheck``)
    downloadonly
        Only download the packages, do not install.
    version
        Install a specific version of the package, e.g. 1.2.3-4.el5. Ignored
        if "pkgs" or "sources" is passed.
        .. versionchanged:: 2018.3.0
            version can now contain comparison operators (e.g. ``>1.2.3``,
            ``<=2.0``, etc.)
    update_holds : False
        If ``True``, and this function would update the package version, any
        packages held using the yum/dnf "versionlock" plugin will be unheld so
        that they can be updated. Otherwise, if this function attempts to
        update a held package, the held package(s) will be skipped and an
        error will be raised.
        .. versionadded:: 2016.11.0
    setopt
        A comma-separated or Python list of key=value options. This list will
        be expanded and ``--setopt`` prepended to each in the yum/dnf command
        that is run.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install foo setopt='obsoletes=0,plugins=0'
        .. versionadded:: 2019.2.0
    Repository Options:
    fromrepo
        Specify a package repository (or repositories) from which to install.
        (e.g., ``yum --disablerepo='*' --enablerepo='somerepo'``)
    enablerepo (ignored if ``fromrepo`` is specified)
        Specify a disabled package repository (or repositories) to enable.
        (e.g., ``yum --enablerepo='somerepo'``)
    disablerepo (ignored if ``fromrepo`` is specified)
        Specify an enabled package repository (or repositories) to disable.
        (e.g., ``yum --disablerepo='somerepo'``)
    disableexcludes
        Disable exclude from main, for a repo or for everything.
        (e.g., ``yum --disableexcludes='main'``)
        .. versionadded:: 2014.7.0
    ignore_epoch : False
        Only used when the version of a package is specified using a comparison
        operator (e.g. ``>4.1``). If set to ``True``, then the epoch will be
        ignored when comparing the currently-installed version to the desired
        version.
        .. versionadded:: 2018.3.0
    Multiple Package Installation Options:
    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list. A specific version number can be specified
        by using a single-element dict representing the package and its
        version.
        CLI Examples:
        .. code-block:: bash
            salt '*' pkg.install pkgs='["foo", "bar"]'
            salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4.el5"}]'
    sources
        A list of RPM packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'
    normalize : True
        Normalize the package name by removing the architecture. This is useful
        for poorly created packages which might include the architecture as an
        actual part of the name such as kernel modules which match a specific
        kernel version.
        .. code-block:: bash
            salt -G role:nsd pkg.install gpfs.gplbin-2.6.32-279.31.1.el6.x86_64 normalize=False
        .. versionadded:: 2014.7.0
    diff_attr:
        If a list of package attributes is specified, returned value will
        contain them, eg.::
            {'<package>': {
                'old': {
                    'version': '<old-version>',
                    'arch': '<old-arch>'},
                'new': {
                    'version': '<new-version>',
                    'arch': '<new-arch>'}}}
        Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
        ``install_date``, ``install_date_time_t``.
        If ``all`` is specified, all valid attributes will be returned.
        .. versionadded:: 2018.3.0
    Returns a dict containing the new package names and versions::
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    If an attribute list in diff_attr is specified, the dict will also contain
    any specified attribute, eg.::
        {'<package>': {
            'old': {
                'version': '<old-version>',
                'arch': '<old-arch>'},
            'new': {
                'version': '<new-version>',
                'arch': '<new-arch>'}}}
    """
    # Translate repo/exclude/setopt kwargs into yum/dnf CLI options.
    options = _get_options(**kwargs)
    if salt.utils.data.is_true(refresh):
        refresh_db(**kwargs)
    reinstall = salt.utils.data.is_true(reinstall)
    # Resolve name/pkgs/sources into a {target: version} mapping plus the
    # target type ("repository", "file", or "advisory").
    try:
        pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
            name, pkgs, sources, saltenv=saltenv, normalize=normalize, **kwargs
        )
    except MinionError as exc:
        raise CommandExecutionError(exc)
    if pkg_params is None or len(pkg_params) == 0:
        return {}
    # "version" and "diff_attr" only apply to single-package ("name") installs.
    version_num = kwargs.get("version")
    diff_attr = kwargs.get("diff_attr")
    # Snapshot the pre-install state so changes can be reported afterwards.
    # With downloadonly=True, the relevant "state" is the download cache.
    old = (
        list_pkgs(versions_as_list=False, attr=diff_attr)
        if not downloadonly
        else list_downloaded()
    )
    # Use of __context__ means no duplicate work here, just accessing
    # information already in __context__ from the previous call to list_pkgs()
    old_as_list = (
        list_pkgs(versions_as_list=True) if not downloadonly else list_downloaded()
    )
    to_install = []
    to_downgrade = []
    to_reinstall = []
    _available = {}
    # The above three lists will be populated with tuples containing the
    # package name and the string being used for this particular package
    # modification. The reason for this method is that the string we use for
    # installation, downgrading, or reinstallation will be different than the
    # package name in a couple cases:
    #
    # 1) A specific version is being targeted. In this case the string being
    #    passed to install/downgrade/reinstall will contain the version
    #    information after the package name.
    # 2) A binary package is being installed via the "sources" param. In this
    #    case the string being passed will be the path to the local copy of
    #    the package in the minion cachedir.
    #
    # The reason that we need both items is to be able to modify the installed
    # version of held packages.
    if pkg_type == "repository":
        # Wildcard and comparison-operator version specifiers need the full
        # list of available versions to be resolved to a concrete version.
        has_wildcards = []
        has_comparison = []
        for pkgname, pkgver in pkg_params.items():
            try:
                if "*" in pkgver:
                    has_wildcards.append(pkgname)
                elif pkgver.startswith("<") or pkgver.startswith(">"):
                    has_comparison.append(pkgname)
            except (TypeError, ValueError):
                # pkgver is None (no version requested); nothing to resolve.
                continue
        # Lazy: only queries the repos if one of these specifiers exists.
        _available = AvailablePackages(
            *has_wildcards + has_comparison, byrepo=False, **kwargs
        )
        pkg_params_items = pkg_params.items()
    elif pkg_type == "advisory":
        pkg_params_items = []
        cur_patches = list_patches()
        for advisory_id in pkg_params:
            if advisory_id not in cur_patches:
                raise CommandExecutionError(
                    'Advisory id "{}" not found'.format(advisory_id)
                )
            else:
                pkg_params_items.append(advisory_id)
    else:
        # pkg_type is "file": targets are local paths/URIs to RPMs. Read
        # name/version from the RPM header when possible so version
        # comparisons (and reinstall detection) work.
        pkg_params_items = []
        for pkg_source in pkg_params:
            if "lowpkg.bin_pkg_info" in __salt__:
                rpm_info = __salt__["lowpkg.bin_pkg_info"](pkg_source)
            else:
                rpm_info = None
            if rpm_info is None:
                log.error(
                    "pkg.install: Unable to get rpm information for %s. "
                    "Version comparisons will be unavailable, and return "
                    "data may be inaccurate if reinstall=True.",
                    pkg_source,
                )
                pkg_params_items.append([pkg_source])
            else:
                pkg_params_items.append(
                    [rpm_info["name"], pkg_source, rpm_info["version"]]
                )
    errors = []
    # Classify each target into the install/downgrade/reinstall work lists.
    for pkg_item_list in pkg_params_items:
        if pkg_type == "repository":
            pkgname, version_num = pkg_item_list
        elif pkg_type == "advisory":
            pkgname = pkg_item_list
            version_num = None
        else:
            try:
                pkgname, pkgpath, version_num = pkg_item_list
            except ValueError:
                # RPM header could not be read above: only the path is known.
                pkgname = None
                pkgpath = pkg_item_list[0]
                version_num = None
        if version_num is None:
            # No specific version requested.
            if pkg_type == "repository":
                if reinstall and pkgname in old:
                    to_reinstall.append((pkgname, pkgname))
                else:
                    to_install.append((pkgname, pkgname))
            elif pkg_type == "advisory":
                to_install.append((pkgname, pkgname))
            else:
                to_install.append((pkgname, pkgpath))
        else:
            # If we are installing a package file and not one from the repo,
            # and version_num is not None, then we can assume that pkgname is
            # not None, since the only way version_num is not None is if RPM
            # metadata parsing was successful.
            if pkg_type == "repository":
                # yum/dnf does not support comparison operators. If the version
                # starts with an equals sign, ignore it.
                version_num = version_num.lstrip("=")
                if pkgname in has_comparison:
                    # Resolve e.g. ">=1.2" to a concrete available version.
                    candidates = _available.get(pkgname, [])
                    target = salt.utils.pkg.match_version(
                        version_num,
                        candidates,
                        cmp_func=version_cmp,
                        ignore_epoch=ignore_epoch,
                    )
                    if target is None:
                        errors.append(
                            "No version matching '{}{}' could be found "
                            "(available: {})".format(
                                pkgname,
                                version_num,
                                ", ".join(candidates) if candidates else None,
                            )
                        )
                        continue
                    else:
                        version_num = target
                if _yum() == "yum":
                    # yum install does not support epoch without the arch, and
                    # we won't know what the arch will be when it's not
                    # provided. It could either be the OS architecture, or
                    # 'noarch', and we don't make that distinction in the
                    # pkg.list_pkgs return data.
                    if ignore_epoch is True:
                        version_num = version_num.split(":", 1)[-1]
                # Split a trailing ".<arch>" off the package name (if any)
                # so the version can be inserted between name and arch.
                arch = ""
                try:
                    namepart, archpart = pkgname.rsplit(".", 1)
                except ValueError:
                    pass
                else:
                    if archpart in salt.utils.pkg.rpm.ARCHES:
                        arch = "." + archpart
                        pkgname = namepart
                if "*" in version_num:
                    # Resolve wildcard matches
                    candidates = _available.get(pkgname, [])
                    match = salt.utils.itertools.fnmatch_multiple(
                        candidates, version_num
                    )
                    if match is not None:
                        version_num = match
                    else:
                        errors.append(
                            "No version matching '{}' found for package "
                            "'{}' (available: {})".format(
                                version_num,
                                pkgname,
                                ", ".join(candidates) if candidates else "none",
                            )
                        )
                        continue
                if ignore_epoch is True:
                    pkgstr = "{}-{}{}".format(pkgname, version_num, arch)
                else:
                    pkgstr = "{}-{}{}".format(
                        pkgname, version_num.split(":", 1)[-1], arch
                    )
            else:
                pkgstr = pkgpath
            # Lambda to trim the epoch from the currently-installed version if
            # no epoch is specified in the specified version
            cver = old_as_list.get(pkgname, [])
            if reinstall and cver:
                for ver in cver:
                    if salt.utils.versions.compare(
                        ver1=version_num,
                        oper="==",
                        ver2=ver,
                        cmp_func=version_cmp,
                        ignore_epoch=ignore_epoch,
                    ):
                        # This version is already installed, so we need to
                        # reinstall.
                        to_reinstall.append((pkgname, pkgstr))
                        break
            else:
                if not cver:
                    to_install.append((pkgname, pkgstr))
                else:
                    for ver in cver:
                        if salt.utils.versions.compare(
                            ver1=version_num,
                            oper=">=",
                            ver2=ver,
                            cmp_func=version_cmp,
                            ignore_epoch=ignore_epoch,
                        ):
                            to_install.append((pkgname, pkgstr))
                            break
                    else:
                        if pkgname is not None:
                            if re.match("^kernel(|-devel)$", pkgname):
                                # kernel and kernel-devel support multiple
                                # installs as their paths do not conflict.
                                # Performing a yum/dnf downgrade will be a
                                # no-op so just do an install instead. It will
                                # fail if there are other interdependencies
                                # that have conflicts, and that's OK. We don't
                                # want to force anything, we just want to
                                # properly handle it if someone tries to
                                # install a kernel/kernel-devel of a lower
                                # version than the currently-installed one.
                                # TODO: find a better way to determine if a
                                # package supports multiple installs.
                                to_install.append((pkgname, pkgstr))
                            else:
                                # None of the currently-installed versions are
                                # greater than the specified version, so this
                                # is a downgrade.
                                to_downgrade.append((pkgname, pkgstr))
    def _add_common_args(cmd):
        """
        DRY function to add args common to all yum/dnf commands
        """
        cmd.extend(options)
        if skip_verify:
            cmd.append("--nogpgcheck")
        if downloadonly:
            cmd.append("--downloadonly")
    # Current version locks; needed to honor (or temporarily lift) holds.
    try:
        holds = list_holds(full=False)
    except SaltInvocationError:
        holds = []
        log.debug("Failed to get holds, versionlock plugin is probably not installed")
    unhold_prevented = []
    @contextlib.contextmanager
    def _temporarily_unhold(pkgs, targets):
        """
        Temporarily unhold packages that need to be updated. Add any
        successfully-removed ones (and any packages not in the list of current
        holds) to the list of targets.
        """
        to_unhold = {}
        for pkgname, pkgstr in pkgs:
            if pkgname in holds:
                if update_holds:
                    to_unhold[pkgname] = pkgstr
                else:
                    # Held and update_holds is False: skip and report later.
                    unhold_prevented.append(pkgname)
            else:
                targets.append(pkgstr)
        if not to_unhold:
            yield
        else:
            log.debug("Unholding packages: %s", ", ".join(to_unhold))
            try:
                # Using list() here for python3 compatibility, dict.keys() no
                # longer returns a list in python3.
                unhold_names = list(to_unhold.keys())
                for unheld_pkg, outcome in unhold(pkgs=unhold_names).items():
                    if outcome["result"]:
                        # Package was successfully unheld, add to targets
                        targets.append(to_unhold[unheld_pkg])
                    else:
                        # Failed to unhold package
                        errors.append(unheld_pkg)
                yield
            except Exception as exc:  # pylint: disable=broad-except
                errors.append(
                    "Error encountered unholding packages {}: {}".format(
                        ", ".join(to_unhold), exc
                    )
                )
            finally:
                # Re-lock everything that was unheld, even on failure.
                hold(pkgs=unhold_names)
    # Run the three queued operations (install, downgrade, reinstall),
    # temporarily unholding held packages when update_holds=True.
    targets = []
    with _temporarily_unhold(to_install, targets):
        if targets:
            if pkg_type == "advisory":
                targets = ["--advisory={}".format(t) for t in targets]
            cmd = ["-y"]
            if _yum() == "dnf":
                cmd.extend(["--best", "--allowerasing"])
            _add_common_args(cmd)
            cmd.append("install" if pkg_type != "advisory" else "update")
            cmd.extend(targets)
            out = _call_yum(cmd, ignore_retcode=False, redirect_stderr=True)
            if out["retcode"] != 0:
                errors.append(out["stdout"])
    targets = []
    with _temporarily_unhold(to_downgrade, targets):
        if targets:
            cmd = ["-y"]
            _add_common_args(cmd)
            cmd.append("downgrade")
            cmd.extend(targets)
            out = _call_yum(cmd, redirect_stderr=True)
            if out["retcode"] != 0:
                errors.append(out["stdout"])
    targets = []
    with _temporarily_unhold(to_reinstall, targets):
        if targets:
            cmd = ["-y"]
            _add_common_args(cmd)
            cmd.append("reinstall")
            cmd.extend(targets)
            out = _call_yum(cmd, redirect_stderr=True)
            if out["retcode"] != 0:
                errors.append(out["stdout"])
    # Drop the cached package list so the post-install query is fresh.
    __context__.pop("pkg.list_pkgs", None)
    new = (
        list_pkgs(versions_as_list=False, attr=diff_attr)
        if not downloadonly
        else list_downloaded()
    )
    ret = salt.utils.data.compare_dicts(old, new)
    # compare_dicts() won't show reinstalled packages (version unchanged),
    # so force an entry into the report for them.
    for pkgname, _ in to_reinstall:
        if pkgname not in ret or pkgname in old:
            ret.update(
                {pkgname: {"old": old.get(pkgname, ""), "new": new.get(pkgname, "")}}
            )
    if unhold_prevented:
        errors.append(
            "The following package(s) could not be updated because they are "
            "being held: {}. Set 'update_holds' to True to temporarily "
            "unhold these packages so that they can be updated.".format(
                ", ".join(unhold_prevented)
            )
        )
    if errors:
        raise CommandExecutionError(
            "Error occurred installing{} package(s)".format(
                "/reinstalling" if to_reinstall else ""
            ),
            info={"errors": errors, "changes": ret},
        )
    return ret
def upgrade(
    name=None,
    pkgs=None,
    refresh=True,
    skip_verify=False,
    normalize=True,
    minimal=False,
    obsoletes=True,
    **kwargs
):
    """
    Run a full system upgrade (a ``yum upgrade`` or ``dnf upgrade``), or
    upgrade specified packages. If the packages aren't installed, they will
    not be installed.
    .. versionchanged:: 2014.7.0
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any yum/dnf commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
    .. versionchanged:: 2019.2.0
        Added ``obsoletes`` and ``minimal`` arguments
    Returns a dictionary containing the changes:
    .. code-block:: python
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade
        salt '*' pkg.upgrade name=openssl
    Repository Options:
    fromrepo
        Specify a package repository (or repositories) from which to install.
        (e.g., ``yum --disablerepo='*' --enablerepo='somerepo'``)
    enablerepo (ignored if ``fromrepo`` is specified)
        Specify a disabled package repository (or repositories) to enable.
        (e.g., ``yum --enablerepo='somerepo'``)
    disablerepo (ignored if ``fromrepo`` is specified)
        Specify an enabled package repository (or repositories) to disable.
        (e.g., ``yum --disablerepo='somerepo'``)
    disableexcludes
        Disable exclude from main, for a repo or for everything.
        (e.g., ``yum --disableexcludes='main'``)
        .. versionadded:: 2014.7
    name
        The name of the package to be upgraded. Note that this parameter is
        ignored if "pkgs" is passed.
        32-bit packages can be upgraded on 64-bit systems by appending the
        architecture designation (``.i686``, ``.i586``, etc.) to the end of the
        package name.
        Warning: if you forget 'name=' and run pkg.upgrade openssl, ALL packages
        are upgraded. This will be addressed in next releases.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.upgrade name=openssl
        .. versionadded:: 2016.3.0
    pkgs
        A list of packages to upgrade from a software repository. Must be
        passed as a python list. A specific version number can be specified
        by using a single-element dict representing the package and its
        version. If the package was not already installed on the system,
        it will not be installed.
        CLI Examples:
        .. code-block:: bash
            salt '*' pkg.upgrade pkgs='["foo", "bar"]'
            salt '*' pkg.upgrade pkgs='["foo", {"bar": "1.2.3-4.el5"}]'
        .. versionadded:: 2016.3.0
    normalize : True
        Normalize the package name by removing the architecture. This is useful
        for poorly created packages which might include the architecture as an
        actual part of the name such as kernel modules which match a specific
        kernel version.
        .. code-block:: bash
            salt -G role:nsd pkg.upgrade gpfs.gplbin-2.6.32-279.31.1.el6.x86_64 normalize=False
        .. versionadded:: 2016.3.0
    minimal : False
        Use upgrade-minimal instead of upgrade (e.g., ``yum upgrade-minimal``)
        Goes to the 'newest' package match which fixes a problem that affects your system.
        .. code-block:: bash
            salt '*' pkg.upgrade minimal=True
        .. versionadded:: 2019.2.0
    obsoletes : True
        Controls whether yum/dnf should take obsoletes into account and remove them.
        If set to ``False`` yum will use ``update`` instead of ``upgrade``
        and dnf will be run with ``--obsoletes=False``
        .. code-block:: bash
            salt '*' pkg.upgrade obsoletes=False
        .. versionadded:: 2019.2.0
    setopt
        A comma-separated or Python list of key=value options. This list will
        be expanded and ``--setopt`` prepended to each in the yum/dnf command
        that is run.
        .. versionadded:: 2019.2.0
    .. note::
        To add extra arguments to the ``yum upgrade`` command, pass them as key
        word arguments. For arguments without assignments, pass ``True``
    .. code-block:: bash
        salt '*' pkg.upgrade security=True exclude='kernel*'
    """
    options = _get_options(get_extra_options=True, **kwargs)
    if salt.utils.data.is_true(refresh):
        refresh_db(**kwargs)
    # Pre-upgrade snapshot for the changes report.
    old = list_pkgs()
    targets = []
    if name or pkgs:
        try:
            pkg_params = __salt__["pkg_resource.parse_targets"](
                name=name, pkgs=pkgs, sources=None, normalize=normalize, **kwargs
            )[0]
        except MinionError as exc:
            raise CommandExecutionError(exc)
        if pkg_params:
            # pkg_params is a dict; extending iterates its keys (the names).
            targets.extend(pkg_params)
    cmd = ["--quiet", "-y"]
    cmd.extend(options)
    if skip_verify:
        cmd.append("--nogpgcheck")
    # Pick the subcommand. With obsoletes enabled both package managers use
    # "upgrade"; with it disabled, dnf takes a flag while yum needs "update".
    verb = "upgrade" if not minimal else "upgrade-minimal"
    if not obsoletes:
        if _yum() == "dnf":
            # for dnf we can just disable obsoletes
            cmd.append("--obsoletes=False")
        else:
            # for yum we have to use update instead of upgrade
            verb = "update" if not minimal else "update-minimal"
    cmd.append(verb)
    cmd.extend(targets)
    result = _call_yum(cmd)
    # Invalidate the cached package list before re-querying.
    __context__.pop("pkg.list_pkgs", None)
    ret = salt.utils.data.compare_dicts(old, list_pkgs())
    if result["retcode"] != 0:
        raise CommandExecutionError(
            "Problem encountered upgrading packages",
            info={"changes": ret, "result": result},
        )
    return ret
def update(
    name=None,
    pkgs=None,
    refresh=True,
    skip_verify=False,
    normalize=True,
    minimal=False,
    obsoletes=False,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0
    Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with
    ``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.
    See :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for
    further documentation.
    .. code-block:: bash
        salt '*' pkg.update
    """
    # Same as upgrade() except obsoletes defaults to False here; forward
    # everything by keyword for clarity.
    return upgrade(
        name=name,
        pkgs=pkgs,
        refresh=refresh,
        skip_verify=skip_verify,
        normalize=normalize,
        minimal=minimal,
        obsoletes=obsoletes,
        **kwargs
    )
def remove(name=None, pkgs=None, **kwargs):  # pylint: disable=W0613
    """
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any yum/dnf commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
    Remove packages
    name
        The name of the package to be removed
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
        .. versionadded:: 0.16.0
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    """
    try:
        pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)
    old = list_pkgs()
    targets = []
    for target, version_spec in pkg_params.items():
        # Only attempt removal of packages that are actually installed;
        # old[target] holds a comma-separated list of installed versions.
        if target not in old:
            continue
        if not version_spec:
            targets.append(target)
        elif version_spec in old[target].split(","):
            # Strip a trailing arch designation (e.g. ".x86_64") so the
            # version can be slotted between name and arch.
            pkgname = target
            arch = ""
            try:
                namepart, archpart = target.rsplit(".", 1)
            except ValueError:
                pass
            else:
                if archpart in salt.utils.pkg.rpm.ARCHES:
                    arch = "." + archpart
                    pkgname = namepart
            targets.append("{}-{}{}".format(pkgname, version_spec, arch))
    if not targets:
        return {}
    out = _call_yum(["-y", "remove"] + targets)
    errors = (
        [out["stderr"]] if out["retcode"] != 0 and out["stderr"] else []
    )
    # Invalidate the cached package list before re-querying.
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)
    if errors:
        raise CommandExecutionError(
            "Error occurred removing package(s)",
            info={"errors": errors, "changes": ret},
        )
    return ret
def purge(name=None, pkgs=None, **kwargs):  # pylint: disable=W0613
    """
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any yum/dnf commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
    Package purges are not supported by yum, this function is identical to
    :mod:`pkg.remove <salt.modules.yumpkg.remove>`.
    name
        The name of the package to be purged
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
        .. versionadded:: 0.16.0
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.purge <package name>
        salt '*' pkg.purge <package1>,<package2>,<package3>
        salt '*' pkg.purge pkgs='["foo", "bar"]'
    """
    # yum has no concept of purging config files; removal is the closest
    # equivalent, so delegate directly to remove().
    return remove(name=name, pkgs=pkgs)
def hold(
    name=None, pkgs=None, sources=None, normalize=True, **kwargs
):  # pylint: disable=W0613
    """
    .. versionadded:: 2014.7.0
    Version-lock packages
    .. note::
        Requires the appropriate ``versionlock`` plugin package to be installed:
        - On RHEL 5: ``yum-versionlock``
        - On RHEL 6 & 7: ``yum-plugin-versionlock``
        - On Fedora: ``python-dnf-plugins-extras-versionlock``
    name
        The name of the package to be held.
    Multiple Package Options:
    pkgs
        A list of packages to hold. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.hold <package name>
        salt '*' pkg.hold pkgs='["foo", "bar"]'
    """
    # Raises if the yum/dnf versionlock plugin is not installed.
    _check_versionlock()
    if not name and not pkgs and not sources:
        raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
    if pkgs and sources:
        raise SaltInvocationError("Only one of pkgs or sources can be specified.")
    # Build the list of package names to lock. ``sources`` entries are
    # single-key dicts ({name: uri}); only the name matters here.
    targets = []
    if pkgs:
        targets.extend(pkgs)
    elif sources:
        for source in sources:
            targets.append(next(iter(source.keys())))
    else:
        targets.append(name)
    # Existing locks as bare package names, for the already-held check below.
    current_locks = list_holds(full=False)
    ret = {}
    for target in targets:
        if isinstance(target, dict):
            # pkgs entries may also be single-key dicts ({name: version}).
            target = next(iter(target.keys()))
        ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""}
        if target not in current_locks:
            if "test" in __opts__ and __opts__["test"]:
                # Test mode: report what would happen without locking.
                ret[target].update(result=None)
                ret[target]["comment"] = "Package {} is set to be held.".format(target)
            else:
                out = _call_yum(["versionlock", target])
                if out["retcode"] == 0:
                    ret[target].update(result=True)
                    ret[target]["comment"] = "Package {} is now being held.".format(
                        target
                    )
                    ret[target]["changes"]["new"] = "hold"
                    ret[target]["changes"]["old"] = ""
                else:
                    ret[target]["comment"] = "Package {} was unable to be held.".format(
                        target
                    )
        else:
            # Already locked: succeed without making any change.
            ret[target].update(result=True)
            ret[target]["comment"] = "Package {} is already set to be held.".format(
                target
            )
    return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs):  # pylint: disable=W0613
    """
    .. versionadded:: 2014.7.0
    Remove version locks
    .. note::
        Requires the appropriate ``versionlock`` plugin package to be installed:
        - On RHEL 5: ``yum-versionlock``
        - On RHEL 6 & 7: ``yum-plugin-versionlock``
        - On Fedora: ``python-dnf-plugins-extras-versionlock``
    name
        The name of the package to be unheld
    Multiple Package Options:
    pkgs
        A list of packages to unhold. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.unhold <package name>
        salt '*' pkg.unhold pkgs='["foo", "bar"]'
    """
    _check_versionlock()

    # Validate the mutually-exclusive target selectors.
    if not name and not pkgs and not sources:
        raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
    if pkgs and sources:
        raise SaltInvocationError("Only one of pkgs or sources can be specified.")

    if pkgs:
        targets = list(pkgs)
    elif sources:
        targets = [next(iter(source)) for source in sources]
    else:
        targets = [name]

    # Yum's versionlock plugin doesn't support passing just the package name
    # when removing a lock, so we need to get the full list and then use
    # fnmatch below to find the match.
    current_locks = list_holds(full=_yum() == "yum")
    ret = {}
    for target in targets:
        if isinstance(target, dict):
            target = next(iter(target.keys()))

        result = {"name": target, "changes": {}, "result": False, "comment": ""}
        ret[target] = result

        if _yum() == "dnf":
            # dnf accepts exact lock names, so a direct comparison suffices.
            search_locks = [x for x in current_locks if x == target]
        else:
            # To accommodate yum versionlock's lack of support for removing
            # locks using just the package name, we have to use fnmatch to do
            # glob matching on the target name, and then for each matching
            # expression double-check that the package name (obtained via
            # _get_hold()) matches the targeted package.
            search_locks = [
                x
                for x in current_locks
                if fnmatch.fnmatch(x, "*{}*".format(target))
                and target == _get_hold(x, full=False)
            ]

        if not search_locks:
            result["result"] = True
            result["comment"] = "Package {} is not being held.".format(target)
            continue

        if __opts__["test"]:
            # Test mode: report the pending change without applying it.
            result["result"] = None
            result["comment"] = "Package {} is set to be unheld.".format(target)
        else:
            out = _call_yum(["versionlock", "delete"] + search_locks)
            if out["retcode"] == 0:
                result["result"] = True
                result["comment"] = "Package {} is no longer held.".format(target)
                result["changes"]["new"] = ""
                result["changes"]["old"] = "hold"
            else:
                result["comment"] = "Package {} was unable to be unheld.".format(target)
    return ret
def list_holds(pattern=__HOLD_PATTERN, full=True):
    r"""
    .. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
        Function renamed from ``pkg.get_locked_pkgs`` to ``pkg.list_holds``.
    List information on locked packages
    .. note::
        Requires the appropriate ``versionlock`` plugin package to be installed:
        - On RHEL 5: ``yum-versionlock``
        - On RHEL 6 & 7: ``yum-plugin-versionlock``
        - On Fedora: ``python-dnf-plugins-extras-versionlock``
    pattern : \w+(?:[.-][^-]+)*
        Regular expression used to match the package name
    full : True
        Show the full hold definition including version and epoch. Set to
        ``False`` to return just the name of the package(s) being held.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_holds
        salt '*' pkg.list_holds full=False
    """
    _check_versionlock()

    listing = __salt__["cmd.run"]([_yum(), "versionlock", "list"], python_shell=False)
    # _get_hold() returns None for lines that are not lock entries (or that do
    # not match ``pattern``); keep only real matches.
    parsed = (
        _get_hold(line, pattern=pattern, full=full)
        for line in salt.utils.itertools.split(listing, "\n")
    )
    return [entry for entry in parsed if entry is not None]
# Backwards-compatible alias: ``pkg.get_locked_packages`` is the pre-rename
# name of :py:func:`list_holds` (see the versionchanged note in its docstring).
get_locked_packages = salt.utils.functools.alias_function(
    list_holds, "get_locked_packages"
)
def verify(*names, **kwargs):
    """
    .. versionadded:: 2014.1.0
    Runs an rpm -Va on a system, and returns the results in a dict
    Pass options to modify rpm verify behavior using the ``verify_options``
    keyword argument
    Files with an attribute of config, doc, ghost, license or readme in the
    package header can be ignored using the ``ignore_types`` keyword argument
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.verify
        salt '*' pkg.verify httpd
        salt '*' pkg.verify 'httpd postfix'
        salt '*' pkg.verify 'httpd postfix' ignore_types=['config','doc']
        salt '*' pkg.verify 'httpd postfix' verify_options=['nodeps','nosize']
    """
    # Thin wrapper: all verification logic lives in the lowpkg (rpm) module.
    return __salt__["lowpkg.verify"](*names, **kwargs)
def group_list():
    """
    .. versionadded:: 2014.1.0
    Lists all groups known by yum on this system
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.group_list
    """
    # Maps the (lower-cased) section headers of ``yum grouplist hidden``
    # output onto the keys of the return dict.
    section_map = {
        "installed groups:": "installed",
        "available groups:": "available",
        "installed environment groups:": "installed environments",
        "available environment groups:": "available environments",
        "available language groups:": "available languages",
    }
    ret = {
        "installed": [],
        "available": [],
        "installed environments": [],
        "available environments": [],
        "available languages": {},
    }
    out = __salt__["cmd.run_stdout"](
        [_yum(), "grouplist", "hidden"], output_loglevel="trace", python_shell=False
    )
    current_section = None
    for raw_line in salt.utils.itertools.split(out, "\n"):
        lowered = raw_line.lower()
        if lowered == "done":
            break
        mapped = section_map.get(lowered)
        if mapped is not None and mapped != current_section:
            # Entering a new section of the output.
            current_section = mapped
            continue
        # Ignore any administrative comments (plugin info, repo info, etc.)
        if current_section is None:
            continue
        entry = raw_line.strip()
        if current_section == "available languages":
            # Language groups look like "Name [code]".
            lang_match = re.match(r"(.+) \[(.+)\]", entry)
            if lang_match:
                lang_name, lang_code = lang_match.groups()
                ret[current_section][entry] = {
                    "name": lang_name,
                    "language": lang_code,
                }
        else:
            ret[current_section].append(entry)
    return ret
def group_info(name, expand=False, ignore_groups=None):
    """
    .. versionadded:: 2014.1.0
    .. versionchanged:: 3001,2016.3.0,2015.8.4,2015.5.10
        The return data has changed. A new key ``type`` has been added to
        distinguish environment groups from package groups. Also, keys for the
        group name and group ID have been added. The ``mandatory packages``,
        ``optional packages``, and ``default packages`` keys have been renamed
        to ``mandatory``, ``optional``, and ``default`` for accuracy, as
        environment groups include other groups, and not packages. Finally,
        this function now properly identifies conditional packages.
    Lists packages belonging to a certain group
    name
        Name of the group to query
    expand : False
        If the specified group is an environment group, then the group will be
        expanded and the return data will include package names instead of
        group names.
        .. versionadded:: 2016.3.0
    ignore_groups : None
        This parameter can be used to pass a list of groups to ignore when
        expanding subgroups. It is used during recursion in order to prevent
        expanding the same group multiple times.
        .. versionadded:: 3001
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.group_info 'Perl Support'
    """
    pkgtypes = ("mandatory", "optional", "default", "conditional")
    ret = {}
    # Members are collected in sets so duplicates from expanded subgroups
    # collapse; they are converted to sorted lists before returning.
    for pkgtype in pkgtypes:
        ret[pkgtype] = set()
    cmd = [_yum(), "--quiet", "groupinfo", name]
    out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
    # First pass: collect the "Key: Value" header lines of the groupinfo
    # output (group name, id, description, ...). Lines without exactly one
    # colon raise ValueError on unpacking and are skipped.
    g_info = {}
    for line in salt.utils.itertools.split(out, "\n"):
        try:
            key, value = [x.strip() for x in line.split(":")]
            g_info[key.lower()] = value
        except ValueError:
            continue
    if "environment group" in g_info:
        ret["type"] = "environment group"
    elif "group" in g_info:
        ret["type"] = "package group"
    ret["group"] = g_info.get("environment group") or g_info.get("group")
    ret["id"] = g_info.get("environment-id") or g_info.get("group-id")
    if not ret["group"] and not ret["id"]:
        raise CommandExecutionError("Group '{}' not found".format(name))
    ret["description"] = g_info.get("description", "")
    completed_groups = ignore_groups or []
    pkgtypes_capturegroup = "(" + "|".join(pkgtypes) + ")"
    # Second pass: for each member type, scan the output for its section
    # header and collect the entries listed until the next section begins.
    for pkgtype in pkgtypes:
        target_found = False
        for line in salt.utils.itertools.split(out, "\n"):
            # Entries are indented/bulleted; strip leading punctuation markers.
            line = line.strip().lstrip(string.punctuation)
            match = re.match(
                pkgtypes_capturegroup + r" (?:groups|packages):\s*$", line.lower()
            )
            if match:
                if target_found:
                    # We've reached a new section, break from loop
                    break
                else:
                    if match.group(1) == pkgtype:
                        # We've reached the targeted section
                        target_found = True
                    continue
            if target_found:
                if expand and ret["type"] == "environment group":
                    # Recurse into subgroups, tracking already-expanded group
                    # names so a group is never expanded twice.
                    if not line or line in completed_groups:
                        continue
                    log.trace(
                        'Adding group "%s" to completed list: %s',
                        line,
                        completed_groups,
                    )
                    completed_groups.append(line)
                    # Using the @ prefix on the group here in order to prevent multiple matches
                    # being returned, such as with gnome-desktop
                    expanded = group_info(
                        "@" + line, expand=True, ignore_groups=completed_groups
                    )
                    # Don't shadow the pkgtype variable from the outer loop
                    for p_type in pkgtypes:
                        ret[p_type].update(set(expanded[p_type]))
                else:
                    ret[pkgtype].add(line)
    for pkgtype in pkgtypes:
        ret[pkgtype] = sorted(ret[pkgtype])
    return ret
def group_diff(name):
    """
    .. versionadded:: 2014.1.0
    .. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
        Environment groups are now supported. The key names have been renamed,
        similar to the changes made in :py:func:`pkg.group_info
        <salt.modules.yumpkg.group_info>`.
    Lists which of a group's packages are installed and which are not
    installed
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.group_diff 'Perl Support'
    """
    pkgtypes = ("mandatory", "optional", "default", "conditional")
    ret = {pkgtype: {"installed": [], "not installed": []} for pkgtype in pkgtypes}
    installed_pkgs = list_pkgs()
    group_pkgs = group_info(name, expand=True)
    for pkgtype in pkgtypes:
        for member in group_pkgs.get(pkgtype, []):
            # Sort each group member into the appropriate bucket.
            bucket = "installed" if member in installed_pkgs else "not installed"
            ret[pkgtype][bucket].append(member)
    return ret
def group_install(name, skip=(), include=(), **kwargs):
    """
    .. versionadded:: 2014.1.0
    Install the passed package group(s). This is basically a wrapper around
    :py:func:`pkg.install <salt.modules.yumpkg.install>`, which performs
    package group resolution for the user. This function is currently
    considered experimental, and should be expected to undergo changes.
    name
        Package group to install. To install more than one group, either use a
        comma-separated list or pass the value as a python list.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.group_install 'Group 1'
        salt '*' pkg.group_install 'Group 1,Group 2'
        salt '*' pkg.group_install '["Group 1", "Group 2"]'
    skip
        Packages that would normally be installed by the package group
        ("default" packages), which should not be installed. Can be passed
        either as a comma-separated list or a python list.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.group_install 'My Group' skip='foo,bar'
        salt '*' pkg.group_install 'My Group' skip='["foo", "bar"]'
    include
        Packages which are included in a group, which would not normally be
        installed by a ``yum groupinstall`` ("optional" packages). Note that
        this will not enforce group membership; if you include packages which
        are not members of the specified groups, they will still be installed.
        Can be passed either as a comma-separated list or a python list.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.group_install 'My Group' include='foo,bar'
        salt '*' pkg.group_install 'My Group' include='["foo", "bar"]'
    .. note::
        Because this is essentially a wrapper around pkg.install, any argument
        which can be passed to pkg.install may also be included here, and it
        will be passed along wholesale.
    """
    # Normalize ``name`` into a list of group names.
    groups = name.split(",") if isinstance(name, str) else name
    if not groups:
        raise SaltInvocationError("no groups specified")
    elif not isinstance(groups, list):
        raise SaltInvocationError("'groups' must be a list")
    # Normalize ``skip`` and ``include`` into lists as well.
    # pylint: disable=maybe-no-member
    if isinstance(skip, str):
        skip = skip.split(",")
    if not isinstance(skip, (list, tuple)):
        raise SaltInvocationError("'skip' must be a list")
    if isinstance(include, str):
        include = include.split(",")
    if not isinstance(include, (list, tuple)):
        raise SaltInvocationError("'include' must be a list")
    # pylint: enable=maybe-no-member
    # Resolve every group to its member packages: all mandatory members plus
    # default members not explicitly skipped.
    targets = []
    for group in groups:
        group_detail = group_info(group)
        targets.extend(group_detail.get("mandatory", []))
        targets.extend(
            [pkg for pkg in group_detail.get("default", []) if pkg not in skip]
        )
    if include:
        targets.extend(include)
    # Don't install packages that are already installed, install() isn't smart
    # enough to make this distinction. Query the installed package list once,
    # rather than once per target as before -- list_pkgs() is expensive.
    installed = list_pkgs()
    pkgs = [x for x in targets if x not in installed]
    if not pkgs:
        return {}
    return install(pkgs=pkgs, **kwargs)
# ``pkg.groupinstall`` is a convenience alias of :py:func:`group_install`.
groupinstall = salt.utils.functools.alias_function(group_install, "groupinstall")
def list_repos(basedir=None, **kwargs):
    """
    Lists all repos in <basedir> (default: all dirs in `reposdir` yum option).
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_repos
        salt '*' pkg.list_repos basedir=/path/to/dir
        salt '*' pkg.list_repos basedir=/path/to/dir,/path/to/another/dir
    """
    repos = {}
    basedirs = _normalize_basedir(basedir)
    log.debug("Searching for repos in %s", basedirs)
    for bdir in basedirs:
        if not os.path.exists(bdir):
            continue
        # Only *.repo files are yum repo configuration.
        repo_files = [entry for entry in os.listdir(bdir) if entry.endswith(".repo")]
        for repofile in repo_files:
            repopath = "{}/{}".format(bdir, repofile)
            filerepos = _parse_repo_file(repopath)[1]
            for reponame, repodata in filerepos.items():
                # Record which file each repo definition came from.
                repodata["file"] = repopath
                repos[reponame] = repodata
    return repos
def get_repo(repo, basedir=None, **kwargs):  # pylint: disable=W0613
    """
    Display a repo from <basedir> (default basedir: all dirs in ``reposdir``
    yum option).
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.get_repo myrepo
        salt '*' pkg.get_repo myrepo basedir=/path/to/dir
        salt '*' pkg.get_repo myrepo basedir=/path/to/dir,/path/to/another/dir
    """
    repos = list_repos(basedir)
    if repo.startswith("copr:"):
        # Translate a copr:... spec into the repo id used on disk.
        repo = _get_copr_repo(repo)
    # Find out what file the repo lives in
    repofile = ""
    for candidate in repos:
        if candidate == repo:
            repofile = repos[candidate]["file"]
    if not repofile:
        return {}
    # Return just one repo
    filerepos = _parse_repo_file(repofile)[1]
    return filerepos[repo]
def del_repo(repo, basedir=None, **kwargs):  # pylint: disable=W0613
    """
    Delete a repo from <basedir> (default basedir: all dirs in `reposdir` yum
    option).
    If the .repo file in which the repo exists does not contain any other repo
    configuration, the file itself will be deleted.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.del_repo myrepo
        salt '*' pkg.del_repo myrepo basedir=/path/to/dir
        salt '*' pkg.del_repo myrepo basedir=/path/to/dir,/path/to/another/dir
    """
    if repo.startswith("copr:"):
        # Translate a copr:... spec into the repo id used on disk.
        repo = _get_copr_repo(repo)
    # this is so we know which dirs are searched for our error messages below
    basedirs = _normalize_basedir(basedir)
    repos = list_repos(basedirs)
    if repo not in repos:
        return "Error: the {} repo does not exist in {}".format(repo, basedirs)
    # Find out what file the repo lives in
    repofile = ""
    for arepo in repos:
        if arepo == repo:
            repofile = repos[arepo]["file"]
    # See if the repo is the only one in the file
    onlyrepo = True
    for arepo in repos:
        if arepo == repo:
            continue
        if repos[arepo]["file"] == repofile:
            onlyrepo = False
    # If this is the only repo in the file, delete the file itself
    if onlyrepo:
        os.remove(repofile)
        return "File {} containing repo {} has been removed".format(repofile, repo)
    # There must be other repos in this file, write the file with them
    header, filerepos = _parse_repo_file(repofile)
    content = header
    for stanza in filerepos.keys():
        # Skip the repo being deleted; every other stanza is re-serialized.
        if stanza == repo:
            continue
        comments = ""
        if "comments" in filerepos[stanza].keys():
            # Re-render the comments captured by _parse_repo_file(), and drop
            # the synthetic "comments" key so it isn't written as an option.
            comments = salt.utils.pkg.rpm.combine_comments(
                filerepos[stanza]["comments"]
            )
            del filerepos[stanza]["comments"]
        content += "\n[{}]".format(stanza)
        for line in filerepos[stanza]:
            # A whitespace is needed at the beginning of the new line in order
            # to avoid breaking multiple line values allowed on repo files.
            value = filerepos[stanza][line]
            if isinstance(value, str) and "\n" in value:
                value = "\n ".join(value.split("\n"))
            content += "\n{}={}".format(line, value)
        content += "\n{}\n".format(comments)
    with salt.utils.files.fopen(repofile, "w") as fileout:
        fileout.write(salt.utils.stringutils.to_str(content))
    return "Repo {} has been removed from {}".format(repo, repofile)
def mod_repo(repo, basedir=None, **kwargs):
    """
    Modify one or more values for a repo. If the repo does not exist, it will
    be created, so long as the following values are specified:
    repo
        name by which the yum refers to the repo
    name
        a human-readable name for the repo
    baseurl
        the URL for yum to reference
    mirrorlist
        the URL for yum to reference
    Key/Value pairs may also be removed from a repo's configuration by setting
    a key to a blank value. Bear in mind that a name cannot be deleted, and a
    baseurl can only be deleted if a mirrorlist is specified (or vice versa).
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.mod_repo reponame enabled=1 gpgcheck=1
        salt '*' pkg.mod_repo reponame basedir=/path/to/dir enabled=1
        salt '*' pkg.mod_repo reponame baseurl= mirrorlist=http://host.com/
    """
    # Filter out '__pub' arguments, as well as saltenv
    repo_opts = {
        x: kwargs[x] for x in kwargs if not x.startswith("__") and x not in ("saltenv",)
    }
    if all(x in repo_opts for x in ("mirrorlist", "baseurl")):
        raise SaltInvocationError(
            "Only one of 'mirrorlist' and 'baseurl' can be specified"
        )
    use_copr = False
    if repo.startswith("copr:"):
        copr_name = repo.split(":", 1)[1]
        repo = _get_copr_repo(repo)
        use_copr = True
    # Build a list of keys to be deleted
    todelete = []
    # list() of keys because the dict could be shrinking in the for loop.
    for key in list(repo_opts):
        # A falsy (blank) value marks the key for deletion; an explicit 0 is a
        # legitimate option value and is kept.
        if repo_opts[key] != 0 and not repo_opts[key]:
            del repo_opts[key]
            todelete.append(key)
    # Add baseurl or mirrorlist to the 'todelete' list if the other was
    # specified in the repo_opts
    if "mirrorlist" in repo_opts:
        todelete.append("baseurl")
    elif "baseurl" in repo_opts:
        todelete.append("mirrorlist")
    # Fail if the user tried to delete the name
    if "name" in todelete:
        raise SaltInvocationError("The repo name cannot be deleted")
    # Give the user the ability to change the basedir
    repos = {}
    basedirs = _normalize_basedir(basedir)
    repos = list_repos(basedirs)
    repofile = ""
    header = ""
    filerepos = {}
    if repo not in repos:
        # If the repo doesn't exist, create it in a new file in the first
        # repo directory that exists
        newdir = None
        for d in basedirs:
            if os.path.exists(d):
                newdir = d
                break
        if not newdir:
            raise SaltInvocationError(
                "The repo does not exist and needs to be created, but none "
                "of the following basedir directories exist: {}".format(basedirs)
            )
        repofile = "{}/{}.repo".format(newdir, repo)
        if use_copr:
            # Is copr plugin installed?
            copr_plugin_name = ""
            if _yum() == "dnf":
                copr_plugin_name = "dnf-plugins-core"
            else:
                copr_plugin_name = "yum-plugin-copr"
            if not __salt__["pkg_resource.version"](copr_plugin_name):
                raise SaltInvocationError(
                    "{} must be installed to use COPR".format(copr_plugin_name)
                )
            # Enable COPR
            out = _call_yum(["copr", "enable", copr_name, "-y"])
            if out["retcode"]:
                raise CommandExecutionError(
                    "Unable to add COPR '{}'. '{}' exited with "
                    "status {!s}: '{}' ".format(
                        copr_name, _yum(), out["retcode"], out["stderr"]
                    )
                )
            # Repo has been added, update repos list
            repos = list_repos(basedirs)
            repofile = repos[repo]["file"]
            header, filerepos = _parse_repo_file(repofile)
        else:
            repofile = "{}/{}.repo".format(newdir, repo)
            # A brand-new (non-COPR) repo must at least define a name and one
            # of baseurl/mirrorlist.
            if "name" not in repo_opts:
                raise SaltInvocationError(
                    "The repo does not exist and needs to be created, but a name "
                    "was not given"
                )
            if "baseurl" not in repo_opts and "mirrorlist" not in repo_opts:
                raise SaltInvocationError(
                    "The repo does not exist and needs to be created, but either "
                    "a baseurl or a mirrorlist needs to be given"
                )
            filerepos[repo] = {}
    else:
        # The repo does exist, open its file
        repofile = repos[repo]["file"]
        header, filerepos = _parse_repo_file(repofile)
    # Error out if they tried to delete baseurl or mirrorlist improperly
    if "baseurl" in todelete:
        if "mirrorlist" not in repo_opts and "mirrorlist" not in filerepos[repo]:
            raise SaltInvocationError(
                "Cannot delete baseurl without specifying mirrorlist"
            )
    if "mirrorlist" in todelete:
        if "baseurl" not in repo_opts and "baseurl" not in filerepos[repo]:
            raise SaltInvocationError(
                "Cannot delete mirrorlist without specifying baseurl"
            )
    # Delete anything in the todelete list
    for key in todelete:
        if key in filerepos[repo].copy().keys():
            del filerepos[repo][key]
    _bool_to_str = lambda x: "1" if x else "0"
    # Old file or new, write out the repos(s)
    filerepos[repo].update(repo_opts)
    content = header
    for stanza in filerepos.keys():
        # Re-render the comments captured by _parse_repo_file(); pop() removes
        # the synthetic "comments" key so it isn't written as an option.
        comments = salt.utils.pkg.rpm.combine_comments(
            filerepos[stanza].pop("comments", [])
        )
        content += "[{}]\n".format(stanza)
        for line in filerepos[stanza].keys():
            # A whitespace is needed at the beginning of the new line in order
            # to avoid breaking multiple line values allowed on repo files.
            value = filerepos[stanza][line]
            if isinstance(value, str) and "\n" in value:
                value = "\n ".join(value.split("\n"))
            content += "{}={}\n".format(
                line, value if not isinstance(value, bool) else _bool_to_str(value)
            )
        content += comments + "\n"
    with salt.utils.files.fopen(repofile, "w") as fileout:
        fileout.write(salt.utils.stringutils.to_str(content))
    return {repofile: filerepos}
def _parse_repo_file(filename):
    """
    Turn a single repo file into a dict

    Returns a ``(header, config)`` tuple: ``header`` is the leading comment
    block (if any) as a string, and ``config`` maps each section name to a
    dict of its options, plus a synthetic ``comments`` list for in-section
    comment lines.
    """
    parsed = configparser.ConfigParser()
    config = {}
    try:
        parsed.read(filename)
    except configparser.MissingSectionHeaderError as err:
        log.error("Failed to parse file %s, error: %s", filename, err.message)
        return ("", {})
    # NOTE(review): this reads ConfigParser's private ``_sections`` attribute,
    # presumably to obtain raw (uninterpolated) values -- verify against the
    # configparser version in use before changing.
    for section in parsed._sections:
        section_dict = dict(parsed._sections[section])
        section_dict.pop("__name__", None)
        config[section] = section_dict
    # Try to extract header comments, as well as comments for each repo. Read
    # from the beginning of the file and assume any leading comments are
    # header comments. Continue to read each section header and then find the
    # comments for each repo.
    headers = ""
    section = None
    with salt.utils.files.fopen(filename, "r") as repofile:
        for line in repofile:
            line = salt.utils.stringutils.to_unicode(line)
            line = line.strip()
            if line.startswith("#"):
                if section is None:
                    # No section seen yet: part of the file header.
                    headers += line + "\n"
                else:
                    try:
                        comments = config[section].setdefault("comments", [])
                        comments.append(line[1:].lstrip())
                    except KeyError:
                        log.debug(
                            "Found comment in %s which does not appear to "
                            "belong to any repo section: %s",
                            filename,
                            line,
                        )
            elif line.startswith("[") and line.endswith("]"):
                # Track the current section so comments are attributed to it.
                section = line[1:-1]
    return (headers, salt.utils.data.decode(config))
def file_list(*packages, **kwargs):
    """
    .. versionadded:: 2014.1.0
    List the files that belong to a package. Not specifying any packages will
    return a list of *every* file on the system's rpm database (not generally
    recommended).
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.file_list httpd
        salt '*' pkg.file_list httpd postfix
        salt '*' pkg.file_list
    """
    # Thin wrapper: delegates to the lowpkg (rpm) module.
    return __salt__["lowpkg.file_list"](*packages)
def file_dict(*packages, **kwargs):
    """
    .. versionadded:: 2014.1.0
    List the files that belong to a package, grouped by package. Not
    specifying any packages will return a list of *every* file on the system's
    rpm database (not generally recommended).
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict
    """
    # Thin wrapper: delegates to the lowpkg (rpm) module.
    return __salt__["lowpkg.file_dict"](*packages)
def owner(*paths, **kwargs):
    """
    .. versionadded:: 2014.7.0
    Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
    single path is passed, a string will be returned, and if multiple paths are
    passed, a dictionary of file/package name pairs will be returned.
    If the file is not owned by a package, or is not present on the minion,
    then an empty string will be returned for that path.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
    """
    if not paths:
        return ""
    cmd_prefix = ["rpm", "-qf", "--queryformat", "%{name}"]
    ret = {}
    for path in paths:
        output = __salt__["cmd.run_stdout"](
            cmd_prefix + [path], output_loglevel="trace", python_shell=False
        )
        # rpm reports unowned/missing files with a "... not owned ..." message;
        # map those to an empty string.
        ret[path] = "" if "not owned" in output.lower() else output
    if len(ret) == 1:
        # Single path queried: return the bare string, not a dict.
        return next(iter(ret.values()))
    return ret
def modified(*packages, **flags):
    """
    List the modified files that belong to a package. Not specifying any packages
    will return a list of _all_ modified files on the system's RPM database.
    .. versionadded:: 2015.5.0
    Filtering by flags (True or False):
    size
        Include only files where size changed.
    mode
        Include only files whose mode has been changed.
    checksum
        Include only files whose MD5 checksum has been changed.
    device
        Include only files whose major and minor numbers have been changed.
    symlink
        Include only files which are symbolic link contents.
    owner
        Include only files where owner has been changed.
    group
        Include only files where group has been changed.
    time
        Include only files where modification time of the file has been
        changed.
    capabilities
        Include only files where capabilities differ or not. Note: supported
        only on newer RPM versions.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.modified
        salt '*' pkg.modified httpd
        salt '*' pkg.modified httpd postfix
        salt '*' pkg.modified httpd owner=True group=False
    """
    # Thin wrapper: delegates to the lowpkg (rpm) module.
    return __salt__["lowpkg.modified"](*packages, **flags)
@salt.utils.decorators.path.which("yumdownloader")
def download(*packages, **kwargs):
    """
    .. versionadded:: 2015.5.0
    Download packages to the local disk. Requires ``yumdownloader`` from
    ``yum-utils`` package.
    .. note::
        ``yum-utils`` will already be installed on the minion if the package
        was installed from the Fedora / EPEL repositories.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.download httpd
        salt '*' pkg.download httpd postfix
    """
    if not packages:
        raise SaltInvocationError("No packages were specified")
    CACHE_DIR = "/var/cache/yum/packages"
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    # Purge any previously-cached copies of the requested packages so stale
    # versions are not mistaken for the freshly-downloaded ones below.
    cached_pkgs = os.listdir(CACHE_DIR)
    to_purge = []
    for pkg in packages:
        to_purge.extend(
            [
                os.path.join(CACHE_DIR, x)
                for x in cached_pkgs
                if x.startswith("{}-".format(pkg))
            ]
        )
    for purge_target in set(to_purge):
        log.debug("Removing cached package %s", purge_target)
        try:
            os.unlink(purge_target)
        except OSError as exc:
            log.error("Unable to remove %s: %s", purge_target, exc)
    cmd = ["yumdownloader", "-q", "--destdir={}".format(CACHE_DIR)]
    cmd.extend(packages)
    __salt__["cmd.run"](cmd, output_loglevel="trace", python_shell=False)
    # Map each requested package name to the .rpm file that was downloaded by
    # matching on the "<name>-" filename prefix.
    ret = {}
    for dld_result in os.listdir(CACHE_DIR):
        if not dld_result.endswith(".rpm"):
            continue
        pkg_name = None
        pkg_file = None
        for query_pkg in packages:
            if dld_result.startswith("{}-".format(query_pkg)):
                pkg_name = query_pkg
                pkg_file = dld_result
                break
        if pkg_file is not None:
            ret[pkg_name] = os.path.join(CACHE_DIR, pkg_file)
    if not ret:
        raise CommandExecutionError(
            "Unable to download any of the following packages: {}".format(
                ", ".join(packages)
            )
        )
    # Partial success: report the failures alongside the downloaded paths.
    failed = [x for x in packages if x not in ret]
    if failed:
        ret["_error"] = "The following package(s) failed to download: {}".format(
            ", ".join(failed)
        )
    return ret
def diff(*paths, **kwargs):
    """
    Return a formatted diff between current files and original in a package.
    NOTE: this function includes all files (configuration and not), but does
    not work on binary content.
    :param path: Full path to the installed file
    :return: Difference string or raises and exception if examined file is binary.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.diff /etc/apache2/httpd.conf /etc/sudoers
    """
    ret = {}
    # Group the requested paths by owning package so each package is only
    # downloaded once.
    pkg_to_paths = {}
    for pth in paths:
        pth_pkg = __salt__["lowpkg.owner"](pth)
        if not pth_pkg:
            # Unowned path: distinguish a present-but-unmanaged file from a
            # path that does not exist at all. (Replaces the legacy
            # ``cond and a or b`` idiom with an explicit conditional.)
            ret[pth] = "Not managed" if os.path.exists(pth) else "N/A"
        else:
            pkg_to_paths.setdefault(pth_pkg, []).append(pth)
    if pkg_to_paths:
        local_pkgs = __salt__["pkg.download"](*pkg_to_paths.keys())
        for pkg, files in pkg_to_paths.items():
            for path in files:
                ret[path] = (
                    __salt__["lowpkg.diff"](local_pkgs[pkg]["path"], path)
                    or "Unchanged"
                )
    return ret
def _get_patches(installed_only=False):
    """
    List all known patches in repos.

    Parses ``yum/dnf --quiet updateinfo list all`` output into a dict keyed by
    advisory ID, each value carrying an ``installed`` flag and the list of
    affected packages under ``summary``. With ``installed_only=True``, only
    fully-installed advisories are returned.
    """
    patches = {}
    cmd = [_yum(), "--quiet", "updateinfo", "list", "all"]
    ret = __salt__["cmd.run_stdout"](cmd, python_shell=False)
    # Advisory lines look like: "<i|space> <advisory-id> <severity> <pkg>".
    advisory_re = re.compile(r"([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)")
    for line in salt.utils.itertools.split(ret, os.linesep):
        match = advisory_re.match(line)
        if match is None:
            # Skip lines that are not advisory entries (e.g. headers or
            # trailing output); previously these raised AttributeError.
            continue
        inst, advisory_id, _sev, pkg = match.groups()
        if advisory_id not in patches:
            patches[advisory_id] = {
                # 'i' in the first column marks an installed advisory package.
                "installed": True if inst == "i" else False,
                "summary": [pkg],
            }
        else:
            patches[advisory_id]["summary"].append(pkg)
            # An advisory counts as installed only if ALL its packages are.
            if inst != "i":
                patches[advisory_id]["installed"] = False
    if installed_only:
        patches = {k: v for k, v in patches.items() if v["installed"]}
    return patches
def list_patches(refresh=False, **kwargs):
    """
    .. versionadded:: 2017.7.0
    List all known advisory patches from available repos.
    refresh
        force a refresh if set to True.
        If set to False (default) it depends on yum if a refresh is
        executed.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.list_patches
    """
    if refresh:
        refresh_db()
    return _get_patches()
def list_installed_patches(**kwargs):
    """
    .. versionadded:: 2017.7.0
    List installed advisory patches on the system.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.list_installed_patches
    """
    # Same parser as list_patches(), filtered to fully-installed advisories.
    return _get_patches(installed_only=True)
| 33.492883 | 97 | 0.577246 |
21148a008c45016485efc8d5814dc743e78223c1 | 1,064 | py | Python | setup.py | dabarrell/flask-microservice-chassis | 435879b0f57a89d7d24ada7755fc9166381f93b9 | [
"MIT"
] | 2 | 2019-04-26T06:30:24.000Z | 2020-04-22T02:31:39.000Z | setup.py | dabarrell/flask-microservice-chassis | 435879b0f57a89d7d24ada7755fc9166381f93b9 | [
"MIT"
] | null | null | null | setup.py | dabarrell/flask-microservice-chassis | 435879b0f57a89d7d24ada7755fc9166381f93b9 | [
"MIT"
] | null | null | null | """
Flask-Chassis
-------------
Microservice chassis
"""
import io
import re
from setuptools import setup
# Read the package version out of flask_chassis/__init__.py without importing
# the package (it defines ``__version__ = '<x.y.z>'``).
with io.open('flask_chassis/__init__.py', encoding='utf-8') as f:
    version = re.search(r"__version__ = '(.+)'", f.read()).group(1)
# Standard setuptools metadata for the Flask-Chassis extension; the long
# description is taken from the module docstring at the top of this file.
setup(
    name='Flask-Chassis',
    version=version,
    url='https://github.com/dabarrell/flask-microservice-chassis',
    license='MIT',
    author='David Barrell',
    author_email='david@barrell.me',
    description='A basic microservice chassis for flask',
    long_description=__doc__,
    packages=['flask_chassis'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Flask'
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
) | 28 | 70 | 0.637218 |
30efe6fac3d3fd75326662d5deff29774ffed6b9 | 10,003 | py | Python | model/backbone_model/tsm/ops/dataset.py | AssistiveRoboticsUNH/temporal_feature_lfd | dfcbccad8ddcd26861e09f6e6bfa3c85446f9e64 | [
"MIT"
] | null | null | null | model/backbone_model/tsm/ops/dataset.py | AssistiveRoboticsUNH/temporal_feature_lfd | dfcbccad8ddcd26861e09f6e6bfa3c85446f9e64 | [
"MIT"
] | null | null | null | model/backbone_model/tsm/ops/dataset.py | AssistiveRoboticsUNH/temporal_feature_lfd | dfcbccad8ddcd26861e09f6e6bfa3c85446f9e64 | [
"MIT"
] | null | null | null | # Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
import torch.utils.data as data
from PIL import Image
import os
import numpy as np
from numpy.random import randint
class VideoRecord(object):
    """Read-only view over one row of the annotation list file.

    A row is a sequence of three fields: [frame directory, frame count, label].
    The numeric fields are stored as strings and converted on access.
    """

    def __init__(self, row):
        self._data = row

    @property
    def path(self):
        # Field 0: directory containing this video's extracted frames.
        return self._data[0]

    @property
    def num_frames(self):
        # Field 1: total number of frames, parsed to int.
        return int(self._data[1])

    @property
    def label(self):
        # Field 2: class index, parsed to int.
        return int(self._data[2])
class TSNDataSet(data.Dataset):
    def __init__(self, root_path, list_file,
                 num_segments=3, new_length=1, modality='RGB',
                 image_tmpl='img_{:05d}.jpg', transform=None,
                 random_shift=True, test_mode=False,
                 remove_missing=False, dense_sample=False, twice_sample=False):
        """Dataset of videos stored as per-video frame directories.

        root_path: base directory holding one sub-directory of frames per video.
        list_file: annotation file; each row is "<path> <num_frames> <label>".
        num_segments: number of temporal segments sampled per video.
        new_length: frames read per segment (incremented for RGBDiff below).
        modality: 'RGB', 'Flow', or 'RGBDiff' (see _load_image).
        image_tmpl: filename template used to locate individual frame images.
        transform: transform applied to the loaded images (assumed callable
            -- TODO confirm against caller).
        random_shift / test_mode / remove_missing: sampling/filtering flags
            consumed elsewhere in the class.
        dense_sample / twice_sample: alternative sampling strategies.
        """
        self.root_path = root_path
        self.list_file = list_file
        self.num_segments = num_segments
        self.new_length = new_length
        self.modality = modality
        self.image_tmpl = image_tmpl
        self.transform = transform
        self.random_shift = random_shift
        self.test_mode = test_mode
        self.remove_missing = remove_missing
        self.dense_sample = dense_sample  # using dense sample as I3D
        self.twice_sample = twice_sample  # twice sample for more validation
        if self.dense_sample:
            print('=> Using dense sample for the dataset...')
        if self.twice_sample:
            print('=> Using twice sample for the dataset...')
        if self.modality == 'RGBDiff':
            self.new_length += 1  # Diff needs one more image to calculate diff
        # Reads and filters list_file into self.video_list.
        self._parse_list()
def _load_image(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff':
try:
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
except Exception:
print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
elif self.modality == 'Flow':
if self.image_tmpl == 'flow_{}_{:05d}.jpg': # ucf
x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert(
'L')
y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert(
'L')
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg': # something v1 flow
x_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.
format(int(directory), 'x', idx))).convert('L')
y_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.
format(int(directory), 'y', idx))).convert('L')
else:
try:
# idx_skip = 1 + (idx-1)*5
flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert(
'RGB')
except Exception:
print('error loading flow file:',
os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')
# the input flow file is RGB image with (flow_x, flow_y, blank) for each channel
flow_x, flow_y, _ = flow.split()
x_img = flow_x.convert('L')
y_img = flow_y.convert('L')
return [x_img, y_img]
def _parse_list(self):
# check the frame number is large >3:
tmp = [x.strip().split(' ') for x in open(self.list_file)]
if not self.test_mode or self.remove_missing:
tmp = [item for item in tmp if int(item[1]) >= 3]
self.video_list = [VideoRecord(item) for item in tmp]
if self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
for v in self.video_list:
v._data[1] = int(v._data[1]) / 2
print('video number:%d' % (len(self.video_list)))
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
if self.dense_sample: # i3d dense sample
sample_pos = max(1, 1 + record.num_frames - 64)
t_stride = 64 // self.num_segments
start_idx = 0 if sample_pos == 1 else np.random.randint(0, sample_pos - 1)
offsets = [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
else: # normal sample
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,
size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
if self.dense_sample: # i3d dense sample
sample_pos = max(1, 1 + record.num_frames - 64)
t_stride = 64 // self.num_segments
start_idx = 0 if sample_pos == 1 else np.random.randint(0, sample_pos - 1)
offsets = [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
else:
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
if self.dense_sample:
sample_pos = max(1, 1 + record.num_frames - 64)
t_stride = 64 // self.num_segments
start_list = np.linspace(0, sample_pos - 1, num=10, dtype=int)
offsets = []
for start_idx in start_list.tolist():
offsets += [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
elif self.twice_sample:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)] +
[int(tick * x) for x in range(self.num_segments)])
return offsets + 1
else:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
print(record.path)
print(self.image_tmpl)
print(self.test_mode)
print('-----------')
# check this is a legit video folder
if self.image_tmpl == 'flow_{}_{:05d}.jpg':
file_name = self.image_tmpl.format('x', 1)
full_path = os.path.join(self.root_path, record.path, file_name)
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
file_name = self.image_tmpl.format(int(record.path), 'x', 1)
full_path = os.path.join(self.root_path, '{:06d}'.format(int(record.path)), file_name)
else:
file_name = self.image_tmpl.format(1)
full_path = os.path.join(self.root_path, record.path, file_name)
while not os.path.exists(full_path):
print('################## Not Found:', os.path.join(self.root_path, record.path, file_name))
index = np.random.randint(len(self.video_list))
record = self.video_list[index]
if self.image_tmpl == 'flow_{}_{:05d}.jpg':
file_name = self.image_tmpl.format('x', 1)
full_path = os.path.join(self.root_path, record.path, file_name)
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
file_name = self.image_tmpl.format(int(record.path), 'x', 1)
full_path = os.path.join(self.root_path, '{:06d}'.format(int(record.path)), file_name)
else:
file_name = self.image_tmpl.format(1)
full_path = os.path.join(self.root_path, record.path, file_name)
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
images = list()
print(indices)
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_imgs = self._load_image(record.path, p)
images.extend(seg_imgs)
if p < record.num_frames:
p += 1
print(images)
process_data = self.transform(images)
return process_data, record.label
def __len__(self):
return len(self.video_list)
| 45.262443 | 121 | 0.574628 |
72d02c5451c32c1b04afd2ea4c9a9b1fa372e46f | 3,793 | py | Python | metadata_gather/tests/metadata_extractor/test_json_extractor.py | d-montenegro/MetadataGatherer | c7388ce026c4b66b164eaf933cc65f109f83f368 | [
"MIT"
] | null | null | null | metadata_gather/tests/metadata_extractor/test_json_extractor.py | d-montenegro/MetadataGatherer | c7388ce026c4b66b164eaf933cc65f109f83f368 | [
"MIT"
] | null | null | null | metadata_gather/tests/metadata_extractor/test_json_extractor.py | d-montenegro/MetadataGatherer | c7388ce026c4b66b164eaf933cc65f109f83f368 | [
"MIT"
] | null | null | null | import pytest
from common import MetadataRecord
from metadata_extractor.json_extractor import extract_data_from_json, ExtractionError
from tests.utils import write_json
# Tests for extract_data_from_json.  Each test writes a JSON payload through
# the `temp_json_file` argument (presumably a pytest fixture yielding an open
# temporary file -- TODO confirm against conftest.py) and asserts the
# MetadataRecord stream produced by the extractor.
def test_json_with_single_int_field(temp_json_file):
    write_json(temp_json_file, [{'field': 50}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('field', 50)]
# Order of extracted records is not asserted directly; sorted() is used so
# the test only pins the multiset of records.
def test_json_with_several_int_fields(temp_json_file):
    write_json(temp_json_file, [{'field': idx} for idx in range(100)])
    records = list(extract_data_from_json(temp_json_file.name))
    assert sorted(records) == sorted([
        MetadataRecord('field', idx) for idx in range(100)])
def test_json_with_single_str_field(temp_json_file):
    write_json(temp_json_file, [{'field': 'string_value'}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('field', 'string_value')]
def test_json_with_several_str_field(temp_json_file):
    write_json(temp_json_file, [{'field': f'{idx}'} for idx in range(100)])
    records = list(extract_data_from_json(temp_json_file.name))
    assert sorted(records) == sorted([
        MetadataRecord('field', f'{idx}') for idx in range(100)])
# JSON null must round-trip as Python None.
def test_json_with_single_null_field(temp_json_file):
    write_json(temp_json_file, [{'field': None}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('field', None)]
def test_json_with_several_nulls_field(temp_json_file):
    write_json(temp_json_file, [{'field': None} for _ in range(100)])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('field', None)] * 100
# One object with several keys yields one record per key.
def test_json_with_multiple_fields(temp_json_file):
    write_json(temp_json_file, [{'field_one': 'abc', 'field_two': 50, 'field_three': None}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert sorted(records) == sorted([
        MetadataRecord('field_one', 'abc'),
        MetadataRecord('field_two', 50),
        MetadataRecord('field_three', None),
    ])
def test_json_with_multiple_fields_multiple_rows(temp_json_file):
    write_json(temp_json_file, [{'field_one': 'abc', 'field_two': 50} for _ in range(1000)])
    records = list(extract_data_from_json(temp_json_file.name))
    assert sorted(records) == sorted([MetadataRecord('field_one', 'abc')] * 1000 +
                                     [MetadataRecord('field_two', 50)] * 1000,
                                     )
# Non-ASCII keys and values must survive extraction unchanged.
def test_json_unicode_column_name(temp_json_file):
    write_json(temp_json_file, [{'短消息': 50}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('短消息', 50)]
def test_json_unicode_value(temp_json_file):
    write_json(temp_json_file, [{'field': '短消息'}])
    records = list(extract_data_from_json(temp_json_file.name))
    assert records == [MetadataRecord('field', '短消息')]
# A top-level object (not a list) is valid JSON but an invalid structure
# for the extractor, so it must raise ExtractionError with a clear message.
def test_unexpected_json_structure(temp_json_file):
    write_json(temp_json_file, {'field': 'value'})
    with pytest.raises(ExtractionError) as exc:
        list(extract_data_from_json(temp_json_file.name))
    info = exc.value
    assert info.args[0] == 'Invalid JSON structure. It must contain a list of objects'
# Malformed JSON content raises ExtractionError naming the offending file.
def test_non_json_file(temp_json_file):
    temp_json_file.write("This is a not JSON content")
    with pytest.raises(ExtractionError) as exc:
        list(extract_data_from_json(temp_json_file.name))
    info = exc.value
    assert info.args[0] == f"The file '{temp_json_file.name}' is not a valid JSON file"
# A nonexistent path raises ExtractionError rather than a bare OSError.
def test_missing_file():
    with pytest.raises(ExtractionError) as exc:
        list(extract_data_from_json("missing.json"))
    info = exc.value
    assert info.args[0] == "Could not open file 'missing.json'"
| 37.186275 | 92 | 0.719483 |
86644527f0e2261a79b74c607155d79f0108a597 | 84 | py | Python | suji/__init__.py | jikyo/suji4p | 00b7f6f0ac088810cbb379f6a1687db7d329a996 | [
"Apache-2.0"
] | 3 | 2021-02-16T10:53:33.000Z | 2021-09-21T07:55:09.000Z | suji/__init__.py | jikyo/suji4p | 00b7f6f0ac088810cbb379f6a1687db7d329a996 | [
"Apache-2.0"
] | 1 | 2021-02-15T11:19:43.000Z | 2021-02-24T07:29:03.000Z | suji/__init__.py | jikyo/suji4p | 00b7f6f0ac088810cbb379f6a1687db7d329a996 | [
"Apache-2.0"
] | null | null | null | from suji.converter import values, value
from suji.kansuji import kansujis, kansuji
| 28 | 42 | 0.833333 |
036e214c8cdfc6066e147dfc0dfc26c081ea9dad | 1,004 | py | Python | modules/entities.py | pilgun/app-run-and-log | 8dcef79676ba4f0ede237fdd624981121b98b6d7 | [
"Apache-2.0"
] | 1 | 2019-09-18T22:08:54.000Z | 2019-09-18T22:08:54.000Z | modules/entities.py | pilgun/app-run-and-log | 8dcef79676ba4f0ede237fdd624981121b98b6d7 | [
"Apache-2.0"
] | null | null | null | modules/entities.py | pilgun/app-run-and-log | 8dcef79676ba4f0ede237fdd624981121b98b6d7 | [
"Apache-2.0"
] | 3 | 2018-01-16T17:40:23.000Z | 2020-12-14T07:50:06.000Z | import csv
import glob
import os
import re
import subprocess
from pyaxmlparser import APK
class Csv(object):
    """Append-only CSV log of (package, status) rows.

    The backing file stays open for the object's lifetime, and a header row
    is written on every instantiation (the file is opened in append mode, so
    re-opening an existing log appends a second header).
    """
    def __init__(self, csv_path):
        self.csvfile = open(csv_path, 'a+', newline='')
        self.writer = csv.writer(self.csvfile)
        self.writer.writerow(['Package', 'Status'])
    def write_row(self, app, status):
        """Append one (package, status) row and flush it to disk."""
        self.writer.writerow([app, status])
        self.csvfile.flush()
    def close(self):
        """Close the underlying file; the object is unusable afterwards."""
        self.csvfile.close()
    def get_lines(self):
        """Count every line currently in the file (header included)."""
        self.csvfile.seek(0)
        return sum(1 for _ in self.csvfile)
    def get_crashes(self):
        """Count rows whose status field starts with 'c' (e.g. 'crash').

        NOTE(review): this matches the literal substring ',c' anywhere in
        the file, so it relies on statuses being the second column and no
        other field containing ',c'.
        """
        self.csvfile.seek(0)
        return self.csvfile.read().count(',c')
class Apk:
    """Metadata extracted from an APK file via pyaxmlparser's APK parser."""
    def __init__(self, path):
        # Path to the .apk file on disk; parsing happens eagerly here.
        self.path = path
        self.apk = APK(self.path)
        # HACK: `name` is a fixed slice of the path instead of
        # os.path.basename(path); the 51/-21 offsets assume one specific
        # directory layout (noted by the author as a temporary hack for
        # "ella") -- TODO confirm before reusing with other paths.
        self.name = path[51:-21]
        self.package = self.apk.package
        self.activity = self.apk.get_main_activity()
081774e1699d7f0afc518e37b834be38c0f0d175 | 1,505 | py | Python | solver/sampler.py | microsoft/DualOctreeGNN | 29eed84653d4f0c1681c8227714cf84e76c31abe | [
"MIT"
] | 2 | 2022-03-29T09:08:15.000Z | 2022-03-29T18:45:45.000Z | solver/sampler.py | microsoft/DualOctreeGNN | 29eed84653d4f0c1681c8227714cf84e76c31abe | [
"MIT"
] | null | null | null | solver/sampler.py | microsoft/DualOctreeGNN | 29eed84653d4f0c1681c8227714cf84e76c31abe | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Dual Octree Graph Networks
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Peng-Shuai Wang
# --------------------------------------------------------
import torch
from torch.utils.data import Sampler, DistributedSampler, Dataset
class InfSampler(Sampler):
  """Sampler that cycles over the dataset indefinitely.

  A fresh index order is generated every time a full pass completes; with
  ``shuffle`` enabled each pass uses a new random permutation, otherwise
  the natural order ``0..len-1`` repeats.
  """

  def __init__(self, dataset: Dataset, shuffle: bool = True) -> None:
    self.dataset = dataset
    self.shuffle = shuffle
    self.reset_sampler()

  def reset_sampler(self):
    """Build a fresh index order and rewind the cursor."""
    size = len(self.dataset)
    order = torch.randperm(size) if self.shuffle else torch.arange(size)
    self.indices = order.tolist()
    self.iter_num = 0

  def __iter__(self):
    return self

  def __next__(self):
    value = self.indices[self.iter_num]
    self.iter_num += 1
    if self.iter_num >= len(self.indices):
      # End of a pass: regenerate indices so iteration never stops.
      self.reset_sampler()
    return value

  def __len__(self):
    return len(self.dataset)
class DistributedInfSampler(DistributedSampler):
  """Distributed counterpart of :class:`InfSampler`.

  Each rank cycles forever over its own shard of the dataset, re-drawing
  the shard (via ``DistributedSampler.__iter__``) whenever a pass over the
  current indices completes.
  """

  def __init__(self, dataset: Dataset, shuffle: bool = True) -> None:
    super().__init__(dataset, shuffle=shuffle)
    self.reset_sampler()

  def reset_sampler(self):
    """Re-materialize this rank's index list and rewind the cursor."""
    self.indices = list(super().__iter__())
    self.iter_num = 0

  def __iter__(self):
    return self

  def __next__(self):
    value = self.indices[self.iter_num]
    self.iter_num += 1
    if self.iter_num >= len(self.indices):
      # End of a pass: rebuild this rank's shard and keep going.
      self.reset_sampler()
    return value
91dd1c0e6149d3d60b57690dee1cfe15098f87ab | 2,026 | py | Python | part7/app/frameworks_and_drivers/db/postgres.py | y-tomimoto/CleanArchitecture | dd7cc8966c29ad24bf39a8b0672eae6341004037 | [
"MIT"
] | 4 | 2021-09-03T04:03:38.000Z | 2021-12-26T14:59:50.000Z | part7/app/frameworks_and_drivers/db/postgres.py | y-tomimoto/CleanArchitecture | dd7cc8966c29ad24bf39a8b0672eae6341004037 | [
"MIT"
] | null | null | null | part7/app/frameworks_and_drivers/db/postgres.py | y-tomimoto/CleanArchitecture | dd7cc8966c29ad24bf39a8b0672eae6341004037 | [
"MIT"
] | null | null | null | # sqlクライアント用のconfig
from enterprise_business_rules.memo_data import MemoData
from interface_adapters.gataways.memo_repository_gateway import MemoRepositoryGateway
from werkzeug.exceptions import Conflict, NotFound
class PostgreSQL(MemoRepositoryGateway):
    """Memo repository backed by a PostgreSQL connection."""

    def __init__(self, connection):
        self.conn = connection
        # Every statement commits immediately; no explicit transactions.
        self.conn.autocommit = True

    def _fetchone(self, query, params):
        """Run ``query`` with ``params`` and return the first result row.

        The cursor is always closed, even when the query raises (the
        original code leaked cursors on query errors).
        """
        cursor = self.conn.cursor()
        try:
            cursor.execute(query, params)
            return cursor.fetchone()
        finally:
            cursor.close()

    def exist(self, memo_id) -> bool:
        """Return True when a memo with ``memo_id`` is already stored."""
        query = "SELECT EXISTS(SELECT * FROM test_table WHERE memo_id = %s)"
        result: tuple = self._fetchone(query, [memo_id])
        # EXISTS yields a single boolean/1 column.
        return result[0] == 1

    def get(self, memo_id: int) -> MemoData:
        """Fetch the memo stored under ``memo_id``.

        Raises:
            NotFound: when no memo with that id exists.
        """
        if not self.exist(memo_id):
            raise NotFound(f'memo_id [{memo_id}] is not registered yet.')
        query = "SELECT * FROM test_table WHERE memo_id = %s"
        result: tuple = self._fetchone(query, [memo_id])
        return MemoData(memo_id=memo_id, memo=result[1], memo_author=result[2])

    def save(self, memo_data: MemoData) -> bool:
        """Insert a new memo; returns True on success.

        Raises:
            Conflict: when ``memo_data.memo_id`` is already registered.
        """
        memo_id: int = memo_data.memo_id
        memo: str = memo_data.memo
        memo_author = memo_data.memo_author
        if self.exist(memo_id):
            raise Conflict(f'memo_id [{memo_id}] is already registered.')
        cursor = self.conn.cursor()
        try:
            query = "INSERT INTO test_table (memo_id, memo, memo_author) VALUES (%s, %s, %s)"
            cursor.execute(query, (memo_id, memo, memo_author))
        finally:
            cursor.close()
        return True
d1074d6c346cc83133d0c48e872c59418ff43b9c | 3,633 | py | Python | python/GafferImageUI/DisplayTransformUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferImageUI/DisplayTransformUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | python/GafferImageUI/DisplayTransformUI.py | pier-robot/gaffer | 9267f2ba3822b14430d8a283c745261110b0f570 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import PyOpenColorIO
import IECore
import Gaffer
import GafferUI
import GafferImage
from . import OpenColorIOTransformUI
def __displayPresetNames( plug ) :
	# Labels for the display preset menu : "None" plus every display
	# defined by the current OpenColorIO config.
	config = PyOpenColorIO.GetCurrentConfig()
	return IECore.StringVectorData( [ "None" ] + list( config.getDisplays() ) )
def __displayPresetValues( plug ) :
	# Values parallel to __displayPresetNames; "" selects no display.
	config = PyOpenColorIO.GetCurrentConfig()
	return IECore.StringVectorData( [ "" ] + list( config.getDisplays() ) )
def __viewPresetNames( plug ) :
	# Labels for the view preset menu, scoped to the display currently
	# chosen on the sibling "display" plug.
	config = PyOpenColorIO.GetCurrentConfig()
	display = plug.parent()["display"].getValue()
	return IECore.StringVectorData( [ "None" ] + list( config.getViews( display ) ) )
def __viewPresetValues( plug ) :
	# Values parallel to __viewPresetNames; "" selects no view.
	config = PyOpenColorIO.GetCurrentConfig()
	display = plug.parent()["display"].getValue()
	return IECore.StringVectorData( [ "" ] + list( config.getViews( display ) ) )
# Register UI metadata for GafferImage.DisplayTransform : node/plug
# descriptions plus dynamic preset menus driven by the current OpenColorIO
# config (via the preset callables above and OpenColorIOTransformUI).
Gaffer.Metadata.registerNode(
	GafferImage.DisplayTransform,
	"description",
	"""
	Applies color transformations provided by
	OpenColorIO via a DisplayTransform file and OCIO FileTransform.
	""",
	plugs = {
		"inputColorSpace" : [
			"description",
			"""
			The colour space of the input image.
			""",
			"presetNames", OpenColorIOTransformUI.colorSpacePresetNames,
			"presetValues", OpenColorIOTransformUI.colorSpacePresetValues,
			"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
		],
		"display" : [
			"description",
			"""
			The name of the display to use.
			""",
			"presetNames", __displayPresetNames,
			"presetValues", __displayPresetValues,
			"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
		],
		"view" : [
			"description",
			"""
			The name of the view to use.
			""",
			"presetNames", __viewPresetNames,
			"presetValues", __viewPresetValues,
			"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
		],
	}
)
f2541362ebdf20a9e13bd5da95c94232d5b08a33 | 5,014 | py | Python | server/function/modules/dense_motion.py | wisehackermonkey/deepfakes-openfaas-function | f5190f120ed3ed0208e4cd902e27f4c3f6b14174 | [
"MIT"
] | 1 | 2022-01-21T19:17:37.000Z | 2022-01-21T19:17:37.000Z | server/function/modules/dense_motion.py | wisehackermonkey/deepfakes-openfaas-function | f5190f120ed3ed0208e4cd902e27f4c3f6b14174 | [
"MIT"
] | null | null | null | server/function/modules/dense_motion.py | wisehackermonkey/deepfakes-openfaas-function | f5190f120ed3ed0208e4cd902e27f4c3f6b14174 | [
"MIT"
] | 1 | 2021-05-29T04:09:03.000Z | 2021-05-29T04:09:03.000Z | from torch import nn
import torch.nn.functional as F
import torch
from function.modules.util import Hourglass, AntiAliasInterpolation2d, make_coordinate_grid, kp2gaussian
class DenseMotionNetwork(nn.Module):
    """
    Module that predicts a dense motion field from the sparse motion
    representation given by kp_source and kp_driving.  Optionally also
    predicts an occlusion map (Sec. 3.2 of the paper).
    """
    def __init__(self, block_expansion, num_blocks, max_features, num_kp, num_channels, estimate_occlusion_map=False,
                 scale_factor=1, kp_variance=0.01):
        super(DenseMotionNetwork, self).__init__()
        # The hourglass consumes, per keypoint (+1 background channel), the
        # heatmap concatenated with the deformed source image.
        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp + 1) * (num_channels + 1),
                                   max_features=max_features, num_blocks=num_blocks)
        # Predicts soft assignment masks: one channel per keypoint plus one
        # for the background.
        self.mask = nn.Conv2d(self.hourglass.out_filters, num_kp + 1, kernel_size=(7, 7), padding=(3, 3))
        if estimate_occlusion_map:
            self.occlusion = nn.Conv2d(self.hourglass.out_filters, 1, kernel_size=(7, 7), padding=(3, 3))
        else:
            self.occlusion = None
        self.num_kp = num_kp
        self.scale_factor = scale_factor
        self.kp_variance = kp_variance
        if self.scale_factor != 1:
            # Anti-aliased downsampling applied to the source image first.
            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
    def create_heatmap_representations(self, source_image, kp_driving, kp_source):
        """
        Eq 6. in the paper H_k(z) : difference of Gaussians centred on the
        driving and source keypoints, plus an all-zero background channel.
        """
        spatial_size = source_image.shape[2:]
        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=self.kp_variance)
        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=self.kp_variance)
        heatmap = gaussian_driving - gaussian_source
        # adding background feature (a zero heatmap channel at index 0)
        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(heatmap.type())
        heatmap = torch.cat([zeros, heatmap], dim=1)
        heatmap = heatmap.unsqueeze(2)
        return heatmap
    def create_sparse_motions(self, source_image, kp_driving, kp_source):
        """
        Eq 4. in the paper T_{s<-d}(z) : one backward-warping coordinate
        grid per keypoint (plus the identity grid for the background).
        kp_*['value'] is viewed as (bs, num_kp, 1, 1, 2).
        """
        bs, _, h, w = source_image.shape
        identity_grid = make_coordinate_grid((h, w), type=kp_source['value'].type())
        identity_grid = identity_grid.view(1, 1, h, w, 2)
        coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 2)
        if 'jacobian' in kp_driving:
            # First-order correction: apply J_source * J_driving^-1 to the
            # local coordinates around each keypoint.
            jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))
            jacobian = jacobian.unsqueeze(-3).unsqueeze(-3)
            jacobian = jacobian.repeat(1, 1, h, w, 1, 1)
            coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))
            coordinate_grid = coordinate_grid.squeeze(-1)
        driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 2)
        # adding background feature (identity motion as channel 0)
        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1)
        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)
        return sparse_motions
    def create_deformed_source_image(self, source_image, sparse_motions):
        r"""
        Eq 7. in the paper \hat{T}_{s<-d}(z) : warp the source image once
        per (keypoint + background) motion using grid_sample.
        """
        bs, _, h, w = source_image.shape
        source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
        source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
        sparse_motions = sparse_motions.view((bs * (self.num_kp + 1), h, w, -1))
        sparse_deformed = F.grid_sample(source_repeat, sparse_motions)
        sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1, -1, h, w))
        return sparse_deformed
    def forward(self, source_image, kp_driving, kp_source):
        """Return a dict with 'sparse_deformed', 'mask', 'deformation' and,
        when enabled, 'occlusion_map'.  source_image is assumed to be a
        (bs, channels, h, w) batch -- inferred from the unpacking below."""
        if self.scale_factor != 1:
            source_image = self.down(source_image)
        bs, _, h, w = source_image.shape
        out_dict = dict()
        heatmap_representation = self.create_heatmap_representations(source_image, kp_driving, kp_source)
        sparse_motion = self.create_sparse_motions(source_image, kp_driving, kp_source)
        deformed_source = self.create_deformed_source_image(source_image, sparse_motion)
        out_dict['sparse_deformed'] = deformed_source
        # Concatenate heatmaps with deformed images per keypoint, then
        # flatten the keypoint dimension into channels for the hourglass.
        input = torch.cat([heatmap_representation, deformed_source], dim=2)
        input = input.view(bs, -1, h, w)
        prediction = self.hourglass(input)
        mask = self.mask(prediction)
        mask = F.softmax(mask, dim=1)
        out_dict['mask'] = mask
        mask = mask.unsqueeze(2)
        # Blend the per-keypoint motions by the predicted soft masks.
        sparse_motion = sparse_motion.permute(0, 1, 4, 2, 3)
        deformation = (sparse_motion * mask).sum(dim=1)
        deformation = deformation.permute(0, 2, 3, 1)
        out_dict['deformation'] = deformation
        # Sec. 3.2 in the paper
        if self.occlusion:
            occlusion_map = torch.sigmoid(self.occlusion(prediction))
            out_dict['occlusion_map'] = occlusion_map
        return out_dict
| 43.982456 | 117 | 0.658357 |
02c3310f2073aec37fc9a29d9339af95d7b54a39 | 758 | py | Python | two_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | null | null | null | two_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | 2 | 2019-11-13T19:55:49.000Z | 2019-11-13T19:55:57.000Z | two_sum.py | jaebradley/leetcode.py | 64634cc7d0e975ddd163f35acb18cc92960b8eb5 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/two-sum/
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
class Solution(object):
    def twoSum(self, nums, target):
        """Return indices of the two entries of ``nums`` summing to ``target``.

        Single pass with a complement map: O(n) time, O(n) extra space.
        Returns None when no such pair exists.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Maps a needed complement (target - value seen so far) to the
        # index where that value was seen.
        seen = {}
        for i, value in enumerate(nums):
            if value in seen:
                return [seen[value], i]
            seen[target - value] = i
| 25.266667 | 107 | 0.591029 |
61f2d3d1c24fc21497207b21c5916f8e879a2302 | 13,669 | py | Python | microdot/microdot.py | jonathanfoster/microdot | b810346aa48784ad00d70f160f2542222895f30a | [
"MIT"
] | null | null | null | microdot/microdot.py | jonathanfoster/microdot | b810346aa48784ad00d70f160f2542222895f30a | [
"MIT"
] | null | null | null | microdot/microdot.py | jonathanfoster/microdot | b810346aa48784ad00d70f160f2542222895f30a | [
"MIT"
] | null | null | null | try:
from sys import print_exception
except ImportError: # pragma: no cover
import traceback
def print_exception(exc):
traceback.print_exc()
concurrency_mode = 'threaded'
try: # pragma: no cover
import threading
def create_thread(f, *args, **kwargs):
"""Use the threading module."""
threading.Thread(target=f, args=args, kwargs=kwargs).start()
except ImportError: # pragma: no cover
try:
import _thread
def create_thread(f, *args, **kwargs):
"""Use MicroPython's _thread module."""
def run():
f(*args, **kwargs)
_thread.start_new_thread(run, ())
except ImportError:
def create_thread(f, *args, **kwargs):
"""No threads available, call function synchronously."""
f(*args, **kwargs)
concurrency_mode = 'sync'
try:
import ujson as json
except ImportError:
import json
try:
import ure as re
except ImportError:
import re
try:
import usocket as socket
except ImportError:
try:
import socket
except ImportError: # pragma: no cover
socket = None
def urldecode(string):
    """Decode a URL-encoded string.

    '+' becomes a space, '%XX' hex escapes become the corresponding
    character, and '%%' decodes to a literal '%'.
    """
    string = string.replace('+', ' ')
    pieces = string.split('%')
    if len(pieces) == 1:
        # No escapes at all; nothing further to do.
        return string
    decoded = [pieces[0]]
    for piece in pieces[1:]:
        if piece:
            # The first two characters are the hex code; the rest is
            # literal text following the escape.
            decoded.append(chr(int(piece[:2], 16)))
            decoded.append(piece[2:])
        else:
            # An empty piece came from '%%': an escaped percent sign.
            decoded.append('%')
    return ''.join(decoded)
class Request():
    """An HTTP request parsed from a client stream."""
    class G:
        # Empty namespace object; one instance is attached to each request
        # as ``self.g`` so handlers can stash per-request data on it.
        pass
    def __init__(self, client_addr, method, url, http_version, headers, body):
        self.client_addr = client_addr
        self.method = method
        self.path = url
        self.http_version = http_version
        # Split off the query string and decode it into ``args``.
        if '?' in self.path:
            self.path, self.query_string = self.path.split('?', 1)
            self.args = self._parse_urlencoded(self.query_string)
        else:
            self.query_string = None
            self.args = {}
        self.headers = headers
        self.cookies = {}
        self.content_length = 0
        self.content_type = None
        # NOTE(review): header matching here is case-sensitive; a client
        # sending 'content-length' would not be recognised.
        for header, value in self.headers.items():
            if header == 'Content-Length':
                self.content_length = int(value)
            elif header == 'Content-Type':
                self.content_type = value
            elif header == 'Cookie':
                for cookie in value.split(';'):
                    name, value = cookie.strip().split('=', 1)
                    self.cookies[name] = value
        self.body = body
        # Lazily populated caches for the ``json`` and ``form`` properties.
        self._json = None
        self._form = None
        self.g = Request.G()
    @staticmethod
    def create(client_stream, client_addr):
        """Parse one HTTP request from ``client_stream``.

        Returns a Request, or None when the stream yields no request line.
        """
        # request line, e.g. "GET /path?a=b HTTP/1.0"
        line = client_stream.readline().strip().decode()
        if not line: # pragma: no cover
            return None
        method, url, http_version = line.split()
        http_version = http_version.split('/', 1)[1]
        # headers: read until the blank line that ends the header section
        headers = {}
        content_length = 0
        while True:
            line = client_stream.readline().strip().decode()
            if line == '':
                break
            header, value = line.split(':', 1)
            value = value.strip()
            headers[header] = value
            if header == 'Content-Length':
                content_length = int(value)
        # body: read exactly Content-Length bytes (empty when absent)
        body = client_stream.read(content_length) if content_length else b''
        return Request(client_addr, method, url, http_version, headers, body)
    def _parse_urlencoded(self, urlencoded):
        # Decode "k1=v1&k2=v2" into a dict; assumes every pair contains
        # an '=' (a bare key would raise) -- TODO confirm acceptable.
        return {
            urldecode(key): urldecode(value) for key, value in [
                pair.split('=', 1) for pair in
                urlencoded.split('&')]}
    @property
    def json(self):
        """Body parsed as JSON, or None unless Content-Type is
        'application/json'.  Parsed once and cached."""
        if self.content_type != 'application/json':
            return None
        if self._json is None:
            self._json = json.loads(self.body.decode())
        return self._json
    @property
    def form(self):
        """Body parsed as form data, or None unless Content-Type is
        'application/x-www-form-urlencoded'.  Parsed once and cached."""
        if self.content_type != 'application/x-www-form-urlencoded':
            return None
        if self._form is None:
            self._form = self._parse_urlencoded(self.body.decode())
        return self._form
class Response():
    """An HTTP response that can serialize itself onto a client stream."""
    # File-extension to Content-Type mapping used by send_file().
    types_map = {
        'css': 'text/css',
        'gif': 'image/gif',
        'html': 'text/html',
        'jpg': 'image/jpeg',
        'js': 'application/javascript',
        'json': 'application/json',
        'png': 'image/png',
        'txt': 'text/plain',
    }
    # Chunk size used when streaming a file-like body.
    send_file_buffer_size = 1024
    def __init__(self, body='', status_code=200, headers=None):
        self.status_code = status_code
        self.headers = headers or {}
        # dicts/lists are serialized as JSON, strings are UTF-8 encoded;
        # anything else is passed through untouched.
        if isinstance(body, (dict, list)):
            self.body = json.dumps(body).encode()
            self.headers['Content-Type'] = 'application/json'
        elif isinstance(body, str):
            self.body = body.encode()
        else:
            # this applies to bytes or file-like objects
            self.body = body
    def set_cookie(self, cookie, value, path=None, domain=None, expires=None,
                   max_age=None, secure=False, http_only=False):
        """Add a Set-Cookie header; ``expires`` is a datetime, formatted
        in the HTTP GMT date format."""
        http_cookie = '{cookie}={value}'.format(cookie=cookie, value=value)
        if path:
            http_cookie += '; Path=' + path
        if domain:
            http_cookie += '; Domain=' + domain
        if expires:
            http_cookie += '; Expires=' + expires.strftime(
                "%a, %d %b %Y %H:%M:%S GMT")
        if max_age:
            http_cookie += '; Max-Age=' + str(max_age)
        if secure:
            http_cookie += '; Secure'
        if http_only:
            http_cookie += '; HttpOnly'
        # Set-Cookie is stored as a list so several cookies can coexist.
        if 'Set-Cookie' in self.headers:
            self.headers['Set-Cookie'].append(http_cookie)
        else:
            self.headers['Set-Cookie'] = [http_cookie]
    def complete(self):
        """Fill in default Content-Length / Content-Type headers."""
        if isinstance(self.body, bytes) and \
                'Content-Length' not in self.headers:
            self.headers['Content-Length'] = str(len(self.body))
        if 'Content-Type' not in self.headers:
            self.headers['Content-Type'] = 'text/plain'
    def write(self, stream):
        """Serialize status line, headers and body onto ``stream``."""
        self.complete()
        # status code (any non-200 status gets the placeholder reason "N/A")
        stream.write('HTTP/1.0 {status_code} {reason}\r\n'.format(
            status_code=self.status_code,
            reason='OK' if self.status_code == 200 else 'N/A').encode())
        # headers (list-valued headers are emitted once per value)
        for header, value in self.headers.items():
            values = value if isinstance(value, list) else [value]
            for value in values:
                stream.write('{header}: {value}\r\n'.format(
                    header=header, value=value).encode())
        stream.write(b'\r\n')
        # body: stream file-like objects in chunks, write bytes directly
        if self.body:
            if hasattr(self.body, 'read'):
                while True:
                    buf = self.body.read(self.send_file_buffer_size)
                    if len(buf):
                        stream.write(buf)
                    if len(buf) < self.send_file_buffer_size:
                        break
                if hasattr(self.body, 'close'):
                    self.body.close()
            else:
                stream.write(self.body)
    @classmethod
    def redirect(cls, location, status_code=302):
        """Build a redirect response pointing at ``location``."""
        return cls(status_code=status_code, headers={'Location': location})
    @classmethod
    def send_file(cls, filename, status_code=200, content_type=None):
        """Build a response streaming ``filename`` from disk.

        The Content-Type is guessed from the extension via ``types_map``
        unless given explicitly; the file handle is closed by write().
        """
        if content_type is None:
            ext = filename.split('.')[-1]
            if ext in Response.types_map:
                content_type = Response.types_map[ext]
            else:
                content_type = 'application/octet-stream'
        f = open(filename, 'rb')
        return cls(body=f, status_code=status_code,
                   headers={'Content-Type': content_type})
class URLPattern():
    """Compiled URL pattern supporting <name>, <type:name> and <re:regex:name>
    placeholder segments; plain patterns fall back to string comparison."""

    def __init__(self, url_pattern):
        self.pattern = ''
        self.args = []  # one {'type', 'name'} dict per placeholder, in order
        use_regex = False
        for segment in url_pattern.lstrip('/').split('/'):
            if segment and segment[0] == '<':
                if segment[-1] != '>':
                    raise ValueError('invalid URL pattern')
                segment = segment[1:-1]
                if ':' in segment:
                    # rsplit so colons inside an 're:<regex>' type stay intact
                    type_, name = segment.rsplit(':', 1)
                else:
                    type_ = 'string'
                    name = segment
                if type_ == 'string':
                    pattern = '[^/]+'
                elif type_ == 'int':
                    pattern = '\\d+'
                elif type_ == 'path':
                    pattern = '.+'
                elif type_.startswith('re:'):
                    pattern = type_[3:]
                else:
                    raise ValueError('invalid URL segment type')
                use_regex = True
                self.pattern += '/({pattern})'.format(pattern=pattern)
                self.args.append({'type': type_, 'name': name})
            else:
                # NOTE(review): literal segments are not re.escape()d, so regex
                # metacharacters in them match loosely when placeholders exist.
                self.pattern += '/{segment}'.format(segment=segment)
        if use_regex:
            # anchor the whole path; without placeholders keep a plain string
            self.pattern = re.compile('^' + self.pattern + '$')

    def match(self, path):
        """Return captured args ({} for an exact literal match) or None."""
        if isinstance(self.pattern, str):
            if path != self.pattern:
                return
            return {}
        g = self.pattern.match(path)
        if not g:
            return
        args = {}
        i = 1
        for arg in self.args:
            value = g.group(i)
            if arg['type'] == 'int':
                # int segments are converted before being handed to the view
                value = int(value)
            args[arg['name']] = value
            i += 1
        return args
class Microdot():
    """Minimal web application object: routing, request hooks, error handlers
    and a blocking socket server."""

    def __init__(self):
        self.url_map = []  # list of (methods, URLPattern, handler) tuples
        self.before_request_handlers = []
        self.after_request_handlers = []
        self.error_handlers = {}  # status code or exception class -> handler
        self.debug = False

    def route(self, url_pattern, methods=None):
        """Decorator registering a handler for a URL pattern (GET by default)."""
        def decorated(f):
            self.url_map.append(
                (methods or ['GET'], URLPattern(url_pattern), f))
            return f
        return decorated

    def before_request(self, f):
        """Register a pre-request hook; a truthy return short-circuits the view."""
        self.before_request_handlers.append(f)
        return f

    def after_request(self, f):
        """Register a post-request hook that may replace the response."""
        self.after_request_handlers.append(f)
        return f

    def errorhandler(self, status_code_or_exception_class):
        """Decorator registering a handler for a status code or exception class."""
        def decorated(f):
            self.error_handlers[status_code_or_exception_class] = f
            return f
        return decorated

    def run(self, host='0.0.0.0', port=5000, debug=False):
        """Accept connections forever; each one is dispatched on its own thread
        (create_thread / concurrency_mode are defined elsewhere in the module)."""
        self.debug = debug
        s = socket.socket()
        ai = socket.getaddrinfo(host, port)
        addr = ai[0][-1]
        if self.debug:  # pragma: no cover
            print('Starting {mode} server on {host}:{port}...'.format(
                mode=concurrency_mode, host=host, port=port))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(addr)
        s.listen(5)
        while True:
            sock, addr = s.accept()
            create_thread(self.dispatch_request, sock, addr)

    def find_route(self, req):
        """Return the first matching handler (binding req.url_args) or None."""
        f = None
        for route_methods, route_pattern, route_handler in self.url_map:
            if req.method in route_methods:
                req.url_args = route_pattern.match(req.path)
                if req.url_args is not None:
                    f = route_handler
                    break
        return f

    def dispatch_request(self, sock, addr):
        """Parse one request from sock, run hooks and handler, write a response."""
        if not hasattr(sock, 'readline'):  # pragma: no cover
            stream = sock.makefile("rwb")
        else:
            stream = sock
        req = Request.create(stream, addr)
        if req:
            f = self.find_route(req)
            try:
                res = None
                if f:
                    for handler in self.before_request_handlers:
                        res = handler(req)
                        if res:
                            break
                    if res is None:
                        res = f(req, **req.url_args)
                    # normalize handler results into a Response instance
                    if isinstance(res, tuple):
                        res = Response(*res)
                    elif not isinstance(res, Response):
                        res = Response(res)
                    for handler in self.after_request_handlers:
                        res = handler(req, res) or res
                elif 404 in self.error_handlers:
                    res = self.error_handlers[404](req)
                else:
                    res = 'Not found', 404
            except Exception as exc:
                print_exception(exc)
                res = None
                if exc.__class__ in self.error_handlers:
                    try:
                        res = self.error_handlers[exc.__class__](req, exc)
                    except Exception as exc2:  # pragma: no cover
                        # a failing error handler falls through to the 500 path
                        print_exception(exc2)
                if res is None:
                    if 500 in self.error_handlers:
                        res = self.error_handlers[500](req)
                    else:
                        res = 'Internal server error', 500
            # normalize again: error handlers may also return tuples/values
            if isinstance(res, tuple):
                res = Response(*res)
            elif not isinstance(res, Response):
                res = Response(res)
            res.write(stream)
        stream.close()
        if stream != sock:  # pragma: no cover
            sock.close()
        if self.debug and req:  # pragma: no cover
            print('{method} {path} {status_code}'.format(
                method=req.method, path=req.path,
                status_code=res.status_code))
# Module-level conveniences mirroring the familiar Flask-style helpers.
redirect = Response.redirect
send_file = Response.send_file
| 32.779376 | 78 | 0.519058 |
9bb70456b756eeba58d148e223915d4d5e9a92b7 | 3,564 | py | Python | scripts/performance/perf_load/perf_gen_req_parser.py | rantwijk/indy-node | 3cb77dab5482c8b721535020fec41506de819d2e | [
"Apache-2.0"
] | 1 | 2019-03-19T23:44:54.000Z | 2019-03-19T23:44:54.000Z | scripts/performance/perf_load/perf_gen_req_parser.py | rantwijk/indy-node | 3cb77dab5482c8b721535020fec41506de819d2e | [
"Apache-2.0"
] | null | null | null | scripts/performance/perf_load/perf_gen_req_parser.py | rantwijk/indy-node | 3cb77dab5482c8b721535020fec41506de819d2e | [
"Apache-2.0"
] | null | null | null | import json
from perf_load.perf_req_gen_seq import RGSeqReqs
from perf_load.perf_req_gen_nym import RGNym, RGGetNym
from perf_load.perf_req_gen_schema import RGSchema, RGGetSchema
from perf_load.perf_req_gen_attrib import RGAttrib, RGGetAttrib
from perf_load.perf_req_gen_definition import RGGetDefinition, RGDefinition
from perf_load.perf_req_gen_revoc import RGDefRevoc, RGGetDefRevoc, RGEntryRevoc, RGGetEntryRevoc, RGGetRevocRegDelta
from perf_load.perf_req_gen_payment import RGGetPaymentSources, RGPayment, RGVerifyPayment
from perf_load.perf_req_gen_cfg_writes import RGConfigChangeState
from perf_load.perf_req_gen_demoted_node import RGPoolNewDemotedNode
from perf_load.perf_req_gen_get_txn import RGGetTxn
class ReqTypeParser:
    """Parse the load tool's request-kind CLI argument into a request
    generator class plus its constructor parameters."""

    # Maps request-kind names to request-generator classes.
    _supported_requests = \
        {"nym": RGNym, "schema": RGSchema, "attrib": RGAttrib, "cred_def": RGDefinition, "revoc_reg_def": RGDefRevoc,
         "revoc_reg_entry": RGEntryRevoc, "get_nym": RGGetNym, "get_attrib": RGGetAttrib, "get_schema": RGGetSchema,
         "get_cred_def": RGGetDefinition, "get_revoc_reg_def": RGGetDefRevoc, "get_revoc_reg": RGGetEntryRevoc,
         "get_revoc_reg_delta": RGGetRevocRegDelta, "get_payment_sources": RGGetPaymentSources, "payment": RGPayment,
         "verify_payment": RGVerifyPayment, "cfg_writes": RGConfigChangeState, "demoted_node": RGPoolNewDemotedNode,
         "get_txn": RGGetTxn}

    @classmethod
    def supported_requests(cls):
        """Return the list of recognized request-kind names."""
        return list(cls._supported_requests.keys())

    @classmethod
    def __add_label(cls, cls_name, param):
        # Normalize params to a dict carrying a 'label' entry; the default
        # label is the request-kind name that maps to cls_name.
        ret_dict = param if param is not None else {}
        if isinstance(param, int):
            # a bare integer is shorthand for {"count": <int>}
            ret_dict = {}
            ret_dict["count"] = param
        lbl = [k for k, v in cls._supported_requests.items() if v == cls_name]
        if "label" not in ret_dict:
            ret_dict["label"] = lbl[0]
        return cls_name, ret_dict

    @classmethod
    def __parse_single(cls, req_kind, prms):
        # Resolve one request spec. req_kind may be a known name, a custom
        # label, None (arguments arrive swapped), or a one-entry dict.
        if req_kind is None:
            return cls.__parse_single(prms, None)
        if isinstance(req_kind, str) and req_kind in cls._supported_requests:
            return cls.__add_label(cls._supported_requests[req_kind], prms)
        if isinstance(req_kind, str) and req_kind not in cls._supported_requests:
            # unknown name: treat it as a custom label for the inner spec
            ret_cls, ret_par = cls.__parse_single(None, prms)
            ret_par.update({"label": req_kind})
            return ret_cls, ret_par
        if isinstance(req_kind, dict) and len(req_kind.keys()) == 1:
            # {kind: params} single-entry dict form
            k = list(req_kind)[0]
            v = req_kind[k]
            return cls.__parse_single(k, v)
        raise RuntimeError("Invalid parameter format")

    @classmethod
    def create_req_generator(cls, req_kind_arg):
        """Turn the CLI string into (generator_class, init_params).

        A plain known name maps directly; anything else must be JSON --
        a dict yields a randomized mix, a list a fixed sequence.
        """
        if req_kind_arg in cls._supported_requests:
            return cls._supported_requests[req_kind_arg], {"label": req_kind_arg}
        try:
            reqs = json.loads(req_kind_arg)
        except Exception as e:
            raise RuntimeError("Invalid parameter format")
        ret_reqs = []
        randomizing = False
        if isinstance(reqs, dict):
            randomizing = True
            for k, v in reqs.items():
                ret_reqs.append(cls.__parse_single(k, v))
        elif isinstance(reqs, list):
            randomizing = False
            for r in reqs:
                ret_reqs.append(cls.__parse_single(r, {}))
        if len(ret_reqs) == 1:
            # single spec: no sequencing wrapper needed
            req = ret_reqs[0][0]
            return cls.__add_label(req, ret_reqs[0][1])
        else:
            return RGSeqReqs, {'next_random': randomizing, 'reqs': ret_reqs}
| 45.692308 | 117 | 0.68266 |
da42251acdc75f61b20c74dadd97db7a8fd325b0 | 3,284 | py | Python | scripts/BI/pyro_model/testing/ExplanationEvaluation/models/model_selector.py | shalinkpatel/GCN_Integration | 253fa4321606acf0ee0a98667bf6e5eb8ec96cf1 | [
"MIT"
] | null | null | null | scripts/BI/pyro_model/testing/ExplanationEvaluation/models/model_selector.py | shalinkpatel/GCN_Integration | 253fa4321606acf0ee0a98667bf6e5eb8ec96cf1 | [
"MIT"
] | 1 | 2022-02-10T06:32:42.000Z | 2022-02-10T06:32:42.000Z | scripts/BI/pyro_model/testing/ExplanationEvaluation/models/model_selector.py | shalinkpatel/GCN_Integration | 253fa4321606acf0ee0a98667bf6e5eb8ec96cf1 | [
"MIT"
] | null | null | null | import torch
import os
from ExplanationEvaluation.models.GNN_paper import NodeGCN as GNN_NodeGCN
from ExplanationEvaluation.models.GNN_paper import GraphGCN as GNN_GraphGCN
from ExplanationEvaluation.models.PG_paper import NodeGCN as PG_NodeGCN
from ExplanationEvaluation.models.PG_paper import GraphGCN as PG_GraphGCN
def string_to_model(paper, dataset):
    """Return the untrained classification model for a paper/dataset pair.

    :param paper: which paper's model family to use ("GNN" or "PG").
    :param dataset: dataset name; fixes the model's input/output sizes.
    :returns: a torch.nn.Module instance.
    :raises NotImplementedError: for an unknown paper or dataset.
    """
    # (input_dim, output_dim) per dataset, split by node- vs graph-level task.
    node_dims = {'syn1': (10, 4), 'syn2': (10, 8),
                 'syn3': (10, 2), 'syn4': (10, 2)}
    graph_dims = {'ba2': (10, 2), 'mutag': (14, 2)}

    if paper == "GNN":
        node_cls, graph_cls = GNN_NodeGCN, GNN_GraphGCN
    elif paper == "PG":
        node_cls, graph_cls = PG_NodeGCN, PG_GraphGCN
    else:
        raise NotImplementedError

    if dataset in node_dims:
        return node_cls(*node_dims[dataset])
    if dataset in graph_dims:
        return graph_cls(*graph_dims[dataset])
    raise NotImplementedError
def get_pretrained_path(paper, dataset):
    """Build the path to the stored parameters of a pre-trained model.

    :param paper: the paper whose classification model is used.
    :param dataset: the dataset the model was trained on.
    :returns: str; path to the pre-trained model parameters.
    """
    # Checkpoints live next to this module under pretrained/<paper>/<dataset>/.
    here = os.path.dirname(os.path.realpath(__file__))
    return f"{here}/pretrained/{paper}/{dataset}/best_model"
def model_selector(paper, dataset, pretrained=True, return_checkpoint=False):
    """Load the model associated with a paper/dataset pair.

    :param paper: the paper whose classification model we want to use.
    :param dataset: the dataset on which we wish to train. This ensures that
        the model in- and output are correct.
    :param pretrained: whether to load pre-trained parameters into the model.
    :param return_checkpoint: whether to also return the checkpoint dict
        (None when pretrained is False).
    :returns: the model, or (model, checkpoint) when return_checkpoint is True.
    """
    model = string_to_model(paper, dataset)
    # Bind checkpoint up front: previously this name was only assigned inside
    # the `pretrained` branch, so pretrained=False + return_checkpoint=True
    # raised UnboundLocalError instead of returning a result.
    checkpoint = None
    if pretrained:
        path = get_pretrained_path(paper, dataset)
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['model_state_dict'])
        print(f"This model obtained: Train Acc: {checkpoint['train_acc']:.4f}, Val Acc: {checkpoint['val_acc']:.4f}, Test Acc: {checkpoint['test_acc']:.4f}.")
    if return_checkpoint:
        return model, checkpoint
    return model
afcc47a014b32316965f03986fbd6589c5ff7866 | 202 | py | Python | orijang/urls.py | naxvinci/Font_Finder | b4fec9265edf125440f57baf141377684be1b0d3 | [
"MIT"
] | 1 | 2020-08-25T06:28:14.000Z | 2020-08-25T06:28:14.000Z | orijang/urls.py | naxvinci/Font_Finder | b4fec9265edf125440f57baf141377684be1b0d3 | [
"MIT"
] | 13 | 2020-09-25T22:26:51.000Z | 2022-02-10T02:05:39.000Z | orijang/urls.py | naxvinci/Font_Finder | b4fec9265edf125440f57baf141377684be1b0d3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
# Root URL configuration: the admin site, plus the font_detect app mounted
# at the site root (order matters -- the catch-all include comes last).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('font_detect.urls')),
]
| 22.444444 | 42 | 0.717822 |
392ff311a288b223d7bea77455ce2e62739bd940 | 2,023 | py | Python | populate_rango.py | apenchev/tangowithdjango | 7678137224df6948b0333841eeb2c54dc5668437 | [
"MIT"
] | null | null | null | populate_rango.py | apenchev/tangowithdjango | 7678137224df6948b0333841eeb2c54dc5668437 | [
"MIT"
] | null | null | null | populate_rango.py | apenchev/tangowithdjango | 7678137224df6948b0333841eeb2c54dc5668437 | [
"MIT"
] | null | null | null | import os
# Point Django at the project settings and bootstrap the ORM; this must run
# before any model import below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
    """Create the demo categories and pages, then print what now exists.

    Safe to run repeatedly: add_cat()/add_page() use get_or_create underneath.
    """
    python_cat = add_cat('Python', views=128, likes=64)

    add_page(cat=python_cat,
             title="Official Python Tutorial",
             url="http://docs.python.org/2/tutorial/")

    add_page(cat=python_cat,
             title="How to Think like a Computer Scientist",
             url="http://www.greenteapress.com/thinkpython/")

    add_page(cat=python_cat,
             title="Learn Python in 10 Minutes",
             url="http://www.korokithakis.net/tutorials/python/")

    django_cat = add_cat("Django", views=64, likes=32)

    add_page(cat=django_cat,
             title="Official Django Tutorial",
             url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/")

    add_page(cat=django_cat,
             title="Django Rocks",
             url="http://www.djangorocks.com/")

    add_page(cat=django_cat,
             title="How to Tango with Django",
             url="http://www.tangowithdjango.com/")

    frame_cat = add_cat("Other Frameworks", views=32, likes=16)

    add_page(cat=frame_cat,
             title="Bottle",
             url="http://bottlepy.org/docs/dev/")

    add_page(cat=frame_cat,
             title="Flask",
             url="http://flask.pocoo.org")

    name_cat = add_cat("Atanas Penchev", views=4, likes=3)

    add_page(cat=name_cat,
             title="Github",
             url="https://github.com/apenchev")

    add_page(cat=name_cat,
             title="Python Everywhere",
             url="http://apenchev.pythonanywhere.com/")

    # Print out what we have added to the user.
    for c in Category.objects.all():
        for p in Page.objects.filter(category=c):
            # NOTE: Python 2 print statement; this script predates Python 3.
            print "- {0} - {1}".format(str(c), str(p))
def add_page(cat, title, url, views=0):
    """Fetch or create a Page in the given category and return it."""
    page, _created = Page.objects.get_or_create(
        category=cat, title=title, url=url, views=views)
    return page
def add_cat(name, views, likes):
    """Fetch or create a Category with the given stats and return it."""
    category, _created = Category.objects.get_or_create(
        name=name, views=views, likes=likes)
    return category
# Start execution here!
if __name__ == '__main__':
    # NOTE: Python 2 print statement; this script predates Python 3.
    print "Starting Rango population script..."
    populate()
| 26.618421 | 85 | 0.695007 |
b1b0c26ef5acabcf56413ee1f047413962bd0a18 | 6,876 | py | Python | kubernetes/client/models/v1_config_map_list.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_config_map_list.py | lp67/python | 33c5ea9835356410ce4a9fa54a02c6a2a22143c6 | [
"Apache-2.0"
] | 4 | 2019-11-19T10:33:47.000Z | 2022-03-01T03:33:52.000Z | kubernetes/client/models/v1_config_map_list.py | mohramadan911/PythonClientAPI | 5d111812c81b7a573ac8661d1aec60bb97072412 | [
"Apache-2.0"
] | 2 | 2021-08-10T16:35:31.000Z | 2021-09-14T04:53:06.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ConfigMapList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ConfigMap]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ConfigMapList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # 'items' is required; the other fields are optional.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1ConfigMapList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ConfigMapList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ConfigMapList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ConfigMapList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1ConfigMapList.  # noqa: E501

        Items is the list of ConfigMaps.  # noqa: E501

        :return: The items of this V1ConfigMapList.  # noqa: E501
        :rtype: list[V1ConfigMap]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1ConfigMapList.

        Items is the list of ConfigMaps.  # noqa: E501

        :param items: The items of this V1ConfigMapList.  # noqa: E501
        :type: list[V1ConfigMap]
        """
        # client-side validation: 'items' is a required field of the list type
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1ConfigMapList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ConfigMapList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ConfigMapList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ConfigMapList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1ConfigMapList.  # noqa: E501


        :return: The metadata of this V1ConfigMapList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ConfigMapList.


        :param metadata: The metadata of this V1ConfigMapList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize model objects inside lists
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize model objects inside dict values
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ConfigMapList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ConfigMapList):
            return True

        return self.to_dict() != other.to_dict()
| 33.378641 | 312 | 0.622746 |
2e0fe8e6cce095b6da77fae7ce266525bd335697 | 665 | py | Python | sims/s304/plot-total-energy.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims/s304/plot-total-energy.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims/s304/plot-total-energy.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | from pylab import *
import numpy
import tables

# Domain size and number of output frames of the s304 5-moment GEM run.
Lx = 8*pi
Ly = 4*pi
nFrame = 40

# numpy.float was removed in NumPy 1.20; use the builtin float dtype instead.
# (The unused elcEnergy/ionEnergy arrays from the original were dropped.)
emEnergy = numpy.zeros((nFrame+1,), float)
Tm = linspace(0, nFrame, nFrame+1)

for i in range(nFrame+1):
    print("Working on %d .." % i)
    # tables.openFile is the legacy PyTables spelling (open_file in >=3.0);
    # kept for compatibility with the rest of this code base.
    fh = tables.openFile("s304-5m-gem_q_%d.h5" % i)
    try:
        q = fh.root.StructGridField
        dx = Lx/q.shape[0]
        dy = Ly/q.shape[1]
        # Slots 13..15 are summed as field energy -- presumably the magnetic
        # field components (cf. the 'Magnetic energy' axis label below).
        emEnergy[i] = 0.5*dx*dy*sum(q[:,:,13]**2 + q[:,:,14]**2 + q[:,:,15]**2)
    finally:
        # close each HDF5 file; the original leaked all 41 handles
        fh.close()

figure(1)
plot(Tm, emEnergy, '-b', label='EM')
xlabel('Time')
ylabel('Magnetic energy')
savefig('s304-mag-energy.png')
show()
| 20.151515 | 75 | 0.622556 |
f2a32c3ab20b24a0969d757779736867daf0f493 | 1,139 | py | Python | python/lbann/modules/graph/utils.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | python/lbann/modules/graph/utils.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | python/lbann/modules/graph/utils.py | LLNL/LBANN | 8bcc5d461e52de70e329d73081ca7eee3e5c580a | [
"Apache-2.0"
] | null | null | null | import lbann
def GraphExpand(features, indices, name=None):
    """Place rows of features according to indices into an expanded matrix:

       output[i] = features[indices[i]]

    Args:
        features (Layer): 2D matrix with shape (N, F)
        indices (Layer): 1D matrix with shape (E)
        name (str, optional): name of the generated layer; auto-numbered
            when omitted
    Returns:
        (Layer) of shape (E, F)
    """
    # Bumped on every call (even when a name is given) so auto-generated
    # layer names stay unique and monotonically increasing.
    GraphExpand.count += 1
    if (name is None):
        name = f"graph_expand_{GraphExpand.count}"
    return lbann.Gather(features, indices, axis=0, name=name)
def GraphReduce(features, indices, dims, name=None):
    """Perform a sum-reduction of the features according to the indices:

       output[indices[i]] += features[i]

    Args:
        features (Layer): 2D matrix with shape (E, F)
        indices (Layer): 1D matrix with shape (E)
        dims (list of int): the output dimensions (N, F)
        name (str, optional): name of the generated layer; auto-numbered
            when omitted
    Returns:
        (Layer) of shape (N, F)
    """
    # Bumped on every call (even when a name is given) so auto-generated
    # layer names stay unique and monotonically increasing.
    GraphReduce.count += 1
    if (name is None):
        name = f"graph_reduce_{GraphReduce.count}"
    return lbann.Scatter(features, indices, dims=dims, axis=0, name=name)
# Initialize the per-function call counters used for default layer names.
GraphReduce.count = 0
GraphExpand.count = 0
| 32.542857 | 73 | 0.626866 |
a282309a8df5e9a49af8c9769389d2665c1f8313 | 538 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/damp-brook-31516 | 0c2cb3efc3699a1a7e9a162895b80a265b241d3a | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/damp-brook-31516 | 0c2cb3efc3699a1a7e9a162895b80a265b241d3a | [
"FTL",
"AML",
"RSA-MD"
] | 1 | 2021-10-18T16:33:23.000Z | 2021-10-18T16:33:23.000Z | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/damp-brook-31516 | 0c2cb3efc3699a1a7e9a162895b80a265b241d3a | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Seed/refresh the default django.contrib.sites record (pk=1)."""
    Site = apps.get_model("sites", "Site")
    domain = "damp-brook-31516.botics.co"
    defaults = {
        "name": "Damp Brook",
    }
    if domain:
        defaults["domain"] = domain
    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration seeding the Site row once the sites app is migrated."""

    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
| 20.692308 | 61 | 0.654275 |
de51a9c64a47f794736746a4bd9e127289a9fcea | 210 | py | Python | pycalc/model.py | lbreede/pyqt-cheatsheet | ca094108ed60f2f0c3ae3ba5c7b4b5e7a5c73861 | [
"MIT"
] | null | null | null | pycalc/model.py | lbreede/pyqt-cheatsheet | ca094108ed60f2f0c3ae3ba5c7b4b5e7a5c73861 | [
"MIT"
] | null | null | null | pycalc/model.py | lbreede/pyqt-cheatsheet | ca094108ed60f2f0c3ae3ba5c7b4b5e7a5c73861 | [
"MIT"
] | null | null | null | __version__ = "0.1"
__author__ = "Lennart Breede"

ERROR_MSG = "ERROR"


def evaluateExpression(expression):
    """Evaluate an arithmetic expression string and return str(result).

    Any failure (syntax error, division by zero, unknown name, ...) yields
    ERROR_MSG instead of raising.
    NOTE(review): eval() on arbitrary input is only acceptable because this
    backs a local desktop calculator -- never expose it to untrusted input.
    """
    try:
        outcome = str(eval(expression, {}, {}))
    except Exception:
        outcome = ERROR_MSG
    return outcome
47912551f5238c109333dc28d7b7d5adcde6e231 | 2,240 | py | Python | tests/unit/raptiformica/cli/test_parse_join_arguments.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 21 | 2016-09-04T11:27:31.000Z | 2019-10-30T08:23:14.000Z | tests/unit/raptiformica/cli/test_parse_join_arguments.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 5 | 2017-09-17T15:59:37.000Z | 2018-02-03T14:53:32.000Z | tests/unit/raptiformica/cli/test_parse_join_arguments.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 2 | 2017-11-21T18:14:51.000Z | 2017-11-22T01:20:45.000Z | from mock import call
from raptiformica.cli import parse_join_arguments
from tests.testcase import TestCase
class TestParseJoinArguments(TestCase):
    """Unit tests for raptiformica.cli.parse_join_arguments."""

    def setUp(self):
        # Patch out argparse and the helpers the CLI module consults.
        self.argument_parser = self.set_up_patch('raptiformica.cli.ArgumentParser')
        self.parse_arguments = self.set_up_patch('raptiformica.cli.parse_arguments')
        self.get_first_server_type = self.set_up_patch(
            'raptiformica.cli.get_first_server_type'
        )
        self.get_first_server_type.return_value = 'headless'
        self.get_server_types = self.set_up_patch(
            'raptiformica.cli.get_server_types'
        )
        # NOTE(review): this reassigns get_first_server_type.return_value to
        # ['headless']; it looks like get_server_types.return_value may have
        # been intended here -- confirm against raptiformica.cli.
        self.get_first_server_type.return_value = [
            self.get_first_server_type.return_value
        ]

    def test_parse_join_arguments_instantiates_argparser(self):
        parse_join_arguments()

        self.argument_parser.assert_called_once_with(
            prog='raptiformica join',
            description='Provision and join this machine into an existing network'
        )

    def test_parse_join_arguments_adds_arguments(self):
        parse_join_arguments()

        # The exact add_argument() calls the CLI is expected to make, in order.
        expected_calls = [
            call(
                'host',
                type=str,
                help='Hostname or ip of the remote machine '
                     'to use to slave this machine to'
            ),
            call('--port', '-p', type=int, default=22,
                 help='Port to use to connect to the remote machine with over SSH'),
            call('--server-type', type=str, default=self.get_first_server_type.return_value,
                 choices=self.get_server_types.return_value,
                 help='Specify a server type. Default is {}'.format(self.get_first_server_type.return_value)),
        ]
        self.assertEqual(
            self.argument_parser.return_value.add_argument.mock_calls,
            expected_calls
        )

    def test_parse_join_arguments_parses_arguments(self):
        parse_join_arguments()

        self.parse_arguments.assert_called_once_with(self.argument_parser.return_value)

    def test_parse_join_arguments_returns_parsed_arguments(self):
        ret = parse_join_arguments()

        self.assertEqual(ret, self.parse_arguments.return_value)
| 36.721311 | 110 | 0.665625 |
91e52e4567e1b173da6d0900479f8fd7fae2e498 | 529 | py | Python | learnmodels/migrations/0001_initial.py | fanandactuaility/mysite | fe8c19b2eb9f91f8a86752b232eaf32e418f67eb | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | learnmodels/migrations/0001_initial.py | fanandactuaility/mysite | fe8c19b2eb9f91f8a86752b232eaf32e418f67eb | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | learnmodels/migrations/0001_initial.py | fanandactuaility/mysite | fe8c19b2eb9f91f8a86752b232eaf32e418f67eb | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the Blog model (id, name, tagline)."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('tagline', models.TextField()),
            ],
        ),
    ]
| 24.045455 | 114 | 0.567108 |
8a7be636a469ec5d8e4589595dd8e18a59ab10dc | 34,290 | py | Python | app/main/views.py | Cpapa97/pipecontrol | 8178db7c5e48d607b0a63df45671928df75e6a42 | [
"MIT"
] | null | null | null | app/main/views.py | Cpapa97/pipecontrol | 8178db7c5e48d607b0a63df45671928df75e6a42 | [
"MIT"
] | 3 | 2019-04-13T07:47:21.000Z | 2019-09-09T19:12:00.000Z | app/main/views.py | Cpapa97/pipecontrol | 8178db7c5e48d607b0a63df45671928df75e6a42 | [
"MIT"
] | 6 | 2017-06-28T22:26:47.000Z | 2018-05-26T15:51:52.000Z | from collections import OrderedDict
from inspect import isclass
from slacker import Slacker
from datetime import datetime, timedelta
import pandas as pd
import datajoint as dj
import uuid
import numpy as np
import matplotlib.pyplot as plt
import mpld3
import graphviz
import json
import http
from flask import render_template, redirect, url_for, flash, request, session, send_from_directory
from flask_weasyprint import render_pdf, HTML, CSS
from pymysql.err import IntegrityError
from . import main, forms, tables
from .. import schemata
from ..schemata import experiment, shared, reso, meso, stack, pupil, treadmill, tune, xcorr, mice, stimulus
def escape_json(json_string):
    """ Clean JSON strings so they can be used as html attributes.

    '&' must be escaped before '"' -- otherwise ampersands already present in
    the data could collide with the inserted '&quot;' entities and the value
    would not round-trip through the browser's attribute decoding.
    """
    return json_string.replace('&', '&amp;').replace('"', '&quot;')
@main.route('/')
def index():
    """Home page; force user selection first if no user is in the session."""
    # idiomatic membership test (was: `not 'user' in session`, flake8 E713)
    if 'user' not in session:
        return redirect(url_for('main.user'))
    return render_template('index.html')
@main.route('/user', methods=['GET', 'POST'])
def user():
    """Show and update the active user stored in the session."""
    form = forms.UserForm(request.form)
    submitted = request.method == 'POST' and form.validate()
    if submitted:
        flash('User switched to {}'.format(form.user.data))
        session['user'] = form.user.data
    elif 'user' in session:
        # pre-fill the form with the currently selected user
        form.user.data = session['user']
    return render_template('user.html', form=form)
@main.route('/autoprocessing', methods=['GET', 'POST'])
def autoprocessing():
    """Queue a scan for automatic processing (skips already-queued scans)."""
    form = forms.AutoProcessing(request.form)
    if request.method == 'POST' and form.validate():
        field_names = ['animal_id', 'session', 'scan_idx', 'priority',
                       'autosegment']
        key = {name: form[name].data for name in field_names}
        if not experiment.AutoProcessing().proj() & key:
            experiment.AutoProcessing().insert1(key)
            flash('{} inserted in AutoProcessing'.format(key))
    return render_template('autoprocessing.html', form=form)
@main.route('/correction', methods=['GET', 'POST'])
def correction():
    """List scan fields/stacks missing a CorrectionChannel and insert choices.

    POST: the submitted 'channel' values are the escaped-JSON keys generated
    below; they are inserted into the posting module's CorrectionChannel.
    Always: rebuild one table per pipeline module with the items still
    lacking a correction channel for the current user's sessions.
    """
    modules = OrderedDict([('reso', reso), ('meso', meso), ('stack', stack)])
    if request.method == 'POST':
        # dropdown values are JSON dicts; empty (unselected) entries are skipped
        keys = [json.loads(k) for k in request.form.getlist('channel') if k]
        module = modules[request.form['module_name']]
        module.CorrectionChannel().insert(keys, ignore_extra_fields=True)
        flash('{} key(s) inserted in CorrectionChannel'.format(len(keys)))
    all_tables = []
    user_sessions = experiment.Session() & {'username': session.get('user', 'unknown')}
    for module_name, module in modules.items():
        if module_name in ['reso', 'meso']:
            # scan fields without a chosen correction channel yet
            keys_rel = ((module.ScanInfo() * module.ScanInfo.Field().proj()
                         & user_sessions) - module.CorrectionChannel())
            correction_table = tables.CorrectionTable
        else:  # stack
            keys_rel = (module.StackInfo() & user_sessions) - module.CorrectionChannel()
            correction_table = tables.StackCorrectionTable
        items = keys_rel.proj('nchannels').fetch(as_dict=True)
        for item in items:
            channels = list(range(1, item['nchannels'] + 1))
            # one escaped-JSON payload per selectable channel (<option> value)
            values = [escape_json(json.dumps({**item, 'channel': c})) for c in channels]
            item['channel'] = {'name': 'channel', 'options': channels, 'values': values}
        all_tables.append((module_name, correction_table(items)))
    return render_template('correction.html', correction_tables=all_tables)
@main.route('/segmentation', methods=['GET', 'POST'])
def segmentation():
    """List fields awaiting a segmentation decision and record the user's choice.

    On POST, selected fields get a SegmentationTask entry (with the chosen
    compartment and a fixed segmentation_method of 6) and explicitly ignored
    fields are recorded in DoNotSegment. Remaining candidates are rendered
    per module.
    """
    modules = OrderedDict([('reso', reso), ('meso', meso)])
    if request.method == 'POST':
        module = modules[request.form['module_name']]
        # Empty strings come from unselected rows; drop them before decoding.
        keys = [json.loads(k) for k in request.form.getlist('compartment') if k]
        keys = [{**key, 'segmentation_method': 6} for key in keys]
        module.SegmentationTask().insert(keys, ignore_extra_fields=True)
        flash('{} key(s) inserted in SegmentationTask'.format(len(keys)))
        keys = [json.loads(k) for k in request.form.getlist('ignore_item')]
        module.DoNotSegment().insert(keys, ignore_extra_fields=True)
        flash('{} key(s) ignored'.format(len(keys)))
    all_tables = []
    user_sessions = experiment.Session() & {'username': session.get('user', 'unknown')}
    compartments = experiment.Compartment().fetch('compartment')
    for module_name, module in modules.items():
        # Motion-corrected fields of the user's scans with neither a task nor an
        # ignore entry; 'channel <= nchannels' drops nonexistent channel combos.
        segtask_rel = ((module.ScanInfo() * shared.Channel() * module.MotionCorrection() &
                        user_sessions & 'channel <= nchannels') - module.SegmentationTask() -
                       module.DoNotSegment())
        items = segtask_rel.proj().fetch(as_dict=True)
        for item in items:
            # One JSON-encoded candidate key per compartment choice.
            values = [escape_json(json.dumps({**item, 'compartment': c})) for c in compartments]
            item['ignore'] = {'name': 'ignore_item', 'value': escape_json(json.dumps(item))}
            item['compartment'] = {'name': 'compartment', 'options': compartments, 'values': values}
        all_tables.append((module_name, tables.SegmentationTable(items)))
    return render_template('segmentationtask.html', segmentation_tables=all_tables)
@main.route('/progress', methods=['GET', 'POST'])
def progress():
    """Render per-module population progress for the current user's sessions."""
    all_tables = []
    user_sessions = experiment.Session() & {'username': session.get('user', 'unknown')}
    for module_name, module in [('reso', reso), ('meso', meso), ('stack', stack)]:
        items = []
        # Duck-type every module attribute: only auto-populated relations
        # expose progress(); everything else raises and is skipped.
        for rel_name, possible_rel in module.__dict__.items():
            try:
                remaining, total = possible_rel().progress(user_sessions, display=False)
                items.append({'table': rel_name, 'processed': '{}/{}'.format(total - remaining, total),
                              'percentage': '{:.1f}%'.format(100 * (1 - remaining / total))})
            except Exception:  # not a dj.Computed class
                pass
        all_tables.append((module_name, tables.ProgressTable(items)))
    return render_template('progress.html', progress_tables=all_tables)
@main.route('/jobs', methods=['GET', 'POST'])
def jobs():
    """Show the DataJoint job reservation tables and delete selected entries.

    On POST, the selected jobs (form values encoded as 'table_name+key_hash')
    are removed from the chosen module's jobs table. One job table per module
    is rendered on every request.
    """
    modules = OrderedDict([('reso', reso), ('meso', meso), ('stack', stack),
                           ('stimulus', stimulus), ('pupil', pupil),
                           ('treadmill', treadmill), ('tune', tune)])
    if request.method == 'POST':
        to_delete = []
        for tn_plus_kh in request.form.getlist('delete_item'):
            # Form values pack both fields into one '+'-separated string.
            table_name, key_hash = tn_plus_kh.split('+')
            to_delete.append({'table_name': table_name, 'key_hash': key_hash})
        jobs_rel = modules[request.form['module_name']].schema.jobs & to_delete
        # Count before deleting; the relation is empty afterwards.
        num_jobs_to_delete = len(jobs_rel)
        jobs_rel.delete()
        flash('{} job(s) deleted.'.format(num_jobs_to_delete))
    all_tables = []
    fetch_attributes = ['table_name', 'status', 'key', 'user', 'key_hash',
                        'error_message', 'timestamp']
    for name, module in modules.items():
        items = module.schema.jobs.proj(*fetch_attributes).fetch(order_by='table_name, '
                                                                          'timestamp DESC',
                                                                 as_dict=True)
        for item in items:
            value = '{}+{}'.format(item['table_name'], item['key_hash'])  # + is separator
            item['delete'] = {'name': 'delete_item', 'value': value}
            item['key_hash'] = item['key_hash'][:8] + '...'  # shorten it for display
        all_tables.append((name, tables.JobTable(items)))
    return render_template('jobs.html', job_tables=all_tables)
@main.route('/summary', methods=['GET', 'POST'])
def summary():
    """Show up to 25 fields with summary images, optionally restricted by the form."""
    form = forms.RestrictionForm(request.form)
    # Union of reso and meso fields that already have summary images.
    reso_fields = (reso.ScanInfo.Field() & reso.SummaryImages()).proj()
    meso_fields = (meso.ScanInfo.Field() & meso.SummaryImages()).proj()
    summary_rel = reso_fields + meso_fields
    if request.method == 'POST' and form.validate():
        summary_rel = summary_rel & form['restriction'].data
    table = tables.SummaryTable(summary_rel.fetch(as_dict=True, limit=25))
    return render_template('summary.html', form=form, table=table)
@main.route('/quality/', methods=['GET', 'POST'])
def quality():
    """Quality-control overview for one scan.

    On a valid POST, locates the scan in the reso or meso pipeline and renders
    population checkmarks, scan info, and the keys of the per-field quality
    figures. Flashes a warning if the scan is in neither pipeline.
    """
    form = forms.QualityForm(request.form)
    if request.method == 'POST' and form.validate():
        key = {'animal_id': form['animal_id'].data, 'session': form['session'].data,
               'scan_idx': form['scan_idx'].data}
        # A scan lives in at most one of the two pipelines.
        pipe = reso if reso.ScanInfo() & key else meso if meso.ScanInfo() & key else None
        if pipe is not None:
            oracle_keys = (tune.OracleMap() & key).fetch('KEY', order_by='field')
            cos2map_keys = (tune.Cos2Map() & key).fetch('KEY', order_by='field')
            summary_keys = (pipe.SummaryImages.Correlation() & key).fetch('KEY', order_by='field')
            quality_keys = (pipe.Quality.Contrast() & key).fetch('KEY', order_by='field')
            eye_key = (pupil.Eye() & key).fetch1('KEY') if pupil.Eye() & key else None
            # One checkmark per Computed/Imported relation in each schema:
            # populated iff at least one row matches this scan's key.
            items = []
            for schema_ in [pipe, pupil, tune]:
                for cls in filter(lambda x: issubclass(x, (dj.Computed, dj.Imported)),
                                  filter(isclass, map(lambda x: getattr(schema_, x), dir(schema_)))):
                    items.append({'relation': cls.__name__, 'populated': bool(cls() & key)})
            progress_table = tables.CheckmarkTable(items)
            items = [{'attribute': a, 'value': v} for a, v in (pipe.ScanInfo() & key).fetch1().items()]
            info_table = tables.InfoTable(items)
            return render_template('quality.html', form=form, progress_table=progress_table,
                                   info_table=info_table, oracle_keys=oracle_keys,
                                   cos2map_keys=cos2map_keys, summary_keys=summary_keys,
                                   quality_keys=quality_keys, eye_key=eye_key)
        else:
            flash('{} is not in reso or meso'.format(key))
    return render_template('quality.html', form=form)
@main.route('/figure/<animal_id>/<session>/<scan_idx>/<field>/<pipe_version>/<which>')
def figure(animal_id, session, scan_idx, field, pipe_version, which):
    """Render a summary image for one field; *which* selects 'average' or 'correlation'."""
    key = {'animal_id': animal_id, 'session': session, 'scan_idx': scan_idx,
           'field': field, 'pipe_version': pipe_version}
    pipe = reso if reso.SummaryImages() & key else meso if meso.SummaryImages() & key else None
    if pipe is not None:
        summary_rel = pipe.SummaryImages.Average() * pipe.SummaryImages.Correlation() & key
        images, channels = summary_rel.fetch('{}_image'.format(which), 'channel')
        # Place each channel's image into RGB plane 2 - channel
        # (channel 1 -> green plane, channel 2 -> red plane).
        composite = np.zeros([*images[0].shape, 3])
        for image, channel in zip(images, channels):
            composite[..., 2 - channel] = image
        # Min-max normalize to [0, 1] for display.
        composite = (composite - composite.min()) / (composite.max() - composite.min())
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.imshow(composite, origin='lower', interpolation='lanczos')
        ax.set_title('{} image'.format(which.capitalize()))
        ax.axis('off')
        figure = mpld3.fig_to_html(fig)
    else:
        figure = None
        flash('Could not find images for {}'.format(key))
    return render_template('figure.html', figure=figure)
@main.route('/traces/<animal_id>/<session>/<scan_idx>/<field>/<pipe_version>/'
            '<segmentation_method>/<spike_method>')
def traces(animal_id, session, scan_idx, field, pipe_version, segmentation_method,
           spike_method):
    """Plot deconvolved activity traces (up to 25 cells) over a ~1-minute window."""
    key = {'animal_id': animal_id, 'session': session, 'scan_idx': scan_idx,
           'field': field, 'pipe_version': pipe_version, 'channel': request.args['channel'],
           'segmentation_method': segmentation_method, 'spike_method': spike_method}
    pipe = reso if reso.Activity() & key else meso if meso.Activity() & key else None
    if pipe is not None:
        traces = np.stack((pipe.Activity.Trace() & key).fetch('trace', limit=25))
        # Scale each timepoint by its across-cell variance-to-mean ratio.
        f = traces.var(ddof=1, axis=0, keepdims=True) / traces.mean(axis=0, keepdims=True)
        traces /= f
        fps = (pipe.ScanInfo() & key).fetch1('fps')
        # Keep a 60-second window (30 s each side) centered on the recording.
        middle_point = traces.shape[-1] / 2
        traces = traces[:, max(0, int(middle_point - 30 * fps)): int(middle_point + 30 * fps)]
        x_axis = np.arange(traces.shape[-1]) / fps
        # Vertical offset between traces: the largest single-trace amplitude.
        box_height = np.max(traces.max(axis=1) - traces.min(axis=1))
        fig, ax = plt.subplots(figsize=(12, 12))
        # NOTE(review): title says 20 cells but the fetch above uses limit=25 — confirm.
        ax.set_title('Deconvolved activity for 20 cells during one minute')
        for i, trace in enumerate(traces):
            ax.plot(x_axis, i * box_height + trace, '-k')
        ax.set_xlabel('Time (secs)')
        ax.set_yticks([])
        ax.axis('tight')
        figure = mpld3.fig_to_html(fig)
    else:
        figure = None
        flash('Could not find activity traces for {}'.format(key))
    return render_template('figure.html', figure=figure)
@main.route('/tmp/<path:filename>')
def tmpfile(filename):
    """Serve a previously rendered file (e.g. a schema SVG) from /tmp/."""
    tmp_dir = '/tmp/'
    return send_from_directory(tmp_dir, filename)
@main.route('/schema/', defaults={'schema': 'experiment', 'table': 'Scan', 'subtable': None},
            methods=['GET', 'POST'])
@main.route('/schema/<schema>/<table>', defaults={'subtable': None}, methods=['GET', 'POST'])
@main.route('/schema/<schema>/<table>/<subtable>', methods=['GET', 'POST'])
def relation(schema, table, subtable):
    """Browse one relation: draw its local dependency graph and list its rows.

    Renders an SVG (via graphviz, saved under /tmp and served by the /tmp
    route) showing the relation with its direct parents and children, plus a
    table of up to 25 rows, optionally restricted on POST.
    """
    graph_attr = {'size': '12, 12', 'rankdir': 'LR', 'splines': 'ortho'}
    node_attr = {'style': 'filled', 'shape': 'note', 'align': 'left', 'ranksep': '0.1',
                 'fontsize': '10', 'fontfamily': 'opensans', 'height': '0.2',
                 'fontname': 'Sans-Serif'}
    dot = graphviz.Digraph(graph_attr=graph_attr, node_attr=node_attr, engine='dot',
                           format='svg')

    def add_node(name, node_attr={}):
        """ Add a node/table to the current graph (adding subgraphs if needed). """
        # NOTE(review): this node_attr parameter shadows the outer styling dict
        # and uses a mutable default. It is only read here, never mutated, so no
        # state leaks between calls, but `node_attr=None` with a fallback would
        # be safer — confirm the shadowing is intentional.
        table_names = dict(zip(['schema', 'table', 'subtable'], name.split('.')))
        graph_attr = {'color': 'grey80', 'style': 'filled', 'label': table_names['schema']}
        # Group nodes belonging to the same schema in one cluster subgraph.
        with dot.subgraph(name='cluster_{}'.format(table_names['schema']), node_attr=node_attr,
                          graph_attr=graph_attr) as subgraph:
            subgraph.node(name, label=name, URL=url_for('main.relation', **table_names),
                          target='_top', **node_attr)
        return name

    def name_lookup(full_name):
        """ Look for a table's class name given its full name. """
        pretty_name = dj.table.lookup_class_name(full_name, schemata.__dict__)
        return pretty_name or full_name

    root_rel = getattr(getattr(schemata, schema), table)
    root_rel = root_rel if subtable is None else getattr(root_rel, subtable)
    root_dependencies = root_rel.connection.dependencies
    root_dependencies.load()
    # Fill color per DataJoint tier (manual/computed/lookup/imported/part).
    node_attrs = {dj.Manual: {'fillcolor': 'green3'}, dj.Computed: {'fillcolor': 'coral1'},
                  dj.Lookup: {'fillcolor': 'azure3'}, dj.Imported: {'fillcolor': 'cornflowerblue'},
                  dj.Part: {'fillcolor': 'azure3', 'fontsize': '8'}}
    root_name = root_rel().full_table_name
    root_id = add_node(name_lookup(root_name), node_attrs[dj.erd._get_tier(root_name)])
    # Parents of the root relation (incoming dependency edges).
    for node_name, _ in root_dependencies.in_edges(root_name):
        if dj.erd._get_tier(node_name) is dj.erd._AliasNode:  # renamed attribute
            node_name = list(root_dependencies.in_edges(node_name))[0][0]
        node_id = add_node(name_lookup(node_name), node_attrs[dj.erd._get_tier(node_name)])
        dot.edge(node_id, root_id)
    # Children of the root relation (outgoing dependency edges).
    for _, node_name in root_dependencies.out_edges(root_name):
        if dj.erd._get_tier(node_name) is dj.erd._AliasNode:  # renamed attribute
            node_name = list(root_dependencies.out_edges(node_name))[0][1]
        node_id = add_node(name_lookup(node_name), node_attrs[dj.erd._get_tier(node_name)])
        dot.edge(root_id, node_id)
    # Random filename so concurrent requests don't overwrite each other.
    filename = uuid.uuid4()
    dot.render('/tmp/{}'.format(filename))
    form = forms.RestrictionForm(request.form)
    if request.method == 'POST' and form.validate():
        root_rel = root_rel() & form['restriction'].data
    else:
        root_rel = root_rel()
    table = tables.create_datajoint_table(root_rel, limit=25)
    return render_template('schema.html', filename='{}.svg'.format(filename), table=table,
                           form=form)
@main.route('/tracking/<animal_id>/<session>/<scan_idx>', methods=['GET', 'POST'])
def tracking(animal_id, session, scan_idx):
    """Show eye-camera preview frames for a scan as the basis of a tracking task."""
    form = forms.TrackingForm(request.form)
    if request.method == 'POST' and form.validate():
        # TODO: Process input
        pass
    key = {'animal_id': animal_id, 'session': session, 'scan_idx': scan_idx}
    if pupil.Eye() & key:
        preview_frames = (pupil.Eye() & key).fetch1('preview_frames')
        # 4x4 shared-axis grid; the transpose iterates over the last axis,
        # which indexes individual frames in the stored stack.
        fig, axes = plt.subplots(4, 4, figsize=(10, 8), sharex=True, sharey=True)
        for ax, frame in zip(axes.ravel(), preview_frames.transpose([2, 0, 1])):
            ax.imshow(frame, cmap='gray', interpolation='lanczos')
            ax.axis('off')
            ax.set_aspect(1)
        # mpld3.plugins.connect(fig, mpld3.plugins.LinkedBrush([]))
        figure = mpld3.fig_to_html(fig)
    else:
        figure = None
        flash('Could not find eye frames for {}'.format(key))
    return render_template('trackingtask.html', form=form, figure=figure)
@main.route('/report/', methods=['GET', 'POST'])
def report():
    """Dispatch one form to the scan- or mouse-level report, as HTML or PDF."""
    form = forms.ReportForm(request.form)
    if request.method == 'POST' and form.validate():
        # A scan report needs both session and scan_idx; otherwise report on the mouse.
        if form.session.data and form.scan_idx.data:
            report_type = 'scan'
        else:
            report_type = 'mouse'
        suffix = '_pdf' if form.as_pdf.data else ''
        endpoint = 'main.{}report{}'.format(report_type, suffix)
        return redirect(url_for(endpoint, animal_id=form.animal_id.data,
                                session=form.session.data, scan_idx=form.scan_idx.data))
    return render_template('report.html', form=form)
@main.route('/report/scan/<int:animal_id>-<int:session>-<int:scan_idx>')
def scanreport(animal_id, session, scan_idx):
    """Render the full report page for one scan.

    Collects per-field figure keys, availability flags for optional analyses,
    session notes, and a soma-count table, then renders scan_report.html.
    Redirects back to the report form if the scan is in neither reso nor meso.
    """
    key = {'animal_id': animal_id, 'session': session, 'scan_idx': scan_idx}
    pipe = reso if reso.ScanInfo() & key else meso if meso.ScanInfo() & key else None
    if pipe is not None:
        pxori_keys = (tune.PixelwiseOri() & key).fetch('KEY', order_by='field')
        quality_keys = (pipe.Quality.Contrast() & key).fetch('KEY', order_by='field')
        oracletime_keys = (tune.MovieOracleTimeCourse() & key).fetch('KEY', order_by='field')
        # Availability flags toggle optional sections in the template.
        has_ori = bool(tune.Ori() & key)
        has_xsnr = bool(xcorr.XNR() & key)
        has_sta = bool(tune.STA() & key)
        has_staqual = bool(tune.STAQual() & key)
        has_staext = bool(tune.STAExtent() & key)
        has_eye = bool(pupil.Eye() & key)
        has_eyetrack = bool(pupil.FittedContour() & key)
        # Per field/channel combination: which image figures can be shown.
        image_keys = []
        channels = shared.Channel() & 'channel <= {}'.format((pipe.ScanInfo() & key).fetch1('nchannels'))
        for field_key in (pipe.ScanInfo.Field() * channels & key).fetch('KEY'):
            field_key['has_summary'] = bool(pipe.SummaryImages() & field_key)
            field_key['has_oracle'] = bool(tune.OracleMap() & field_key)
            field_key['has_cos2map'] = bool(tune.Cos2Map() * tune.CaMovie() & field_key)
            image_keys.append(field_key)
        # Drop field/channel combinations with nothing to display.
        image_keys = list(filter(lambda k: k['has_summary'] or k['has_oracle'] or k['has_cos2map'], image_keys))
        craniotomy_notes, session_notes = (experiment.Session() & key).fetch1('craniotomy_notes', 'session_notes')
        craniotomy_notes, session_notes = craniotomy_notes.strip(), session_notes.strip()
        # Soma counts per field (segmentation_method 6), plus a grand-total row.
        somas = pipe.MaskClassification.Type() & {'type': 'soma'}
        scan_somas = pipe.ScanSet.Unit() * pipe.ScanSet.UnitInfo() & {**key, 'segmentation_method': 6} & somas
        somas_per_field = pipe.ScanSet().aggr(scan_somas, avg_z='ROUND(AVG(um_z))', num_somas='count(*)')
        fields, num_somas, depths = somas_per_field.fetch('field', 'num_somas', 'avg_z')
        items = [{'field': f, 'somas': s, 'depth': z} for f, s, z in zip(fields, num_somas, depths)]
        items.append({'field': 'ALL', 'somas': sum(num_somas), 'depth': '-'})
        stats_table = tables.StatsTable(items)
        has_registration_over_time = bool(stack.RegistrationOverTime() & {'animal_id': animal_id,
                                                                          'scan_session': session})
        return render_template('scan_report.html', animal_id=animal_id, session=session, scan_idx=scan_idx,
                               craniotomy_notes=craniotomy_notes, session_notes=session_notes,
                               stats_table=stats_table, has_ori=has_ori, has_xsnr=has_xsnr, has_sta=has_sta,
                               has_staqual=has_staqual, has_staext=has_staext, image_keys=image_keys,
                               has_eye=has_eye, has_eyetrack=has_eyetrack, pxori_keys=pxori_keys,
                               quality_keys=quality_keys, oracletime_keys=oracletime_keys,
                               has_registration_over_time=has_registration_over_time)
    else:
        flash('{} is not in reso or meso'.format(key))
        return redirect(url_for('main.report'))
@main.route('/report/mouse/<int:animal_id>')
def mousereport(animal_id):
    """Render the mouse-level report: scan hours, stimulus time, soma stats, flags.

    Aggregates recording time per setup, stimulus display time per stimulus
    type, scan/soma summary tables, and boolean availability flags for the
    optional report sections, then renders mouse_report.html.
    """
    key = dict(animal_id=animal_id)
    auto = experiment.AutoProcessing() & key
    # Total recorded time per setup, formatted as "HHh MMm SSs" in SQL.
    meso_scanh = mice.Mice().aggr(meso.ScanInfo() & dict(animal_id=animal_id),
                                  time="TIME_FORMAT(SEC_TO_TIME(sum(nframes / fps)),'%%Hh %%im %%Ss')",
                                  setup="'meso'")
    # Stimulus display time per scan; each stimulus type stores its duration
    # in a different attribute, hence the paired lists below.
    stim_time = [dj.U('animal_id', 'session', 'scan_idx', 'stimulus_type').aggr(
        stim * stimulus.Condition() * stimulus.Trial() & key,
        time="TIME_FORMAT(SEC_TO_TIME(sum({})),'%%Hh %%im %%Ss')".format(duration_field))
        for duration_field, stim in zip(['cut_after', 'ori_on_secs + ori_off_secs', 'duration', 'duration'],
                                        [stimulus.Clip(), stimulus.Monet(),
                                         stimulus.Monet2(), stimulus.Varma()])
    ]

    def in_auto_proc(k):
        """Whether key *k* is queued in AutoProcessing (marks rows in the table)."""
        return bool(experiment.AutoProcessing() & k)

    stim_time = tables.create_datajoint_table(stim_time,
                                              check_funcs=dict(autoprocessing=in_auto_proc))
    reso_scanh = mice.Mice().aggr(reso.ScanInfo() & dict(animal_id=animal_id),
                                  time="TIME_FORMAT(SEC_TO_TIME(sum(nframes / fps)),'%%Hh %%im %%Ss')",
                                  setup="'reso'")
    scanh = tables.create_datajoint_table([reso_scanh, meso_scanh])
    scans = tables.create_datajoint_table(
        (experiment.Scan() & auto), selection=['session', 'scan_idx', 'lens', 'depth', 'site_number', 'scan_ts']
    )
    scaninfo = tables.create_datajoint_table(
        [(pipe.ScanInfo() & auto) for pipe in [reso, meso]],
        selection=['nfields', 'fps', 'scan_idx', 'session', 'nframes', 'nchannels', 'usecs_per_line']
    )
    # --- orientation statistics per stack
    df1 = pd.DataFrame((stack.StackSet.Match() & key).proj('munit_id', session='scan_session').fetch())
    df2 = pd.DataFrame((tune.Ori.Cell() & key).fetch())
    df = df1.merge(df2)
    # Row (index label) of the most selective entry per matched unit.
    idx = df.groupby(['animal_id', 'stack_session', 'stack_idx', 'munit_id', 'ori_type', 'stimulus_type'])['selectivity'].idxmax()
    # .loc replaces the deprecated .ix indexer (removed in pandas >= 1.0);
    # idxmax returns index labels, which .loc resolves correctly.
    df3 = df.loc[idx]
    gr = df3.groupby(['animal_id', 'stack_session', 'stimulus_type', 'ori_type'])
    # NOTE(review): this aggregated frame is never passed to the template —
    # confirm whether it was meant to be rendered.
    df3 = gr.agg(dict(r2=lambda x: np.mean(x > 0.01)*100)).reset_index().rename(columns={'r2': '% cells above'})
    stats = tables.create_datajoint_table([experiment.Scan().aggr(
        pipe.ScanSet.Unit() * pipe.ScanSet.UnitInfo() * pipe.MaskClassification.Type() & auto & dict(type='soma'),
        somas='count(*)', scan_type='"{}"'.format(pipe.__name__)) for pipe in [reso, meso]],
        selection=['scan_type', 'session', 'scan_idx', 'somas'])
    # Grand total across both pipelines as a synthetic last row.
    stats.items.append(dict(scan_type='', session='ALL', scan_idx='ALL', somas=sum([e['somas'] for e in stats.items])))
    # Availability flags for the optional report sections.
    scan_movie_oracle = bool(tune.MovieOracle() & key)
    mouse_per_stack_oracle = bool(stack.StackSet() * tune.MovieOracle() & key)
    cell_matches = bool(stack.StackSet() & key)
    stack_ori = bool(stack.StackSet() * tune.Ori() & key)
    stack_rf = bool(stack.StackSet() * tune.STAQual() & key)
    kuiper = bool(tune.Kuiper() & key)
    cell_counts = tables.create_datajoint_table(
        (stack.StackSet() & key).aggr(stack.StackSet.Unit(), unique_neurons='count(*)'))
    return render_template('mouse_report.html', animal_id=animal_id, scans=scans,
                           scaninfo=scaninfo, stats=stats, scanh=scanh,
                           stim_time=stim_time,
                           scan_movie_oracle=scan_movie_oracle, mouse_per_stack_oracle=mouse_per_stack_oracle,
                           cell_matches=cell_matches, cell_counts=cell_counts,
                           stack_ori=stack_ori, stack_rf=stack_rf, kuiper=kuiper)
@main.route('/report/scan/<int:animal_id>-<int:session>-<int:scan_idx>.pdf')
def scanreport_pdf(animal_id, session, scan_idx):
    """Render the scan report page as a PDF using the site stylesheets."""
    html = scanreport(animal_id=animal_id, session=session, scan_idx=scan_idx)
    css_names = ['styles.css', 'datajoint.css']
    stylesheets = [CSS(url_for('static', filename=name)) for name in css_names]
    return render_pdf(HTML(string=html), stylesheets=stylesheets)
@main.route('/report/mouse/<int:animal_id>.pdf')
def mousereport_pdf(animal_id):
    """Render the mouse report page as a PDF using the site stylesheets."""
    html = mousereport(animal_id=animal_id)
    css_names = ['styles.css', 'datajoint.css']
    stylesheets = [CSS(url_for('static', filename=name)) for name in css_names]
    return render_pdf(HTML(string=html), stylesheets=stylesheets)
@main.route('/surgery', methods=['GET', 'POST'])
def surgery():
    """Record a new surgery for an animal.

    On a valid POST, computes the next surgery_id for the animal, inserts one
    Surgery row plus a blank SurgeryStatus row, and reports any IntegrityError
    to the user. On GET, prefills the form with the session user and today's
    date.
    """
    form = forms.SurgeryForm(request.form)
    if request.method == 'POST' and form.validate():
        animal_id_tuple = {'animal_id': form['animal_id'].data}
        new_surgery_id = 1  # Surgery ID is a unique integer that starts at 1 and functions as a primary key
        if experiment.Surgery.proj() & animal_id_tuple:
            # If the animal_id already has a surgery, add 1 to max surgery_id and use that number as ID
            new_surgery_id = 1 + (experiment.Surgery & animal_id_tuple).fetch('surgery_id',
                                                                              order_by='surgery_id DESC',
                                                                              limit=1)[0]
        # Creating key to insert into Surgery. Status_tuple is used for SurgeryStatus
        tuple_ = {'animal_id': form['animal_id'].data, 'surgery_id': new_surgery_id,
                  'date': str(form['date'].data), 'time': str(form['time_input'].data),
                  'username': form['user'].data, 'surgery_outcome': form['outcome'].data,
                  'surgery_quality': form['surgery_quality'].data, 'surgery_type': form['surgery_type'].data,
                  'weight': form['weight'].data, 'ketoprofen': form['ketoprofen'].data, 'mouse_room': form['room'].data,
                  'surgery_notes': form['notes'].data}
        status_tuple_ = {'animal_id': tuple_['animal_id'], 'surgery_id': tuple_['surgery_id'], 'checkup_notes': ''}
        if not experiment.Surgery.proj() & tuple_:
            try:
                experiment.Surgery.insert1(tuple_)
                experiment.SurgeryStatus.insert1(status_tuple_)
                flash('Inserted record for animal {}'.format(tuple_['animal_id']))
            except IntegrityError as ex:
                ex_message = "Error: Key value not allowed. More information below."
                details = str(ex.args)
                flash(ex_message)
                flash(details)
        else:
            flash('Record already exists.')
    # If the method is not POST, set the username choice to session user and date to today.
    # This is to make sure choices are not changed during insertion
    else:
        # Date default is set in view.py because form.py does not run datetime.today() on each view
        form['date'].data = datetime.today()
        if 'user' in session:
            form['user'].data = session['user']
    return render_template('surgery.html', form=form, current_date=datetime.today())
@main.route('/surgery/status', methods=['GET', 'POST'])
def surgery_status():
    """List the latest checkup status of recent survival surgeries."""
    # Any surgeries newer than below date are fetched for display
    date_res = (datetime.today() - timedelta(days=8)).strftime("%Y-%m-%d")
    restriction = 'surgery_outcome = "Survival" and date > "{}"'.format(date_res)
    new_surgeries = []
    for status_key in (experiment.Surgery & restriction).fetch(order_by='date DESC'):
        if len(experiment.SurgeryStatus & status_key) > 0:
            # Keep only the most recent status row per surgery.
            new_surgeries.append(((experiment.SurgeryStatus & status_key) * experiment.Surgery).fetch(order_by="timestamp DESC")[0])
    table = tables.SurgeryStatusTable(new_surgeries)
    return render_template('surgery_status.html', table=table)
@main.route('/surgery/update/<animal_id>/<surgery_id>', methods=['GET', 'POST'])
def surgery_update(animal_id, surgery_id):
    """Record a new checkup status for one surgery.

    On POST, inserts a fresh SurgeryStatus row (day checks, euthanized flag,
    notes) and redirects to the status list. On GET, shows the edit form
    prefilled with the most recent status, or a 404 page if none exists.
    """
    key = {'animal_id': animal_id, 'surgery_id': surgery_id}
    form = forms.SurgeryEditStatusForm(request.form)
    if request.method == 'POST':
        tuple_ = {'animal_id': form['animal_id'].data, 'surgery_id': form['surgery_id'].data,
                  'day_one': int(form['dayone_check'].data), 'day_two': int(form['daytwo_check'].data),
                  'day_three': int(form['daythree_check'].data),
                  'euthanized': int(form['euthanized_check'].data), 'checkup_notes': form['notes'].data}
        try:
            experiment.SurgeryStatus.insert1(tuple_)
            flash("Surgery status for animal {} on date {} updated.".format(animal_id, form['date_field'].data))
        except IntegrityError as ex:
            ex_message = "Error: Key value not allowed. More information below."
            details = str(ex.args)
            flash(ex_message)
            flash(details)
        return redirect(url_for('main.surgery_status'))
    if len(experiment.SurgeryStatus & key) > 0:
        # Latest status row wins; timestamps order the history.
        data = ((experiment.SurgeryStatus & key) * experiment.Surgery).fetch(order_by='timestamp DESC')[0]
        return render_template('surgery_edit_status.html', form=form, animal_id=data['animal_id'], surgery_id=data['surgery_id'],
                               date=data['date'], day_one=bool(data['day_one']), day_two=bool(data['day_two']),
                               day_three=bool(data['day_three']), euthanized=bool(data['euthanized']),
                               notes=data['checkup_notes'])
    else:
        return render_template('404.html')
@main.route('/api/v1/surgery/notification', methods=['GET'])
def surgery_notification():
    """Send Slack reminders for surgery checkups that are still pending.

    For every survival surgery performed 1-3 days ago whose checkup for the
    corresponding day is unchecked (and the animal not euthanized), posts a
    reminder to the surgery channel, the lab manager, and the surgeon.
    """
    # Sends notifications to specified slack channel, surgeon, and lab manager about any checkups that need to be done
    num_to_word = {1: 'one', 2: 'two', 3: 'three'}  # Used to figure out which column to look up for checkup date
    # Define all Slack notification variables
    slack_notification_channel = "#surgery_reminders"
    slack_manager = "camila"
    slacktable = dj.create_virtual_module('pipeline_notification', 'pipeline_notification')
    domain, api_key = slacktable.SlackConnection.fetch1('domain', 'api_key')
    slack = Slacker(api_key, timeout=60)
    # Only fetch surgeries done 1 to 3 days ago
    lessthan_date_res = (datetime.today()).strftime("%Y-%m-%d")
    greaterthan_date_res = (datetime.today() - timedelta(days=4)).strftime("%Y-%m-%d")
    restriction = 'surgery_outcome = "Survival" and date < "{}" and date > "{}"'.format(lessthan_date_res,
                                                                                        greaterthan_date_res)
    surgery_data = (experiment.Surgery & restriction).fetch(order_by='date DESC')
    for entry in surgery_data:
        # Most recent status row decides whether a reminder is still needed.
        status = (experiment.SurgeryStatus & entry).fetch(order_by="timestamp DESC")[0]
        # Column name (day_one/day_two/day_three) for how many days ago the surgery was.
        day_key = "day_" + num_to_word[(datetime.today().date() - entry['date']).days]
        edit_url = "<{}|Update Status Here>".format(url_for('main.surgery_update',
                                                            _external=True,
                                                            animal_id=entry['animal_id'],
                                                            surgery_id=entry['surgery_id']))
        if status['euthanized'] == 0 and status[day_key] == 0:
            manager_message = "{} needs to check animal {} in room {} for surgery on {}. {}".format(
                entry['username'].title(),
                entry['animal_id'],
                entry['mouse_room'],
                entry['date'],
                edit_url)
            ch_message = "<!channel> Reminder: " + manager_message
            slack.chat.post_message("@" + slack_manager, manager_message)
            slack.chat.post_message(slack_notification_channel, ch_message)
            if len(slacktable.SlackUser & entry) > 0:
                # NOTE(review): fetch('slack_user') likely returns an array rather
                # than a string, so "@" + slackname below may not produce a plain
                # handle — fetch1('slack_user') looks intended; confirm against
                # DataJoint's fetch semantics.
                slackname = (slacktable.SlackUser & entry).fetch('slack_user')
                pm_message = "Don't forget to check on animal {} today! {}".format(entry['animal_id'], edit_url)
                slack.chat.post_message("@" + slackname, pm_message, as_user=True)
    return '', http.HTTPStatus.NO_CONTENT
@main.route('/api/v1/surgery/spawn_missing_data', methods=['GET'])
def surgery_spawn_missing_data():
    """Backfill SurgeryStatus with one row for every Surgery entry that lacks one."""
    missing = experiment.Surgery - experiment.SurgeryStatus
    if len(missing) > 0:
        # proj() keeps only primary-key attributes for the new status rows.
        for entry in missing.proj().fetch():
            experiment.SurgeryStatus.insert1(entry)
    return '', http.HTTPStatus.NO_CONTENT
| 51.026786 | 132 | 0.614115 |
f4462fa53edb378c98ed0c2de5dcfc0e60665ce5 | 414 | py | Python | stdplugins/ping1.py | ppppspsljdhdd/Pepe | 1e57825ddb0ab3ba15a19cad0ecfbf2622f6b851 | [
"Apache-2.0"
] | null | null | null | stdplugins/ping1.py | ppppspsljdhdd/Pepe | 1e57825ddb0ab3ba15a19cad0ecfbf2622f6b851 | [
"Apache-2.0"
] | null | null | null | stdplugins/ping1.py | ppppspsljdhdd/Pepe | 1e57825ddb0ab3ba15a19cad0ecfbf2622f6b851 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from uniborg.util import admin_cmd, edit_or_reply
@borg.on(admin_cmd(pattern="ping", allow_sudo=True))
async def _(event):
    """Reply to .ping with the round-trip time of one message edit, in ms."""
    if event.fwd_from:
        # Ignore forwarded copies of the command.
        return
    e = await edit_or_reply(event, "Poooong!")
    start = datetime.now()
    await e.edit("Pong!")
    end = datetime.now()
    # total_seconds() covers the whole delta; the original `.microseconds / 1000`
    # only reported the sub-second fraction and under-counted any round trip
    # longer than one second.
    ms = (end - start).total_seconds() * 1000
    await e.edit("**Pong!**\n`{}` `ms`".format(ms))
| 25.875 | 52 | 0.652174 |
4a4bd36b1caa94127b5a789de77f9e023e1e913b | 1,863 | py | Python | lib/stacks/pocket_to_kindle/lambdas/create_doc.py | shaftoe/api-l3x-in | 06426f62708051b570e8839398562982d770903f | [
"Apache-2.0"
] | 11 | 2020-03-01T15:24:09.000Z | 2022-01-06T08:31:31.000Z | lib/stacks/pocket_to_kindle/lambdas/create_doc.py | shaftoe/api-l3x-in | 06426f62708051b570e8839398562982d770903f | [
"Apache-2.0"
] | 1 | 2020-08-28T15:25:39.000Z | 2020-08-30T07:35:59.000Z | lib/stacks/pocket_to_kindle/lambdas/create_doc.py | shaftoe/api-l3x-in | 06426f62708051b570e8839398562982d770903f | [
"Apache-2.0"
] | 5 | 2020-10-15T03:06:37.000Z | 2021-09-29T07:07:18.000Z | """Lambda pocket-to-kindle create_doc."""
from datetime import datetime
from os import environ as env
from uuid import uuid4
import utils
import utils.aws as aws
import utils.handlers as handlers
import utils.helpers as helpers
def create_doc(event: utils.LambdaEvent) -> str:
    """Build a readability-cleaned HTML file from ``event["url"]`` and store it in S3.

    Downloads the page, extracts its main content with `readability`, writes the
    HTML to the DOCUMENT_BUCKET under a date-based key, and returns a success
    string containing the S3 URL.

    :raises utils.HandledError: if the download does not return HTTP 200.
    """
    utils.Log.info("Fetch content from %s", event["url"])
    requests = helpers.import_non_stdlib_module("requests")
    response = requests.get(url=event["url"])
    if not response.status_code == 200:
        # Fixed typo: the original read event["ur"], which raised KeyError while
        # formatting this error message instead of reporting the failed URL.
        raise utils.HandledError("Error downloading %s: "
                                 "HTTP status code %d" % (event["url"], response.status_code),
                                 status_code=response.status_code)
    utils.Log.info("Create readability-clean HTML text from %s source", event["url"])
    readability = helpers.import_non_stdlib_module("readability")
    doc = readability.Document(response.text)
    utils.Log.debug("Document title:\n%s", doc.title())
    utils.Log.debug("Document readability-cleaned content:\n%s", doc.summary())
    now = datetime.utcnow()
    # Prefer a stable name tied to the Pocket item id; fall back to a random one.
    file_name = f"pocket-{event['item_id']}" if "item_id" in event else uuid4()
    key_name = now.strftime(f"%Y/%m/%d/{file_name}.html")
    aws.put_object_to_s3_bucket(key=key_name, bucket=env["DOCUMENT_BUCKET"],
                                body=bytes(doc.summary(), encoding="utf-8"))
    file_url = f"s3://{env['DOCUMENT_BUCKET']}/{key_name}"
    utils.Log.info("File %s created successfully", file_url)
    return f"success: {file_url}"
def handler(event, context) -> utils.Response:
    """Lambda entry point: wrap the raw event/context and delegate to create_doc."""
    return handlers.EventHandler(
        name="pocket_create_doc",
        event=utils.LambdaEvent(event),
        context=utils.LambdaContext(context),
        action=create_doc,
    ).response
| 35.150943 | 93 | 0.66613 |
645f50bf2d9e8b2ea20480e5abb594e74fe986e4 | 977 | py | Python | userbot/plugins/eye.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/eye.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/eye.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | """COMMAND : .eye"""
import asyncio
from userbot.utils import lightning_cmd
@borg.on(lightning_cmd(pattern="eye"))
async def _(event):
    """Animate the .eye command by repeatedly editing the message through frames."""
    if event.fwd_from:
        # Ignore forwarded copies of the command.
        return
    animation_interval = 3  # seconds between frame edits
    animation_ttl = range(0, 103)
    # input_str = event.pattern_match.group(1)
    # if input_str == "eye":
    await event.edit("👁👁")
    animation_chars = [
        "👁👁\n 👄 =====> Abey Ja Na Gandu",
        "👁👁\n 👅 =====> Abey Ja Na Madarchod",
        "👁👁\n 💋 =====> Abey Ja Na Randi",
        "👁👁\n 👄 =====> Abey Ja Na Betichod",
        "👁👁\n 👅 =====> Abey Ja Na Behenchod",
        "👁👁\n 💋 =====> Abey Ja Na Na Mard",
        "👁👁\n 👄 =====> Abey Ja Na Randi",
        "👁👁\n 👅 =====> Abey Ja Na Bhosdk",
        "👁👁\n 💋 =====> Abey Ja Na Chutiye",
        "👁👁\n 👄 =====> Hi All, How Are You Guys...",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Wrap on the actual frame count. The original used `i % 103`, which
        # equals i for the whole range and raised IndexError as soon as i
        # reached len(animation_chars) == 10.
        await event.edit(animation_chars[i % len(animation_chars)])
| 22.204545 | 54 | 0.497441 |
b3eb933882c136a120872b49c3f79c05b1bae414 | 502 | py | Python | readthedocs/projects/migrations/0008_add_subproject_alias_prefix.py | kennethlarsen/readthedocs.org | 735d630d83f79ae24772d10e66fd35b8f5675a30 | [
"MIT"
] | 2 | 2018-01-14T14:04:00.000Z | 2021-02-07T19:25:45.000Z | readthedocs/projects/migrations/0008_add_subproject_alias_prefix.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | [
"MIT"
] | 4 | 2021-03-31T20:17:21.000Z | 2021-12-13T20:49:19.000Z | readthedocs/projects/migrations/0008_add_subproject_alias_prefix.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | [
"MIT"
] | 6 | 2019-02-13T16:08:41.000Z | 2020-03-12T14:17:14.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a nullable ``alias`` CharField to ``ProjectRelationship``.

    Part of the subproject-aliasing feature: the alias is the URL prefix a
    subproject is served under. Declarative Django migration — the
    dependency/operation lists are recorded schema state and must not be
    reordered or edited after being applied.
    """

    # Must run after the canonical-data migration in the same app.
    dependencies = [
        ('projects', '0007_migrate_canonical_data'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectrelationship',
            name='alias',
            # Nullable + blank so existing relationships need no alias.
            field=models.CharField(max_length=255, null=True, verbose_name='Alias', blank=True),
        ),
    ]
961f6fdd6614152922cf65826ca5a843c601bbd2 | 5,028 | py | Python | test/functional/mempool_persist.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Cryptomiles Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, cryptomilesd will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transaction in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
import os
import time
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import *
class MempoolPersistTest(CryptomilesTestFramework):
    """Functional test for mempool persistence across node restarts.

    Exercises the default dump/reload of mempool.dat on shutdown/startup,
    the ``-persistmempool=0`` opt-out (both at dump time and load time),
    and the ``savemempool`` RPC (re-creation of a deleted mempool.dat and
    the error path when the file cannot be written). See the module
    docstring for the step-by-step scenario.
    """

    def set_test_params(self):
        # node1 opts out of persistence; node0 and node2 use the default
        # (persistent) behaviour.
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]

    def run_test(self):
        # Sanity-check the regtest chain the framework pre-mined for us.
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        # Transactions are sent node2 -> node2 so they never touch the
        # wallets of node0/node1 and only exist in their mempools.
        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        self.sync_all()

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
        self.stop_nodes()
        self.start_node(0)
        self.start_node(1)
        # Give cryptomilesd a second to reload the mempool
        # (mempool loading happens asynchronously after RPC comes up).
        time.sleep(1)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
        # node1 ran with -persistmempool=0, so nothing was dumped to disk.
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        # Give cryptomilesd a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        # -persistmempool=0 must not have clobbered the previously-saved
        # mempool.dat: restarting with defaults loads the original 5 txs.
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

        mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
        mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

        self.log.debug("Prevent cryptomilesd from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are setting bad permissions on a tmp file called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        # Create the .new file with mode 0o000 (no permissions) so the
        # node's rename-into-place dump cannot write it.
        with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
            pass
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.remove(mempooldotnew1)
os.remove(mempooldotnew1)
# Script entry point: run the functional test via the framework harness.
if __name__ == '__main__':
    MempoolPersistTest().main()
| 44.892857 | 127 | 0.697295 |
3719593c5188d20abf8cb97151530faada455fdb | 40 | py | Python | olea/packages/pypat/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/packages/pypat/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/packages/pypat/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | __all__ = ['Pat']
from .pat import Pat
| 10 | 20 | 0.65 |
a8ecb1ff45fa1a3d0943246ae09c3edf85c3292e | 369 | py | Python | ntc_rosetta/translators/openconfig/ios/openconfig_if_ethernet/ethernet.py | steinzi/ntc-rosetta | fac7086c460a2c68c769879930b74c05ed89b1f4 | [
"Apache-2.0"
] | 95 | 2019-05-29T13:04:12.000Z | 2022-03-11T20:38:13.000Z | ntc_rosetta/translators/openconfig/ios/openconfig_if_ethernet/ethernet.py | tbotnz/ntc-rosetta | fac7086c460a2c68c769879930b74c05ed89b1f4 | [
"Apache-2.0"
] | 35 | 2019-05-30T03:58:34.000Z | 2021-03-03T15:30:05.000Z | ntc_rosetta/translators/openconfig/ios/openconfig_if_ethernet/ethernet.py | tbotnz/ntc-rosetta | fac7086c460a2c68c769879930b74c05ed89b1f4 | [
"Apache-2.0"
] | 28 | 2019-05-29T19:32:26.000Z | 2021-04-05T09:49:30.000Z | from ntc_rosetta.translators.openconfig.ios.openconfig_vlan import switched_vlan
from yangify.translator import Translator, TranslatorData
class Ethernet(Translator):
    """Translator anchored at an interface's openconfig-if-ethernet subtree."""

    class Yangify(TranslatorData):
        # YANG schema path this translator handles.
        path = "openconfig-interfaces:interfaces/interface/openconfig-if-ethernet:ethernet"

    # Delegate the switched-vlan child container to the shared
    # openconfig-vlan translator imported at module level.
    switched_vlan = switched_vlan.SwitchedVlan
| 28.384615 | 88 | 0.764228 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.