text
stringlengths 29
850k
|
|---|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This example demonstrates how to implement a cross moving average for the futures front contract
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="indicator" />
### <meta name="tag" content="futures" />
class EmaCrossFuturesFrontMonthAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013, 10, 8)
self.SetEndDate(2013, 10, 10)
self.SetCash(1000000)
future = self.AddFuture(Futures.Metals.Gold);
# Only consider the front month contract
# Update the universe once per day to improve performance
future.SetFilter(lambda x: x.FrontMonth().OnlyApplyFilterAtMarketOpen())
# Symbol of the current contract
self.symbol = None
# Create two exponential moving averages
self.fast = ExponentialMovingAverage(100)
self.slow = ExponentialMovingAverage(300)
self.tolerance = 0.001
self.consolidator = None
# Add a custom chart to track the EMA cross
chart = Chart('EMA Cross')
chart.AddSeries(Series('Fast', SeriesType.Line, 0))
chart.AddSeries(Series('Slow', SeriesType.Line, 0))
self.AddChart(chart)
def OnData(self,slice):
holding = None if self.symbol is None else self.Portfolio.get(self.symbol)
if holding is not None:
# Buy the futures' front contract when the fast EMA is above the slow one
if self.fast.Current.Value > self.slow.Current.Value * (1 + self.tolerance):
if not holding.Invested:
self.SetHoldings(self.symbol, .1)
self.PlotEma()
elif holding.Invested:
self.Liquidate(self.symbol)
self.PlotEma()
def OnSecuritiesChanged(self, changes):
if len(changes.RemovedSecurities) > 0:
# Remove the consolidator for the previous contract
# and reset the indicators
if self.symbol is not None and self.consolidator is not None:
self.SubscriptionManager.RemoveConsolidator(self.symbol, self.consolidator)
self.fast.Reset()
self.slow.Reset()
# We don't need to call Liquidate(_symbol),
# since its positions are liquidated because the contract has expired.
# Only one security will be added: the new front contract
self.symbol = changes.AddedSecurities[0].Symbol
# Create a new consolidator and register the indicators to it
self.consolidator = self.ResolveConsolidator(self.symbol, Resolution.Minute)
self.RegisterIndicator(self.symbol, self.fast, self.consolidator)
self.RegisterIndicator(self.symbol, self.slow, self.consolidator)
# Warm up the indicators
self.WarmUpIndicator(self.symbol, self.fast, Resolution.Minute)
self.WarmUpIndicator(self.symbol, self.slow, Resolution.Minute)
self.PlotEma()
def PlotEma(self):
self.Plot('EMA Cross', 'Fast', self.fast.Current.Value)
self.Plot('EMA Cross', 'Slow', self.slow.Current.Value)
|
When staying at Hotel Garza Canela, a great birding base in San Blas, Mexico, we found the hotel staff and other birders quite willing to give helpful tips on where to go looking for birds. One recommended area was Cerro de San Juan Ecological Reserve near Tepic. A trip to this area required a 1 1/2 hour drive to get there.
We had purchased a booklet with a map and directions to the ecological reserve, and it seemed that the trip would be straight forward, therefore, a few stops along the way to photograph birds did not seem an unlikely thing to do. The first birds that caught our attention were a pair of Black-throated Magpie Jays.
We felt no hesitation at wandering off the road to investigate these flamboyant birds, and soon realized that there was a White-throated Magpie Jay keeping the others company.
For quite a few minutes, Bob and I enjoyed the show put on by these birds.
and a diminutive Cinnamon Hummingbird.
Turkey Vultures were hanging out in the same place, and the Magpie Jays often shared the trees with these large scavengers.
About an hour into our drive, the sight of two Bronzed Cowbirds sitting on a bough bent with brilliant blossoms had us hopping from the car for a quick snapshot or two.
Once in Tepic, it was not easy to locate the road sign directing us in the direction of Miramar and hence the Reserve, so a couple of stops at service stations had us attempting to understand directions from staff who spoke only Spanish.
and the gradual climb into the mountains soon provided elevated views of the surrounding countryside through openings in the canopy of trees. Cerro de San Juan Ecological Reserve protects a healthy forest habitat in the mountains with vegetation at the lower elevations running heavy to thorn forest.
Our arrival at the Reserve was a little later than desired for the purposes of birding, but the surroundings were pleasant enough beneath sunny skies with dappled shade keeping us cool.
The road soon deteriorated to a narrow, winding surface riddled with potholes, so progress was slow. The best way to bird in the Reserve is to park the car and walk the road.
On one such stroll, Bob and I managed to espy a Green Jay.
Although this magnificent Green Jay kept to the dark shadows within the heavy foliage, it did afford a couple of good views before we walked on.
Long stretches of the gravel road were fully exposed to sunshine, and that is where we located a Berylline Hummingbird. For the most part, the bird sightings were pretty scarce due in large part to the late morning hour, 11 a.m.
After traveling about 3 kilometres up the mountainside, very nice views of the rich farmland on surrounding hillsides could be seen at many lookout points.
At Kilometre 5, we found ourselves in an open meadow habitat. This was at the highest point on the ridge.
Signs indicated that the property belonged to La Rancho Noria (La Noria Ranch).
We saw no evidence of inhabitants nor activity on the premises, but it was a logical place for Bob and me to alight from the car and do some exploring along the edge of the public roadway. There was ample space to pull our car off to the side of the road.
and darted over the meadow grass in pursuit of insects.
but it soon returned to perch in the shade at the perimeter of the open area.
and a pair of Audubon Orioles. We were beginning to think that our timing wasn’t so bad after all.
When Bob and I caught sight of a large black bird soaring on high, at first we thought it was a Bald Eagle; it appeared to have a strikingly white head.
As the bird drew nearer, we had to laugh at ourselves because it was simply a Common Raven transporting a large white egg back to its nest.
A sign on the opposite side of the road from La Noria Ranch drew our attention to a hiking trail that bisects the pine and oak forest. Having spent quite a lot of time driving on this particular day, Bob and I decided it would be a good idea to stretch our legs and get a closer look at the terrain.
We do a lot of hiking in the woods at home in Canada, so the prospect of getting out in the bush excited us. Compared to a lot of the trails that we had used thus far in Mexico, this one was in far better condition and well maintained.
Prepared with walking sticks, we set off on an adventure.
The lack of thick vegetation in the understory presented a welcoming landscape that allowed a cool breeze to flow through while permitting good views through the trees.
We were intrigued by some huge Agave plants growing at the side of the trail and remarked about the substantial size of the leaves.
the most interesting patterns on the leaves reminded me of some fine embossing at the hand of Mother Nature.
should they get too distracted by the panoramic view.
a Pine Flycatcher flitted about the Pine Trees alongside the trail.
Soon after, a curious Hermit Thrush came in close for a look at we two who quietly sauntered through its domain.
Back at the trailhead, our car sat waiting right where we had left it, alongside a signpost marking the way to Cuarenteno and Palapitas. Cuarenteno was a good distance further along the ridge…in my guess as far as we had come already within the Reserve…so Bob and I decided to retrace our route back to Tepic.
By the time we got back to the main highway, a dense fog had settled over the forest.
One last stop was made later that afternoon before returning to San Blas and that was at the orchards outside of La Bajada. We were looking forward to visiting that recommended habitat, as well. More on that in a subsequent post.
Thank you for that – it looked so beautiful. Did you feel safe driving around in Mexico?
Thanks very much, Anne. We did feel safe driving around Mexico, but I have to admit that, when driving across country from Zitacuaro to San Blas, we chose to use the toll highway as advised. It provides a more direct and dependable route plus traffic is lighter owing to the cost therefore there was less chance of coming into contact with unscrupulous or ill-intentioned people. Even driving around Zitacuaro and San Blas, we never felt at risk.
|
# From the recipe at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/573466
# A backwards compatible enhancement has been made to allow full access to registry types still through the dictionary metaphor
# converted to use _winreg instead of Win32 API from PyWin32
"""Slightly magical Win32api Registry -> Dictionary-like-object wrapper"""
import cPickle
import _winreg
class RegistryDict(object):
def __init__(self, keyhandle = _winreg.HKEY_LOCAL_MACHINE, keypath = [], flags = _winreg.KEY_READ):
"""If flags=None, then it will create the key.. otherwise pass a _winreg.KEY_* sam"""
self.keyhandle = None
self.open(keyhandle, keypath, flags)
@staticmethod
def massageIncomingRegistryValue((obj, objtype), bReturnType=False):
r=None
if objtype == _winreg.REG_BINARY and obj[:8]=='PyPickle':
obj = obj[8:]
r = (cPickle.loads(obj), objtype)
elif objtype == _winreg.REG_NONE:
r = (None, objtype)
elif objtype in (_winreg.REG_SZ, _winreg.REG_EXPAND_SZ,
_winreg.REG_RESOURCE_LIST, _winreg.REG_LINK,
_winreg.REG_BINARY, _winreg.REG_DWORD,
_winreg.REG_DWORD_LITTLE_ENDIAN, _winreg.REG_DWORD_BIG_ENDIAN,
_winreg.REG_MULTI_SZ):
r = (obj,objtype)
if r == None:
raise NotImplementedError, "Registry type 0x%08X not supported" % (objtype,)
if bReturnType:
return r
else:
return r[0]
def __getitem__(self, key):
bReturnType=False
if (type(key) is tuple) and (len(key)==1):
key = key[0]
bReturnType=True
# is it data?
try:
return self.massageIncomingRegistryValue(_winreg.QueryValueEx(self.keyhandle, key),bReturnType)
except:
if key == '':
# Special case: this dictionary key means "default value"
raise KeyError, key
pass
# it's probably a registry key then
try:
return RegistryDict(self.keyhandle, key, _winreg.KEY_ALL_ACCESS)
except:
pass
# must not be there
raise KeyError, key
def has_key(self, key):
return self.__contains__(key)
def __contains__(self, key):
try:
self.__getitem__(key)
return 1
except KeyError:
return 0
def copy(self):
return dict(self.iteritems())
def __repr__(self):
return repr(self.copy())
def __str__(self):
return self.__repr__()
def __cmp__(self, other):
# Do the objects have the same state?
return self.keyhandle == other.keyhandle
def __hash__(self):
raise TypeError, "RegistryDict objects are unhashable"
def clear(self):
keylist = list(self.iterkeys())
# Two-step to avoid changing the set while iterating over it
for k in keylist:
del self[k]
def iteritems_data(self):
i = 0
# yield data
try:
while 1:
s, obj, objtype = _winreg.EnumValue(self.keyhandle, i)
yield s, self.massageIncomingRegistryValue((obj, objtype))
i += 1
except:
pass
def iteritems_children(self, access=_winreg.KEY_ALL_ACCESS):
i = 0
try:
while 1:
s = _winreg.EnumKey(self.keyhandle, i)
yield s, RegistryDict(self.keyhandle, [s], access)
i += 1
except:
pass
def iteritems(self, access=_winreg.KEY_ALL_ACCESS):
# yield children
for item in self.iteritems_data():
yield item
for item in self.iteritems_children(access):
yield item
def iterkeys_data(self):
for key, value in self.iteritems_data():
yield key
def iterkeys_children(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield key
def iterkeys(self):
for key, value in self.iteritems():
yield key
def itervalues_data(self):
for key, value in self.iteritems_data():
yield value
def itervalues_children(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield value
def itervalues(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems(access):
yield value
def items(self, access=_winreg.KEY_ALL_ACCESS):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self, access=_winreg.KEY_ALL_ACCESS):
return list(self.itervalues(access))
def __delitem__(self, key):
# Delete a string value or a subkey, depending on the type
try:
item = self[key]
except:
return # Silently ignore bad keys
itemtype = type(item)
if itemtype is str:
_winreg.DeleteValue(self.keyhandle, key)
elif isinstance(item, RegistryDict):
# Delete everything in the subkey, then the subkey itself
item.clear()
_winreg.DeleteKey(self.keyhandle, key)
else:
raise ValueError, "Unknown item type in RegistryDict"
def __len__(self):
return len(self.items())
def __iter__(self):
return self.iterkeys()
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError, "RegistryDict is empty"
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
self.__setitem__(key)
return default
def update(self,d):
for k,v in d.items():
self.__setitem__(k, v)
def __setitem__(self, item, value):
item = str(item)
pyvalue = type(value)
if pyvalue is tuple and len(value)==2:
valuetype = value[1]
value = value[0]
else:
if pyvalue is dict or isinstance(value, RegistryDict):
d = RegistryDict(self.keyhandle, item)
d.clear()
d.update(value)
return
if pyvalue is str:
valuetype = _winreg.REG_SZ
elif pyvalue is int:
valuetype = _winreg.REG_DWORD
else:
valuetype = _winreg.REG_BINARY
value = 'PyPickle' + cPickle.dumps(value)
_winreg.SetValueEx(self.keyhandle, item, 0, valuetype, value)
def open(self, keyhandle, keypath, flags = None):
if type(keypath) is str:
keypath = keypath.split('\\')
if flags is None:
for subkey in keypath:
keyhandle = _winreg.CreateKey(keyhandle, subkey)
else:
for subkey in keypath:
keyhandle = _winreg.OpenKeyEx(keyhandle, subkey, 0, flags)
self.keyhandle = keyhandle
def close(self):
try:
_winreg.CloseKey(self.keyhandle)
except:
pass
## end of http://code.activestate.com/recipes/573466/ }}}
|
The courtesan in the picture is wearing a robe decorated with dianthus flowers which are also called pinks, but the Japanese word is nadeshiko (撫子). It is considered one of the Seven Flowers of Autumn.
If Andreas Marks dates for the publisher of this print, Imariya Ushizō, ca. 1816-1827, then the date of this image can be no later than that end date.
|
"""
@package mi.instrument.wetlabs.fluorometer.flord_d.test.test_driver
@file marine-integrations/mi/instrument/wetlabs/fluorometer/flort_d/driver.py
@author Tapana Gupta
@brief Test cases for flord_d driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.core.instrument.chunker import StringChunker
from mi.instrument.wetlabs.fluorometer.flort_d.test.test_driver import DriverTestMixinSub
from mi.instrument.wetlabs.fluorometer.flord_d.driver import InstrumentDriver
from mi.instrument.wetlabs.fluorometer.flord_d.driver import FlordProtocol
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordMenuParticle, FlordSampleParticle
from mi.instrument.wetlabs.fluorometer.flort_d.driver import DataParticleType
from mi.instrument.wetlabs.fluorometer.flort_d.driver import InstrumentCommands
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolState
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolEvent
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Capability
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Parameter
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Prompt
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordMenuParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordSampleParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import NEWLINE
from mi.core.instrument.instrument_driver import DriverProtocolState, DriverConfigKey
# SAMPLE DATA FOR TESTING
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_MNU_RESPONSE
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_SAMPLE_RESPONSE
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_MET_RESPONSE
from mi.core.exceptions import InstrumentCommandException, SampleException
__author__ = 'Tapana Gupta'
__license__ = 'Apache 2.0'
log = get_logger()
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.wetlabs.fluorometer.flord_d.driver',
driver_class="FlordInstrumentDriver",
instrument_agent_resource_id='3DLE2A',
instrument_agent_name='wetlabs_fluorometer_flord_d',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={
DriverConfigKey.PARAMETERS: {Parameter.RUN_WIPER_INTERVAL: '00:10:00',
Parameter.RUN_CLOCK_SYNC_INTERVAL: '00:10:00',
Parameter.RUN_ACQUIRE_STATUS_INTERVAL: '00:10:00'}}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python, mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class FlordDriverTestMixinSub(DriverTestMixinSub):
"""
Mixin class used for storing data particle constance and common data assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_Driver = InstrumentDriver
_flordD_mnu_parameters = {
FlordMenuParticleKey.SERIAL_NUM: {TYPE: unicode, VALUE: 'BBFL2W-993', REQUIRED: True},
FlordMenuParticleKey.FIRMWARE_VER: {TYPE: unicode, VALUE: 'Triplet5.20', REQUIRED: True},
FlordMenuParticleKey.AVE: {TYPE: int, VALUE: 1, REQUIRED: True},
FlordMenuParticleKey.PKT: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M1D: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M2D: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M1S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
FlordMenuParticleKey.M2S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
FlordMenuParticleKey.SEQ: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.RAT: {TYPE: int, VALUE: 19200, REQUIRED: True},
FlordMenuParticleKey.SET: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.REC: {TYPE: int, VALUE: 1, REQUIRED: True},
FlordMenuParticleKey.MAN: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.INT: {TYPE: unicode, VALUE: '00:00:10', REQUIRED: True},
FlordMenuParticleKey.DAT: {TYPE: unicode, VALUE: '07/11/13', REQUIRED: True},
FlordMenuParticleKey.CLK: {TYPE: unicode, VALUE: '12:48:34', REQUIRED: True},
FlordMenuParticleKey.MST: {TYPE: unicode, VALUE: '12:48:31', REQUIRED: True},
FlordMenuParticleKey.MEM: {TYPE: int, VALUE: 4095, REQUIRED: True}
}
_flordD_sample_parameters = {
FlordSampleParticleKey.wave_beta: {TYPE: int, VALUE: 700, REQUIRED: True},
FlordSampleParticleKey.raw_sig_beta: {TYPE: int, VALUE: 4130, REQUIRED: True},
FlordSampleParticleKey.wave_chl: {TYPE: int, VALUE: 695, REQUIRED: True},
FlordSampleParticleKey.raw_sig_chl: {TYPE: int, VALUE: 1018, REQUIRED: True},
FlordSampleParticleKey.raw_temp: {TYPE: int, VALUE: 525, REQUIRED: True},
FlordSampleParticleKey.SIG_1_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_2_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_1_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_2_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True}
}
# #
# Driver Parameter Methods
# #
def assert_particle_mnu(self, data_particle, verify_values=False):
"""
Verify flortd_mnu particle
@param data_particle: FlortDMNU_ParticleKey data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(FlordMenuParticleKey, self._flordD_mnu_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.FLORDD_MNU)
self.assert_data_particle_parameters(data_particle, self._flordD_mnu_parameters, verify_values)
def assert_particle_sample(self, data_particle, verify_values=False):
"""
Verify flortd_sample particle
@param data_particle: FlortDSample_ParticleKey data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(FlordSampleParticleKey, self._flordD_sample_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.FLORDD_SAMPLE)
self.assert_data_particle_parameters(data_particle, self._flordD_sample_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, FlordDriverTestMixinSub):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
def test_driver_enums(self):
"""
Verify that all driver enumeration has no duplicate values that might cause confusion. Also
do a little extra validation for the capabilities
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommands())
# Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
Get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(FlordProtocol.sieve_function)
self.assert_chunker_sample(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_MNU_RESPONSE, 128)
self.assert_chunker_combined_sample(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_sample(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_MET_RESPONSE, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_sample(chunker, SAMPLE_SAMPLE_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_SAMPLE_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_SAMPLE_RESPONSE, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_SAMPLE_RESPONSE)
def test_corrupt_data_sample(self):
particle = FlordMenuParticle(SAMPLE_MNU_RESPONSE.replace('Ave 1', 'Ave foo'))
with self.assertRaises(SampleException):
particle.generate()
particle = FlordSampleParticle(SAMPLE_SAMPLE_RESPONSE.replace('700', 'foo'))
with self.assertRaises(SampleException):
particle.generate()
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, SAMPLE_MNU_RESPONSE, self.assert_particle_mnu, True)
self.assert_particle_published(driver, SAMPLE_SAMPLE_RESPONSE, self.assert_particle_sample, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock(spec="PortAgentClient")
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: [ProtocolEvent.DISCOVER],
ProtocolState.COMMAND: [ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.START_DIRECT,
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.RUN_WIPER,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.CLOCK_SYNC],
ProtocolState.AUTOSAMPLE: [ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.RUN_WIPER_SCHEDULED,
ProtocolEvent.SCHEDULED_CLOCK_SYNC,
ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
ProtocolEvent.GET],
ProtocolState.DIRECT_ACCESS: [ProtocolEvent.STOP_DIRECT,
ProtocolEvent.EXECUTE_DIRECT]
}
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
def test_command_response(self):
"""
Test response with no errors
Test the general command response will raise an exception if the command is not recognized by
the instrument
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# test response with no errors
protocol._parse_command_response(SAMPLE_MNU_RESPONSE, None)
# test response with 'unrecognized command'
response = False
try:
protocol._parse_command_response('unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
# test correct response with error
response = False
try:
protocol._parse_command_response(SAMPLE_MET_RESPONSE + NEWLINE + 'unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
def test_run_wiper_response(self):
"""
Test response with no errors
Test the run wiper response will raise an exception:
1. if the command is not recognized by
2. the status of the wiper is bad
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# test response with no errors
protocol._parse_run_wiper_response('mvs 1', None)
# test response with 'unrecognized command'
response = False
try:
protocol._parse_run_wiper_response('unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
# test response with error
response = False
try:
protocol._parse_run_wiper_response("mvs 0" + NEWLINE, None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
def test_discover_state(self):
"""
Test discovering the instrument in the COMMAND state and in the AUTOSAMPLE state
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# COMMAND state, wait for particles returns an empty list
protocol.wait_for_particles = Mock(return_value=[])
next_state, result = protocol._handler_unknown_discover()
self.assertEqual(next_state, DriverProtocolState.COMMAND)
# AUTOSAMPLE state, wait for particles returns one or more particles
protocol.wait_for_particles = Mock(return_value=[1])
next_state, result = protocol._handler_unknown_discover()
self.assertEqual(next_state, DriverProtocolState.AUTOSAMPLE)
def test_create_commands(self):
"""
Test creating different types of commands
1. command with no end of line
2. simple command with no parameters
3. command with parameter
"""
# create the operator commands
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# !!!!!
cmd = protocol._build_no_eol_command('!!!!!')
self.assertEqual(cmd, '!!!!!')
# $met
cmd = protocol._build_simple_command('$met')
self.assertEqual(cmd, '$met' + NEWLINE)
# $mnu
cmd = protocol._build_simple_command('$mnu')
self.assertEqual(cmd, '$mnu' + NEWLINE)
# $run
cmd = protocol._build_simple_command('$run')
self.assertEqual(cmd, '$run' + NEWLINE)
# parameters
cmd = protocol._build_single_parameter_command('$ave', Parameter.MEASUREMENTS_PER_REPORTED, 14)
self.assertEqual(cmd, '$ave 14' + NEWLINE)
cmd = protocol._build_single_parameter_command('$m2d', Parameter.MEASUREMENT_2_DARK_COUNT, 34)
self.assertEqual(cmd, '$m2d 34' + NEWLINE)
cmd = protocol._build_single_parameter_command('$m1s', Parameter.MEASUREMENT_1_SLOPE, 23.1341)
self.assertEqual(cmd, '$m1s 23.1341' + NEWLINE)
cmd = protocol._build_single_parameter_command('$dat', Parameter.DATE, '041014')
self.assertEqual(cmd, '$dat 041014' + NEWLINE)
cmd = protocol._build_single_parameter_command('$clk', Parameter.TIME, '010034')
self.assertEqual(cmd, '$clk 010034' + NEWLINE)
|
[Snort-devel] stream4 vs. dsize: how to spot an overflow?
There are many signatures that are designed to spot buffer overflows.
note, "useful for dsize and stream4".
packets, we don't see it.
reassembled stream of minimum size N?"
What does one do in 2.0?
|
# Copyright (C) 2018 Emmanuel Gil Peyrot <linkmauve AT linkmauve.fr>
#
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
import logging
import hashlib
from base64 import b64decode
import nbxmpp
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from gajim.common import app
from gajim.common import configpaths
from gajim.common.modules.base import BaseModule
log = logging.getLogger('gajim.c.m.bob')
class BitsOfBinary(BaseModule):
def __init__(self, con):
BaseModule.__init__(self, con)
self.handlers = [
StanzaHandler(name='iq',
callback=self._answer_bob_request,
typ='get',
ns=Namespace.BOB),
]
# Used to track which cids are in-flight.
self.awaiting_cids = {}
def _answer_bob_request(self, _con, stanza, _properties):
self._log.info('Request from %s for BoB data', stanza.getFrom())
iq = stanza.buildReply('error')
err = nbxmpp.ErrorNode(nbxmpp.ERR_ITEM_NOT_FOUND)
iq.addChild(node=err)
self._log.info('Sending item-not-found')
self._con.connection.send(iq)
raise nbxmpp.NodeProcessed
def _on_bob_received(self, _nbxmpp_client, result, cid):
"""
Called when we receive BoB data
"""
if cid not in self.awaiting_cids:
return
if result.getType() == 'result':
data = result.getTags('data', namespace=Namespace.BOB)
if data.getAttr('cid') == cid:
for func in self.awaiting_cids[cid]:
cb = func[0]
args = func[1]
pos = func[2]
bob_data = data.getData()
def recurs(node, cid, data):
if node.getData() == 'cid:' + cid:
node.setData(data)
else:
for child in node.getChildren():
recurs(child, cid, data)
recurs(args[pos], cid, bob_data)
cb(*args)
del self.awaiting_cids[cid]
return
# An error occurred, call callback without modifying data.
for func in self.awaiting_cids[cid]:
cb = func[0]
args = func[1]
cb(*args)
del self.awaiting_cids[cid]
def get_bob_data(self, cid, to, callback, args, position):
"""
Request for BoB (XEP-0231) and when data will arrive, call callback
with given args, after having replaced cid by it's data in
args[position]
"""
if cid in self.awaiting_cids:
self.awaiting_cids[cid].appends((callback, args, position))
else:
self.awaiting_cids[cid] = [(callback, args, position)]
iq = nbxmpp.Iq(to=to, typ='get')
iq.addChild(name='data', attrs={'cid': cid}, namespace=Namespace.BOB)
self._con.connection.SendAndCallForResponse(
iq, self._on_bob_received, {'cid': cid})
def parse_bob_data(stanza):
data_node = stanza.getTag('data', namespace=Namespace.BOB)
if data_node is None:
return None
cid = data_node.getAttr('cid')
type_ = data_node.getAttr('type')
max_age = data_node.getAttr('max-age')
if max_age is not None:
try:
max_age = int(max_age)
except Exception:
log.exception(stanza)
return None
if cid is None or type_ is None:
log.warning('Invalid data node (no cid or type attr): %s', stanza)
return None
try:
algo_hash = cid.split('@')[0]
algo, hash_ = algo_hash.split('+')
except Exception:
log.exception('Invalid cid: %s', stanza)
return None
bob_data = data_node.getData()
if not bob_data:
log.warning('No data found: %s', stanza)
return None
filepath = configpaths.get('BOB') / algo_hash
if algo_hash in app.bob_cache or filepath.exists():
log.info('BoB data already cached')
return None
try:
bob_data = b64decode(bob_data)
except Exception:
log.warning('Unable to decode data')
log.exception(stanza)
return None
if len(bob_data) > 10000:
log.warning('%s: data > 10000 bytes', stanza.getFrom())
return None
try:
sha = hashlib.new(algo)
except ValueError as error:
log.warning(stanza)
log.warning(error)
return None
sha.update(bob_data)
if sha.hexdigest() != hash_:
log.warning('Invalid hash: %s', stanza)
return None
if max_age == 0:
app.bob_cache[algo_hash] = bob_data
else:
try:
with open(str(filepath), 'w+b') as file:
file.write(bob_data)
except Exception:
log.warning('Unable to save data')
log.exception(stanza)
return None
log.info('BoB data stored: %s', algo_hash)
return filepath
def store_bob_data(bob_data):
if bob_data is None:
return None
algo_hash = '%s+%s' % (bob_data.algo, bob_data.hash_)
filepath = configpaths.get('BOB') / algo_hash
if algo_hash in app.bob_cache or filepath.exists():
log.info('BoB data already cached')
return None
if bob_data.max_age == 0:
app.bob_cache[algo_hash] = bob_data.data
else:
try:
with open(str(filepath), 'w+b') as file:
file.write(bob_data.data)
except Exception:
log.exception('Unable to save data')
return None
log.info('BoB data stored: %s', algo_hash)
return filepath
def get_instance(*args, **kwargs):
return BitsOfBinary(*args, **kwargs), 'BitsOfBinary'
|
Hastelloy C276 is today known as a universally corrosion resistant material and is used in various environments from moderately oxidizing to strong reducing conditions. The limiting factor when dealing with strong oxidizing environments is the low Cr content which indicates that hot and concentrated nitric acid environments are not required. This alloy has exceptional resistance to acid chlorides, formic & acetic acids, wet chloride gas, chlorine solutions, sulfuric acid & hydrochloric acid, solvents, acetic anhydride and hypochlorites. It has high resistance to phosphoric acid at all temperatures below boiling and at concentrations lower than 65%. Hastelloy C276 has high resistance to corrosion by seawater especially under crevice conditions that induce attack in other commonly used materials. This alloy also has high resistance to pitting that is localized corrosion.
The low carbon content in Hastelloy C276 minimizes carbide precipitation during welding to maintain resistance to intergranular attack in heat effected zones of weld joints. This alloy has excellent high temperature strength and moderate oxidation resistance however the embrittling high temperature precipitates will occur eventually. Hastelloy C276 is widely used in chemical processing industries in reaction vessels, transfer piping, evaporators and heat exchangers. It is used in pollution control (FGD) in ducts, fans, stack liners, dampers and fan housings. This alloy is also used in various other applications including pickling tanks and pickling hooks. Hastelloy C276 exhibits high performance in oilfield applications especially in the recovery and handling of “sour” natural gas that contains hydrogen sulfide, carbon dioxide and chlorides. It is highly resistant to these environments due to its high chromium, nickel and molybdenum content.
It is resistant to wet chloride gas, hypochlorite and chlorine dioxide solutions.
It has high resistance to strong solutions of oxidizing salts such as ferric and cupric chlorides.
It is not prone to grain boundary precipitation in the as welded condition and is suitable for numerous chemical process applications.
Hastelloy C276 is especially effective in Scrubber (FGD) applications where it is able to withstand the higher chloride environments before the onset of localized corrosion. Hastelloy C276 is resistant to chloride-induced stress corrosion cracking due to its high nickel content. This alloy performs excellently in oxidizing, non-oxidising and mixed acids media due to its high molybdenum and chromium contents. The high molybdenum and chromium contents also allow this alloy to exhibit excellent resistance to pitting and crevice corrosion attack. The development of pits is inhibited by the addition of tungsten. Hastelloy C276 is appropriate to use for off-shore applications where hydrogen-sulphide (H2S) stress corrosion cracking resistance is necessary. Numerous industries have started using this alloy as it is one of the most corrosion resistant alloys in aggressive or corrosive environments.
|
"""untitled URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
from django.contrib.auth.views import login as login_view
from django.contrib.auth.views import logout as logout_view
from sqlviewer.glimpse import views as glimpse_views
from sqlviewer.connect import views as connect_views
import sqlviewer.glimpse.api
api = [
url(r'^api/v1/models/(?P<model_id>\d+)?$', sqlviewer.glimpse.api.ModelView.as_view()),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)?$', sqlviewer.glimpse.api.VersionView.as_view()),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/tables(/)?$', sqlviewer.glimpse.api.table_details, name='api-table-details'),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/diagrams/(?P<diagram_id>\d+)?$', sqlviewer.glimpse.api.DiagramView.as_view()),
]
pages = [
url(r'^$', glimpse_views.models_list_view, name="home"),
url(r'^admin/', admin.site.urls),
url(r'^models/upload$', glimpse_views.model_upload_view, name='model_upload'),
url(r'^accounts/login/$', login_view, name='login'),
url(r'^accounts/logout/$', logout_view, name='logout'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)$', glimpse_views.model_version_details_view, name='model_details'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/connect$', connect_views.connect_view, name='glimpse-connect'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/search$', glimpse_views.version_search_view, name='version_search'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/diagrams/(?P<diagram_id>\d+)$', glimpse_views.diagram_details_view, name='diagram_details'),
]
urlpatterns = pages + api
|
This poor guy! He is walking through 2017 with the biggest regret ever - not getting a girls information! The two were at the 2016 Peach Bowl. She was cheering for UW and he was cheering for Alabama. All she said is that she was from Montana.... For his sake, I'm hoping she is from Billings!! Here is the link to the post. If you know anything, reach out to him!
|
# -*- coding: iso-8859-1 -*-
import base64
from MailListener_POP3 import parse_mail_parts
def test_parse_mail_parts():
"""
Given
- Email data
When
- Email contains special characters
Then
- run parse_mail_parts method
- Validate The result body.
"""
class MockEmailPart:
pass
part = MockEmailPart()
part._headers = [['content-transfer-encoding', 'quoted-printable']]
part._payload = "El Ni\xc3\xb1o"
parts = [part]
body, html, attachments = parse_mail_parts(parts)
assert body == 'El Nio'
def test_base64_mail_decode():
"""
Given
- base64 email data which could not be decoded into utf-8
When
- Email contains special characters
Then
- run parse_mail_parts method
- Validate that no exception is thrown
- Validate The result body
"""
class MockEmailPart:
pass
test_payload = 'Foo\xbbBar=='
base_64_encoded_test_payload = base64.b64encode(test_payload)
part = MockEmailPart()
part._headers = [['content-transfer-encoding', 'base64']]
part._payload = base_64_encoded_test_payload
parts = [part]
body, html, attachments = parse_mail_parts(parts)
assert body.replace(u'\uFFFD', '?') == 'Foo?Bar=='
|
Welcome to the eSources.co.uk International Products Directory. We feature Wholesale Products from international suppliers.
eSources is the UK's fastest growing international wholesale products directory. Whether you are sourcing brand new wholesale merchandise, overstocks, clearance offers, surplus lines or would like a customised product manufactured by a trade supplier or manufacturer, the eSources wholesale directory is the easiest way to find reliable wholesale goods and supplies.
If you are a supplier based outside the UK you can register for premium membership to have your products featured in this wholesale directory. Membership includes free online trade cart to receive orders directly from eSources.
|
"""
Webmaster: Your Application's View
"""
from webmaster import (View, flash, abort, session, request, url_for,
redirect, flash_data, get_flashed_data)
from webmaster.decorators import (route, menu, template, plugin, methods,
render_as_json, render_as_xml,
require_user_roles, login_required,
no_login_required)
from webmaster.ext import (mailer, cache, storage, recaptcha, csrf)
from webmaster.packages import (contact_page, user, publisher)
from webmaster.exceptions import (ApplicationError, ModelError, UserError)
from application import model
# ------------------------------------------------------------------------------
# /
# This is the entry point of the site
# All root based (/) endpoint could be placed in here
# It extends the contact_page module, to be accessed at '/contact'
#
@menu("Main Menu", order=1)
@plugin(contact_page.contact_page)
@route("/")
class Index(View):
@menu("Home", order=1)
def index(self):
self.meta_tags(title="Hello View!")
return {}
@menu("Boom")
@template("Index/index2.html", version="1.6.9.6")
def boom(self):
return {}
# ------------------------------------------------------------------------------
# /account
# The User Account section
# Extends user.account which forces the whole endpoint to be authenticated
# If an action needs not authentication, @no_login_required can be added
#
@menu("My Account", group_name="account", order=3, align_right=True, visible=False)
@plugin(user.auth, model=model.User)
class Account(View):
@menu("My Account", order=1)
def index(self):
self.meta_tags(title="My Account")
return {}
@menu("Upload Image Demo", order=2)
@route("upload", methods=["GET", "POST"])
def upload(self):
self.meta_tags(title="Upload Demo")
if request.method == "POST":
try:
_file = request.files.get('file')
if _file:
my_object = storage.upload(_file,
prefix="demo/",
public=True,
allowed_extensions=["gif", "jpg", "jpeg", "png"])
if my_object:
return redirect(url_for("Account:upload", object_name=my_object.name))
except Exception as e:
flash_error(e.message)
return redirect(url_for("Account:upload"))
my_object = None
object_name = request.args.get("object_name")
if object_name:
my_object = storage.get(object_name=object_name)
return {"my_object": my_object}
@menu("No Login", order=3)
@no_login_required
def no_login(self):
return {}
# ------------------------------------------------------------------------------
# /blog
# Using the publisher.page, we created a blog
#
@plugin(publisher.page,
model=model.Publisher,
query={"types": ["blog"]},
slug="slug",
title="Blog",
endpoint="",
menu={"name": "Blog"})
class Blog(View):
pass
|
For best results thaw under refrigeration. Use within 48 hours after thawed. Cook steak according to a cooking or meat thermometer rather than a time table. Remove steaks from grill, pan, or oven 5 degrees before desired doneness. Let steak rest 5-10 minutes before serving.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: engine_cdmi.py
import engine,urllib2,urllib,re
class cdmi(engine.engine):
def __init__(self,proxy,locale=None):
engine.engine.__init__(self,proxy,locale)
self.netEncoder='utf8'
def request(self,artist,title):
url = '%s - %s' % (str(unicode(artist,self.locale).encode('utf8')),(unicode(title,self.locale).encode('utf8')))
url = urllib.quote(url)
url = 'http://www.cdmi.net/LRC/cgi-bin/lrc_search.cgi?lrc_id=%s' % url
try:
#print url
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler,urllib2.ProxyHandler(self.proxy))
file=opener.open(url)
originalLrc=file.read()
file.close()
except IOError:
return (None,True)
else:
if(originalLrc.startswith('[ti:No Lyrics]')):
return (None,False)
else:
value = re.findall(r"\[ar:(.*?)\]",originalLrc)
if value:
artist = value[0]
value = re.findall(r"\[ti:(.*?)\]",originalLrc)
if value:
title = value[0]
return ([[artist,title,originalLrc]],False)
#def downIt(self,url):
#return url
|
Over 40 years of engineering experience pertaining to mechanical engineering, materials, metallurgy, electronic components, and failure analysis. Specialized expertise includes printed circuit board fabrication, SMT and through hole system card assembly, backplane systems, card cage cabinet solutions, electroforming, corrosion, system interconnect, quality control, and explosive bonding. Forensic experience includes commercial marina facilities, mechanical and electrical components, machinery, residential public utilities, and electrical equipment and residential fire investigation.
|
"""A module to receive data from UR CB2 robots."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import struct
import array
import threading
class URReceiver(object):
"""A class to receive and process data from a UR Robot
The receiving and processing can be run in a separate thread by calling
start(). The stop() command must be called before exiting to halt the
additional thread. Alternatively, receive(), decode(), and
print_parsed_data() can be called in sequence in order to receive,
decode, and print data. One should not call receive(), decode(), or any
of the print methods, if a separate thread is being used. You should
never write to any of the data fields externally, however you can read
from them. Python's atomic read/write architecture should prevent you
from getting any half baked results from basic types, for all lists and
tuples, you must lock using lock (recommend that you use `with lock:`
paradigm.
Attributes:
clean_data: Double array of length 101 for all of the data returned by
the robot
raw_data: String of complete raw data packet
__socket: The socket for communications
clean_packets: The Integer number of packets which have been received
cleanly
stub_packets: The Integer number of packets which have been received
as stubs
received: The total Integer number of complete data sets which have
been received
waiting_data: String to hold incomplete data sets
new_data: Boolean whether new data is available for processing
time: Double of time elapsed since the controller was started
target_joint_positions: 6 member Double list of target joint positions
target_joint_velocities: 6 member Double list of target joint velocities
target_joint_accelerations: 6 member Double list of target joint
accelerations
target_joint_currents: 6 member Double list of target joint currents
target_joint_moments: 6 member Double list of target joint moments as
torques
actual_joint_positions: 6 member Double list of actual joint positions
actual_joint_velocities: 6 member Double list of actual joint velocities
actual_joint_currents: 6 member Double list of actual joint currents
tool_accelerometer: 3 member Double list of ool x,y and z accelerometer
values (software version 1.7)
force_tcp: 6 member Double list of generalised forces in the TCP
position: 6 member Double list of cartesian coordinates of the tool:
(x,y,z,rx,ry,rz), where rx, ry and rz is a rotation vector
representation of the tool orientation
tool_speed: 6 member Double list of speed of the tool given in cartesian
coordinates
digital_inputs: Current state of the digital inputs. NOTE: these are
bits encoded as int64_t, e.g. a value of 5 corresponds to bit 0 and
bit 2 set high
joint_temperature: 6 member Double list of temperature of each joint in
degrees celsius
controller_period: Double of controller real time thread execution time
robot_control_mode: Double of robot control mode (see
PolyScopeProgramServer on the "How to" page
joint_control_modes: 6 member Double list of joint control modes (see
PolyScopeProgramServer on the "How to" page) (only from software
version 1.8 and on)
run: Boolean on whether to run or not
__receiving_thread: Thread object for running the receiving and parsing
loops
verbose: Boolean defining whether or not to print data
lock: A threading lock which is used to protect data from race
conditions
_is_stopped: A boolean specifying whether the robot is stopped
"""
# Format specifier:
# ! : network (big endian)
# I : unsigned int, message size
# 85d : 85 doubles
# q : int64_t for digital inputs
# 15d : 15 doubles
#: Format spec for complete data packet
format = struct.Struct('! I 85d q 15d')
#: The format spec for the packet length field
formatLength = struct.Struct('! I')
#: The width to be given to name items when printing out
name_width = 30
#: The precision for printing data
precision = 7
double_format_string = "{:+0"+str(precision+4)+"."+str(precision)+"f}"
def __init__(self, open_socket, verbose=False):
"""Construct a UR Robot connection given connection parameters
Args:
open_socket (socket.socket): The socket to use for communications.
verbose (bool): Whether to print received data in main loop
"""
self.clean_data = array.array('d', [0] * 101)
self.raw_data = ''
self.__socket = open_socket
self.clean_packets = 0
self.stub_packets = 0
self.received = 0
self.waiting_data = ''
self.new_data = False
self.time = 0.0
self.target_joint_positions = [0.0]*6
self.target_joint_velocities = [0.0]*6
self.target_joint_accelerations = [0.0]*6
self.target_joint_currents = [0.0]*6
self.target_joint_moments = [0.0]*6
self.actual_joint_positions = [0.0]*6
self.actual_joint_velocities = [0.0]*6
self.actual_joint_currents = [0.0]*6
self.tool_accelerometer = [0.0]*3
self.force_tcp = [0.0]*6
self.position = [0.0]*6
self.tool_speed = [0.0]*6
self.digital_inputs = 0
self.joint_temperature = [0.0]*6
self.controller_period = 0.0
self.robot_control_mode = 0.0
self.joint_control_modes = [0.0]*6
self.run = False
self.__receiving_thread = None
self.verbose = verbose
self.lock = threading.Lock()
self._is_stopped = False
if verbose:
print "\033[2J" # Clear screen
def __del__(self):
"""Shutdown side thread and print aggregated connection stats"""
self.stop()
print "Received: "+str(self.received) + " data sets"
print "Received: "+str(self.clean_packets) + " clean packets"
print "Received: "+str(self.stub_packets) + " stub packets"
def decode(self):
"""Decode the data stored in the class's rawData field.
Only process the data if there is new data available. Unset the
self.newData flag upon completion. Note, this will lock the data set
and block execution in a number of other functions
"""
with self.lock:
if self.new_data:
self.clean_data = self.format.unpack(self.raw_data)
self.time = self.clean_data[1]
self.target_joint_positions = self.clean_data[2:8]
self.target_joint_velocities = self.clean_data[8:14]
self.target_joint_accelerations = self.clean_data[14:20]
self.target_joint_currents = self.clean_data[20:26]
self.target_joint_moments = self.clean_data[26:32]
self.actual_joint_positions = self.clean_data[32:38]
self.actual_joint_velocities = self.clean_data[38:44]
self.actual_joint_currents = self.clean_data[44:50]
self.tool_accelerometer = self.clean_data[50:53]
# unused = self.clean_data[53:68]
self.force_tcp = self.clean_data[68:74]
self.position = self.clean_data[74:80]
self.tool_speed = self.clean_data[80:86]
self.digital_inputs = self.clean_data[86]
self.joint_temperature = self.clean_data[87:93]
self.controller_period = self.clean_data[93]
# test value = self.clean_data[94]
self.robot_control_mode = self.clean_data[95]
self.joint_control_modes = self.clean_data[96:102]
self.new_data = False
self._is_stopped = self.is_stopped()
def receive(self):
"""Receive data from the UR Robot.
If an entire data set is not received, then store the data in a
temporary location (self.waitingData). Once a complete packet is
received, place the complete packet into self.rawData and set the
newData flag. Note, this will lock the data set and block execution in a
number of other functions once a full data set is built.
"""
incoming_data = self.__socket.recv(812) # expect to get 812 bytes
if len(incoming_data) == 812:
self.clean_packets += 1
else:
self.stub_packets += 1
if self.formatLength.unpack(incoming_data[0:4])[0] == 812:
self.waiting_data = incoming_data
else:
self.waiting_data += incoming_data
if len(self.waiting_data) == 812:
with self.lock:
self.raw_data = self.waiting_data
self.received += 1
self.new_data = True
def print_raw_data(self):
"""Print the raw data which is stored in self.raw_data.
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (raw): "+self.raw_data + "\n"
def print_data(self):
"""Print the processed data stored in self.clean_data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (unpacked):\n "
print self.clean_data
print "\n"
def output_data_item(self, name, values):
"""Output item with name and values.
Formatting is specified by self.name_width and self.precision.
Args:
name (str): The name of the value
values (float, int, tuple of float, list of float): The list of
values
"""
to_print = ("%-"+str(self.name_width)+"s") % name
if isinstance(values, (list, tuple)):
to_print += ": [%s]" % ', '.join(self.double_format_string.format(x)
for x in values)
elif isinstance(values, (int, bool)):
to_print += ": [%s]" % str(values)
elif isinstance(values, float):
to_print += ": [%s]" % self.double_format_string.format(values)
else:
print "I don't know that data type: " + str(type(values))
print to_print
def print_parsed_data(self):
"""Print the parsed data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "\033[H"
self.output_data_item("Time since controller turn on",
self.time)
self.output_data_item("Target joint positions",
self.target_joint_positions)
self.output_data_item("Target joint velocities",
self.target_joint_velocities)
self.output_data_item("Target joint accelerations",
self.target_joint_accelerations)
self.output_data_item("Target joint currents",
self.target_joint_currents)
self.output_data_item("Target joint moments (torque)",
self.target_joint_moments)
self.output_data_item("Actual joint positions",
self.actual_joint_positions)
self.output_data_item("Actual joint velocities",
self.actual_joint_velocities)
self.output_data_item("Actual joint currents",
self.actual_joint_currents)
self.output_data_item("Tool accelerometer values",
self.tool_accelerometer)
self.output_data_item("Generalised forces in the TCP",
self.force_tcp)
self.output_data_item("Cartesian tool position",
self.position)
self.output_data_item("Cartesian tool speed",
self.tool_speed)
self.output_data_item("Joint temperatures (deg C)",
self.joint_temperature)
self.output_data_item("Controller period",
self.controller_period)
self.output_data_item("Robot control mode",
self.robot_control_mode)
self.output_data_item("Joint control modes",
self.joint_control_modes)
print ((("%-"+str(self.name_width)+"s") % "Digital Input Number") +
": " + '|'.join('{:^2d}'.format(x) for x in range(0, 18)))
print ((("%-"+str(self.name_width)+"s") % "Digital Input Value: ") +
": " + '|'.join('{:^2s}'.format(x) for x in '{:018b}'.format(
self.digital_inputs)[::-1]))
self.output_data_item("Is Stopped:",
self._is_stopped)
def start(self):
"""Spawn a new thread for receiving and run it"""
if (self.__receiving_thread is None or
not self.__receiving_thread.is_alive()):
self.run = True
self.__receiving_thread = threading.Thread(group=None,
target=self.loop,
name='receiving_thread',
args=(),
kwargs={})
self.__receiving_thread.start()
def loop(self):
"""The main loop which receives, decodes, and optionally prints data"""
while self.run:
self.receive()
self.decode()
if self.verbose:
self.print_parsed_data()
def stop(self):
"""Stops execution of the auxiliary receiving thread"""
if self.__receiving_thread is not None:
if self.__receiving_thread.is_alive():
self.verbose_print('attempting to shutdown auxiliary thread',
'*')
self.run = False # Python writes like this are atomic
self.__receiving_thread.join()
self.verbose_print('\033[500D')
self.verbose_print('\033[500C')
self.verbose_print('-', '-', 40)
if self.__receiving_thread.is_alive():
self.verbose_print('failed to shutdown auxiliary thread',
'*')
else:
self.verbose_print('shutdown auxiliary thread', '*')
else:
self.verbose_print('auxiliary thread already shutdown', '*')
else:
self.verbose_print('no auxiliary threads exist', '*')
def verbose_print(self, string_input, emphasis='', count=5):
"""Print input if verbose is set
Args:
string_input (str): The input string to be printed.
emphasis (str): Emphasis character to be placed around input.
count (int): Number of emphasis characters to use.
"""
if self.verbose:
if emphasis == '':
print string_input
else:
print (emphasis*count + " " + string_input + " " +
emphasis * count)
def is_stopped(self, error=0.005):
"""Check whether the robot is stopped.
Check whether the joint velocities are all below some error. Note, this
will lock the data set and block execution in a number of other
functions
Args:
error (float): The error range to define "stopped"
Returns: Boolean, whether the robot is stopped.
"""
with self.lock:
to_return = (
all(v == 0 for v in self.target_joint_velocities) and
all(v < error for v in self.actual_joint_velocities))
return to_return
def at_goal(self, goal, cartesian, error=0.005):
"""Check whether the robot is at a goal point.
Check whether the differences between the joint or cartesian
coordinates are all below some error. This can be used to
determine if a move has been completed. It can also be used to
create blends by beginning the next move prior to the current one
reaching its goal. Note, this will lock the data set and block execution
in a number of other functions.
Args:
goal (6 member tuple or list of floats): The goal to check against
cartesian (bool): Whether the goal is in cartesian coordinates or
not (in which case joint coordinates)
error (float): The error range in which to consider an object at
its goal, in meters for cartesian space and radians for axis
space.
Returns: Boolean, whether the current position is within the error
range of the goal.
"""
with self.lock:
to_return = (
all(abs(g-a) < error for g, a in zip(self.position, goal))
if cartesian else
all(abs(g-a) < error for g, a in
zip(self.actual_joint_positions, goal)))
return to_return
def __enter__(self):
"""Enters the URRobot receiver from a with statement"""
return self
def __exit__(self, *_):
"""Exits at the end of a context manager statement by destructing."""
self.stop()
|
When a friend invites you to a birthday brunch or long-planned summer dinner party, you don't necessarily want to show up with a bottle of Trader Joe's Prosecco. But a bottle of rosé Champagne? Now that suits the occasion.
The family-run Philopponnat's Brut Réserve Rosé is a beauty, a gorgeous deep pink, with the scent of toast and roses, the taste of mirabelle plums, a fine mousse — and a long, elegant finish. It's a reminder of how very alluring a well-made rosé Champagne can be. As these sparkling wines go, it's something of a bargain too. And if you want to make a big statement, John & Pete's in West Hollywood has the Philipponnat rosé in a magnum ($130). I like the idea too of a half bottle ($30) from Hi-Time Wine Cellars to start off dinner with a significant other.
Enjoy this rosé Champagne as an aperitif or with smoked trout or salmon, cold poached salmon, seared scallops or steamed lobster.
Where to find it: Hi-Time Wine Cellars in Costa Mesa, (949) 650-8463, www.hitimewine.net; John and Pete's Fine Wines & Spirits in West Hollywood, (310) 657-3080, www.johnandpetes.com; Lincoln Fine Wines in Venice, (310) 392-7816, www.lincolnfinewines.com; Manhattan Fine Wines in Manhattan Beach, (310) 374-3454, www.manhattanfinewines.com. You can also order online from the Rare Wine Co., www.rarewineco.com.
|
from iotqautils.gtwRest import Rest_Utils_SBC
from common.gw_configuration import CBROKER_URL,CBROKER_HEADER,CBROKER_PATH_HEADER,IOT_SERVER_ROOT,DEF_ENTITY_TYPE,MANAGER_SERVER_ROOT
from lettuce import world
iotagent = Rest_Utils_SBC(server_root=IOT_SERVER_ROOT+'/iot')
iota_manager = Rest_Utils_SBC(server_root=MANAGER_SERVER_ROOT+'/iot')
URLTypes = {
"IoTUL2": "/iot/d",
"IoTRepsol": "/iot/repsol",
"IoTEvadts": "/iot/evadts",
"IoTTT": "/iot/tt",
"IoTMqtt": "/iot/mqtt"
}
ProtocolTypes = {
"IoTUL2": "PDI-IoTA-UltraLight",
"IoTTT": "PDI-IoTA-ThinkingThings",
"IoTMqtt": "PDI-IoTA-MQTT-UltraLight"
}
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class UserSteps(object):
world.service_exists = False
world.service_path_exists = False
world.device_exists = False
def service_created(self, service_name, service_path={}, resource={}):
headers = {}
params = {}
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path == 'void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
if resource:
params['resource']= resource
service = iotagent.get_service('', headers, params)
if service.status_code == 200:
serv = service.json()
if serv['count'] == 1:
world.service_exists = True
if resource:
world.service_path_exists = True
return True
else:
return False
else:
return False
def device_created(self, service_name, device_name, service_path={}):
headers = {}
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path=='void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
device = iotagent.get_device(device_name, headers)
if device.status_code == 200:
world.device_exists=True
return True
else:
return False
def create_device(self, service_name, device_name, service_path={}, endpoint={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}, protocol={}, manager={}):
headers = {}
if not service_name=='void':
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path=='void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
device={
"devices":[
{
# "device_id": device_name
}
]
}
if device_name:
if device_name=='void':
device_name=""
device['devices'][0]['device_id'] = device_name
if commands:
device['devices'][0]['commands'] = commands
if endpoint:
device['devices'][0]['endpoint'] = endpoint
if entity_type:
device['devices'][0]['entity_type'] = entity_type
# else:
# device['devices'][0]['entity_type'] = DEF_ENTITY_TYPE
if entity_name:
device['devices'][0]['entity_name'] = entity_name
if attributes:
device['devices'][0]['attributes'] = attributes
if static_attributes:
device['devices'][0]['static_attributes'] = static_attributes
if protocol:
if protocol=="void":
protocol=""
device['devices'][0]['protocol'] = protocol
if manager:
req = iota_manager.post_device(device,headers)
else:
req = iotagent.post_device(device,headers)
# assert req.status_code == 201, 'ERROR: ' + req.text + "El device {} no se ha creado correctamente".format(device_name)
return req
def create_service(self, service_name, protocol, attributes={}, static_attributes={}):
headers = {}
headers[CBROKER_HEADER] = service_name
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
resource = URLTypes.get(protocol)
if (protocol == 'IotTT') | (protocol == 'IoTRepsol'):
apikey=''
else:
apikey='apikey_' + str(service_name)
service={
"services":[
{
"apikey": apikey,
"entity_type": DEF_ENTITY_TYPE,
"cbroker": CBROKER_URL,
"resource": resource
}
]
}
if attributes:
service['services'][0]['attributes'] = attributes
if static_attributes:
service['services'][0]['static_attributes'] = static_attributes
req = iotagent.post_service(service, headers)
assert req.status_code == 201, 'ERROR: ' + req.text + "El servicio {} no se ha creado correctamente".format(service_name)
world.service_exists = True
return req
def create_service_with_params(self, service_name, service_path, resource={}, apikey={}, cbroker={}, entity_type={}, token={}, attributes={}, static_attributes={}, protocol={}):
world.protocol={}
headers = {}
if not service_name == 'void':
headers[CBROKER_HEADER] = service_name
if not service_path == 'void':
headers[CBROKER_PATH_HEADER] = str(service_path)
service={
"services":[
{
# "resource": resource
}
]
}
if resource:
if not resource == 'void':
if not resource == 'null':
service['services'][0]['resource'] = resource
else:
service['services'][0]['resource'] = ""
# if not apikey == 'void':
if apikey:
if not apikey == 'null':
service['services'][0]['apikey'] = apikey
else:
service['services'][0]['apikey'] = ""
if cbroker:
if not cbroker == 'null':
service['services'][0]['cbroker'] = cbroker
else:
service['services'][0]['cbroker'] = ""
if entity_type:
service['services'][0]['entity_type'] = entity_type
if token:
service['services'][0]['token'] = token
if attributes:
service['services'][0]['attributes'] = attributes
if static_attributes:
service['services'][0]['static_attributes'] = static_attributes
if protocol:
if not protocol == 'void':
if not protocol == 'null':
resource = URLTypes.get(protocol)
prot = ProtocolTypes.get(protocol)
if not prot:
prot = protocol
service['services'][0]['protocol']= [prot]
else:
resource = protocol
service['services'][0]['protocol'] = []
req = iota_manager.post_service(service, headers)
else:
req = iotagent.post_service(service, headers)
if req.status_code == 201 or req.status_code == 409:
world.remember.setdefault(service_name, {})
if service_path == 'void':
service_path='/'
# world.remember[service_name].setdefault('path', set())
# world.remember[service_name]['path'].add(service_path)
# world.remember[service_name]['path'][service_path].setdefault('resource', set())
# world.remember[service_name]['path'][service_path]['resource'].add(service_path)
world.remember[service_name].setdefault(service_path, {})
world.remember[service_name][service_path].setdefault('resource', {})
world.remember[service_name][service_path]['resource'].setdefault(resource, {})
if not apikey:
apikey = ""
world.remember[service_name][service_path]['resource'][resource].setdefault(apikey)
# print world.remember
world.service_exists = True
world.service_path_exists = True
return req
def delete_device(self, device_name, service_name, service_path={}):
headers = {}
headers[CBROKER_HEADER] = service_name
if service_path:
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
req = iotagent.delete_device(device_name,headers)
assert req.status_code == 204, 'ERROR: ' + req.text + "El device {} no se ha borrado correctamente".format(device_name)
return req
def delete_service(self, service_name, service_path={}, resource={}, apikey={}):
params={}
headers = {}
headers[CBROKER_HEADER] = service_name
if world.protocol:
resource2 = URLTypes.get(world.protocol)
if (world.protocol == 'IotTT') | (world.protocol == 'IoTRepsol'):
apikey=''
else:
apikey='apikey_' + str(service_name)
params = {"resource": resource2,
"apikey": apikey}
if resource:
if apikey:
params = {"resource": resource,
"apikey": apikey
}
else:
params = {"resource": resource}
if service_path:
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
print params
req = iotagent.delete_service('', headers, params)
assert req.status_code == 204, 'ERROR: ' + req.text + "El servicio {} no se ha borrado correctamente".format(service_name)
return req
def service_precond(self, service_name, protocol, attributes={}, static_attributes={}):
world.service_name = service_name
if not self.service_created(service_name):
service = self.create_service(service_name, protocol, attributes, static_attributes)
assert service.status_code == 201, 'Error al crear el servcio {} '.format(service_name)
print 'Servicio {} creado '.format(service_name)
else:
print 'El servicio {} existe '.format(service_name)
world.protocol=protocol
world.remember.setdefault(service_name, {})
world.service_exists = True
def service_with_params_precond(self, service_name, service_path, resource, apikey, cbroker={}, entity_type={}, token={}, attributes={}, static_attributes={}):
world.protocol={}
world.service_name = service_name
if not self.service_created(service_name, service_path, resource):
service = self.create_service_with_params(service_name, service_path, resource, apikey, cbroker, entity_type, token, attributes, static_attributes)
assert service.status_code == 201, 'Error al crear el servcio {} '.format(service_name)
print 'Servicio {} creado '.format(service_name)
else:
print 'El servicio {} existe '.format(service_name)
world.remember.setdefault(service_name, {})
if service_path == 'void':
service_path='/'
world.remember[service_name].setdefault(service_path, {})
world.remember[service_name][service_path].setdefault('resource', {})
world.remember[service_name][service_path]['resource'].setdefault(resource, {})
if not apikey:
apikey = ""
world.remember[service_name][service_path]['resource'][resource].setdefault(apikey)
world.service_exists = True
world.service_path_exists = True
def device_precond(self, device_id, endpoint={}, protocol={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}):
world.device_id = device_id
if not self.device_created(world.service_name, device_id):
prot = ProtocolTypes.get(protocol)
device = self.create_device(world.service_name, device_id, {}, endpoint, commands, entity_name, entity_type, attributes, static_attributes, prot)
assert device.status_code == 201, 'Error al crear el device {} '.format(device_id)
print 'Device {} creado '.format(device_id)
else:
print 'El device {} existe '.format(device_id)
world.remember[world.service_name].setdefault('device', set())
world.remember[world.service_name]['device'].add(device_id)
world.device_exists = True
def device_of_service_precond(self, service_name, service_path, device_id, endpoint={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}, protocol={}, manager={}):
world.device_id = device_id
if not self.device_created(service_name, device_id, service_path):
prot = ProtocolTypes.get(protocol)
device = self.create_device(world.service_name, device_id, service_path, endpoint, commands, entity_name, entity_type, attributes, static_attributes, prot, manager)
assert device.status_code == 201, 'Error al crear el device {} '.format(device_id)
print 'Device {} con path {} creado '.format(device_id, service_path)
else:
print 'El device {} existe '.format(device_id)
if service_path=='void':
service_path2='/'
else:
service_path2=service_path
world.remember[service_name][service_path2].setdefault('device', set())
world.remember[service_name][service_path2]['device'].add(device_id)
world.device_exists = True
def clean(self,dirty):
if world.service_exists:
for srv in dirty.keys():
if world.device_exists:
if world.service_path_exists:
for path in dirty[srv]:
if dirty[srv][path].__contains__('device'):
for device in dirty[srv][path]['device']:
req_device = self.delete_device(device,srv,path)
if req_device.status_code == 204:
print 'Se ha borrado el device:{} del servicio:{} y path:{}'.format(device,srv,path)
else:
print 'No se ha podido borrar el device:{} del servicio:{} y path:{}'.format(device,srv,path)
else:
if dirty[srv].__contains__('device'):
for device in dirty[srv]['device']:
req_device = self.delete_device(device,srv)
if req_device.status_code == 204:
print 'Se ha borrado el device ' + str(device) + ' del servicio ' + str(srv)
else:
print 'No se ha podido borrar el device ' + str(device) + ' del servicio ' + str(srv)
if world.service_path_exists:
for path in dirty[srv]:
if dirty[srv][path].__contains__('resource'):
for resource in dirty[srv][path]['resource']:
for apikey in dirty[srv][path]['resource'][resource]:
req_service = self.delete_service(srv, path, resource, apikey)
if req_service.status_code == 204:
print 'Se ha borrado el servicio:{} path:{} resource:{} y apikey:{}'.format(srv,path,resource,apikey)
else:
print 'No se ha podido borrar el servicio:{} path:{} resource:{} y apikey:{}'.format(srv,path,resource,apikey)
else:
req_service = self.delete_service(srv)
if req_service.status_code == 204:
print 'Se ha borrado el servicio ' + srv
else:
print 'No se ha podido borrar el servicio ' + srv
|
Lightning quick Jomo Cosmos youngster Lehlogonolo Kekana battling Baroka FC's Ananias Gebhardt, during the two's Nedbank Cup clash.
National First Division (NFD) teams have knocked out four Premier Soccer League teams in this year's Nedbank Cup.
One of them is Jomo Cosmos, who beat Baroka 1-0 in Limpopo on Tuesday to advance to the last 16 of the competition.
The defending champions, Free State Stars, were beaten by Richards Bay at home in the first round of the competition.
Ezenkosi boss Jomo Sono will now rather avoid the tough remaining sides and get a favourable draw to increase their chances of progressing further.
There are eight PSL teams, seven from the NFD and one from the ABC Motsepe League remaining ahead of tonight's draw, which will start at 7pm and broadcast live on SuperSport channel 204.
Sono said after the win against Baroka that he fancies his side's chances of reaching the quarterfinals, but would like an easy draw.
"Last year we knocked Bidvest Wits out and we just hope we get an easy draw so that we can go on," said Sono.
"The big boys are not here and we fancy our chances, especially because [Orlando] Pirates and [Mamelodi] Sundowns are not here. Wits is also a problem, but we will give it a go."
|
# -*- python -*-
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/tcwg-buildslave/workspace/tcwg-make-release/label/tcwg-x86_64-ex40/target/aarch64-linux-gnu/_build/builds/destdir/x86_64-unknown-linux-gnu/share/gcc-4.9.4/python'
libdir = '/home/tcwg-buildslave/workspace/tcwg-make-release/label/tcwg-x86_64-ex40/target/aarch64-linux-gnu/_build/builds/destdir/x86_64-unknown-linux-gnu/aarch64-linux-gnu/lib/../lib64'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
"That looks really hurt nanodesu."
Tama and Pochi murmured in a safe zone.
Near the keep of Saga Empire's sunken castle in the center of the imperial capital, the bullhorn woman--Lamia demon lord raised her body with her shoulder dyed red.
Fallen hero Fuu controlling the demon lord was hit by Hikaru's <<Multiple Javelin>>.
"That was not Lulu's but Miko's attack, was it."
Standing behind Tama and Pochi, Liza looked up at the girls floating in the air inside Nana's protective <<Absolute Throne>>.
The demon lord roared out.
Hero Fuu's reflexes aren't good enough to dodge rain of multiple javelins accelerated to supersonic speed with Lulu's accelerated canon.
Hero Fuu's slightly shrill voice resounded above the imperial capital's debris.
Arisa sounded like she was relieved somewhat.
"Lamia demon lord turned her face away to protect that kid."
Apparently, master sniper Lulu saw what happened.
The demon lord who was roaring at them had her face gouged out, but it had already begun regenerating while producing white steam.
The demon lord wraps Hero Fuu in both her hands.
The chest flesh on the demon lord's upper breastbone opens up as she leads hero Fuu inside.
"I see--I'm going to become one with Lamiko-san."
Hero Fuu disappeared into the other side of flesh wall with half-squinting entranced eyes.
"It's a two-mouthed woman nanodesu!"
"No no, I don't think she's eating him though?"
Even Arisa who corrected Tama and Pochi didn't sound too confident about it.
A dark purple light swept over the demon lord's body, right afterward, her figure changed.
"The demon lord has undergone a change form so I report."
Equipment that appeared to be Hero Fuu's got equipped on the demon lord.
Her lower half is still that of snake, but it's now wrapped in transformed leg armor.
However, it seemed she slackened her strength during the equipment transformation, Mia's Lesser Fenrir managed to pry its way out of the snake tail's constriction.
"It's so dark... yet warm and soft."
At Hero Fuu's murmur, the voice of a girl responded in broken speech.
A small room made of red pulsing veins.
Hero Fuu sat on a cockpit-like chair located in the center of the room.
Hero Fuu was gasping for breath at the sudden acceleration, but he didn't get thrown off the chair.
Since slender tentacles secured him on the seat in place of seat belts.
"Lamiko-san, can't I look the outside?"
The wall in front of him turned transparent, showing the outside.
A spherical-shaped laminated magic circles--Nana's throne is floating ahead.
"Our enemy are strong. Lamiko-san, use my equipment."
Hero's armor and dagger Hero Fuu took from his Inventory were taken by tentacles that extended out of the walls here.
"I'd have given you other stuff I got with me, but I gave them all to the generals and the others already..."
Her voice was flat, but Hero Fuu sensed a deep affection in it.
Straight ahead which is rare for him who's usually looking down.
The demon lord resumed the battle responding to Hero Fuu's order.
Defensive shield Phalanx that Pochi deployed in a hurry defended against an attack that managed to split even the rampart.
The demon lord had swung out her arm, assaulting Pochi with a gigantic holy dagger.
At first Pochi blocked the attack with her sword, but even a weapon made of dragon fangs that can [Pierces through everything] wouldn't be sturdy enough to repeatedly clash with a massive holy dagger made of Orichalcum alloy without chipping.
Once the blade is chipped even a little, the crack will only get worse from there.
"We can't get close to her at this rate."
"Not true nanodesu! With Capatult's launch we can do it nodesuyo!"
After dodging the second assault of the holy dagger, Liza runs in parallel with Pochi.
A merry-looking cat ninja who was being chased by undead snake hair ran past the two.
"Stop playing around, defeat them."
Thud, the cat tail struck the ground, and then innumerable shadow extended out of gaps between debris, binding the undead snake hair.
Immediately after, blue shining bullets rain down on the undead, annihilating them all.
Looks like Lulu launched a support fire when she saw these undead stopped moving.
"We should get back to attacking the main body ourselves."
The beastkin girls gauged the distance to the demon lord as the Lesser Fenrir kept her busy.
Above, Nana is dodging and defending, Hikaru and Lulu attacking, while Arisa is in charge of detecting and disturbing the demon lord in fierce clashes.
Shockwaves generated from these clashes have left none of the original buildings around the imperial castle standing including the rampart.
"--Really, it's truly strong. This demon lord."
"Well, on top of being an upper level 90s, she's even got herself hero's equipment after all."
Hikaru responded to Arisa's murmur.
"Why didn't they deploy Lamia demon lord right from the start then?"
"You mean before the vampire demon lord?"
"Un, if this is the mastermind--Gobu King's handiworks, both of those demon lords should have been deployed at different cities each."
"Maybe he's the type that saves stuff for the last?"
"I could see that if he were a bad guy in a home console RPG, but isn't it too much of a foolish move to unleash your card one by one?"
While replying to Arisa randomly, Hikaru covered the beastkin girls' assault with support magic.
"You're right. But you're bound to have the rug pulled out from under you if you're fixated on the idea that our enemy is incompetent."
Hikaru mocked herself for falling victim to a similar ploy that cost her many of her friends.
"Maybe he adjusted it so Master's help won't come? Or perhaps, he's keeping us here so we won't go support Master?"
Arisa racked her brain while muttering.
"But what would that entail? Gobu King went around and setting fires at every cities, all while constantly running away from Master like he's harassing him. If all of that had some sort of reason behind--"
Arisa's golden armor received a transmission from the Solitary Island Palace.
The comm jamming barrier surrounding the capital seems to have vanished during all the uproar.
Zena who was standing by in the Solitary Island Palace reported about <<Miasma Crystal>> found in the capital.
Presently it's being purified in a ritual by Sera and other high ranking priests.
Similar items have been discovered in Seryuu City and Muno Earldom as well, so they've issued warning to every locations.
"Nice follow-up! We'll leave those to you. And since I've told you the situation here, please ask Tina-sama and other personnels that are good with detective works to work out what our enemy is trying to achieve."
Arisa has conveyed everything that has transpired with her group and Satou's to Zena.
"I will pass it over to Elterina-sama and her highness Sistina."
Now that she's entrusted the investigation to her allies, Arisa can fight without reserve.
The clashes with demon lord that have been seesawing back and forth are gradually tilting toward Arisa and the girls' favor.
"Lamiko-san's attacks don't work? Just what's up with those cheat armor! Even the twerps and tail girl broke through what should have been absolute defense aegis with their white weapons! And most annoyingly--"
Hero Fuu cursed out and then glared hard at the floating Throne.
"That gunner with her starting out-looking FPS equipment shooting bullets endlessly like there's no tomorrow! How dare they shoot and wound Lamiko-san's beautiful body!"
The demon lord's Unique Skill [Shield of Reflect God (Aegis)] did reflect back everything that hit it, but all the rifle bullets other than those intended to be feints managed to hit the snake lower half that was outside the aegis's range.
"Lamiko-san, lower right! Three hostiles with white weapons coming!"
Even with the warning, the demon lord's arms are already busy dealing with the white dragon coming from above and the Lesser Fenrir rushing on the ground, only her snake hair's petrification laser could be used to stop them.
And even the petrification laser was dodged by their Flickering Steps moving in zig-zag.
"If only I had magic like Yuuki's..."
Hero Fuu chewed his fingernails as he could do nothing but watch.
As the girl said that, a screen not unlike a selection screen in a PC game got displayed before Hero Fuu.
"Magic list? Is this maybe--『Master Wizard』's power?"
"You mean leaving the control to me?"
After glancing at the screen once, Hero Fuu broke out a stiff smile while kukukuku-ing.
"Nice! We're going to win this!"
Hero Fuu's finger crawled on the screen and picked a magic.
Severe earthquake ran through the ground, Pochi and Lesser Fenrir tripped over and got caught in the demon lord's attack.
Miasma around the imperial capital converged, draining vitality off anyone inside the whirlpool-like swamp area.
Liza carrying Pochi jumped twice in the air, joined by ninja Tama to take refuge inside Nana's Throne.
The girls got locked up in a barrier of space magic along with the Throne but they immediately got out of it as Arisa cast the opposing spell.
"That was faster than I thought, but that bought us enough time."
A gigantic meteorite falling from the sky assaulted the Throne.
It's the meteor summoning magic that the False King Shin once used in an attempt to destroy Shiga Kingdom's castle.
The girls who just got out of Labyrinth were late to notice it as the meteor squashed them all along with the Throne.
"Ahahahahaha. This is amazing, Lamiko-san. We're invincible."
Hero Fuu laughed out loud like a maniac inside the demon lord.
"Oh crap, what the heck is that?"
"Must be Meteor. What now Yuuki."
Hero Yuuki and Hero Seigi who had evacuated far away from the castle with Flying Shoes looked up at the gigantic falling meteorite in the sky.
"I mean, like, aren't we in danger here?"
Hero Yuuki and Hero Seigi made a nose dive and took shelter in a place that would shield them from the explosion and flying debris.
Moment later, raging winds and broken debris flew all around them.
"What's the demon lord searching for? Don't tell me, those girls are still alive?"
"After eating that Meteor? High level heroes are incredible."
The demon lord pushed her way through broken pieces of meteorites in the impact crater.
Hero Yuuki concentrated his mana and shoot Inferno at the demon lord's lower half.
The demon lord couldn't protect herself with Aegis from the surprise attack, and got her lower half burned.
The demon lord that had started regenerating with rising white smoke turned toward the two heroes.
Rock balls of vengeance are pouring behind the two fleeing heroes.
Every one of those rocks is as big as a house.
"I'm gonna die, die, dieeeeeeeee."
"Shut your trap Seigi, keep those legs moving!"
The two heroes frantically ran in the sky, even more desperately than when they were being chased by the vampire demon lord.
"Phew--those kids are really reckless."
"Un, but we're saved thanks to them."
"Succeeded reactivating <<Absolute Throne>>, so I report."
Throne should have been capable to endure a mass that huge by design, but either it was an early failure or the meteor was not an attack relying on mass alone, the Throne function had shut down temporarily.
Twofolds faint purple lights circulate on Arisa's body.
"It's payback time! Let me show you what serious Arisa-chan's capable of."
Arisa gallantly declared so as she pointed her wand at the demon lord.
Arisa and the girls' turn will end in the next chapter, and the chapter after the next will be back to Satou.
|
#!/usr/bin/env python
"""
Print cumulative survey footage over a project's history
"""
import sys
import os.path
from collections import Counter
from davies.compass import *
def print_cumulative_footage(datfiles):
total = 0.0 # miles
monthly_stats = Counter() # feet per month
cumulative_stats = {} # cumulative miles by month
for datfilename in datfiles:
print datfilename, '...'
datfile = DatFile.read(datfilename)
for survey in datfile:
month = survey.date.strftime('%Y-%m')
monthly_stats[month] += survey.included_length
print 'MONTH\tFEET\tTOTAL MILES'
for month in sorted(monthly_stats.keys()):
total += monthly_stats[month] / 5280.0
cumulative_stats[month] = total
print '%s\t%5d\t%5.1f' % (month, monthly_stats[month], total)
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s DATFILE...' % os.path.basename(sys.argv[0])
sys.exit(2)
datfiles = sys.argv[1:]
print_cumulative_footage(datfiles)
|
Round Two of invites will go out next week.
As I mentioned recently, we’ll be reopening the DW Lounge where we previously posted our stories, and are hoping to zap you all with a couple of our romance based tales. 😀 You’re welcome!
For the Lounge reopening, we have a tentative date in mind this January, so stay tuned here for updates on that.
Till then, I’d like to wish you all a very happy, peaceful and healthy year ahead.
So excited to share this with all of you and feeling super blessed that we got to meet again this year and catch up in person. It is very rarely that this sort of this happens because we met over a blog and through our mutual passion for reading and writing brought us together. Then we became great friends and joint authors of this blog but to be actually able to meet face to face is almost a miracle and to have done this twice, makes us feel truly blessed and grateful. The past few years and 2018 has been a roller coaster year for both of us, work, travel, life itself. Hardly having time to keep up with everything that is happening and then having to always be on top of it. Takes a toll but there are the simple joys that somehow seem to make everything better and brighter, like meeting a great friend after a long time. So glad we got the opportunity to spend Christmas and soon New Year together.
Wish all of you wonderful readers a very Happy 2019!
We hope to be back with a lot more than we have been able to do this year.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ploty.py
#
# Copyright 2016 rob <rob@Novu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import matplotlib
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from pylab import text
import wikipedia
from pylab import savefig, rcParams
'''some plot globals'''
bbox = dict(boxstyle="round,pad=0.3", fc="white", ec="k", lw=0.1)
matplotlib.style.use("ggplot")
rcParams.update({'figure.autolayout': True})
matplotlib.rc('font', family='DejaVu Sans')
def main(data_list):
"""WorldClim.org now with 20km resolution / 1 km would be possible """
fig = plt.figure(figsize = (15,7), facecolor="0.92", edgecolor='k')
#ax = fig.add_subplot(111)
font = 10
Title = ("Improved RothC SOC Turnover Model // Baseline // Ferralsol Novo Progresseo")
fig.suptitle(Title, fontsize=11, y=1 )
ax1 = plt.subplot2grid((2,4), (0, 0), rowspan=1, colspan=1)
ax2 = plt.subplot2grid((2,4), (1, 0), rowspan=1, colspan=1)
ax3 = plt.subplot2grid((2,4), (0, 1), rowspan=1, colspan=1)
ax4 = plt.subplot2grid((2,4), (1, 1), rowspan=1, colspan=1)
ax5 = plt.subplot2grid((2,4), (0, 2), rowspan=1, colspan=2)
ax6 = plt.subplot2grid((2,4), (1, 2), rowspan=1, colspan=2)
plt.subplots_adjust(bottom=0.85)
#make empty links to append the y data
rpm_list = []
dpm_list = []
bio_list = []
hum_list = []
co2_list = []
x_list = []
# walk through the data and append the data
value = "Y_"
for i in data_list:
if i["pool"] == "rpm":
rpm_list.append(i[value])
if i["pool"] == "dpm":
dpm_list.append(i[value])
if i["pool"] == "bio":
bio_list.append(i[value])
if i["pool"] == "hum":
hum_list.append(i[value])
if i["pool"] == "co2":
co2_list.append(i[value])
x_list.append(np.datetime64(i["datetime"]).astype(datetime))
# sump up all lists to get total SOC
full_list = [sum(x) for x in zip(rpm_list, dpm_list, hum_list, bio_list)]
ax1.plot(x_list, rpm_list, "#616161")
ax1.set_ylabel((r'Mg SOC ha$^-$' + r'$^1$' ))
ax1.set_title("RPM pool", fontsize = font)
ax2.plot(x_list , dpm_list, "#424242")
ax2.set_title("DPM pool", fontsize = font)
ax2.set_ylabel((r'Mg SOC ha$^-$' + r'$^1$' ))
ax2.set_xlabel("year")
ax3.plot(x_list, hum_list, "#424242")
ax3.set_title("HUM pool", fontsize = font)
ax4.plot(x_list, bio_list, "#616161")
ax4.set_title("BIO pool", fontsize = font)
ax4.set_xlabel("year")
ax5.plot(x_list, full_list, "#616161")
ax5.set_title("SOC combined", fontsize = font)
ax6.plot(x_list, co2_list, "#424242")
ax6.set_title("C02", fontsize = font)
ax6.set_xlabel("year")
plt.show()
savefig("fig.png", bbox_inches="tight")
return 0
if __name__ == '__main__':
main()
|
Heinrich Bollandt (also Polland), German painter. He was one of the most important artists of the Brandenburg-Bayreuther Renaissance, active mainly in Bayreuth, but also in Lübeck and Berlin. He painted portraits and altarpieces and worked in a style that seems almost archaic and evidently depended on the art of Lucas Cranach the Elder.
|
from __future__ import absolute_import
import posixpath
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationReleasesBaseEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseFile
try:
from django.http import (CompatibleStreamingHttpResponse as StreamingHttpResponse)
except ImportError:
from django.http import StreamingHttpResponse
class ReleaseFileSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
class OrganizationReleaseFileDetailsEndpoint(OrganizationReleasesBaseEndpoint):
doc_section = DocSection.RELEASES
def download(self, releasefile):
file = releasefile.file
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b''),
content_type=file.headers.get('content-type', 'application/octet-stream'),
)
response['Content-Length'] = file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % posixpath.basename(
" ".join(releasefile.name.split())
)
return response
def get(self, request, organization, version, file_id):
"""
Retrieve an Organization Release's File
```````````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
download_requested = request.GET.get('download') is not None
if download_requested and (request.access.has_scope('project:write')):
return self.download(releasefile)
elif download_requested:
return Response(status=403)
return Response(serialize(releasefile, request.user))
def put(self, request, organization, version, file_id):
"""
Update an Organization Release's File
`````````````````````````````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:param string dist: the name of the dist.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
serializer = ReleaseFileSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
releasefile.update(
name=result['name'],
)
return Response(serialize(releasefile, request.user))
def delete(self, request, organization, version, file_id):
"""
Delete an Organization Release's File
`````````````````````````````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
file = releasefile.file
# TODO(dcramer): this doesnt handle a failure from file.deletefile() to
# the actual deletion of the db row
releasefile.delete()
file.delete()
return Response(status=204)
|
NEW Vanilla Skī, Our Fall Launch, Awesome Giveaway, & The Keto Reboot!
LIMITED RELEASE – Vanilla Ski KETO//MAX!!
This is a BRAND NEW flavor that just launched this morning and it will be here until it sells out! It is a creamy Madagascar vanilla that tastes amazing!
Fill a shaker cup with ice!
Fill the rest of the cup with water and shake!!!
This contains our MAX ketones which are a 40% more bioavailable ketone, helping to increase energy, lose cravings, supports fat loss, muscle preservation and improved mood, sleep and mental focus!
Friendly reminder that our entire Fall Collection, Harvest Sunrise, Pumpkin Spice and Chai Tea, is available to order from today until the end of Autumn!
As a team, were on day 10 of 40 and so far its been incredible to use this workbook to help get in alignment with our desires and to put things into action on a daily basis! I will be starting a new 40 Day group in a few days! Please let me know if you are interested in joining us!
Are you ready to Reboot!? WE ARE!! The Keto Reboot kits for the November 18th reboot will be available for purchase November 1st – 8th, ONLY. This ensures we have time to get the kit to you before the Keto Reboot starts! Reboot kits will be available to purchase HERE when they are released. Remember, this is a 60-hour REBOOT where you will engage your body to use FAT as an energy source while gradually shifting into Keto Adaptation.
Stay Active on the Keto Reboot Facebook page for encouragement and insights into rebooting your system back to it’s N8tive State!
|
# -*- coding: utf-8 -*-
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2006-2011, 2020 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2012, 2020 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2020 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020 SURF https://www.surf.nl
# Copyright (C) 2020 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from seecr.test import SeecrTestCase, CallTrace
from os.path import join
from os import mkdir, listdir
from meresco.components.http import utils as httputils
from meresco.components.http.utils import CRLF, notFoundHtml
from meresco.components.log import LogFileServer, DirectoryLog
class LogFileServerTest(SeecrTestCase):
def setUp(self):
SeecrTestCase.setUp(self)
self.logDir = join(self.tempdir, 'log')
directoryLog = DirectoryLog(self.logDir)
self.qlfs = LogFileServer("Fancy <name>", directoryLog, basepath='/log')
def testGenerateEmptyHtmlFileLinkListing(self):
headers, body = "".join(self.qlfs.handleRequest(path="/log")).split(CRLF+CRLF)
self.assertEqual('HTTP/1.0 200 OK\r\nContent-Type: text/html; charset=utf-8', headers)
self.assertTrue(body.startswith('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">\n<html>'), body)
self.assertTrue(body.rfind('</body>\n</html>') != -1, body)
self.assertTrue('<title>"Fancy <name>" Logging</title>' in body, body)
self.assertTrue('Logging - logfile listing' in body, body)
def testEmptyDirectoryEmptyHtmlResult(self):
headers, body = "".join(self.qlfs.handleRequest(path="/")).split(CRLF+CRLF)
self.assertFalse('<li>' in body)
def testDirectoryHtmlResult(self):
filename = '2009-11-10-afile.1'
open(join(self.logDir, filename), 'w').close()
headers, body = "".join(self.qlfs.handleRequest(path="/log")).split(CRLF+CRLF)
self.assertTrue('<li>' in body)
self.assertTrue('<a href="/log/%s"' % filename in body, body)
filename2 = '2009-11-22-yet_another_file.txt'
open(join(self.logDir, filename2), 'w').close()
headers, body = "".join(self.qlfs.handleRequest(path="/log/")).split(CRLF+CRLF)
self.assertTrue('<a href="/log/%s"' % filename in body, body)
self.assertTrue('<a href="/log/%s"' % filename2 in body, body)
self.assertTrue(body.index(filename) > body.index(filename2), 'The files should be sorted.')
def testPathNotSpecifiedAsIndexEffectivelyUsesMerescoFileServer(self):
headers, body = "".join(self.qlfs.handleRequest(path="/thisIsWrongMister")).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
def testPathIsASubDir(self):
aSubDirCalled = "subdir"
mkdir(join(self.logDir, aSubDirCalled))
headers, body = "".join(self.qlfs.handleRequest(path="/%s" % aSubDirCalled)).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
def testGetNonExistingLogFile(self):
headers, body = "".join(self.qlfs.handleRequest(path="/log/thisIsWrongMister")).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
|
How to remove wax buildup off wood floors wikizieco. Wood floor wax remover how to wax wooden floor hardwood. How to remove wax buildup on your hardwood floors. Housekeeping tips : removing wax from wood floors youtube.
|
'''
Created on 21.10.2015
@author: selen00r
'''
class ExcludeTransaction:
'''
Base class for strategies to exclude transactions while calculating results from the transaction history
'''
def exclude(self, idxData):
return False
class ExcludeAvg200Low(ExcludeTransaction):
def __init__(self, offset = 0.0):
self.offset = offset
def exclude(self, transactionResult):
if transactionResult.indexBuy.mean200 > 0:
checkValue = transactionResult.indexBuy.mean200 + (transactionResult.indexBuy.mean200 * self.offset)
return (transactionResult.indexBuy.close < checkValue)
else:
return True
class TransactionResultPrinter:
'''
Base class for printing of transactions
'''
def printResult(self, transactionResult, result, resultEuro, hasResult = False ):
pass
class ResultCalculator:
'''
Base class to calculate a transaction result
'''
def __init__(self):
self.total = 0.0
def calcResult(self, buy, sell):
result = (float(sell)/float(buy))-1.0
self.total += result
return result
def reset(self):
self.total = 0
def getTotal(self):
return self.total
class ResultCalculatorPut(ResultCalculator):
def __init__(self):
ResultCalculator.__init__(self)
def calcResult(self, buy, sell):
return ResultCalculator.calcResult(self, buy, sell) * (-1.0)
class ResultCalculatorEuro(ResultCalculator):
'''
Base class to calculate a transaction result in Euros
'''
def __init__(self, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculator.__init__(self)
self.invest = invest
self.total = invest
self.totalInvest = invest
self.fixInvest = fixInvest
self.maxInvest = maxInvest
def _checkTotal(self):
if self.total < 0:
self.total = 0
if self.total < self.invest:
self.totalInvest = self.totalInvest +(self.invest - self.total)
self.total = self.invest
def calcResult(self, buy, sell):
result = ResultCalculator().calcResult(buy, sell)
if self.fixInvest:
result *= self.invest
else:
result *= self.total
self.total += result
self._checkTotal()
return result
def reset(self):
self.total = self.invest
self.totalInvest = self.invest
def getTotalInvest(self):
return self.totalInvest
class ResultCalculatorEuroPut(ResultCalculatorEuro):
def __init__(self, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuro.__init__(self, invest, fixInvest, maxInvest)
def calcResult(self, buy, sell):
return ResultCalculatorEuro.calcResult(self, buy, sell, knockOut) * (-1.0)
class ResultCalculatorEuroLeverage(ResultCalculatorEuro):
def __init__(self, distance, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuro.__init__(self, invest, fixInvest, maxInvest)
self.distance = distance
self.k = 1.1302864364
self.d = 0.2029128054
def calcResult(self, buy, sell):
result = ResultCalculator().calcResult(buy, sell)
startCalc = (self.k * (self.distance)) + self.d
actCalc = (self.k * ((self.distance) + (result*100.0))) + self.d
percCalc = (actCalc / startCalc)-1
if self.fixInvest:
result = self.invest * percCalc
else:
newInvest = self.total
if newInvest < self.invest:
newInvest = self.invest
if (self.maxInvest > 0.0) and (newInvest > self.maxInvest):
result = (self.maxInvest) * percCalc
else:
result = newInvest * percCalc
self.total += result
self._checkTotal()
return result
class ResultCalculatorEuroLeveragePut(ResultCalculatorEuroLeverage):
def __init__(self, distance, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuroLeverage.__init__(self, distance, invest, fixInvest, maxInvest)
def calcResult(self, buy, sell):
return ResultCalculatorEuroLeverage.calcResult(self, buy, sell) * (-1.0)
class EvalResult:
'''
Base class for transaction result evaluation
'''
def __init__(self, name, invest, fixInvest = True):
self.name = name
self.winCount = 0
self.lossCount = 0
self.knockOutCount = 0
self.maxWin = 0.0
self.maxLoss = 0.0
self.sumWin = 0.0
self.sumLoss = 0.0
self.maxWinEuro = 0.0
self.maxLossEuro = 0.0
self.sumWinEuro = 0.0
self.sumLossEuro = 0.0
self.invest = invest
self.fixInvest = fixInvest
self.checkExclude = ExcludeTransaction()
self.resultCalculator = ResultCalculator()
self.resultCalculatorEuro = ResultCalculatorEuro( self.invest, self.fixInvest )
def _updateWin(self, result, resultEuro):
self.winCount += 1
self.sumWin += result
if (self.maxWin < result):
self.maxWin = result
if (self.maxWinEuro < resultEuro):
self.maxWinEuro = resultEuro
self.sumWinEuro += resultEuro
def _updateLoss(self, result, resultEuro):
self.lossCount += 1
self.sumLoss += result
if (self.maxLoss > result):
self.maxLoss = result
if (self.maxLossEuro > resultEuro):
self.maxLossEuro = resultEuro
self.sumLossEuro += resultEuro
def setExcludeChecker(self, checkExclude):
self.checkExclude = checkExclude
def setResultCalculator(self, calculator):
self.resultCalculator = calculator
def setResultCalculatorEuro(self, calculator):
self.resultCalculatorEuro = calculator
def getTotalCount(self):
return (self.winCount + self.lossCount)
def getTotalResult(self):
return self.resultCalculator.getTotal()
def getTotalResultEuro(self):
return self.resultCalculatorEuro.getTotal()
def getTotalInvestEuro(self):
return self.resultCalculatorEuro.getTotalInvest()
def getWinRatio(self):
if (self.getTotalCount() > 0):
return (float(self.winCount)/float(self.getTotalCount()))
else:
return 0.0
def getMeanWin(self):
if (self.winCount > 0):
return (self.sumWin / float(self.winCount))
else:
return 0
def getMeanLoss(self):
if (self.lossCount > 0):
return (self.sumLoss / float(self.lossCount))
else:
return 0
def getWinLoss(self, buy, sell):
return self.resultCalculator.calcResult(buy, sell)
def getWinLossEuro(self, buy, sell):
return self.resultCalculatorEuro.calcResult(buy, sell)
def evaluateIndex(self, transactionResultHistory, resultPrinter = None ):
for transactionResult in transactionResultHistory:
self.evaluate( transactionResult, resultPrinter )
def _updateResult(self, transactionResult, result, resultEuro ):
if result < 0.0:
self._updateLoss(result, resultEuro)
else:
self._updateWin(result, resultEuro)
def evaluate(self, transactionResult, resultPrinter = None):
hasResult = False
result = 0.0
resultEuro = 0.0
if not (self.checkExclude.exclude(transactionResult)):
indexSell = transactionResult.indexSell.close
if transactionResult.knockOut != 0.0:
indexKnockOut = transactionResult.indexBuy.close + (transactionResult.indexBuy.close * transactionResult.knockOut)
if transactionResult.knockOut < 0:
if transactionResult.getLowValue() < indexKnockOut:
indexSell = indexKnockOut
else:
if transactionResult.getHighValue() > indexKnockOut:
indexSell = indexKnockOut
result = self.getWinLoss( transactionResult.indexBuy.close, indexSell)
resultEuro = self.getWinLossEuro(transactionResult.indexBuy.close, indexSell)
self._updateResult( transactionResult, result, resultEuro)
hasResult = True
if resultPrinter:
resultPrinter.printResult( transactionResult, result, resultEuro, hasResult )
|
For everyone finishing up the semester, whether students, professors, or families and friends waiting to see them again.
For fruitful times of rest and renewal this summer, and the chance to learn from Christians whose vocations lie elsewhere than the academy as well.
For good rhythms of rest and prayer and work and play this summer.
For attention to justice and mercy in our summer routines.
For God’s guidance and blessing on ESN as we work toward new endeavors this summer and continue the work God has given us to do.
For an opportunity ESN is currently exploring; we can’t share about it yet, but would deeply appreciate your prayers for wisdom and blessing.
As you have prayer requests to share, please email us.
|
#!/usr/bin/env python
"""
This is a console-driven menu program for management of some AWS and OpenStack cloud services
"""
import time
from Menu_Option_1 import menu_1
from Menu_Option_2 import menu_2
from Menu_Option_3 import menu_3
from Menu_Option_4 import menu_4
from Menu_Option_5 import menu_5
def main():
menu1 = menu_1()
menu2 = menu_2()
menu3 = menu_3()
menu4 = menu_4()
menu5 = menu_5()
# loop_lvl_1 displays the initial menu options
loop_lvl_1 = 1
while loop_lvl_1 == 1:
# print initial menu options
print "\nWelcome to CloudTools. \nPlease make a selection by " \
"entering the number of your chosen menu item below.\n" \
"Your options are:\n" \
"1) Compute \n2) Storage \n3) CloudWatch Monitoring \n" \
"4) AutoScaling \n5) CloudTrail \n6) Quit CloudTools"
choice_lvl_1 = input("Choose your option: \n")
if choice_lvl_1 == 1:
menu1.main_menu()
elif choice_lvl_1 == 2:
menu2.main_menu()
elif choice_lvl_1 == 3:
menu3.main_menu()
elif choice_lvl_1 == 4:
menu4.main_menu()
elif choice_lvl_1 == 5:
menu5.main_menu()
elif choice_lvl_1 == 6:
loop_lvl_1 = 0
else:
print "Please enter number between 1 and 6 only"
time.sleep(2)
print "Thank you for using CloudTools, Goodbye."
main()
"""
lines 6 - 11: Import all required modules
line 14: Create the main method, i.e. the application
lines 16 - 20: instantiate the menu option objects
line 23: create loop variable
lines 24 - 31: while the loop is set to 1; print the menu to screen
lines 32 - 46: Reads in user input and calls relevant menu object
lines 48 - 49: If user hits 6, exit the loop and end the application
lines 51 - 53: If user enters a character not stated in the menu, print
error message
line 55: prints goodbye message when application is ended
line 58: calls the main method
"""
|
It is very important to be on your own throughout the massage meeting. Simply kick back offer the very same Javanese massage that you would offer to a customer. Do not fidget, since it will come via in your touch.
|
import os, sys, functools, shutil, errno
def relative_to_interpreter(path):
return os.path.join(os.path.dirname(sys.executable), path)
def relative_to_package(path):
return os.path.join(__path__[0], path)
def relative_to_cwd(path):
return os.path.join(os.getcwd(), path)
def memoize(obj):
memoized = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
k = str(args) + str(kwargs)
if k not in memoized:
memoized[k] = obj(*args, **kwargs)
return memoized[k]
return memoizer
def remove(path):
try:
os.remove(path)
except OSError:
if not os.path.exists(path):
pass
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except OSError as e:
if e.errno == errno.ENOTEMPTY:
shutil.rmtree(path)
else:
raise
else:
raise
def makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def copy(src, dst):
remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def fallback(*candidates):
for i in candidates:
if i is not None:
return i
def remove_illegal_chars_for_postscript_name_part(name):
"""
Conforming The Compact Font Format Specification (version 1.0), section 7 "Name INDEX".
Also removing "-".
"""
return name.translate({
ord(i): None for i in "[](){}<>/%\u0000\u0020\u0009\u000D\u000A\u000C-"
})
from hindkit import constants
from hindkit import filters
from hindkit.objects.base import BaseFile
from hindkit.objects.family import Family, DesignSpace, Fmndb
from hindkit.objects.font import Master, Style, Product
from hindkit.objects.glyphdata import GlyphData, Goadb
from hindkit.objects.client import Client
from hindkit.objects.feature import FeatureClasses, FeatureTables, FeatureLanguagesystems, FeatureGSUB, FeatureGPOS, FeatureKern, FeatureMark, FeatureOS2Extension, FeatureNameExtension, FeatureMatches, FeatureReferences
from hindkit.objects.project import Project
|
Let’s get all the legal stuff out of the way… I know, boring right? But, it is very important!
This blog, Sawdust and High Heels, holds my own personal views and opinions and not those of my employer, any associations I am a member of, or organisations I volunteer with.
All information on my blog is authentic and documented based on my own experience and, as such, there may be errors or missing information. You use it at your own risk.
I, the sole author of Sawdust and High Heels, do not share personal information that is collected about your visit for use other than to analyze content performance. I will never sell my data/readers’ information to anyone.
Articles may be shared freely, but the exclusive copyright remains with me, the author of Sawdust and High Heels. If you are interested in republishing any of my content, please contact me.
None of my images are allowed to be shared out of context or outside the perimeters of my blog posts on any website, advertising, social media or blog without my written permission. This will result in immediate legal action.
The information provided on Sawdust and High Heels is for entertainment purposes only and I am not providing legal or any other professional advise.
I sometimes get sent products or offered services for free to write reviews on. This is at my discretion and I will always offer my personal, unbiased opinion. I will always clearly state that I received these products for review purposes for free. If I do not like a product or do not agree with the company’s ethos, it will not be featured on my website. I do not receive payment for these reviews and am not obliged to report back on the item or event in question.
|
#!/usr/bin/python
# begin boilerplate
import sys
import os
scriptName = os.path.basename(sys.argv[0])
scriptPath = os.path.dirname(sys.argv[0])
sharedPath = os.path.join(scriptPath, "../shared/")
sys.path.append(os.path.abspath(sharedPath))
import Pieces
import GitHelper
import Scripts
import Utils
#end boilerplat
class Script(Scripts.Script):
def helpString(self):
return "fishlamp repo <init or update> <repo name if init>\nIf current repo is not a FishLamp repo, this inits the current repo to be a FishLamp project. Otherwise it just updates the submodules\nAlso and adds fishlamp-core submodule if it's not added"
def update(self) :
GitHelper.confirmGitRoot();
Pieces.createFishLampFolderIfNeeded();
# GitHelper.addSubmodules(Pieces.defaultPieces(), Pieces.folderName());
def init(self) :
parm = self.parameterAtIndex(2, "Expecting name of path as first argument");
if os.path.exits(parm):
Utils.printError(parm + " already exists");
sys.exit(1);
os.makedirs(directory)
os.chdir(directory)
if GitHelper.isGitRepo() :
Utils.printError("git repo already exists");
sys.exit(1);
GitHelper.init();
self.update();
def run(self):
mode = self.parameterAtIndex(1, "Expecting either update or init as first parameter");
if mode == "update":
self.update();
elif mode == "init":
self.init();
else:
Utils.printError("expecting mode or update");
Script().run();
"""
init repo
#!/bin/bash
function usage() {
echo "if current repo is not a FishLamp repo, this inits the current repo to be a FishLamp project. Otherwise it just updates the submodules".
echo "Also and adds fishlamp-core submodule if it's not added"
}
if [ "$1" == "--help" ]; then
usage
exit 0;
fi
set -e
MY_PATH="`dirname \"$0\"`"
MY_PATH="`( cd \"$MY_PATH\" && pwd )`"
fishlamp="FishLamp"
fishlamp_dir="`pwd`/$fishlamp"
if [ ! -d ".git" -a ! -f ".git" ]; then
echo "# git folder not found - please run in root of your repository."
exit 1
fi
if [ ! -d "$fishlamp_dir" ]; then
mkdir "$fishlamp_dir"
fi
declare -a repo_list=( "fishlamp-core" )
git submodule update --init --recursive
for submodule in "${repo_list[@]}"; do
fishlamp update-submodule "$submodule"
done
"""
"""
#!/bin/sh
# fishlamp-new-repo.sh
# fishlamp-install
#
# Created by Mike Fullerton on 7/27/13.
#
function usage() {
echo "creates a new folder for the repo and then inits the new repo to be a FishLamp project."
}
if [ "$1" == "--help" ]; then
usage
exit 0;
fi
set -e
new_repo="$1"
if [ -d "$new_repo" ]; then
echo "$new_repo already exists"
exit 1;
fi
mkdir -p "$new_repo"
cd "$new_repo"
if [ -d ".git" -o -f ".git" ]; then
echo "# git repo already created"
exit 1
fi
git init
fishlamp init-repo
"""
|
Identification of an uptake hydrogenase required for hydrogen-dependent reduction of Fe(III) and other electron acceptors by Geobacter sulfurreducens.
Title Identification of an uptake hydrogenase required for hydrogen-dependent reduction of Fe(III) and other electron acceptors by Geobacter sulfurreducens.
Geobacter sulfurreducens, a representative of the family Geobacteraceae that predominates in Fe(III)-reducing subsurface environments, can grow by coupling the oxidation of hydrogen to the reduction of a variety of electron acceptors, including Fe(III), fumarate, and quinones. An examination of the G. sulfurreducens genome revealed two operons, hya and hyb, which appeared to encode periplasmically oriented respiratory uptake hydrogenases. In order to assess the roles of these two enzymes in hydrogen-dependent growth, Hya- and Hyb-deficient mutants were generated by gene replacement. Hyb was found to be required for hydrogen-dependent reduction of Fe(III), anthraquinone-2,6-disulfonate, and fumarate by resting cell suspensions and to be essential for growth with hydrogen and these three electron acceptors. Hya, in contrast, was not. These findings suggest that Hyb is an essential respiratory hydrogenase in G. sulfurreducens.
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: raven_filter
:platform: Unix
:synopsis: A plugin remove ring artefacts
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import logging
import numpy as np
import pyfftw
from savu.plugins.base_filter import BaseFilter
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin
@register_plugin
class RavenFilter(BaseFilter, CpuPlugin):
"""
Ring artefact removal method
:param uvalue: To define the shape of filter. Default: 30.
:param vvalue: How many rows to be applied the filter. Default: 1.
:param nvalue: To define the shape of filter. Default: 8.
:param padFT: Padding for Fourier transform. Default: 20.
"""
def __init__(self):
logging.debug("Starting Raven Filter")
super(RavenFilter, self).__init__("RavenFilter")
self.count=0
def set_filter_padding(self, in_data, out_data):
self.pad = self.parameters['padFT']
# don't currently have functionality to pad top/bottom but not
# right/left so padding everywhere for now
in_data[0].padding = {'pad_frame_edges': self.pad}
out_data[0].padding = {'pad_frame_edges': self.pad}
def pre_process(self):
in_pData = self.get_plugin_in_datasets()[0]
sino_shape = in_pData.get_shape()
width1 = sino_shape[1] + 2*self.pad
height1 = sino_shape[0] + 2*self.pad
v0 = np.abs(self.parameters['vvalue'])
u0 = np.abs(self.parameters['uvalue'])
n = np.abs(self.parameters['nvalue'])
# Create filter
centerx = np.ceil(width1/2.0)-1.0
centery = np.int16(np.ceil(height1/2.0)-1)
self.row1 = centery - v0
self.row2 = centery + v0+1
listx = np.arange(width1)-centerx
filtershape = 1.0/(1.0 + np.power(listx/u0, 2*n))
filtershapepad2d = np.zeros((self.row2 - self.row1, filtershape.size))
filtershapepad2d[:] = np.float64(filtershape)
self.filtercomplex = filtershapepad2d + filtershapepad2d*1j
a = pyfftw.n_byte_align_empty((height1, width1), 16, 'complex128')
self.fft_object = pyfftw.FFTW(a, a, axes=(0, 1))
self.ifft_object = pyfftw.FFTW(a, a, axes=(0, 1),
direction='FFTW_BACKWARD')
def filter_frames(self, data):
if(self.count%25==0):
logging.debug( "raven...%i"%self.count)
data2d = data[0]
sino2 = np.fft.fftshift(self.fft_object(data2d))
sino2[self.row1:self.row2] = \
sino2[self.row1:self.row2] * self.filtercomplex
sino3 = np.fft.ifftshift(sino2)
sino4 = self.ifft_object(sino3).real
sino4 = sino4[:, np.newaxis, :]
self.count+=1
return sino4
def get_plugin_pattern(self):
return 'SINOGRAM'
def get_max_frames(self):
return 1
|
Why wait to build? Come see this beautiful one story home ''Edison I'' built by Dream Finders. This 3 bedroom, 2 bathroom home comes with a Gourmet Kitchen, 42'' Cabinets, California Island with Granite counter tops and Stainless steel appliances. Bay window in master bedroom for additional space to enjoy. This smart house offers Nest A/C system for energy savings and Security cameras on front and back of home. A custom garage height to 8' for easy access for larger vehicles. Want privacy? Home backs to conservation that offers quiet evenings while sitting under the covered lanai. Beautiful golf course community w/parks, fitness room, swimming pools, basketball courts, bike path, walking/jogging path, & much more!
|
## This code is written by Davide Albanese, <albanese@fbk.eu>.
## (C) 2011 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.linalg as spla
from ridge import ridge_base
from ols import ols_base
from kernel_class import *
import sys
if sys.version >= '3':
from . import kernel
else:
import kernel
__all__ = ['LDA', 'SRDA', 'KFDA', 'PCA', 'PCAFast', 'KPCA']
def proj(u, v):
"""(<v, u> / <u, u>) u
"""
return (np.dot(v, u) / np.dot(u, u)) * u
def gso(v, norm=False):
"""Gram-Schmidt orthogonalization.
Vectors v_1, ..., v_k are stored by rows.
"""
for j in range(v.shape[0]):
for i in range(j):
v[j] = v[j] - proj(v[i], v[j])
if norm:
v[j] /= np.linalg.norm(v[j])
def lda(xarr, yarr):
"""Linear Discriminant Analysis.
Returns the transformation matrix `coeff` (P, C-1),
where `x` is a matrix (N,P) and C is the number of
classes. Each column of `x` represents a variable,
while the rows contain observations. Each column of
`coeff` contains coefficients for one transformation
vector.
Sample(s) can be embedded into the C-1 dimensional space
by z = x coeff (z = np.dot(x, coeff)).
:Parameters:
x : 2d array_like object (N, P)
data matrix
y : 1d array_like object integer (N)
class labels
:Returns:
coeff: 2d numpy array (P, P)
transformation matrix.
"""
n, p = xarr.shape[0], xarr.shape[1]
labels = np.unique(yarr)
sw = np.zeros((p, p), dtype=np.float)
for i in labels:
idx = np.where(yarr==i)[0]
sw += np.cov(xarr[idx], rowvar=0) * \
(idx.shape[0] - 1)
st = np.cov(xarr, rowvar=0) * (n - 1)
sb = st - sw
evals, evecs = spla.eig(sb, sw, overwrite_a=True,
overwrite_b=True)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx]
evecs = evecs[:, :labels.shape[0]-1]
return evecs
def srda(xarr, yarr, alpha):
"""Spectral Regression Discriminant Analysis.
Returns the (P, C-1) transformation matrix, where
`x` is a matrix (N,P) and C is the number of classes.
Each column of `x` represents a variable, while the
rows contain observations. `x` must be centered
(subtracting the empirical mean vector from each column
of`x`).
Sample(s) can be embedded into the C-1 dimensional space
by z = x coeff (z = np.dot(x, coeff)).
:Parameters:
x : 2d array_like object
training data (N, P)
y : 1d array_like object integer
target values (N)
alpha : float (>=0)
regularization parameter
:Returns:
coeff : 2d numpy array (P, C-1)
tranformation matrix
"""
# Point 1 in section 4.2
yu = np.unique(yarr)
yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
yk[0] = 1.
for i in range(1, yk.shape[0]):
yk[i][yarr==yu[i-1]] = 1.
gso(yk, norm=False) # orthogonalize yk
yk = yk[1:-1]
# Point 2 in section 4.2
ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
for i in range(yk.shape[0]):
ak[i] = ridge_base(xarr, yk[i], alpha)
return ak.T
def pca(xarr, method='svd'):
"""Principal Component Analysis.
Returns the principal component coefficients `coeff`(K,K)
and the corresponding eigenvalues (K) of the covariance
matrix of `x` (N,P) sorted by decreasing eigenvalue, where
K=min(N,P). Each column of `x` represents a variable,
while the rows contain observations. Each column of `coeff`
contains coefficients for one principal component.
Sample(s) can be embedded into the M (<=K) dimensional
space by z = x coeff_M (z = np.dot(x, coeff[:, :M])).
:Parameters:
x : 2d numpy array (N, P)
data matrix
method : str
'svd' or 'cov'
:Returns:
coeff, evals : 2d numpy array (K, K), 1d numpy array (K)
principal component coefficients (eigenvectors of
the covariance matrix of x) and eigenvalues sorted by
decreasing eigenvalue.
"""
n, p = xarr.shape
if method == 'svd':
x_h = (xarr - np.mean(xarr, axis=0)) / np.sqrt(n - 1)
u, s, v = np.linalg.svd(x_h.T, full_matrices=False)
evecs = u
evals = s**2
elif method == 'cov':
k = np.min((n, p))
C = np.cov(xarr, rowvar=0)
evals, evecs = np.linalg.eigh(C)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx]
evals = evals[idx]
evecs = evecs[:, :k]
evals = evals[:k]
else:
raise ValueError("method must be 'svd' or 'cov'")
return evecs, evals
def pca_fast(xarr, m, eps):
"""Fast principal component analysis using the fixed-point
algorithm.
Returns the first `m` principal component coefficients
`coeff` (P, M). Each column of `x` represents a variable,
while the rows contain observations. Each column of `coeff`
contains coefficients for one principal component.
Sample(s) can be embedded into the m (<=P) dimensional space
by z = x coeff (z = np.dot(X, coeff)).
:Parameters:
x : 2d numpy array (N, P)
data matrix
m : integer (0 < m <= P)
the number of principal axes or eigenvectors required
eps : float (> 0)
tolerance error
:Returns:
coeff : 2d numpy array (P, H)
principal component coefficients
"""
m = int(m)
np.random.seed(0)
evecs = np.random.rand(m, xarr.shape[1])
C = np.cov(xarr, rowvar=0)
for i in range(0, m):
while True:
evecs_old = np.copy(evecs[i])
evecs[i] = np.dot(C, evecs[i])
# Gram-Schmidt orthogonalization
a = np.dot(evecs[i], evecs[:i].T).reshape(-1, 1)
b = a * evecs[:i]
evecs[i] -= np.sum(b, axis=0) # if i=0 sum is 0
# Normalization
evecs[i] = evecs[i] / np.linalg.norm(evecs[i])
# convergence criteria
if np.abs(np.dot(evecs[i], evecs_old) - 1) < eps:
break
return evecs.T
def lda_fast(xarr, yarr):
"""Fast implementation of Linear Discriminant Analysis.
Returns the (P, C-1) transformation matrix, where
`x` is a centered matrix (N,P) and C is the number of classes.
Each column of `x` represents a variable, while the
rows contain observations. `x` must be centered
(subtracting the empirical mean vector from each column
of`x`).
:Parameters:
x : 2d array_like object
training data (N, P)
y : 1d array_like object integer
target values (N)
:Returns:
A : 2d numpy array (P, C-1)
tranformation matrix
"""
yu = np.unique(yarr)
yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
yk[0] = 1.
for i in range(1, yk.shape[0]):
yk[i][yarr==yu[i-1]] = 1.
gso(yk, norm=False) # orthogonalize yk
yk = yk[1:-1]
ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
for i in range(yk.shape[0]):
ak[i], _ = ols_base(xarr, yk[i], -1)
return ak.T
def kpca(K):
"""Kernel Principal Component Analysis, PCA in
a kernel-defined feature space making use of the
dual representation.
Returns the kernel principal component coefficients
`coeff` (N, N) computed as :math:`\lambda^{-1/2} \mathbf{v}_j`
where :math:`\lambda` and :math:`\mathbf{v}` are the ordered
eigenvalues and the corresponding eigenvector of the centered
kernel matrix K.
Sample(s) can be embedded into the G (<=N) dimensional space
by z = K coeff_G (z = np.dot(K, coeff[:, :G])).
:Parameters:
K: 2d array_like object (N,N)
precomputed centered kernel matrix
:Returns:
coeff, evals: 2d numpy array (N,N), 1d numpy array (N)
kernel principal component coefficients, eigenvalues
sorted by decreasing eigenvalue.
"""
evals, evecs = np.linalg.eigh(K)
idx = np.argsort(evals)
idx = idx[::-1]
evecs = evecs[:, idx]
evals = evals[idx]
for i in range(len(evals)):
evecs[:, i] /= np.sqrt(evals[i])
return evecs, evals
def kfda(Karr, yarr, lmb=0.001):
"""Kernel Fisher Discriminant Analysis.
Returns the transformation matrix `coeff` (N,1),
where `K` is a the kernel matrix (N,N) and y
is the class labels (the alghoritm works only with 2
classes).
:Parameters:
K: 2d array_like object (N, N)
precomputed kernel matrix
y : 1d array_like object integer (N)
class labels
lmb : float (>= 0.0)
regularization parameter
:Returns:
coeff: 2d numpy array (N,1)
kernel fisher coefficients.
"""
labels = np.unique(yarr)
n = yarr.shape[0]
idx1 = np.where(yarr==labels[0])[0]
idx2 = np.where(yarr==labels[1])[0]
n1 = idx1.shape[0]
n2 = idx2.shape[0]
K1, K2 = Karr[:, idx1], Karr[:, idx2]
N1 = np.dot(np.dot(K1, np.eye(n1) - (1 / float(n1))), K1.T)
N2 = np.dot(np.dot(K2, np.eye(n2) - (1 / float(n2))), K2.T)
N = N1 + N2 + np.diag(np.repeat(lmb, n))
M1 = np.sum(K1, axis=1) / float(n1)
M2 = np.sum(K2, axis=1) / float(n2)
M = M1 - M2
coeff = np.linalg.solve(N, M).reshape(-1, 1)
return coeff
class LDA:
"""Linear Discriminant Analysis.
"""
def __init__(self, method='cov'):
"""Initialization.
:Parameters:
method : str
'cov' or 'fast'
"""
self._coeff = None
self._mean = None
if method not in ['cov', 'fast']:
raise ValueError("method must be 'cov' or 'fast'")
self._method = method
def learn(self, x, y):
"""Computes the transformation matrix.
`x` is a matrix (N,P) and `y` is a vector containing
the class labels. Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
self._mean = np.mean(xarr, axis=0)
if self._method == 'cov':
self._coeff = lda(xarr, yarr)
elif self._method == 'fast':
self._coeff = lda_fast(xarr-self._mean, yarr)
def transform(self, t):
"""Embed `t` (M,P) into the C-1 dimensional space.
Returns a (M,C-1) matrix.
"""
if self._coeff is None:
raise ValueError("no model computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
ValueError("t, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (P,C-1), where
C is the number of classes. Each column contains
coefficients for one transformation vector.
"""
return self._coeff
class SRDA:
"""Spectral Regression Discriminant Analysis.
"""
def __init__(self, alpha=0.001):
"""Initialization.
:Parameters:
alpha : float (>=0)
regularization parameter
"""
self._coeff = None
self._mean = None
self._alpha = alpha
def learn(self, x, y):
"""Computes the transformation matrix.
`x` is a matrix (N,P) and `y` is a vector containing
the class labels. Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
self._mean = np.mean(xarr, axis=0)
self._coeff = srda(xarr-self._mean, yarr, self._alpha)
def transform(self, t):
"""Embed t (M,P) into the C-1 dimensional space.
Returns a (M,C-1) matrix.
"""
if self._coeff is None:
raise ValueError("no model computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
ValueError("t, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (P,C-1), where
C is the number of classes. Each column contains
coefficients for one transformation vector.
"""
return self._coeff
class KFDA:
"""Kernel Fisher Discriminant Analysis.
"""
def __init__(self, lmb=0.001, kernel=None):
"""Initialization.
:Parameters:
lmb : float (>= 0.0)
regularization parameter
kernel : None or mlpy.Kernel object.
if kernel is None, K and Kt in .learn()
and in .transform() methods must be precomputed kernel
matricies, else K and Kt must be training (resp.
test) data in input space.
"""
if kernel is not None:
if not isinstance(kernel, Kernel):
raise ValueError("kernel must be None or a mlpy.Kernel object")
self._kernel = kernel
self._x = None
self._coeff = None
self._lmb = lmb
def learn(self, K, y):
"""Computes the transformation vector.
:Parameters:
K: 2d array_like object
precomputed training kernel matrix (if kernel=None);
training data in input space (if kernel is a Kernel object)
y : 1d array_like object integer (N)
class labels (only two classes)
"""
Karr = np.array(K, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if self._kernel is None:
if Karr.shape[0] != Karr.shape[1]:
raise ValueError("K must be a square matrix")
else:
self._x = Karr.copy()
Karr = self._kernel.kernel(Karr, Karr)
labels = np.unique(yarr)
if labels.shape[0] != 2:
raise ValueError("number of classes must be = 2")
self._coeff = kfda(Karr, yarr, self._lmb)
def transform(self, Kt):
"""Embed Kt into the 1d kernel fisher space.
:Parameters:
Kt : 1d or 2d array_like object
precomputed test kernel matrix. (if kernel=None);
test data in input space (if kernel is a Kernel object).
"""
if self._coeff is None:
raise ValueError("no model computed")
Ktarr = np.asarray(Kt, dtype=np.float)
if self._kernel is not None:
Ktarr = self._kernel.kernel(Ktarr, self._x)
try:
return np.dot(Ktarr, self._coeff)
except:
ValueError("Kt, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation vector (N,1).
"""
return self._coeff
class PCA:
"""Principal Component Analysis.
"""
def __init__(self, method='svd', whiten=False):
"""Initialization.
:Parameters:
method : str
method, 'svd' or 'cov'
whiten : bool
whitening. The eigenvectors will be scaled
by eigenvalues**-(1/2)
"""
self._coeff = None
self._coeff_inv = None
self._evals = None
self._mean = None
self._method = method
self._whiten = whiten
def learn(self, x):
"""Compute the principal component coefficients.
`x` is a matrix (N,P). Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
self._mean = np.mean(xarr, axis=0)
self._coeff, self._evals = pca(x, method=self._method)
if self._whiten:
self._coeff_inv = np.empty((self._coeff.shape[1],
self._coeff.shape[0]), dtype=np.float)
for i in range(len(self._evals)):
eval_sqrt = np.sqrt(self._evals[i])
self._coeff_inv[i] = self._coeff[:, i] * \
eval_sqrt
self._coeff[:, i] /= eval_sqrt
else:
self._coeff_inv = self._coeff.T
def transform(self, t, k=None):
"""Embed `t` (M,P) into the k dimensional subspace.
Returns a (M,K) matrix. If `k` =None will be set to
min(N,P)
"""
if self._coeff is None:
raise ValueError("no PCA computed")
if k == None:
k = self._coeff.shape[1]
if k < 1 or k > self._coeff.shape[1]:
raise ValueError("k must be in [1, %d] or None" % \
self._coeff.shape[1])
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff[:, :k])
except:
raise ValueError("t, coeff: shape mismatch")
def transform_inv(self, z):
"""Transform data back to its original space,
where `z` is a (M,K) matrix. Returns a (M,P) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
zarr = np.asarray(z, dtype=np.float)
return np.dot(zarr, self._coeff_inv[:zarr.shape[1]]) +\
self._mean
def coeff(self):
"""Returns the tranformation matrix (P,L), where
L=min(N,P), sorted by decreasing eigenvalue.
Each column contains coefficients for one principal
component.
"""
return self._coeff
def coeff_inv(self):
"""Returns the inverse of tranformation matrix (L,P),
where L=min(N,P), sorted by decreasing eigenvalue.
"""
return self._coeff_inv
def evals(self):
"""Returns sorted eigenvalues (L), where L=min(N,P).
"""
return self._evals
class PCAFast:
"""Fast Principal Component Analysis.
"""
def __init__(self, k=2, eps=0.01):
"""Initialization.
:Parameters:
k : integer
the number of principal axes or eigenvectors required
eps : float (> 0)
tolerance error
"""
self._coeff = None
self._coeff_inv = None
self._mean = None
self._k = k
self._eps = eps
def learn(self, x):
"""Compute the firsts `k` principal component coefficients.
`x` is a matrix (N,P). Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
self._mean = np.mean(xarr, axis=0)
self._coeff = pca_fast(xarr, m=self._k, eps=self._eps)
self._coeff_inv = self._coeff.T
def transform(self, t):
"""Embed t (M,P) into the `k` dimensional subspace.
Returns a (M,K) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
raise ValueError("t, coeff: shape mismatch")
def transform_inv(self, z):
"""Transform data back to its original space,
where `z` is a (M,K) matrix. Returns a (M,P) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
zarr = np.asarray(z, dtype=np.float)
return np.dot(zarr, self._coeff_inv) + self._mean
def coeff(self):
"""Returns the tranformation matrix (P,K) sorted by
decreasing eigenvalue.
Each column contains coefficients for one principal
component.
"""
return self._coeff
def coeff_inv(self):
"""Returns the inverse of tranformation matrix (K,P),
sorted by decreasing eigenvalue.
"""
return self._coeff_inv
class KPCA:
"""Kernel Principal Component Analysis.
"""
def __init__(self, kernel=None):
"""Initialization.
:Parameters:
kernel : None or mlpy.Kernel object.
if kernel is None, K and Kt in .learn()
and in .transform() methods must be precomputed kernel
matricies, else K and Kt must be training (resp.
test) data in input space.
"""
if kernel is not None:
if not isinstance(kernel, Kernel):
raise ValueError("kernel must be None or a mlpy.Kernel object")
self._coeff = None
self._evals = None
self._K = None
self._kernel = kernel
self._x = None
def learn(self, K):
"""Compute the kernel principal component coefficients.
:Parameters:
K: 2d array_like object
precomputed training kernel matrix (if kernel=None);
training data in input space (if kernel is a Kernel object)
"""
Karr = np.asarray(K, dtype=np.float)
if Karr.ndim != 2:
raise ValueError("K must be a 2d array_like object")
if self._kernel is None:
if Karr.shape[0] != Karr.shape[1]:
raise ValueError("K must be a square matrix")
else:
self._x = Karr.copy()
Karr = self._kernel.kernel(Karr, Karr)
self._K = Karr.copy()
Karr = kernel.kernel_center(Karr, Karr)
self._coeff, self._evals = kpca(Karr)
def transform(self, Kt, k=None):
"""Embed Kt into the `k` dimensional subspace.
:Parameters:
Kt : 1d or 2d array_like object
precomputed test kernel matrix. (if kernel=None);
test data in input space (if kernel is a Kernel object).
"""
if self._coeff is None:
raise ValueError("no KPCA computed")
if k == None:
k = self._coeff.shape[1]
if k < 1 or k > self._coeff.shape[1]:
raise ValueError("k must be in [1, %d] or None" % \
self._coeff.shape[1])
Ktarr = np.asarray(Kt, dtype=np.float)
if self._kernel is not None:
Ktarr = self._kernel.kernel(Ktarr, self._x)
Ktarr = kernel.kernel_center(Ktarr, self._K)
try:
return np.dot(Ktarr, self._coeff[:, :k])
except:
raise ValueError("Kt, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (N,N) sorted by
decreasing eigenvalue.
"""
return self._coeff
def evals(self):
"""Returns sorted eigenvalues (N).
"""
return self._evals
|
Holistic nutrition involves reviewing an individual's current diet and health symptoms and creating customized meal plans and food choices using whole, unrefined healing foods. Consuming nutrient-rich food is THE starting place for improving overall health, impacting energy levels, improving digestive health, balancing blood sugar levels, supporting hormone and adrenal health, sleep habits and for maintaining a healthy weight (for ones unique body type) throughout ones life.
A Registered Holistic Nutritionist is someone who uses food as a means for preventing dis-ease and creates personalised programs and meal plans to ensure that each unique body is receiving the needed nutrients in order heal itself.
Our registered holistic nutritionist specializes in adrenal fatigue and nutritional guidance for peri-menopausal, menopausal and post menopausal symptoms including assistance for women's breast and heart health.
Nutritional Assessment ~ An initial nutritional assessment is utilized to explore the client's nutritional well-being in relation to their present food choices and analyze present dietary practices. This assessment may uncover potential deficiencies or imbalances throughout the body.
Personalized Programs ~ Personal health rebuilding programs and support in developing positive nutritional habits, lifestyle and general health. This can include weight normalization and management for our own individual body types.
Meal Planning and grocery store tours ~ Customized meal plans for specific dietary/nutritional requirements. This includes healthy recipes using whole foods that are sustainable and cost-effective meal options.
Food Awareness and Supplementation ~ Food awareness helps to create a positive, mindful connection to food and encourages a more favourable relationship between you and added healthy food choices. If supplements are indeed beneficial, I provide advice for particular supplementation.
|
# -*- coding: utf-8 -*-
from uuid import uuid4
from django.conf import settings
from django import http
from django.contrib.auth.views import logout_then_login
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.deprecation import MiddlewareMixin
except Exception as e:
MiddlewareMixin = object
import logging
logger = logging.getLogger('django')
def setAndWarning(attrib,value):
defined = getattr(settings,attrib,None)
if defined is not None:
if type(defined) != type(value):
logger.warning("{0} in settings is not valid type.. set to {1}".format(attrib,value))
return value
else:
return defined
logger.warning("{0} no defined on settings... set to {1}".format(attrib,value))
return value
# Lista los paths a omitir, ['*'] omite todos los paths
PADLOCK_SKIP_PATHS = setAndWarning("PADLOCK_SKIP_PATHS",[])
# Establece el tiempo de vida de la cookies (el valor recomendable es >= 120)
PADLOCK_COOKIE_AGE = setAndWarning("PADLOCK_COOKIE_AGE",3600*12)
# Boolean que indica cerrar sesión en caso de intento de romper seguridad
PADLOCK_AUTHBROKEN_LOGOUT = setAndWarning("PADLOCK_AUTHBROKEN_LOGOUT",False)
# URL de redirecionamiento luego de cerrar sesión
# Note que si empieza con '/' o ' http' se usa como string
# de caso contrario se usa como un pattern name
PADLOCK_LOGOUT_REDIRECT_TO = setAndWarning("PADLOCK_LOGOUT_REDIRECT_TO",'/')
# Define el nombre de las cockies
PADLOCK_PREFIX = setAndWarning("PADLOCK_PREFIX",'padlock')
fake_key_pieces = str(uuid4()).split('-')
def getURIRedirect():
if PADLOCK_LOGOUT_REDIRECT_TO.startswith('/'):
return PADLOCK_LOGOUT_REDIRECT_TO
if PADLOCK_LOGOUT_REDIRECT_TO.startswith('?'):
return PADLOCK_LOGOUT_REDIRECT_TO
elif PADLOCK_LOGOUT_REDIRECT_TO.startswith('http'):
return PADLOCK_LOGOUT_REDIRECT_TO
else:
return reverse(PADLOCK_LOGOUT_REDIRECT_TO)
def authFailAction(request):
if PADLOCK_AUTHBROKEN_LOGOUT:
if request.is_authenticated:
return logout_then_login(request, getURIRedirect())
response = http.HttpResponseForbidden()
for keynum,row in enumerate(fake_key_pieces):
response.delete_cookie(PADLOCK_PREFIX+'_id_%d' % keynum)
response.delete_cookie('sessionid')
return response
# PADLOCK_SKIP_PATHS = ['/es/','/auth/ingresar/'] or ['*'] for all pages
def locksmith_build_job(response,PadLockKey):
pieces = PadLockKey.split('-')
for keynum,row in enumerate(pieces):
response.set_cookie(PADLOCK_PREFIX+'_id_%d' % keynum,row,max_age=PADLOCK_COOKIE_AGE)
return response
def locksmith_restore_job(request):
pieces = []
for keynum,row in enumerate(fake_key_pieces):
piece = request.COOKIES.get(PADLOCK_PREFIX+'_id_%d' % keynum,None)
if piece is None:
return False
else:
pieces.append(piece)
return '-'.join(pieces,)
class PadLockMiddleware(MiddlewareMixin):
def process_request(self, request):
if getattr(request,"user",None) is None:
return None
if '*' in PADLOCK_SKIP_PATHS or request.path in PADLOCK_SKIP_PATHS:
return None
if request.user.is_authenticated:
padlock_id = locksmith_restore_job(request)
if not padlock_id:
return authFailAction(request)
if padlock_id != request.session.get(PADLOCK_PREFIX,None):
return authFailAction(request)
if request.method == 'POST':
padlock_id = locksmith_restore_job(request)
if not padlock_id:
return authFailAction(request)
if padlock_id != request.session.get(PADLOCK_PREFIX,None):
return authFailAction(request)
return None
def process_response(self, request, response):
if getattr(request,"user",None) is None:
return response
if '*' in PADLOCK_SKIP_PATHS or request.path in PADLOCK_SKIP_PATHS:
return response
if PADLOCK_PREFIX+'_id_0' in request.COOKIES:
# print("hay un PadLock existente")
if request.user.is_authenticated:
if locksmith_restore_job(request) != request.session.get(PADLOCK_PREFIX,False):
return authFailAction(request)
# return http.HttpResponseForbidden()
else:
if locksmith_restore_job(request) and request.session.get(PADLOCK_PREFIX,None) is None:
# print("Seteando nuevo PadLock habiendo un padlock Cookie")
padlock_id = str(uuid4())
request.session[PADLOCK_PREFIX] = padlock_id
response = locksmith_build_job(response,padlock_id)
else:
if request.method != 'POST':
# print("Seteando nuevo PadLock")
padlock_id = str(uuid4())
request.session[PADLOCK_PREFIX] = padlock_id
response = locksmith_build_job(response,padlock_id)
else:
# print("No se ha Seteando un nuevo PadLock por ser un post directo")
pass
return response
|
“Defendants initially claimed that they revoked Acosta’s press pass because he ‘plac[ed] his hands” on an intern. That contention is not accurate. The President himself has stated that the Acosta’s conduct was not “overly horrible” and that Acosta’s credentials were actually suspended because he failed to “treat the White House with respect'”.
“The sole justification for Defendants’ conduct is their dislike for Plaintiffs’ coverage of the administration and critique of the President. But that is insufficient to justify such a substantial restriction on Plaintiffs’ First Amendment rights”.
“Defendants did not provide Plaintiffs an opportunity to be heard before revoking Acosta’s press credentials. Nor have they provided him any avenue to challenge or appeal the revocation of his credentials. Rather, Defendants have stated that they do not plan to ever rescind the revocation of Acosta’s credentials”.
|
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class TagFix_MultipleTag_Lang_es(Plugin):
only_for = ["es"]
def init(self, logger):
Plugin.init(self, logger)
self.errors[30326] = self.def_class(item = 3032, level = 1, tags = ['tag', 'fix:chair'],
title = T_('Watch multiple tags'))
import re
self.Panaderia = re.compile(u"panader.a (.*)", re.IGNORECASE)
def node(self, data, tags):
err = []
if not "name" in tags:
return err
if not "shop" in tags:
panaderia = self.Panaderia.match(tags["name"])
if panaderia:
err.append({"class": 30326, "subclass": 0, "fix": {"+": {"shop": "bakery"}, "~": {"name": panaderia.group(1)} }})
return err
def way(self, data, tags, nds):
return self.node(data, tags)
def relation(self, data, tags, members):
return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
a = TagFix_MultipleTag_Lang_es(None)
class _config:
options = {"language": "es"}
class father:
config = _config()
a.father = father()
a.init(None)
for t in [{"name": u"Panadería Doña Neli"},
]:
self.check_err(a.node(None, t), t)
for t in [{"name": u"Panadería Doña Neli", "shop": "b"},
]:
assert not a.way(None, t, None), t
|
The Fish Catcher Fallout 4 Fallout New Vegas and more!
Usuable Fishing Rod - Immersive Fishing - posted in Fallout 4 Mod Requests: Ive been looking for a good fishing mod but cant find any. So now Ive been thinking up a good fishing mod the past few days and this is what i came up with. Make the fishing rod equip-able as if it was a throw-able grenade type item, so when you Throw, it sort of how to fix a hole in pex tubing The fish should be able to be sold and cooked/used in recipes etc. Maybe make certain fish spawn in certain areas of the map, and maybe put in a few rare fish here and there. It'd also be cool if there was some kind of journal or statistic that shows which fish you've caught how many times, etc.
This is actually an amazing location for farming legendaries, if you figure out a few tricks; there are 3 separate trigger-able spawn events with a high chance of legendaries at Lynn Woods - two raider events, and deathclaws which can be savescummed.
The radioactive wastes of Fallout 4 have produced a no shortage of vile mutated creatures for the Sole Survivor to contend with and their quest to thrive.
The magazine Astoundingly Awesome Tales - Attack of the Fishmen is a special collectible book in Fallout 4 that is located in the wreck of Skylanes Flight 1981.
|
#!/usr/bin/env python
"""
Hello world script for Session API ( https://www.tropo.com/docs/webapi/sessionapi.htm )
Upon launch, it will trigger a message to be sent via Jabber to the addess specified in
'number'.
"""
# Sample application using the itty-bitty python web framework from:
# http://github.com/toastdriven/itty
from itty import *
from tropo import Tropo, Session, JoinPrompt, LeavePrompt
from urllib import urlencode
from urllib2 import urlopen
@post('/index.json')
def index(request):
session = Session(request.body)
t = Tropo()
#jj = JoinPrompt(value = "who are you who let you come in")
jj = JoinPrompt("who are you who let you come in")
#ll = LeavePrompt(value = "byebye samsung")
ll = LeavePrompt("byebye samsung")
t.call(to=session.parameters['callToNumber'], network='SIP')
t.conference(id='yuxiangj', joinPrompt=jj.json, leavePrompt=ll.json)
t.say(session.parameters['message'])
return t.RenderJsonSDK()
#base_url = 'http://api.tropo.com/1.0/sessions'
base_url = 'http://192.168.26.21:8080/gateway/sessions'
token = '7776687947547a6261677359524e665670427145574f544e44616b5a64456d6c526b576265647448516e796c' # Insert your token here
action = 'create'
#number = 'sip:xiangjun_yu@10.140.254.55:5678' # change to the Jabber ID to which you want to send the message
number = 'sip:frank@172.16.22.128:5678' # change to the Jabber ID to which you want to send the message
message = 'hello from the session API!'
params = urlencode([('action', action), ('token', token), ('callToNumber', number), ('message', message)])
data = urlopen('%s?%s' % (base_url, params)).read()
#run_itty(server='wsgiref', host='0.0.0.0', port=8888)
run_itty(config='sample_conf')
|
Addiction comes in different forms—and the fastest-growing form of addiction in the United States is addiction of methamphetamine, also referred to as meth. Meth has long been touted as the most dangerous substance on Earth, and it is not hard to understand why. Not only is it highly addictive, but it is also extremely accessible. Meth is a synthetic drug—completely man-made—and is comparatively cheap, hence its prevalence in communities across the country.
Meth addiction can have profoundly hazardous effects on the body, mind and soul—and over time it can prove deadly. Meth creates a strong sense of dependence, and withdrawal from it can be challenging. However, with the right treatment, recovery is entirely attainable. At Seacliff, we offer individualized treatment for meth addiction, and our goal is to provide individuals with the foundation they need for lifelong wholeness and healing. We invite you to embrace this message of hope, and to come to Seacliff for freedom from meth addiction.
What Happens When People Abuse Meth?
Meth is classified as a stimulant, and enacts a powerful effect on the central nervous system. Those who use meth are tricked into thinking they have unlimited energy and stamina; this produces a “high,” both in terms of energy and mood, yet at the same time meth depletes the body’s reserves of energy, which can ultimately compromise vital organs and bodily functions. In the beginning, the meth user will experience energy and euphoria, both of which ultimately give way to depression, violent paranoia, and lasting brain damage. These are grim effects, and are what makes this message of hope so urgent: Treatment works, and recovery is possible.
What Comes First: Drug Addiction or Mental Illness?
Often, addiction—including meth addiction—will occur at the same time as a mood disorder or a mental illness. These are called co-occurring conditions, and might include anxiety, depression, bipolar disorder, and beyond. The relationship between addiction and mental illness is complicated, and it can be hard to determine which causes the other. Sometimes, addiction may arise through self-medication— for example, meth may start as a way to cope with the symptoms of depression. More often, addiction and mental illness arise due to the same abnormality of the brain. Ultimately, the relationship varies from one person to the next, but Seacliff’s dual diagnosis care can help you discover the root of your problem.
One of the difficulties in co-occurring disorders is that they muddy the diagnostic process. When addiction is present, most symptoms will be attributed to that—and as such, a serious mental illness may lurk just below the surface, undetected and untreated. This can cause the mental health disorder to deepen or worsen over time.
The solution to this is to seek dual diagnosis care, which will look past the symptoms and look instead for the underlying issue. If you are not sure whether you have a co-occurring mental health disorder, we urge you to contact Seacliff today to inquire about dual diagnosis.
If you or a loved one is struggling with these symptoms, seek treatment right away!
We cannot overstate it: Treatment works. Those who live in the grip of meth addiction can find freedom—and the process starts with clinical intervention. The Seacliff team will help you develop a plan for lifelong recovery. Start the process by contacting our team today. Choose recovery, and a life apart from meth addiction and mental illness.
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
|
Lake Nakuru is located in the Lake Nakuru National Park in Kenya and is considered a unique natural spectacle worldwide. The up to four meter deep lake, one of the alkaline soda lakes in the eastern East African Rift Valley, is world famous for its numerous flamingos. At times up to two million flamingos feed on blue-green algae (Spirulina) or small crustaceans, which give the white animals their pink colour. Already from a distance, one can recognize a pink band that usually runs around the whole lake at the lakeside. If there is a lack of food, the flamingos move to one of the other soda lakes such as Lake Bogoria or Lake Elmentea.
The lake has been a UNESCO World Heritage Site since 2011 as part of the "Kenyan Lake System in the Great Rift Valley".
The lake is located in the eastern part of the Great Rift Valley. The lake basin is bordered in the north by the Menengai Caldera, in the south by the volcano Mt. Eburru, in the west by the ditch edge of the Mau Escarpment and in the east by the Kinangop Plateau and the Aberdare Volcano Complex (Aberdare Range). The 1800 km² catchment area is low in rainfall, which is why the hydrological budget of the basin is extremely negative and the lake is strongly alkaline.
The lake is completely located in Nakuru County in Kenya, about 140 km northwest of the capital Nairobi.
On the northern shore of the lake lies the city of Nakuru, with about 300,000 inhabitants (2000) the fourth largest city in Kenya.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.conversation_profiles import ConversationProfilesAsyncClient
from google.cloud.dialogflow_v2.services.conversation_profiles import ConversationProfilesClient
from google.cloud.dialogflow_v2.services.conversation_profiles import pagers
from google.cloud.dialogflow_v2.services.conversation_profiles import transports
from google.cloud.dialogflow_v2.services.conversation_profiles.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import conversation_profile
from google.cloud.dialogflow_v2.types import conversation_profile as gcd_conversation_profile
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ConversationProfilesClient._get_default_mtls_endpoint(None) is None
assert ConversationProfilesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_service_account_always_use_jwt(client_class):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.ConversationProfilesGrpcTransport, "grpc"),
(transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_service_account_always_use_jwt_true(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_conversation_profiles_client_get_transport_class():
transport = ConversationProfilesClient.get_transport_class()
available_transports = [
transports.ConversationProfilesGrpcTransport,
]
assert transport in available_transports
transport = ConversationProfilesClient.get_transport_class("grpc")
assert transport == transports.ConversationProfilesGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(ConversationProfilesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesClient))
@mock.patch.object(ConversationProfilesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesAsyncClient))
def test_conversation_profiles_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ConversationProfilesClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ConversationProfilesClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc", "true"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc", "false"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(ConversationProfilesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesClient))
@mock.patch.object(ConversationProfilesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_conversation_profiles_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_conversation_profiles_client_client_options_from_dict():
with mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversationProfilesClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_list_conversation_profiles(transport: str = 'grpc', request_type=conversation_profile.ListConversationProfilesRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ListConversationProfilesResponse(
next_page_token='next_page_token_value',
)
response = client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConversationProfilesPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_conversation_profiles_from_dict():
test_list_conversation_profiles(request_type=dict)
def test_list_conversation_profiles_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
client.list_conversation_profiles()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
@pytest.mark.asyncio
async def test_list_conversation_profiles_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.ListConversationProfilesRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse(
next_page_token='next_page_token_value',
))
response = await client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConversationProfilesAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_from_dict():
await test_list_conversation_profiles_async(request_type=dict)
def test_list_conversation_profiles_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.ListConversationProfilesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
call.return_value = conversation_profile.ListConversationProfilesResponse()
client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_conversation_profiles_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.ListConversationProfilesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse())
await client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_conversation_profiles_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ListConversationProfilesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_conversation_profiles(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_conversation_profiles_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_conversation_profiles(
conversation_profile.ListConversationProfilesRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_conversation_profiles_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ListConversationProfilesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_conversation_profiles(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_conversation_profiles_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_conversation_profiles(
conversation_profile.ListConversationProfilesRequest(),
parent='parent_value',
)
def test_list_conversation_profiles_pager():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_conversation_profiles(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, conversation_profile.ConversationProfile)
for i in results)
def test_list_conversation_profiles_pages():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
pages = list(client.list_conversation_profiles(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_pager():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
async_pager = await client.list_conversation_profiles(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, conversation_profile.ConversationProfile)
for i in responses)
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_pages():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_conversation_profiles(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_conversation_profile(transport: str = 'grpc', request_type=conversation_profile.GetConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_get_conversation_profile_from_dict():
test_get_conversation_profile(request_type=dict)
def test_get_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
client.get_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
@pytest.mark.asyncio
async def test_get_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.GetConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_get_conversation_profile_async_from_dict():
await test_get_conversation_profile_async(request_type=dict)
def test_get_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.GetConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
call.return_value = conversation_profile.ConversationProfile()
client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.GetConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile())
await client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_conversation_profile(
conversation_profile.GetConversationProfileRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ConversationProfile()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_conversation_profile(
conversation_profile.GetConversationProfileRequest(),
name='name_value',
)
def test_create_conversation_profile(transport: str = 'grpc', request_type=gcd_conversation_profile.CreateConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_create_conversation_profile_from_dict():
test_create_conversation_profile(request_type=dict)
def test_create_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
client.create_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
@pytest.mark.asyncio
async def test_create_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=gcd_conversation_profile.CreateConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_create_conversation_profile_async_from_dict():
await test_create_conversation_profile_async(request_type=dict)
def test_create_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.CreateConversationProfileRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
call.return_value = gcd_conversation_profile.ConversationProfile()
client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.CreateConversationProfileRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
await client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_conversation_profile(
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
def test_create_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_conversation_profile(
gcd_conversation_profile.CreateConversationProfileRequest(),
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_conversation_profile(
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
@pytest.mark.asyncio
async def test_create_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_conversation_profile(
gcd_conversation_profile.CreateConversationProfileRequest(),
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
def test_update_conversation_profile(transport: str = 'grpc', request_type=gcd_conversation_profile.UpdateConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_update_conversation_profile_from_dict():
test_update_conversation_profile(request_type=dict)
def test_update_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
client.update_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
@pytest.mark.asyncio
async def test_update_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=gcd_conversation_profile.UpdateConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_update_conversation_profile_async_from_dict():
await test_update_conversation_profile_async(request_type=dict)
def test_update_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.UpdateConversationProfileRequest()
request.conversation_profile.name = 'conversation_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
call.return_value = gcd_conversation_profile.ConversationProfile()
client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'conversation_profile.name=conversation_profile.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.UpdateConversationProfileRequest()
request.conversation_profile.name = 'conversation_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
await client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'conversation_profile.name=conversation_profile.name/value',
) in kw['metadata']
def test_update_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_conversation_profile(
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_conversation_profile(
gcd_conversation_profile.UpdateConversationProfileRequest(),
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_conversation_profile(
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_conversation_profile(
gcd_conversation_profile.UpdateConversationProfileRequest(),
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_conversation_profile(transport: str = 'grpc', request_type=conversation_profile.DeleteConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_conversation_profile_from_dict():
test_delete_conversation_profile(request_type=dict)
def test_delete_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
client.delete_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
@pytest.mark.asyncio
async def test_delete_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.DeleteConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_conversation_profile_async_from_dict():
await test_delete_conversation_profile_async(request_type=dict)
def test_delete_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.DeleteConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
call.return_value = None
client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.DeleteConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_conversation_profile(
conversation_profile.DeleteConversationProfileRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_conversation_profile(
conversation_profile.DeleteConversationProfileRequest(),
name='name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ConversationProfilesClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ConversationProfilesGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.ConversationProfilesGrpcTransport,
)
def test_conversation_profiles_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ConversationProfilesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_conversation_profiles_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.ConversationProfilesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'list_conversation_profiles',
'get_conversation_profile',
'create_conversation_profile',
'update_conversation_profile',
'delete_conversation_profile',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
def test_conversation_profiles_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ConversationProfilesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ConversationProfilesClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ConversationProfilesGrpcTransport, grpc_helpers),
(transports.ConversationProfilesGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_conversation_profiles_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_conversation_profiles_host_no_port():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com'),
)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_conversation_profiles_host_with_port():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com:8000'),
)
assert client.transport._host == 'dialogflow.googleapis.com:8000'
def test_conversation_profiles_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ConversationProfilesGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_conversation_profiles_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ConversationProfilesGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_agent_path():
project = "squid"
expected = "projects/{project}/agent".format(project=project, )
actual = ConversationProfilesClient.agent_path(project)
assert expected == actual
def test_parse_agent_path():
expected = {
"project": "clam",
}
path = ConversationProfilesClient.agent_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_agent_path(path)
assert expected == actual
def test_conversation_model_path():
project = "whelk"
location = "octopus"
conversation_model = "oyster"
expected = "projects/{project}/locations/{location}/conversationModels/{conversation_model}".format(project=project, location=location, conversation_model=conversation_model, )
actual = ConversationProfilesClient.conversation_model_path(project, location, conversation_model)
assert expected == actual
def test_parse_conversation_model_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"conversation_model": "mussel",
}
path = ConversationProfilesClient.conversation_model_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_conversation_model_path(path)
assert expected == actual
def test_conversation_profile_path():
project = "winkle"
conversation_profile = "nautilus"
expected = "projects/{project}/conversationProfiles/{conversation_profile}".format(project=project, conversation_profile=conversation_profile, )
actual = ConversationProfilesClient.conversation_profile_path(project, conversation_profile)
assert expected == actual
def test_parse_conversation_profile_path():
expected = {
"project": "scallop",
"conversation_profile": "abalone",
}
path = ConversationProfilesClient.conversation_profile_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_conversation_profile_path(path)
assert expected == actual
def test_document_path():
project = "squid"
knowledge_base = "clam"
document = "whelk"
expected = "projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}".format(project=project, knowledge_base=knowledge_base, document=document, )
actual = ConversationProfilesClient.document_path(project, knowledge_base, document)
assert expected == actual
def test_parse_document_path():
expected = {
"project": "octopus",
"knowledge_base": "oyster",
"document": "nudibranch",
}
path = ConversationProfilesClient.document_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_document_path(path)
assert expected == actual
def test_knowledge_base_path():
project = "cuttlefish"
knowledge_base = "mussel"
expected = "projects/{project}/knowledgeBases/{knowledge_base}".format(project=project, knowledge_base=knowledge_base, )
actual = ConversationProfilesClient.knowledge_base_path(project, knowledge_base)
assert expected == actual
def test_parse_knowledge_base_path():
expected = {
"project": "winkle",
"knowledge_base": "nautilus",
}
path = ConversationProfilesClient.knowledge_base_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_knowledge_base_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = ConversationProfilesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = ConversationProfilesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder, )
actual = ConversationProfilesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = ConversationProfilesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization, )
actual = ConversationProfilesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = ConversationProfilesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project, )
actual = ConversationProfilesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = ConversationProfilesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = ConversationProfilesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = ConversationProfilesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.ConversationProfilesTransport, '_prep_wrapped_messages') as prep:
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.ConversationProfilesTransport, '_prep_wrapped_messages') as prep:
transport_class = ConversationProfilesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
Founded in 1972 by Stephen Marks, French Connection, the edgy, attitudinal, high street British fashion brand set out to create well-designed and fashion-forward clothing with a quirky spin on design. With more than 1,500 outlets across 25 countries worldwide, French Connection is truly a global fashion brand. Brand Marketing India (BMI), the exclusive licensee for French Connection in India, introduced the brand to the Indian market at the Lakme Fashion Week in April 2007 and has since opened 26 points of sale across 8 cities. BMI has ensured that the brand in India emulates the same global strategy with respect to timely seasonal launches, competitive pricing and the highest standards of in-store service.
|
import math
from abc import ABC, abstractmethod
from datetime import datetime
from functools import lru_cache
from html import unescape
from os import path
from jinja2 import Environment, FileSystemLoader
from markupsafe import Markup
from notifications_utils import LETTER_MAX_PAGE_COUNT, SMS_CHAR_COUNT_LIMIT
from notifications_utils.columns import Columns
from notifications_utils.countries.data import Postage
from notifications_utils.field import Field, PlainTextField
from notifications_utils.formatters import (
MAGIC_SEQUENCE,
add_prefix,
add_trailing_newline,
autolink_sms,
escape_html,
formatted_list,
make_quotes_smart,
nl2br,
normalise_multiple_newlines,
normalise_whitespace,
normalise_whitespace_and_newlines,
notify_email_markdown,
notify_email_preheader_markdown,
notify_letter_preview_markdown,
notify_plain_text_email_markdown,
remove_smart_quotes_from_email_addresses,
remove_whitespace_before_punctuation,
replace_hyphens_with_en_dashes,
replace_hyphens_with_non_breaking_hyphens,
sms_encode,
strip_leading_whitespace,
strip_unsupported_characters,
unlink_govuk_escaped,
)
from notifications_utils.postal_address import (
PostalAddress,
address_lines_1_to_7_keys,
)
from notifications_utils.sanitise_text import SanitiseSMS
from notifications_utils.take import Take
from notifications_utils.template_change import TemplateChange
template_env = Environment(loader=FileSystemLoader(
path.join(
path.dirname(path.abspath(__file__)),
'jinja_templates',
)
))
class Template(ABC):
encoding = "utf-8"
def __init__(
self,
template,
values=None,
redact_missing_personalisation=False,
):
if not isinstance(template, dict):
raise TypeError('Template must be a dict')
if values is not None and not isinstance(values, dict):
raise TypeError('Values must be a dict')
if template.get('template_type') != self.template_type:
raise TypeError(
f'Cannot initialise {self.__class__.__name__} '
f'with {template.get("template_type")} template_type'
)
self.id = template.get("id", None)
self.name = template.get("name", None)
self.content = template["content"]
self.values = values
self._template = template
self.redact_missing_personalisation = redact_missing_personalisation
def __repr__(self):
return "{}(\"{}\", {})".format(self.__class__.__name__, self.content, self.values)
@abstractmethod
def __str__(self):
pass
@property
def content_with_placeholders_filled_in(self):
return str(Field(
self.content,
self.values,
html='passthrough',
redact_missing_personalisation=self.redact_missing_personalisation,
markdown_lists=True,
)).strip()
@property
def values(self):
if hasattr(self, '_values'):
return self._values
return {}
@values.setter
def values(self, value):
if not value:
self._values = {}
else:
placeholders = Columns.from_keys(self.placeholders)
self._values = Columns(value).as_dict_with_keys(
self.placeholders | set(
key for key in value.keys()
if Columns.make_key(key) not in placeholders.keys()
)
)
@property
def placeholders(self):
return get_placeholders(self.content)
@property
def missing_data(self):
return list(
placeholder for placeholder in self.placeholders
if self.values.get(placeholder) is None
)
@property
def additional_data(self):
return self.values.keys() - self.placeholders
def get_raw(self, key, default=None):
return self._template.get(key, default)
def compare_to(self, new):
return TemplateChange(self, new)
@property
def content_count(self):
return len(self.content_with_placeholders_filled_in)
def is_message_empty(self):
if not self.content:
return True
if not self.content.startswith('((') or not self.content.endswith('))'):
# If the content doesn’t start or end with a placeholder we
# can guarantee it’s not empty, no matter what
# personalisation has been provided.
return False
return self.content_count == 0
def is_message_too_long(self):
return False
class BaseSMSTemplate(Template):
template_type = 'sms'
def __init__(
self,
template,
values=None,
prefix=None,
show_prefix=True,
sender=None,
):
self.prefix = prefix
self.show_prefix = show_prefix
self.sender = sender
self._content_count = None
super().__init__(template, values)
@property
def values(self):
return super().values
@values.setter
def values(self, value):
# If we change the values of the template it’s possible the
# content count will have changed, so we need to reset the
# cached count.
if self._content_count is not None:
self._content_count = None
# Assigning to super().values doesn’t work here. We need to get
# the property object instead, which has the special method
# fset, which invokes the setter it as if we were
# assigning to it outside this class.
super(BaseSMSTemplate, type(self)).values.fset(self, value)
@property
def content_with_placeholders_filled_in(self):
# We always call SMSMessageTemplate.__str__ regardless of
# subclass, to avoid any HTML formatting. SMS templates differ
# in that the content can include the service name as a prefix.
# So historically we’ve returned the fully-formatted message,
# rather than some plain-text represenation of the content. To
# preserve compatibility for consumers of the API we maintain
# that behaviour by overriding this method here.
return SMSMessageTemplate.__str__(self)
@property
def prefix(self):
return self._prefix if self.show_prefix else None
@prefix.setter
def prefix(self, value):
self._prefix = value
@property
def content_count(self):
"""
Return the number of characters in the message. Note that we don't distinguish between GSM and non-GSM
characters at this point, as `get_sms_fragment_count` handles that separately.
Also note that if values aren't provided, will calculate the raw length of the unsubstituted placeholders,
as in the message `foo ((placeholder))` has a length of 19.
"""
if self._content_count is None:
self._content_count = len(self._get_unsanitised_content())
return self._content_count
@property
def content_count_without_prefix(self):
# subtract 2 extra characters to account for the colon and the space,
# added max zero in case the content is empty the __str__ methods strips the white space.
if self.prefix:
return max((self.content_count - len(self.prefix) - 2), 0)
else:
return self.content_count
@property
def fragment_count(self):
content_with_placeholders = str(self)
# Extended GSM characters count as 2 characters
character_count = self.content_count + count_extended_gsm_chars(content_with_placeholders)
return get_sms_fragment_count(character_count, non_gsm_characters(content_with_placeholders))
def is_message_too_long(self):
"""
Message is validated with out the prefix.
We have decided to be lenient and let the message go over the character limit. The SMS provider will
send messages well over our limit. There were some inconsistencies with how we were validating the
length of a message. This should be the method used anytime we want to reject a message for being too long.
"""
return self.content_count_without_prefix > SMS_CHAR_COUNT_LIMIT
def is_message_empty(self):
return self.content_count_without_prefix == 0
def _get_unsanitised_content(self):
# This is faster to call than SMSMessageTemplate.__str__ if all
# you need to know is how many characters are in the message
if self.values:
values = self.values
else:
values = {
key: MAGIC_SEQUENCE for key in self.placeholders
}
return Take(PlainTextField(
self.content, values, html='passthrough'
)).then(
add_prefix, self.prefix
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
str.strip
).then(
str.replace, MAGIC_SEQUENCE, ''
)
class SMSMessageTemplate(BaseSMSTemplate):
def __str__(self):
return sms_encode(self._get_unsanitised_content())
class SMSBodyPreviewTemplate(BaseSMSTemplate):
def __init__(
self,
template,
values=None,
):
super().__init__(template, values, show_prefix=False)
def __str__(self):
return Markup(Take(Field(
self.content,
self.values,
html='escape',
redact_missing_personalisation=True,
)).then(
sms_encode
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
str.strip
))
class SMSPreviewTemplate(BaseSMSTemplate):
jinja_template = template_env.get_template('sms_preview_template.jinja2')
def __init__(
self,
template,
values=None,
prefix=None,
show_prefix=True,
sender=None,
show_recipient=False,
show_sender=False,
downgrade_non_sms_characters=True,
redact_missing_personalisation=False,
):
self.show_recipient = show_recipient
self.show_sender = show_sender
self.downgrade_non_sms_characters = downgrade_non_sms_characters
super().__init__(template, values, prefix, show_prefix, sender)
self.redact_missing_personalisation = redact_missing_personalisation
def __str__(self):
return Markup(self.jinja_template.render({
'sender': self.sender,
'show_sender': self.show_sender,
'recipient': Field('((phone number))', self.values, with_brackets=False, html='escape'),
'show_recipient': self.show_recipient,
'body': Take(Field(
self.content,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
add_prefix, (escape_html(self.prefix) or None) if self.show_prefix else None
).then(
sms_encode if self.downgrade_non_sms_characters else str
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
nl2br
).then(
autolink_sms
)
}))
class BaseBroadcastTemplate(BaseSMSTemplate):
template_type = 'broadcast'
MAX_CONTENT_COUNT_GSM = 1_395
MAX_CONTENT_COUNT_UCS2 = 615
@property
def encoded_content_count(self):
if self.non_gsm_characters:
return self.content_count
return self.content_count + count_extended_gsm_chars(
self.content_with_placeholders_filled_in
)
@property
def non_gsm_characters(self):
return non_gsm_characters(self.content)
@property
def max_content_count(self):
if self.non_gsm_characters:
return self.MAX_CONTENT_COUNT_UCS2
return self.MAX_CONTENT_COUNT_GSM
@property
def content_too_long(self):
return self.encoded_content_count > self.max_content_count
class BroadcastPreviewTemplate(BaseBroadcastTemplate, SMSPreviewTemplate):
jinja_template = template_env.get_template('broadcast_preview_template.jinja2')
class BroadcastMessageTemplate(BaseBroadcastTemplate, SMSMessageTemplate):
@classmethod
def from_content(cls, content):
return cls(
template={
'template_type': cls.template_type,
'content': content,
},
values=None, # events have already done interpolation of any personalisation
)
@classmethod
def from_event(cls, broadcast_event):
"""
should be directly callable with the results of the BroadcastEvent.serialize() function from api/models.py
"""
return cls.from_content(
broadcast_event['transmitted_content']['body']
)
def __str__(self):
return Take(Field(
self.content.strip(),
self.values,
html='escape',
)).then(
sms_encode
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
)
class SubjectMixin():
def __init__(
self,
template,
values=None,
**kwargs
):
self._subject = template['subject']
super().__init__(template, values, **kwargs)
@property
def subject(self):
return Markup(Take(Field(
self._subject,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
do_nice_typography
).then(
normalise_whitespace
))
@property
def placeholders(self):
return get_placeholders(self._subject) | super().placeholders
class BaseEmailTemplate(SubjectMixin, Template):
template_type = 'email'
@property
def html_body(self):
return Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_email_markdown
).then(
do_nice_typography
)
@property
def content_size_in_bytes(self):
return len(self.content_with_placeholders_filled_in.encode("utf8"))
def is_message_too_long(self):
"""
SES rejects email messages bigger than 10485760 bytes (just over 10 MB per message (after base64 encoding)):
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/quotas.html#limits-message
Base64 is apparently wasteful because we use just 64 different values per byte, whereas a byte can represent
256 different characters. That is, we use bytes (which are 8-bit words) as 6-bit words. There is
a waste of 2 bits for each 8 bits of transmission data. To send three bytes of information
(3 times 8 is 24 bits), you need to use four bytes (4 times 6 is again 24 bits). Thus the base64 version
of a file is 4/3 larger than it might be. So we use 33% more storage than we could.
https://lemire.me/blog/2019/01/30/what-is-the-space-overhead-of-base64-encoding/
That brings down our max safe size to 7.5 MB == 7500000 bytes before base64 encoding
But this is not the end! The message we send to SES is structured as follows:
"Message": {
'Subject': {
'Data': subject,
},
'Body': {'Text': {'Data': body}, 'Html': {'Data': html_body}}
},
Which means that we are sending the contents of email message twice in one request: once in plain text
and once with html tags. That means our plain text content needs to be much shorter to make sure we
fit within the limit, especially since HTML body can be much byte-heavier than plain text body.
Hence, we decided to put the limit at 1MB, which is equivalent of between 250 and 500 pages of text.
That's still an extremely long email, and should be sufficient for all normal use, while at the same
time giving us safe margin while sending the emails through Amazon SES.
EDIT: putting size up to 2MB as GOV.UK email digests are hitting the limit.
"""
return self.content_size_in_bytes > 2000000
class PlainTextEmailTemplate(BaseEmailTemplate):
def __str__(self):
return Take(Field(
self.content, self.values, html='passthrough', markdown_lists=True
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_plain_text_email_markdown
).then(
do_nice_typography
).then(
unescape
).then(
strip_leading_whitespace
).then(
add_trailing_newline
)
@property
def subject(self):
return Markup(Take(Field(
self._subject,
self.values,
html='passthrough',
redact_missing_personalisation=self.redact_missing_personalisation
)).then(
do_nice_typography
).then(
normalise_whitespace
))
class HTMLEmailTemplate(BaseEmailTemplate):
jinja_template = template_env.get_template('email_template.jinja2')
PREHEADER_LENGTH_IN_CHARACTERS = 256
def __init__(
self,
template,
values=None,
govuk_banner=True,
complete_html=True,
brand_logo=None,
brand_text=None,
brand_colour=None,
brand_banner=False,
brand_name=None
):
super().__init__(template, values)
self.govuk_banner = govuk_banner
self.complete_html = complete_html
self.brand_logo = brand_logo
self.brand_text = brand_text
self.brand_colour = brand_colour
self.brand_banner = brand_banner
self.brand_name = brand_name
@property
def preheader(self):
return " ".join(Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_email_preheader_markdown
).then(
do_nice_typography
).split())[:self.PREHEADER_LENGTH_IN_CHARACTERS].strip()
def __str__(self):
return self.jinja_template.render({
'subject': self.subject,
'body': self.html_body,
'preheader': self.preheader,
'govuk_banner': self.govuk_banner,
'complete_html': self.complete_html,
'brand_logo': self.brand_logo,
'brand_text': self.brand_text,
'brand_colour': self.brand_colour,
'brand_banner': self.brand_banner,
'brand_name': self.brand_name
})
class EmailPreviewTemplate(BaseEmailTemplate):
jinja_template = template_env.get_template('email_preview_template.jinja2')
def __init__(
self,
template,
values=None,
from_name=None,
from_address=None,
reply_to=None,
show_recipient=True,
redact_missing_personalisation=False,
):
super().__init__(template, values, redact_missing_personalisation=redact_missing_personalisation)
self.from_name = from_name
self.from_address = from_address
self.reply_to = reply_to
self.show_recipient = show_recipient
def __str__(self):
return Markup(self.jinja_template.render({
'body': self.html_body,
'subject': self.subject,
'from_name': escape_html(self.from_name),
'from_address': self.from_address,
'reply_to': self.reply_to,
'recipient': Field("((email address))", self.values, with_brackets=False),
'show_recipient': self.show_recipient
}))
@property
def subject(self):
return Take(Field(
self._subject,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation
)).then(
do_nice_typography
).then(
normalise_whitespace
)
class BaseLetterTemplate(SubjectMixin, Template):
template_type = 'letter'
address_block = '\n'.join(
f'(({line.replace("_", " ")}))' for line in address_lines_1_to_7_keys
)
def __init__(
self,
template,
values=None,
contact_block=None,
admin_base_url='http://localhost:6012',
logo_file_name=None,
redact_missing_personalisation=False,
date=None,
):
self.contact_block = (contact_block or '').strip()
super().__init__(template, values, redact_missing_personalisation=redact_missing_personalisation)
self.admin_base_url = admin_base_url
self.logo_file_name = logo_file_name
self.date = date or datetime.utcnow()
@property
def subject(self):
return Take(Field(
self._subject,
self.values,
redact_missing_personalisation=self.redact_missing_personalisation,
html='escape',
)).then(
do_nice_typography
).then(
normalise_whitespace
)
@property
def placeholders(self):
return get_placeholders(self.contact_block) | super().placeholders
@property
def postal_address(self):
return PostalAddress.from_personalisation(Columns(self.values))
@property
def _address_block(self):
if self.postal_address.has_enough_lines and not self.postal_address.has_too_many_lines:
return self.postal_address.normalised_lines
if 'address line 7' not in self.values and 'postcode' in self.values:
self.values['address line 7'] = self.values['postcode']
return Field(
self.address_block,
self.values,
html='escape',
with_brackets=False,
).splitlines()
@property
def _contact_block(self):
return Take(Field(
'\n'.join(
line.strip()
for line in self.contact_block.split('\n')
),
self.values,
redact_missing_personalisation=self.redact_missing_personalisation,
html='escape',
)).then(
remove_whitespace_before_punctuation
).then(
nl2br
)
@property
def _date(self):
return self.date.strftime('%-d %B %Y')
@property
def _message(self):
return Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
add_trailing_newline
).then(
notify_letter_preview_markdown
).then(
do_nice_typography
).then(
replace_hyphens_with_non_breaking_hyphens
)
class LetterPreviewTemplate(BaseLetterTemplate):
jinja_template = template_env.get_template('letter_pdf/preview.jinja2')
def __str__(self):
return Markup(self.jinja_template.render({
'admin_base_url': self.admin_base_url,
'logo_file_name': self.logo_file_name,
# logo_class should only ever be None, svg or png
'logo_class': self.logo_file_name.lower()[-3:] if self.logo_file_name else None,
'subject': self.subject,
'message': self._message,
'address': self._address_block,
'contact_block': self._contact_block,
'date': self._date,
}))
class LetterPrintTemplate(LetterPreviewTemplate):
jinja_template = template_env.get_template('letter_pdf/print.jinja2')
class LetterImageTemplate(BaseLetterTemplate):
jinja_template = template_env.get_template('letter_image_template.jinja2')
first_page_number = 1
allowed_postage_types = (
Postage.FIRST,
Postage.SECOND,
Postage.EUROPE,
Postage.REST_OF_WORLD,
)
def __init__(
self,
template,
values=None,
image_url=None,
page_count=None,
contact_block=None,
postage=None,
):
super().__init__(template, values, contact_block=contact_block)
if not image_url:
raise TypeError('image_url is required')
if not page_count:
raise TypeError('page_count is required')
if postage not in [None] + list(self.allowed_postage_types):
raise TypeError('postage must be None, {}'.format(formatted_list(
self.allowed_postage_types,
conjunction='or',
before_each='\'',
after_each='\'',
)))
self.image_url = image_url
self.page_count = int(page_count)
self._postage = postage
@property
def postage(self):
if self.postal_address.international:
return self.postal_address.postage
return self._postage
@property
def last_page_number(self):
return min(self.page_count, LETTER_MAX_PAGE_COUNT) + self.first_page_number
@property
def page_numbers(self):
return list(range(self.first_page_number, self.last_page_number))
@property
def postage_description(self):
return {
Postage.FIRST: 'first class',
Postage.SECOND: 'second class',
Postage.EUROPE: 'international',
Postage.REST_OF_WORLD: 'international',
}.get(self.postage)
@property
def postage_class_value(self):
return {
Postage.FIRST: 'letter-postage-first',
Postage.SECOND: 'letter-postage-second',
Postage.EUROPE: 'letter-postage-international',
Postage.REST_OF_WORLD: 'letter-postage-international',
}.get(self.postage)
def __str__(self):
return Markup(self.jinja_template.render({
'image_url': self.image_url,
'page_numbers': self.page_numbers,
'address': self._address_block,
'contact_block': self._contact_block,
'date': self._date,
'subject': self.subject,
'message': self._message,
'show_postage': bool(self.postage),
'postage_description': self.postage_description,
'postage_class_value': self.postage_class_value,
}))
def get_sms_fragment_count(character_count, non_gsm_characters):
if non_gsm_characters:
return 1 if character_count <= 70 else math.ceil(float(character_count) / 67)
else:
return 1 if character_count <= 160 else math.ceil(float(character_count) / 153)
def non_gsm_characters(content):
"""
Returns a set of all the non gsm characters in a text. this doesn't include characters that we will downgrade (eg
emoji, ellipsis, ñ, etc). This only includes welsh non gsm characters that will force the entire SMS to be encoded
with UCS-2.
"""
return set(content) & set(SanitiseSMS.WELSH_NON_GSM_CHARACTERS)
def count_extended_gsm_chars(content):
return sum(
map(content.count, SanitiseSMS.EXTENDED_GSM_CHARACTERS)
)
def do_nice_typography(value):
return Take(
value
).then(
remove_whitespace_before_punctuation
).then(
make_quotes_smart
).then(
remove_smart_quotes_from_email_addresses
).then(
replace_hyphens_with_en_dashes
)
@lru_cache(maxsize=1024)
def get_placeholders(content):
return Field(content).placeholders
|
- Voluntary work - no pay.
- Webtoon Cleaning - Other software can be used as long as the clean looks good.
I don't have a video card, but is it compulsory to to get one in order to clean. I'm proficient at Adobe CS5 and graphic designing, so is this ok?
I really want to help you but i dont have a lot of time. Are there things to do, which are not really urgent? I mean I could clean one or two pages a week, but I guess thats just too slow.
I'm a serious comic fan and would love to help get the scans out faster. I'm an artist, pretty good with photoshop. PC specs: Core i5 2500k 4.5 GHZ, Radeon HD4850 graphics card, photoshop cs5.
See that big button up there that says "Recruitment Form"?
Click on it and fill it out if you're serious about it.
Hi there! I just sent a cleaner application for Half Prince, hope I can pass the test!
It means you can not support fast boot, and other special windows 8 things. Disable fast-boot and it should boot fine.
|
import re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4.QtCore as QtCore
import ui_adhesionflexdlg
import sys
import string
MAC = "qt_mac_set_native_menubar" in dir()
class AdhesionFlexDlg(QDialog,ui_adhesionflexdlg.Ui_AdhesionFlexDlg):
#signals
# gotolineSignal = QtCore.pyqtSignal( ('int',))
def __init__(self,_currentEditor=None,parent=None):
super(AdhesionFlexDlg, self).__init__(parent)
self.editorWindow=parent
self.setupUi(self)
if not MAC:
self.cancelPB.setFocusPolicy(Qt.NoFocus)
self.updateUi()
def keyPressEvent(self, event):
molecule=str(self.afMoleculeLE.text())
molecule=string.rstrip(molecule)
if event.key()==Qt.Key_Return :
if molecule!="":
self.on_afMoleculeAddPB_clicked()
event.accept()
@pyqtSignature("") # signature of the signal emited by the button
def on_afMoleculeAddPB_clicked(self):
molecule=str(self.afMoleculeLE.text())
molecule=string.rstrip(molecule)
rows=self.afTable.rowCount()
if molecule =="":
return
# check if molecule with this name already exist
moleculeAlreadyExists=False
for rowId in range(rows):
name=str(self.afTable.item(rowId,0).text())
name=string.rstrip(name)
if name==molecule:
moleculeAlreadyExists=True
break
if moleculeAlreadyExists:
QMessageBox.warning(self,"Molecule Name Already Exists","Molecule name already exist. Please choose different name",QMessageBox.Ok)
return
self.afTable.insertRow(rows)
moleculeItem=QTableWidgetItem(molecule)
self.afTable.setItem (rows,0, moleculeItem)
# reset molecule entry line
self.afMoleculeLE.setText("")
return
@pyqtSignature("") # signature of the signal emited by the button
def on_clearAFTablePB_clicked(self):
rows=self.afTable.rowCount()
for i in range (rows-1,-1,-1):
self.afTable.removeRow(i)
def extractInformation(self):
adhDict={}
for row in range(self.afTable.rowCount()):
molecule=str(self.afTable.item(row,0).text())
adhDict[row]=molecule
return adhDict,str(self.bindingFormulaLE.text())
def updateUi(self):
self.afTable.horizontalHeader().setStretchLastSection(True)
|
Just a basic analysis if your motor is rated for 30 kW then the rated HP is 40 and actual load on the motor will be 29.5 HP. If the source system is considered stiff then supply frequency doesn't change. Torque varies with the motor terminal voltage squared so the torque being at 75% of motor rating means voltage can be decreased to 87% to have run adequately (assuming slip stays constant).
Reality is that the supply voltage (again system is stiff) will not drop so to compensate slip must drop by 25%. So if the slip is at 5% then the motor being loaded at 75% means the slip drops to 1.25% which ultimately means rotor speed increases near synchronous speed.
So what does that mean to the system in terms of voltage, current, and power factor? Voltage will remain relatively unaffected if the system is stiff or tightly coupled, load current of the motor will drop but overall current will increase because requirement for core magnetization and leakage flux of both the stator and rotor and lack of "resistance" due to mechanical load to restrict current flow. This will result in a slightly lower power factor.
This is all theoretical and factual values can be proven with field trial test. If the motor has no load connected to it the rotor circuit is just purely a short circuit and the current flowing will be traveling over the leakage reactances hence VAr consumption increases and reducing current.
Overall current would increase because less mechanical load to act as a "resistance" to limit current flow. So current goes up but that increased has to flow through the leakage reactance of both stator and rotor therefore increasing VAr consumption reducing both voltage and power factor.
Ugh finally in front of a computer. But for real practicality purposes that this isn't an issue for the jet engine but your power factor will suffer slightly and current may increase slightly as well as the windage losses.
To understand this look up the classical induction motor electrical representation, observe the equations, look up the Torque/speed curve, and play with the variables varying them to see what happens as one variable changes as others are held constant.
|
"""
Functions used for ROI manipulation.
Authors:
- Sander W Keemink <swkeemink@scimail.eu>
"""
from __future__ import division
import numpy as np
from past.builtins import basestring
from skimage.measure import find_contours
try:
from collections import abc
except ImportError:
import collections as abc
from .polygons import poly2mask
from .readimagejrois import read_imagej_roi_zip
def get_mask_com(mask):
"""
Get the center of mass for a boolean mask.
Parameters
----------
mask : :term:`array_like`
A two-dimensional boolean-mask.
Returns
-------
x : float
Center of mass along first dimension.
y : float
Center of mass along second dimension.
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError(
"Mask must be two-dimensional. Received input with {} dimensions"
"".format(mask.ndim)
)
# TODO: make this work for non-boolean masks too
x, y = mask.nonzero()
return np.mean(x), np.mean(y)
def split_npil(mask, centre, num_slices, adaptive_num=False):
"""
Split a mask into approximately equal slices by area around its center.
Parameters
----------
mask : :term:`array_like`
Mask as a 2d boolean array.
centre : tuple
The center co-ordinates around which the mask will be split.
num_slices : int
The number of slices into which the mask will be divided.
adaptive_num : bool, optional
If ``True``, the `num_slices` input is treated as the number of
slices to use if the ROI is surrounded by valid pixels, and
automatically reduces the number of slices if it is on the
boundary of the sampled region.
Returns
-------
masks : list
A list with `num_slices` many masks, each of which is a 2d
boolean numpy array.
"""
# TODO: This should yield an iterable instead.
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Get the (x,y) co-ordinates of the pixels in the mask
x, y = mask.nonzero()
if x.size == 0 or y.size == 0:
raise ValueError("ROI mask must be not be empty")
# Find the angle of the vector from the mask centre to each pixel
theta = np.arctan2(x - centre[0], y - centre[1])
# Find where the mask comes closest to the centre. We will put a
# slice boundary here, to prevent one slice being non-contiguous
# for masks near the image boundary.
# TODO: give it the bins to use
n_bins = 20
n_bins = min(n_bins, len(mask))
bins = np.linspace(-np.pi, np.pi, n_bins + 1)
bin_counts, bins = np.histogram(theta, bins=bins)
bin_min_index = np.argmin(bin_counts)
if adaptive_num:
# Change the number of slices we will used based on the
# proportion of these bins which are empty
num_slices = round(num_slices * sum(bin_counts > 0) / n_bins)
num_slices = max(1, num_slices)
# Ensure num_slices is an integer number
num_slices = int(num_slices)
if num_slices < 1:
raise ValueError("Number of slices must be positive")
# Change theta so it is the angle relative to a new zero-point,
# the middle of the bin which is least populated by mask pixels.
theta_offset = bins[bin_min_index] + np.pi / n_bins
theta = (theta - theta_offset) % (2 * np.pi) - np.pi
# get the boundaries
bounds = [
np.percentile(theta, 100.0 * (i + 1) / num_slices) for i in range(num_slices)
]
# predefine the masks
masks = []
# get the first mask
# empty predefinition
mask = np.zeros(np.shape(mask), dtype=bool)
# set relevant pixels to True
mask[x[theta <= bounds[0]], y[theta <= bounds[0]]] = True
masks.append(mask)
# get the rest of the masks
for i in range(1, num_slices):
# find which pixels are within bounds
truths = (theta > bounds[i - 1]) * (theta <= bounds[i])
# empty predefinition
mask = np.zeros(np.shape(mask), dtype=bool)
# set relevant pixels to True
mask[x[truths], y[truths]] = True
masks.append(mask)
return masks
def shift_2d_array(a, shift=1, axis=0):
"""
Shift array values, without wrap around.
Parameters
----------
a : :term:`array_like`
Input array.
shift : int, optional
How much to shift array by. Default is ``1``.
axis : int, optional
The axis along which elements are shifted. Default is ``0``.
Returns
-------
out : numpy.ndarray
Array with the same shape as `a`, but shifted appropriately.
"""
# Ensure array_like input is a numpy.ndarray
a = np.asarray(a)
# do initial shift
out = np.roll(a, shift, axis)
# then fill in refilled parts of the array
if axis == 0:
if shift > 0:
out[:shift] = 0
elif shift < 0:
out[shift:] = 0
elif axis == 1:
if shift > 0:
out[:, :shift] = 0
elif shift < 0:
out[:, shift:] = 0
else:
raise ValueError("Axis must be 0 or 1, but {} was given.".format(axis))
# return shifted array
return out
def get_npil_mask(mask, totalexpansion=4):
"""
Given the masks for a ROI, find the surrounding neuropil.
Our implementation is as follows:
- On even iterations (where indexing begins at zero), expand
the mask in each of the 4 cardinal directions.
- On odd numbered iterations, expand the mask in each of the 4
diagonal directions.
This procedure generates a neuropil whose shape is similar to the
shape of the input ROI mask.
Parameters
----------
mask : :term:`array_like`
The reference ROI mask to expand the neuropil from. The array
should contain only boolean values.
totalexpansion : float, optional
How much larger to make the neuropil total area than mask area.
Default is ``4``.
Returns
-------
grown_mask : numpy.ndarray
A boolean numpy.ndarray mask, where the region surrounding
the input is now ``True`` and the region of the input mask is
``False``.
Note
----
For fixed number of `iterations`, squarer input masks will have
larger output neuropil masks.
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Make a copy of original mask which will be grown
grown_mask = np.copy(mask)
area_orig = grown_mask.sum() # original area
area_current = 0 # current size
shpe = np.shape(mask)
area_total = shpe[0] * shpe[1]
count = 0
# for count in range(iterations):
while (
area_current < totalexpansion * area_orig
and area_current < area_total - area_orig
):
# Check which case to use. In current version, we alternate
# between case 0 (cardinals) and case 1 (diagonals).
case = count % 2
# Make a copy of the mask without any new additions. We will
# need to keep using this mask to mark new changes, so we
# don't use a partially updated version.
refmask = np.copy(grown_mask)
if False: # case == 2: # Not currently used
# Move polygon around one pixel in each 8 directions
# N, NE, E, SE, S, SW, W, NW, (the centre is also redone)
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
movedmask = shift_2d_array(refmask, dx, 0)
movedmask = shift_2d_array(movedmask, dy, 1)
grown_mask[movedmask] = True
elif case == 0:
# Move polygon around one pixel in each of the 4 cardinal
# directions: N, E, S, W.
for dx in [-1, 1]:
grown_mask[shift_2d_array(refmask, dx, 0)] = True
for dy in [-1, 1]:
grown_mask[shift_2d_array(refmask, dy, 1)] = True
elif case == 1:
# Move polygon around one pixel in each of the 4 diagonal
# directions: NE, SE, SW, NW
for dx in [-1, 1]:
for dy in [-1, 1]:
movedmask = shift_2d_array(refmask, dx, 0)
movedmask = shift_2d_array(movedmask, dy, 1)
grown_mask[movedmask] = True
# update area
area_current = grown_mask.sum() - area_orig
# iterate counter
count += 1
# Remove original mask from the neuropil mask
grown_mask[mask] = False
# Return the finished neuropil mask
return grown_mask
def getmasks_npil(cellMask, nNpil=4, expansion=1):
"""
Generate neuropil masks using :func:`get_npil_mask` function.
Parameters
----------
cellMask : :term:`array_like`
The cell mask (boolean 2d arrays).
nNpil : int, optional
Number of neuropil subregions. Default is ``4``.
expansion : float, optional
Area of each neuropil region, relative to the area of `cellMask`.
Default is ``1``.
Returns
-------
masks_split : list
Returns a list with soma and neuropil masks (boolean 2d arrays).
"""
# Ensure array_like input is a numpy.ndarray
cellMask = np.asarray(cellMask)
# get the total neuropil for this cell
mask = get_npil_mask(cellMask, totalexpansion=expansion * nNpil)
# get the center of mass for the cell
centre = get_mask_com(cellMask)
# split it up in nNpil neuropils
masks_split = split_npil(mask, centre, nNpil)
return masks_split
def readrois(roiset):
"""
Read ImageJ rois from a roiset zipfile.
We ensure that the third dimension (i.e. frame number) is always zero.
Parameters
----------
roiset : str
Path to a roiset zipfile.
Returns
-------
rois : list
The ROIs (regions of interest) from within roiset, as polygons
describing the outline of each ROI.
"""
# read rois
rois = read_imagej_roi_zip(roiset)
# set frame number to 0 for every roi
for i in range(len(rois)):
if "polygons" in rois[i]:
rois[i] = rois[i]["polygons"][:, :2]
# check if we are looking at an oval roi
elif "mask" in rois[i]:
# this is an oval roi, which gets imported as a 3D mask.
# First get the frame that has the mask in it by finding the
# nonzero frame
mask_frame = np.nonzero(rois[i]["mask"])[0][0]
# get the mask
mask = rois[i]["mask"][mask_frame, :, :]
# finally, get the outline coordinates
rois[i] = find_roi_edge(mask)[0]
else:
raise ValueError(
"ROI #{} contains neither a polygon nor mask representation"
" of the region of interest."
"".format(i)
)
return rois
def getmasks(rois, shpe):
"""
Get the masks for the specified rois.
Parameters
----------
rois : :term:`array_like`
List of roi coordinates. Each roi coordinate should be a 2d-array
or equivalent list. i.e.:
``roi = [[0, 0], [0, 1], [1, 1], [1, 0]]``
or
``roi = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])``
i.e. a n by 2 array, where n is the number of coordinates.
If a 2 by n array is given, this will be transposed.
shpe : :term:`array_like`
Shape of underlying image ``(width, height)``.
Returns
-------
masks : :term:`list` of :class:`numpy.ndarray`
List of masks for each ROI in `rois`.
"""
# get number of rois
nrois = len(rois)
# start empty mask list
masks = []
for i in range(nrois):
# transpose if array of 2 by n
if np.asarray(rois[i]).shape[0] == 2:
rois[i] = np.asarray(rois[i]).T
# transform current roi to mask
mask = poly2mask(rois[i], shpe)
# store in list
masks.append(np.array(mask[0].todense()))
return masks
def find_roi_edge(mask):
"""
Find the outline of a mask.
Uses :func:`skimage.measure.find_contours`.
Parameters
----------
mask : :term:`array_like`
The mask, as a binary array.
Returns
-------
outline : :term:`list` of (n,2)-:class:`~numpy.ndarray`
Array with coordinates of pixels in the outline of the mask.
See Also
--------
skimage.measure.find_contours
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Pad with 0s to make sure that edge ROIs are properly estimated
mask_shape = np.shape(mask)
padded_shape = (mask_shape[0] + 2, mask_shape[1] + 2)
padded_mask = np.zeros(padded_shape)
padded_mask[1:-1, 1:-1] = mask
# detect contours
outline = find_contours(padded_mask, level=0.5)
# update coordinates to take into account padding and set so that the
# coordinates are defined from the corners (as in the mask2poly function
# in SIMA https://github.com/losonczylab/sima/blob/master/sima/ROI.py)
for i in range(len(outline)):
outline[i] -= 0.5
return outline
def rois2masks(rois, shape):
"""
Convert ROIs into a list of binary masks.
Parameters
----------
rois : str or list of array_like
Either a string containing a path to an ImageJ roi zip file,
or a list of arrays encoding polygons, or list of binary arrays
representing masks.
shape : array_like
Image shape as a length 2 vector.
Returns
-------
masks : list of numpy.ndarray
List of binary arrays.
"""
# If it's a string, parse the string
if isinstance(rois, basestring):
rois = readrois(rois)
if not isinstance(rois, abc.Sequence):
raise TypeError(
"Wrong ROIs input format: expected a list or sequence, but got"
" a {}".format(rois.__class__)
)
# If it's a something by 2 array (or vice versa), assume polygons
if np.shape(rois[0])[1] == 2 or np.shape(rois[0])[0] == 2:
return getmasks(rois, shape)
# If it's a list of bigger arrays, assume masks
elif np.shape(rois[0]) == shape:
return rois
raise ValueError("Wrong ROIs input format: unfamiliar shape.")
|
Unitarian Universalism is a religion without spiritual walls, a community that puts love into action, and a movement to create a better world for all.
Unitarian Universalists are diverse in our beliefs, backgrounds, and identities—and we are united in living a ministry that seeks justice. We value all people as blessings. We act in solidarity with those who are excluded and victims of violence and oppression. Through the UUA’s Standing on the Side of Love campaign, we amplify the voices of the oppressed through online media; we show up in partnership at justice events across the country; we lobby national leaders on immigration reform, equality for lesbian, gay, bisexual, transgender, and queer people, religious freedom, and more; and above all we work to make love real in the world.
Purchase Standing on the Side of Love gear at the UUA Bookstore.
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class qylogo():
"""中标"""
need_check_ziduan = [
u'key',
u'_id',
u'data_source',
u'bbd_version',
u'bbd_url',
u'rawdata',
u'bbd_uptime',
u'company_full_name',
u'source',
u'company_short',
u'uuid',
u'retain1',
u'retain2',
u'company_logo',
u'bbd_dotime'
]
def check_key(self, indexstr, ustr):
"""key 清洗验证"""
ret = None
return ret
def check__id(self, indexstr, ustr):
"""_id 清洗验证"""
ret = None
return ret
def check_data_source(self, indexstr, ustr):
"""datasource 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in (u'猎聘', u'拉勾'):
ret = u'不是指定字段'
else:
ret = u'为空'
return ret
def check_bbd_version(self, indexstr, ustr):
"""version 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.all_num(ustr):
ret = u'不是全数字'
return ret
def check_bbd_url(self, indexstr, ustr):
"""url 清洗验证"""
ret = None
return ret
def check_rawdata(self, indexstr, ustr):
"""rawdata 清洗验证"""
ret = None
return ret
def check_bbd_uptime(self, indexstr, ustr):
"""uptime 清洗验证"""
ret = None
return ret
def check_company_full_name(self, indexstr, ustr):
"""企业全称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
ret = None
# if not public.has_count_hz(ustr, 2):
# ret = u'没有两个以上汉字'
else:
ret = u'为空'
return ret
def check_source(self, indexstr, ustr):
"""Source 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in (u'猎聘', u'拉勾'):
ret = u'不是指定字段'
else:
ret = u'为空'
return ret
def check_company_short(self, indexstr, ustr):
"""企业简称 清洗验证"""
ret = None
return ret
def check_uuid(self, indexstr, ustr):
"""uuid 清洗验证"""
ret = None
return ret
def check_retain1(self, indexstr, ustr):
"""retain1 清洗验证"""
ret = None
return ret
def check_retain2(self, indexstr, ustr):
"""retain2 清洗验证"""
ret = None
return ret
def check_company_logo(self, indexstr, ustr):
"""企业logo 清洗验证"""
ret = None
return ret
def check_bbd_dotime(self, indexstr, ustr):
"""do_time 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
|
Once you have determined a need for an interpreter and you've actually gone ahead and appointed a qualified interpreter, then it's your responsibility as a judge to actively supervise the interpretation. What you're going to have to do basically is you're going to have to listen with both your ears and your eyes. So that, for example, if a defendant has the benefit of an interpreter, and if a witness is testifying, but you notice because you're observing that the interpreter is actually not simultaneously interpreting, the interpreter is not interpreting what the question is, or even what the answer is, then you have an obligation as a judge to stop the proceedings and inquire why it is that the interpreter is actually not interpreting each question and each answer given by the witness.
So that's what I mean that you have to listen with your eyes as well. You might have another situation where you have a witness who is testifying, who does not speak English and has an interpreter. But let's say the witness is describing injuries that they suffered and they talk about having headaches, they talk about neck pain, they talk about stomach pain, and they indicate physically where else they are experiencing pain, but the interpreter says, the witness says that he only experiences headaches, then you know based on your observations, because the individual is touching other parts of their anatomy, that there's probably a question whether or not the interpreter is actually accurately interpreting.
So look for silence when simultaneous interpretation is required. Make sure that the response that the interpreter is giving is consistent with some of the physical motions of the witness or the litigants.
|
#Collection of operations on the MTF config
#2015, K Schweiger
import os
import sys
import sharedfunctions
#returns a list. With following entrys:
#0: A list with specs that should be printed
def configreader(workdir):
lines = sharedfunctions.readFile(workdir, 'MTF.cfg')
config = {}
for line in lines:
#ignore Lines beginning with # or nothing in it
#Define keys and seperators for config elements
configkeys = {"printspec=" : [",", "str"],
"showspec=" : [",", "str"],
"numtoprint=" : [None, "int"],
"maxnamelen=" : [None, "int"],
"openedacc=" : [None, "str"],
"termwidth=" : [None, "int"],
"termheight=" : [None, "int"],
"nGenre=" : [None, "int"],
"genrePriority=" : [",", "str"],
"invisibleGenre=": [",", "str"]}
if len(line) > 0 and line[0] != "#":
for key in configkeys:
if line.startswith(key):
if configkeys[key][0] is not None:
config.update({key : line[len(key)::].split(configkeys[key][0])})
else:
if configkeys[key][1] is "str":
config.update({key : str(line[len(key)::])})
elif configkeys[key][1] is "int":
config.update({key : int(line[len(key)::])})
return config
def getconfigpart(workdir, cfg):
config = configreader(workdir)
if cfg == "SpecsToPrint":
return config["printspec="]
elif cfg == "SpecsToShow":
return config["showspec="]
elif cfg == "NumToPrint":
return config["numtoprint="]
elif cfg == "MaxNameLen":
return config["maxnamelen="]
elif cfg == "DateAcc":
return config["openedacc="]
elif cfg == "GenrePriority":
return config["genrePriority="]
elif cfg == "NumberofGenres":
return config["nGenre="]
elif cfg == "TerminalWidth":
return config["termwidth="]
elif cfg == "TerminalHeight":
return config["termheight="]
elif cfg == "InvisibleGenres":
return config["invisibleGenre="]
|
Introduction. Our company collects information in different ways from Visitors and Members who access the various parts of our Services and the network of Web sites accessible through our Service. We use this information primarily to provide a customized experience as you use our Properties and Services, and generally, do not share this information with third parties. However, we may disclose personal information collected if we have received your permission beforehand or in very special circumstances, such as when we believe that such disclosure is required by law or other special cases described below.
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2019
import os
# from pilot.util.container import execute
from pilot.common.errorcodes import ErrorCodes
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def verify_setup_command(cmd):
"""
Verify the setup command.
:param cmd: command string to be verified (string).
:return: pilot error code (int), diagnostics (string).
"""
ec = 0
diagnostics = ""
return ec, diagnostics
def get_setup_command(job, prepareasetup):
"""
Return the path to asetup command, the asetup command itself and add the options (if desired).
If prepareasetup is False, the function will only return the path to the asetup script. It is then assumed
to be part of the job parameters.
Handle the case where environmental variables are set -
HARVESTER_CONTAINER_RELEASE_SETUP_FILE, HARVESTER_LD_LIBRARY_PATH, HARVESTER_PYTHONPATH
This will create the string need for the pilot to execute to setup the environment.
:param job: job object.
:param prepareasetup: not used.
:return: setup command (string).
"""
cmd = ""
# return immediately if there is no release or if user containers are used
if job.swrelease == 'NULL' or '--containerImage' in job.jobparams:
logger.debug('get_setup_command return value: {}'.format(str(cmd)))
return cmd
# test if environmental variable HARVESTER_CONTAINER_RELEASE_SETUP_FILE is defined
setupfile = os.environ.get('HARVESTER_CONTAINER_RELEASE_SETUP_FILE', '')
if setupfile != "":
cmd = "source {};".format(setupfile)
# test if HARVESTER_LD_LIBRARY_PATH is defined
if os.environ.get('HARVESTER_LD_LIBRARY_PATH', '') != "":
cmd += "export LD_LIBRARY_PATH=$HARVESTER_LD_LIBRARY_PATH:$LD_LIBRARY_PATH;"
# test if HARVESTER_PYTHONPATH is defined
if os.environ.get('HARVESTER_PYTHONPATH', '') != "":
cmd += "export PYTHONPATH=$HARVESTER_PYTHONPATH:$PYTHONPATH;"
#unset FRONTIER_SERVER variable
cmd += "unset FRONTIER_SERVER"
logger.debug('get_setup_command return value: {}'.format(str(cmd)))
return cmd
|
Sorry for not posting on Friday or Saturday. You might be wondering...where's Dan with his daily posts!? Don't worry I'm not slowing down just yet; I've actually just been at the New York Comic Con (NYCC) working as an exhibitor for my brother's art booth. Unfortunately, that left me with literally no time in the day to get to my post. I had no idea it would be quite that crazy or time consuming! All is well now though.
I'll be doing a full write up on my time there tomorrow- so look for that!
Anyway, lets get to what you're here for. Here we have the NYCC 2015 exclusive "Brand New Mater" "POP!" vinyl figure, just released this past week from Funko! This was only available at the official Funko booth there at the convention (which was very hard to get into). I feel super lucky to have gotten one! This is a rare, limited edition collector's item of just 1500 pieces made.
This is the second convention exclusive Cars Funko "POP!" released this year. Check out my SDCC Dinoco Lightning Mcqueen post HERE if you missed it.
Since this is considered a Mater "variant" (just a straight repaint of the standard Mater Funko "POP!"), it's still #129 in the Disney "POP!" Funko line. Even though it uses the same sculpt as the original, this was a must have for me. I'm a sucker for limited/exclusive items in general.
Like the other cars in this collection (see the original line HERE, released in July), this one has a great design, vibrant colors and some nice little details. If you're any kind of Pixar Cars fan and/or collector, than this is for you. What's so cool about the Funko "POP!" products is that they're simple enough that a baby could enjoy them, yet sophisticated enough for adult collectors to display- so whatever your age, you'll appreciate this!
The only way to get one of these now, unfortunately, is on the second hand market (you can find this typically selling for $40-$50 at this point). Original retail was $15. If you're interested, be sure to check out current listings on eBay using this direct link . Get it while you can!
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import inspect
import os
import sys
import time
import traceback
import app.buffer_file
screenLog = [u"--- screen log ---"]
fullLog = [u"--- begin log ---"]
enabledChannels = {
u"meta": True,
#'mouse': True,
u"startup": True,
}
shouldWritePrintLog = False
startTime = time.time()
def get_lines():
return screenLog
def parse_lines(frame, logChannel, *args):
if not len(args):
args = [u""]
msg = str(args[0])
if 1:
msg = u"%s %s %s %s: %s" % (
logChannel,
os.path.split(frame[1])[1],
frame[2],
frame[3],
msg,
)
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = repr(i) # unicode(i)
msg += prior
return msg.split(u"\n")
def channel_enable(logChannel, isEnabled):
global fullLog, shouldWritePrintLog
fullLog += [
u"%10s %10s: %s %r" % (u"logging", u"channel_enable", logChannel, isEnabled)
]
if isEnabled:
enabledChannels[logChannel] = isEnabled
shouldWritePrintLog = True
else:
enabledChannels.pop(channel, None)
def channel(logChannel, *args):
global fullLog, screenLog
if logChannel in enabledChannels:
lines = parse_lines(inspect.stack()[2], logChannel, *args)
screenLog += lines
fullLog += lines
def caller(*args):
global fullLog, screenLog
priorCaller = inspect.stack()[2]
msg = (
u"%s %s %s"
% (os.path.split(priorCaller[1])[1], priorCaller[2], priorCaller[3]),
) + args
lines = parse_lines(inspect.stack()[1], u"caller", *msg)
screenLog += lines
fullLog += lines
def exception(e, *args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"except", *args)
fullLog += lines
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
def check_failed(prefix, a, op, b):
stack(u"failed %s %r %s %r" % (prefix, a, op, b))
raise Exception("fatal error")
def check_ge(a, b):
if a >= b:
return
check_failed(u"check_ge", a, u">=", b)
def check_gt(a, b):
if a > b:
return
check_failed(u"check_lt", a, u"<", b)
def check_le(a, b):
if a <= b:
return
check_failed(u"check_le", a, u"<=", b)
def check_lt(a, b):
if a < b:
return
check_failed(u"check_lt", a, u"<", b)
def stack(*args):
global fullLog, screenLog
callStack = inspect.stack()[1:]
callStack.reverse()
for i, frame in enumerate(callStack):
line = [
u"stack %2d %14s %4s %s"
% (i, os.path.split(frame[1])[1], frame[2], frame[3])
]
screenLog += line
fullLog += line
if len(args):
screenLog.append(u"stack " + repr(args[0]))
fullLog.append(u"stack " + repr(args[0]))
def info(*args):
channel(u"info", *args)
def meta(*args):
"""Log information related to logging."""
channel(u"meta", *args)
def mouse(*args):
channel(u"mouse", *args)
def parser(*args):
channel(u"parser", *args)
def startup(*args):
channel(u"startup", *args)
def quick(*args):
global fullLog, screenLog
msg = str(args[0])
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = i # unicode(i)
msg += prior
lines = msg.split(u"\n")
screenLog += lines
fullLog += lines
def debug(*args):
global fullLog, screenLog
if u"debug" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"debug_@@@", *args)
screenLog += lines
fullLog += lines
def detail(*args):
global fullLog
if u"detail" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"detail", *args)
fullLog += lines
def error(*args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"error", *args)
fullLog += lines
def when(*args):
args = (time.time() - startTime,) + args
channel(u"info", *args)
def wrapper(function, shouldWrite=True):
global shouldWritePrintLog
shouldWritePrintLog = shouldWrite
r = -1
try:
try:
r = function()
except BaseException:
shouldWritePrintLog = True
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
finally:
flush()
return r
def write_to_file(path):
fullPath = app.buffer_file.expand_full_path(path)
with io.open(fullPath, "w+", encoding=u"UTF-8") as out:
out.write(u"\n".join(fullLog) + u"\n")
def flush():
if shouldWritePrintLog:
sys.stdout.write(u"\n".join(fullLog) + u"\n")
|
72 results were returned in 8 pages from a total of 27032 records.
Radio Liberty located in Kuwait, Kuwait operating on 5945 Khz.
Radio Liberty located in Biblis, Germany operating on 9765 Khz.
Radio Liberty located in Udon Thani, Thailand operating on 13760 Khz.
Radio Liberty located in Biblis, Germany operating on 9760 Khz.
Radio Liberty located in Lampertheim, Germany operating on 9460 Khz.
Radio Liberty located in Lampertheim, Germany operating on 9555 Khz.
Radio Liberty located in Udon Thani, Thailand operating on 15660 Khz.
Radio Liberty located in Irana Wila, Sri Lanka operating on 9855 Khz.
Radio Liberty located in Udon Thani, Thailand operating on 15145 Khz.
Radio Liberty located in Udon Thani, Thailand operating on 15460 Khz.
|
# -*- coding: utf-8 -*-
###########################################################################
# OCRFeeder - The complete OCR suite
# Copyright (C) 2009 Joaquim Rocha
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
from ocrfeeder.odf.draw import Frame, TextBox, Image
from ocrfeeder.odf.opendocument import OpenDocumentText
from ocrfeeder.odf.style import Style, MasterPage, GraphicProperties, ParagraphProperties, \
TextProperties, PageLayout, PageLayoutProperties
from ocrfeeder.odf.text import P, Page, PageSequence
from pango import WEIGHT_BOLD, WEIGHT_NORMAL, STYLE_ITALIC, STYLE_NORMAL, \
STYLE_OBLIQUE
from ocrfeeder.util import TEXT_TYPE, IMAGE_TYPE, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER, \
ALIGN_FILL
from ocrfeeder.util.configuration import ConfigurationManager
from ocrfeeder.util.graphics import getImagePrintSize
from ocrfeeder.util.lib import debug
from reportlab.pdfgen import canvas
from reportlab.lib import units
from reportlab.lib.utils import ImageReader, simpleSplit
import math
import os.path
import shutil
import tempfile
class DocumentGeneratorManager(object):
GENERATORS = {}
def __init__(self):
pass
def register(self, id, generator):
self.GENERATORS[id] = generator
def get(self, id):
return self.GENERATORS.get(id)
def getFormats(self):
return self.GENERATORS.keys()
class DocumentGenerator(object):
def __init__(self):
pass
def makeDocument(self):
raise NotImplementedError('Method not defined!')
def addBox(self, data_box):
if data_box.getType() == TEXT_TYPE:
self.addText(data_box)
elif data_box.getType() == IMAGE_TYPE:
self.addImage(data_box)
def addText(self, data_box):
raise NotImplementedError('Method not defined!')
def addImage(self, data_box):
raise NotImplementedError('Method not defined!')
def addBoxes(self, data_boxes):
for data_box in data_boxes:
self.addBox(data_box)
def save(self):
raise NotImplementedError('Method not defined!')
def newPage(self):
raise NotImplementedError('Method not defined!')
def convertFontStyle(self, style):
raise NotImplementedError('Method not defined!')
def convertFontWeight(self, weight):
raise NotImplementedError('Method not defined!')
def convertTextAlign(self, align):
if align == ALIGN_LEFT:
return 'left'
elif align == ALIGN_RIGHT:
return 'right'
elif align == ALIGN_CENTER:
return 'center'
elif align == ALIGN_FILL:
return 'justified'
class HtmlGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.document = ''
self.bodies = []
self.styles = ''
self.style_names = []
self.images = []
def addText(self, data_box):
text_lines = data_box.getText().splitlines()
new_div = '''
<div style="position: absolute; margin-left: %(x)spx; margin-top: %(y)spx;">
<p class="%(class)s">%(text)s</p>
</div>
''' % {'class': self.__handleStyle(data_box.text_data), 'text': '<br/>'.join(text_lines), 'x': data_box.x, 'y': data_box.y}
self.bodies[-1] += new_div
def addImage(self, data_box):
format = 'PNG'
image_file = tempfile.mkstemp(dir = ConfigurationManager.TEMPORARY_FOLDER,
suffix = '.' + format.lower())[1]
data_box.image.save(image_file, format = format)
self.images.append(image_file)
new_div = '''
<div style="position: absolute; margin-left: %(x)spx; margin-top: %(y)spx;">
<img src="images/%(image)s" alt="%(image)s" />
</div>
''' % {'image': os.path.basename(image_file), 'x': data_box.x, 'y': data_box.y}
self.bodies[-1] += new_div
def __handleStyle(self, text_data):
style_name = 'style%s%s%s%s%s%s%s' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification,
text_data.weight, text_data.style)
if not style_name in self.style_names:
self.style_names.append(style_name)
self.styles += '''
.%(style_name)s {
font-family: %(face)s;
font-size: %(size)spt;
font-weight: %(weight)s;
font-style: %(style)s;
text-align: %(align)s;
letter-spacing: %(letter_space)spt;
line-height: %(line_space)spt;
}
''' % {'style_name':style_name, 'face': text_data.face,
'size': text_data.size, 'weight': self.convertFontWeight(text_data.weight),
'align': text_data.justification, 'style': self.convertFontStyle(text_data.style),
'line_space': text_data.line_space, 'letter_space': text_data.letter_space}
return style_name
def convertFontStyle(self, style):
if style == STYLE_OBLIQUE:
return 'oblique'
elif style == STYLE_ITALIC:
return 'italic'
return 'normal'
def convertFontWeight(self, weight):
if weight == WEIGHT_BOLD:
return 'bold'
return 'normal'
def addPage(self, page_data):
self.bodies.append('')
self.current_page_resolution = page_data.resolution
self.addBoxes(page_data.data_boxes)
def save(self):
pages = []
for i in xrange(len(self.bodies)):
previous_page = ''
next_page = ''
if i != 0:
if i - 1 == 0:
previous_page = '<a href="index.html">«</a>'
else:
previous_page = '<a href="page%s.html">«</a>' % (i)
elif i != len(self.bodies) - 1:
next_page = '<a href="page%s.html">»</a>' % (i + 2)
pages.append('''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>%(title)s</title>
<link rel="stylesheet" type="text/css" href="style.css" />
</head>
<body>
<div style="margin-left: auto; margin-right: auto; width: 800px; overflow: hidden;">
<div style="float: left;">
%(previous_page)s
</div>
<div style="float: right;">
%(next_page)s
</div>
</div>
<hr/>
%(body)s
</body>
</html>
''' % {'title': self.name, 'body': self.bodies[i], 'previous_page': previous_page, 'next_page': next_page}
)
if not os.path.isdir(self.name):
os.mkdir(self.name)
images_folder = os.path.join(self.name, 'images')
if not os.path.exists(images_folder):
os.mkdir(images_folder)
if pages:
file = open(os.path.join(self.name, 'index.html'), 'w')
file.write(pages[0])
file.close()
if len(pages) > 1:
for i in xrange(1, len(pages)):
file = open(os.path.join(self.name, 'page%s.html' % (i + 1)), 'w')
file.write(pages[i])
file.close()
if self.styles:
file = open(os.path.join(self.name, 'style.css'), 'w')
file.write(self.styles)
file.close()
for image in self.images:
shutil.move(image, images_folder)
class OdtGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.document = OpenDocumentText()
self.current_page = None
self.photo_style = Style(name="Photo", family="graphic")
self.document.styles.addElement(self.photo_style)
self.font_styles = []
self.page_layouts = []
self.page_masters = []
self.page_styles = []
self.temp_images = []
frame_style = Style(name='FrameStyle', family = 'graphic')
frame_style.addElement(GraphicProperties(borderlinewidth='none'))
self.document.styles.addElement(frame_style)
frame_style_rotated = Style(name='FrameStyleRotated', family = 'graphic')
frame_style_rotated.addElement(GraphicProperties(fill = 'none', stroke = 'none', verticalpos = 'from-top', verticalrel = 'paragraph'))
self.document.automaticstyles.addElement(frame_style_rotated)
def addText(self, data_box):
text = data_box.getText()
frame_style = Style(name='FrameStyle', family = 'graphic')
debug('Angle: ', data_box.text_data.angle)
angle = data_box.text_data.angle
if angle:
frame_style = Style(name='FrameStyleRotated', family = 'graphic')
x, y, width, height = data_box.getBoundsPrintSize(self.current_page_resolution)
frame = Frame(stylename = frame_style, width = str(width) + 'in', height = str(height) + 'in', x = str(x) + 'in', y = str(y) + 'in', anchortype = 'paragraph')
if angle:
frame.addAttribute('transform', 'rotate (%s) translate (%scm %scm)' % (abs(math.radians(angle)), x, y))
self.current_page.addElement(frame)
textbox = TextBox()
frame.addElement(textbox)
for line in text.split('\n'):
textbox.addElement(P(stylename = self.__handleFrameStyle(data_box.text_data), text = line))
def addImage(self, data_box):
format = 'PNG'
image_file = tempfile.mkstemp(dir = ConfigurationManager.TEMPORARY_FOLDER,
suffix = '.' + format)[1]
data_box.image.save(image_file, format = format)
x, y, width, height = data_box.getBoundsPrintSize(self.current_page_resolution)
photo_frame = Frame(stylename=self.photo_style, x = '%sin' % x, y = '%sin' % y, width = '%sin' % width, height = '%sin' % height, anchortype='paragraph')
self.current_page.addElement(photo_frame)
location = self.document.addPicture(image_file)
photo_frame.addElement(Image(href=location))
self.temp_images.append(image_file)
def newPage(self, page_data):
master_name = self.__handlePageMaster(page_data)
page_style_name = '%sPage' % master_name
if not page_style_name in self.page_styles:
page_style = Style(name = page_style_name, family = 'paragraph', masterpagename = master_name)
page_style.addElement(ParagraphProperties(breakbefore = 'page'))
self.document.automaticstyles.addElement(page_style)
new_page = P(stylename = page_style_name)
self.document.text.addElement(new_page)
return new_page
def addPage(self, page_data):
self.current_page = self.newPage(page_data)
self.current_page_resolution = page_data.resolution
self.addBoxes(page_data.data_boxes)
def save(self):
name = self.name
if not name.lower().endswith('.odt'):
name += '.odt'
self.document.save(name)
for image in self.temp_images:
try:
os.unlink(image)
except:
debug('Error removing image: %s' % image)
def __handlePageMaster(self, page_data):
layout_name = 'Page%s%s' % (page_data.width, page_data.height)
if not layout_name in self.page_layouts:
page_layout = PageLayout(name = layout_name)
page_layout.addElement(PageLayoutProperties(margintop = '0in', marginbottom = '0in', marginleft = '0in', marginright = '0in', pagewidth = '%sin' % page_data.width, pageheight = '%sin' % page_data.height))
self.document.automaticstyles.addElement(page_layout)
self.page_layouts.append(layout_name)
master_name = layout_name + 'Master'
if not master_name in self.page_masters:
master_page = MasterPage(name = master_name, pagelayoutname = layout_name)
self.document.masterstyles.addElement(master_page)
self.page_masters.append(master_name)
return master_name
def __handleFrameStyle(self, text_data):
style_name = 'box%s%s%s%s%s' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification)
if not style_name in self.font_styles:
frame_style = Style(name = style_name, family = 'paragraph')
frame_style.addElement(ParagraphProperties(linespacing = '%spt' % text_data.line_space, textalign = self.convertTextAlign(text_data.justification)))
frame_style.addElement(TextProperties(letterspacing = '%spt' % text_data.letter_space, fontstyle = self.convertFontStyle(text_data.style), fontweight = self.convertFontWeight(text_data.weight), fontsize = '%spt' % text_data.size, fontfamily = str(text_data.face)))
self.document.styles.addElement(frame_style)
self.font_styles.append(style_name)
return style_name
def __handleFrameStyleRotated(self, text_data):
style_name = 'box%s%s%s%s%sRotated' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification)
if not style_name in self.font_styles:
frame_style = Style(name = style_name, family = 'paragraph')
frame_style.addElement(ParagraphProperties(linespacing = '%spt' % text_data.line_space, textalign = self.convertTextAlign(text_data.justification)))
frame_style.addElement(TextProperties(letterspacing = '%spt' % text_data.letter_space, fontstyle = self.convertFontStyle(text_data.style), fontweight = self.convertFontWeight(text_data.weight), fontsize = '%spt' % text_data.size, fontfamily = str(text_data.face)))
self.document.automaticstyles.addElement(frame_style)
self.font_styles.append(style_name)
return style_name
def convertFontStyle(self, style):
if style == STYLE_OBLIQUE:
return 'oblique'
elif style == STYLE_ITALIC:
return 'italic'
return 'normal'
def convertFontWeight(self, weight):
if weight == WEIGHT_BOLD:
return 'bold'
return 'normal'
# Generates a .txt file
class PlaintextGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.text = ''
def addText(self, newText):
self.text += newText
def addPage(self, page):
self.addText(page.getTextFromBoxes())
def save(self):
try:
# This will create a new file or **overwrite an existing file
f = open(self.name, "w")
try:
f.write(self.text) # Write text to file
finally:
f.close() # Close the file
except IOError:
pass
class PdfGenerator(DocumentGenerator):
def __init__(self, name, from_scratch = False):
self.name = name
self._from_scratch = from_scratch
self.canvas = canvas.Canvas(self.name)
self.page_data = None
def addText(self, box):
x, y, width, height = box.getBoundsPrintSize(self.page_data.resolution)
text = self.canvas.beginText()
# Make the text transparent if we are not
# creating a PDF from scratch
if not self._from_scratch:
text.setTextRenderMode(3)
text.setTextOrigin(x * units.inch,
(self.page_data.height - y) * units.inch)
text.setCharSpace(box.text_data.letter_space)
text.setLeading(box.text_data.line_space + box.text_data.size)
text.moveCursor(0, box.text_data.size)
#todo: efficiently add the required font
self.canvas.setFontSize(box.text_data.size)
lines = simpleSplit(box.text,
self.canvas._fontname,
box.text_data.size,
box.width)
text.textLines('\n'.join(lines))
self.canvas.drawText(text)
def addImage(self, box):
# Do nothing as the images will be already
# seen in the PDF
if not self._from_scratch:
return
x, y, width, height = box.getBoundsPrintSize(self.page_data.resolution)
self.canvas.drawInlineImage(box.image,
x * units.inch,
(self.page_data.height - (y + height)) * \
units.inch,
width * units.inch,
height * units.inch)
def addPage(self, page_data):
self.canvas.setPageSize((page_data.width * units.inch,
page_data.height * units.inch))
self.page_data = page_data
# Paste the source image that users will read
# in the PDF
if not self._from_scratch:
image = ImageReader(page_data.image_path)
self.canvas.drawImage(image, 0, 0,
page_data.width * units.inch,
page_data.height * units.inch)
self.addBoxes(page_data.data_boxes)
self.canvas.showPage()
def save(self):
self.canvas.save()
manager = DocumentGeneratorManager()
manager.register('HTML', HtmlGenerator)
manager.register('ODT', OdtGenerator)
manager.register('TXT', PlaintextGenerator)
manager.register('PDF', PdfGenerator)
|
Here she is, the fairy of good and happy dreams! She's the youngest fairy of all living in the Fairyland, the one who's making sure that all the kids have the most peaceful and restful sleep at night. No one can see her because she only comes when everyone is asleep, except you because you are going to be this cutie's personal stylista today! So, join her in her fantasy world, check out her candy-colored wardrobe and style this beautiful creature from head to toe choosing the loveliest fairy-like outfit to dress her up and the accessories that best complete her sweet fairy look! Enjoy it!
|
# -*- coding: utf-8 -*-
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
# ----------------------------------------------------------------------
class Displayer:
instance = None
def __new__(cls, *args, **kwargs):
if cls.instance is None:
cls.instance = object.__new__(cls, *args, **kwargs)
cls.__initialized = False
return cls.instance
def config(self, **kwargs):
self.out_file = kwargs.get("out_file", None)
self.out_screen = kwargs.get("out_screen", True)
self.verbosity = kwargs.get("verbosity", 0)
if self.out_file:
self.out_file_handler = open(self.out_file, "w")
def display(self, message):
if self.verbosity > 0:
self.__display(message)
def display_verbosity(self, message):
if self.verbosity > 1:
self.__display(message)
def display_more_verbosity(self, message):
if self.verbosity > 2:
self.__display(message)
def __display(self, message):
if self.out_screen:
print(message)
if self.out_file_handler:
self.out_file_handler.write(message)
def __init__(self):
if not self.__initialized:
self.__initialized = True
self.out_file = None
self.out_file_handler = None
self.out_screen = True
self.verbosity = 0
# ----------------------------------------------------------------------
def hello():
"""Display a hello world text"""
# Use displayer
out = Displayer()
out.display("hello")
out.display_verbosity("hello")
# This will not be displayed by the verbosity level to 1
out.display_more_verbosity("hello")
# ----------------------------------------------------------------------
if __name__ == '__main__':
# Config displayer
d = Displayer()
d.config(out_screen=True,
out_file="~/my_log.txt",
verbosity=1)
# Call function
hello(1)
|
Immediate access to the IBM M2090-728 Exam and 1800+ other exam PDFs.
Both of our Exams Packages come with all of our IBM Exams including all ActualTests M2090-728 tests. Find the same core area IBM questions with professionally verified answers, and PASS YOUR EXAM.
OR - Upgrade the Unlimited Access Package to include our Exam Engine. Know more than just the answers, understand the solutions! There is an Exam Engine for each of the 1,800 tests, including IBM M2090-728. Why Upgrade?
Customize your IBM Information Management Data Security & Privacy Sales Mastery v1 certification experience.
Control the training process by customizing your M2090-728 practice certification questions and answers. The fastest and best way to train.
Passing the IBM M2090-728 exam has never been faster or easier, now with actual questions and answers, without the messy M2090-728 braindumps that are frequently incorrect. ActualTests Unlimited Access Exams are not only the cheaper way to pass without resorting to M2090-728 dumps, but at only $149.00 you get access to ALL of the exams from every certification vendor.
This is more than a IBM M2090-728 practice exam, this is a compilation of the actual questions and answers from the IBM Information Management Data Security & Privacy Sales Mastery v1 test. Where our competitor's products provide a basic M2090-728 practice test to prepare you for what may appear on the exam and prepare you for surprises, the ActualTest M2090-728 exam questions are complete, comprehensive and guarantees to prepare you for your IBM exam.
An overview of the IBM M2090-728 course through studying the questions and answers.
Our Unlimited Access Package will prepare you for your exam with guaranteed results, surpassing other IBM M2090-728 Labs, or our competitor's dopey IBM M2090-728 Study Guide. Your exam will download as a single IBM M2090-728 PDF or complete M2090-728 testing engine as well as over 1000 other technical exam PDF and exam engine downloads. Forget buying your prep materials separately at three time the price of our unlimited access plan - skip the Sales Mastery M2090-728 audio exams and select the one package that gives it all to you at your discretion: IBM M2090-728 Study Materials featuring the exam engine.
Skip all the worthless IBM M2090-728 tutorials and download IBM Information Management Data Security & Privacy Sales Mastery v1 exam details with real questions and answers and a price too unbelievable to pass up. Act now and download your Actual Tests today!
Difficulty finding the right IBM M2090-728 answers? Don't leave your fate to M2090-728 books, you should sooner trust a IBM M2090-728 dump or some random IBM M2090-728 download than to depend on a thick IBM Information Management Data Security & Privacy Sales Mastery v1 book. Naturally the BEST training is from IBM M2090-728 CBT at ActualTests - far from being a wretched IBM Information Management Data Security & Privacy Sales Mastery v1 brain dump, the IBM M2090-728 cost is rivaled by its value - the ROI on the IBM M2090-728 exam papers is tremendous, with an absolute guarantee to pass Sales Mastery M2090-728 tests on the first attempt.
Still searching for IBM M2090-728 exam dumps? Don't be silly, M2090-728 dumps only complicate your goal to pass your IBM M2090-728 quiz, in fact the IBM M2090-728 braindump could actually ruin your reputation and credit you as a fraud. That's correct, the IBM M2090-728 cost for literally cheating on your IBM M2090-728 materials is loss of reputation. Which is why you should certainly train with the M2090-728 practice exams only available through ActualTests.
Keep walking if all you want is free IBM M2090-728 dumps or some cheap IBM M2090-728 free PDF - ActualTests only provide the highest quality of authentic IBM Information Management Data Security & Privacy Sales Mastery v1 notes than any other IBM M2090-728 online training course released. Absolutely ActualTests IBM M2090-728 online tests will instantly increase your Sales Mastery M2090-728 online test score! Stop guessing and begin learning with a classic professional in all things IBM M2090-728 practise tests.
What you will not find at ActualTests are latest IBM M2090-728 dumps or an IBM M2090-728 lab, but you will find the most advanced, correct and guaranteed IBM M2090-728 practice questions available to man. Simply put, IBM Information Management Data Security & Privacy Sales Mastery v1 sample questions of the real exams are the only thing that can guarantee you are ready for your IBM M2090-728 simulation questions on test day.
Proper training for IBM Sales Mastery M2090-728 begins with preparation products designed to deliver real IBM Sales Mastery M2090-728 results by making you pass the test the first time. A lot goes into earning your IBM Sales Mastery M2090-728 certification exam score, and the IBM Sales Mastery M2090-728 cost involved adds up over time. You will spend both time and money, so make the most of both with ActualTest's IBM Sales Mastery M2090-728 questions and answers. Learn more than just the IBM Sales Mastery M2090-728 answers to score high, learn the material from the ground up, building a solid foundation for re-certification and advancements in the IBM Sales Mastery M2090-728 life cycle.
Don't settle for sideline IBM Sales Mastery M2090-728 dumps or the shortcut using IBM Sales Mastery M2090-728 cheats. Prepare for your IBM Sales Mastery M2090-728 tests like a professional using the same Sales Mastery M2090-728 online training that thousands of others have used with ActualTests IBM Sales Mastery M2090-728 practice exams.
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for the decorators module
"""
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.decorators import monkeypatch, cached, clear_cache, copy_cache
class DecoratorsTC(TestCase):
def test_monkeypatch_with_same_name(self):
class MyClass: pass
@monkeypatch(MyClass)
def meth1(self):
return 12
self.assertEqual([attr for attr in dir(MyClass) if attr[:2] != '__'],
['meth1'])
inst = MyClass()
self.assertEqual(inst.meth1(), 12)
def test_monkeypatch_with_custom_name(self):
class MyClass: pass
@monkeypatch(MyClass, 'foo')
def meth2(self, param):
return param + 12
self.assertEqual([attr for attr in dir(MyClass) if attr[:2] != '__'],
['foo'])
inst = MyClass()
self.assertEqual(inst.foo(4), 16)
def test_cannot_cache_generator(self):
def foo():
yield 42
self.assertRaises(AssertionError, cached, foo)
def test_cached_preserves_docstrings_and_name(self):
class Foo(object):
@cached
def foo(self):
""" what's up doc ? """
def bar(self, zogzog):
""" what's up doc ? """
bar = cached(bar, 1)
@cached
def quux(self, zogzog):
""" what's up doc ? """
self.assertEqual(Foo.foo.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.foo.__name__, 'foo')
self.assertEqual(Foo.foo.func_name, 'foo')
self.assertEqual(Foo.bar.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.bar.__name__, 'bar')
self.assertEqual(Foo.bar.func_name, 'bar')
self.assertEqual(Foo.quux.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.quux.__name__, 'quux')
self.assertEqual(Foo.quux.func_name, 'quux')
def test_cached_single_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self):
""" what's up doc ? """
foo = Foo()
foo.foo()
self.assertTrue(hasattr(foo, '_foo'))
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_multi_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self, args):
""" what's up doc ? """
foo = Foo()
foo.foo(1)
self.assertEqual(foo._foo, {(1,): None})
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_keyarg_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo', keyarg=1)
def foo(self, other, args):
""" what's up doc ? """
foo = Foo()
foo.foo(2, 1)
self.assertEqual(foo._foo, {2: None})
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_property(self):
class Foo(object):
@property
@cached(cacheattr=u'_foo')
def foo(self):
""" what's up doc ? """
foo = Foo()
foo.foo
self.assertEqual(foo._foo, None)
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_copy_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self, args):
""" what's up doc ? """
foo = Foo()
foo.foo(1)
self.assertEqual(foo._foo, {(1,): None})
foo2 = Foo()
self.assertFalse(hasattr(foo2, '_foo'))
copy_cache(foo2, 'foo', foo)
self.assertEqual(foo2._foo, {(1,): None})
if __name__ == '__main__':
unittest_main()
|
LockRite's Poulton Le Fylde Locksmith is an expert in lock repairs and replacements for both commercial and domestic clients. Our focus on customer service and value for money makes us the number one choice for major companies and home owners alike.
We cover Poulton Le Fylde and the surrounding areas and are on-call 24 hours a day for all types of locksmith work, from burglary repairs, to lock-outs. We aim to get to you within 30 minutes, and we're available 24 hours a day.
|
import logging
from apptools.io.api import File
from pyface.api import FileDialog, OK
from pyface.action.api import Action
from traits.api import Any
from cviewer.plugins.text_editor.editor.text_editor import TextEditor
from cviewer.plugins.ui.preference_manager import preference_manager
# Logging imports
import logging
logger = logging.getLogger('root.'+__name__)
class NetworkVizTubes(Action):
tooltip = "Show 3D Network with Tubes"
description = "Show 3D Network with Tubes and colorcoded Nodes"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import threedviz2
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(threedviz2)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class NetworkReport(Action):
tooltip = "Network Report"
description = "Network Report"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import reportlab
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(reportlab)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class WriteGEXF(Action):
tooltip = "Write Gephi GEXF file"
description = "Write Gephi GEXF file"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import writegexf
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(writegexf)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class CorticoCortico(Action):
tooltip = "Extract cortico-cortico fibers"
description = "Extract cortico-cortico fibers"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import corticocortico
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(corticocortico)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class NipypeBet(Action):
tooltip = "Brain extraction using BET"
description = "Brain extraction using BET"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import nipypebet
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(nipypebet)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowTracks(Action):
tooltip = "Show tracks between two regions"
description = "Show tracks between two regions"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import ctrackedge
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(ctrackedge)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class XNATPushPull(Action):
tooltip = "Push and pull files from and to XNAT Server"
description = "Push and pull files from and to XNAT Server"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import pushpull
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(pushpull)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ComputeNBS(Action):
tooltip = "Compute NBS"
description = "Compute NBS"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
# from cnetwork_nbs_action import NBSNetworkParameter, NBSMoreParameter
from scripts import nbsscript
# cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
#
# no = NBSNetworkParameter(cfile)
# no.edit_traits(kind='livemodal')
#
# if (len(no.selected1) == 0 or len(no.selected2) == 0):
# return
#
# mo = NBSMoreParameter(cfile, no.selected1[0], no.selected2[0])
# mo.edit_traits(kind='livemodal')
#
# import datetime as dt
# a=dt.datetime.now()
# ostr = '%s%s%s' % (a.hour, a.minute, a.second)
# if not (len(no.selected1) == 0 or len(no.selected2) == 0):
# # if cancel, not create surface
# # create a temporary file
# import tempfile
# myf = tempfile.mktemp(suffix='.py', prefix='my')
# f=open(myf, 'w')
# f.write(nbsscript % (str(no.selected1),
# mo.first_edge_value,
# str(no.selected2),
# mo.second_edge_value,
# mo.THRES,
# mo.K,
# mo.TAIL,
# ostr))
# f.close()
#
# self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(nbsscript)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowNetworks(Action):
tooltip = "Create a 3D Network"
description = "Create a 3D Network"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import NetworkParameter
from scripts import netscript
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = NetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(netscript % (no.netw[no.graph]['name'],
no.node_position,
no.edge_value,
no.node_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ConnectionMatrix(Action):
tooltip = "Show connection matrix"
description = "Show connection matrix"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import MatrixNetworkParameter
from scripts import conmatrix
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = MatrixNetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(conmatrix % (no.netw[no.graph]['name'],
no.node_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class SimpleConnectionMatrix(Action):
tooltip = "Show simple connection matrix"
description = "Show simple connection matrix"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import MatrixEdgeNetworkParameter
from scripts import conmatrixpyplot
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = MatrixEdgeNetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(conmatrixpyplot % (no.netw[no.graph]['name'],
no.edge_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowSurfaces(Action):
""" Open a new file in the text editor
"""
tooltip = "Create a surface"
description = "Create a surface"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from csurface_action import SurfaceParameter
from scripts import surfscript
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
so = SurfaceParameter(cfile)
so.edit_traits(kind='livemodal')
if not so.pointset_da[so.pointset]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
if so.labels_da[so.labels].has_key('da_idx'):
labels = so.labels_da[so.labels]['da_idx']
else:
labels = 0
f.write(surfscript % (so.pointset_da[so.pointset]['name'],
so.pointset_da[so.pointset]['da_idx'],
so.faces_da[so.faces]['name'],
so.faces_da[so.faces]['da_idx'],
so.labels_da[so.labels]['name'],
labels))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowVolumes(Action):
""" Open a new file in the text editor
"""
tooltip = "Create a volume"
description = "Create a volume"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cvolume_action import VolumeParameter
from scripts import volslice
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
so = VolumeParameter(cfile)
so.edit_traits(kind='livemodal')
if True: #not so.pointset_da[so.pointset]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(volslice % so.volumes[so.myvolume]['name'])
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
|
Since you are acquainted with various kinds of Facebook account, so you might determine to Adjustment Birthday celebration on Facebook. How To Change My Birthday On Facebook. You can do this (Change Birthday Facebook) on phone.
Consequently, this short article will cover the best ways to Modification Facebook birthday on your smart phone.
How To Change My Birthday On Facebook.
Modification Birthday Facebook on mobile.
Action 1: Open Facebook app.
Open up Facebook app, this is a dark-blue app with a white "f" on it. This will certainly open your Facebook Information Feed if you are currently logged right into Facebook on your phone or tablet computer. Meanwhile, if you are not logged into Facebook, enter your email address (or phone number) and also password to proceed.
Action 2: Tap the three stroke ☰.
Faucet this stroke, ☰ It remains in either the bottom-right corner of the display (apple iphone) or the top-right corner of the display (Android). The setting depends upon the type of phone you utilize to access your Facebook account.
Step 3: Tap your name.
The following activity to be taken is touching your name. You will see this tab on top of the menu. Doing so takes you to your account web page. Idea me you are a step better to transform birthday celebration on Facebook account you opened up years ago.
Tip 4: Tap the About tab.
Equally as I clarified under that of desktop computer procedure, here once more you are to Tap the About tab which is listed below your profile photo. If your gadget is Android, you will have to scroll down to see the About option.
Now Scroll down to the "BASIC INFO" section and faucet Edit. The Edit switch gets on the right side of the screen, directly throughout from the "BASIC DETAILS" going. On Android, you should initially tap Even more Concerning You on this web page to access it.
Step 6: Edit and save.
Edit your birthday celebration, there are two areas listed below the "BIRTHDAY" heading: "Birthday celebration", which is the day as well as month of your birthday celebration, and also "Birth Year", which is the year you were born.
To transform these: Tap the month, day, or year to motivate a drop-down menu. Tap the month, day, or year that you wish to present. Repeat this procedure for every value that you desire to change.
Since you have actually modified you birthday effectively, it is time to conserve what you have been up until now, simply touch the SAVE switch to conserved adjustment.
Ultimately on Adjustment Birthday Celebration Facebook.
If you adhere to these overviews on Adjustment Birthday Facebook on Facebook account correctly, you will certainly alter birthday celebration on Facebook successful without issue.
If you know any type of ways of doing it, kindly drop it on the remark box bellow. This post might have helped you out on ways to alter birthday on Facebook, please be an excellent assistant by sharing it amongst your pals on social networks.
|
from packetbeat import BaseTest
class Test(BaseTest):
def test_amqp_channel_error(self):
self.render_config_template(
amqp_ports=[5672],
)
self.run_packetbeat(pcap="amqp_channel_error.pcap",
debug_selectors=["amqp,tcp,publish"])
objs = self.read_output()
assert all([o["type"] == "amqp" for o in objs])
assert len(objs) == 3
assert objs[0]["method"] == "exchange.declare"
assert objs[0]["status"] == "OK"
assert objs[0]["amqp.exchange"] == "titres"
assert objs[0]["amqp.durable"] == True
assert objs[0]["amqp.exchange-type"] == "fanout"
assert objs[0]["amqp.passive"] == False
assert objs[0]["amqp.no-wait"] == True
assert objs[1]["method"] == "queue.declare"
assert objs[1]["status"] == "OK"
assert objs[1]["amqp.queue"] == "my_queue"
assert objs[1]["amqp.exclusive"] == True
assert objs[1]["amqp.no-wait"] == False
assert objs[1]["amqp.durable"] == False
assert objs[1]["amqp.auto-delete"] == False
assert objs[1]["amqp.passive"] == False
assert objs[2]["method"] == "channel.close"
assert objs[2]["status"] == "Error"
assert objs[2]["amqp.reply-code"] == 404
assert objs[2]["amqp.reply-text"] == "NOT_FOUND - no exchange 'plop' in vhost '/'"
assert objs[2]["amqp.class-id"] == 50
assert objs[2]["amqp.method-id"] == 20
|
Mystery one: what is dark matter. Mystery two: how to unify physics. Are they the same mystery?
The joy and toll of doing remote science.
An invisible civilization could be living right under your nose.
Will We Ever Know What Dark Matter Is?
The search for the elusive material is reaching the end of its tether.
Answers to the biggest mysteries may lie well outside traditional paradigms.
Dark matter might be fluid in galaxies but something else on vaster, cosmological scales.
Maybe Newtonian physics doesn't need dark matter to work, but Mordehai Milgrom instead.
Physicists are rethinking how to use the Large Hadron Collider to search for dark matter.
Reina Maruyama wasn’t expecting her particle detector to work buried deep in ice. She was wrong.
This weird type of dark matter would also puff up galaxies and make stars age prematurely.
A poet finds inspiration in cosmic darkness.
The elusive substance may not be a new kind of particle at all.
The binary pairing of Darkness and Light is so basic to human culture, no other name for the unseen stuff could do.
Without the extra heft of dark matter, you wouldn’t be here.
This isn’t the first time that scientists have wrestled with the unseen.
|
# Copyright (C) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import creator.utils
import abc
import glob2
import os
import string
import sys
import weakref
class ContextProvider(object, metaclass=abc.ABCMeta):
"""
The *ContextProvider* is the interface class for rendering macros
providing the data necessary, eg. the value of variables. Some macro
functions, like ``$(wildcard ...)`, expect the context does provide
a macro ``$ProjectPath`` which should provide the directory that
contains the project files.
"""
@abc.abstractmethod
def has_macro(self, name):
"""
Args:
name (str): The name of the macro to check for existence.
Returns:
bool: True if the macro exists, False if not.
"""
return False
@abc.abstractmethod
def get_macro(self, name, default=NotImplemented):
"""
Args:
name (str): The name of the macro to retrieve.
default (any): The default value to be returned if the macro
can not be served. The default value is :class:`NotImplemented`
which causes this function to raise a :class:`KeyError` instead.
Returns:
ExpressionNode: The macro associated with the specified *name*.
Raises:
KeyError: If there is no macro with the specified name and the
*default* parameter has the value :class:`NotImplemented`.
"""
if default is NotImplemented:
raise KeyError(name)
return default
@abc.abstractmethod
def get_namespace(self):
"""
Returns:
str: The name of the context that is used to identify it globally.
"""
raise NotImplementedError
class MutableContext(ContextProvider):
"""
This implementation of the :class:`ContextProvider` interface
enables reading and writing macros via the Python ``__getitem__()``
and ``__setitem__()`` interface and stores these internally. If a
string is set with ``__setitem__()``, it will automatically be parsed
into an expression tree.
Attributes:
macros (dict of str -> ExpressionNode): The internal dictionary
mapping the macro names with the actual macro objects.
"""
def __init__(self):
super().__init__()
self.macros = {}
def __setitem__(self, name, value):
if isinstance(value, str):
value = parse(value, self)
elif not isinstance(value, ExpressionNode):
message = 'value must be str or ExpressionNode'
raise TypeError(message, type(value))
# Make sure the macro does not contain a reference to itself.
# It will be resolved by expanding the original value immediately
# in the expression hierarchy.
old_value = self.macros.get(name) or TextNode('')
for ref_name in self.get_aliases(name):
value = value.substitute(ref_name, old_value)
self.macros[name] = value
def __delitem__(self, name):
try:
del self.macros[name]
except KeyError:
pass
def get_aliases(self, name):
"""
This function can be implemented by subclasses to specify under
what aliases the same macro can be found. The default implementation
simply returns *name*.
Args:
name (str): The name that was passed to :meth:`__setitem__`.
Returns:
list of str: A list of aliases.
"""
return [name]
def function(self, func):
"""
Decorator for a Python callable to be wrapped in a :class:`Function`
expression node and assigned to the *MutableContext*.
"""
self.macros[func.__name__] = Function(func)
return self.macros[func.__name__]
def has_macro(self, name):
return name in self.macros
def get_macro(self, name, default=NotImplemented):
if name in self.macros:
return self.macros[name]
elif default is not NotImplemented:
return default
else:
raise KeyError(name)
def get_namespace(self, name):
raise NotImplementedError
class ChainContext(ContextProvider):
"""
This context chains multiple :class:`ContextProvider`s.
"""
def __init__(self, *contexts):
super().__init__()
self.contexts = []
for context in contexts:
if context is not None:
if not isinstance(context, ContextProvider):
raise TypeError('expected ContextProvider', type(context))
self.contexts.append(context)
def has_macro(self, name):
for context in self.contexts:
if contexts.has_macro(name):
return True
return False
def get_macro(self, name, default=NotImplemented):
for context in self.contexts:
try:
return context.get_macro(name)
except KeyError:
pass
if default is NotImplemented:
raise KeyError(name)
return default
def get_namespace(self, name):
for context in self.contexts:
try:
return context.get_namespace(name)
except KeyError:
pass
raise KeyError(name)
class StackFrameContext(ContextProvider):
"""
This :class:`ContextProvider` implementation exposes the contents
of a Python stack frame.
Args:
stack_depth (int): The number of stacks to go backwards from the
calling stack frame to reach the frame that is supposed to be
exposed by this context.
"""
def __init__(self, stack_depth=0):
super().__init__()
frame = sys._getframe()
for i in range(stack_depth + 1):
frame = frame.f_back
self.frame = frame
def has_macro(self, name):
try:
self.get_macro(name)
except KeyError:
return False
return True
def get_macro(self, name, default=NotImplemented):
frame = self.frame
if name in frame.f_locals:
value = frame.f_locals[name]
elif name in frame.f_globals:
value = frame.f_globals[name]
elif default is not NotImplemented:
return default
else:
raise KeyError(name)
if isinstance(value, str):
value = creator.macro.TextNode(str(value))
if not isinstance(value, creator.macro.ExpressionNode):
raise KeyError(name)
return value
def get_namespace(self, name):
raise KeyError(name)
class ExpressionNode(object, metaclass=abc.ABCMeta):
"""
Base class for macro expression nodes that can be evaluated with
a :class:`ContextProvider` and rendered to a string.
"""
@abc.abstractmethod
def eval(self, context, args):
"""
Evaluate the expression node given the specified context and
function call arguments into a string.
Args:
context (ContextProvider): The context to evaluate with.
args (list of ExpressionNode): A list of arguments that should
be taken into account for the evaluation.
Returns:
str: The evaluated macro.
"""
raise NotImplementedError
@abc.abstractmethod
def substitute(self, ref_name, node):
"""
This function must be implemented by nodes that expand a variable
name like the :meth:`VarNode` and must replace any occurence that
expands the reference named by *ref_name* with *node*.
Args:
ref_name (str): The name of the variable. May contain a double
colon ``:`` to separate namespace and variable name.
node (ExpressionNode): The node to insert in place.
Returns:
ExpressionNode: *self* or *node*.
"""
return self
@abc.abstractmethod
def copy(self, new_context):
"""
Create a copy of the node and return it. If *new_context* is not
None, its a *ContextProvider* that should be used inside the *VarNode*s
instead of their previous.
Args:
new_context (ContextProvider or None): The new context.
Returns:
ExpressionNode: The copy of the node.
"""
raise NotImplementedError
class TextNode(ExpressionNode):
"""
The *TextNode* simply evaluates into the same text it was initialized
with. It does not the context to evaluate.
Attributes:
text (str): The text of the node.
"""
def __init__(self, text):
if not isinstance(text, str):
raise TypeError('text must be str', type(text))
super().__init__()
self.text = text
def eval(self, context, args):
return self.text
def substitute(self, ref_name, node):
return self
def copy(self, new_context):
return TextNode(self.text)
class ConcatNode(ExpressionNode):
"""
This expression node can contain a number of other nodes which are
simply concatenated on evaluation. It also implements a parse-time
performance improvement when appending raw text to the node as it
will simply update the last :class:`TextNode` (if present) instead
of creating a new node for each chunk.
Attributes:
nodes (list of ExpressionNode): The list of nodes.
"""
def __init__(self, nodes=None):
super().__init__()
self.nodes = [] if nodes is None else nodes
def append(self, node):
"""
Appends a :class:`ExpressionNode` or text to this node.
Args:
node (ExpressionNode or str): The node or text to add.
"""
if type(node) is TextNode:
text = node.text
elif isinstance(node, str):
text = node
node = None
else:
text = None
if text is not None:
if self.nodes and isinstance(self.nodes[-1], TextNode):
self.nodes[-1].text += text
return
if node is None:
node = TextNode(text)
self.nodes.append(node)
def eval(self, context, args):
return ''.join(n.eval(context, args) for n in self.nodes)
def substitute(self, ref_name, node):
for i in range(len(self.nodes)):
self.nodes[i] = self.nodes[i].substitute(ref_name, node)
return self
def copy(self, new_context):
nodes = [n.copy(new_context) for n in self.nodes]
return ConcatNode(nodes)
class VarNode(ExpressionNode):
"""
This expression node implements a variable expansion or function call.
"""
def __init__(self, varname, args, context):
super().__init__()
self.varname = varname
self.args = args
self.context = weakref.ref(context)
def eval(self, context, args):
if self.context:
context = self.context()
# Evaluate the arguments to the function.
sub_args = [TextNode(n.eval(context, args)) for n in self.args]
# Does the identifier access an argument?
arg_index = None
try:
arg_index = int(self.varname)
except ValueError:
pass
if arg_index is not None and arg_index >= 0 and arg_index < len(args):
return args[arg_index].eval(context, sub_args).strip()
# Try to get the macro and evaluate it.
try:
macro = context.get_macro(self.varname)
except KeyError:
return ''
return macro.eval(context, sub_args).strip()
def substitute(self, ref_name, node):
if ref_name == self.varname:
return node
elif self.context():
namespace = self.context().get_namespace()
if ref_name == creator.utils.create_var(namespace, self.varname):
return node
for i in range(len(self.args)):
self.args[i] = self.args[i].substitute(ref_name, node)
return self
def copy(self, new_context):
args = [n.copy(new_context) for n in self.args]
return VarNode(self.varname, args, new_context)
class Function(ExpressionNode):
"""
This class can be used to wrap a Python function to make it a
function that can be called from a macro. The wrapped function
must accept the same arguments as :meth:`eval`.
"""
def __init__(self, func):
super().__init__()
self.func = func
@property
def name(self):
return self.func.__name__
def eval(self, context, args):
return self.func(context, args)
def substitute(self, ref_name, node):
return self
def copy(self, new_context):
return self
class Parser(object):
"""
This class implements the process of parsing a string into an
expression node hierarchy.
"""
CHARS_WHITESPACE = string.whitespace
CHARS_IDENTIFIER = string.ascii_letters + string.digits + '-_.<@:'
CHAR_POPEN = '('
CHAR_PCLOSE = ')'
CHAR_BOPEN = '{'
CHAR_BCLOSE = '}'
CHAR_NAMESPACEACCESS = ':'
CHAR_ARGSEP = ','
def parse(self, text, context):
"""
Args:
text (str): The text to parse into an expression tree.
Returns:
ConcatNode: The root node of the hierarchy.
"""
if context is not None and not isinstance(context, ContextProvider):
raise TypeError('context must be None or ContextProvider', type(context))
scanner = creator.utils.Scanner(text.strip())
return self._parse_arg(scanner, context, closing_at='')
def _parse_arg(self, scanner, context, closing_at):
root = ConcatNode()
char = scanner.char
while scanner and char not in closing_at:
if char == '$':
char = scanner.next()
node = None
if char != '$':
cursor = scanner.state()
node = self._parse_macro(scanner, context)
if not node:
scanner.restore(cursor)
if node:
root.append(node)
char = scanner.char
else:
root.append('$')
char = scanner.next()
elif char == '\\':
char = scanner.next()
if char:
root.append(char)
char = scanner.next()
else:
root.append('\\')
else:
root.append(char)
char = scanner.next()
return root
def _parse_macro(self, scanner, context):
# Check if a shortcut identifier was used.
shortcut = None
if scanner.char in Globals.shortcut_map:
shortcut = Globals.shortcut_map[scanner.char]
scanner.next()
is_call = False
is_braced = False
# Check if we have an opening parenthesis (function call).
if scanner.char == self.CHAR_POPEN:
is_call = True
closing = self.CHAR_PCLOSE
scanner.next()
# Or if we got braces (enclosed variable expansion).
elif scanner.char == self.CHAR_BOPEN:
is_braced = True
closing = self.CHAR_BCLOSE
scanner.next()
scanner.consume_set(self.CHARS_WHITESPACE)
# If a shortcut was used and this is a call, we already know
# the function that is used to call.
if shortcut and is_call:
varname = shortcut
# Read the variable or function name that is referenced
# in this expression.
else:
varname = scanner.consume_set(self.CHARS_IDENTIFIER)
if not varname:
return None
# If its a function call, we need to read in the arguments.
if is_call:
args = []
scanner.consume_set(self.CHARS_WHITESPACE)
closing_at = closing + self.CHAR_ARGSEP
while scanner.char and scanner.char != closing:
node = self._parse_arg(scanner, context, closing_at)
args.append(node)
if scanner.char == self.CHAR_ARGSEP:
scanner.next()
elif scanner.char == closing:
break
# Skip whitespace after the argument separator.
scanner.consume_set(self.CHARS_WHITESPACE)
if scanner.char != closing:
return None
scanner.next()
return VarNode(varname, args, context)
# If its braced, we only need the name of the variable that
# is being referenced.
elif is_braced:
scanner.consume_set(self.CHARS_WHITESPACE)
if scanner.char != closing:
return None
scanner.next()
node = VarNode(varname, [], context)
if shortcut:
node = VarNode(shortcut, [node], context)
return node
parser = Parser()
parse = parser.parse
class Globals:
shortcut_map = {
'"': 'quote',
'!': 'quotesplit',
'*': 'wildcard',
}
@Function
def addprefix(context, args):
if len(args) != 2:
message = 'addprefix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
prefix = args[0].eval(context, [])
items = creator.utils.split(args[1].eval(context, []))
items = [prefix + x for x in items]
return creator.utils.join(items)
@Function
def addsuffix(context, args):
if len(args) != 2:
message = 'addsuffix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
suffix = args[0].eval(context, [])
items = creator.utils.split(args[1].eval(context, []))
items = [x + suffix for x in items]
return creator.utils.join(items)
@Function
def quote(context, args):
items = [n.eval(context, []).strip() for n in args]
items = [creator.utils.quote(x) for x in items]
return ' '.join(items)
@Function
def quoteall(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = [creator.utils.quote(x) for x in creator.utils.split(items)]
return creator.utils.join(items)
@Function
def quotesplit(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = creator.utils.split(items)
items = [creator.utils.quote(x) for x in items]
return ' '.join(items)
@Function
def subst(context, args):
if len(args) != 3:
message = 'subst requires 3 arguments, got {0}'.format(len(args))
raise TypeError(message)
subject, replacement, items = [n.eval(context, []).strip() for n in args]
items = creator.utils.split(items)
items = [x.replace(subject, replacement) for x in items]
return creator.utils.join(items)
@Function
def split(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
return ' '.join(creator.utils.split(items))
@Function
def wildcard(context, args):
patterns = [n.eval(context, []).strip() for n in args]
items = []
for pattern in patterns:
items.extend(glob2.glob(pattern))
items.sort()
return creator.utils.join(items)
@Function
def suffix(context, args):
if len(args) != 2:
message = 'suffix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, suffix = [n.eval(context, []).strip() for n in args]
items = creator.utils.split(items)
items = [creator.utils.set_suffix(x, suffix) for x in items]
return creator.utils.join(items)
@Function
def prefix(context, args):
if len(args) != 2:
message = 'prefix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, prefix = [n.eval(context, []).strip() for n in args]
result = []
for item in creator.utils.split(items):
dirname, basename = os.path.split(item)
basename = prefix + basename
result.append(os.path.join(dirname, basename))
return creator.utils.join(result)
@Function
def move(context, args):
if len(args) != 3:
message = 'move requires 3 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, base, new_base = [n.eval(context, []).strip() for n in args]
result = []
for item in creator.utils.split(items):
relpath = os.path.relpath(item, base)
result.append(os.path.join(new_base, relpath))
return creator.utils.join(result)
@Function
def dir(context, args):
items = ';'.join(n.eval(context, []) for n in args)
items = creator.utils.split(items)
return creator.utils.join(os.path.dirname(x) for x in items)
@Function
def normpath(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = creator.utils.split(items)
return creator.utils.join(creator.utils.normpath(x) for x in items)
@Function
def upper(context, args):
if len(args) != 1:
message = 'upper expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return args[0].eval(context, []).upper()
@Function
def lower(context, args):
if len(args) != 1:
message = 'lower expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return args[0].eval(context, []).lower()
@Function
def capitalize(context, args):
if len(args) != 1:
message = 'lower expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return string.capwords(args[0].eval(context, []))
|
Be inspired by real-life success stories from people who started out just like you… we have helped thousands of people like you lose weight….
Look Good, Feel Great & Live Life….
Sharing your story is a great way to celebrate your success AND receive $50.00 CASH BACK.
Hi my name is Jade, I’m 25 years old and have struggled with my weight since being a young teen. My heaviest weight was 105 kilo’s. I’ve tried every diet program, shake, magic pill – you name it, I’ve tried it – nothing really worked. I’d lose a few kilo’s here or there, but never did I lose the amount I needed to be considered a healthy weight.
From the age of 17 till about 24, my weight had been up and down like a yoyo, I’d lose it, put it back on… the same thing most people go through. I didn’t just try quick fixes, in all the years I’ve been trying to lose weight – I learnt what to eat and what not to eat, how to train at the gym etc Even though I’d never lost all the weight I wanted to lose, I still new right from wrong. Even after I’d lost 10-15 kilo’s, over time I was never able to maintain the weight I’d lost.
The first time I tried the VLC diet, I thought it was one of the hardest, no let me re-phrase that, it was the hardest diet to stick to, but I wouldn’t call it a diet – I’d call it a program which enables you to pin point what your body can and can’t tolerate after completing a specific eating program which is designed to assist you lose weight that has been stored in places that no other diet has been able to shift before.
After completing the specific eating program which I only did for 21 days, not only had I lost a great deal of weight (a healthy amount) my body had changed in ways that I’d never seen before. I personally have a pear shaped body, big thighs and a big bottom – this diet stripped the fat from those areas without extreme exercise. Now don’t get me wrong, I believe in exercise but this diet still allows the body to reshape itself with the soft gel capsules or diet drops of which I have used both with great results.
I did do this diet more than once, all up I completed it 4 times and I’m proud to say after all the diets and all the programs I have tried in all the years, this has been the only one that has brought me to my goal weight. I can now say, I am classed as a healthy weight due to my BMI – before I was classed as obese. I started at 105 kilo’s and I now weigh 65 kilo’s……so I’ve lost 40 kilo’s.
Now anybody who has dieted numerous times will know that the last few kilo’s are the hardest. In all the years I was never able to get myself into the 60’s but this program got me there and I must admit, the day I looked at the scales and realised I was finally in the 60’s, it was one of the happiest moments of my life. Yes its hard, and yes the first 21 days are restrictive but it is so worth it and not only have I lost all the weight and am at my goal weight, I’ve also managed to keep it off by following the maintenance phase properly. Yes this is a diet program but once completed – it allows you to make a life style change that you can manage forever. Its the best thing I have ever done and I would HIGHLY recommend it to anyone trying to lose weight.
If followed correctly it has amazing results. I personally feel its awareness should be publicised more because it truly is an amazing program. Since losing the weight, the first couple of times I also used the green coffee beans and garcinia to assist with maintaining my weight loss and again both those products were fantastic.
I travelled to Bali, ate and drank not too bad, but not too good and in that week I didn’t put on any weight, I would swear I came back smaller. If paired with a healthy eating program they are more successful but I can personally guarantee that they work. All you have to do, is look at the photo’s… they will prove and show you how successful this program truly is.
I’ve never been happier and I have myself and this program to thank. I just wish I had discovered them earlier in my life as my weight held me back a great deal. Do yourself a favour and give it a go but do it properly, because if you don’t – your only cheating yourself.
After I received my diet drops from you earlier this year, I was excited to be starting the diet as many of my friends had done it too and achieved amazing results. I was part of a 12 week challenge at Fenix Fitness Nerang and I wanted results!
I think that the before and after photos here speak for themselves and I am thrilled with the results.
I found the VLC Diet to be the easiest diet that I have ever done because the results were “instant” and that kept me focussed.
I had NO cravings, NO hunger pains and NO headaches (unlike what I had experienced on other diets.
I lost 13.1kg and during the Maintenance Phase I ran 5kms on the treadmill every other day and attended the Gym with my personal trainer – all to get fit for the Challenge. I came 3rd in the Women’s Challenge out of 300 entrants!
Thank you for providing a good quality product. I have recommended that my friends and family order their diet drops through you!
I have lost 60kgs so far on this program I have come down from 185kg to 124kg. Jamie.
After watching my daughter lose 30kg, I was very keen to try the program. I started the VLC Diet Supreme Soft-Gel Capsules with the 500 calorie diet on the 5th of February weighting in at 104kg. I stuck to the diet extremely well, through the hard times when I didn’t lose anything for a day or two, to the good times when the weight was falling off. I couldn’t believe how easy it was and that I wasn’t hungry at all. I now also have a metabolism, which never really worked in the past. Since being on the diet my body has changed a lot and I have now lost 27kg in 10 weeks. Wow what a difference, this is only the beginning.
I started at 85kg and ended at 63kg for a total of 22kg lost!! The Program was amazing! It took some effort at the start as it’s so different from all other weight loss methods. But as long as you follow the instructions and trust the process, you quickly get into the groove and it becomes so EASY!!! I used the vlcd capsules; 3 rounds of 40 days on protocol.
Have struggled with my weight since being in a couple of car accidents,and have tried a few different diets and didnt find any that suited my body or lifestyle until discovering the this diet, although I prefer to call it a eating program.
My starting weight was 98 kgs and I’m now down to 87 with another week to go on round two of the VLC Diet . Then I start my “Food Holiday” which is what I have named the maintanence phase after the retrictive part of the program.
The things I love most about this eating program is the discipline and structure.. It works for me, I spend time each week preparing my meals ahead so each day I’m organised and its so easy.. When I buy my meat, I spend time cutting and weighing into portions and freezing for later use.
While I’m doing this for my own benefit, its lovely when people notice the difference in me, and complement me on my achievements and newer look.. I still have a way to go before I reach my ultimate goal weight but doing it in stages makes it doable..
Update – Deb has now lost a total of 23kg*! Go Deb!!
Size 16 and on my way to a size 14!!
This took 22 days. I’m doing the maintenance right now – then start Kettlebell exercise program. I’m very happy about this product and would recommend to anyone interested. Thank you so much for everything.
|
import ast
import json
import os
import re
import time
import urllib2
import environment
import framework
import utils
def cases_iterator(cases):
for case in cases:
if isinstance(case, MultiCase):
for c in case:
yield c
else:
yield case
class Log(object):
def __init__(self, line):
self.line = line
try:
(self.method,
self.remote_address,
self.username,
self.start_time,
self.end_time,
self.total_time,
self.plan_type,
self.original_sql,
self.bind_variables,
self.number_of_queries,
self.rewritten_sql,
self.query_sources,
self.mysql_response_time,
self.waiting_for_connection_time,
self.rowcount,
self.size_of_response,
self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations,
self.error) = line.strip().split('\t')
except ValueError:
print "Wrong looking line: %r" % line
raise
def check(self, case):
if isinstance(case, basestring):
return []
if isinstance(case, MultiCase):
return sum((self.check(subcase) for subcase in case.sqls_and_cases), [])
failures = []
for method in dir(self):
if method.startswith('check_'):
if not case.is_testing_cache and method.startswith('check_cache_'):
continue
fail = getattr(self, method)(case)
if fail:
failures.append(fail)
return failures
def fail(self, reason, should, is_):
return "FAIL: %s: %r != %r" % (reason, should, is_)
def check_original_sql(self, case):
# The following is necessary because Python and Go use different
# notations for bindings: %(foo)s vs :foo.
sql = re.sub(r'%\((\w+)\)s', r':\1', case.sql)
# Eval is a cheap hack - Go always uses doublequotes, Python
# prefers single quotes.
if sql != eval(self.original_sql):
return self.fail('wrong sql', case.sql, self.original_sql)
def check_rowcount(self, case):
if case.rowcount is not None and int(self.rowcount) != case.rowcount:
return self.fail("Bad rowcount", case.rowcount, self.rowcount)
def check_cache_hits(self, case):
if case.cache_hits is not None and int(self.cache_hits) != case.cache_hits:
return self.fail("Bad Cache Hits", case.cache_hits, self.cache_hits)
def check_cache_absent(self, case):
if case.cache_absent is not None and int(self.cache_absent) != case.cache_absent:
return self.fail("Bad Cache Absent", case.cache_absent, self.cache_absent)
def check_cache_misses(self, case):
if case.cache_misses is not None and int(self.cache_misses) != case.cache_misses:
return self.fail("Bad Cache Misses", case.cache_misses, self.cache_misses)
def check_cache_invalidations(self, case):
if case.cache_invalidations is not None and int(self.cache_invalidations) != case.cache_invalidations:
return self.fail("Bad Cache Invalidations", case.cache_invalidations, self.cache_invalidations)
def check_query_plan(self, case):
if case.query_plan is not None and case.query_plan != self.plan_type:
return self.fail("Bad query plan", case.query_plan, self.plan_type)
def check_rewritten_sql(self, case):
if case.rewritten is None:
return
queries = []
for q in ast.literal_eval(self.rewritten_sql).split(';'):
q = q.strip()
if q and q != '*/':
queries.append(q)
if case.rewritten != queries:
return self.fail("Bad rewritten SQL", case.rewritten, queries)
def check_number_of_queries(self, case):
if case.rewritten is not None and int(self.number_of_queries) != len(case.rewritten):
return self.fail("wrong number of queries", len(case.rewritten), int(self.number_of_queries))
class Case(object):
def __init__(self, sql, bindings=None, result=None, rewritten=None, doc='',
rowcount=None, cache_table=None, query_plan=None, cache_hits=None,
cache_misses=None, cache_absent=None, cache_invalidations=None,
remote_address="[::1]"):
# For all cache_* parameters, a number n means "check this value
# is exactly n," while None means "I am not interested in this
# value, leave it alone."
self.sql = sql
self.bindings = bindings or {}
self.result = result
if isinstance(rewritten, basestring):
rewritten = [rewritten]
self.rewritten = rewritten
self.rowcount = rowcount
self.doc = doc
self.query_plan = query_plan
self.cache_table = cache_table
self.cache_hits= cache_hits
self.cache_misses = cache_misses
self.cache_absent = cache_absent
self.cache_invalidations = cache_invalidations
self.remote_address = remote_address
@property
def is_testing_cache(self):
return any(attr is not None for attr in [self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations])
def run(self, cursor, env):
failures = []
env.querylog.reset()
if self.is_testing_cache:
tstart = self.table_stats(env)
if self.sql in ('begin', 'commit', 'rollback'):
getattr(cursor.connection, self.sql)()
else:
cursor.execute(self.sql, self.bindings)
if self.result is not None:
result = list(cursor)
if self.result != result:
failures.append("%r:\n%s !=\n%s" % (self.sql, self.result, result))
for i in range(30):
lines = env.querylog.tailer.readLines()
if not lines:
time.sleep(0.1)
continue
break
for line in lines:
case_failures = Log(line).check(self)
if case_failures:
failures.extend(case_failures)
if self.is_testing_cache:
tdelta = self.table_stats_delta(tstart, env)
if self.cache_hits is not None and tdelta['Hits'] != self.cache_hits:
failures.append("Bad Cache Hits: %s != %s" % (self.cache_hits, tdelta['Hits']))
if self.cache_absent is not None and tdelta['Absent'] != self.cache_absent:
failures.append("Bad Cache Absent: %s != %s" % (self.cache_absent, tdelta['Absent']))
if self.cache_misses is not None and tdelta['Misses'] != self.cache_misses:
failures.append("Bad Cache Misses: %s != %s" % (self.cache_misses, tdelta['Misses']))
if self.cache_invalidations is not None and tdelta['Invalidations'] != self.cache_invalidations:
failures.append("Bad Cache Invalidations: %s != %s" % (self.cache_invalidations, tdelta['Invalidations']))
return failures
def table_stats_delta(self, old, env):
result = {}
new = self.table_stats(env)
for k, v in new.items():
result[k] = new[k] - old[k]
return result
def table_stats(self, env):
return env.http_get('/debug/table_stats')[self.cache_table]
def __str__(self):
return "Case %r" % self.doc
class MultiCase(object):
def __init__(self, doc, sqls_and_cases):
self.doc = doc
self.sqls_and_cases = sqls_and_cases
def run(self, cursor, env):
failures = []
for case in self.sqls_and_cases:
if isinstance(case, basestring):
if case in ('begin', 'commit', 'rollback'):
getattr(cursor.connection, case)()
else:
cursor.execute(case)
continue
failures += case.run(cursor, env)
return failures
def __iter__(self):
return iter(self.sqls_and_cases)
def __str__(self):
return "MultiCase: %s" % self.doc
|
How to Flourish your Business – Is Communication Essential?
Thinking of opening up a business, company or a corporation? Then you need to learn some basics of it! Do not think that money is the rudimentary and the basic element necessary for that. Well, it sure is but not absolutely. We have to make sure that we are covering the entirety of the subject and taking care of different aspects while initiating a business, which is quite important. If balance is not maintained properly, implicating, if money is there, but other indispensable elements and facts are ignored, your business will be there in the market but probably, unproductive or unsuccessful. Therefore, making a good balance and check on the capital, human labor, machinery, devices, clients and technology within the business setup is the initiating of a well-planned organization. In addition to that, when all the mentioned elements are taken care of, make sure your organization is effective and efficient in communication. It is imperative to draft out a communication plan to highlight various strategies. A company’s philosophy is based on the fact on how communication is guaranteed within. Hence, effective communication is defined as understanding and knowing your personnel and the right kind of messages.
It is essential to develop a certain hierarchy within the office scenario, this will help you to generate operative and effective communication which is transferred within the entire office. If you are aware of the roles of your employees, a certain kind of communication might be allotted to them within the hierarchical structure, this will allow communication to take place in a more professional way.
The productivity and output of a certain communication channel will definitely have an effect on the quality of communication.
Is only possible when the right kind of devices and apparatus are being used in the process, with the help of which communication is fast as well as credible. These used means of communication should be reliable and prompt enough. Also, it should be made sure that vital drafts and information related to the business should be jotted down, whereas means of communication such as the Emails should be regularly checked and corrected if there is an error.
In order to initiate a process or a procedure, make sure you are well prepared for it, mentally and physically. Physically, you can start working on a plan when you have the guiding principles for it. Hence, for an effective interaction, it is essential that different communication strategies are drafted out. This allows the main and essential key points to be written down due to which, the employees are cognizant of the guidelines and systems to follow for a brilliant communication.
Lastly, the employees within the organization should be dealt with the essence of encouragement and motivation. They should be allowed to analyze the different stances of the business organization, this will allow them to be cleared within their mind which can eventually help in a better communication. A confused and a perplexed individual will not be of much benefit, as compared to the one who is well aware of the guidelines of the organization.
Jose Miller is an established academic writer with ten years of professional teaching experience and more than fifteen years of academic writing experience. During her tenure as an academic writer, she has written on a range of subjects. Currently, she is working as an academic content writer and content manager at ordercollegepapers.com.
|
#!/usr/bin/env python2.7
# Copyright (C) 2017, Weizhi Song, Torsten Thomas.
# songwz03@gmail.com
# t.thomas@unsw.edu.au
# Binning_refiner is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Binning_refiner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# metaWRAP author notes:
# I thank the original creator of this script! This is a great idea! To make
# this script more usable as part of the metaWRAP binning pipeline, I
# removed unnecessary visual aaspects of the original Bin_refiner script
# and made it python2 compatible.
# Check out the original program: https://github.com/songweizhi/Binning_refiner
# And the publication: https://www.ncbi.nlm.nih.gov/pubmed/28186226
import os
import glob
import shutil
import argparse
from time import sleep
from sys import stdout
from Bio import SeqIO
##################################################### CONFIGURATION ####################################################
parser = argparse.ArgumentParser()
parser.add_argument('-1',
required=True,
help='first bin folder name')
parser.add_argument('-2',
required=True,
help='second bin folder name')
parser.add_argument('-3',
required=False,
help='third bin folder name')
parser.add_argument('-o',
required=True,
help='output folder name')
parser.add_argument('-ms',
required=False,
default=524288,
type=int,
help='(optional) minimum size for refined bins, default = 524288 (0.5Mbp)')
args = vars(parser.parse_args())
output_dir = args['o']
if output_dir[-1]=='/':
output_dir=output_dir[:-1]
input_bin_folder_1 = args['1']
if input_bin_folder_1[-1] == '/':
input_bin_folder_1 = input_bin_folder_1[:-1]
input_bin_folder_2 = args['2']
if input_bin_folder_2[-1] == '/':
input_bin_folder_2 = input_bin_folder_2[:-1]
if args['3'] != None:
input_bin_folder_3 = args['3']
if input_bin_folder_3[-1] == '/':
input_bin_folder_3 = input_bin_folder_3[:-1]
bin_size_cutoff = args['ms']
bin_size_cutoff_MB = float("{0:.2f}".format(bin_size_cutoff / (1024 * 1024)))
# get input bin folder list
input_bin_folder_list = []
if args['3'] == None:
print('Specified 2 input bin sets: -1 %s -2 %s' % (input_bin_folder_1, input_bin_folder_2))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2]
else:
print('Specified 3 input bin sets: -1 %s -2 %s -3 %s' % (input_bin_folder_1, input_bin_folder_2, input_bin_folder_3))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2, input_bin_folder_3]
################################################ Define folder/file name ###############################################
wd = os.getcwd()
output_folder = output_dir
pwd_output_folder = '%s/%s' % (wd, output_folder)
########################################################################################################################
# get bin name list
bin_folder_1_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_1)
bin_folder_2_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_2)
# check input files
folder_bins_dict = {}
all_input_bins_list = []
all_input_bins_number_list = []
for bin_folder in input_bin_folder_list:
bins_files = '%s/%s/*.fa*' % (wd, bin_folder)
bin_folder_bins = [os.path.basename(file_name) for file_name in glob.glob(bins_files)]
all_input_bins_list.append(bin_folder_bins)
all_input_bins_number_list.append(len(bin_folder_bins))
folder_bins_dict[bin_folder] = bin_folder_bins
if len(bin_folder_bins) == 0:
print('No input bin detected from %s folder, please double-check!' % (bin_folder))
exit()
bin_folder_bins_ext_list = []
for bin in bin_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(bin)
bin_folder_bins_ext_list.append(bin_file_ext)
bin_folder_bins_ext_list_uniq = []
for each in bin_folder_bins_ext_list:
if each not in bin_folder_bins_ext_list_uniq:
bin_folder_bins_ext_list_uniq.append(each)
else:
pass
# check whether bins in the same folder have same extension, exit if not
if len(bin_folder_bins_ext_list_uniq) > 1:
print('Different file extensions were found from %s bins, please use same extension (fa, fas or fasta) '
'for all bins in the same folder.' % (bin_folder))
exit()
else:
pass
# create output folder
if os.path.isdir(output_folder):
shutil.rmtree(output_folder)
os.mkdir(output_folder)
else:
os.mkdir(output_folder)
# create folder to hold bins with renamed contig name
combined_all_bins_file = '%s/%s/combined_all_bins.fasta' % (wd, output_folder)
separator = '__'
for each_folder in input_bin_folder_list:
sleep(1)
print('Add folder/bin name to contig name for %s bins' % each_folder)
os.mkdir('%s/%s/%s_new' % (wd, output_folder, each_folder))
# add binning program and bin id to metabat_bin's contig name
each_folder_bins = folder_bins_dict[each_folder]
for each_bin in each_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(each_bin)
each_bin_content = SeqIO.parse('%s/%s/%s' % (wd, each_folder, each_bin), 'fasta')
new = open('%s/%s/%s_new/%s_%s.fasta' % (wd, output_folder, each_folder, each_folder, bin_file_name), 'w')
for each_contig in each_bin_content:
each_contig_new_id = '%s%s%s%s%s' % (each_folder, separator, bin_file_name, separator, each_contig.id)
each_contig.id = each_contig_new_id
each_contig.description = ''
SeqIO.write(each_contig, new, 'fasta')
new.close()
# Combine all new bins
os.system('cat %s/%s/%s_new/*.fasta > %s/%s/combined_%s_bins.fa' % (wd, output_folder, each_folder, wd, output_folder, each_folder))
os.system('rm -r %s/%s/%s_new' % (wd, output_folder, each_folder))
# combine all modified bins together
sleep(1)
print('Combine all bins together')
if len(input_bin_folder_list) == 2:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
os.system('cat %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, combined_all_bins_file))
if len(input_bin_folder_list) == 3:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
pwd_combined_folder_3_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_3)
os.system('cat %s %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, pwd_combined_folder_3_bins, combined_all_bins_file))
combined_all_bins = SeqIO.parse(combined_all_bins_file, 'fasta')
contig_bin_dict = {}
contig_length_dict = {}
for each in combined_all_bins:
each_id_split = each.id.split(separator)
folder_name = each_id_split[0]
bin_name = each_id_split[1]
contig_id = each_id_split[2]
length = len(each.seq)
if contig_id not in contig_bin_dict:
contig_bin_dict[contig_id] = ['%s%s%s' % (folder_name, separator, bin_name)]
contig_length_dict[contig_id] = length
elif contig_id in contig_bin_dict:
contig_bin_dict[contig_id].append('%s%s%s' % (folder_name, separator, bin_name))
contig_assignments_file = '%s/%s/contig_assignments.txt' % (wd, output_folder)
contig_assignments = open(contig_assignments_file, 'w')
for each in contig_bin_dict:
if len(contig_bin_dict[each]) == len(input_bin_folder_list):
contig_assignments.write('%s\t%s\t%s\n' % ('\t'.join(contig_bin_dict[each]), each, contig_length_dict[each]))
contig_assignments.close()
contig_assignments_file_sorted = '%s/%s/contig_assignments_sorted.txt' % (wd, output_folder)
contig_assignments_file_sorted_one_line = '%s/%s/contig_assignments_sorted_one_line.txt' % (wd, output_folder)
os.system('cat %s | sort > %s' % (contig_assignments_file, contig_assignments_file_sorted))
contig_assignments_sorted = open(contig_assignments_file_sorted)
contig_assignments_sorted_one_line = open(contig_assignments_file_sorted_one_line, 'w')
current_match = ''
current_match_contigs = []
current_length_total = 0
n = 1
for each in contig_assignments_sorted:
each_split = each.strip().split('\t')
current_contig = each_split[-2]
current_length = int(each_split[-1])
matched_bins = '\t'.join(each_split[:-2])
if current_match == '':
current_match = matched_bins
current_match_contigs.append(current_contig)
current_length_total += current_length
elif current_match == matched_bins:
current_match_contigs.append(current_contig)
current_length_total += current_length
elif current_match != matched_bins:
refined_bin_name = 'refined_bin%s' % n
if current_length_total >= bin_size_cutoff:
contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
n += 1
current_match = matched_bins
current_match_contigs = []
current_match_contigs.append(current_contig)
current_length_total = 0
current_length_total += current_length
if current_length_total >= bin_size_cutoff:
contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
else:
n -= 1
contig_assignments_sorted_one_line.close()
refined_bin_number = n
sleep(1)
print('The number of refined bins: %s' % refined_bin_number)
# Export refined bins and prepare input for GoogleVis
sleep(1)
print('Exporting refined bins...')
separated_1 = '%s/%s/Refined_bins_sources_and_length.txt' % (wd, output_folder)
separated_2 = '%s/%s/Refined_bins_contigs.txt' % (wd, output_folder)
googlevis_input_file = '%s/%s/GoogleVis_Sankey_%sMbp.csv' % (wd, output_folder, bin_size_cutoff_MB)
os.mkdir('%s/%s/Refined' % (wd, output_folder))
refined_bins = open(contig_assignments_file_sorted_one_line)
googlevis_input_handle = open(googlevis_input_file, 'w')
separated_1_handle = open(separated_1, 'w')
separated_2_handle = open(separated_2, 'w')
googlevis_input_handle.write('C1,C2,Length (Mbp)\n')
for each_refined_bin in refined_bins:
each_refined_bin_split = each_refined_bin.strip().split('\t')
each_refined_bin_name = each_refined_bin_split[0]
each_refined_bin_length = 0
each_refined_bin_contig = []
if len(input_bin_folder_list) == 2:
each_refined_bin_source = each_refined_bin_split[1:3]
each_refined_bin_length = int(each_refined_bin_split[3][:-2])
each_refined_bin_contig = each_refined_bin_split[4:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
if len(input_bin_folder_list) == 3:
each_refined_bin_source = each_refined_bin_split[1:4]
each_refined_bin_length = int(each_refined_bin_split[4][:-2])
each_refined_bin_contig = each_refined_bin_split[5:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
each_refined_bin_length_mbp = float("{0:.2f}".format(each_refined_bin_length / (1024 * 1024)))
m = 0
while m < len(each_refined_bin_source)-1:
googlevis_input_handle.write('%s,%s,%s\n' % (each_refined_bin_source[m], each_refined_bin_source[m+1], each_refined_bin_length_mbp))
m += 1
stdout.write('\rExtracting refined bin: %s.fasta' % each_refined_bin_name)
refined_bin_file = '%s/%s/Refined/%s.fasta' % (wd, output_folder, each_refined_bin_name)
refined_bin_handle = open(refined_bin_file, 'w')
input_contigs_file = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
input_contigs = SeqIO.parse(input_contigs_file, 'fasta')
for each_input_contig in input_contigs:
each_input_contig_id = each_input_contig.id.split(separator)[-1]
if each_input_contig_id in each_refined_bin_contig:
each_input_contig.id = each_input_contig_id
each_input_contig.description = ''
SeqIO.write(each_input_contig, refined_bin_handle, 'fasta')
refined_bin_handle.close()
googlevis_input_handle.close()
separated_1_handle.close()
separated_2_handle.close()
# remove temporary files
sleep(1)
print('\nDeleting temporary files')
os.system('rm %s' % contig_assignments_file)
os.system('rm %s' % (combined_all_bins_file))
os.system('rm %s/%s/*.fa' % (wd, output_folder))
os.system('rm %s' % (contig_assignments_file_sorted))
os.system('rm %s' % (contig_assignments_file_sorted_one_line))
sleep(1)
print('\nAll done!')
|
One night, Rosalinda is awakened by a noise in the garden. When she and her pet hen, Blanca, investigate, they see a man leaving with a large sack-full of fruit from Rosalinda's beloved lemon tree.
After consulting with family and neighbors about how to save her sick tree, Rosalinda sets out in search of La Anciana, the Old One, the only person who might have a solution to Rosalinda's predicament. When she finally meets La Anciana, the old woman offers an inventive way for Rosalinda to help her tree–and the Night Man who was driven to steal her lemons.
Set in the Mexican countryside, Under the Lemon Moon shines with the light of generosity and forgiveness. The gentle story and glowing illustrations make this tale perfect for sharing with young readers who are discovering how it feels to receive and give gifts.
Check out what close reading looks like for Under the Lemon Moon.
Discover more teaching ideas for Under the Lemon Moon from author Edith Hope Fine.
Check out Under the Lemon Moon in the Building Classroom Community in Second Grade unit and collection.
|
import unittest
from kcv_x1 import KCV
def pairs(text,dlm=' ',dlm2=':',cast=str):
for row in text.split(dlm):
cols = row.split(dlm2)
yield cols[0],cast(cols[1])
class test_memory_kcv(unittest.TestCase):
def setUp(self):
pass
#self.db = KCV()
#self.db.set_items('k1',pairs('a:3 b:1 c:2 d:0 e:5 f:4'))
### WRITE ###
def test_set(self):
db = KCV()
db.set('k1','c1',42)
db.set('k1','c2',4.2)
db.set('k1','c3','fourty two')
db.set('k1',1,'one')
db.set(2,'c2','two')
self.assertEqual(db.get('k1','c1'),42)
self.assertEqual(db.get('k1','c2'),4.2)
self.assertEqual(db.get('k1','c3'),'fourty two')
self.assertEqual(db.get('k1','c4'),None)
self.assertEqual(db.get('k1',1),'one')
self.assertEqual(db.get(2,'c2'),'two')
def test_set_items(self):
db = KCV()
db.set_items('k1',pairs('a:aa b:bb c:cc'))
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k1','b'),'bb')
self.assertEqual(db.get('k1','c'),'cc')
def test_incr(self):
db = KCV()
db.incr('k1','c1',2)
self.assertEqual(db.get('k1','c1'),2,'incorrect INCR of new key new col')
db.incr('k1','c1',3)
self.assertEqual(db.get('k1','c1'),5,'incorrect INCR of existing key existing col')
db.incr('k1','c1',-1)
self.assertEqual(db.get('k1','c1'),4,'incorrect INCR with negative value')
def test_incr_items(self):
db = KCV()
db.incr_items('k1',pairs('a:11 b:22 c:33',cast=int))
self.assertEqual(db.get('k1','a'),11)
self.assertEqual(db.get('k1','b'),22)
self.assertEqual(db.get('k1','c'),33)
db.incr_items('k1',pairs('a:1 b:2 c:3',cast=int))
self.assertEqual(db.get('k1','a'),12)
self.assertEqual(db.get('k1','b'),24)
self.assertEqual(db.get('k1','c'),36)
def test_delete(self):
db = KCV()
db.set('k1','c1',123)
db.set('k1','c2',321)
self.assertEqual(db.get('k1','c1'),123)
self.assertEqual(db.get('k1','c2'),321)
db.delete('k1','c1')
self.assertEqual(db.get('k1','c1'),None)
self.assertEqual(db.get('k1','c2'),321)
db.delete('k1','c2')
self.assertEqual(db.get('k1','c2'),None)
def test_drop(self):
db = KCV()
db.set('k1','c1',1)
db.set('k2','c2',2)
db.set('k3','c3',3)
self.assertEqual(db.get('k1','c1'),1)
self.assertEqual(db.get('k2','c2'),2)
self.assertEqual(db.get('k3','c3'),3)
db.drop()
db.create()
self.assertEqual(db.get('k1','c1'),None)
self.assertEqual(db.get('k2','c2'),None)
self.assertEqual(db.get('k3','c3'),None)
### READ ###
def test_get(self):
db = KCV()
db.set('k1','c1',1)
db.set('k1','c2',2)
db.set('k2','c3',3)
self.assertEqual(db.get('k1','c1'),1)
self.assertEqual(db.get('k1','c2'),2)
self.assertEqual(db.get('k2','c3'),3)
self.assertEqual(db.get('k2','c4'),None)
self.assertEqual(db.get('k1','xxx',123),123)
self.assertEqual(db.get('xxx','zzz',123),123)
def test_items(self):
db = KCV()
db.set('k1','c1',1)
db.set('k1','c2',2)
db.set('k1','c3',3)
items = dict(db.items('k1'))
self.assertEqual(len(items),3)
self.assertEqual(items['c2'],2)
self.assertEqual(items['c3'],3)
self.assertEqual(items['c3'],3)
def test_scan_items(self):
db = KCV()
db.set('k11','c11',1)
db.set('k11','c12',2)
db.set('k12','c11',3)
db.set('k12','c12',4)
k_items = dict(db.scan_items('k1*','c11',cast=dict))
self.assertEqual(len(k_items),2)
self.assertEqual('k11' in k_items,True)
self.assertEqual('k12' in k_items,True)
self.assertEqual(len(k_items['k11']),1)
self.assertEqual(len(k_items['k12']),1)
self.assertEqual(k_items['k11']['c11'],1)
self.assertEqual(k_items['k12']['c11'],3)
def test_scan(self):
db = KCV()
db.set('k11','c11',1)
db.set('k11','c12',2)
db.set('k12','c11',3)
db.set('k12','c12',4)
kcv = list(db.scan(order='kaca'))
self.assertEqual(len(kcv),4)
self.assertEqual(kcv[0],('k11','c11',1))
self.assertEqual(kcv[1],('k11','c12',2))
self.assertEqual(kcv[2],('k12','c11',3))
self.assertEqual(kcv[3],('k12','c12',4))
k = list(db.scan(mode='k',order='ka'))
self.assertEqual(len(k),2)
self.assertEqual(k[0],'k11')
self.assertEqual(k[1],'k12')
def test_scan_int(self):
db = KCV()
db.set(1,11,111)
db.set(1,12,123)
db.set(2,22,222)
db.set(2,11,234)
db.set(3,11,345)
kcv = list(db.scan(k=1,order='kaca'))
self.assertEqual(len(kcv),2)
self.assertEqual(kcv[0],(1,11,111))
self.assertEqual(kcv[1],(1,12,123))
kcv = list(db.scan(kin=[1,3],cin=[11,12],order='kaca'))
self.assertEqual(len(kcv),3)
self.assertEqual(kcv[0],(1,11,111))
self.assertEqual(kcv[1],(1,12,123))
self.assertEqual(kcv[2],(3,11,345))
def test_col_store(self):
db = KCV()
db.set_items('k1',pairs('a:aa b:bb c:cc'))
db.set_items('k2',pairs('d:dd e:ee f:ff'))
db.set_items('k3',pairs('g:gg h:hh i:ii'))
db.to_col_store('kcv_x1_test.db',batch=4)
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k2','e'),'ee')
self.assertEqual(db.get('k3','i'),'ii')
db.drop()
db.create()
self.assertEqual(db.items('k1'), {})
self.assertEqual(db.items('k2'), {})
self.assertEqual(db.items('k3'), {})
db.from_col_store('kcv_x1_test.db')
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k2','e'),'ee')
self.assertEqual(db.get('k3','i'),'ii')
def test_block(self):
with KCV('kcv_x1_test.db') as db:
db.set('k1','c1',42)
db2=KCV('kcv_x1_test.db')
self.assertEqual(db2.get('k1','c1'),42)
def test_compact(self):
import os
path = 'kcv_x1_test.db'
db=KCV(path)
for i in range(1000):
db.set(i,i,i)
db.sync()
size1 = os.stat(path)[6]
db.drop()
db.sync()
size2 = os.stat(path)[6]
db.sync(compact=True)
size3 = os.stat(path)[6]
self.assertTrue(size3 < size2 <= size1)
if __name__=="__main__":
unittest.main()
|
Time certainly flies when you’re having fun, and having fun is exactly what we’ve been doing while reviewing all of your artistic submissions since last month!
Yes, it was just one month ago that we bought the Players' Gallery back in full force, and we’ve been overwhelmed by the sheer volume and positivity of your response. So, as promised, we’re back this month with another selection of RuneScape-inspired masterpieces for you to enjoy.
There were a lot of worthy contenders, but, after a great deal of discussion, debate and Duel Arena fighting in the studio car park, we’ve selected the eight which made us smile the most. From malfunctioning crashed stars and worried wizards, to a pink mage and a dedicated butterfly catcher - we hope you like our selection too.
We also have the small matter of announcing this month's featured artist... Our most accomplished piece comes from Marikdebie and their submission, “Duel Arenaâ€. Two mighty warriors fighting it out with fire and anti-fire? It’s a sight to behold! Well done to our winner who's bagged a month of RuneScape membership absolutely free.
To check out our winner and the other brilliant entries this month, head on over to Player Gallery 9 this very moment. And, when you’ve done that, you can dive in and discuss our gallery feature, give us feedback and make suggestions to us, all on our official Gallery - Feedback & Ideas forum thread. Get stuck in!
Until next month, let’s see what other artistic wonders you can produce by sending us in your lovingly produced artwork. Just be sure to follow the guidelines on the main gallery page to make sure we can review your entries.
|
#!/usr/bin/env python2
import sys
sys.path.append('../../../src/')
import cgmap as cg
import mdtraj as md
import md_check as check
############################### config #####################################
input_traj = "dppc.trr"
input_top = "dppc.pdb"
input_maps = ["mapping_bead_1_dppc",
"mapping_bead_2_dppc",
"mapping_bead_3_dppc"]
output_traj = "dppc.trr"
output_top = "dppc.pdb"
reference_traj = "dppc.trr"
reference_top = "dppc.pdb"
output_dir ='./output/'
input_dir ='./input/'
reference_dir ='./reference/'
#collection of names of molecules.
lipid_types = ['DPPC']
############################### config proc ################################
fq_input_maps = [ input_dir + loc for loc in input_maps ]
#read maps for each bead from files.
#list of lists of strings.
mapping_atom_names_dppc = [ [ l.strip() for l in open(mp_file,'r').readlines()]
for mp_file in fq_input_maps ]
#index strings for which to atom query the trajectory when making beads.
#list of lists of strings.
name_lists = [ " or ".join( ["name %s"%mn for mn in mapping_names ])
for mapping_names in mapping_atom_names_dppc ]
#names of cg beads created.
label_lists = ['DPH','DPM','DPT']
############################### run ########################################
### pull in trajectories
trj = md.load(input_dir + input_traj,top=input_dir + input_top)
#the types of each molecule in the trajectory.
molecule_types = [lipid_types.index(r.name) for r in trj.top.residues]
#actual map command
cg_trj = cg.map_molecules( trj = trj,
selection_list = [ name_lists ],
bead_label_list = [ label_lists ],
molecule_types = molecule_types,
split_shared_atoms = False)
cg_trj.save(output_dir + output_traj)
cg_trj[0].save(output_dir + output_top)
############################### check results ###############################
# reloading results from disk.
cg_traj = cg_trj.load(output_dir + output_traj,top=output_dir + output_top)
ref_cg_traj = cg_trj.load(reference_dir + reference_traj,
top=reference_dir + reference_top)
result=check.md_content_equality(cg_traj,ref_cg_traj)
sys.exit(check.check_result_to_exitval(result))
|
What is better Gridle or Intervals? Getting the suitable Project Management Software product is as simple as evaluating the good and weaker functions and terms offered by Gridle and Intervals. Here you can also match their all round scores: 8.0 for Gridle vs. 8.3 for Intervals. Or you can look at their general user satisfaction rating, 100% for Gridle vs. 98% for Intervals.
We suggest that you put some effort and examine their specific functions and decide which one is the better alternative for your business. Moreover. keep in mind to factor in your company’s or industry’s special situation, for example, a multilingual software for a global team or a mobile platform to help you work on the go.
Users who are pressed for time or would like to get a Productivity Suite Software recommendation from our team may want to try out these top choices for the current year: Slack, Wrike, Asana.
Intervals is offered in 4 different plans, each catering to a specific storage size and number of active projects. All plans come with a 21-day trial period. Give the details a look, and select the best plan for your business.
Hyundai, Ghirardelli, and Warner Bros.
Take control of your workflow with Intervals, a web-based time, task and project management software.
In case you are still having doubts about which solution will be best for your company it might be a good idea to investigate each service’s social metrics. Such metrics are usually a way to see how popular each product is and how large is its online presence. For instance, in case of Facebook Gridle has 1564 likes on their official profile while Intervals profile is liked by 49383 people.
|
#! /usr/bin/env python
# coding=utf-8
# Import all model parameters and spatial datasets to MongoDB
# Author: Junzhi Liu
# Revised: Liang-Jun Zhu
#
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from gridfs import *
from config import *
from find_sites import FindSites
from gen_subbasins import ImportSubbasinStatistics
from generate_stream_input import GenerateReachTable
from import_bmp_scenario import ImportBMPTables
from import_parameters import (ImportLookupTables, ImportModelConfiguration,
ImportParameters)
from weights_mongo import GenerateWeightDependentParameters, GenerateWeightInfo
def BuildMongoDB():
statusFile = WORKING_DIR + os.sep + FN_STATUS_MONGO
f = open(statusFile, 'w')
# build mongodb database
try:
conn = MongoClient(host=HOSTNAME, port=PORT)
except ConnectionFailure:
sys.stderr.write("Could not connect to MongoDB: %s" % ConnectionFailure.message)
sys.exit(1)
db = conn[SpatialDBName]
# import parameters information to MongoDB
ImportParameters(TXT_DB_DIR + os.sep + sqliteFile, db)
# import lookup tables from to MongoDB as GridFS. By LJ, 2016-6-13
ImportLookupTables(TXT_DB_DIR + os.sep + sqliteFile, db)
# import model configuration
ImportModelConfiguration(db)
f.write("10, Generating reach table...\n")
f.flush()
GenerateReachTable(WORKING_DIR, db, forCluster)
# prepare meteorology data
if not forCluster:
subbasinRaster = WORKING_DIR + os.sep + mask_to_ext # mask.tif
else:
subbasinRaster = WORKING_DIR + os.sep + subbasinOut # subbasin.tif
if stormMode:
meteoThiessenList = [PrecSitesThiessen]
meteoTypeList = [DataType_Precipitation]
else:
meteoThiessenList = [MeteorSitesThiessen, PrecSitesThiessen]
meteoTypeList = [DataType_Meteorology, DataType_Precipitation]
f.write("20, Finding nearby stations for each sub-basin...\n")
f.flush()
if not forCluster: # OMP version
basinFile = WORKING_DIR + os.sep + basinVec
nSubbasins = FindSites(db, ClimateDBName, basinFile, FLD_BASINID, meteoThiessenList, meteoTypeList, simuMode)
subbasinFile = WORKING_DIR + os.sep + DIR_NAME_SUBBSN + os.sep + subbasinVec # MPI version
nSubbasins = FindSites(db, ClimateDBName, subbasinFile, FLD_SUBBASINID, meteoThiessenList, meteoTypeList, simuMode)
print "Meteorology sites table generated done. Number of sub-basins:%d" % nSubbasins
if not forCluster: # changed by LJ, SubbasinID is 0 means the whole basin!
nSubbasins = 0
# import raster data to MongoDB
f.write("40, Importing raster to MongoDB...\n")
f.flush()
tifFolder = WORKING_DIR + os.sep + DIR_NAME_TIFFIMPORT
if not os.path.exists(tifFolder):
os.mkdir(tifFolder)
subbasinStartID = 1
if not forCluster:
subbasinStartID = 0
for i in range(subbasinStartID, nSubbasins + 1):
subdir = tifFolder + os.sep + str(i)
if not os.path.exists(subdir):
os.mkdir(subdir)
strCmd = '"%s/import_raster" %s %s %s %s %s %d %s' % (
CPP_PROGRAM_DIR, subbasinRaster, WORKING_DIR, SpatialDBName,
DB_TAB_SPATIAL.upper(), HOSTNAME, PORT, tifFolder)
print strCmd
RunExternalCmd(strCmd)
# os.system(strCmd)
print 'Generating weight data...'
f.write("70, Generating weight data for interpolation of meteorology data...\n")
f.flush()
for i in range(subbasinStartID, nSubbasins + 1):
GenerateWeightInfo(conn, SpatialDBName, i, stormMode)
# added by Liangjun, 2016-6-17
GenerateWeightDependentParameters(conn, i)
if genIUH:
f.write("80, Generating IUH (Instantaneous Unit Hydrograph)...\n")
f.flush()
dt = 24
print 'Generating IUH (Instantaneous Unit Hydrograph)...'
strCmd = '"%s/iuh" %s %d %s %s %s %d' % (CPP_PROGRAM_DIR, HOSTNAME, PORT,
SpatialDBName, DB_TAB_SPATIAL.upper(), dt, nSubbasins)
print strCmd
# os.system(strCmd)
RunExternalCmd(strCmd)
f.write("90, Generating Grid layering...\n")
f.flush()
layeringDir = WORKING_DIR + os.sep + DIR_NAME_LAYERINFO
if not os.path.exists(layeringDir):
os.mkdir(layeringDir)
print 'Generating Grid layering...'
strCmd = '"%s/grid_layering" %s %d %s %s %s %d' % (
CPP_PROGRAM_DIR, HOSTNAME, PORT, layeringDir, SpatialDBName, DB_TAB_SPATIAL.upper(), nSubbasins)
print strCmd
# os.system(strCmd)
RunExternalCmd(strCmd)
# Test if the grid layering data is imported successfully. Added by LJ, 2016-11-3
gridLayeringFiles = ['%d_FLOWOUT_INDEX_D8' % nSubbasins, '%d_FLOWIN_INDEX_D8' % nSubbasins]
spatial = GridFS(db, DB_TAB_SPATIAL.upper())
needReRun = False
while not needReRun:
needReRun = True
for gridlyr in gridLayeringFiles:
if not spatial.exists(filename=gridlyr):
needReRun = False
print "%s is not imported successfully, grid_layering will be rerun!" % gridlyr
RunExternalCmd(strCmd)
break
# Import BMP scenario database to MongoDB
ImportBMPTables()
ImportLookupTables(TXT_DB_DIR + os.sep + sqliteFile, db)
ImportModelConfiguration(db)
ImportSubbasinStatistics()
f.write("100,Finished!")
f.close()
print 'Build DB: %s finished!' % SpatialDBName
# test code
if __name__ == "__main__":
LoadConfiguration(GetINIfile())
BuildMongoDB()
|
Instead of exploring the benefits of flexible web design, we rely on a little white lie: “minimum screen resolution.” These three words contain a powerful magic, under the cover of which we churn out fixed-width layout after fixed-width layout, perhaps revisiting a design every few years to “bump up” the width once it’s judged safe enough to do so. “Minimum screen resolution” lets us design for a contrived subset of users who see our design as god and Photoshop intended.
Five years after Marcotte’s article, any so-called web designer proposing to work with a fixed “minimum screen resolution” would … and should … be fired.
Except maybe in government and big industry.
This entry was posted in Design, Mobile and tagged design, web. Bookmark the permalink.
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Codec(msrest.serialization.Model):
"""Describes the basic properties of all codecs.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Audio, CopyAudio, CopyVideo, Video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.Audio': 'Audio', '#Microsoft.Media.CopyAudio': 'CopyAudio', '#Microsoft.Media.CopyVideo': 'CopyVideo', '#Microsoft.Media.Video': 'Video'}
}
def __init__(
self,
**kwargs
):
super(Codec, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.label = kwargs.get('label', None)
class Audio(Codec):
"""Defines the common properties for all audio codecs.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AacAudio.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param channels: The number of channels in the audio.
:type channels: int
:param sampling_rate: The sampling rate to use for encoding in hertz.
:type sampling_rate: int
:param bitrate: The bitrate, in bits per second, of the output encoded audio.
:type bitrate: int
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'channels': {'key': 'channels', 'type': 'int'},
'sampling_rate': {'key': 'samplingRate', 'type': 'int'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AacAudio': 'AacAudio'}
}
def __init__(
self,
**kwargs
):
super(Audio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Audio' # type: str
self.channels = kwargs.get('channels', None)
self.sampling_rate = kwargs.get('sampling_rate', None)
self.bitrate = kwargs.get('bitrate', None)
class AacAudio(Audio):
"""Describes Advanced Audio Codec (AAC) audio encoding settings.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param channels: The number of channels in the audio.
:type channels: int
:param sampling_rate: The sampling rate to use for encoding in hertz.
:type sampling_rate: int
:param bitrate: The bitrate, in bits per second, of the output encoded audio.
:type bitrate: int
:param profile: The encoding profile to be used when encoding audio with AAC. Possible values
include: "AacLc", "HeAacV1", "HeAacV2".
:type profile: str or ~azure.mgmt.media.models.AacAudioProfile
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'channels': {'key': 'channels', 'type': 'int'},
'sampling_rate': {'key': 'samplingRate', 'type': 'int'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'profile': {'key': 'profile', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AacAudio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AacAudio' # type: str
self.profile = kwargs.get('profile', None)
class ClipTime(msrest.serialization.Model):
"""Base class for specifying a clip time. Use sub classes of this class to specify the time position in the media.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AbsoluteClipTime, UtcClipTime.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime', '#Microsoft.Media.UtcClipTime': 'UtcClipTime'}
}
def __init__(
self,
**kwargs
):
super(ClipTime, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AbsoluteClipTime(ClipTime):
"""Specifies the clip time as an absolute time position in the media file. The absolute time can point to a different position depending on whether the media file starts from a timestamp of zero or not.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param time: Required. The time position on the timeline of the input media. It is usually
specified as an ISO8601 period. e.g PT30S for 30 seconds.
:type time: ~datetime.timedelta
"""
_validation = {
'odata_type': {'required': True},
'time': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'time': {'key': 'time', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
super(AbsoluteClipTime, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AbsoluteClipTime' # type: str
self.time = kwargs['time']
class AccessControl(msrest.serialization.Model):
"""AccessControl.
:param default_action: The behavior for IP access control in Key Delivery. Possible values
include: "Allow", "Deny".
:type default_action: str or ~azure.mgmt.media.models.DefaultAction
:param ip_allow_list: The IP allow list for access control in Key Delivery. If the default
action is set to 'Allow', the IP allow list must be empty.
:type ip_allow_list: list[str]
"""
_attribute_map = {
'default_action': {'key': 'defaultAction', 'type': 'str'},
'ip_allow_list': {'key': 'ipAllowList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AccessControl, self).__init__(**kwargs)
self.default_action = kwargs.get('default_action', None)
self.ip_allow_list = kwargs.get('ip_allow_list', None)
class AccountEncryption(msrest.serialization.Model):
"""AccountEncryption.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~azure.mgmt.media.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~azure.mgmt.media.models.KeyVaultProperties
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccountFilter(ProxyResource):
"""An Account Filter.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param presentation_time_range: The presentation time range.
:type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange
:param first_quality: The first quality.
:type first_quality: ~azure.mgmt.media.models.FirstQuality
:param tracks: The tracks selection conditions.
:type tracks: list[~azure.mgmt.media.models.FilterTrackSelection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'},
'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'},
'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(AccountFilter, self).__init__(**kwargs)
self.system_data = None
self.presentation_time_range = kwargs.get('presentation_time_range', None)
self.first_quality = kwargs.get('first_quality', None)
self.tracks = kwargs.get('tracks', None)
class AccountFilterCollection(msrest.serialization.Model):
"""A collection of AccountFilter items.
:param value: A collection of AccountFilter items.
:type value: list[~azure.mgmt.media.models.AccountFilter]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccountFilter]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountFilterCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AkamaiAccessControl(msrest.serialization.Model):
"""Akamai access control.
:param akamai_signature_header_authentication_key_list: authentication key list.
:type akamai_signature_header_authentication_key_list:
list[~azure.mgmt.media.models.AkamaiSignatureHeaderAuthenticationKey]
"""
_attribute_map = {
'akamai_signature_header_authentication_key_list': {'key': 'akamaiSignatureHeaderAuthenticationKeyList', 'type': '[AkamaiSignatureHeaderAuthenticationKey]'},
}
def __init__(
self,
**kwargs
):
super(AkamaiAccessControl, self).__init__(**kwargs)
self.akamai_signature_header_authentication_key_list = kwargs.get('akamai_signature_header_authentication_key_list', None)
class AkamaiSignatureHeaderAuthenticationKey(msrest.serialization.Model):
"""Akamai Signature Header authentication key.
:param identifier: identifier of the key.
:type identifier: str
:param base64_key: authentication key.
:type base64_key: str
:param expiration: The expiration time of the authentication key.
:type expiration: ~datetime.datetime
"""
_attribute_map = {
'identifier': {'key': 'identifier', 'type': 'str'},
'base64_key': {'key': 'base64Key', 'type': 'str'},
'expiration': {'key': 'expiration', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(AkamaiSignatureHeaderAuthenticationKey, self).__init__(**kwargs)
self.identifier = kwargs.get('identifier', None)
self.base64_key = kwargs.get('base64_key', None)
self.expiration = kwargs.get('expiration', None)
class ApiError(msrest.serialization.Model):
"""The API error.
:param error: The error properties.
:type error: ~azure.mgmt.media.models.ODataError
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ODataError'},
}
def __init__(
self,
**kwargs
):
super(ApiError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class Asset(ProxyResource):
"""An Asset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar asset_id: The Asset ID.
:vartype asset_id: str
:ivar created: The creation date of the Asset.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Asset.
:vartype last_modified: ~datetime.datetime
:param alternate_id: The alternate ID of the Asset.
:type alternate_id: str
:param description: The Asset description.
:type description: str
:param container: The name of the asset blob container.
:type container: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:ivar storage_encryption_format: The Asset encryption format. One of None or
MediaStorageEncryption. Possible values include: "None", "MediaStorageClientEncryption".
:vartype storage_encryption_format: str or
~azure.mgmt.media.models.AssetStorageEncryptionFormat
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'asset_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'storage_encryption_format': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'asset_id': {'key': 'properties.assetId', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'alternate_id': {'key': 'properties.alternateId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'container': {'key': 'properties.container', 'type': 'str'},
'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'},
'storage_encryption_format': {'key': 'properties.storageEncryptionFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Asset, self).__init__(**kwargs)
self.system_data = None
self.asset_id = None
self.created = None
self.last_modified = None
self.alternate_id = kwargs.get('alternate_id', None)
self.description = kwargs.get('description', None)
self.container = kwargs.get('container', None)
self.storage_account_name = kwargs.get('storage_account_name', None)
self.storage_encryption_format = None
class AssetCollection(msrest.serialization.Model):
"""A collection of Asset items.
:param value: A collection of Asset items.
:type value: list[~azure.mgmt.media.models.Asset]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Asset]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AssetContainerSas(msrest.serialization.Model):
"""The Asset Storage container SAS URLs.
:param asset_container_sas_urls: The list of Asset container SAS URLs.
:type asset_container_sas_urls: list[str]
"""
_attribute_map = {
'asset_container_sas_urls': {'key': 'assetContainerSasUrls', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AssetContainerSas, self).__init__(**kwargs)
self.asset_container_sas_urls = kwargs.get('asset_container_sas_urls', None)
class AssetFileEncryptionMetadata(msrest.serialization.Model):
"""The Asset File Storage encryption metadata.
All required parameters must be populated in order to send to Azure.
:param initialization_vector: The Asset File initialization vector.
:type initialization_vector: str
:param asset_file_name: The Asset File name.
:type asset_file_name: str
:param asset_file_id: Required. The Asset File Id.
:type asset_file_id: str
"""
_validation = {
'asset_file_id': {'required': True},
}
_attribute_map = {
'initialization_vector': {'key': 'initializationVector', 'type': 'str'},
'asset_file_name': {'key': 'assetFileName', 'type': 'str'},
'asset_file_id': {'key': 'assetFileId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetFileEncryptionMetadata, self).__init__(**kwargs)
self.initialization_vector = kwargs.get('initialization_vector', None)
self.asset_file_name = kwargs.get('asset_file_name', None)
self.asset_file_id = kwargs['asset_file_id']
class AssetFilter(ProxyResource):
"""An Asset Filter.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param presentation_time_range: The presentation time range.
:type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange
:param first_quality: The first quality.
:type first_quality: ~azure.mgmt.media.models.FirstQuality
:param tracks: The tracks selection conditions.
:type tracks: list[~azure.mgmt.media.models.FilterTrackSelection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'},
'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'},
'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(AssetFilter, self).__init__(**kwargs)
self.system_data = None
self.presentation_time_range = kwargs.get('presentation_time_range', None)
self.first_quality = kwargs.get('first_quality', None)
self.tracks = kwargs.get('tracks', None)
class AssetFilterCollection(msrest.serialization.Model):
"""A collection of AssetFilter items.
:param value: A collection of AssetFilter items.
:type value: list[~azure.mgmt.media.models.AssetFilter]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AssetFilter]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetFilterCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AssetStreamingLocator(msrest.serialization.Model):
"""Properties of the Streaming Locator.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Streaming Locator name.
:vartype name: str
:ivar asset_name: Asset Name.
:vartype asset_name: str
:ivar created: The creation time of the Streaming Locator.
:vartype created: ~datetime.datetime
:ivar start_time: The start time of the Streaming Locator.
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time of the Streaming Locator.
:vartype end_time: ~datetime.datetime
:ivar streaming_locator_id: StreamingLocatorId of the Streaming Locator.
:vartype streaming_locator_id: str
:ivar streaming_policy_name: Name of the Streaming Policy used by this Streaming Locator.
:vartype streaming_policy_name: str
:ivar default_content_key_policy_name: Name of the default ContentKeyPolicy used by this
Streaming Locator.
:vartype default_content_key_policy_name: str
"""
_validation = {
'name': {'readonly': True},
'asset_name': {'readonly': True},
'created': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'streaming_locator_id': {'readonly': True},
'streaming_policy_name': {'readonly': True},
'default_content_key_policy_name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'asset_name': {'key': 'assetName', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'streaming_locator_id': {'key': 'streamingLocatorId', 'type': 'str'},
'streaming_policy_name': {'key': 'streamingPolicyName', 'type': 'str'},
'default_content_key_policy_name': {'key': 'defaultContentKeyPolicyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetStreamingLocator, self).__init__(**kwargs)
self.name = None
self.asset_name = None
self.created = None
self.start_time = None
self.end_time = None
self.streaming_locator_id = None
self.streaming_policy_name = None
self.default_content_key_policy_name = None
class Preset(msrest.serialization.Model):
"""Base type for all Presets, which define the recipe or instructions on how the input media files should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioAnalyzerPreset, BuiltInStandardEncoderPreset, FaceDetectorPreset, StandardEncoderPreset.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioAnalyzerPreset': 'AudioAnalyzerPreset', '#Microsoft.Media.BuiltInStandardEncoderPreset': 'BuiltInStandardEncoderPreset', '#Microsoft.Media.FaceDetectorPreset': 'FaceDetectorPreset', '#Microsoft.Media.StandardEncoderPreset': 'StandardEncoderPreset'}
}
def __init__(
self,
**kwargs
):
super(Preset, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AudioAnalyzerPreset(Preset):
"""The Audio Analyzer preset applies a pre-defined set of AI-based analysis operations, including speech transcription. Currently, the preset supports processing of content with a single audio track.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoAnalyzerPreset.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param audio_language: The language for the audio payload in the input using the BCP-47 format
of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is
recommended that you specify it. The language must be specified explicitly for
AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If
the language isn't specified or set to null, automatic language detection will choose the first
language detected and process with the selected language for the duration of the file. It does
not currently support dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with clearly discernable
speech. If automatic detection fails to find the language, transcription would fallback to
'en-US'." The list of supported languages is available here:
https://go.microsoft.com/fwlink/?linkid=2109463.
:type audio_language: str
:param mode: Determines the set of audio analysis operations to be performed. If unspecified,
the Standard AudioAnalysisMode would be chosen. Possible values include: "Standard", "Basic".
:type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'audio_language': {'key': 'audioLanguage', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.VideoAnalyzerPreset': 'VideoAnalyzerPreset'}
}
def __init__(
self,
**kwargs
):
super(AudioAnalyzerPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioAnalyzerPreset' # type: str
self.audio_language = kwargs.get('audio_language', None)
self.mode = kwargs.get('mode', None)
self.experimental_options = kwargs.get('experimental_options', None)
class Overlay(msrest.serialization.Model):
"""Base type for all overlays - image, audio or video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioOverlay, VideoOverlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioOverlay': 'AudioOverlay', '#Microsoft.Media.VideoOverlay': 'VideoOverlay'}
}
def __init__(
self,
**kwargs
):
super(Overlay, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.input_label = kwargs['input_label']
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
self.fade_in_duration = kwargs.get('fade_in_duration', None)
self.fade_out_duration = kwargs.get('fade_out_duration', None)
self.audio_gain_level = kwargs.get('audio_gain_level', None)
class AudioOverlay(Overlay):
"""Describes the properties of an audio overlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AudioOverlay, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioOverlay' # type: str
class TrackDescriptor(msrest.serialization.Model):
"""Base type for all TrackDescriptor types, which define the metadata and selection for tracks that should be processed by a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'}
}
def __init__(
self,
**kwargs
):
super(TrackDescriptor, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AudioTrackDescriptor(TrackDescriptor):
"""A TrackSelection to select audio tracks.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'}
}
def __init__(
self,
**kwargs
):
super(AudioTrackDescriptor, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioTrackDescriptor' # type: str
self.channel_mapping = kwargs.get('channel_mapping', None)
class BuiltInStandardEncoderPreset(Preset):
"""Describes a built-in preset for encoding the input video with the Standard Encoder.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param preset_name: Required. The built-in preset to be used for encoding videos. Possible
values include: "H264SingleBitrateSD", "H264SingleBitrate720p", "H264SingleBitrate1080p",
"AdaptiveStreaming", "AACGoodQualityAudio", "ContentAwareEncodingExperimental",
"ContentAwareEncoding", "CopyAllBitrateNonInterleaved", "H264MultipleBitrate1080p",
"H264MultipleBitrate720p", "H264MultipleBitrateSD", "H265ContentAwareEncoding",
"H265AdaptiveStreaming", "H265SingleBitrate720p", "H265SingleBitrate1080p",
"H265SingleBitrate4K".
:type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset
"""
_validation = {
'odata_type': {'required': True},
'preset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'preset_name': {'key': 'presetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BuiltInStandardEncoderPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.BuiltInStandardEncoderPreset' # type: str
self.preset_name = kwargs['preset_name']
class CbcsDrmConfiguration(msrest.serialization.Model):
"""Class to specify DRM configurations of CommonEncryptionCbcs scheme in Streaming Policy.
:param fair_play: FairPlay configurations.
:type fair_play: ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration
:param play_ready: PlayReady configurations.
:type play_ready: ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration
:param widevine: Widevine configurations.
:type widevine: ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration
"""
_attribute_map = {
'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'},
'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'},
'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CbcsDrmConfiguration, self).__init__(**kwargs)
self.fair_play = kwargs.get('fair_play', None)
self.play_ready = kwargs.get('play_ready', None)
self.widevine = kwargs.get('widevine', None)
class CencDrmConfiguration(msrest.serialization.Model):
"""Class to specify DRM configurations of CommonEncryptionCenc scheme in Streaming Policy.
:param play_ready: PlayReady configurations.
:type play_ready: ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration
:param widevine: Widevine configurations.
:type widevine: ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration
"""
_attribute_map = {
'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'},
'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CencDrmConfiguration, self).__init__(**kwargs)
self.play_ready = kwargs.get('play_ready', None)
self.widevine = kwargs.get('widevine', None)
class CheckNameAvailabilityInput(msrest.serialization.Model):
"""The input to the check name availability request.
:param name: The account name.
:type name: str
:param type: The account type. For a Media Services account, this should be 'MediaServices'.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityInput, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CommonEncryptionCbcs(msrest.serialization.Model):
"""Class for CommonEncryptionCbcs encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param drm: Configuration of DRMs for current encryption scheme.
:type drm: ~azure.mgmt.media.models.CbcsDrmConfiguration
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'drm': {'key': 'drm', 'type': 'CbcsDrmConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CommonEncryptionCbcs, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.drm = kwargs.get('drm', None)
class CommonEncryptionCenc(msrest.serialization.Model):
"""Class for envelope encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param drm: Configuration of DRMs for CommonEncryptionCenc encryption scheme.
:type drm: ~azure.mgmt.media.models.CencDrmConfiguration
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'drm': {'key': 'drm', 'type': 'CencDrmConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CommonEncryptionCenc, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.drm = kwargs.get('drm', None)
class ContentKeyPolicy(ProxyResource):
"""A Content Key Policy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar policy_id: The legacy Policy ID.
:vartype policy_id: str
:ivar created: The creation date of the Policy.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Policy.
:vartype last_modified: ~datetime.datetime
:param description: A description for the Policy.
:type description: str
:param options: The Key Policy options.
:type options: list[~azure.mgmt.media.models.ContentKeyPolicyOption]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'policy_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'policy_id': {'key': 'properties.policyId', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'options': {'key': 'properties.options', 'type': '[ContentKeyPolicyOption]'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicy, self).__init__(**kwargs)
self.system_data = None
self.policy_id = None
self.created = None
self.last_modified = None
self.description = kwargs.get('description', None)
self.options = kwargs.get('options', None)
class ContentKeyPolicyConfiguration(msrest.serialization.Model):
"""Base class for Content Key Policy configuration. A derived class must be used to create a configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyClearKeyConfiguration, ContentKeyPolicyFairPlayConfiguration, ContentKeyPolicyPlayReadyConfiguration, ContentKeyPolicyUnknownConfiguration, ContentKeyPolicyWidevineConfiguration.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyClearKeyConfiguration': 'ContentKeyPolicyClearKeyConfiguration', '#Microsoft.Media.ContentKeyPolicyFairPlayConfiguration': 'ContentKeyPolicyFairPlayConfiguration', '#Microsoft.Media.ContentKeyPolicyPlayReadyConfiguration': 'ContentKeyPolicyPlayReadyConfiguration', '#Microsoft.Media.ContentKeyPolicyUnknownConfiguration': 'ContentKeyPolicyUnknownConfiguration', '#Microsoft.Media.ContentKeyPolicyWidevineConfiguration': 'ContentKeyPolicyWidevineConfiguration'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyConfiguration, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyClearKeyConfiguration(ContentKeyPolicyConfiguration):
"""Represents a configuration for non-DRM keys.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyClearKeyConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyClearKeyConfiguration' # type: str
class ContentKeyPolicyCollection(msrest.serialization.Model):
"""A collection of ContentKeyPolicy items.
:param value: A collection of ContentKeyPolicy items.
:type value: list[~azure.mgmt.media.models.ContentKeyPolicy]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ContentKeyPolicy]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class ContentKeyPolicyFairPlayConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for FairPlay licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param ask: Required. The key that must be used as FairPlay Application Secret key.
:type ask: bytearray
:param fair_play_pfx_password: Required. The password encrypting FairPlay certificate in PKCS
12 (pfx) format.
:type fair_play_pfx_password: str
:param fair_play_pfx: Required. The Base64 representation of FairPlay certificate in PKCS 12
(pfx) format (including private key).
:type fair_play_pfx: str
:param rental_and_lease_key_type: Required. The rental and lease key type. Possible values
include: "Unknown", "Undefined", "DualExpiry", "PersistentUnlimited", "PersistentLimited".
:type rental_and_lease_key_type: str or
~azure.mgmt.media.models.ContentKeyPolicyFairPlayRentalAndLeaseKeyType
:param rental_duration: Required. The rental duration. Must be greater than or equal to 0.
:type rental_duration: long
:param offline_rental_configuration: Offline rental policy.
:type offline_rental_configuration:
~azure.mgmt.media.models.ContentKeyPolicyFairPlayOfflineRentalConfiguration
"""
_validation = {
'odata_type': {'required': True},
'ask': {'required': True},
'fair_play_pfx_password': {'required': True},
'fair_play_pfx': {'required': True},
'rental_and_lease_key_type': {'required': True},
'rental_duration': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'ask': {'key': 'ask', 'type': 'bytearray'},
'fair_play_pfx_password': {'key': 'fairPlayPfxPassword', 'type': 'str'},
'fair_play_pfx': {'key': 'fairPlayPfx', 'type': 'str'},
'rental_and_lease_key_type': {'key': 'rentalAndLeaseKeyType', 'type': 'str'},
'rental_duration': {'key': 'rentalDuration', 'type': 'long'},
'offline_rental_configuration': {'key': 'offlineRentalConfiguration', 'type': 'ContentKeyPolicyFairPlayOfflineRentalConfiguration'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyFairPlayConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyFairPlayConfiguration' # type: str
self.ask = kwargs['ask']
self.fair_play_pfx_password = kwargs['fair_play_pfx_password']
self.fair_play_pfx = kwargs['fair_play_pfx']
self.rental_and_lease_key_type = kwargs['rental_and_lease_key_type']
self.rental_duration = kwargs['rental_duration']
self.offline_rental_configuration = kwargs.get('offline_rental_configuration', None)
class ContentKeyPolicyFairPlayOfflineRentalConfiguration(msrest.serialization.Model):
"""ContentKeyPolicyFairPlayOfflineRentalConfiguration.
All required parameters must be populated in order to send to Azure.
:param playback_duration_seconds: Required. Playback duration.
:type playback_duration_seconds: long
:param storage_duration_seconds: Required. Storage duration.
:type storage_duration_seconds: long
"""
_validation = {
'playback_duration_seconds': {'required': True},
'storage_duration_seconds': {'required': True},
}
_attribute_map = {
'playback_duration_seconds': {'key': 'playbackDurationSeconds', 'type': 'long'},
'storage_duration_seconds': {'key': 'storageDurationSeconds', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyFairPlayOfflineRentalConfiguration, self).__init__(**kwargs)
self.playback_duration_seconds = kwargs['playback_duration_seconds']
self.storage_duration_seconds = kwargs['storage_duration_seconds']
class ContentKeyPolicyRestriction(msrest.serialization.Model):
"""Base class for Content Key Policy restrictions. A derived class must be used to create a restriction.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyOpenRestriction, ContentKeyPolicyTokenRestriction, ContentKeyPolicyUnknownRestriction.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyOpenRestriction': 'ContentKeyPolicyOpenRestriction', '#Microsoft.Media.ContentKeyPolicyTokenRestriction': 'ContentKeyPolicyTokenRestriction', '#Microsoft.Media.ContentKeyPolicyUnknownRestriction': 'ContentKeyPolicyUnknownRestriction'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRestriction, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyOpenRestriction(ContentKeyPolicyRestriction):
"""Represents an open restriction. License or key will be delivered on every request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyOpenRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyOpenRestriction' # type: str
class ContentKeyPolicyOption(msrest.serialization.Model):
"""Represents a policy option.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar policy_option_id: The legacy Policy Option ID.
:vartype policy_option_id: str
:param name: The Policy Option description.
:type name: str
:param configuration: Required. The key delivery configuration.
:type configuration: ~azure.mgmt.media.models.ContentKeyPolicyConfiguration
:param restriction: Required. The requirements that must be met to deliver keys with this
configuration.
:type restriction: ~azure.mgmt.media.models.ContentKeyPolicyRestriction
"""
_validation = {
'policy_option_id': {'readonly': True},
'configuration': {'required': True},
'restriction': {'required': True},
}
_attribute_map = {
'policy_option_id': {'key': 'policyOptionId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ContentKeyPolicyConfiguration'},
'restriction': {'key': 'restriction', 'type': 'ContentKeyPolicyRestriction'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyOption, self).__init__(**kwargs)
self.policy_option_id = None
self.name = kwargs.get('name', None)
self.configuration = kwargs['configuration']
self.restriction = kwargs['restriction']
class ContentKeyPolicyPlayReadyConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for PlayReady licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param licenses: Required. The PlayReady licenses.
:type licenses: list[~azure.mgmt.media.models.ContentKeyPolicyPlayReadyLicense]
:param response_custom_data: The custom response data.
:type response_custom_data: str
"""
_validation = {
'odata_type': {'required': True},
'licenses': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'licenses': {'key': 'licenses', 'type': '[ContentKeyPolicyPlayReadyLicense]'},
'response_custom_data': {'key': 'responseCustomData', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyConfiguration' # type: str
self.licenses = kwargs['licenses']
self.response_custom_data = kwargs.get('response_custom_data', None)
class ContentKeyPolicyPlayReadyContentKeyLocation(msrest.serialization.Model):
"""Base class for content key ID location. A derived class must be used to represent the location.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader': 'ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader', '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier': 'ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentKeyLocation, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader(ContentKeyPolicyPlayReadyContentKeyLocation):
"""Specifies that the content key ID is in the PlayReady header.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader' # type: str
class ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier(ContentKeyPolicyPlayReadyContentKeyLocation):
"""Specifies that the content key ID is specified in the PlayReady configuration.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param key_id: Required. The content key ID.
:type key_id: str
"""
_validation = {
'odata_type': {'required': True},
'key_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'key_id': {'key': 'keyId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier' # type: str
self.key_id = kwargs['key_id']
class ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction(msrest.serialization.Model):
"""Configures the Explicit Analog Television Output Restriction control bits. For further details see the PlayReady Compliance Rules.
All required parameters must be populated in order to send to Azure.
:param best_effort: Required. Indicates whether this restriction is enforced on a Best Effort
basis.
:type best_effort: bool
:param configuration_data: Required. Configures the restriction control bits. Must be between 0
and 3 inclusive.
:type configuration_data: int
"""
_validation = {
'best_effort': {'required': True},
'configuration_data': {'required': True},
}
_attribute_map = {
'best_effort': {'key': 'bestEffort', 'type': 'bool'},
'configuration_data': {'key': 'configurationData', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction, self).__init__(**kwargs)
self.best_effort = kwargs['best_effort']
self.configuration_data = kwargs['configuration_data']
class ContentKeyPolicyPlayReadyLicense(msrest.serialization.Model):
"""The PlayReady license.
All required parameters must be populated in order to send to Azure.
:param allow_test_devices: Required. A flag indicating whether test devices can use the
license.
:type allow_test_devices: bool
:param begin_date: The begin date of license.
:type begin_date: ~datetime.datetime
:param expiration_date: The expiration date of license.
:type expiration_date: ~datetime.datetime
:param relative_begin_date: The relative begin date of license.
:type relative_begin_date: ~datetime.timedelta
:param relative_expiration_date: The relative expiration date of license.
:type relative_expiration_date: ~datetime.timedelta
:param grace_period: The grace period of license.
:type grace_period: ~datetime.timedelta
:param play_right: The license PlayRight.
:type play_right: ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyPlayRight
:param license_type: Required. The license type. Possible values include: "Unknown",
"NonPersistent", "Persistent".
:type license_type: str or ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyLicenseType
:param content_key_location: Required. The content key location.
:type content_key_location:
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyContentKeyLocation
:param content_type: Required. The PlayReady content type. Possible values include: "Unknown",
"Unspecified", "UltraVioletDownload", "UltraVioletStreaming".
:type content_type: str or ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyContentType
"""
_validation = {
'allow_test_devices': {'required': True},
'license_type': {'required': True},
'content_key_location': {'required': True},
'content_type': {'required': True},
}
_attribute_map = {
'allow_test_devices': {'key': 'allowTestDevices', 'type': 'bool'},
'begin_date': {'key': 'beginDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'relative_begin_date': {'key': 'relativeBeginDate', 'type': 'duration'},
'relative_expiration_date': {'key': 'relativeExpirationDate', 'type': 'duration'},
'grace_period': {'key': 'gracePeriod', 'type': 'duration'},
'play_right': {'key': 'playRight', 'type': 'ContentKeyPolicyPlayReadyPlayRight'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'content_key_location': {'key': 'contentKeyLocation', 'type': 'ContentKeyPolicyPlayReadyContentKeyLocation'},
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyLicense, self).__init__(**kwargs)
self.allow_test_devices = kwargs['allow_test_devices']
self.begin_date = kwargs.get('begin_date', None)
self.expiration_date = kwargs.get('expiration_date', None)
self.relative_begin_date = kwargs.get('relative_begin_date', None)
self.relative_expiration_date = kwargs.get('relative_expiration_date', None)
self.grace_period = kwargs.get('grace_period', None)
self.play_right = kwargs.get('play_right', None)
self.license_type = kwargs['license_type']
self.content_key_location = kwargs['content_key_location']
self.content_type = kwargs['content_type']
class ContentKeyPolicyPlayReadyPlayRight(msrest.serialization.Model):
"""Configures the Play Right in the PlayReady license.
All required parameters must be populated in order to send to Azure.
:param first_play_expiration: The amount of time that the license is valid after the license is
first used to play content.
:type first_play_expiration: ~datetime.timedelta
:param scms_restriction: Configures the Serial Copy Management System (SCMS) in the license.
Must be between 0 and 3 inclusive.
:type scms_restriction: int
:param agc_and_color_stripe_restriction: Configures Automatic Gain Control (AGC) and Color
Stripe in the license. Must be between 0 and 3 inclusive.
:type agc_and_color_stripe_restriction: int
:param explicit_analog_television_output_restriction: Configures the Explicit Analog Television
Output Restriction in the license. Configuration data must be between 0 and 3 inclusive.
:type explicit_analog_television_output_restriction:
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction
:param digital_video_only_content_restriction: Required. Enables the Image Constraint For
Analog Component Video Restriction in the license.
:type digital_video_only_content_restriction: bool
:param image_constraint_for_analog_component_video_restriction: Required. Enables the Image
Constraint For Analog Component Video Restriction in the license.
:type image_constraint_for_analog_component_video_restriction: bool
:param image_constraint_for_analog_computer_monitor_restriction: Required. Enables the Image
Constraint For Analog Component Video Restriction in the license.
:type image_constraint_for_analog_computer_monitor_restriction: bool
:param allow_passing_video_content_to_unknown_output: Required. Configures Unknown output
handling settings of the license. Possible values include: "Unknown", "NotAllowed", "Allowed",
"AllowedWithVideoConstriction".
:type allow_passing_video_content_to_unknown_output: str or
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyUnknownOutputPassingOption
:param uncompressed_digital_video_opl: Specifies the output protection level for uncompressed
digital video.
:type uncompressed_digital_video_opl: int
:param compressed_digital_video_opl: Specifies the output protection level for compressed
digital video.
:type compressed_digital_video_opl: int
:param analog_video_opl: Specifies the output protection level for compressed digital audio.
:type analog_video_opl: int
:param compressed_digital_audio_opl: Specifies the output protection level for compressed
digital audio.
:type compressed_digital_audio_opl: int
:param uncompressed_digital_audio_opl: Specifies the output protection level for uncompressed
digital audio.
:type uncompressed_digital_audio_opl: int
"""
_validation = {
'digital_video_only_content_restriction': {'required': True},
'image_constraint_for_analog_component_video_restriction': {'required': True},
'image_constraint_for_analog_computer_monitor_restriction': {'required': True},
'allow_passing_video_content_to_unknown_output': {'required': True},
}
_attribute_map = {
'first_play_expiration': {'key': 'firstPlayExpiration', 'type': 'duration'},
'scms_restriction': {'key': 'scmsRestriction', 'type': 'int'},
'agc_and_color_stripe_restriction': {'key': 'agcAndColorStripeRestriction', 'type': 'int'},
'explicit_analog_television_output_restriction': {'key': 'explicitAnalogTelevisionOutputRestriction', 'type': 'ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction'},
'digital_video_only_content_restriction': {'key': 'digitalVideoOnlyContentRestriction', 'type': 'bool'},
'image_constraint_for_analog_component_video_restriction': {'key': 'imageConstraintForAnalogComponentVideoRestriction', 'type': 'bool'},
'image_constraint_for_analog_computer_monitor_restriction': {'key': 'imageConstraintForAnalogComputerMonitorRestriction', 'type': 'bool'},
'allow_passing_video_content_to_unknown_output': {'key': 'allowPassingVideoContentToUnknownOutput', 'type': 'str'},
'uncompressed_digital_video_opl': {'key': 'uncompressedDigitalVideoOpl', 'type': 'int'},
'compressed_digital_video_opl': {'key': 'compressedDigitalVideoOpl', 'type': 'int'},
'analog_video_opl': {'key': 'analogVideoOpl', 'type': 'int'},
'compressed_digital_audio_opl': {'key': 'compressedDigitalAudioOpl', 'type': 'int'},
'uncompressed_digital_audio_opl': {'key': 'uncompressedDigitalAudioOpl', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyPlayRight, self).__init__(**kwargs)
self.first_play_expiration = kwargs.get('first_play_expiration', None)
self.scms_restriction = kwargs.get('scms_restriction', None)
self.agc_and_color_stripe_restriction = kwargs.get('agc_and_color_stripe_restriction', None)
self.explicit_analog_television_output_restriction = kwargs.get('explicit_analog_television_output_restriction', None)
self.digital_video_only_content_restriction = kwargs['digital_video_only_content_restriction']
self.image_constraint_for_analog_component_video_restriction = kwargs['image_constraint_for_analog_component_video_restriction']
self.image_constraint_for_analog_computer_monitor_restriction = kwargs['image_constraint_for_analog_computer_monitor_restriction']
self.allow_passing_video_content_to_unknown_output = kwargs['allow_passing_video_content_to_unknown_output']
self.uncompressed_digital_video_opl = kwargs.get('uncompressed_digital_video_opl', None)
self.compressed_digital_video_opl = kwargs.get('compressed_digital_video_opl', None)
self.analog_video_opl = kwargs.get('analog_video_opl', None)
self.compressed_digital_audio_opl = kwargs.get('compressed_digital_audio_opl', None)
self.uncompressed_digital_audio_opl = kwargs.get('uncompressed_digital_audio_opl', None)
class ContentKeyPolicyProperties(msrest.serialization.Model):
"""The properties of the Content Key Policy.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar policy_id: The legacy Policy ID.
:vartype policy_id: str
:ivar created: The creation date of the Policy.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Policy.
:vartype last_modified: ~datetime.datetime
:param description: A description for the Policy.
:type description: str
:param options: Required. The Key Policy options.
:type options: list[~azure.mgmt.media.models.ContentKeyPolicyOption]
"""
_validation = {
'policy_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'options': {'required': True},
}
_attribute_map = {
'policy_id': {'key': 'policyId', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'options': {'key': 'options', 'type': '[ContentKeyPolicyOption]'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyProperties, self).__init__(**kwargs)
self.policy_id = None
self.created = None
self.last_modified = None
self.description = kwargs.get('description', None)
self.options = kwargs['options']
class ContentKeyPolicyRestrictionTokenKey(msrest.serialization.Model):
"""Base class for Content Key Policy key for token validation. A derived class must be used to create a token key.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyRsaTokenKey, ContentKeyPolicySymmetricTokenKey, ContentKeyPolicyX509CertificateTokenKey.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyRsaTokenKey': 'ContentKeyPolicyRsaTokenKey', '#Microsoft.Media.ContentKeyPolicySymmetricTokenKey': 'ContentKeyPolicySymmetricTokenKey', '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey': 'ContentKeyPolicyX509CertificateTokenKey'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRestrictionTokenKey, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyRsaTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a RSA key for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param exponent: Required. The RSA Parameter exponent.
:type exponent: bytearray
:param modulus: Required. The RSA Parameter modulus.
:type modulus: bytearray
"""
_validation = {
'odata_type': {'required': True},
'exponent': {'required': True},
'modulus': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'exponent': {'key': 'exponent', 'type': 'bytearray'},
'modulus': {'key': 'modulus', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRsaTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyRsaTokenKey' # type: str
self.exponent = kwargs['exponent']
self.modulus = kwargs['modulus']
class ContentKeyPolicySymmetricTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a symmetric key for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param key_value: Required. The key value of the key.
:type key_value: bytearray
"""
_validation = {
'odata_type': {'required': True},
'key_value': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'key_value': {'key': 'keyValue', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicySymmetricTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicySymmetricTokenKey' # type: str
self.key_value = kwargs['key_value']
class ContentKeyPolicyTokenClaim(msrest.serialization.Model):
"""Represents a token claim.
:param claim_type: Token claim type.
:type claim_type: str
:param claim_value: Token claim value.
:type claim_value: str
"""
_attribute_map = {
'claim_type': {'key': 'claimType', 'type': 'str'},
'claim_value': {'key': 'claimValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyTokenClaim, self).__init__(**kwargs)
self.claim_type = kwargs.get('claim_type', None)
self.claim_value = kwargs.get('claim_value', None)
class ContentKeyPolicyTokenRestriction(ContentKeyPolicyRestriction):
"""Represents a token restriction. Provided token must match these requirements for successful license or key delivery.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param issuer: Required. The token issuer.
:type issuer: str
:param audience: Required. The audience for the token.
:type audience: str
:param primary_verification_key: Required. The primary verification key.
:type primary_verification_key: ~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenKey
:param alternate_verification_keys: A list of alternative verification keys.
:type alternate_verification_keys:
list[~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenKey]
:param required_claims: A list of required token claims.
:type required_claims: list[~azure.mgmt.media.models.ContentKeyPolicyTokenClaim]
:param restriction_token_type: Required. The type of token. Possible values include: "Unknown",
"Swt", "Jwt".
:type restriction_token_type: str or
~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenType
:param open_id_connect_discovery_document: The OpenID connect discovery document.
:type open_id_connect_discovery_document: str
"""
_validation = {
'odata_type': {'required': True},
'issuer': {'required': True},
'audience': {'required': True},
'primary_verification_key': {'required': True},
'restriction_token_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'audience': {'key': 'audience', 'type': 'str'},
'primary_verification_key': {'key': 'primaryVerificationKey', 'type': 'ContentKeyPolicyRestrictionTokenKey'},
'alternate_verification_keys': {'key': 'alternateVerificationKeys', 'type': '[ContentKeyPolicyRestrictionTokenKey]'},
'required_claims': {'key': 'requiredClaims', 'type': '[ContentKeyPolicyTokenClaim]'},
'restriction_token_type': {'key': 'restrictionTokenType', 'type': 'str'},
'open_id_connect_discovery_document': {'key': 'openIdConnectDiscoveryDocument', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyTokenRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyTokenRestriction' # type: str
self.issuer = kwargs['issuer']
self.audience = kwargs['audience']
self.primary_verification_key = kwargs['primary_verification_key']
self.alternate_verification_keys = kwargs.get('alternate_verification_keys', None)
self.required_claims = kwargs.get('required_claims', None)
self.restriction_token_type = kwargs['restriction_token_type']
self.open_id_connect_discovery_document = kwargs.get('open_id_connect_discovery_document', None)
class ContentKeyPolicyUnknownConfiguration(ContentKeyPolicyConfiguration):
"""Represents a ContentKeyPolicyConfiguration that is unavailable in the current API version.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyUnknownConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyUnknownConfiguration' # type: str
class ContentKeyPolicyUnknownRestriction(ContentKeyPolicyRestriction):
"""Represents a ContentKeyPolicyRestriction that is unavailable in the current API version.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyUnknownRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyUnknownRestriction' # type: str
class ContentKeyPolicyWidevineConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for Widevine licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param widevine_template: Required. The Widevine template.
:type widevine_template: str
"""
_validation = {
'odata_type': {'required': True},
'widevine_template': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'widevine_template': {'key': 'widevineTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyWidevineConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyWidevineConfiguration' # type: str
self.widevine_template = kwargs['widevine_template']
class ContentKeyPolicyX509CertificateTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a certificate for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param raw_body: Required. The raw data field of a certificate in PKCS 12 format
(X509Certificate2 in .NET).
:type raw_body: bytearray
"""
_validation = {
'odata_type': {'required': True},
'raw_body': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'raw_body': {'key': 'rawBody', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyX509CertificateTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey' # type: str
self.raw_body = kwargs['raw_body']
class CopyAudio(Codec):
"""A codec flag, which tells the encoder to copy the input audio bitstream.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CopyAudio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.CopyAudio' # type: str
class CopyVideo(Codec):
"""A codec flag, which tells the encoder to copy the input video bitstream without re-encoding.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CopyVideo, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.CopyVideo' # type: str
class CrossSiteAccessPolicies(msrest.serialization.Model):
"""The client access policy.
:param client_access_policy: The content of clientaccesspolicy.xml used by Silverlight.
:type client_access_policy: str
:param cross_domain_policy: The content of crossdomain.xml used by Silverlight.
:type cross_domain_policy: str
"""
_attribute_map = {
'client_access_policy': {'key': 'clientAccessPolicy', 'type': 'str'},
'cross_domain_policy': {'key': 'crossDomainPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CrossSiteAccessPolicies, self).__init__(**kwargs)
self.client_access_policy = kwargs.get('client_access_policy', None)
self.cross_domain_policy = kwargs.get('cross_domain_policy', None)
class DefaultKey(msrest.serialization.Model):
"""Class to specify properties of default content key for each encryption scheme.
:param label: Label can be used to specify Content Key when creating a Streaming Locator.
:type label: str
:param policy_name: Policy used by Default Key.
:type policy_name: str
"""
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DefaultKey, self).__init__(**kwargs)
self.label = kwargs.get('label', None)
self.policy_name = kwargs.get('policy_name', None)
class Deinterlace(msrest.serialization.Model):
"""Describes the de-interlacing settings.
:param parity: The field parity for de-interlacing, defaults to Auto. Possible values include:
"Auto", "TopFieldFirst", "BottomFieldFirst".
:type parity: str or ~azure.mgmt.media.models.DeinterlaceParity
:param mode: The deinterlacing mode. Defaults to AutoPixelAdaptive. Possible values include:
"Off", "AutoPixelAdaptive".
:type mode: str or ~azure.mgmt.media.models.DeinterlaceMode
"""
_attribute_map = {
'parity': {'key': 'parity', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Deinterlace, self).__init__(**kwargs)
self.parity = kwargs.get('parity', None)
self.mode = kwargs.get('mode', None)
class EdgePolicies(msrest.serialization.Model):
"""EdgePolicies.
:param usage_data_collection_policy:
:type usage_data_collection_policy: ~azure.mgmt.media.models.EdgeUsageDataCollectionPolicy
"""
_attribute_map = {
'usage_data_collection_policy': {'key': 'usageDataCollectionPolicy', 'type': 'EdgeUsageDataCollectionPolicy'},
}
def __init__(
self,
**kwargs
):
super(EdgePolicies, self).__init__(**kwargs)
self.usage_data_collection_policy = kwargs.get('usage_data_collection_policy', None)
class EdgeUsageDataCollectionPolicy(msrest.serialization.Model):
"""EdgeUsageDataCollectionPolicy.
:param data_collection_frequency: Usage data collection frequency in ISO 8601 duration format
e.g. PT10M , PT5H.
:type data_collection_frequency: str
:param data_reporting_frequency: Usage data reporting frequency in ISO 8601 duration format
e.g. PT10M , PT5H.
:type data_reporting_frequency: str
:param max_allowed_unreported_usage_duration: Maximum time for which the functionality of the
device will not be hampered for not reporting the usage data.
:type max_allowed_unreported_usage_duration: str
:param event_hub_details: Details of Event Hub where the usage will be reported.
:type event_hub_details: ~azure.mgmt.media.models.EdgeUsageDataEventHub
"""
_attribute_map = {
'data_collection_frequency': {'key': 'dataCollectionFrequency', 'type': 'str'},
'data_reporting_frequency': {'key': 'dataReportingFrequency', 'type': 'str'},
'max_allowed_unreported_usage_duration': {'key': 'maxAllowedUnreportedUsageDuration', 'type': 'str'},
'event_hub_details': {'key': 'eventHubDetails', 'type': 'EdgeUsageDataEventHub'},
}
def __init__(
self,
**kwargs
):
super(EdgeUsageDataCollectionPolicy, self).__init__(**kwargs)
self.data_collection_frequency = kwargs.get('data_collection_frequency', None)
self.data_reporting_frequency = kwargs.get('data_reporting_frequency', None)
self.max_allowed_unreported_usage_duration = kwargs.get('max_allowed_unreported_usage_duration', None)
self.event_hub_details = kwargs.get('event_hub_details', None)
class EdgeUsageDataEventHub(msrest.serialization.Model):
"""EdgeUsageDataEventHub.
:param name: Name of the Event Hub where usage will be reported.
:type name: str
:param namespace: Namespace of the Event Hub where usage will be reported.
:type namespace: str
:param token: SAS token needed to interact with Event Hub.
:type token: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeUsageDataEventHub, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.namespace = kwargs.get('namespace', None)
self.token = kwargs.get('token', None)
class EnabledProtocols(msrest.serialization.Model):
"""Class to specify which protocols are enabled.
All required parameters must be populated in order to send to Azure.
:param download: Required. Enable Download protocol or not.
:type download: bool
:param dash: Required. Enable DASH protocol or not.
:type dash: bool
:param hls: Required. Enable HLS protocol or not.
:type hls: bool
:param smooth_streaming: Required. Enable SmoothStreaming protocol or not.
:type smooth_streaming: bool
"""
_validation = {
'download': {'required': True},
'dash': {'required': True},
'hls': {'required': True},
'smooth_streaming': {'required': True},
}
_attribute_map = {
'download': {'key': 'download', 'type': 'bool'},
'dash': {'key': 'dash', 'type': 'bool'},
'hls': {'key': 'hls', 'type': 'bool'},
'smooth_streaming': {'key': 'smoothStreaming', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(EnabledProtocols, self).__init__(**kwargs)
self.download = kwargs['download']
self.dash = kwargs['dash']
self.hls = kwargs['hls']
self.smooth_streaming = kwargs['smooth_streaming']
class EntityNameAvailabilityCheckOutput(msrest.serialization.Model):
"""The response from the check name availability request.
All required parameters must be populated in order to send to Azure.
:param name_available: Required. Specifies if the name is available.
:type name_available: bool
:param reason: Specifies the reason if the name is not available.
:type reason: str
:param message: Specifies the detailed reason if the name is not available.
:type message: str
"""
_validation = {
'name_available': {'required': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntityNameAvailabilityCheckOutput, self).__init__(**kwargs)
self.name_available = kwargs['name_available']
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class EnvelopeEncryption(msrest.serialization.Model):
"""Class for EnvelopeEncryption encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param custom_key_acquisition_url_template: Template for the URL of the custom service
delivering keys to end user players. Not required when using Azure Media Services for issuing
keys. The template supports replaceable tokens that the service will update at runtime with
the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of
identifier of the key being requested.
:type custom_key_acquisition_url_template: str
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'custom_key_acquisition_url_template': {'key': 'customKeyAcquisitionUrlTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnvelopeEncryption, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.custom_key_acquisition_url_template = kwargs.get('custom_key_acquisition_url_template', None)
class FaceDetectorPreset(Preset):
"""Describes all the settings to be used when analyzing a video in order to detect (and optionally redact) all the faces present.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param resolution: Specifies the maximum resolution at which your video is analyzed. The
default behavior is "SourceResolution," which will keep the input video at its original
resolution when analyzed. Using "StandardDefinition" will resize input videos to standard
definition while preserving the appropriate aspect ratio. It will only resize if the video is
of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before
processing. Switching to "StandardDefinition" will reduce the time it takes to process high
resolution video. It may also reduce the cost of using this component (see
https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details).
However, faces that end up being too small in the resized video may not be detected. Possible
values include: "SourceResolution", "StandardDefinition".
:type resolution: str or ~azure.mgmt.media.models.AnalysisResolution
:param mode: This mode provides the ability to choose between the following settings: 1)
Analyze - For detection only.This mode generates a metadata JSON file marking appearances of
faces throughout the video.Where possible, appearances of the same person are assigned the same
ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass
process, allowing for selective redaction of a subset of detected faces.It takes in the
metadata file from a prior analyze pass, along with the source video, and a user-selected
subset of IDs that require redaction. Possible values include: "Analyze", "Redact", "Combined".
:type mode: str or ~azure.mgmt.media.models.FaceRedactorMode
:param blur_type: Blur type. Possible values include: "Box", "Low", "Med", "High", "Black".
:type blur_type: str or ~azure.mgmt.media.models.BlurType
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'resolution': {'key': 'resolution', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'blur_type': {'key': 'blurType', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(FaceDetectorPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FaceDetectorPreset' # type: str
self.resolution = kwargs.get('resolution', None)
self.mode = kwargs.get('mode', None)
self.blur_type = kwargs.get('blur_type', None)
self.experimental_options = kwargs.get('experimental_options', None)
class Filters(msrest.serialization.Model):
"""Describes all the filtering operations, such as de-interlacing, rotation etc. that are to be applied to the input media before encoding.
:param deinterlace: The de-interlacing settings.
:type deinterlace: ~azure.mgmt.media.models.Deinterlace
:param rotation: The rotation, if any, to be applied to the input video, before it is encoded.
Default is Auto. Possible values include: "Auto", "None", "Rotate0", "Rotate90", "Rotate180",
"Rotate270".
:type rotation: str or ~azure.mgmt.media.models.Rotation
:param crop: The parameters for the rectangular window with which to crop the input video.
:type crop: ~azure.mgmt.media.models.Rectangle
:param overlays: The properties of overlays to be applied to the input video. These could be
audio, image or video overlays.
:type overlays: list[~azure.mgmt.media.models.Overlay]
"""
_attribute_map = {
'deinterlace': {'key': 'deinterlace', 'type': 'Deinterlace'},
'rotation': {'key': 'rotation', 'type': 'str'},
'crop': {'key': 'crop', 'type': 'Rectangle'},
'overlays': {'key': 'overlays', 'type': '[Overlay]'},
}
def __init__(
self,
**kwargs
):
super(Filters, self).__init__(**kwargs)
self.deinterlace = kwargs.get('deinterlace', None)
self.rotation = kwargs.get('rotation', None)
self.crop = kwargs.get('crop', None)
self.overlays = kwargs.get('overlays', None)
class FilterTrackPropertyCondition(msrest.serialization.Model):
"""The class to specify one track property condition.
All required parameters must be populated in order to send to Azure.
:param property: Required. The track property type. Possible values include: "Unknown", "Type",
"Name", "Language", "FourCC", "Bitrate".
:type property: str or ~azure.mgmt.media.models.FilterTrackPropertyType
:param value: Required. The track property value.
:type value: str
:param operation: Required. The track property condition operation. Possible values include:
"Equal", "NotEqual".
:type operation: str or ~azure.mgmt.media.models.FilterTrackPropertyCompareOperation
"""
_validation = {
'property': {'required': True},
'value': {'required': True},
'operation': {'required': True},
}
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FilterTrackPropertyCondition, self).__init__(**kwargs)
self.property = kwargs['property']
self.value = kwargs['value']
self.operation = kwargs['operation']
class FilterTrackSelection(msrest.serialization.Model):
"""Representing a list of FilterTrackPropertyConditions to select a track. The filters are combined using a logical AND operation.
All required parameters must be populated in order to send to Azure.
:param track_selections: Required. The track selections.
:type track_selections: list[~azure.mgmt.media.models.FilterTrackPropertyCondition]
"""
_validation = {
'track_selections': {'required': True},
}
_attribute_map = {
'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'},
}
def __init__(
self,
**kwargs
):
super(FilterTrackSelection, self).__init__(**kwargs)
self.track_selections = kwargs['track_selections']
class FirstQuality(msrest.serialization.Model):
"""Filter First Quality.
All required parameters must be populated in order to send to Azure.
:param bitrate: Required. The first quality bitrate.
:type bitrate: int
"""
_validation = {
'bitrate': {'required': True},
}
_attribute_map = {
'bitrate': {'key': 'bitrate', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FirstQuality, self).__init__(**kwargs)
self.bitrate = kwargs['bitrate']
class Format(msrest.serialization.Model):
"""Base class for output.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ImageFormat, MultiBitrateFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'}
}
def __init__(
self,
**kwargs
):
super(Format, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.filename_pattern = kwargs['filename_pattern']
class InputDefinition(msrest.serialization.Model):
"""Base class for defining an input. Use sub classes of this class to specify tracks selections and related metadata.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FromAllInputFile, FromEachInputFile, InputFile.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'}
}
def __init__(
self,
**kwargs
):
super(InputDefinition, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.included_tracks = kwargs.get('included_tracks', None)
class FromAllInputFile(InputDefinition):
"""An InputDefinition that looks across all of the files provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a single track across a set of input files.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
def __init__(
self,
**kwargs
):
super(FromAllInputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FromAllInputFile' # type: str
class FromEachInputFile(InputDefinition):
"""An InputDefinition that looks at each input file provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each file given.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
def __init__(
self,
**kwargs
):
super(FromEachInputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FromEachInputFile' # type: str
class Layer(msrest.serialization.Model):
"""The encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by specifying a layer for each desired resolution. A layer represents the properties for the video or image at a resolution.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H265VideoLayer, JpgLayer, PngLayer, VideoLayer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'}
}
def __init__(
self,
**kwargs
):
super(Layer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.width = kwargs.get('width', None)
self.height = kwargs.get('height', None)
self.label = kwargs.get('label', None)
class VideoLayer(Layer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H264Layer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Whether or not adaptive B-frames are to be used when encoding this
layer. If not specified, the encoder will turn it on whenever the video profile permits its
use.
:type adaptive_b_frame: bool
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H264Layer': 'H264Layer'}
}
def __init__(
self,
**kwargs
):
super(VideoLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoLayer' # type: str
self.bitrate = kwargs['bitrate']
self.max_bitrate = kwargs.get('max_bitrate', None)
self.b_frames = kwargs.get('b_frames', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.slices = kwargs.get('slices', None)
self.adaptive_b_frame = kwargs.get('adaptive_b_frame', None)
class H264Layer(VideoLayer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.264 video codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Whether or not adaptive B-frames are to be used when encoding this
layer. If not specified, the encoder will turn it on whenever the video profile permits its
use.
:type adaptive_b_frame: bool
:param profile: We currently support Baseline, Main, High, High422, High444. Default is Auto.
Possible values include: "Auto", "Baseline", "Main", "High", "High422", "High444".
:type profile: str or ~azure.mgmt.media.models.H264VideoProfile
:param level: We currently support Level up to 6.2. The value can be Auto, or a number that
matches the H.264 profile. If not specified, the default is Auto, which lets the encoder choose
the Level that is appropriate for this layer.
:type level: str
:param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The
value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S).
:type buffer_window: ~datetime.timedelta
:param reference_frames: The number of reference frames to be used when encoding this layer. If
not specified, the encoder determines an appropriate number based on the encoder complexity
setting.
:type reference_frames: int
:param entropy_mode: The entropy mode to be used for this layer. If not specified, the encoder
chooses the mode that is appropriate for the profile and level. Possible values include:
"Cabac", "Cavlc".
:type entropy_mode: str or ~azure.mgmt.media.models.EntropyMode
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
'profile': {'key': 'profile', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'buffer_window': {'key': 'bufferWindow', 'type': 'duration'},
'reference_frames': {'key': 'referenceFrames', 'type': 'int'},
'entropy_mode': {'key': 'entropyMode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(H264Layer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H264Layer' # type: str
self.profile = kwargs.get('profile', None)
self.level = kwargs.get('level', None)
self.buffer_window = kwargs.get('buffer_window', None)
self.reference_frames = kwargs.get('reference_frames', None)
self.entropy_mode = kwargs.get('entropy_mode', None)
class Video(Codec):
"""Describes the basic properties for encoding the input video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H264Video, H265Video, Image.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image'}
}
def __init__(
self,
**kwargs
):
super(Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Video' # type: str
self.key_frame_interval = kwargs.get('key_frame_interval', None)
self.stretch_mode = kwargs.get('stretch_mode', None)
self.sync_mode = kwargs.get('sync_mode', None)
class H264Video(Video):
"""Describes all the properties for encoding a video with the H.264 codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param scene_change_detection: Whether or not the encoder should insert key frames at scene
changes. If not specified, the default is false. This flag should be set to true only when the
encoder is being configured to produce a single output video.
:type scene_change_detection: bool
:param complexity: Tells the encoder how to choose its encoding settings. The default value is
Balanced. Possible values include: "Speed", "Balanced", "Quality".
:type complexity: str or ~azure.mgmt.media.models.H264Complexity
:param layers: The collection of output H.264 layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.H264Layer]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'},
'complexity': {'key': 'complexity', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[H264Layer]'},
}
def __init__(
self,
**kwargs
):
super(H264Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H264Video' # type: str
self.scene_change_detection = kwargs.get('scene_change_detection', None)
self.complexity = kwargs.get('complexity', None)
self.layers = kwargs.get('layers', None)
class H265VideoLayer(Layer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H265Layer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this
value should be 3000000 This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when
encoding this layer. If not specified, the encoder will turn it on whenever the video profile
permits its use.
:type adaptive_b_frame: bool
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H265Layer': 'H265Layer'}
}
def __init__(
self,
**kwargs
):
super(H265VideoLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265VideoLayer' # type: str
self.bitrate = kwargs['bitrate']
self.max_bitrate = kwargs.get('max_bitrate', None)
self.b_frames = kwargs.get('b_frames', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.slices = kwargs.get('slices', None)
self.adaptive_b_frame = kwargs.get('adaptive_b_frame', None)
class H265Layer(H265VideoLayer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.265 video codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this
value should be 3000000 This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when
encoding this layer. If not specified, the encoder will turn it on whenever the video profile
permits its use.
:type adaptive_b_frame: bool
:param profile: We currently support Main. Default is Auto. Possible values include: "Auto",
"Main".
:type profile: str or ~azure.mgmt.media.models.H265VideoProfile
:param level: We currently support Level up to 6.2. The value can be Auto, or a number that
matches the H.265 profile. If not specified, the default is Auto, which lets the encoder choose
the Level that is appropriate for this layer.
:type level: str
:param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The
value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S).
:type buffer_window: ~datetime.timedelta
:param reference_frames: The number of reference frames to be used when encoding this layer. If
not specified, the encoder determines an appropriate number based on the encoder complexity
setting.
:type reference_frames: int
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
'profile': {'key': 'profile', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'buffer_window': {'key': 'bufferWindow', 'type': 'duration'},
'reference_frames': {'key': 'referenceFrames', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(H265Layer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265Layer' # type: str
self.profile = kwargs.get('profile', None)
self.level = kwargs.get('level', None)
self.buffer_window = kwargs.get('buffer_window', None)
self.reference_frames = kwargs.get('reference_frames', None)
class H265Video(Video):
"""Describes all the properties for encoding a video with the H.265 codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param scene_change_detection: Specifies whether or not the encoder should insert key frames at
scene changes. If not specified, the default is false. This flag should be set to true only
when the encoder is being configured to produce a single output video.
:type scene_change_detection: bool
:param complexity: Tells the encoder how to choose its encoding settings. Quality will provide
for a higher compression ratio but at a higher cost and longer compute time. Speed will
produce a relatively larger file but is faster and more economical. The default value is
Balanced. Possible values include: "Speed", "Balanced", "Quality".
:type complexity: str or ~azure.mgmt.media.models.H265Complexity
:param layers: The collection of output H.265 layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.H265Layer]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'},
'complexity': {'key': 'complexity', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[H265Layer]'},
}
def __init__(
self,
**kwargs
):
super(H265Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265Video' # type: str
self.scene_change_detection = kwargs.get('scene_change_detection', None)
self.complexity = kwargs.get('complexity', None)
self.layers = kwargs.get('layers', None)
class Hls(msrest.serialization.Model):
"""HTTP Live Streaming (HLS) packing setting for the live output.
:param fragments_per_ts_segment: The number of fragments in an HTTP Live Streaming (HLS) TS
segment in the output of the live event. This value does not affect the packing ratio for HLS
CMAF output.
:type fragments_per_ts_segment: int
"""
_attribute_map = {
'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Hls, self).__init__(**kwargs)
self.fragments_per_ts_segment = kwargs.get('fragments_per_ts_segment', None)
class Image(Video):
"""Describes the basic properties for generating thumbnails from the input video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JpgImage, PngImage.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. As the default value is 10%, it means if stream
has long duration, the first generated thumbnail might be far away from the one specified at
start time. Try to select reasonable value for Step if the first thumbnail is expected close to
start time, or set Range value at 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JpgImage': 'JpgImage', '#Microsoft.Media.PngImage': 'PngImage'}
}
def __init__(
self,
**kwargs
):
super(Image, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Image' # type: str
self.start = kwargs['start']
self.step = kwargs.get('step', None)
self.range = kwargs.get('range', None)
class ImageFormat(Format):
"""Describes the properties for an output image file.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JpgFormat, PngFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'}
}
def __init__(
self,
**kwargs
):
super(ImageFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ImageFormat' # type: str
class InputFile(InputDefinition):
"""An InputDefinition for a single file. TrackSelections are scoped to the file specified.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
:param filename: Name of the file that this input definition applies to.
:type filename: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
'filename': {'key': 'filename', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(InputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.InputFile' # type: str
self.filename = kwargs.get('filename', None)
class IPAccessControl(msrest.serialization.Model):
"""The IP access control.
:param allow: The IP allow list.
:type allow: list[~azure.mgmt.media.models.IPRange]
"""
_attribute_map = {
'allow': {'key': 'allow', 'type': '[IPRange]'},
}
def __init__(
self,
**kwargs
):
super(IPAccessControl, self).__init__(**kwargs)
self.allow = kwargs.get('allow', None)
class IPRange(msrest.serialization.Model):
"""The IP address range in the CIDR scheme.
:param name: The friendly name for the IP address range.
:type name: str
:param address: The IP address.
:type address: str
:param subnet_prefix_length: The subnet mask prefix length (see CIDR notation).
:type subnet_prefix_length: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'address': {'key': 'address', 'type': 'str'},
'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(IPRange, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.address = kwargs.get('address', None)
self.subnet_prefix_length = kwargs.get('subnet_prefix_length', None)
class Job(ProxyResource):
"""A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: The UTC date and time when the customer has created the Job, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype created: ~datetime.datetime
:ivar state: The current state of the job. Possible values include: "Canceled", "Canceling",
"Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:param description: Optional customer supplied description of the Job.
:type description: str
:param input: The inputs for the Job.
:type input: ~azure.mgmt.media.models.JobInput
:ivar last_modified: The UTC date and time when the customer has last updated the Job, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype last_modified: ~datetime.datetime
:param outputs: The outputs for the Job.
:type outputs: list[~azure.mgmt.media.models.JobOutput]
:param priority: Priority with which the job should be processed. Higher priority jobs are
processed before lower priority jobs. If not set, the default is normal. Possible values
include: "Low", "Normal", "High".
:type priority: str or ~azure.mgmt.media.models.Priority
:param correlation_data: Customer provided key, value pairs that will be returned in Job and
JobOutput state events.
:type correlation_data: dict[str, str]
:ivar start_time: The UTC date and time at which this Job began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job finished processing.
:vartype end_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
'state': {'readonly': True},
'last_modified': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'input': {'key': 'properties.input', 'type': 'JobInput'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'outputs': {'key': 'properties.outputs', 'type': '[JobOutput]'},
'priority': {'key': 'properties.priority', 'type': 'str'},
'correlation_data': {'key': 'properties.correlationData', 'type': '{str}'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(Job, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.state = None
self.description = kwargs.get('description', None)
self.input = kwargs.get('input', None)
self.last_modified = None
self.outputs = kwargs.get('outputs', None)
self.priority = kwargs.get('priority', None)
self.correlation_data = kwargs.get('correlation_data', None)
self.start_time = None
self.end_time = None
class JobCollection(msrest.serialization.Model):
"""A collection of Job items.
:param value: A collection of Job items.
:type value: list[~azure.mgmt.media.models.Job]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Job]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class JobError(msrest.serialization.Model):
"""Details of JobOutput errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code describing the error. Possible values include: "ServiceError",
"ServiceTransientError", "DownloadNotAccessible", "DownloadTransientError",
"UploadNotAccessible", "UploadTransientError", "ConfigurationUnsupported", "ContentMalformed",
"ContentUnsupported".
:vartype code: str or ~azure.mgmt.media.models.JobErrorCode
:ivar message: A human-readable language-dependent representation of the error.
:vartype message: str
:ivar category: Helps with categorization of errors. Possible values include: "Service",
"Download", "Upload", "Configuration", "Content".
:vartype category: str or ~azure.mgmt.media.models.JobErrorCategory
:ivar retry: Indicates that it may be possible to retry the Job. If retry is unsuccessful,
please contact Azure support via Azure Portal. Possible values include: "DoNotRetry",
"MayRetry".
:vartype retry: str or ~azure.mgmt.media.models.JobRetry
:ivar details: An array of details about specific errors that led to this reported error.
:vartype details: list[~azure.mgmt.media.models.JobErrorDetail]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'category': {'readonly': True},
'retry': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'retry': {'key': 'retry', 'type': 'str'},
'details': {'key': 'details', 'type': '[JobErrorDetail]'},
}
def __init__(
self,
**kwargs
):
super(JobError, self).__init__(**kwargs)
self.code = None
self.message = None
self.category = None
self.retry = None
self.details = None
class JobErrorDetail(msrest.serialization.Model):
"""Details of JobOutput errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Code describing the error detail.
:vartype code: str
:ivar message: A human-readable representation of the error.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
class JobInput(msrest.serialization.Model):
"""Base class for inputs to a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobInputClip, JobInputSequence, JobInputs.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputSequence': 'JobInputSequence', '#Microsoft.Media.JobInputs': 'JobInputs'}
}
def __init__(
self,
**kwargs
):
super(JobInput, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class JobInputClip(JobInput):
"""Represents input files for a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobInputAsset, JobInputHttp.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobInputAsset': 'JobInputAsset', '#Microsoft.Media.JobInputHttp': 'JobInputHttp'}
}
def __init__(
self,
**kwargs
):
super(JobInputClip, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputClip' # type: str
self.files = kwargs.get('files', None)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
self.label = kwargs.get('label', None)
self.input_definitions = kwargs.get('input_definitions', None)
class JobInputAsset(JobInputClip):
"""Represents an Asset for input into a Job.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
:param asset_name: Required. The name of the input Asset.
:type asset_name: str
"""
_validation = {
'odata_type': {'required': True},
'asset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
'asset_name': {'key': 'assetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobInputAsset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputAsset' # type: str
self.asset_name = kwargs['asset_name']
class JobInputHttp(JobInputClip):
"""Represents HTTPS job input.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
:param base_uri: Base URI for HTTPS job input. It will be concatenated with provided file
names. If no base uri is given, then the provided file list is assumed to be fully qualified
uris. Maximum length of 4000 characters.
:type base_uri: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
'base_uri': {'key': 'baseUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobInputHttp, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputHttp' # type: str
self.base_uri = kwargs.get('base_uri', None)
class JobInputs(JobInput):
"""Describes a list of inputs to a Job.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param inputs: List of inputs to a Job.
:type inputs: list[~azure.mgmt.media.models.JobInput]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[JobInput]'},
}
def __init__(
self,
**kwargs
):
super(JobInputs, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputs' # type: str
self.inputs = kwargs.get('inputs', None)
class JobInputSequence(JobInput):
"""A Sequence contains an ordered list of Clips where each clip is a JobInput. The Sequence will be treated as a single input.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param inputs: JobInputs that make up the timeline.
:type inputs: list[~azure.mgmt.media.models.JobInputClip]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[JobInputClip]'},
}
def __init__(
self,
**kwargs
):
super(JobInputSequence, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputSequence' # type: str
self.inputs = kwargs.get('inputs', None)
class JobOutput(msrest.serialization.Model):
"""Describes all the properties of a JobOutput.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobOutputAsset.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:ivar error: If the JobOutput is in the Error state, it contains the details of the error.
:vartype error: ~azure.mgmt.media.models.JobError
:ivar state: Describes the state of the JobOutput. Possible values include: "Canceled",
"Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:ivar progress: If the JobOutput is in a Processing state, this contains the Job completion
percentage. The value is an estimate and not intended to be used to predict Job completion
times. To determine if the JobOutput is complete, use the State property.
:vartype progress: int
:param label: A label that is assigned to a JobOutput in order to help uniquely identify it.
This is useful when your Transform has more than one TransformOutput, whereby your Job has more
than one JobOutput. In such cases, when you submit the Job, you will add two or more
JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you
retrieve the Job, either through events or on a GET request, you can use the label to easily
identify the JobOutput. If a label is not provided, a default value of
'{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in
the corresponding TransformOutput and the output index is the relative index of the this
JobOutput within the Job. Note that this index is the same as the relative index of the
corresponding TransformOutput within its Transform.
:type label: str
:ivar start_time: The UTC date and time at which this Job Output began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job Output finished processing.
:vartype end_time: ~datetime.datetime
"""
_validation = {
'odata_type': {'required': True},
'error': {'readonly': True},
'state': {'readonly': True},
'progress': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'error': {'key': 'error', 'type': 'JobError'},
'state': {'key': 'state', 'type': 'str'},
'progress': {'key': 'progress', 'type': 'int'},
'label': {'key': 'label', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobOutputAsset': 'JobOutputAsset'}
}
def __init__(
self,
**kwargs
):
super(JobOutput, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.error = None
self.state = None
self.progress = None
self.label = kwargs.get('label', None)
self.start_time = None
self.end_time = None
class JobOutputAsset(JobOutput):
"""Represents an Asset used as a JobOutput.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:ivar error: If the JobOutput is in the Error state, it contains the details of the error.
:vartype error: ~azure.mgmt.media.models.JobError
:ivar state: Describes the state of the JobOutput. Possible values include: "Canceled",
"Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:ivar progress: If the JobOutput is in a Processing state, this contains the Job completion
percentage. The value is an estimate and not intended to be used to predict Job completion
times. To determine if the JobOutput is complete, use the State property.
:vartype progress: int
:param label: A label that is assigned to a JobOutput in order to help uniquely identify it.
This is useful when your Transform has more than one TransformOutput, whereby your Job has more
than one JobOutput. In such cases, when you submit the Job, you will add two or more
JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you
retrieve the Job, either through events or on a GET request, you can use the label to easily
identify the JobOutput. If a label is not provided, a default value of
'{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in
the corresponding TransformOutput and the output index is the relative index of the this
JobOutput within the Job. Note that this index is the same as the relative index of the
corresponding TransformOutput within its Transform.
:type label: str
:ivar start_time: The UTC date and time at which this Job Output began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job Output finished processing.
:vartype end_time: ~datetime.datetime
:param asset_name: Required. The name of the output Asset.
:type asset_name: str
"""
_validation = {
'odata_type': {'required': True},
'error': {'readonly': True},
'state': {'readonly': True},
'progress': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'asset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'error': {'key': 'error', 'type': 'JobError'},
'state': {'key': 'state', 'type': 'str'},
'progress': {'key': 'progress', 'type': 'int'},
'label': {'key': 'label', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'asset_name': {'key': 'assetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobOutputAsset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobOutputAsset' # type: str
self.asset_name = kwargs['asset_name']
class JpgFormat(ImageFormat):
"""Describes the settings for producing JPEG thumbnails.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JpgFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgFormat' # type: str
class JpgImage(Image):
"""Describes the properties for producing a series of JPEG images from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. As the default value is 10%, it means if stream
has long duration, the first generated thumbnail might be far away from the one specified at
start time. Try to select reasonable value for Step if the first thumbnail is expected close to
start time, or set Range value at 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
:param layers: A collection of output JPEG image layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.JpgLayer]
:param sprite_column: Sets the number of columns used in thumbnail sprite image. The number of
rows are automatically calculated and a VTT file is generated with the coordinate mappings for
each thumbnail in the sprite. Note: this value should be a positive integer and a proper value
is recommended so that the output image resolution will not go beyond JPEG maximum pixel
resolution limit 65535x65535.
:type sprite_column: int
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[JpgLayer]'},
'sprite_column': {'key': 'spriteColumn', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JpgImage, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgImage' # type: str
self.layers = kwargs.get('layers', None)
self.sprite_column = kwargs.get('sprite_column', None)
class JpgLayer(Layer):
"""Describes the settings to produce a JPEG image from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param quality: The compression quality of the JPEG output. Range is from 0-100 and the default
is 70.
:type quality: int
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'quality': {'key': 'quality', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JpgLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgLayer' # type: str
self.quality = kwargs.get('quality', None)
class KeyDelivery(msrest.serialization.Model):
"""KeyDelivery.
:param access_control: The access control properties for Key Delivery.
:type access_control: ~azure.mgmt.media.models.AccessControl
"""
_attribute_map = {
'access_control': {'key': 'accessControl', 'type': 'AccessControl'},
}
def __init__(
self,
**kwargs
):
super(KeyDelivery, self).__init__(**kwargs)
self.access_control = kwargs.get('access_control', None)
class KeyVaultProperties(msrest.serialization.Model):
"""KeyVaultProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:param key_identifier: The URL of the Key Vault key used to encrypt the account. The key may
either be versioned (for example https://vault/keys/mykey/version1) or reference a key without
a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt the Media Services account,
including the key version.
:vartype current_key_identifier: str
"""
_validation = {
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs.get('key_identifier', None)
self.current_key_identifier = None
class ListContainerSasInput(msrest.serialization.Model):
"""The parameters to the list SAS request.
:param permissions: The permissions to set on the SAS URL. Possible values include: "Read",
"ReadWrite", "ReadWriteDelete".
:type permissions: str or ~azure.mgmt.media.models.AssetContainerPermission
:param expiry_time: The SAS URL expiration time. This must be less than 24 hours from the
current time.
:type expiry_time: ~datetime.datetime
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'str'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListContainerSasInput, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.expiry_time = kwargs.get('expiry_time', None)
class ListContentKeysResponse(msrest.serialization.Model):
"""Class of response for listContentKeys action.
:param content_keys: ContentKeys used by current Streaming Locator.
:type content_keys: list[~azure.mgmt.media.models.StreamingLocatorContentKey]
"""
_attribute_map = {
'content_keys': {'key': 'contentKeys', 'type': '[StreamingLocatorContentKey]'},
}
def __init__(
self,
**kwargs
):
super(ListContentKeysResponse, self).__init__(**kwargs)
self.content_keys = kwargs.get('content_keys', None)
class ListEdgePoliciesInput(msrest.serialization.Model):
"""ListEdgePoliciesInput.
:param device_id: Unique identifier of the edge device.
:type device_id: str
"""
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListEdgePoliciesInput, self).__init__(**kwargs)
self.device_id = kwargs.get('device_id', None)
class ListPathsResponse(msrest.serialization.Model):
"""Class of response for listPaths action.
:param streaming_paths: Streaming Paths supported by current Streaming Locator.
:type streaming_paths: list[~azure.mgmt.media.models.StreamingPath]
:param download_paths: Download Paths supported by current Streaming Locator.
:type download_paths: list[str]
"""
_attribute_map = {
'streaming_paths': {'key': 'streamingPaths', 'type': '[StreamingPath]'},
'download_paths': {'key': 'downloadPaths', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(ListPathsResponse, self).__init__(**kwargs)
self.streaming_paths = kwargs.get('streaming_paths', None)
self.download_paths = kwargs.get('download_paths', None)
class ListStreamingLocatorsResponse(msrest.serialization.Model):
"""The Streaming Locators associated with this Asset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar streaming_locators: The list of Streaming Locators.
:vartype streaming_locators: list[~azure.mgmt.media.models.AssetStreamingLocator]
"""
_validation = {
'streaming_locators': {'readonly': True},
}
_attribute_map = {
'streaming_locators': {'key': 'streamingLocators', 'type': '[AssetStreamingLocator]'},
}
def __init__(
self,
**kwargs
):
super(ListStreamingLocatorsResponse, self).__init__(**kwargs)
self.streaming_locators = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class LiveEvent(TrackedResource):
"""The live event.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param description: A description for the live event.
:type description: str
:param input: Live event input settings. It defines how the live event receives input from a
contribution encoder.
:type input: ~azure.mgmt.media.models.LiveEventInput
:param preview: Live event preview settings. Preview allows live event producers to preview the
live streaming content without creating any live output.
:type preview: ~azure.mgmt.media.models.LiveEventPreview
:param encoding: Encoding settings for the live event. It configures whether a live encoder is
used for the live event and settings for the live encoder if it is used.
:type encoding: ~azure.mgmt.media.models.LiveEventEncoding
:param transcriptions: Live transcription settings for the live event. See
https://go.microsoft.com/fwlink/?linkid=2133742 for more information about the live
transcription feature.
:type transcriptions: list[~azure.mgmt.media.models.LiveEventTranscription]
:ivar provisioning_state: The provisioning state of the live event.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the live event. See
https://go.microsoft.com/fwlink/?linkid=2139012 for more information. Possible values include:
"Stopped", "Allocating", "StandBy", "Starting", "Running", "Stopping", "Deleting".
:vartype resource_state: str or ~azure.mgmt.media.models.LiveEventResourceState
:param cross_site_access_policies: Live event cross site access policies.
:type cross_site_access_policies: ~azure.mgmt.media.models.CrossSiteAccessPolicies
:param use_static_hostname: Specifies whether a static hostname would be assigned to the live
event preview and ingest endpoints. This value can only be updated if the live event is in
Standby state.
:type use_static_hostname: bool
:param hostname_prefix: When useStaticHostname is set to true, the hostnamePrefix specifies the
first part of the hostname assigned to the live event preview and ingest endpoints. The final
hostname would be a combination of this prefix, the media service account name and a short code
for the Azure Media Services data center.
:type hostname_prefix: str
:param stream_options: The options to use for the LiveEvent. This value is specified at
creation time and cannot be updated. The valid values for the array entry values are 'Default'
and 'LowLatency'.
:type stream_options: list[str or ~azure.mgmt.media.models.StreamOptionsFlag]
:ivar created: The creation time for the live event.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified time of the live event.
:vartype last_modified: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'input': {'key': 'properties.input', 'type': 'LiveEventInput'},
'preview': {'key': 'properties.preview', 'type': 'LiveEventPreview'},
'encoding': {'key': 'properties.encoding', 'type': 'LiveEventEncoding'},
'transcriptions': {'key': 'properties.transcriptions', 'type': '[LiveEventTranscription]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
'cross_site_access_policies': {'key': 'properties.crossSiteAccessPolicies', 'type': 'CrossSiteAccessPolicies'},
'use_static_hostname': {'key': 'properties.useStaticHostname', 'type': 'bool'},
'hostname_prefix': {'key': 'properties.hostnamePrefix', 'type': 'str'},
'stream_options': {'key': 'properties.streamOptions', 'type': '[str]'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(LiveEvent, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.input = kwargs.get('input', None)
self.preview = kwargs.get('preview', None)
self.encoding = kwargs.get('encoding', None)
self.transcriptions = kwargs.get('transcriptions', None)
self.provisioning_state = None
self.resource_state = None
self.cross_site_access_policies = kwargs.get('cross_site_access_policies', None)
self.use_static_hostname = kwargs.get('use_static_hostname', None)
self.hostname_prefix = kwargs.get('hostname_prefix', None)
self.stream_options = kwargs.get('stream_options', None)
self.created = None
self.last_modified = None
class LiveEventActionInput(msrest.serialization.Model):
"""The LiveEvent action input parameter definition.
:param remove_outputs_on_stop: The flag indicates whether live outputs are automatically
deleted when live event is being stopped. Deleting live outputs do not delete the underlying
assets.
:type remove_outputs_on_stop: bool
"""
_attribute_map = {
'remove_outputs_on_stop': {'key': 'removeOutputsOnStop', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(LiveEventActionInput, self).__init__(**kwargs)
self.remove_outputs_on_stop = kwargs.get('remove_outputs_on_stop', None)
class LiveEventEncoding(msrest.serialization.Model):
"""Specifies the live event type and optional encoding settings for encoding live events.
:param encoding_type: Live event type. When encodingType is set to None, the service simply
passes through the incoming video and audio layer(s) to the output. When encodingType is set to
Standard or Premium1080p, a live encoder transcodes the incoming stream into multiple bitrates
or layers. See https://go.microsoft.com/fwlink/?linkid=2095101 for more information. This
property cannot be modified after the live event is created. Possible values include: "None",
"Standard", "Premium1080p".
:type encoding_type: str or ~azure.mgmt.media.models.LiveEventEncodingType
:param preset_name: The optional encoding preset name, used when encodingType is not None. This
value is specified at creation time and cannot be updated. If the encodingType is set to
Standard, then the default preset name is ‘Default720p’. Else if the encodingType is set to
Premium1080p, the default preset is ‘Default1080p’.
:type preset_name: str
:param stretch_mode: Specifies how the input video will be resized to fit the desired output
resolution(s). Default is None. Possible values include: "None", "AutoSize", "AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param key_frame_interval: Use an ISO 8601 time value between 0.5 to 20 seconds to specify the
output fragment length for the video and audio tracks of an encoding live event. For example,
use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or
the length of a GoP (group of pictures). If this value is not set for an encoding live event,
the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live
events.
:type key_frame_interval: ~datetime.timedelta
"""
_attribute_map = {
'encoding_type': {'key': 'encodingType', 'type': 'str'},
'preset_name': {'key': 'presetName', 'type': 'str'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
super(LiveEventEncoding, self).__init__(**kwargs)
self.encoding_type = kwargs.get('encoding_type', None)
self.preset_name = kwargs.get('preset_name', None)
self.stretch_mode = kwargs.get('stretch_mode', None)
self.key_frame_interval = kwargs.get('key_frame_interval', None)
class LiveEventEndpoint(msrest.serialization.Model):
"""The live event endpoint.
:param protocol: The endpoint protocol.
:type protocol: str
:param url: The endpoint URL.
:type url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventEndpoint, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.url = kwargs.get('url', None)
class LiveEventInput(msrest.serialization.Model):
"""The live event input.
All required parameters must be populated in order to send to Azure.
:param streaming_protocol: Required. The input protocol for the live event. This is specified
at creation time and cannot be updated. Possible values include: "FragmentedMP4", "RTMP".
:type streaming_protocol: str or ~azure.mgmt.media.models.LiveEventInputProtocol
:param access_control: Access control for live event input.
:type access_control: ~azure.mgmt.media.models.LiveEventInputAccessControl
:param key_frame_interval_duration: ISO 8601 time duration of the key frame interval duration
of the input. This value sets the EXT-X-TARGETDURATION property in the HLS output. For example,
use PT2S to indicate 2 seconds. Leave the value empty for encoding live events.
:type key_frame_interval_duration: str
:param access_token: A UUID in string form to uniquely identify the stream. This can be
specified at creation time but cannot be updated. If omitted, the service will generate a
unique value.
:type access_token: str
:param endpoints: The input endpoints for the live event.
:type endpoints: list[~azure.mgmt.media.models.LiveEventEndpoint]
"""
_validation = {
'streaming_protocol': {'required': True},
}
_attribute_map = {
'streaming_protocol': {'key': 'streamingProtocol', 'type': 'str'},
'access_control': {'key': 'accessControl', 'type': 'LiveEventInputAccessControl'},
'key_frame_interval_duration': {'key': 'keyFrameIntervalDuration', 'type': 'str'},
'access_token': {'key': 'accessToken', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': '[LiveEventEndpoint]'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInput, self).__init__(**kwargs)
self.streaming_protocol = kwargs['streaming_protocol']
self.access_control = kwargs.get('access_control', None)
self.key_frame_interval_duration = kwargs.get('key_frame_interval_duration', None)
self.access_token = kwargs.get('access_token', None)
self.endpoints = kwargs.get('endpoints', None)
class LiveEventInputAccessControl(msrest.serialization.Model):
"""The IP access control for live event input.
:param ip: The IP access control properties.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInputAccessControl, self).__init__(**kwargs)
self.ip = kwargs.get('ip', None)
class LiveEventInputTrackSelection(msrest.serialization.Model):
"""A track selection condition. This property is reserved for future use, any value set on this property will be ignored.
:param property: Property name to select. This property is reserved for future use, any value
set on this property will be ignored.
:type property: str
:param operation: Comparing operation. This property is reserved for future use, any value set
on this property will be ignored.
:type operation: str
:param value: Property value to select. This property is reserved for future use, any value set
on this property will be ignored.
:type value: str
"""
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInputTrackSelection, self).__init__(**kwargs)
self.property = kwargs.get('property', None)
self.operation = kwargs.get('operation', None)
self.value = kwargs.get('value', None)
class LiveEventListResult(msrest.serialization.Model):
"""The LiveEvent list result.
:param value: The result of the List Live Event operation.
:type value: list[~azure.mgmt.media.models.LiveEvent]
:param odata_count: The number of result.
:type odata_count: int
:param odata_next_link: The link to the next set of results. Not empty if value contains
incomplete list of live outputs.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LiveEvent]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class LiveEventOutputTranscriptionTrack(msrest.serialization.Model):
"""Describes a transcription track in the output of a live event, generated using speech-to-text transcription. This property is reserved for future use, any value set on this property will be ignored.
All required parameters must be populated in order to send to Azure.
:param track_name: Required. The output track name. This property is reserved for future use,
any value set on this property will be ignored.
:type track_name: str
"""
_validation = {
'track_name': {'required': True},
}
_attribute_map = {
'track_name': {'key': 'trackName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventOutputTranscriptionTrack, self).__init__(**kwargs)
self.track_name = kwargs['track_name']
class LiveEventPreview(msrest.serialization.Model):
"""Live event preview settings.
:param endpoints: The endpoints for preview. Do not share the preview URL with the live event
audience.
:type endpoints: list[~azure.mgmt.media.models.LiveEventEndpoint]
:param access_control: The access control for live event preview.
:type access_control: ~azure.mgmt.media.models.LiveEventPreviewAccessControl
:param preview_locator: The identifier of the preview locator in Guid format. Specifying this
at creation time allows the caller to know the preview locator url before the event is created.
If omitted, the service will generate a random identifier. This value cannot be updated once
the live event is created.
:type preview_locator: str
:param streaming_policy_name: The name of streaming policy used for the live event preview.
This value is specified at creation time and cannot be updated.
:type streaming_policy_name: str
:param alternative_media_id: An alternative media identifier associated with the streaming
locator created for the preview. This value is specified at creation time and cannot be
updated. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the
CustomKeyAcquisitionUrlTemplate of the StreamingPolicy specified in the StreamingPolicyName
field.
:type alternative_media_id: str
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': '[LiveEventEndpoint]'},
'access_control': {'key': 'accessControl', 'type': 'LiveEventPreviewAccessControl'},
'preview_locator': {'key': 'previewLocator', 'type': 'str'},
'streaming_policy_name': {'key': 'streamingPolicyName', 'type': 'str'},
'alternative_media_id': {'key': 'alternativeMediaId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventPreview, self).__init__(**kwargs)
self.endpoints = kwargs.get('endpoints', None)
self.access_control = kwargs.get('access_control', None)
self.preview_locator = kwargs.get('preview_locator', None)
self.streaming_policy_name = kwargs.get('streaming_policy_name', None)
self.alternative_media_id = kwargs.get('alternative_media_id', None)
class LiveEventPreviewAccessControl(msrest.serialization.Model):
"""The IP access control for the live event preview endpoint.
:param ip: The IP access control properties.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(LiveEventPreviewAccessControl, self).__init__(**kwargs)
self.ip = kwargs.get('ip', None)
class LiveEventTranscription(msrest.serialization.Model):
"""Describes the transcription tracks in the output of a live event, generated using speech-to-text transcription. This property is reserved for future use, any value set on this property will be ignored.
:param language: Specifies the language (locale) to be used for speech-to-text transcription –
it should match the spoken language in the audio track. The value should be in BCP-47 format
(e.g: 'en-US'). See https://go.microsoft.com/fwlink/?linkid=2133742 for more information about
the live transcription feature and the list of supported languages.
:type language: str
:param input_track_selection: Provides a mechanism to select the audio track in the input live
feed, to which speech-to-text transcription is applied. This property is reserved for future
use, any value set on this property will be ignored.
:type input_track_selection: list[~azure.mgmt.media.models.LiveEventInputTrackSelection]
:param output_transcription_track: Describes a transcription track in the output of a live
event, generated using speech-to-text transcription. This property is reserved for future use,
any value set on this property will be ignored.
:type output_transcription_track: ~azure.mgmt.media.models.LiveEventOutputTranscriptionTrack
"""
_attribute_map = {
'language': {'key': 'language', 'type': 'str'},
'input_track_selection': {'key': 'inputTrackSelection', 'type': '[LiveEventInputTrackSelection]'},
'output_transcription_track': {'key': 'outputTranscriptionTrack', 'type': 'LiveEventOutputTranscriptionTrack'},
}
def __init__(
self,
**kwargs
):
super(LiveEventTranscription, self).__init__(**kwargs)
self.language = kwargs.get('language', None)
self.input_track_selection = kwargs.get('input_track_selection', None)
self.output_transcription_track = kwargs.get('output_transcription_track', None)
class LiveOutput(ProxyResource):
"""The Live Output.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param description: The description of the live output.
:type description: str
:param asset_name: The asset that the live output will write to.
:type asset_name: str
:param archive_window_length: ISO 8601 time between 1 minute to 25 hours to indicate the
maximum content length that can be archived in the asset for this live output. This also sets
the maximum content length for the rewind window. For example, use PT1H30M to indicate 1 hour
and 30 minutes of archive window.
:type archive_window_length: ~datetime.timedelta
:param manifest_name: The manifest file name. If not provided, the service will generate one
automatically.
:type manifest_name: str
:param hls: HTTP Live Streaming (HLS) packing setting for the live output.
:type hls: ~azure.mgmt.media.models.Hls
:param output_snap_time: The initial timestamp that the live output will start at, any content
before this value will not be archived.
:type output_snap_time: long
:ivar created: The creation time the live output.
:vartype created: ~datetime.datetime
:ivar last_modified: The time the live output was last modified.
:vartype last_modified: ~datetime.datetime
:ivar provisioning_state: The provisioning state of the live output.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the live output. Possible values include:
"Creating", "Running", "Deleting".
:vartype resource_state: str or ~azure.mgmt.media.models.LiveOutputResourceState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'asset_name': {'key': 'properties.assetName', 'type': 'str'},
'archive_window_length': {'key': 'properties.archiveWindowLength', 'type': 'duration'},
'manifest_name': {'key': 'properties.manifestName', 'type': 'str'},
'hls': {'key': 'properties.hls', 'type': 'Hls'},
'output_snap_time': {'key': 'properties.outputSnapTime', 'type': 'long'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveOutput, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.asset_name = kwargs.get('asset_name', None)
self.archive_window_length = kwargs.get('archive_window_length', None)
self.manifest_name = kwargs.get('manifest_name', None)
self.hls = kwargs.get('hls', None)
self.output_snap_time = kwargs.get('output_snap_time', None)
self.created = None
self.last_modified = None
self.provisioning_state = None
self.resource_state = None
class LiveOutputListResult(msrest.serialization.Model):
"""The LiveOutput list result.
:param value: The result of the List LiveOutput operation.
:type value: list[~azure.mgmt.media.models.LiveOutput]
:param odata_count: The number of result.
:type odata_count: int
:param odata_next_link: The link to the next set of results. Not empty if value contains
incomplete list of live outputs.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LiveOutput]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveOutputListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class Location(msrest.serialization.Model):
"""Location.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Location, self).__init__(**kwargs)
self.name = kwargs['name']
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MediaService(TrackedResource):
"""A Media Services account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The Managed Identity for the Media Services account.
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar media_service_id: The Media Services account ID.
:vartype media_service_id: str
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~azure.mgmt.media.models.StorageAccount]
:param storage_authentication: Possible values include: "System", "ManagedIdentity".
:type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication
:param encryption: The account encryption properties.
:type encryption: ~azure.mgmt.media.models.AccountEncryption
:param key_delivery: The Key Delivery properties for Media Services account.
:type key_delivery: ~azure.mgmt.media.models.KeyDelivery
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'media_service_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'},
}
def __init__(
self,
**kwargs
):
super(MediaService, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.system_data = None
self.media_service_id = None
self.storage_accounts = kwargs.get('storage_accounts', None)
self.storage_authentication = kwargs.get('storage_authentication', None)
self.encryption = kwargs.get('encryption', None)
self.key_delivery = kwargs.get('key_delivery', None)
class MediaServiceCollection(msrest.serialization.Model):
"""A collection of MediaService items.
:param value: A collection of MediaService items.
:type value: list[~azure.mgmt.media.models.MediaService]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MediaService]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class MediaServiceIdentity(msrest.serialization.Model):
"""MediaServiceIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type. Possible values include: "SystemAssigned", "None".
:type type: str or ~azure.mgmt.media.models.ManagedIdentityType
:ivar principal_id: The Principal ID of the identity.
:vartype principal_id: str
:ivar tenant_id: The Tenant ID of the identity.
:vartype tenant_id: str
"""
_validation = {
'type': {'required': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.principal_id = None
self.tenant_id = None
class MediaServiceUpdate(msrest.serialization.Model):
"""A Media Services account update.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The Managed Identity for the Media Services account.
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity
:ivar media_service_id: The Media Services account ID.
:vartype media_service_id: str
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~azure.mgmt.media.models.StorageAccount]
:param storage_authentication: Possible values include: "System", "ManagedIdentity".
:type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication
:param encryption: The account encryption properties.
:type encryption: ~azure.mgmt.media.models.AccountEncryption
:param key_delivery: The Key Delivery properties for Media Services account.
:type key_delivery: ~azure.mgmt.media.models.KeyDelivery
"""
_validation = {
'media_service_id': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'},
'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.media_service_id = None
self.storage_accounts = kwargs.get('storage_accounts', None)
self.storage_authentication = kwargs.get('storage_authentication', None)
self.encryption = kwargs.get('encryption', None)
self.key_delivery = kwargs.get('key_delivery', None)
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~azure.mgmt.media.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~azure.mgmt.media.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
class MultiBitrateFormat(Format):
"""Describes the properties for producing a collection of GOP aligned multi-bitrate files. The default behavior is to produce one output file for each video layer which is muxed together with all the audios. The exact output files produced can be controlled by specifying the outputFiles collection.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Mp4Format, TransportStreamFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
audio and video layer labels to be muxed together .
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.Mp4Format': 'Mp4Format', '#Microsoft.Media.TransportStreamFormat': 'TransportStreamFormat'}
}
def __init__(
self,
**kwargs
):
super(MultiBitrateFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.MultiBitrateFormat' # type: str
self.output_files = kwargs.get('output_files', None)
class Mp4Format(MultiBitrateFormat):
"""Describes the properties for an output ISO MP4 file.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
audio and video layer labels to be muxed together .
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
def __init__(
self,
**kwargs
):
super(Mp4Format, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Mp4Format' # type: str
class NoEncryption(msrest.serialization.Model):
"""Class for NoEncryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
}
def __init__(
self,
**kwargs
):
super(NoEncryption, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
class ODataError(msrest.serialization.Model):
"""Information about an error.
:param code: A language-independent error name.
:type code: str
:param message: The error message.
:type message: str
:param target: The target of the error (for example, the name of the property in error).
:type target: str
:param details: The error details.
:type details: list[~azure.mgmt.media.models.ODataError]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ODataError]'},
}
def __init__(
self,
**kwargs
):
super(ODataError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~azure.mgmt.media.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~azure.mgmt.media.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~azure.mgmt.media.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~azure.mgmt.media.models.Operation]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OutputFile(msrest.serialization.Model):
"""Represents an output file produced.
All required parameters must be populated in order to send to Azure.
:param labels: Required. The list of labels that describe how the encoder should multiplex
video and audio into an output file. For example, if the encoder is producing two video layers
with labels v1 and v2, and one audio layer with label a1, then an array like '[v1, a1]' tells
the encoder to produce an output file with the video track represented by v1 and the audio
track represented by a1.
:type labels: list[str]
"""
_validation = {
'labels': {'required': True},
}
_attribute_map = {
'labels': {'key': 'labels', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(OutputFile, self).__init__(**kwargs)
self.labels = kwargs['labels']
class PngFormat(ImageFormat):
"""Describes the settings for producing PNG thumbnails.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PngFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngFormat' # type: str
class PngImage(Image):
"""Describes the properties for producing a series of PNG images from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. As the default value is 10%, it means if stream
has long duration, the first generated thumbnail might be far away from the one specified at
start time. Try to select reasonable value for Step if the first thumbnail is expected close to
start time, or set Range value at 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
:param layers: A collection of output PNG image layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.PngLayer]
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[PngLayer]'},
}
def __init__(
self,
**kwargs
):
super(PngImage, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngImage' # type: str
self.layers = kwargs.get('layers', None)
class PngLayer(Layer):
"""Describes the settings to produce a PNG image from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PngLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngLayer' # type: str
class PresentationTimeRange(msrest.serialization.Model):
"""The presentation time range, this is asset related and not recommended for Account Filter.
:param start_timestamp: The absolute start time boundary.
:type start_timestamp: long
:param end_timestamp: The absolute end time boundary.
:type end_timestamp: long
:param presentation_window_duration: The relative to end sliding window.
:type presentation_window_duration: long
:param live_backoff_duration: The relative to end right edge.
:type live_backoff_duration: long
:param timescale: The time scale of time stamps.
:type timescale: long
:param force_end_timestamp: The indicator of forcing existing of end time stamp.
:type force_end_timestamp: bool
"""
_attribute_map = {
'start_timestamp': {'key': 'startTimestamp', 'type': 'long'},
'end_timestamp': {'key': 'endTimestamp', 'type': 'long'},
'presentation_window_duration': {'key': 'presentationWindowDuration', 'type': 'long'},
'live_backoff_duration': {'key': 'liveBackoffDuration', 'type': 'long'},
'timescale': {'key': 'timescale', 'type': 'long'},
'force_end_timestamp': {'key': 'forceEndTimestamp', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(PresentationTimeRange, self).__init__(**kwargs)
self.start_timestamp = kwargs.get('start_timestamp', None)
self.end_timestamp = kwargs.get('end_timestamp', None)
self.presentation_window_duration = kwargs.get('presentation_window_duration', None)
self.live_backoff_duration = kwargs.get('live_backoff_duration', None)
self.timescale = kwargs.get('timescale', None)
self.force_end_timestamp = kwargs.get('force_end_timestamp', None)
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.media.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.media.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.media.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~azure.mgmt.media.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.media.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~azure.mgmt.media.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
class Properties(msrest.serialization.Model):
"""The service specification property.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~azure.mgmt.media.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class Provider(msrest.serialization.Model):
"""A resource provider.
All required parameters must be populated in order to send to Azure.
:param provider_name: Required. The provider name.
:type provider_name: str
"""
_validation = {
'provider_name': {'required': True},
}
_attribute_map = {
'provider_name': {'key': 'providerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Provider, self).__init__(**kwargs)
self.provider_name = kwargs['provider_name']
class Rectangle(msrest.serialization.Model):
"""Describes the properties of a rectangular window applied to the input media before processing it.
:param left: The number of pixels from the left-margin. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example, 50%).
:type left: str
:param top: The number of pixels from the top-margin. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example, 50%).
:type top: str
:param width: The width of the rectangular region in pixels. This can be absolute pixel value
(e.g 100), or relative to the size of the video (For example, 50%).
:type width: str
:param height: The height of the rectangular region in pixels. This can be absolute pixel value
(e.g 100), or relative to the size of the video (For example, 50%).
:type height: str
"""
_attribute_map = {
'left': {'key': 'left', 'type': 'str'},
'top': {'key': 'top', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Rectangle, self).__init__(**kwargs)
self.left = kwargs.get('left', None)
self.top = kwargs.get('top', None)
self.width = kwargs.get('width', None)
self.height = kwargs.get('height', None)
class SelectAudioTrackByAttribute(AudioTrackDescriptor):
"""Select audio tracks from the input by specifying an attribute and an attribute filter.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
:param attribute: Required. The TrackAttribute to filter the tracks by. Possible values
include: "Bitrate", "Language".
:type attribute: str or ~azure.mgmt.media.models.TrackAttribute
:param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to
select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals".
:type filter: str or ~azure.mgmt.media.models.AttributeFilter
:param filter_value: The value to filter the tracks by. Only used when
AttributeFilter.ValueEquals is specified for the Filter property.
:type filter_value: str
"""
_validation = {
'odata_type': {'required': True},
'attribute': {'required': True},
'filter': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
'attribute': {'key': 'attribute', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'str'},
'filter_value': {'key': 'filterValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelectAudioTrackByAttribute, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectAudioTrackByAttribute' # type: str
self.attribute = kwargs['attribute']
self.filter = kwargs['filter']
self.filter_value = kwargs.get('filter_value', None)
class SelectAudioTrackById(AudioTrackDescriptor):
"""Select audio tracks from the input by specifying a track identifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
:param track_id: Required. Track identifier to select.
:type track_id: long
"""
_validation = {
'odata_type': {'required': True},
'track_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
'track_id': {'key': 'trackId', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(SelectAudioTrackById, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectAudioTrackById' # type: str
self.track_id = kwargs['track_id']
class VideoTrackDescriptor(TrackDescriptor):
"""A TrackSelection to select video tracks.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'}
}
def __init__(
self,
**kwargs
):
super(VideoTrackDescriptor, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoTrackDescriptor' # type: str
class SelectVideoTrackByAttribute(VideoTrackDescriptor):
"""Select video tracks from the input by specifying an attribute and an attribute filter.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param attribute: Required. The TrackAttribute to filter the tracks by. Possible values
include: "Bitrate", "Language".
:type attribute: str or ~azure.mgmt.media.models.TrackAttribute
:param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to
select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals".
:type filter: str or ~azure.mgmt.media.models.AttributeFilter
:param filter_value: The value to filter the tracks by. Only used when
AttributeFilter.ValueEquals is specified for the Filter property. For TrackAttribute.Bitrate,
this should be an integer value in bits per second (e.g: '1500000'). The
TrackAttribute.Language is not supported for video tracks.
:type filter_value: str
"""
_validation = {
'odata_type': {'required': True},
'attribute': {'required': True},
'filter': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'attribute': {'key': 'attribute', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'str'},
'filter_value': {'key': 'filterValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelectVideoTrackByAttribute, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectVideoTrackByAttribute' # type: str
self.attribute = kwargs['attribute']
self.filter = kwargs['filter']
self.filter_value = kwargs.get('filter_value', None)
class SelectVideoTrackById(VideoTrackDescriptor):
"""Select video tracks from the input by specifying a track identifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param track_id: Required. Track identifier to select.
:type track_id: long
"""
_validation = {
'odata_type': {'required': True},
'track_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'track_id': {'key': 'trackId', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(SelectVideoTrackById, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectVideoTrackById' # type: str
self.track_id = kwargs['track_id']
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~azure.mgmt.media.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~azure.mgmt.media.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class StandardEncoderPreset(Preset):
"""Describes all the settings to be used when encoding the input video with the Standard Encoder.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filters: One or more filtering operations that are applied to the input media before
encoding.
:type filters: ~azure.mgmt.media.models.Filters
:param codecs: Required. The list of codecs to be used when encoding the input video.
:type codecs: list[~azure.mgmt.media.models.Codec]
:param formats: Required. The list of outputs to be produced by the encoder.
:type formats: list[~azure.mgmt.media.models.Format]
"""
_validation = {
'odata_type': {'required': True},
'codecs': {'required': True},
'formats': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filters': {'key': 'filters', 'type': 'Filters'},
'codecs': {'key': 'codecs', 'type': '[Codec]'},
'formats': {'key': 'formats', 'type': '[Format]'},
}
def __init__(
self,
**kwargs
):
super(StandardEncoderPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.StandardEncoderPreset' # type: str
self.filters = kwargs.get('filters', None)
self.codecs = kwargs['codecs']
self.formats = kwargs['formats']
class StorageAccount(msrest.serialization.Model):
"""The storage account details.
All required parameters must be populated in order to send to Azure.
:param id: The ID of the storage account resource. Media Services relies on tables and queues
as well as blobs, so the primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage). Blob only storage accounts can be added as
secondary storage accounts.
:type id: str
:param type: Required. The type of the storage account. Possible values include: "Primary",
"Secondary".
:type type: str or ~azure.mgmt.media.models.StorageAccountType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.type = kwargs['type']
class StorageEncryptedAssetDecryptionData(msrest.serialization.Model):
"""Data needed to decrypt asset files encrypted with legacy storage encryption.
:param key: The Asset File storage encryption key.
:type key: bytearray
:param asset_file_encryption_metadata: Asset File encryption metadata.
:type asset_file_encryption_metadata:
list[~azure.mgmt.media.models.AssetFileEncryptionMetadata]
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'bytearray'},
'asset_file_encryption_metadata': {'key': 'assetFileEncryptionMetadata', 'type': '[AssetFileEncryptionMetadata]'},
}
def __init__(
self,
**kwargs
):
super(StorageEncryptedAssetDecryptionData, self).__init__(**kwargs)
self.key = kwargs.get('key', None)
self.asset_file_encryption_metadata = kwargs.get('asset_file_encryption_metadata', None)
class StreamingEndpoint(TrackedResource):
"""The streaming endpoint.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param description: The streaming endpoint description.
:type description: str
:param scale_units: The number of scale units. Use the Scale operation to adjust this value.
:type scale_units: int
:param availability_set_name: This feature is deprecated, do not set a value for this property.
:type availability_set_name: str
:param access_control: The access control definition of the streaming endpoint.
:type access_control: ~azure.mgmt.media.models.StreamingEndpointAccessControl
:param max_cache_age: Max cache age.
:type max_cache_age: long
:param custom_host_names: The custom host names of the streaming endpoint.
:type custom_host_names: list[str]
:ivar host_name: The streaming endpoint host name.
:vartype host_name: str
:param cdn_enabled: The CDN enabled flag.
:type cdn_enabled: bool
:param cdn_provider: The CDN provider name.
:type cdn_provider: str
:param cdn_profile: The CDN profile name.
:type cdn_profile: str
:ivar provisioning_state: The provisioning state of the streaming endpoint.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the streaming endpoint. Possible values include:
"Stopped", "Starting", "Running", "Stopping", "Deleting", "Scaling".
:vartype resource_state: str or ~azure.mgmt.media.models.StreamingEndpointResourceState
:param cross_site_access_policies: The streaming endpoint access policies.
:type cross_site_access_policies: ~azure.mgmt.media.models.CrossSiteAccessPolicies
:ivar free_trial_end_time: The free trial expiration time.
:vartype free_trial_end_time: ~datetime.datetime
:ivar created: The exact time the streaming endpoint was created.
:vartype created: ~datetime.datetime
:ivar last_modified: The exact time the streaming endpoint was last modified.
:vartype last_modified: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'host_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
'free_trial_end_time': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'scale_units': {'key': 'properties.scaleUnits', 'type': 'int'},
'availability_set_name': {'key': 'properties.availabilitySetName', 'type': 'str'},
'access_control': {'key': 'properties.accessControl', 'type': 'StreamingEndpointAccessControl'},
'max_cache_age': {'key': 'properties.maxCacheAge', 'type': 'long'},
'custom_host_names': {'key': 'properties.customHostNames', 'type': '[str]'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'cdn_enabled': {'key': 'properties.cdnEnabled', 'type': 'bool'},
'cdn_provider': {'key': 'properties.cdnProvider', 'type': 'str'},
'cdn_profile': {'key': 'properties.cdnProfile', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
'cross_site_access_policies': {'key': 'properties.crossSiteAccessPolicies', 'type': 'CrossSiteAccessPolicies'},
'free_trial_end_time': {'key': 'properties.freeTrialEndTime', 'type': 'iso-8601'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpoint, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.scale_units = kwargs.get('scale_units', None)
self.availability_set_name = kwargs.get('availability_set_name', None)
self.access_control = kwargs.get('access_control', None)
self.max_cache_age = kwargs.get('max_cache_age', None)
self.custom_host_names = kwargs.get('custom_host_names', None)
self.host_name = None
self.cdn_enabled = kwargs.get('cdn_enabled', None)
self.cdn_provider = kwargs.get('cdn_provider', None)
self.cdn_profile = kwargs.get('cdn_profile', None)
self.provisioning_state = None
self.resource_state = None
self.cross_site_access_policies = kwargs.get('cross_site_access_policies', None)
self.free_trial_end_time = None
self.created = None
self.last_modified = None
class StreamingEndpointAccessControl(msrest.serialization.Model):
"""Streaming endpoint access control definition.
:param akamai: The access control of Akamai.
:type akamai: ~azure.mgmt.media.models.AkamaiAccessControl
:param ip: The IP access control of the streaming endpoint.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'akamai': {'key': 'akamai', 'type': 'AkamaiAccessControl'},
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpointAccessControl, self).__init__(**kwargs)
self.akamai = kwargs.get('akamai', None)
self.ip = kwargs.get('ip', None)
class StreamingEndpointListResult(msrest.serialization.Model):
"""The streaming endpoint list result.
:param value: The result of the List StreamingEndpoint operation.
:type value: list[~azure.mgmt.media.models.StreamingEndpoint]
:param odata_count: The number of result.
:type odata_count: int
:param odata_next_link: The link to the next set of results. Not empty if value contains
incomplete list of streaming endpoints.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingEndpoint]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpointListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingEntityScaleUnit(msrest.serialization.Model):
"""scale units definition.
:param scale_unit: The scale unit number of the streaming endpoint.
:type scale_unit: int
"""
_attribute_map = {
'scale_unit': {'key': 'scaleUnit', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(StreamingEntityScaleUnit, self).__init__(**kwargs)
self.scale_unit = kwargs.get('scale_unit', None)
class StreamingLocator(ProxyResource):
"""A Streaming Locator resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param asset_name: Asset Name.
:type asset_name: str
:ivar created: The creation time of the Streaming Locator.
:vartype created: ~datetime.datetime
:param start_time: The start time of the Streaming Locator.
:type start_time: ~datetime.datetime
:param end_time: The end time of the Streaming Locator.
:type end_time: ~datetime.datetime
:param streaming_locator_id: The StreamingLocatorId of the Streaming Locator.
:type streaming_locator_id: str
:param streaming_policy_name: Name of the Streaming Policy used by this Streaming Locator.
Either specify the name of Streaming Policy you created or use one of the predefined Streaming
Policies. The predefined Streaming Policies available are: 'Predefined_DownloadOnly',
'Predefined_ClearStreamingOnly', 'Predefined_DownloadAndClearStreaming', 'Predefined_ClearKey',
'Predefined_MultiDrmCencStreaming' and 'Predefined_MultiDrmStreaming'.
:type streaming_policy_name: str
:param default_content_key_policy_name: Name of the default ContentKeyPolicy used by this
Streaming Locator.
:type default_content_key_policy_name: str
:param content_keys: The ContentKeys used by this Streaming Locator.
:type content_keys: list[~azure.mgmt.media.models.StreamingLocatorContentKey]
:param alternative_media_id: Alternative Media ID of this Streaming Locator.
:type alternative_media_id: str
:param filters: A list of asset or account filters which apply to this streaming locator.
:type filters: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'asset_name': {'key': 'properties.assetName', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'streaming_locator_id': {'key': 'properties.streamingLocatorId', 'type': 'str'},
'streaming_policy_name': {'key': 'properties.streamingPolicyName', 'type': 'str'},
'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'},
'content_keys': {'key': 'properties.contentKeys', 'type': '[StreamingLocatorContentKey]'},
'alternative_media_id': {'key': 'properties.alternativeMediaId', 'type': 'str'},
'filters': {'key': 'properties.filters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocator, self).__init__(**kwargs)
self.system_data = None
self.asset_name = kwargs.get('asset_name', None)
self.created = None
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.streaming_locator_id = kwargs.get('streaming_locator_id', None)
self.streaming_policy_name = kwargs.get('streaming_policy_name', None)
self.default_content_key_policy_name = kwargs.get('default_content_key_policy_name', None)
self.content_keys = kwargs.get('content_keys', None)
self.alternative_media_id = kwargs.get('alternative_media_id', None)
self.filters = kwargs.get('filters', None)
class StreamingLocatorCollection(msrest.serialization.Model):
"""A collection of StreamingLocator items.
:param value: A collection of StreamingLocator items.
:type value: list[~azure.mgmt.media.models.StreamingLocator]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingLocator]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocatorCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingLocatorContentKey(msrest.serialization.Model):
"""Class for content key in Streaming Locator.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. ID of Content Key.
:type id: str
:ivar type: Encryption type of Content Key. Possible values include: "CommonEncryptionCenc",
"CommonEncryptionCbcs", "EnvelopeEncryption".
:vartype type: str or ~azure.mgmt.media.models.StreamingLocatorContentKeyType
:param label_reference_in_streaming_policy: Label of Content Key as specified in the Streaming
Policy.
:type label_reference_in_streaming_policy: str
:param value: Value of Content Key.
:type value: str
:ivar policy_name: ContentKeyPolicy used by Content Key.
:vartype policy_name: str
:ivar tracks: Tracks which use this Content Key.
:vartype tracks: list[~azure.mgmt.media.models.TrackSelection]
"""
_validation = {
'id': {'required': True},
'type': {'readonly': True},
'policy_name': {'readonly': True},
'tracks': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'label_reference_in_streaming_policy': {'key': 'labelReferenceInStreamingPolicy', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
'tracks': {'key': 'tracks', 'type': '[TrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocatorContentKey, self).__init__(**kwargs)
self.id = kwargs['id']
self.type = None
self.label_reference_in_streaming_policy = kwargs.get('label_reference_in_streaming_policy', None)
self.value = kwargs.get('value', None)
self.policy_name = None
self.tracks = None
class StreamingPath(msrest.serialization.Model):
"""Class of paths for streaming.
All required parameters must be populated in order to send to Azure.
:param streaming_protocol: Required. Streaming protocol. Possible values include: "Hls",
"Dash", "SmoothStreaming", "Download".
:type streaming_protocol: str or ~azure.mgmt.media.models.StreamingPolicyStreamingProtocol
:param encryption_scheme: Required. Encryption scheme. Possible values include: "NoEncryption",
"EnvelopeEncryption", "CommonEncryptionCenc", "CommonEncryptionCbcs".
:type encryption_scheme: str or ~azure.mgmt.media.models.EncryptionScheme
:param paths: Streaming paths for each protocol and encryptionScheme pair.
:type paths: list[str]
"""
_validation = {
'streaming_protocol': {'required': True},
'encryption_scheme': {'required': True},
}
_attribute_map = {
'streaming_protocol': {'key': 'streamingProtocol', 'type': 'str'},
'encryption_scheme': {'key': 'encryptionScheme', 'type': 'str'},
'paths': {'key': 'paths', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPath, self).__init__(**kwargs)
self.streaming_protocol = kwargs['streaming_protocol']
self.encryption_scheme = kwargs['encryption_scheme']
self.paths = kwargs.get('paths', None)
class StreamingPolicy(ProxyResource):
"""A Streaming Policy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: Creation time of Streaming Policy.
:vartype created: ~datetime.datetime
:param default_content_key_policy_name: Default ContentKey used by current Streaming Policy.
:type default_content_key_policy_name: str
:param envelope_encryption: Configuration of EnvelopeEncryption.
:type envelope_encryption: ~azure.mgmt.media.models.EnvelopeEncryption
:param common_encryption_cenc: Configuration of CommonEncryptionCenc.
:type common_encryption_cenc: ~azure.mgmt.media.models.CommonEncryptionCenc
:param common_encryption_cbcs: Configuration of CommonEncryptionCbcs.
:type common_encryption_cbcs: ~azure.mgmt.media.models.CommonEncryptionCbcs
:param no_encryption: Configurations of NoEncryption.
:type no_encryption: ~azure.mgmt.media.models.NoEncryption
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'},
'envelope_encryption': {'key': 'properties.envelopeEncryption', 'type': 'EnvelopeEncryption'},
'common_encryption_cenc': {'key': 'properties.commonEncryptionCenc', 'type': 'CommonEncryptionCenc'},
'common_encryption_cbcs': {'key': 'properties.commonEncryptionCbcs', 'type': 'CommonEncryptionCbcs'},
'no_encryption': {'key': 'properties.noEncryption', 'type': 'NoEncryption'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicy, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.default_content_key_policy_name = kwargs.get('default_content_key_policy_name', None)
self.envelope_encryption = kwargs.get('envelope_encryption', None)
self.common_encryption_cenc = kwargs.get('common_encryption_cenc', None)
self.common_encryption_cbcs = kwargs.get('common_encryption_cbcs', None)
self.no_encryption = kwargs.get('no_encryption', None)
class StreamingPolicyCollection(msrest.serialization.Model):
"""A collection of StreamingPolicy items.
:param value: A collection of StreamingPolicy items.
:type value: list[~azure.mgmt.media.models.StreamingPolicy]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingPolicy]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingPolicyContentKey(msrest.serialization.Model):
"""Class to specify properties of content key.
:param label: Label can be used to specify Content Key when creating a Streaming Locator.
:type label: str
:param policy_name: Policy used by Content Key.
:type policy_name: str
:param tracks: Tracks which use this content key.
:type tracks: list[~azure.mgmt.media.models.TrackSelection]
"""
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
'tracks': {'key': 'tracks', 'type': '[TrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyContentKey, self).__init__(**kwargs)
self.label = kwargs.get('label', None)
self.policy_name = kwargs.get('policy_name', None)
self.tracks = kwargs.get('tracks', None)
class StreamingPolicyContentKeys(msrest.serialization.Model):
"""Class to specify properties of all content keys in Streaming Policy.
:param default_key: Default content key for an encryption scheme.
:type default_key: ~azure.mgmt.media.models.DefaultKey
:param key_to_track_mappings: Representing tracks needs separate content key.
:type key_to_track_mappings: list[~azure.mgmt.media.models.StreamingPolicyContentKey]
"""
_attribute_map = {
'default_key': {'key': 'defaultKey', 'type': 'DefaultKey'},
'key_to_track_mappings': {'key': 'keyToTrackMappings', 'type': '[StreamingPolicyContentKey]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyContentKeys, self).__init__(**kwargs)
self.default_key = kwargs.get('default_key', None)
self.key_to_track_mappings = kwargs.get('key_to_track_mappings', None)
class StreamingPolicyFairPlayConfiguration(msrest.serialization.Model):
"""Class to specify configurations of FairPlay in Streaming Policy.
All required parameters must be populated in order to send to Azure.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
:param allow_persistent_license: Required. All license to be persistent or not.
:type allow_persistent_license: bool
"""
_validation = {
'allow_persistent_license': {'required': True},
}
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
'allow_persistent_license': {'key': 'allowPersistentLicense', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyFairPlayConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
self.allow_persistent_license = kwargs['allow_persistent_license']
class StreamingPolicyPlayReadyConfiguration(msrest.serialization.Model):
"""Class to specify configurations of PlayReady in Streaming Policy.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
:param play_ready_custom_attributes: Custom attributes for PlayReady.
:type play_ready_custom_attributes: str
"""
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
'play_ready_custom_attributes': {'key': 'playReadyCustomAttributes', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyPlayReadyConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
self.play_ready_custom_attributes = kwargs.get('play_ready_custom_attributes', None)
class StreamingPolicyWidevineConfiguration(msrest.serialization.Model):
"""Class to specify configurations of Widevine in Streaming Policy.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
"""
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyWidevineConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
class SyncStorageKeysInput(msrest.serialization.Model):
"""The input to the sync storage keys request.
:param id: The ID of the storage account resource.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SyncStorageKeysInput, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.media.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~azure.mgmt.media.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TrackPropertyCondition(msrest.serialization.Model):
"""Class to specify one track property condition.
All required parameters must be populated in order to send to Azure.
:param property: Required. Track property type. Possible values include: "Unknown", "FourCC".
:type property: str or ~azure.mgmt.media.models.TrackPropertyType
:param operation: Required. Track property condition operation. Possible values include:
"Unknown", "Equal".
:type operation: str or ~azure.mgmt.media.models.TrackPropertyCompareOperation
:param value: Track property value.
:type value: str
"""
_validation = {
'property': {'required': True},
'operation': {'required': True},
}
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackPropertyCondition, self).__init__(**kwargs)
self.property = kwargs['property']
self.operation = kwargs['operation']
self.value = kwargs.get('value', None)
class TrackSelection(msrest.serialization.Model):
"""Class to select a track.
:param track_selections: TrackSelections is a track property condition list which can specify
track(s).
:type track_selections: list[~azure.mgmt.media.models.TrackPropertyCondition]
"""
_attribute_map = {
'track_selections': {'key': 'trackSelections', 'type': '[TrackPropertyCondition]'},
}
def __init__(
self,
**kwargs
):
super(TrackSelection, self).__init__(**kwargs)
self.track_selections = kwargs.get('track_selections', None)
class Transform(ProxyResource):
"""A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ'
format.
:vartype created: ~datetime.datetime
:param description: An optional verbose description of the Transform.
:type description: str
:ivar last_modified: The UTC date and time when the Transform was last updated, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype last_modified: ~datetime.datetime
:param outputs: An array of one or more TransformOutputs that the Transform should generate.
:type outputs: list[~azure.mgmt.media.models.TransformOutput]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'outputs': {'key': 'properties.outputs', 'type': '[TransformOutput]'},
}
def __init__(
self,
**kwargs
):
super(Transform, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.description = kwargs.get('description', None)
self.last_modified = None
self.outputs = kwargs.get('outputs', None)
class TransformCollection(msrest.serialization.Model):
"""A collection of Transform items.
:param value: A collection of Transform items.
:type value: list[~azure.mgmt.media.models.Transform]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Transform]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TransformCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class TransformOutput(msrest.serialization.Model):
"""Describes the properties of a TransformOutput, which are the rules to be applied while generating the desired output.
All required parameters must be populated in order to send to Azure.
:param on_error: A Transform can define more than one outputs. This property defines what the
service should do when one output fails - either continue to produce other outputs, or, stop
the other outputs. The overall Job state will not reflect failures of outputs that are
specified with 'ContinueJob'. The default is 'StopProcessingJob'. Possible values include:
"StopProcessingJob", "ContinueJob".
:type on_error: str or ~azure.mgmt.media.models.OnErrorType
:param relative_priority: Sets the relative priority of the TransformOutputs within a
Transform. This sets the priority that the service uses for processing TransformOutputs. The
default priority is Normal. Possible values include: "Low", "Normal", "High".
:type relative_priority: str or ~azure.mgmt.media.models.Priority
:param preset: Required. Preset that describes the operations that will be used to modify,
transcode, or extract insights from the source file to generate the output.
:type preset: ~azure.mgmt.media.models.Preset
"""
_validation = {
'preset': {'required': True},
}
_attribute_map = {
'on_error': {'key': 'onError', 'type': 'str'},
'relative_priority': {'key': 'relativePriority', 'type': 'str'},
'preset': {'key': 'preset', 'type': 'Preset'},
}
def __init__(
self,
**kwargs
):
super(TransformOutput, self).__init__(**kwargs)
self.on_error = kwargs.get('on_error', None)
self.relative_priority = kwargs.get('relative_priority', None)
self.preset = kwargs['preset']
class TransportStreamFormat(MultiBitrateFormat):
"""Describes the properties for generating an MPEG-2 Transport Stream (ISO/IEC 13818-1) output video file(s).
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
audio and video layer labels to be muxed together .
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
def __init__(
self,
**kwargs
):
super(TransportStreamFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.TransportStreamFormat' # type: str
class UtcClipTime(ClipTime):
"""Specifies the clip time as a Utc time position in the media file. The Utc time can point to a different position depending on whether the media file starts from a timestamp of zero or not.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param time: Required. The time position on the timeline of the input media based on Utc time.
:type time: ~datetime.datetime
"""
_validation = {
'odata_type': {'required': True},
'time': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'time': {'key': 'time', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(UtcClipTime, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.UtcClipTime' # type: str
self.time = kwargs['time']
class VideoAnalyzerPreset(AudioAnalyzerPreset):
"""A video analyzer preset that extracts insights (rich metadata) from both audio and video, and outputs a JSON format file.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param audio_language: The language for the audio payload in the input using the BCP-47 format
of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is
recommended that you specify it. The language must be specified explicitly for
AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If
the language isn't specified or set to null, automatic language detection will choose the first
language detected and process with the selected language for the duration of the file. It does
not currently support dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with clearly discernable
speech. If automatic detection fails to find the language, transcription would fallback to
'en-US'." The list of supported languages is available here:
https://go.microsoft.com/fwlink/?linkid=2109463.
:type audio_language: str
:param mode: Determines the set of audio analysis operations to be performed. If unspecified,
the Standard AudioAnalysisMode would be chosen. Possible values include: "Standard", "Basic".
:type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
:param insights_to_extract: Defines the type of insights that you want the service to generate.
The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default
is AllInsights. If you set this to AllInsights and the input is audio only, then only audio
insights are generated. Similarly if the input is video only, then only video insights are
generated. It is recommended that you not use AudioInsightsOnly if you expect some of your
inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio
only. Your Jobs in such conditions would error out. Possible values include:
"AudioInsightsOnly", "VideoInsightsOnly", "AllInsights".
:type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'audio_language': {'key': 'audioLanguage', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
'insights_to_extract': {'key': 'insightsToExtract', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoAnalyzerPreset' # type: str
self.insights_to_extract = kwargs.get('insights_to_extract', None)
class VideoOverlay(Overlay):
"""Describes the properties of a video overlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
:param position: The location in the input video where the overlay is applied.
:type position: ~azure.mgmt.media.models.Rectangle
:param opacity: The opacity of the overlay. This is a value in the range [0 - 1.0]. Default is
1.0 which mean the overlay is opaque.
:type opacity: float
:param crop_rectangle: An optional rectangular window used to crop the overlay image or video.
:type crop_rectangle: ~azure.mgmt.media.models.Rectangle
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
'position': {'key': 'position', 'type': 'Rectangle'},
'opacity': {'key': 'opacity', 'type': 'float'},
'crop_rectangle': {'key': 'cropRectangle', 'type': 'Rectangle'},
}
def __init__(
self,
**kwargs
):
super(VideoOverlay, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoOverlay' # type: str
self.position = kwargs.get('position', None)
self.opacity = kwargs.get('opacity', None)
self.crop_rectangle = kwargs.get('crop_rectangle', None)
|
Looking for a new pair genuine leather ankle boots with wedge heels? These are the perfect wedge heels snow boots to compliments any women's wedge heels outfits suitable for any occasion. See for yourself why these genuine leather wedge heels ankle boots have quickly become a favorite with our customers at Vinny's Digital Emporium.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import re
from aim import aim_manager
from aim.api import resource as aim_resource
from aim import context as aim_context
from aim import utils as aim_utils
from neutron import policy
from neutron_lib import constants as n_constants
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_utils import excutils
import six
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice._i18n import _
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.extensions import cisco_apic_gbp as aim_ext
from gbpservice.neutron.extensions import cisco_apic_l3
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
mechanism_driver as md)
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import nova_client
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_const)
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.drivers import (
neutron_resources as nrd)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_mapping_rpc as aim_rpc)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_validation)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping_lib as alib)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import config # noqa
from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin
LOG = logging.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s", __name__))
FORWARD = 'Forward'
REVERSE = 'Reverse'
FILTER_DIRECTIONS = {FORWARD: False, REVERSE: True}
FORWARD_FILTER_ENTRIES = 'Forward-FilterEntries'
REVERSE_FILTER_ENTRIES = 'Reverse-FilterEntries'
AUTO_PTG_NAME_PREFIX = 'ptg-for-l2p-%s'
# Note that this prefix should not exceede 4 characters
AUTO_PTG_PREFIX = 'auto'
AUTO_PTG_ID_PREFIX = AUTO_PTG_PREFIX + '%s'
# Definitions duplicated from apicapi lib
APIC_OWNED = 'apic_owned_'
CONTRACTS = 'contracts'
CONTRACT_SUBJECTS = 'contract_subjects'
FILTERS = 'filters'
FILTER_ENTRIES = 'filter_entries'
ENFORCED = aim_resource.EndpointGroup.POLICY_ENFORCED
UNENFORCED = aim_resource.EndpointGroup.POLICY_UNENFORCED
DEFAULT_SG_NAME = 'gbp_default'
COMMON_TENANT_AIM_RESOURCES = [aim_resource.Contract.__name__,
aim_resource.ContractSubject.__name__,
aim_resource.Filter.__name__,
aim_resource.FilterEntry.__name__]
# REVISIT: override add_router_interface L3 API check for now
NO_VALIDATE = cisco_apic_l3.OVERRIDE_NETWORK_ROUTING_TOPOLOGY_VALIDATION
class InvalidVrfForDualStackAddressScopes(exc.GroupPolicyBadRequest):
message = _("User-specified address scopes for both address families, "
"(IPv4 and IPv6) must use the same ACI VRF.")
class AutoPTGDeleteNotSupported(exc.GroupPolicyBadRequest):
message = _("Auto PTG %(id)s cannot be deleted.")
class ExplicitAPGAssociationNotSupportedForAutoPTG(
exc.GroupPolicyBadRequest):
message = _("Explicit APG association not supported for Auto PTG, "
"with AIM GBP driver")
class SharedAttributeUpdateNotSupported(exc.GroupPolicyBadRequest):
message = _("Resource shared attribute update not supported with AIM "
"GBP driver for resource of type %(type)s")
class IncorrectSubnetpoolUpdate(exc.GroupPolicyBadRequest):
message = _("Subnetpool %(subnetpool_id)s cannot be disassociated "
"from L3 Policy %(l3p_id)s since it has allocated subnet(s) "
"associated with that L3 Policy")
class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
"""AIM Mapping Orchestration driver.
This driver maps GBP resources to the ACI-Integration-Module (AIM).
"""
@log.log_method_call
def initialize(self):
LOG.info("APIC AIM Policy Driver initializing")
super(AIMMappingDriver, self).initialize()
self._apic_aim_mech_driver = None
self._apic_segmentation_label_driver = None
self._apic_allowed_vm_name_driver = None
self._aim = None
self._name_mapper = None
self.create_auto_ptg = cfg.CONF.aim_mapping.create_auto_ptg
if self.create_auto_ptg:
LOG.info('Auto PTG creation configuration set, '
'this will result in automatic creation of a PTG '
'per L2 Policy')
self.create_per_l3p_implicit_contracts = (
cfg.CONF.aim_mapping.create_per_l3p_implicit_contracts)
self.advertise_mtu = cfg.CONF.aim_mapping.advertise_mtu
if self.create_per_l3p_implicit_contracts:
LOG.info('Implicit AIM contracts will be created '
'for l3_policies which do not have them.')
self._create_per_l3p_implicit_contracts()
self._nested_host_vlan = (
cfg.CONF.aim_mapping.nested_host_vlan)
@log.log_method_call
def start_rpc_listeners(self):
return []
def validate_state(self, repair, resources, tenants):
mgr = aim_validation.ValidationManager()
return mgr.validate(repair, resources, tenants)
@property
def aim_mech_driver(self):
if not self._apic_aim_mech_driver:
self._apic_aim_mech_driver = (
self._core_plugin.mechanism_manager.mech_drivers[
'apic_aim'].obj)
return self._apic_aim_mech_driver
@property
def aim(self):
if not self._aim:
self._aim = self.aim_mech_driver.aim
return self._aim
@property
def name_mapper(self):
if not self._name_mapper:
self._name_mapper = self.aim_mech_driver.name_mapper
return self._name_mapper
@property
def apic_segmentation_label_driver(self):
if not self._apic_segmentation_label_driver:
ext_drivers = self.gbp_plugin.extension_manager.ordered_ext_drivers
for driver in ext_drivers:
if 'apic_segmentation_label' == driver.name:
self._apic_segmentation_label_driver = (
driver.obj)
break
return self._apic_segmentation_label_driver
@property
def apic_allowed_vm_name_driver(self):
if self._apic_allowed_vm_name_driver is False:
return False
if not self._apic_allowed_vm_name_driver:
ext_drivers = (self.gbp_plugin.extension_manager.
ordered_ext_drivers)
for driver in ext_drivers:
if 'apic_allowed_vm_name' == driver.name:
self._apic_allowed_vm_name_driver = driver.obj
break
if not self._apic_allowed_vm_name_driver:
self._apic_allowed_vm_name_driver = False
return self._apic_allowed_vm_name_driver
@log.log_method_call
def ensure_tenant(self, plugin_context, tenant_id):
self.aim_mech_driver.ensure_tenant(plugin_context, tenant_id)
def aim_display_name(self, name):
return aim_utils.sanitize_display_name(name)
def _use_implicit_address_scope(self, context, ip_version, **kwargs):
# Ensure ipv4 and ipv6 address scope have same vrf
kwargs = {}
if context.saved_scope_vrf:
kwargs.update({cisco_apic.DIST_NAMES: context.saved_scope_vrf})
address_scope = super(AIMMappingDriver,
self)._use_implicit_address_scope(context,
ip_version,
**kwargs)
context.saved_scope_vrf = address_scope[cisco_apic.DIST_NAMES]
return address_scope
# TODO(tbachman): remove once non-isomorphic address scopes
# are supported
def _validate_address_scopes(self, context):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['id'])
v4_scope_id = l3p_db['address_scope_v4_id']
v6_scope_id = l3p_db['address_scope_v6_id']
if v4_scope_id and v6_scope_id:
v4_scope = self._get_address_scope(
context._plugin_context, v4_scope_id)
v6_scope = self._get_address_scope(
context._plugin_context, v6_scope_id)
if (v4_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF] !=
v6_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF]):
raise InvalidVrfForDualStackAddressScopes()
@log.log_method_call
def create_l3_policy_precommit(self, context):
l3p_req = context.current
self._check_l3policy_ext_segment(context, l3p_req)
self._validate_address_scopes(context)
# REVISIT: Check if the following constraint still holds.
if len(l3p_req['routers']) > 1:
raise exc.L3PolicyMultipleRoutersNotSupported()
# REVISIT: Validate non overlapping IPs in the same tenant.
# Currently this validation is not required for the
# AIM driver, and since the AIM driver is the only
# driver inheriting from this driver, we are okay
# without the check.
self._reject_invalid_router_access(context)
@log.log_method_call
def create_l3_policy_postcommit(self, context):
l3p_req = context.current
# Save VRF DN from v4 family address scope, if implicitly created,
# as we will need to reuse it if we also implicitly create a v6
# address scopes.
context.saved_scope_vrf = None
self._create_l3p_subnetpools_postcommit(context)
# Reset the temporarily saved scope.
context.saved_scope_vrf = None
if not l3p_req['routers']:
self._use_implicit_router(context)
if not l3p_req['external_segments']:
self._use_implicit_external_segment(context)
external_segments = l3p_req['external_segments']
if external_segments:
self._plug_l3p_routers_to_ext_segment(context, l3p_req,
external_segments)
self._create_implicit_contracts(context, l3p_req)
@log.log_method_call
def update_l3_policy_precommit(self, context):
self._reject_shared_update(context, 'l3_policy')
if context.current['routers'] != context.original['routers']:
raise exc.L3PolicyRoutersUpdateNotSupported()
# Currently there is no support for router update in l3p update.
# Added this check just in case it is supported in future.
self._reject_invalid_router_access(context)
self._validate_in_use_by_nsp(context)
self._update_l3p_subnetpools_precommit(context)
self._check_l3policy_ext_segment(context, context.current)
# TODO(Sumit): For extra safety add validation for address_scope change
@log.log_method_call
def update_l3_policy_postcommit(self, context):
self._update_l3p_subnetpools_postcommit(context)
l3p_orig = context.original
l3p_curr = context.current
old_segment_dict = l3p_orig['external_segments']
new_segment_dict = l3p_curr['external_segments']
if (l3p_curr['external_segments'] !=
l3p_orig['external_segments']):
new_segments = set(new_segment_dict.keys())
old_segments = set(old_segment_dict.keys())
removed = old_segments - new_segments
self._unplug_l3p_routers_from_ext_segment(context,
l3p_curr,
removed)
added_dict = {s: new_segment_dict[s]
for s in (new_segments - old_segments)}
if added_dict:
self._plug_l3p_routers_to_ext_segment(context,
l3p_curr,
added_dict)
@log.log_method_call
def delete_l3_policy_precommit(self, context):
pass
@log.log_method_call
def delete_l3_policy_postcommit(self, context):
external_segments = context.current['external_segments']
if external_segments:
self._unplug_l3p_routers_from_ext_segment(
context, context.current, list(external_segments.keys()), True)
self._delete_l3p_subnetpools_postcommit(context)
for router_id in context.current['routers']:
self._cleanup_router(context._plugin_context, router_id)
self._delete_implicit_contracts(context, context.current)
@log.log_method_call
def get_l3_policy_status(self, context):
# Not all of the neutron resources that l3_policy maps to
# has a status attribute, hence we derive the status
# from the AIM resources that the neutron resources map to
session = context._plugin_context.session
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['id'])
mapped_aim_resources = []
# Note: Subnetpool is not mapped to any AIM resource, hence it is not
# considered for deriving the status
mapped_status = []
for ascp in self.L3P_ADDRESS_SCOPE_KEYS.values():
if l3p_db[ascp]:
ascp_id = l3p_db[ascp]
ascope = self._get_address_scope(
context._plugin_context, ascp_id)
vrf_dn = ascope[cisco_apic.DIST_NAMES][cisco_apic.VRF]
aim_vrf = self._get_vrf_by_dn(context, vrf_dn)
mapped_aim_resources.append(aim_vrf)
routers = [router.router_id for router in l3p_db.routers]
for router_id in routers:
# elevated context is used here to enable router retrieval in
# shared L3P cases wherein the call to get_l3_policy might be
# made in the context of a different tenant
router = self._get_router(
context._plugin_context.elevated(), router_id)
mapped_status.append(
{'status': self._map_ml2plus_status(router)})
mapped_status.append({'status': self._merge_aim_status(
session, mapped_aim_resources)})
context.current['status'] = self._merge_gbp_status(mapped_status)
@log.log_method_call
def create_l2_policy_precommit(self, context):
self._reject_invalid_network_access(context)
self._reject_non_shared_net_on_shared_l2p(context)
@log.log_method_call
def create_l2_policy_postcommit(self, context):
if not context.current['l3_policy_id']:
self._use_implicit_l3_policy(context)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
if not context.current['network_id']:
self._use_implicit_network(
context, address_scope_v4=l3p_db['address_scope_v4_id'],
address_scope_v6=l3p_db['address_scope_v6_id'])
l2p = context.current
net = self._get_network(context._plugin_context,
l2p['network_id'])
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
self._configure_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
if self.create_auto_ptg:
default_epg = self._get_epg_by_dn(context, default_epg_dn)
desc = "System created PTG for L2P (UUID: %s)" % l2p['id']
data = {
"id": self._get_auto_ptg_id(l2p['id']),
"name": self._get_auto_ptg_name(l2p),
"description": desc,
"l2_policy_id": l2p['id'],
"proxied_group_id": None,
"proxy_type": None,
"proxy_group_id": n_constants.ATTR_NOT_SPECIFIED,
"network_service_policy_id": None,
"service_management": False,
"shared": l2p['shared'],
"intra_ptg_allow":
self._map_policy_enforcement_pref(default_epg),
}
self._create_policy_target_group(context._plugin_context, data)
@log.log_method_call
def update_l2_policy_precommit(self, context):
super(AIMMappingDriver, self).update_l2_policy_precommit(context)
@log.log_method_call
def update_l2_policy_postcommit(self, context):
pass
@log.log_method_call
def delete_l2_policy_precommit(self, context):
l2p_id = context.current['id']
auto_ptg_id = self._get_auto_ptg_id(l2p_id)
try:
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
if auto_ptg['l2_policy_id']:
auto_ptg.update({'l2_policy_id': None})
except gpolicy.PolicyTargetGroupNotFound:
LOG.info("Auto PTG with ID %(id)s for "
"for L2P %(l2p)s not found. If create_auto_ptg "
"configuration was not set at the time of the L2P "
"creation, you can safely ignore this, else this "
"could potentially be indication of an error.",
{'id': auto_ptg_id, 'l2p': l2p_id})
@log.log_method_call
def delete_l2_policy_postcommit(self, context):
auto_ptg_id = self._get_auto_ptg_id(context.current['id'])
try:
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
subnet_ids = [assoc['subnet_id'] for assoc in auto_ptg.subnets]
router_ids = [assoc.router_id for assoc in l3p_db.routers]
context._plugin._remove_subnets_from_policy_target_group(
context._plugin_context, auto_ptg_id)
self._process_subnets_for_ptg_delete(
context, subnet_ids, router_ids)
# REVISIT: Consider calling the actual GBP plugin instead
# of it's base DB mixin class, eliminating the need to
# call _process_subnets_for_ptg_delete above.
self._db_plugin(
context._plugin).delete_policy_target_group(
context._plugin_context, auto_ptg['id'])
except gpolicy.PolicyTargetGroupNotFound:
# Logged in precommit.
pass
super(AIMMappingDriver, self).delete_l2_policy_postcommit(context)
@log.log_method_call
def get_l2_policy_status(self, context):
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, context.current['id'])
net = self._get_network(context._plugin_context,
l2p_db['network_id'])
if net:
context.current['status'] = net['status']
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, l2p_db['l3_policy_id'])
aim_resources = self._get_implicit_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
aim_resources_list = []
for k in aim_resources.keys():
if not aim_resources[k] or not all(
x for x in aim_resources[k]):
# We expected a AIM mapped resource but did not find
# it, so something seems to be wrong
context.current['status'] = gp_const.STATUS_ERROR
return
aim_resources_list.extend(aim_resources[k])
merged_aim_status = self._merge_aim_status(
context._plugin_context.session, aim_resources_list)
context.current['status'] = self._merge_gbp_status(
[context.current, {'status': merged_aim_status}])
else:
context.current['status'] = gp_const.STATUS_ERROR
@log.log_method_call
def create_policy_target_group_precommit(self, context):
if self._is_auto_ptg(context.current):
if context.current['application_policy_group_id']:
raise ExplicitAPGAssociationNotSupportedForAutoPTG()
return
if context.current['subnets']:
raise alib.ExplicitSubnetAssociationNotSupported()
@log.log_method_call
def create_policy_target_group_postcommit(self, context):
if self._is_auto_ptg(context.current):
self._use_implicit_subnet(context)
self._handle_create_network_service_policy(context)
return
if not context.current['l2_policy_id']:
self._use_implicit_l2_policy(context)
self._use_implicit_subnet(context)
self._handle_create_network_service_policy(context)
with db_api.CONTEXT_WRITER.using(context) as session:
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, context.current['l2_policy_id'])
net = self._get_network(
context._plugin_context, l2p_db['network_id'])
bd = self.aim_mech_driver.get_bd_for_network(session, net)
provided_contracts = self._get_aim_contract_names(
session, context.current['provided_policy_rule_sets'])
consumed_contracts = self._get_aim_contract_names(
session, context.current['consumed_policy_rule_sets'])
self._create_aim_ap_for_ptg_conditionally(context, context.current)
aim_epg = self._aim_endpoint_group(
session, context.current, bd.name, bd.tenant_name,
provided_contracts=provided_contracts,
consumed_contracts=consumed_contracts,
policy_enforcement_pref=(
self._get_policy_enforcement_pref(context.current)))
# AIM EPG will be persisted in the following call
self._add_implicit_svc_contracts_to_epg(context, l2p_db, aim_epg)
@log.log_method_call
def update_policy_target_group_precommit(self, context):
self._reject_shared_update(context, 'policy_target_group')
session = context._plugin_context.session
old_provided_contracts = self._get_aim_contract_names(
session, context.original['provided_policy_rule_sets'])
old_consumed_contracts = self._get_aim_contract_names(
session, context.original['consumed_policy_rule_sets'])
new_provided_contracts = self._get_aim_contract_names(
session, context.current['provided_policy_rule_sets'])
new_consumed_contracts = self._get_aim_contract_names(
session, context.current['consumed_policy_rule_sets'])
if (context.current['network_service_policy_id'] !=
context.original['network_service_policy_id']):
self._validate_nat_pool_for_nsp(context)
# The "original" version of the ptg is being used here since we
# want to retrieve the aim_epg based on the existing AP that is
# a part of its indentity
aim_epg = self._get_aim_endpoint_group(session, context.original)
if aim_epg:
if not self._is_auto_ptg(context.current):
aim_epg.display_name = (
self.aim_display_name(context.current['name']))
if (context.current['application_policy_group_id'] !=
context.original['application_policy_group_id']):
ap = self._create_aim_ap_for_ptg_conditionally(
context, context.current)
aim_epg = self._move_epg_to_new_ap(context, aim_epg, ap)
self._delete_aim_ap_for_ptg_conditionally(
context, context.original)
elif context.current['application_policy_group_id']:
raise ExplicitAPGAssociationNotSupportedForAutoPTG()
aim_epg.policy_enforcement_pref = (
self._get_policy_enforcement_pref(context.current))
aim_epg.provided_contract_names = (
list((set(aim_epg.provided_contract_names) -
set(old_provided_contracts)) |
set(new_provided_contracts)))
aim_epg.consumed_contract_names = (
list((set(aim_epg.consumed_contract_names) -
set(old_consumed_contracts)) |
set(new_consumed_contracts)))
self._add_contracts_for_epg(
aim_context.AimContext(session), aim_epg)
@log.log_method_call
def update_policy_target_group_postcommit(self, context):
if (context.current['network_service_policy_id'] !=
context.original['network_service_policy_id']):
self._handle_nsp_update_on_ptg(context)
if (context.current['application_policy_group_id'] !=
context.original['application_policy_group_id']):
ptargets = context._plugin.get_policy_targets(
context._plugin_context, {'policy_target_group_id':
[context.current['id']]})
for pt in ptargets:
self.aim_mech_driver._notify_port_update(
context._plugin_context, pt['port_id'])
@log.log_method_call
def delete_policy_target_group_precommit(self, context):
plugin_context = context._plugin_context
auto_ptg_id = self._get_auto_ptg_id(context.current['l2_policy_id'])
context.nsp_cleanup_ipaddress = self._get_ptg_policy_ipaddress_mapping(
context._plugin_context, context.current['id'])
context.nsp_cleanup_fips = self._get_ptg_policy_fip_mapping(
context._plugin_context, context.current['id'])
if context.current['id'] == auto_ptg_id:
raise AutoPTGDeleteNotSupported(id=context.current['id'])
ptg_db = context._plugin._get_policy_target_group(
plugin_context, context.current['id'])
context.subnet_ids = [assoc['subnet_id'] for assoc in ptg_db.subnets]
context.router_ids = [assoc.router_id for assoc in
ptg_db.l2_policy.l3_policy.routers]
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
epg = self._aim_endpoint_group(session, context.current)
self.aim.delete(aim_ctx, epg)
self._delete_aim_ap_for_ptg_conditionally(context, ptg_db)
if ptg_db['network_service_policy_id']:
# Also called from _cleanup_network_service_policy during
# postcommit, but needed during precommit to avoid foreign
# key constraint error.
self._delete_policy_ipaddress_mapping(plugin_context, ptg_db['id'])
@log.log_method_call
def delete_policy_target_group_postcommit(self, context):
self._process_subnets_for_ptg_delete(
context, context.subnet_ids, context.router_ids)
ptg = context.current.copy()
ptg['subnets'] = []
l2p_id = ptg['l2_policy_id']
if l2p_id:
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, l2p_id)
if not l2p_db['policy_target_groups'] or (
(len(l2p_db['policy_target_groups']) == 1) and (
self._is_auto_ptg(l2p_db['policy_target_groups'][0]))):
self._cleanup_l2_policy(context, l2p_id)
if ptg['network_service_policy_id']:
# REVISIT: Note that the RMD puts the following call in
# try/except block since in deployment it was observed
# that there are certain situations when the
# sa_exc.ObjectDeletedError is thrown.
self._cleanup_network_service_policy(
context, ptg, context.nsp_cleanup_ipaddress,
context.nsp_cleanup_fips)
@log.log_method_call
def extend_policy_target_group_dict(self, session, result):
epg = self._aim_endpoint_group(session, result)
if epg:
result[cisco_apic.DIST_NAMES] = {cisco_apic.EPG: epg.dn}
@log.log_method_call
def get_policy_target_group_status(self, context):
session = context._plugin_context.session
epg = self._aim_endpoint_group(session, context.current)
context.current['status'] = self._map_aim_status(session, epg)
@log.log_method_call
def create_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def create_application_policy_group_postcommit(self, context):
pass
@log.log_method_call
def update_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def update_application_policy_group_postcommit(self, context):
pass
@log.log_method_call
def delete_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def delete_application_policy_group_postcommit(self, context):
pass
def _get_application_profiles_mapped_to_apg(self, session, apg):
aim_ctx = aim_context.AimContext(session)
ap_name = self.apic_ap_name_for_application_policy_group(
session, apg['id'])
return self.aim.find(
aim_ctx, aim_resource.ApplicationProfile, name=ap_name)
@log.log_method_call
def extend_application_policy_group_dict(self, session, result):
aim_aps = self._get_application_profiles_mapped_to_apg(session, result)
dn_list = [ap.dn for ap in aim_aps]
result[cisco_apic.DIST_NAMES] = {cisco_apic.AP: dn_list}
@log.log_method_call
def get_application_policy_group_status(self, context):
session = context._plugin_context.session
aim_aps = self._get_application_profiles_mapped_to_apg(
session, context.current)
context.current['status'] = self._merge_aim_status(
context._plugin_context.session, aim_aps)
@log.log_method_call
def create_policy_target_precommit(self, context):
context.ptg = self._get_policy_target_group(
context._plugin_context, context.current['policy_target_group_id'])
policy.enforce(context._plugin_context, 'get_policy_target_group',
context.ptg, pluralized='policy_target_groups')
if context.current['port_id']:
# Explicit port case.
#
# REVISIT: Add port extension to specify the EPG so the
# mechanism driver can take care of domain association
# itself.
port_context = self.aim_mech_driver.make_port_context(
context._plugin_context, context.current['port_id'])
self.aim_mech_driver.associate_domain(port_context)
@log.log_method_call
def create_policy_target_postcommit(self, context):
if not context.current['port_id']:
# Implicit port case.
subnets = self._get_subnets(
context._plugin_context, {'id': context.ptg['subnets']})
self._use_implicit_port(context, subnets=subnets)
self._associate_fip_to_pt(context)
@log.log_method_call
def update_policy_target_precommit(self, context):
pass
@log.log_method_call
def update_policy_target_postcommit(self, context):
if self.apic_segmentation_label_driver and (
set(context.current['segmentation_labels']) != (
set(context.original['segmentation_labels']))):
self.aim_mech_driver._notify_port_update(
context._plugin_context, context.current['port_id'])
@log.log_method_call
def delete_policy_target_precommit(self, context):
if context.current.get('port_id'):
# REVISIT: Add port extension to specify the EPG so the
# mechanism driver can take care of domain association
# itself.
port_context = self.aim_mech_driver.make_port_context(
context._plugin_context, context.current['port_id'])
self.aim_mech_driver.disassociate_domain(port_context)
@log.log_method_call
def delete_policy_target_postcommit(self, context):
fips = self._get_pt_floating_ip_mapping(
context._plugin_context, context.current['id'])
for fip in fips:
self._delete_fip(context._plugin_context, fip.floatingip_id)
self._cleanup_port(
context._plugin_context, context.current.get('port_id'))
@log.log_method_call
def get_policy_target_status(self, context):
pass
@log.log_method_call
def create_policy_classifier_precommit(self, context):
pass
@log.log_method_call
def create_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def update_policy_classifier_precommit(self, context):
o_dir = context.original['direction']
c_dir = context.current['direction']
o_prot = context.original['protocol']
c_prot = context.current['protocol']
o_port_min, o_port_max = (
gpmdb.GroupPolicyMappingDbPlugin._get_min_max_ports_from_range(
context.original['port_range']))
c_port_min, c_port_max = (
gpmdb.GroupPolicyMappingDbPlugin._get_min_max_ports_from_range(
context.current['port_range']))
if ((o_dir == c_dir) and (o_prot == c_prot) and (
o_port_min == c_port_min) and (o_port_max == c_port_max)):
# none of the fields relevant to the aim_mapping have changed
# so no further processing is required
return
prules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'policy_classifier_id': [context.current['id']]})
if not prules:
# this policy_classifier has not yet been assocaited with
# a policy_rule and hence will not have any mapped aim
# resources
return
prule_ids = [x['id'] for x in prules]
prule_sets = self._get_prss_for_policy_rules(context, prule_ids)
for pr in prules:
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
# delete old filter_entries
self._delete_filter_entries_for_policy_rule(
session, aim_ctx, pr)
aim_filter = self._aim_filter(session, pr)
aim_reverse_filter = self._aim_filter(
session, pr, reverse_prefix=True)
entries = alib.get_filter_entries_for_policy_classifier(
context.current)
remove_aim_reverse_filter = None
if not entries['reverse_rules']:
# the updated classifier's protocol does not have
# reverse filter_entries
if self.aim.get(aim_ctx, aim_reverse_filter):
# so remove the older reverse filter if it exists
self.aim.delete(aim_ctx, aim_reverse_filter)
remove_aim_reverse_filter = aim_reverse_filter.name
# Unset the reverse filter name so that its not
# used in further processing
aim_reverse_filter.name = None
# create new filter_entries mapping to the updated
# classifier and associated with aim_filters
self._create_policy_rule_aim_mappings(
session, aim_ctx, pr, entries)
# update contract_subject to put the filter in the
# appropriate in/out buckets corresponding to the
# updated direction of the policy_classifier
if remove_aim_reverse_filter or (o_dir != c_dir):
for prs in prule_sets:
aim_contract_subject = self._get_aim_contract_subject(
session, prs)
# Remove the older reverse filter if needed
for filters in [aim_contract_subject.in_filters,
aim_contract_subject.out_filters]:
if remove_aim_reverse_filter in filters:
filters.remove(remove_aim_reverse_filter)
if o_dir != c_dir:
# First remove the filter from the older
# direction list
for flist in [aim_contract_subject.in_filters,
aim_contract_subject.out_filters]:
for fname in [aim_filter.name,
aim_reverse_filter.name]:
if fname in flist:
flist.remove(fname)
# Now add it to the relevant direction list(s)
if c_dir == g_const.GP_DIRECTION_IN:
aim_contract_subject.in_filters.append(
aim_filter.name)
aim_contract_subject.out_filters.append(
aim_reverse_filter.name)
elif c_dir == g_const.GP_DIRECTION_OUT:
aim_contract_subject.in_filters.append(
aim_reverse_filter.name)
aim_contract_subject.out_filters.append(
aim_filter.name)
else:
aim_contract_subject.in_filters.append(
aim_filter.name)
aim_contract_subject.out_filters.append(
aim_reverse_filter.name)
aim_contract_subject.in_filters.append(
aim_reverse_filter.name)
aim_contract_subject.out_filters.append(
aim_filter.name)
self.aim.create(aim_ctx, aim_contract_subject,
overwrite=True)
@log.log_method_call
def update_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_classifier_precommit(self, context):
pass
@log.log_method_call
def delete_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def get_policy_classifier_status(self, context):
pass
@log.log_method_call
def create_policy_action_precommit(self, context):
pass
@log.log_method_call
def create_policy_action_postcommit(self, context):
pass
@log.log_method_call
def update_policy_action_precommit(self, context):
pass
@log.log_method_call
def update_policy_action_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_action_precommit(self, context):
pass
@log.log_method_call
def delete_policy_action_postcommit(self, context):
pass
@log.log_method_call
def get_policy_action_status(self, context):
pass
@log.log_method_call
def create_policy_rule_precommit(self, context):
entries = alib.get_filter_entries_for_policy_rule(context)
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
self._create_policy_rule_aim_mappings(
session, aim_ctx, context.current, entries)
@log.log_method_call
def create_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def update_policy_rule_precommit(self, context):
self.delete_policy_rule_precommit(context)
self.create_policy_rule_precommit(context)
@log.log_method_call
def update_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_rule_precommit(self, context):
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
self._delete_filter_entries_for_policy_rule(session,
aim_ctx, context.current)
aim_filter = self._aim_filter(session, context.current)
aim_reverse_filter = self._aim_filter(
session, context.current, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
self.aim.delete(aim_ctx, afilter)
@log.log_method_call
def delete_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def extend_policy_rule_dict(self, session, result):
result[cisco_apic.DIST_NAMES] = {}
aim_filter_entries = self._get_aim_filter_entries(session, result)
for k, v in six.iteritems(aim_filter_entries):
dn_list = []
for entry in v:
dn_list.append(entry.dn)
if k == FORWARD:
result[cisco_apic.DIST_NAMES].update(
{aim_ext.FORWARD_FILTER_ENTRIES: dn_list})
else:
result[cisco_apic.DIST_NAMES].update(
{aim_ext.REVERSE_FILTER_ENTRIES: dn_list})
@log.log_method_call
def get_policy_rule_status(self, context):
session = context._plugin_context.session
aim_filters = self._get_aim_filters(session, context.current)
aim_filter_entries = self._get_aim_filter_entries(
session, context.current)
context.current['status'] = self._merge_aim_status(
session,
list(aim_filters.values()) + list(aim_filter_entries.values()))
@log.log_method_call
def create_policy_rule_set_precommit(self, context):
if context.current['child_policy_rule_sets']:
raise alib.HierarchicalContractsNotSupported()
aim_ctx = self._get_aim_context(context)
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
self.aim.create(aim_ctx, aim_contract)
rules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
self._populate_aim_contract_subject(context, aim_contract, rules)
@log.log_method_call
def create_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def update_policy_rule_set_precommit(self, context):
if context.current['child_policy_rule_sets']:
raise alib.HierarchicalContractsNotSupported()
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
rules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
self._populate_aim_contract_subject(
context, aim_contract, rules)
@log.log_method_call
def update_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_rule_set_precommit(self, context):
aim_ctx = self._get_aim_context(context)
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
self._delete_aim_contract_subject(aim_ctx, aim_contract)
self.aim.delete(aim_ctx, aim_contract)
@log.log_method_call
def delete_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def extend_policy_rule_set_dict(self, session, result):
result[cisco_apic.DIST_NAMES] = {}
aim_contract = self._aim_contract(session, result)
aim_contract_subject = self._aim_contract_subject(aim_contract)
result[cisco_apic.DIST_NAMES].update(
{aim_ext.CONTRACT: aim_contract.dn,
aim_ext.CONTRACT_SUBJECT: aim_contract_subject.dn})
@log.log_method_call
def get_policy_rule_set_status(self, context):
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
aim_contract_subject = self._aim_contract_subject(aim_contract)
context.current['status'] = self._merge_aim_status(
session, [aim_contract, aim_contract_subject])
@log.log_method_call
def create_external_segment_precommit(self, context):
self._validate_default_external_segment(context)
if not context.current['subnet_id']:
raise exc.ImplicitSubnetNotSupported()
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
network = self._get_network(context._plugin_context,
subnet['network_id'])
if not network['router:external']:
raise exc.InvalidSubnetForES(sub_id=subnet['id'],
net_id=network['id'])
db_es = context._plugin._get_external_segment(
context._plugin_context, context.current['id'])
db_es.cidr = subnet['cidr']
db_es.ip_version = subnet['ip_version']
context.current['cidr'] = db_es.cidr
context.current['ip_version'] = db_es.ip_version
context.network_id = subnet['network_id']
@log.log_method_call
def create_external_segment_postcommit(self, context):
cidrs = sorted([x['destination']
for x in context.current['external_routes']])
self._update_network(context._plugin_context,
context.network_id,
{cisco_apic.EXTERNAL_CIDRS: cidrs})
@log.log_method_call
def update_external_segment_precommit(self, context):
# REVISIT: what other attributes should we prevent an update on?
invalid = ['port_address_translation']
for attr in invalid:
if context.current[attr] != context.original[attr]:
raise exc.InvalidAttributeUpdateForES(attribute=attr)
@log.log_method_call
def update_external_segment_postcommit(self, context):
old_cidrs = sorted([x['destination']
for x in context.original['external_routes']])
new_cidrs = sorted([x['destination']
for x in context.current['external_routes']])
if old_cidrs != new_cidrs:
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
self._update_network(context._plugin_context,
subnet['network_id'],
{cisco_apic.EXTERNAL_CIDRS: new_cidrs})
@log.log_method_call
def delete_external_segment_precommit(self, context):
pass
@log.log_method_call
def delete_external_segment_postcommit(self, context):
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
self._update_network(context._plugin_context,
subnet['network_id'],
{cisco_apic.EXTERNAL_CIDRS: ['0.0.0.0/0']})
@log.log_method_call
def get_external_segment_status(self, context):
pass
@log.log_method_call
def create_external_policy_precommit(self, context):
self._check_external_policy(context, context.current)
@log.log_method_call
def create_external_policy_postcommit(self, context):
if not context.current['external_segments']:
self._use_implicit_external_segment(context)
routers = self._get_ext_policy_routers(context,
context.current, context.current['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, context.current)
@log.log_method_call
def update_external_policy_precommit(self, context):
self._check_external_policy(context, context.current)
@log.log_method_call
def update_external_policy_postcommit(self, context):
ep = context.current
old_ep = context.original
removed_segments = (set(old_ep['external_segments']) -
set(ep['external_segments']))
added_segment = (set(ep['external_segments']) -
set(old_ep['external_segments']))
if removed_segments:
routers = self._get_ext_policy_routers(context, ep,
removed_segments)
for r in routers:
self._set_router_ext_contracts(context, r, None)
if (added_segment or
sorted(old_ep['provided_policy_rule_sets']) !=
sorted(ep['provided_policy_rule_sets']) or
sorted(old_ep['consumed_policy_rule_sets']) !=
sorted(ep['consumed_policy_rule_sets'])):
routers = self._get_ext_policy_routers(context, ep,
ep['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, ep)
@log.log_method_call
def delete_external_policy_precommit(self, context):
pass
@log.log_method_call
def delete_external_policy_postcommit(self, context):
routers = self._get_ext_policy_routers(context,
context.current, context.current['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, None)
@log.log_method_call
def get_external_policy_status(self, context):
pass
@log.log_method_call
def create_network_service_policy_precommit(self, context):
self._validate_nsp_parameters(context)
@log.log_method_call
def create_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def update_network_service_policy_precommit(self, context):
self._validate_nsp_parameters(context)
@log.log_method_call
def update_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def delete_network_service_policy_precommit(self, context):
pass
@log.log_method_call
def delete_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def get_network_service_policy_status(self, context):
pass
@log.log_method_call
def create_nat_pool_precommit(self, context):
self._add_nat_pool_to_segment(context)
@log.log_method_call
def create_nat_pool_postcommit(self, context):
self._add_implicit_subnet_for_nat_pool_create(context)
@log.log_method_call
def update_nat_pool_precommit(self, context):
self._process_ext_segment_update_for_nat_pool(context)
@log.log_method_call
def update_nat_pool_postcommit(self, context):
self._add_implicit_subnet_for_nat_pool_update(context)
@log.log_method_call
def delete_nat_pool_precommit(self, context):
self._nat_pool_in_use(context)
@log.log_method_call
def delete_nat_pool_postcommit(self, context):
self._delete_subnet_on_nat_pool_delete(context)
@log.log_method_call
def get_nat_pool_status(self, context):
pass
# REVISIT: Called by mechanism driver during port
# binding. Consider replacing with a more general hook for the PD
# to participate in port binding. Or consider removing/replacing
# this feature, since VM names should not effect behavior.
def check_allow_vm_names(self, context, port):
ok_to_bind = True
ptg, pt = self._port_id_to_ptg(context._plugin_context, port['id'])
# enforce the allowed_vm_names rules if possible
if (ptg and port['device_id'] and
self.apic_allowed_vm_name_driver):
l2p = self._get_l2_policy(context._plugin_context,
ptg['l2_policy_id'])
l3p = self.gbp_plugin.get_l3_policy(
context._plugin_context, l2p['l3_policy_id'])
if l3p.get('allowed_vm_names'):
ok_to_bind = False
vm = nova_client.NovaClient().get_server(port['device_id'])
for allowed_vm_name in l3p['allowed_vm_names']:
match = re.search(allowed_vm_name, vm.name)
if match:
ok_to_bind = True
break
if not ok_to_bind:
LOG.warning("Failed to bind the port due to "
"allowed_vm_names rules %(rules)s "
"for VM: %(vm)s",
{'rules': l3p['allowed_vm_names'],
'vm': vm.name})
return ok_to_bind
# REVISIT: Called by mechanism driver when disassociating a
# domain. Consider a more general way for neutron ports to be
# bound using a non-default EPG.
def get_ptg_port_ids(self, context, ptg):
pts = self.gbp_plugin.get_policy_targets(
context, {'id': ptg['policy_targets']})
return [x['port_id'] for x in pts]
def _reject_shared_update(self, context, type):
if context.original.get('shared') != context.current.get('shared'):
raise SharedAttributeUpdateNotSupported(type=type)
def _aim_tenant_name(self, session, tenant_id, aim_resource_class=None,
gbp_resource=None, gbp_obj=None):
if aim_resource_class and (
aim_resource_class.__name__ in COMMON_TENANT_AIM_RESOURCES):
# COMMON_TENANT_AIM_RESOURCES will always be created in the
# ACI common tenant
aim_ctx = aim_context.AimContext(session)
self.aim_mech_driver._ensure_common_tenant(aim_ctx)
tenant_name = md.COMMON_TENANT_NAME
else:
l3p_id = None
if aim_resource_class.__name__ == (
aim_resource.EndpointGroup.__name__):
# the gbp_obj here should be a ptg
l2p_id = gbp_obj['l2_policy_id']
if l2p_id:
query = BAKERY(lambda s: s.query(
gpmdb.L2PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l2p_id'))
l2p_db = query(session).params(
l2p_id=l2p_id).first()
l3p_id = l2p_db['l3_policy_id']
elif aim_resource_class.__name__ == (
aim_resource.BridgeDomain.__name__):
# the gbp_obj here should be a l2p
l3p_id = gbp_obj['l3_policy_id']
if l3p_id:
query = BAKERY(lambda s: s.query(
gpmdb.L3PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l3p_id'))
l3p_db = query(session).params(
l3p_id=l3p_id).first()
tenant_id = l3p_db['tenant_id']
tenant_name = self.name_mapper.project(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(apic_name)s",
{'id': tenant_id, 'apic_name': tenant_name})
return tenant_name
def _aim_application_profile_for_ptg(self, context, ptg):
# This returns a new AIM ApplicationProfile resource if apg_id
# is set, else returns None
apg_id = ptg['application_policy_group_id']
if apg_id:
apg = context._plugin._get_application_policy_group(
context._plugin_context, apg_id)
return self._aim_application_profile(
context._plugin_context.session, apg)
def _aim_application_profile(self, session, apg):
# This returns a new AIM ApplicationProfile resource
tenant_id = apg['tenant_id']
tenant_name = self._aim_tenant_name(
session, tenant_id,
aim_resource_class=aim_resource.ApplicationProfile, gbp_obj=apg)
display_name = self.aim_display_name(apg['name'])
ap_name = self.apic_ap_name_for_application_policy_group(
session, apg['id'])
ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
display_name=display_name,
name=ap_name)
LOG.debug("Mapped apg_id %(id)s with name %(name)s to %(apic_name)s",
{'id': apg['id'], 'name': display_name,
'apic_name': ap_name})
return ap
def _get_aim_application_profile_for_ptg(self, context, ptg):
# This gets an AP from the AIM DB
ap = self._aim_application_profile_for_ptg(context, ptg)
if ap:
return self._get_aim_application_profile_from_db(
context._plugin_context.session, ap)
def _get_aim_application_profile_from_db(self, session, ap):
aim_ctx = aim_context.AimContext(session)
ap_fetched = self.aim.get(aim_ctx, ap)
if not ap_fetched:
LOG.debug("No ApplicationProfile found in AIM DB")
else:
LOG.debug("Got ApplicationProfile: %s", ap_fetched.__dict__)
return ap_fetched
def _create_aim_ap_for_ptg_conditionally(self, context, ptg):
if ptg and ptg['application_policy_group_id'] and not (
self._get_aim_application_profile_for_ptg(context, ptg)):
ap = self._aim_application_profile_for_ptg(context, ptg)
aim_ctx = aim_context.AimContext(context._plugin_context.session)
self.aim.create(aim_ctx, ap)
return ap
def _move_epg_to_new_ap(self, context, old_epg, new_ap):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
self.aim.delete(aim_ctx, old_epg)
old_epg.app_profile_name = (
self.apic_ap_name_for_application_policy_group(
session, context.current['application_policy_group_id']))
self.aim.create(aim_ctx, old_epg)
return old_epg
def _delete_aim_ap_for_ptg_conditionally(self, context, ptg):
# It is assumed that this method is called after the EPG corresponding
# to the PTG has been deleted in AIM
if ptg and ptg['application_policy_group_id']:
ap = self._aim_application_profile_for_ptg(context, ptg)
apg_id = ptg['application_policy_group_id']
apg_db = context._plugin._get_application_policy_group(
context._plugin_context, apg_id)
if not apg_db['policy_target_groups'] or (
len(apg_db['policy_target_groups']) == 1 and (
apg_db['policy_target_groups'][0]['id'] == ptg['id'])):
# We lazily create the ApplicationProfile, so we delete
# it when the last PTG associated with this APG is deleted
aim_ctx = aim_context.AimContext(
context._plugin_context.session)
self.aim.delete(aim_ctx, ap)
# REVISIT: Called by mechanism driver when associating or
# disassociating a domain. Consider a more general way for neutron
# ports to be bound using a non-default EPG.
def _aim_endpoint_group(self, session, ptg, bd_name=None,
bd_tenant_name=None,
provided_contracts=None,
consumed_contracts=None,
policy_enforcement_pref=UNENFORCED):
# This returns a new AIM EPG resource
tenant_id = ptg['tenant_id']
tenant_name = self._aim_tenant_name(
session, tenant_id, aim_resource_class=aim_resource.EndpointGroup,
gbp_obj=ptg)
id = ptg['id']
name = ptg['name']
display_name = self.aim_display_name(ptg['name'])
ap_name = self.apic_ap_name_for_application_policy_group(
session, ptg['application_policy_group_id'])
epg_name = self.apic_epg_name_for_policy_target_group(
session, id, name)
LOG.debug("Using application_profile %(ap_name)s "
"for epg %(epg_name)s",
{'ap_name': ap_name, 'epg_name': epg_name})
LOG.debug("Mapped ptg_id %(id)s with name %(name)s to %(apic_name)s",
{'id': id, 'name': name, 'apic_name': epg_name})
kwargs = {'tenant_name': str(tenant_name),
'name': str(epg_name),
'display_name': display_name,
'app_profile_name': ap_name,
'policy_enforcement_pref': policy_enforcement_pref}
if bd_name:
kwargs['bd_name'] = bd_name
if bd_tenant_name:
kwargs['bd_tenant_name'] = bd_tenant_name
if provided_contracts:
kwargs['provided_contract_names'] = provided_contracts
if consumed_contracts:
kwargs['consumed_contract_names'] = consumed_contracts
epg = aim_resource.EndpointGroup(**kwargs)
return epg
def _get_aim_endpoint_group(self, session, ptg):
# This gets an EPG from the AIM DB
epg = self._aim_endpoint_group(session, ptg)
aim_ctx = aim_context.AimContext(session)
epg_fetched = self.aim.get(aim_ctx, epg)
if not epg_fetched:
LOG.debug("No EPG found in AIM DB")
else:
LOG.debug("Got epg: %s", vars(epg_fetched))
return epg_fetched
def _aim_filter(self, session, pr, reverse_prefix=False):
# This returns a new AIM Filter resource
tenant_id = pr['tenant_id']
tenant_name = self._aim_tenant_name(session, tenant_id,
aim_resource.Filter)
id = pr['id']
name = pr['name']
display_name = self.aim_display_name(pr['name'])
if reverse_prefix:
filter_name = self.name_mapper.policy_rule(
session, id, prefix=alib.REVERSE_PREFIX)
else:
filter_name = self.name_mapper.policy_rule(session, id)
LOG.debug("Mapped policy_rule_id %(id)s with name %(name)s to"
"%(apic_name)s",
{'id': id, 'name': name, 'apic_name': filter_name})
kwargs = {'tenant_name': str(tenant_name),
'name': str(filter_name),
'display_name': display_name}
aim_filter = aim_resource.Filter(**kwargs)
return aim_filter
def _aim_filter_entry(self, session, aim_filter, filter_entry_name,
filter_entry_attrs):
# This returns a new AIM FilterEntry resource
tenant_name = aim_filter.tenant_name
filter_name = aim_filter.name
display_name = self.aim_display_name(filter_name)
kwargs = {'tenant_name': tenant_name,
'filter_name': filter_name,
'name': filter_entry_name,
'display_name': display_name}
kwargs.update(filter_entry_attrs)
aim_filter_entry = aim_resource.FilterEntry(**kwargs)
return aim_filter_entry
def _create_policy_rule_aim_mappings(
self, session, aim_context, pr, entries):
if entries['forward_rules']:
aim_filter = self._aim_filter(session, pr)
self.aim.create(aim_context, aim_filter, overwrite=True)
self._create_aim_filter_entries(session, aim_context, aim_filter,
entries['forward_rules'])
if entries['reverse_rules']:
# Also create reverse rule
aim_filter = self._aim_filter(session, pr,
reverse_prefix=True)
self.aim.create(aim_context, aim_filter, overwrite=True)
self._create_aim_filter_entries(
session, aim_context, aim_filter, entries['reverse_rules'])
def _delete_aim_filter_entries(self, aim_context, aim_filter):
aim_filter_entries = self.aim.find(
aim_context, aim_resource.FilterEntry,
tenant_name=aim_filter.tenant_name,
filter_name=aim_filter.name)
for entry in aim_filter_entries:
self.aim.delete(aim_context, entry)
def _delete_filter_entries_for_policy_rule(self, session, aim_context, pr):
aim_filter = self._aim_filter(session, pr)
aim_reverse_filter = self._aim_filter(
session, pr, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
self._delete_aim_filter_entries(aim_context, afilter)
def _create_aim_filter_entries(self, session, aim_ctx, aim_filter,
filter_entries):
for k, v in six.iteritems(filter_entries):
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v)
def _create_aim_filter_entry(self, session, aim_ctx, aim_filter,
filter_entry_name, filter_entry_attrs,
overwrite=False):
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, filter_entry_name,
alib.map_to_aim_filter_entry(filter_entry_attrs))
self.aim.create(aim_ctx, aim_filter_entry, overwrite)
def _get_aim_filters(self, session, policy_rule):
# This gets the Forward and Reverse Filters from the AIM DB
aim_ctx = aim_context.AimContext(session)
filters = {}
for k, v in six.iteritems(FILTER_DIRECTIONS):
aim_filter = self._aim_filter(session, policy_rule, v)
aim_filter_fetched = self.aim.get(aim_ctx, aim_filter)
if not aim_filter_fetched:
LOG.debug("No %s Filter found in AIM DB", k)
else:
LOG.debug("Got Filter: %s", vars(aim_filter_fetched))
filters[k] = aim_filter_fetched
return filters
def _get_aim_filter_names(self, session, policy_rule):
# Forward and Reverse AIM Filter names for a Policy Rule
aim_filters = self._get_aim_filters(session, policy_rule)
aim_filter_names = [f.name for f in aim_filters.values() if f]
return aim_filter_names
def _get_aim_filter_entries(self, session, policy_rule):
# This gets the Forward and Reverse FilterEntries from the AIM DB
aim_ctx = aim_context.AimContext(session)
filters = self._get_aim_filters(session, policy_rule)
filters_entries = {}
for k, v in six.iteritems(filters):
if v:
aim_filter_entries = self.aim.find(
aim_ctx, aim_resource.FilterEntry,
tenant_name=v.tenant_name, filter_name=v.name)
if not aim_filter_entries:
LOG.debug("No %s FilterEntry found in AIM DB", k)
else:
LOG.debug("Got FilterEntry: %s", str(aim_filter_entries))
filters_entries[k] = aim_filter_entries
return filters_entries
def _aim_contract(self, session, policy_rule_set):
# This returns a new AIM Contract resource
return aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, policy_rule_set['tenant_id'], aim_resource.Contract),
name=self.name_mapper.policy_rule_set(
session, policy_rule_set['id']),
display_name=policy_rule_set['name'])
def _aim_contract_subject(self, aim_contract, in_filters=None,
out_filters=None, bi_filters=None):
# This returns a new AIM ContractSubject resource
if not in_filters:
in_filters = []
if not out_filters:
out_filters = []
if not bi_filters:
bi_filters = []
display_name = self.aim_display_name(aim_contract.name)
# Since we create one ContractSubject per Contract,
# ContractSubject is given the Contract name
kwargs = {'tenant_name': aim_contract.tenant_name,
'contract_name': aim_contract.name,
'name': aim_contract.name,
'display_name': display_name,
'in_filters': in_filters,
'out_filters': out_filters,
'bi_filters': bi_filters}
aim_contract_subject = aim_resource.ContractSubject(**kwargs)
return aim_contract_subject
def _populate_aim_contract_subject(self, context, aim_contract,
policy_rules):
in_filters, out_filters = [], []
session = context._plugin_context.session
for rule in policy_rules:
aim_filters = self._get_aim_filter_names(session, rule)
classifier = context._plugin.get_policy_classifier(
context._plugin_context, rule['policy_classifier_id'])
if classifier['direction'] == g_const.GP_DIRECTION_IN:
for fltr in aim_filters:
if fltr.startswith(alib.REVERSE_PREFIX):
out_filters.append(fltr)
else:
in_filters.append(fltr)
elif classifier['direction'] == g_const.GP_DIRECTION_OUT:
for fltr in aim_filters:
if fltr.startswith(alib.REVERSE_PREFIX):
in_filters.append(fltr)
else:
out_filters.append(fltr)
else:
in_filters += aim_filters
out_filters += aim_filters
self._populate_aim_contract_subject_by_filters(
context, aim_contract, in_filters, out_filters)
def _populate_aim_contract_subject_by_filters(
self, context, aim_contract, in_filters=None, out_filters=None,
bi_filters=None):
if not in_filters:
in_filters = []
if not out_filters:
out_filters = []
if not bi_filters:
bi_filters = []
aim_ctx = self._get_aim_context(context)
aim_contract_subject = self._aim_contract_subject(
aim_contract, in_filters, out_filters, bi_filters)
self.aim.create(aim_ctx, aim_contract_subject, overwrite=True)
def _get_aim_contract_names(self, session, prs_id_list):
contract_list = []
for prs_id in prs_id_list:
contract_name = self.name_mapper.policy_rule_set(session, prs_id)
contract_list.append(contract_name)
return contract_list
def _get_aim_contract_subject(self, session, policy_rule_set):
# This gets a ContractSubject from the AIM DB
aim_ctx = aim_context.AimContext(session)
contract = self._aim_contract(session, policy_rule_set)
contract_subject = self._aim_contract_subject(contract)
contract_subject_fetched = self.aim.get(aim_ctx, contract_subject)
if not contract_subject_fetched:
LOG.debug("No Contract found in AIM DB")
else:
LOG.debug("Got ContractSubject: %s",
vars(contract_subject_fetched))
return contract_subject_fetched
def _delete_aim_contract_subject(self, aim_context, aim_contract):
aim_contract_subject = self._aim_contract_subject(aim_contract)
self.aim.delete(aim_context, aim_contract_subject)
def _get_aim_default_endpoint_group(self, session, network):
return self.aim_mech_driver.get_epg_for_network(session, network)
def _get_l2p_subnets(self, context, l2p_id):
plugin_context = context._plugin_context
l2p = context._plugin.get_l2_policy(plugin_context, l2p_id)
# REVISIT: The following should be a get_subnets call via local API
return self._core_plugin.get_subnets_by_network(
plugin_context, l2p['network_id'])
def _sync_ptg_subnets(self, context, l2p):
l2p_subnets = [x['id'] for x in
self._get_l2p_subnets(context, l2p['id'])]
ptgs = context._plugin._get_policy_target_groups(
context._plugin_context.elevated(), {'l2_policy_id': [l2p['id']]})
for sub in l2p_subnets:
# Add to PTG
for ptg in ptgs:
if sub not in ptg['subnets']:
try:
(context._plugin.
_add_subnet_to_policy_target_group(
context._plugin_context.elevated(),
ptg['id'], sub))
except gpolicy.PolicyTargetGroupNotFound as e:
LOG.warning(e)
def _use_implicit_subnet(self, context, force_add=False):
"""Implicit subnet for AIM.
The first PTG in a L2P will allocate a new subnet from the L3P.
Any subsequent PTG in the same L2P will use the same subnet.
Additional subnets will be allocated as and when the currently used
subnet runs out of IP addresses.
"""
l2p_id = context.current['l2_policy_id']
with lockutils.lock(l2p_id, external=True):
subs = self._get_l2p_subnets(context, l2p_id)
subs = set([x['id'] for x in subs])
added = []
if not subs or force_add:
l2p = context._plugin.get_l2_policy(
context._plugin_context, l2p_id)
name = APIC_OWNED + l2p['name']
added = super(
AIMMappingDriver,
self)._use_implicit_subnet_from_subnetpool(
context, subnet_specifics={'name': name})
context.add_subnets(subs - set(context.current['subnets']))
if added:
self._sync_ptg_subnets(context, l2p)
l3p = self._get_l3p_for_l2policy(context, l2p_id)
for r in l3p['routers']:
self._attach_router_to_subnets(context._plugin_context,
r, added)
def _create_implicit_contracts(self, context, l3p):
self._process_contracts_for_default_epg(context, l3p)
def _configure_contracts_for_default_epg(self, context, l3p, epg_dn):
self._process_contracts_for_default_epg(
context, l3p, epg_dn=epg_dn, create=False, delete=False)
def _delete_implicit_contracts(self, context, l3p):
self._process_contracts_for_default_epg(
context, l3p, epg_dn=None, create=False, delete=True)
def _get_implicit_contracts_for_default_epg(
self, context, l3p, epg_dn):
return self._process_contracts_for_default_epg(
context, l3p, epg_dn=epg_dn, get=True)
def _process_contracts_for_default_epg(
self, context, l3p, epg_dn=None, create=True, delete=False, get=False):
# get=True overrides the create and delete cases, and returns a dict
# with the Contracts, ContractSubjects, Filters, and FilterEntries
# for the default EPG
# create=True, delete=False means create everything and add Contracts
# to the default EPG
# create=False, delete=False means only add Contracts to the default
# EPG
# create=False, delete=True means only remove Contracts from the
# default EPG and delete them
# create=True, delete=True is not a valid combination
if create and delete:
LOG.error("Incorrect use of internal method "
"_process_contracts_for_default_epg(), create and "
"delete cannot be True at the same time")
raise
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
# Infra Services' FilterEntries and attributes
infra_entries = alib.get_service_contract_filter_entries()
# ARP FilterEntry and attributes
arp_entries = alib.get_arp_filter_entry()
contracts = {alib.SERVICE_PREFIX: infra_entries,
alib.IMPLICIT_PREFIX: arp_entries}
for contract_name_prefix, entries in six.iteritems(contracts):
contract_name = self.name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
# Create Contract (one per l3_policy)
aim_contract = aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Contract),
name=contract_name, display_name=contract_name)
if get:
aim_resources = {}
aim_resources[FILTERS] = []
aim_resources[FILTER_ENTRIES] = []
aim_resources[CONTRACT_SUBJECTS] = []
contract_fetched = self.aim.get(aim_ctx, aim_contract)
aim_resources[CONTRACTS] = [contract_fetched]
else:
if create:
self.aim.create(aim_ctx, aim_contract, overwrite=True)
if not delete and epg_dn:
aim_epg = self.aim.get(
aim_ctx, aim_resource.EndpointGroup.from_dn(epg_dn))
# Add Contracts to the default EPG
if contract_name_prefix == alib.IMPLICIT_PREFIX:
# Default EPG provides and consumes ARP Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name],
consumed_contracts=[contract_name])
else:
# Default EPG provides Infra Services' Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name])
continue
filter_names = []
for k, v in six.iteritems(entries):
filter_name = self.name_mapper.l3_policy(
session, l3p['id'],
prefix=''.join([contract_name_prefix, k, '-']))
# Create Filter (one per l3_policy)
aim_filter = aim_resource.Filter(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Filter),
name=filter_name, display_name=filter_name)
if get:
filter_fetched = self.aim.get(aim_ctx, aim_filter)
aim_resources[FILTERS].append(filter_fetched)
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, k,
alib.map_to_aim_filter_entry(v))
entry_fetched = self.aim.get(aim_ctx, aim_filter_entry)
aim_resources[FILTER_ENTRIES].append(entry_fetched)
else:
if create:
self.aim.create(aim_ctx, aim_filter, overwrite=True)
# Create FilterEntries (one per l3_policy) and
# associate with Filter
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v, overwrite=True)
filter_names.append(aim_filter.name)
if delete:
self._delete_aim_filter_entries(aim_ctx, aim_filter)
self.aim.delete(aim_ctx, aim_filter)
if get:
aim_contract_subject = self._aim_contract_subject(aim_contract)
subject_fetched = self.aim.get(aim_ctx, aim_contract_subject)
aim_resources[CONTRACT_SUBJECTS].append(subject_fetched)
return aim_resources
else:
if create:
# Create ContractSubject (one per l3_policy) with relevant
# Filters, and associate with Contract
self._populate_aim_contract_subject_by_filters(
context, aim_contract, bi_filters=filter_names)
if delete:
self._delete_aim_contract_subject(aim_ctx, aim_contract)
self.aim.delete(aim_ctx, aim_contract)
def _add_implicit_svc_contracts_to_epg(self, context, l2p, aim_epg):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
implicit_contract_name = self.name_mapper.l3_policy(
session, l2p['l3_policy_id'], prefix=alib.IMPLICIT_PREFIX)
service_contract_name = self.name_mapper.l3_policy(
session, l2p['l3_policy_id'], prefix=alib.SERVICE_PREFIX)
self._add_contracts_for_epg(aim_ctx, aim_epg,
provided_contracts=[implicit_contract_name],
consumed_contracts=[implicit_contract_name, service_contract_name])
def _add_contracts_for_epg(self, aim_ctx, aim_epg, provided_contracts=None,
consumed_contracts=None):
if provided_contracts:
aim_epg.provided_contract_names += provided_contracts
if consumed_contracts:
aim_epg.consumed_contract_names += consumed_contracts
self.aim.create(aim_ctx, aim_epg, overwrite=True)
def _merge_gbp_status(self, gbp_resource_list):
merged_status = gp_const.STATUS_ACTIVE
for gbp_resource in gbp_resource_list:
if gbp_resource['status'] == gp_const.STATUS_BUILD:
merged_status = gp_const.STATUS_BUILD
elif gbp_resource['status'] == gp_const.STATUS_ERROR:
merged_status = gp_const.STATUS_ERROR
break
return merged_status
def _map_ml2plus_status(self, sync_status):
if not sync_status:
# REVIST(Sumit)
return gp_const.STATUS_BUILD
if sync_status == cisco_apic.SYNC_ERROR:
return gp_const.STATUS_ERROR
elif sync_status == cisco_apic.SYNC_BUILD:
return gp_const.STATUS_BUILD
else:
return gp_const.STATUS_ACTIVE
def _process_subnets_for_ptg_delete(self, context, subnet_ids, router_ids):
plugin_context = context._plugin_context
if subnet_ids:
for subnet_id in subnet_ids:
# Clean-up subnet if this is the last PTG using the L2P.
if not context._plugin._get_ptgs_for_subnet(
plugin_context, subnet_id):
for router_id in router_ids:
# If the subnet interface for this router has
# already been removed (say manually), the
# call to Neutron's remove_router_interface
# will cause the transaction to exit immediately.
# To avoid this, we first check if this subnet
# still has an interface on this router.
if self._get_router_interface_port_by_subnet(
plugin_context, router_id, subnet_id):
self._detach_router_from_subnets(
plugin_context, router_id, [subnet_id])
self._cleanup_subnet(plugin_context, subnet_id)
def _map_aim_status(self, session, aim_resource_obj):
# Note that this implementation assumes that this driver
# is the only policy driver configured, and no merging
# with any previous status is required.
aim_ctx = aim_context.AimContext(session)
aim_status = self.aim.get_status(
aim_ctx, aim_resource_obj, create_if_absent=False)
if not aim_status:
# REVIST(Sumit)
return gp_const.STATUS_BUILD
if aim_status.is_error():
return gp_const.STATUS_ERROR
elif aim_status.is_build():
return gp_const.STATUS_BUILD
else:
return gp_const.STATUS_ACTIVE
def _merge_aim_status(self, session, aim_resource_obj_list):
# Note that this implementation assumes that this driver
# is the only policy driver configured, and no merging
# with any previous status is required.
# When merging states of multiple AIM objects, the status
# priority is ERROR > BUILD > ACTIVE.
merged_status = gp_const.STATUS_ACTIVE
for aim_obj in aim_resource_obj_list:
status = self._map_aim_status(session, aim_obj)
if status != gp_const.STATUS_ACTIVE:
merged_status = status
if merged_status == gp_const.STATUS_ERROR:
break
return merged_status
def _db_plugin(self, plugin_obj):
return super(gbp_plugin.GroupPolicyPlugin, plugin_obj)
def _get_aim_context(self, context):
if hasattr(context, 'session'):
session = context.session
else:
session = context._plugin_context.session
return aim_context.AimContext(session)
# REVISIT: Called by mechanism driver when binding a port using
# DVS. Consider a more general way for neutron ports to be bound
# using a non-default EPG.
def _get_port_epg(self, plugin_context, port):
ptg, pt = self._port_id_to_ptg(plugin_context, port['id'])
if ptg:
# TODO(Kent): optimize this also for GBP workflow?
return self._get_aim_endpoint_group(plugin_context.session, ptg)
else:
# Return default EPG based on network
network = self._get_network(plugin_context, port['network_id'])
epg = self._get_aim_default_endpoint_group(plugin_context.session,
network)
if not epg:
# Something is wrong, default EPG doesn't exist.
# TODO(ivar): should rise an exception
LOG.error("Default EPG doesn't exist for "
"port %s", port['id'])
return epg
def _get_vrf_by_dn(self, context, vrf_dn):
aim_context = self._get_aim_context(context)
vrf = self.aim.get(
aim_context, aim_resource.VRF.from_dn(vrf_dn))
return vrf
def _check_l3policy_ext_segment(self, context, l3policy):
if l3policy['external_segments']:
for allocations in l3policy['external_segments'].values():
if len(allocations) > 1:
raise alib.OnlyOneAddressIsAllowedPerExternalSegment()
# if NAT is disabled, allow only one L3P per ES
ess = context._plugin.get_external_segments(
context._plugin_context,
filters={'id': list(l3policy['external_segments'].keys())})
for es in ess:
ext_net = self._ext_segment_2_ext_network(context, es)
if (ext_net and
ext_net.get(cisco_apic.NAT_TYPE) in
('distributed', 'edge')):
continue
if [x for x in es['l3_policies'] if x != l3policy['id']]:
raise alib.OnlyOneL3PolicyIsAllowedPerExternalSegment()
def _check_external_policy(self, context, ep):
if ep.get('shared', False):
# REVISIT(amitbose) This could be relaxed
raise alib.SharedExternalPolicyUnsupported()
ess = context._plugin.get_external_segments(
context._plugin_context,
filters={'id': ep['external_segments']})
for es in ess:
other_eps = context._plugin.get_external_policies(
context._plugin_context,
filters={'id': es['external_policies'],
'tenant_id': [ep['tenant_id']]})
if [x for x in other_eps if x['id'] != ep['id']]:
raise alib.MultipleExternalPoliciesForL3Policy()
def _get_l3p_subnets(self, context, l3policy):
l2p_sn = []
for l2p_id in l3policy['l2_policies']:
l2p_sn.extend(self._get_l2p_subnets(context, l2p_id))
return l2p_sn
def _ext_segment_2_ext_network(self, context, ext_segment):
subnet = self._get_subnet(context._plugin_context,
ext_segment['subnet_id'])
if subnet:
return self._get_network(context._plugin_context,
subnet['network_id'])
def _map_ext_segment_to_routers(self, context, ext_segments,
routers):
net_to_router = {r['external_gateway_info']['network_id']: r
for r in routers
if r.get('external_gateway_info')}
result = {}
for es in ext_segments:
sn = self._get_subnet(context._plugin_context, es['subnet_id'])
router = net_to_router.get(sn['network_id']) if sn else None
if router:
result[es['id']] = router
return result
def _plug_l3p_routers_to_ext_segment(self, context, l3policy,
ext_seg_info):
plugin_context = context._plugin_context
es_list = self._get_external_segments(plugin_context,
filters={'id': list(ext_seg_info.keys())})
l3p_subs = self._get_l3p_subnets(context, l3policy)
# REVISIT: We are not re-using the first router created
# implicitly for the L3Policy (or provided explicitly by the
# user). Consider using that for the first external segment
for es in es_list:
router_id = self._use_implicit_router(context,
router_name=l3policy['name'] + '-' + es['name'])
router = self._create_router_gw_for_external_segment(
context._plugin_context, es, ext_seg_info, router_id)
if not ext_seg_info[es['id']] or not ext_seg_info[es['id']][0]:
# Update L3P assigned address
efi = router['external_gateway_info']['external_fixed_ips']
assigned_ips = [x['ip_address'] for x in efi
if x['subnet_id'] == es['subnet_id']]
context.set_external_fixed_ips(es['id'], assigned_ips)
if es['external_policies']:
ext_policy = self._get_external_policies(plugin_context,
filters={'id': es['external_policies'],
'tenant_id': [l3policy['tenant_id']]})
if ext_policy:
self._set_router_ext_contracts(context, router_id,
ext_policy[0])
self._attach_router_to_subnets(plugin_context, router_id, l3p_subs)
def _unplug_l3p_routers_from_ext_segment(self, context, l3policy,
ext_seg_ids, deleting=False):
plugin_context = context._plugin_context
es_list = self._get_external_segments(plugin_context,
filters={'id': ext_seg_ids})
routers = self._get_routers(plugin_context,
filters={'id': l3policy['routers']})
es_2_router = self._map_ext_segment_to_routers(context, es_list,
routers)
for r in es_2_router.values():
router_subs = self._get_router_interface_subnets(plugin_context,
r['id'])
self._detach_router_from_subnets(plugin_context, r['id'],
router_subs)
if not deleting:
context.remove_router(r['id'])
self._cleanup_router(plugin_context, r['id'])
def _get_router_interface_subnets(self, plugin_context, router_id):
router_ports = self._get_ports(plugin_context,
filters={'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router_id]})
return set(y['subnet_id']
for x in router_ports for y in x['fixed_ips'])
def _get_router_interface_port_by_subnet(self, plugin_context,
router_id, subnet_id):
router_ports = self._get_ports(plugin_context,
filters={'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router_id],
'fixed_ips': {'subnet_id': [subnet_id]}})
return (router_ports or [None])[0]
def _attach_router_to_subnets(self, plugin_context, router_id, subs):
# On account of sharing configuration, the router and subnets might
# be in different tenants, hence we need to use admin context
plugin_context = plugin_context.elevated()
rtr_sn = self._get_router_interface_subnets(plugin_context, router_id)
for subnet in subs:
if subnet['id'] in rtr_sn: # already attached
continue
gw_port = self._get_ports(plugin_context,
filters={'fixed_ips': {'ip_address': [subnet['gateway_ip']],
'subnet_id': [subnet['id']]}})
if gw_port:
# Gateway port is in use, create new interface port
attrs = {'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'fixed_ips': [{'subnet_id': subnet['id']}],
'device_id': '',
'device_owner': n_constants.DEVICE_OWNER_ROUTER_INTF,
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'name': '%s-%s' % (router_id, subnet['id']),
'admin_state_up': True}
try:
intf_port = self._create_port(plugin_context, attrs)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create explicit router '
'interface port in subnet '
'%(subnet)s',
{'subnet': subnet['id']})
interface_info = {'port_id': intf_port['id'],
NO_VALIDATE: True}
try:
self._add_router_interface(plugin_context, router_id,
interface_info)
except n_exc.BadRequest:
self._delete_port(plugin_context, intf_port['id'])
with excutils.save_and_reraise_exception():
LOG.exception('Attaching router %(router)s to '
'%(subnet)s with explicit port '
'%(port) failed',
{'subnet': subnet['id'],
'router': router_id,
'port': intf_port['id']})
else:
self._plug_router_to_subnet(plugin_context, subnet['id'],
router_id)
def _plug_router_to_subnet(self, plugin_context, subnet_id, router_id):
interface_info = {'subnet_id': subnet_id,
NO_VALIDATE: True}
if router_id:
try:
self._add_router_interface(plugin_context, router_id,
interface_info)
except n_exc.BadRequest as e:
LOG.exception("Adding subnet to router failed, exception:"
"%s", e)
raise exc.GroupPolicyInternalError()
def _detach_router_from_subnets(self, plugin_context, router_id, sn_ids):
for subnet_id in sn_ids:
# Use admin context because router and subnet may be in
# different tenants
self._remove_router_interface(plugin_context.elevated(),
router_id,
{'subnet_id': subnet_id})
def _set_router_ext_contracts(self, context, router_id, ext_policy):
session = context._plugin_context.session
prov = []
cons = []
if ext_policy:
prov = self._get_aim_contract_names(session,
ext_policy['provided_policy_rule_sets'])
cons = self._get_aim_contract_names(session,
ext_policy['consumed_policy_rule_sets'])
attr = {cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS: prov,
cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS: cons}
self._update_router(context._plugin_context, router_id, attr)
def _get_ext_policy_routers(self, context, ext_policy, ext_seg_ids):
plugin_context = context._plugin_context
es = self._get_external_segments(plugin_context,
filters={'id': ext_seg_ids})
subs = self._get_subnets(context._plugin_context,
filters={'id': [e['subnet_id'] for e in es]})
ext_net = {s['network_id'] for s in subs}
l3ps = set([l3p for e in es for l3p in e['l3_policies']])
l3ps = self._get_l3_policies(plugin_context,
filters={'id': l3ps,
'tenant_id': [ext_policy['tenant_id']]})
routers = self._get_routers(plugin_context,
filters={'id': [r for l in l3ps for r in l['routers']]})
return [r['id'] for r in routers
if (r['external_gateway_info'] or {}).get('network_id') in ext_net]
def _get_auto_ptg_name(self, l2p):
return AUTO_PTG_NAME_PREFIX % l2p['id']
def _get_auto_ptg_id(self, l2p_id):
if l2p_id:
return AUTO_PTG_ID_PREFIX % hashlib.md5(
l2p_id.encode('utf-8')).hexdigest()
def _is_auto_ptg(self, ptg):
return ptg['id'].startswith(AUTO_PTG_PREFIX)
def _get_policy_enforcement_pref(self, ptg):
if ptg['intra_ptg_allow']:
policy_enforcement_pref = UNENFORCED
else:
policy_enforcement_pref = ENFORCED
return policy_enforcement_pref
def _map_policy_enforcement_pref(self, epg):
if epg.policy_enforcement_pref == UNENFORCED:
return True
else:
return False
def _get_epg_by_dn(self, context, epg_dn):
aim_context = self._get_aim_context(context)
epg = self.aim.get(
aim_context, aim_resource.EndpointGroup.from_dn(epg_dn))
return epg
def _get_epg_name_from_dn(self, context, epg_dn):
aim_context = self._get_aim_context(context)
default_epg_name = self.aim.get(
aim_context, aim_resource.EndpointGroup.from_dn(epg_dn)).name
return default_epg_name
def apic_epg_name_for_policy_target_group(self, session, ptg_id,
name=None, context=None):
if not context:
context = gbp_utils.get_current_context()
# get_network can do a DB write, hence we use a writer
with db_api.CONTEXT_WRITER.using(context):
query = BAKERY(lambda s: s.query(
gpmdb.PolicyTargetGroupMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('ptg_id'))
ptg_db = query(session).params(
ptg_id=ptg_id).first()
if ptg_db and self._is_auto_ptg(ptg_db):
query = BAKERY(lambda s: s.query(
gpmdb.L2PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l2p_id'))
l2p_db = query(session).params(
l2p_id=ptg_db['l2_policy_id']).first()
network_id = l2p_db['network_id']
admin_context = n_context.get_admin_context()
net = self._get_network(admin_context, network_id)
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
default_epg_name = self._get_epg_name_from_dn(
admin_context, default_epg_dn)
return default_epg_name
else:
return ptg_id
def apic_ap_name_for_application_policy_group(self, session, apg_id):
if apg_id:
return self.name_mapper.application_policy_group(
session, apg_id)
else:
return self.aim_mech_driver.ap_name
def _get_default_security_group(self, plugin_context, ptg_id,
tenant_id):
filters = {'name': [DEFAULT_SG_NAME], 'tenant_id': [tenant_id]}
default_group = self._get_sgs(plugin_context, filters)
return default_group[0]['id'] if default_group else None
def _create_default_security_group(self, plugin_context, tenant_id):
# Allow all
sg_id = self._get_default_security_group(plugin_context, '', tenant_id)
ip_v = [(n_constants.IPv4, '0.0.0.0/0'), (n_constants.IPv6, '::/0')]
if not sg_id:
sg_name = DEFAULT_SG_NAME
sg = self._create_gbp_sg(plugin_context, tenant_id, sg_name,
description='default GBP security group')
sg_id = sg['id']
for v, g in ip_v:
self._sg_rule(plugin_context, tenant_id, sg_id,
'ingress', cidr=g, ethertype=v)
self._sg_rule(plugin_context, tenant_id, sg_id,
'egress', cidr=g, ethertype=v)
def _use_implicit_port(self, context, subnets=None):
self._create_default_security_group(context._plugin_context,
context.current['tenant_id'])
super(AIMMappingDriver, self)._use_implicit_port(
context, subnets=subnets)
def _handle_create_network_service_policy(self, context):
self._validate_nat_pool_for_nsp(context)
self._handle_network_service_policy(context)
def _get_prss_for_policy_rules(self, context, pr_ids):
if not pr_ids:
return []
query = BAKERY(lambda s: s.query(
gpdb.PolicyRuleSet))
query += lambda q: q.join(
gpdb.PRSToPRAssociation,
gpdb.PRSToPRAssociation.policy_rule_set_id ==
gpdb.PolicyRuleSet.id)
query += lambda q: q.join(
gpdb.PolicyRule,
gpdb.PRSToPRAssociation.policy_rule_id == gpdb.PolicyRule.id)
query += lambda q: q.filter(
gpdb.PolicyRule.id.in_(sa.bindparam('pr_ids', expanding=True)))
return [self._get_policy_rule_set(
context._plugin_context, x['id']) for x in (
query(context._plugin_context.session).params(
pr_ids=pr_ids).all())]
def _create_per_l3p_implicit_contracts(self):
admin_context = n_context.get_admin_context()
context = type('', (object,), {})()
context._plugin_context = admin_context
session = admin_context.session
aim_ctx = aim_context.AimContext(session)
contract_name_prefix = list(alib.get_service_contract_filter_entries(
).keys())[0]
query = BAKERY(lambda s: s.query(
gpmdb.L3PolicyMapping))
l3ps = query(session).all()
name_mapper = apic_mapper.APICNameMapper()
aim_mgr = aim_manager.AimManager()
self._aim = aim_mgr
self._name_mapper = name_mapper
orig_aim_tenant_name = self._aim_tenant_name
def _aim_tenant_name(self, session, tenant_id, aim_resource_class=None,
gbp_resource=None, gbp_obj=None):
attrs = aim_resource.Tenant(
name=md.COMMON_TENANT_NAME, display_name='')
tenant = aim_mgr.get(aim_ctx, attrs)
if not tenant:
tenant = aim_mgr.create(aim_ctx, attrs)
return md.COMMON_TENANT_NAME
self._aim_tenant_name = _aim_tenant_name
for l3p in l3ps:
implicit_contract_name = name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
if not aim_mgr.find(
aim_ctx, aim_resource.Contract,
name=implicit_contract_name):
self._create_implicit_contracts(context, l3p)
self._aim = None
self._name_mapper = None
self._aim_tenant_name = orig_aim_tenant_name
def validate_neutron_mapping(self, mgr):
# REVISIT: Implement.
pass
def validate_aim_mapping(self, mgr):
# REVISIT: Register all AIM resource types used by GBP mapping
# but not the Neutron mapping.
# REVISIT: Register DB tables to be validated.
# Determine expected AIM resources and DB records for each
# GBP resource type.
self._validate_l3_policies(mgr)
self._validate_l2_policies(mgr)
self._validate_policy_target_groups(mgr)
self._validate_policy_targets(mgr)
self._validate_application_policy_groups(mgr)
self._validate_policy_classifiers(mgr)
self._validate_policy_rule_sets(mgr)
self._validate_external_segments(mgr)
self._validate_external_policies(mgr)
# REVISIT: Do any of the following top-level GBP resources map
# to or effect AIM resources: NetworkServicePolicy,
# PolicyAction, NATPool?
def _validate_l3_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.L3Policy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for L3P not yet implemented")
def _validate_l2_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.L2Policy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for L2P not yet implemented")
def _validate_policy_target_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyTargetGroup))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PTG not yet implemented")
def _validate_policy_targets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyTarget))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PT not yet implemented")
def _validate_application_policy_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.ApplicationPolicyGroup))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for APG not yet implemented")
def _validate_policy_classifiers(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyClassifier))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PC not yet implemented")
def _validate_policy_rule_sets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyRuleSet))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PRS not yet implemented")
def _validate_external_segments(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources. This should probably be called from
# validate_neutron_mapping rather than validate_aim_mapping,
# since external_routes maps to the cisco_apic.EXTERNAL_CIDRS
# network extension.
query = BAKERY(lambda s: s.query(
gpdb.ExternalSegment))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for ES not yet implemented")
def _validate_external_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.ExternalPolicy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for EP not yet implemented")
|
Shop online for all the top brands of miniature spirit drinks in 5cl / 50ml, mini bar size bottles, for sale in the UK, at bottom prices. These miniature bottles of spirits, brandy, cognac, gin, rum, tequila, vodka and whiskies, are a great alternative to buying full size bottles as it allows you to offer your guests a selection of mini beverages with no wastage. So whatever your drink choice, shop with the confidence knowing that Just A Glass is where to buy the best spirit miniatures at the most competitive prices online!
Please note: that you must be at least 18 years of age to shop for miniature spirits in the UK, which are normally the equivalent of a double pub measure, and 40% ABV. So please drink sensibly!
|
#!/usr/bin/env python2
import sys
import os
import unicodecsv as csv
input_filename = sys.argv[1]
output_filename = os.path.splitext(input_filename)[0] + ".csv"
start = False
lines = []
items = []
header = [
"Unit price",
"Qty",
"RS Stock No.",
"Manufacturer Part Number",
"Manufacturer",
"Description",
]
with open(input_filename, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter='\t')
for line in csvreader:
line = [l.strip() for l in line]
if start:
lines.append(line)
elif "RS Stock No." in line and len(line) == 9:
start = True
for i in range(len(lines)):
if len(lines[i]) == 7 and len(lines[i+1]) == 2 and len(lines[i+2]) == 3:
item = [
lines[i+2][1], # Unit price
lines[i][0], # Qty
lines[i][1], # RS Stock No.
lines[i][2], # Manufacturer Part Number
lines[i][3], # Manufacturer
lines[i][5], # Description
]
items.append(item)
with open(output_filename, 'wb') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(header)
csvwriter.writerows(items)
|
The FA is undertaking some research to understand the perspectives of those who are currently, or have been involved in 11 vs 11 football teams across different age groups. This is being done to obtain a deep understanding of player attitudes, perceptions and behaviours in the affiliated 11 v 11 game so that we can look to improve participation.
The discussion will last no longer than 90 minutes and all participants will receive £30 to cover any travel costs.
|
import steel
COMPRESSION_TYPES = (
(0, 'No compression'),
(1, '8-bit RLE'),
(2, '4-bit RLE'),
(3, 'Bit Field'),
(4, 'JPEG'), # Generally not supported for screen display
(5, 'PNG'), # Generally not supported for screen display
)
class PaletteColor(steel.Structure):
blue = steel.Integer(size=1)
green = steel.Integer(size=1)
red = steel.Integer(size=1)
alpha = steel.Integer(size=1)
def __str__(self):
return '#%x%x%x%x' % (self.red, self.green, self.blue, self.alpha)
class BMP(steel.Structure, endianness=steel.LittleEndian):
signature = steel.FixedString('BM')
filesize = steel.Integer('Total file size', size=4)
steel.Reserved(size=4)
data_offset = steel.Integer('Offset of the actual image data', size=4)
header_size = steel.Integer(size=4, default=40)
width = steel.Integer(size=4)
height = steel.Integer(size=4)
plane_count = steel.Integer(size=2, default=1)
bit_depth = steel.Integer(size=2)
compression_type = steel.Integer(size=4, choices=COMPRESSION_TYPES, default=0)
data_size = steel.Integer('Size of the actual image data', size=4)
ppm_x = steel.Integer('Pixels per meter (X axis)', size=4)
ppm_y = steel.Integer('Pixels per meter (Y axis)', size=4)
color_count = steel.Integer('Number of colors', size=4)
important_color_count = steel.Integer('Number of important colors', size=4)
palette = steel.List(PaletteColor, size=color_count)
pixel_data = steel.Bytes(size=steel.Remainder)
if __name__ == '__main__':
import sys
bmp = BMP(open(sys.argv[1], 'rb'))
print('%s x %s' % (bmp.width, bmp.height))
|
One of the things I love about training in anything is that usually there is always something positive to take away from it. At work I’m currently doing a course, Growing As A Manager, not exactly thrilling stuff but I’ve identified the type of manager I am and what the downfalls are of being a diplomat lol. At House of Glamdolls not only did I leave with some solid technical knowledge to move forward with but I also left with renewed confidence in my skills as a makeup artist. We could all do with a confidence boost every now and then and I’m truly excited for what the future could hold for me and my little business.
That’s enough about me for the moment, I’ve discovered an AMAZING product for reducing under eye contour and puffiness!!!
It’s a small bottle of magic from The Ordinary. I’m renaming it my Benjamin Button product. All you need to do do is dab a small amount, morning and night, underneath your eyes. I actually look like I slept well last night (I didn’t) and I’ve only used it once! Another amazing plus for this is it £5.99, if I had the choice of spending my last £6 of the month on some Flat Iron Steak or this……. I may well pick this, let’s just hope that I never have to make that decision.
Work work has been hard going this week. Trying desperately to prove myself an effective manager to be able to pass my probation. Problem is, since having my first child I’ve not really had to turn my brain back on, I’ve been coasting, on autopilot if you will, so it was a bit of a shock to the system to realise that I can’t carry on like that if I want to succeed. I know I’m more than capable if I put my mind to it. Time to brush off my Personal Development Plan and put my big girl pants on.
Anyway, I’m off to eat a steak.
|
# Unix SMB/CIFS implementation. Tests for smb manipulation
# Copyright (C) David Mulder <dmulder@suse.com> 2018
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, grp, pwd
import errno
from samba import gpo, tests
from samba.gpclass import register_gp_extension, list_gp_extensions, \
unregister_gp_extension, GPOStorage
from samba.param import LoadParm
from samba.gpclass import check_refresh_gpo_list, check_safe_path, \
check_guid, parse_gpext_conf, atomic_write_conf, get_deleted_gpos_list
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile, TemporaryDirectory
from samba.gp_sec_ext import gp_krb_ext, gp_access_ext
from samba.gp_scripts_ext import gp_scripts_ext
from samba.gp_sudoers_ext import gp_sudoers_ext
from samba.vgp_sudoers_ext import vgp_sudoers_ext
from samba.vgp_symlink_ext import vgp_symlink_ext
from samba.gpclass import gp_inf_ext
from samba.gp_smb_conf_ext import gp_smb_conf_ext
from samba.vgp_files_ext import vgp_files_ext
from samba.vgp_openssh_ext import vgp_openssh_ext
from samba.vgp_startup_scripts_ext import vgp_startup_scripts_ext
from samba.vgp_motd_ext import vgp_motd_ext
from samba.vgp_issue_ext import vgp_issue_ext
from samba.vgp_access_ext import vgp_access_ext
from samba.gp_gnome_settings_ext import gp_gnome_settings_ext
import logging
from samba.credentials import Credentials
from samba.gp_msgs_ext import gp_msgs_ext
from samba.common import get_bytes
from samba.dcerpc import preg
from samba.ndr import ndr_pack
import codecs
from shutil import copyfile
import xml.etree.ElementTree as etree
import hashlib
from samba.gp_parse.gp_pol import GPPolParser
from glob import glob
from configparser import ConfigParser
realm = os.environ.get('REALM')
policies = realm + '/POLICIES'
realm = realm.lower()
poldir = r'\\{0}\sysvol\{0}\Policies'.format(realm)
# the first part of the base DN varies by testenv. Work it out from the realm
base_dn = 'DC={0},DC=samba,DC=example,DC=com'.format(realm.split('.')[0])
dspath = 'CN=Policies,CN=System,' + base_dn
gpt_data = '[General]\nVersion=%d'
gnome_test_reg_pol = \
b"""
<?xml version="1.0" encoding="utf-8"?>
<PolFile num_entries="26" signature="PReg" version="1">
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Lock Down Enabled Extensions</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Lock Down Specific Settings</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Printing</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable File Saving</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Command-Line Access</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disallow Login Using a Fingerprint</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable User Logout</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable User Switching</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Repartitioning</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Whitelisted Online Accounts</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Compose Key</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Dim Screen when User is Idle</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Enabled Extensions</ValueName>
<Value>1</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Compose Key</Key>
<ValueName>Key Name</ValueName>
<Value>Right Alt</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
<ValueName>Delay</ValueName>
<Value>300</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
<ValueName>Dim Idle Brightness</ValueName>
<Value>30</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>myextension1@myname.example.com</ValueName>
<Value>myextension1@myname.example.com</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>myextension2@myname.example.com</ValueName>
<Value>myextension2@myname.example.com</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/picture-uri</ValueName>
<Value>/org/gnome/desktop/background/picture-uri</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/picture-options</ValueName>
<Value>/org/gnome/desktop/background/picture-options</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/primary-color</ValueName>
<Value>/org/gnome/desktop/background/primary-color</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/secondary-color</ValueName>
<Value>/org/gnome/desktop/background/secondary-color</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
<ValueName>google</ValueName>
<Value>google</Value>
</Entry>
</PolFile>
"""
def days2rel_nttime(val):
seconds = 60
minutes = 60
hours = 24
sam_add = 10000000
return -(val * seconds * minutes * hours * sam_add)
def gpupdate(lp, arg):
gpupdate = lp.get('gpo update command')
gpupdate.append(arg)
p = Popen(gpupdate, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = p.communicate()
return p.returncode
def gpupdate_force(lp):
return gpupdate(lp, '--force')
def gpupdate_unapply(lp):
return gpupdate(lp, '--unapply')
def rsop(lp):
return gpupdate(lp, '--rsop')
def stage_file(path, data):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(dirname)):
return False
if os.path.exists(path):
os.rename(path, '%s.bak' % path)
with NamedTemporaryFile(delete=False, dir=os.path.dirname(path)) as f:
f.write(get_bytes(data))
os.rename(f.name, path)
os.chmod(path, 0o644)
return True
def unstage_file(path):
backup = '%s.bak' % path
if os.path.exists(backup):
os.rename(backup, path)
elif os.path.exists(path):
os.remove(path)
class GPOTests(tests.TestCase):
def setUp(self):
super(GPOTests, self).setUp()
self.server = os.environ["SERVER"]
self.dc_account = self.server.upper() + '$'
self.lp = LoadParm()
self.lp.load_default()
self.creds = self.insta_creds(template=self.get_credentials())
def tearDown(self):
super(GPOTests, self).tearDown()
def test_gpo_list(self):
global poldir, dspath
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.creds.get_username())
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
names = ['Local Policy', guid]
file_sys_paths = [None, '%s\\%s' % (poldir, guid)]
ds_paths = [None, 'CN=%s,%s' % (guid, dspath)]
for i in range(0, len(gpos)):
self.assertEqual(gpos[i].name, names[i],
'The gpo name did not match expected name %s' % gpos[i].name)
self.assertEqual(gpos[i].file_sys_path, file_sys_paths[i],
'file_sys_path did not match expected %s' % gpos[i].file_sys_path)
self.assertEqual(gpos[i].ds_path, ds_paths[i],
'ds_path did not match expected %s' % gpos[i].ds_path)
def test_gpo_ads_does_not_segfault(self):
try:
ads = gpo.ADS_STRUCT(self.server, 42, self.creds)
except:
pass
def test_gpt_version(self):
global gpt_data
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
gpo_path = os.path.join(local_path, policies, guid)
old_vers = gpo.gpo_get_sysvol_gpt_version(gpo_path)[1]
with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
gpt.write(gpt_data % 42)
self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], 42,
'gpo_get_sysvol_gpt_version() did not return the expected version')
with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
gpt.write(gpt_data % old_vers)
self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], old_vers,
'gpo_get_sysvol_gpt_version() did not return the expected version')
def test_check_refresh_gpo_list(self):
cache = self.lp.cache_path('gpo_cache')
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.creds.get_username())
check_refresh_gpo_list(self.server, self.lp, self.creds, gpos)
self.assertTrue(os.path.exists(cache),
'GPO cache %s was not created' % cache)
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
gpt_ini = os.path.join(cache, policies,
guid, 'GPT.INI')
self.assertTrue(os.path.exists(gpt_ini),
'GPT.INI was not cached for %s' % guid)
def test_check_refresh_gpo_list_malicious_paths(self):
# the path cannot contain ..
path = '/usr/local/samba/var/locks/sysvol/../../../../../../root/'
self.assertRaises(OSError, check_safe_path, path)
self.assertEqual(check_safe_path('/etc/passwd'), 'etc/passwd')
self.assertEqual(check_safe_path('\\\\etc/\\passwd'), 'etc/passwd')
# there should be no backslashes used to delineate paths
before = 'sysvol/' + realm + '\\Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
after = realm + '/Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
result = check_safe_path(before)
self.assertEqual(result, after, 'check_safe_path() didn\'t'
' correctly convert \\ to /')
def test_check_safe_path_typesafe_name(self):
path = '\\\\toady.suse.de\\SysVol\\toady.suse.de\\Policies\\' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
expected_path = 'toady.suse.de/Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
result = check_safe_path(path)
self.assertEqual(result, expected_path,
'check_safe_path unable to detect variable case sysvol components')
def test_gpt_ext_register(self):
this_path = os.path.dirname(os.path.realpath(__file__))
samba_path = os.path.realpath(os.path.join(this_path, '../../../'))
ext_path = os.path.join(samba_path, 'python/samba/gp_sec_ext.py')
ext_guid = '{827D319E-6EAC-11D2-A4EA-00C04F79F83A}'
ret = register_gp_extension(ext_guid, 'gp_access_ext', ext_path,
smb_conf=self.lp.configfile,
machine=True, user=False)
self.assertTrue(ret, 'Failed to register a gp ext')
gp_exts = list_gp_extensions(self.lp.configfile)
self.assertTrue(ext_guid in gp_exts.keys(),
'Failed to list gp exts')
self.assertEqual(gp_exts[ext_guid]['DllName'], ext_path,
'Failed to list gp exts')
unregister_gp_extension(ext_guid)
gp_exts = list_gp_extensions(self.lp.configfile)
self.assertTrue(ext_guid not in gp_exts.keys(),
'Failed to unregister gp exts')
self.assertTrue(check_guid(ext_guid), 'Failed to parse valid guid')
self.assertFalse(check_guid('AAAAAABBBBBBBCCC'), 'Parsed invalid guid')
lp, parser = parse_gpext_conf(self.lp.configfile)
self.assertTrue(lp and parser, 'parse_gpext_conf() invalid return')
parser.add_section('test_section')
parser.set('test_section', 'test_var', ext_guid)
atomic_write_conf(lp, parser)
lp, parser = parse_gpext_conf(self.lp.configfile)
self.assertTrue('test_section' in parser.sections(),
'test_section not found in gpext.conf')
self.assertEqual(parser.get('test_section', 'test_var'), ext_guid,
'Failed to find test variable in gpext.conf')
parser.remove_section('test_section')
atomic_write_conf(lp, parser)
def test_gp_log_get_applied(self):
local_path = self.lp.get('path', 'sysvol')
guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
'{6AC1786C-016F-11D2-945F-00C04FB984F9}']
gpofile = '%s/' + realm + '/Policies/%s/MACHINE/Microsoft/' \
'Windows NT/SecEdit/GptTmpl.inf'
stage = '[System Access]\nMinimumPasswordAge = 998\n'
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
ret = stage_file(gpttmpl, stage)
self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)
ret = gpupdate_force(self.lp)
self.assertEqual(ret, 0, 'gpupdate force failed')
gp_db = store.get_gplog(self.dc_account)
applied_guids = gp_db.get_applied_guids()
self.assertEqual(len(applied_guids), 2, 'The guids were not found')
self.assertIn(guids[0], applied_guids,
'%s not in applied guids' % guids[0])
self.assertIn(guids[1], applied_guids,
'%s not in applied guids' % guids[1])
applied_settings = gp_db.get_applied_settings(applied_guids)
for policy in applied_settings:
self.assertIn('System Access', policy[1],
'System Access policies not set')
self.assertIn('minPwdAge', policy[1]['System Access'],
'minPwdAge policy not set')
if policy[0] == guids[0]:
self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
days2rel_nttime(1),
'minPwdAge policy not set')
elif policy[0] == guids[1]:
self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
days2rel_nttime(998),
'minPwdAge policy not set')
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.dc_account)
del_gpos = get_deleted_gpos_list(gp_db, gpos[:-1])
self.assertEqual(len(del_gpos), 1, 'Returned delete gpos is incorrect')
self.assertEqual(guids[-1], del_gpos[0][0],
'GUID for delete gpo is incorrect')
self.assertIn('System Access', del_gpos[0][1],
'System Access policies not set for removal')
self.assertIn('minPwdAge', del_gpos[0][1]['System Access'],
'minPwdAge policy not set for removal')
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
unstage_file(gpttmpl)
ret = gpupdate_unapply(self.lp)
self.assertEqual(ret, 0, 'gpupdate unapply failed')
def test_process_group_policy(self):
local_path = self.lp.cache_path('gpo_cache')
guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
'{6AC1786C-016F-11D2-945F-00C04FB984F9}']
gpofile = '%s/' + policies + '/%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF'
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_krb_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
stage = '[Kerberos Policy]\nMaxTicketAge = %d\n'
opts = [100, 200]
for i in range(0, 2):
gpttmpl = gpofile % (local_path, guids[i])
ret = stage_file(gpttmpl, stage % opts[i])
self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)
# Process all gpos
ext.process_group_policy([], gpos)
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, opts[1], 'Higher priority policy was not set')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, None, 'MaxTicketAge should not have applied')
# Process just the first gpo
ext.process_group_policy([], gpos[:-1])
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, opts[0], 'Lower priority policy was not set')
# Remove policy
ext.process_group_policy(del_gpos, [])
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
unstage_file(gpttmpl)
def test_gp_scripts(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_scripts_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
reg_key = b'Software\\Policies\\Samba\\Unix Settings'
sections = { b'%s\\Daily Scripts' % reg_key : '.cron.daily',
b'%s\\Monthly Scripts' % reg_key : '.cron.monthly',
b'%s\\Weekly Scripts' % reg_key : '.cron.weekly',
b'%s\\Hourly Scripts' % reg_key : '.cron.hourly' }
for keyname in sections.keys():
# Stage the Registry.pol file with test data
stage = preg.file()
e = preg.entry()
e.keyname = keyname
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
stage.num_entries = 1
stage.entries = [e]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory(sections[keyname]) as dname:
ext.process_group_policy([], gpos, dname)
scripts = os.listdir(dname)
self.assertEquals(len(scripts), 1,
'The %s script was not created' % keyname.decode())
out, _ = Popen([os.path.join(dname, scripts[0])], stdout=PIPE).communicate()
self.assertIn(b'hello world', out,
'%s script execution failed' % keyname.decode())
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_gp_sudoers(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_sudoers_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
stage.num_entries = 1
stage.entries = [e]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
sudoers = os.listdir(dname)
self.assertEquals(len(sudoers), 1, 'The sudoer file was not created')
self.assertIn(e.data,
open(os.path.join(dname, sudoers[0]), 'r').read(),
'The sudoers entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_vgp_sudoers(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/SUDO/SUDOERSCONFIGURATION/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_sudoers_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
sudoers_entry = etree.Element('sudoers_entry')
command = etree.Element('command')
command.text = 'ALL'
sudoers_entry.append(command)
user = etree.Element('user')
user.text = 'ALL'
sudoers_entry.append(user)
principal_list = etree.Element('listelement')
principal = etree.Element('principal')
principal.text = 'fakeu'
principal.attrib['type'] = 'user'
group = etree.Element('principal')
group.text = 'fakeg'
group.attrib['type'] = 'group'
principal_list.append(principal)
principal_list.append(group)
sudoers_entry.append(principal_list)
data.append(sudoers_entry)
# Ensure an empty principal doesn't cause a crash
sudoers_entry = etree.SubElement(data, 'sudoers_entry')
command = etree.SubElement(sudoers_entry, 'command')
command.text = 'ALL'
user = etree.SubElement(sudoers_entry, 'user')
user.text = 'ALL'
# Ensure having dispersed principals still works
sudoers_entry = etree.SubElement(data, 'sudoers_entry')
command = etree.SubElement(sudoers_entry, 'command')
command.text = 'ALL'
user = etree.SubElement(sudoers_entry, 'user')
user.text = 'ALL'
listelement = etree.SubElement(sudoers_entry, 'listelement')
principal = etree.SubElement(listelement, 'principal')
principal.text = 'fakeu2'
principal.attrib['type'] = 'user'
listelement = etree.SubElement(sudoers_entry, 'listelement')
group = etree.SubElement(listelement, 'principal')
group.text = 'fakeg2'
group.attrib['type'] = 'group'
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
data = 'fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL'
data2 = 'fakeu2,fakeg2% ALL=(ALL) NOPASSWD: ALL'
data_no_principal = 'ALL ALL=(ALL) NOPASSWD: ALL'
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
sudoers = os.listdir(dname)
self.assertEquals(len(sudoers), 3, 'The sudoer file was not created')
output = open(os.path.join(dname, sudoers[0]), 'r').read() + \
open(os.path.join(dname, sudoers[1]), 'r').read() + \
open(os.path.join(dname, sudoers[2]), 'r').read()
self.assertIn(data, output,
'The sudoers entry was not applied')
self.assertIn(data2, output,
'The sudoers entry was not applied')
self.assertIn(data_no_principal, output,
'The sudoers entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_gp_inf_ext_utf(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ext = gp_inf_ext(logger, self.lp, machine_creds, store)
test_data = '[Kerberos Policy]\nMaxTicketAge = 99\n'
with NamedTemporaryFile() as f:
with codecs.open(f.name, 'w', 'utf-16') as w:
w.write(test_data)
try:
inf_conf = ext.read(f.name)
except UnicodeDecodeError:
self.fail('Failed to parse utf-16')
self.assertIn('Kerberos Policy', inf_conf.keys(),
'Kerberos Policy was not read from the file')
self.assertEquals(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
'99', 'MaxTicketAge was not read from the file')
with NamedTemporaryFile() as f:
with codecs.open(f.name, 'w', 'utf-8') as w:
w.write(test_data)
inf_conf = ext.read(f.name)
self.assertIn('Kerberos Policy', inf_conf.keys(),
'Kerberos Policy was not read from the file')
self.assertEquals(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
'99', 'MaxTicketAge was not read from the file')
def test_rsop(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
local_path = self.lp.cache_path('gpo_cache')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
gp_extensions = []
gp_extensions.append(gp_krb_ext)
gp_extensions.append(gp_scripts_ext)
gp_extensions.append(gp_sudoers_ext)
gp_extensions.append(gp_smb_conf_ext)
gp_extensions.append(gp_msgs_ext)
# Create registry stage data
reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
reg_stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e2.type = 1
e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
e3 = preg.entry()
e3.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
e3.type = 4
e3.data = 1
e3.valuename = 'apply group policies'
e4 = preg.entry()
e4.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e4.valuename = b'issue'
e4.type = 1
e4.data = b'Welcome to \\s \\r \\l'
reg_stage.num_entries = 4
reg_stage.entries = [e, e2, e3, e4]
# Create krb stage date
gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF')
krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n' \
'[System Access]\nMinimumPasswordAge = 998\n'
for g in [g for g in gpos if g.file_sys_path]:
ret = stage_file(gpofile % g.name, krb_stage)
self.assertTrue(ret, 'Could not create the target %s' %
(gpofile % g.name))
ret = stage_file(reg_pol % g.name, ndr_pack(reg_stage))
self.assertTrue(ret, 'Could not create the target %s' %
(reg_pol % g.name))
for ext in gp_extensions:
ext = ext(logger, self.lp, machine_creds, store)
ret = ext.rsop(g)
self.assertEquals(len(ret.keys()), 1,
'A single policy should have been displayed')
# Check the Security Extension
if type(ext) == gp_krb_ext:
self.assertIn('Kerberos Policy', ret.keys(),
'Kerberos Policy not found')
self.assertIn('MaxTicketAge', ret['Kerberos Policy'],
'MaxTicketAge setting not found')
self.assertEquals(ret['Kerberos Policy']['MaxTicketAge'], '99',
'MaxTicketAge was not set to 99')
# Check the Scripts Extension
elif type(ext) == gp_scripts_ext:
self.assertIn('Daily Scripts', ret.keys(),
'Daily Scripts not found')
self.assertIn('echo hello world', ret['Daily Scripts'],
'Daily script was not created')
# Check the Sudoers Extension
elif type(ext) == gp_sudoers_ext:
self.assertIn('Sudo Rights', ret.keys(),
'Sudoers not found')
self.assertIn('fakeu ALL=(ALL) NOPASSWD: ALL',
ret['Sudo Rights'],
'Sudoers policy not created')
# Check the smb.conf Extension
elif type(ext) == gp_smb_conf_ext:
self.assertIn('smb.conf', ret.keys(),
'apply group policies was not applied')
self.assertIn(e3.valuename, ret['smb.conf'],
'apply group policies was not applied')
self.assertEquals(ret['smb.conf'][e3.valuename], e3.data,
'apply group policies was not set')
# Check the Messages Extension
elif type(ext) == gp_msgs_ext:
self.assertIn('/etc/issue', ret,
'Login Prompt Message not applied')
self.assertEquals(ret['/etc/issue'], e4.data,
'Login Prompt Message not set')
unstage_file(gpofile % g.name)
unstage_file(reg_pol % g.name)
# Check that a call to gpupdate --rsop also succeeds
ret = rsop(self.lp)
self.assertEquals(ret, 0, 'gpupdate --rsop failed!')
def test_gp_unapply(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
gp_extensions = []
gp_extensions.append(gp_krb_ext)
gp_extensions.append(gp_scripts_ext)
gp_extensions.append(gp_sudoers_ext)
# Create registry stage data
reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
reg_stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e2.type = 1
e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
reg_stage.num_entries = 2
reg_stage.entries = [e, e2]
# Create krb stage date
gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF')
krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n'
ret = stage_file(gpofile % guid, krb_stage)
self.assertTrue(ret, 'Could not create the target %s' %
(gpofile % guid))
ret = stage_file(reg_pol % guid, ndr_pack(reg_stage))
self.assertTrue(ret, 'Could not create the target %s' %
(reg_pol % guid))
# Process all gpos, with temp output directory
remove = []
with TemporaryDirectory() as dname:
for ext in gp_extensions:
ext = ext(logger, self.lp, machine_creds, store)
if type(ext) == gp_krb_ext:
ext.process_group_policy([], gpos)
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, 99, 'Kerberos policy was not set')
elif type(ext) in [gp_scripts_ext, gp_sudoers_ext]:
ext.process_group_policy([], gpos, dname)
gp_db = store.get_gplog(machine_creds.get_username())
applied_settings = gp_db.get_applied_settings([guid])
for _, fname in applied_settings[-1][-1][str(ext)].items():
self.assertIn(dname, fname,
'Test file not created in tmp dir')
self.assertTrue(os.path.exists(fname),
'Test file not created')
remove.append(fname)
# Unapply policy, and ensure policies are removed
gpupdate_unapply(self.lp)
for fname in remove:
self.assertFalse(os.path.exists(fname),
'Unapply did not remove test file')
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertNotEqual(ret, 99, 'Kerberos policy was not unapplied')
unstage_file(gpofile % guid)
unstage_file(reg_pol % guid)
def test_smb_conf_ext(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
entries = []
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\template homedir'
e.type = 1
e.data = '/home/samba/%D/%U'
e.valuename = 'template homedir'
entries.append(e)
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
e.type = 4
e.data = 1
e.valuename = 'apply group policies'
entries.append(e)
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\ldap timeout'
e.type = 4
e.data = 9999
e.valuename = 'ldap timeout'
entries.append(e)
stage = preg.file()
stage.num_entries = len(entries)
stage.entries = entries
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Failed to create the Registry.pol file')
with NamedTemporaryFile(suffix='_smb.conf') as f:
copyfile(self.lp.configfile, f.name)
lp = LoadParm(f.name)
# Initialize the group policy extension
ext = gp_smb_conf_ext(logger, lp, machine_creds, store)
ext.process_group_policy([], gpos)
lp = LoadParm(f.name)
template_homedir = lp.get('template homedir')
self.assertEquals(template_homedir, '/home/samba/%D/%U',
'template homedir was not applied')
apply_group_policies = lp.get('apply group policies')
self.assertTrue(apply_group_policies,
'apply group policies was not applied')
ldap_timeout = lp.get('ldap timeout')
self.assertEquals(ldap_timeout, 9999, 'ldap timeout was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
lp = LoadParm(f.name)
template_homedir = lp.get('template homedir')
self.assertEquals(template_homedir, self.lp.get('template homedir'),
'template homedir was not unapplied')
apply_group_policies = lp.get('apply group policies')
self.assertEquals(apply_group_policies, self.lp.get('apply group policies'),
'apply group policies was not unapplied')
ldap_timeout = lp.get('ldap timeout')
self.assertEquals(ldap_timeout, self.lp.get('ldap timeout'),
'ldap timeout was not unapplied')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_gp_motd(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_msgs_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
stage = preg.file()
e1 = preg.entry()
e1.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e1.valuename = b'motd'
e1.type = 1
e1.data = b'Have a lot of fun!'
stage.num_entries = 2
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e2.valuename = b'issue'
e2.type = 1
e2.data = b'Welcome to \\s \\r \\l'
stage.entries = [e1, e2]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
motd_file = os.path.join(dname, 'motd')
self.assertTrue(os.path.exists(motd_file),
'Message of the day file not created')
data = open(motd_file, 'r').read()
self.assertEquals(data, e1.data, 'Message of the day not applied')
issue_file = os.path.join(dname, 'issue')
self.assertTrue(os.path.exists(issue_file),
'Login Prompt Message file not created')
data = open(issue_file, 'r').read()
self.assertEquals(data, e2.data, 'Login Prompt Message not applied')
# Unapply policy, and ensure the test files are removed
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
data = open(motd_file, 'r').read()
self.assertFalse(data, 'Message of the day file not removed')
data = open(issue_file, 'r').read()
self.assertFalse(data, 'Login Prompt Message file not removed')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_vgp_symlink(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/SYMLINK/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_symlink_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
with TemporaryDirectory() as dname:
test_source = os.path.join(dname, 'test.source')
test_target = os.path.join(dname, 'test.target')
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
file_properties = etree.Element('file_properties')
source = etree.Element('source')
source.text = test_source
file_properties.append(source)
target = etree.Element('target')
target.text = test_target
file_properties.append(target)
data.append(file_properties)
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Create test source
test_source_data = 'hello world!'
with open(test_source, 'w') as w:
w.write(test_source_data)
# Process all gpos, with temp output directory
ext.process_group_policy([], gpos)
self.assertTrue(os.path.exists(test_target),
'The test symlink was not created')
self.assertTrue(os.path.islink(test_target),
'The test file is not a symlink')
self.assertIn(test_source_data, open(test_target, 'r').read(),
'Reading from symlink does not produce source data')
# Unapply the policy, ensure removal
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertFalse(os.path.exists(test_target),
'The test symlink was not delete')
# Verify RSOP
ret = ext.rsop([g for g in gpos if g.name == guid][0])
self.assertIn('ln -s %s %s' % (test_source, test_target),
list(ret.values())[0])
# Unstage the manifest.xml file
unstage_file(manifest)
def test_vgp_files(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/FILES/MANIFEST.XML')
source_file = os.path.join(os.path.dirname(manifest), 'TEST.SOURCE')
source_data = '#!/bin/sh\necho hello world'
ret = stage_file(source_file, source_data)
self.assertTrue(ret, 'Could not create the target %s' % source_file)
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_files_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
with TemporaryDirectory() as dname:
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
file_properties = etree.SubElement(data, 'file_properties')
source = etree.SubElement(file_properties, 'source')
source.text = os.path.basename(source_file).lower()
target = etree.SubElement(file_properties, 'target')
target.text = os.path.join(dname, 'test.target')
user = etree.SubElement(file_properties, 'user')
user.text = pwd.getpwuid(os.getuid()).pw_name
group = etree.SubElement(file_properties, 'group')
group.text = grp.getgrgid(os.getgid()).gr_name
# Request permissions of 755
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'user')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'write')
etree.SubElement(permissions, 'execute')
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'group')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'execute')
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'other')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'execute')
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
ext.process_group_policy([], gpos)
self.assertTrue(os.path.exists(target.text),
'The target file does not exist')
self.assertEquals(os.stat(target.text).st_mode & 0o777, 0o755,
'The target file permissions are incorrect')
self.assertEquals(open(target.text).read(), source_data,
'The target file contents are incorrect')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertFalse(os.path.exists(target.text),
'The target file was not removed')
# Test rsop
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(target.text, list(ret.values())[0][0],
'The target file was not listed by rsop')
self.assertIn('-rwxr-xr-x', list(ret.values())[0][0],
'The target permissions were not listed by rsop')
# Unstage the manifest and source files
unstage_file(manifest)
unstage_file(source_file)
def test_vgp_openssh(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/SSHCFG/SSHD/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_openssh_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
configfile = etree.Element('configfile')
configsection = etree.Element('configsection')
sectionname = etree.Element('sectionname')
configsection.append(sectionname)
kvpair = etree.Element('keyvaluepair')
key = etree.Element('key')
key.text = 'AddressFamily'
kvpair.append(key)
value = etree.Element('value')
value.text = 'inet6'
kvpair.append(value)
configsection.append(kvpair)
configfile.append(configsection)
data.append(configfile)
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
data = 'AddressFamily inet6'
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
conf = os.listdir(dname)
self.assertEquals(len(conf), 1, 'The conf file was not created')
gp_cfg = os.path.join(dname, conf[0])
self.assertIn(data, open(gp_cfg, 'r').read(),
'The sshd_config entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
self.assertFalse(os.path.exists(gp_cfg),
'Unapply failed to cleanup config')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_vgp_startup_scripts(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/SCRIPTS/STARTUP/MANIFEST.XML')
test_script = os.path.join(os.path.dirname(manifest), 'TEST.SH')
test_data = '#!/bin/sh\necho $@ hello world'
ret = stage_file(test_script, test_data)
self.assertTrue(ret, 'Could not create the target %s' % test_script)
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_startup_scripts_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
listelement = etree.SubElement(data, 'listelement')
script = etree.SubElement(listelement, 'script')
script.text = os.path.basename(test_script).lower()
parameters = etree.SubElement(listelement, 'parameters')
parameters.text = '-n'
hash = etree.SubElement(listelement, 'hash')
hash.text = \
hashlib.md5(open(test_script, 'rb').read()).hexdigest().upper()
run_as = etree.SubElement(listelement, 'run_as')
run_as.text = 'root'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 1,
'The target script was not created')
entry = '@reboot %s %s %s' % (run_as.text, test_script,
parameters.text)
self.assertIn(entry,
open(os.path.join(dname, files[0]), 'r').read(),
'The test entry was not found')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
files = os.listdir(dname)
self.assertEquals(len(files), 0,
'The target script was not removed')
# Test rsop
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(entry, list(ret.values())[0][0],
'The target entry was not listed by rsop')
# Unstage the manifest.xml and script files
unstage_file(manifest)
unstage_file(test_script)
# Stage the manifest.xml file for run once scripts
etree.SubElement(listelement, 'run_once')
run_as.text = pwd.getpwuid(os.getuid()).pw_name
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
# A run once script will be executed immediately,
# instead of creating a cron job
with TemporaryDirectory() as dname:
test_file = '%s/TESTING.txt' % dname
test_data = '#!/bin/sh\ntouch %s' % test_file
ret = stage_file(test_script, test_data)
self.assertTrue(ret, 'Could not create the target %s' % test_script)
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 1,
'The test file was not created')
self.assertEquals(files[0], os.path.basename(test_file),
'The test file was not created')
# Unlink the test file and ensure that processing
# policy again does not recreate it.
os.unlink(test_file)
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 0,
'The test file should not have been created')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
# Test rsop
entry = 'Run once as: %s `%s %s`' % (run_as.text, test_script,
parameters.text)
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(entry, list(ret.values())[0][0],
'The target entry was not listed by rsop')
# Unstage the manifest.xml and script files
unstage_file(manifest)
unstage_file(test_script)
def test_vgp_motd(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/MOTD/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_motd_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
filename = etree.SubElement(data, 'filename')
filename.text = 'motd'
text = etree.SubElement(data, 'text')
text.text = 'This is the message of the day'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with NamedTemporaryFile() as f:
ext.process_group_policy([], gpos, f.name)
self.assertEquals(open(f.name, 'r').read(), text.text,
'The motd was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], f.name)
self.assertNotEquals(open(f.name, 'r').read(), text.text,
'The motd was not unapplied')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_vgp_issue(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/ISSUE/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_issue_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
filename = etree.SubElement(data, 'filename')
filename.text = 'issue'
text = etree.SubElement(data, 'text')
text.text = 'Welcome to Samba!'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with NamedTemporaryFile() as f:
ext.process_group_policy([], gpos, f.name)
self.assertEquals(open(f.name, 'r').read(), text.text,
'The issue was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], f.name)
self.assertNotEquals(open(f.name, 'r').read(), text.text,
'The issue was not unapplied')
# Unstage the manifest.xml file
unstage_file(manifest)
def test_vgp_access(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
allow = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/VAS/HOSTACCESSCONTROL/ALLOW/MANIFEST.XML')
deny = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/VAS/HOSTACCESSCONTROL/DENY/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_access_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml allow file
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '2'
apply_mode = etree.SubElement(policysetting, 'apply_mode')
apply_mode.text = 'merge'
data = etree.SubElement(policysetting, 'data')
# Add an allowed user
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'USER'
entry = etree.SubElement(listelement, 'entry')
entry.text = 'goodguy@%s' % realm
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'goodguy'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'user'
# Add an allowed group
groupattr = etree.SubElement(data, 'groupattr')
groupattr.text = 'samAccountName'
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'GROUP'
entry = etree.SubElement(listelement, 'entry')
entry.text = '%s\\goodguys' % realm
dn = etree.SubElement(listelement, 'dn')
dn.text = 'CN=goodguys,CN=Users,%s' % base_dn
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'goodguys'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'group'
ret = stage_file(allow, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % allow)
# Stage the manifest.xml deny file
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '2'
apply_mode = etree.SubElement(policysetting, 'apply_mode')
apply_mode.text = 'merge'
data = etree.SubElement(policysetting, 'data')
# Add a denied user
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'USER'
entry = etree.SubElement(listelement, 'entry')
entry.text = 'badguy@%s' % realm
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'badguy'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'user'
# Add a denied group
groupattr = etree.SubElement(data, 'groupattr')
groupattr.text = 'samAccountName'
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'GROUP'
entry = etree.SubElement(listelement, 'entry')
entry.text = '%s\\badguys' % realm
dn = etree.SubElement(listelement, 'dn')
dn.text = 'CN=badguys,CN=Users,%s' % base_dn
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'badguys'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'group'
ret = stage_file(deny, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % deny)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
conf = os.listdir(dname)
self.assertEquals(len(conf), 1, 'The conf file was not created')
gp_cfg = os.path.join(dname, conf[0])
# Check the access config for the correct access.conf entries
print('Config file %s found' % gp_cfg)
data = open(gp_cfg, 'r').read()
self.assertIn('+:%s\\goodguy:ALL' % realm, data)
self.assertIn('+:%s\\goodguys:ALL' % realm, data)
self.assertIn('-:%s\\badguy:ALL' % realm, data)
self.assertIn('-:%s\\badguys:ALL' % realm, data)
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
self.assertFalse(os.path.exists(gp_cfg),
'Unapply failed to cleanup config')
# Unstage the manifest.pol files
unstage_file(allow)
unstage_file(deny)
def test_gnome_settings(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_gnome_settings_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
parser = GPPolParser()
parser.load_xml(etree.fromstring(gnome_test_reg_pol.strip()))
ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
local_db = os.path.join(dname, 'etc/dconf/db/local.d')
self.assertTrue(os.path.isdir(local_db),
'Local db dir not created')
def db_check(name, data, count=1):
db = glob(os.path.join(local_db, '*-%s' % name))
self.assertEquals(len(db), count, '%s not created' % name)
file_contents = ConfigParser()
file_contents.read(db)
for key in data.keys():
self.assertTrue(file_contents.has_section(key),
'Section %s not found' % key)
options = data[key]
for k, v in options.items():
v_content = file_contents.get(key, k)
self.assertEqual(v_content, v,
'%s: %s != %s' % (key, v_content, v))
def del_db_check(name):
db = glob(os.path.join(local_db, '*-%s' % name))
self.assertEquals(len(db), 0, '%s not deleted' % name)
locks = os.path.join(local_db, 'locks')
self.assertTrue(os.path.isdir(local_db), 'Locks dir not created')
def lock_check(name, items, count=1):
lock = glob(os.path.join(locks, '*%s' % name))
self.assertEquals(len(lock), count,
'%s lock not created' % name)
file_contents = []
for i in range(count):
file_contents.extend(open(lock[i], 'r').read().split('\n'))
for data in items:
self.assertIn(data, file_contents,
'%s lock not created' % data)
def del_lock_check(name):
lock = glob(os.path.join(locks, '*%s' % name))
self.assertEquals(len(lock), 0, '%s lock not deleted' % name)
# Check the user profile
user_profile = os.path.join(dname, 'etc/dconf/profile/user')
self.assertTrue(os.path.exists(user_profile),
'User profile not created')
# Enable the compose key
data = { 'org/gnome/desktop/input-sources':
{ 'xkb-options': '[\'compose:ralt\']' }
}
db_check('input-sources', data)
items = ['/org/gnome/desktop/input-sources/xkb-options']
lock_check('input-sources', items)
# Dim screen when user is idle
data = { 'org/gnome/settings-daemon/plugins/power':
{ 'idle-dim': 'true',
'idle-brightness': '30'
}
}
db_check('power', data)
data = { 'org/gnome/desktop/session':
{ 'idle-delay': 'uint32 300' }
}
db_check('session', data)
items = ['/org/gnome/settings-daemon/plugins/power/idle-dim',
'/org/gnome/settings-daemon/plugins/power/idle-brightness',
'/org/gnome/desktop/session/idle-delay']
lock_check('power-saving', items)
# Lock down specific settings
bg_locks = ['/org/gnome/desktop/background/picture-uri',
'/org/gnome/desktop/background/picture-options',
'/org/gnome/desktop/background/primary-color',
'/org/gnome/desktop/background/secondary-color']
lock_check('group-policy', bg_locks)
# Lock down enabled extensions
data = { 'org/gnome/shell':
{ 'enabled-extensions':
'[\'myextension1@myname.example.com\', \'myextension2@myname.example.com\']',
'development-tools': 'false' }
}
db_check('extensions', data)
items = [ '/org/gnome/shell/enabled-extensions',
'/org/gnome/shell/development-tools' ]
lock_check('extensions', items)
# Disallow login using a fingerprint
data = { 'org/gnome/login-screen':
{ 'enable-fingerprint-authentication': 'false' }
}
db_check('fingerprintreader', data)
items = ['/org/gnome/login-screen/enable-fingerprint-authentication']
lock_check('fingerprintreader', items)
# Disable user logout and user switching
data = { 'org/gnome/desktop/lockdown':
{ 'disable-log-out': 'true',
'disable-user-switching': 'true' }
}
db_check('logout', data, 2)
items = ['/org/gnome/desktop/lockdown/disable-log-out',
'/org/gnome/desktop/lockdown/disable-user-switching']
lock_check('logout', items, 2)
# Disable repartitioning
actions = os.path.join(dname, 'etc/share/polkit-1/actions')
udisk2 = glob(os.path.join(actions,
'org.freedesktop.[u|U][d|D]isks2.policy'))
self.assertEquals(len(udisk2), 1, 'udisk2 policy not created')
udisk2_tree = etree.fromstring(open(udisk2[0], 'r').read())
actions = udisk2_tree.findall('action')
md = 'org.freedesktop.udisks2.modify-device'
action = [a for a in actions if a.attrib['id'] == md]
self.assertEquals(len(action), 1, 'modify-device not found')
defaults = action[0].find('defaults')
self.assertTrue(defaults is not None,
'modify-device defaults not found')
allow_any = defaults.find('allow_any').text
self.assertEquals(allow_any, 'no',
'modify-device allow_any not set to no')
allow_inactive = defaults.find('allow_inactive').text
self.assertEquals(allow_inactive, 'no',
'modify-device allow_inactive not set to no')
allow_active = defaults.find('allow_active').text
self.assertEquals(allow_active, 'yes',
'modify-device allow_active not set to yes')
# Disable printing
data = { 'org/gnome/desktop/lockdown':
{ 'disable-printing': 'true' }
}
db_check('printing', data)
items = ['/org/gnome/desktop/lockdown/disable-printing']
lock_check('printing', items)
# Disable file saving
data = { 'org/gnome/desktop/lockdown':
{ 'disable-save-to-disk': 'true' }
}
db_check('filesaving', data)
items = ['/org/gnome/desktop/lockdown/disable-save-to-disk']
lock_check('filesaving', items)
# Disable command-line access
data = { 'org/gnome/desktop/lockdown':
{ 'disable-command-line': 'true' }
}
db_check('cmdline', data)
items = ['/org/gnome/desktop/lockdown/disable-command-line']
lock_check('cmdline', items)
# Allow or disallow online accounts
data = { 'org/gnome/online-accounts':
{ 'whitelisted-providers': '[\'google\']' }
}
db_check('goa', data)
items = ['/org/gnome/online-accounts/whitelisted-providers']
lock_check('goa', items)
# Verify RSOP does not fail
ext.rsop([g for g in gpos if g.name == guid][0])
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
del_db_check('input-sources')
del_lock_check('input-sources')
del_db_check('power')
del_db_check('session')
del_lock_check('power-saving')
del_lock_check('group-policy')
del_db_check('extensions')
del_lock_check('extensions')
del_db_check('fingerprintreader')
del_lock_check('fingerprintreader')
del_db_check('logout')
del_lock_check('logout')
actions = os.path.join(dname, 'etc/share/polkit-1/actions')
udisk2 = glob(os.path.join(actions,
'org.freedesktop.[u|U][d|D]isks2.policy'))
self.assertEquals(len(udisk2), 0, 'udisk2 policy not deleted')
del_db_check('printing')
del_lock_check('printing')
del_db_check('filesaving')
del_lock_check('filesaving')
del_db_check('cmdline')
del_lock_check('cmdline')
del_db_check('goa')
del_lock_check('goa')
# Unstage the Registry.pol file
unstage_file(reg_pol)
|
LAHORE: International Cricket Council (ICC) 2015 World Cup trophy will reach in Pakistan on Tuesday September 16.
ISLAMABAD: Pakistan Prime Minister Nawaz Sharif formally received, Wednesday, India’s Prime Minister-elect Narendra Modi’s invitation to participate in oath taking ceremony.
LAHORE: Former Chief Justice of Pakistan, Iftakhar Mohammad Chaudhry on Saturday has said that confrontation among the institutions can be harmful for the country.
Islamabad: The 50th JF-17 thunder aircraft has formally inducted into Pakistan Air Force at a ceremony at Pakistan Aeronautical complex Kamra today (Wednesday).
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from marionette_driver import By, expected, Wait
from firefox_puppeteer.testcases import FirefoxTestCase
class TestSafeBrowsingNotificationBar(FirefoxTestCase):
def setUp(self):
FirefoxTestCase.setUp(self)
self.test_data = [
# Unwanted software URL
{
# First two properties are not needed,
# since these errors are not reported
'button_property': None,
'report_page': None,
'unsafe_page': 'https://www.itisatrap.org/firefox/unwanted.html'
},
# Phishing URL info
{
'button_property': 'safebrowsing.notAForgeryButton.label',
'report_page': 'www.google.com/safebrowsing/report_error',
'unsafe_page': 'https://www.itisatrap.org/firefox/its-a-trap.html'
},
# Malware URL object
{
'button_property': 'safebrowsing.notAnAttackButton.label',
'report_page': 'www.stopbadware.org',
'unsafe_page': 'https://www.itisatrap.org/firefox/its-an-attack.html'
}
]
self.prefs.set_pref('browser.safebrowsing.enabled', True)
self.prefs.set_pref('browser.safebrowsing.malware.enabled', True)
# Give the browser a little time, because SafeBrowsing.jsm takes a while
# between start up and adding the example urls to the db.
# hg.mozilla.org/mozilla-central/file/46aebcd9481e/browser/base/content/browser.js#l1194
time.sleep(3)
# TODO: Bug 1139544: While we don't have a reliable way to close the safe browsing
# notification bar when a test fails, run this test in a new tab.
self.browser.tabbar.open_tab()
def tearDown(self):
try:
self.utils.remove_perms('https://www.itisatrap.org', 'safe-browsing')
self.browser.tabbar.close_all_tabs([self.browser.tabbar.tabs[0]])
finally:
FirefoxTestCase.tearDown(self)
def test_notification_bar(self):
with self.marionette.using_context('content'):
for item in self.test_data:
button_property = item['button_property']
report_page, unsafe_page = item['report_page'], item['unsafe_page']
# Navigate to the unsafe page
# Check "ignore warning" link then notification bar's "not badware" button
# Only do this if feature supports it
if button_property is not None:
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_not_badware_button(button_property, report_page)
# Return to the unsafe page
# Check "ignore warning" link then notification bar's "get me out" button
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_get_me_out_of_here_button()
# Return to the unsafe page
# Check "ignore warning" link then notification bar's "X" button
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_x_button()
def check_ignore_warning_button(self, unsafe_page):
button = self.marionette.find_element(By.ID, 'ignoreWarningButton')
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
expected.element_present(By.ID, 'main-feature'))
self.assertEquals(self.marionette.get_url(), self.browser.get_final_url(unsafe_page))
# Clean up here since the permission gets set in this function
self.utils.remove_perms('https://www.itisatrap.org', 'safe-browsing')
# Check the not a forgery or attack button in the notification bar
def check_not_badware_button(self, button_property, report_page):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
label = self.browser.get_property(button_property)
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'label': label}))
self.browser.tabbar.open_tab(lambda _: button.click())
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
lambda mn: report_page in mn.get_url())
with self.marionette.using_context('chrome'):
self.browser.tabbar.close_tab()
def check_get_me_out_of_here_button(self):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
label = self.browser.get_property('safebrowsing.getMeOutOfHereButton.label')
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'label': label}))
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
lambda mn: self.browser.default_homepage in mn.get_url())
def check_x_button(self):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'value': 'blocked-badware-page'})
.find_element('anon attribute',
{'class': 'messageCloseButton close-icon tabbable'}))
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
expected.element_stale(button))
|
Know-How ?Nuri Gokalp Yazar has specialized knowledge about the following topics.
Interests ?Nuri Gokalp Yazar is interested in the following topics.
Organisations ?Nuri Gokalp Yazar is member of the following organisations.
Professional Experience 7 Years / 10 MonthNuri Gokalp Yazar has 7 years and 10 month professional experience in 2 different positions.
Mechanical Maintenance Chief Engineer, Engineer (Full-time employee)Nuri Gokalp Yazar works for Atlas Energy Inc. as Mechanical Maintenance Chief Engineer since January 2014.
Mechanical Maintenance Engineer, Engineer (Full-time employee)Nuri Gokalp Yazar worked for Erdemir Mining Industry and Trade Inc. as Mechanical Maintenance Engineer from June 2011 to January 2014.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.